prompt (large_string, lengths 70–991k) | completion (large_string, lengths 0–1.02k) |
---|---|
<|file_name|>conf.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# shablona documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 14 10:29:06 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# General information about the project.
project = 'shablona'
copyright = '2015, Ariel Rokem'
currentdir = os.path.abspath(os.path.dirname(__file__))
ver_file = os.path.join(currentdir, '..', project, 'version.py')
with open(ver_file) as f:
    exec(f.read())
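# The exec() above defines __version__ in this module's namespace.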
source_version = __version__
currentdir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(currentdir, 'tools'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'  # numpydoc requires sphinx >= 1.0
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
sys.path.append(os.path.abspath('sphinxext'))
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.autosummary',
'sphinx.ext.mathjax',
'math_dollar', # has to go before numpydoc
'numpydoc',
'github',
'sphinx_gallery.gen_gallery']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# --- Sphinx Gallery ---
sphinx_gallery_conf = {
# path to your examples scripts
'examples_dirs': '../examples',
# path where to save gallery generated examples
'gallery_dirs': 'auto_examples',
# To auto-generate example sections in the API
'doc_module': ('shablona',),
# Auto-generated mini-galleries go here
'backreferences_dir': 'gen_api'
}
# Automatically generate stub pages for API
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}<|fim▁hole|>
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'**': ['localtoc.html', 'searchbox.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
html_domain_indices = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'shablonadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'shablona.tex', 'shablona Documentation',
'Ariel Rokem', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'shablona', 'shablona Documentation',
['Ariel Rokem'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'shablona', 'shablona Documentation',
'Ariel Rokem', 'shablona', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
texinfo_domain_indices = False
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}<|fim▁end|> | |
<|file_name|>CookieRewriteEngine.java<|end_file_name|><|fim▁begin|>package m.c.m.proxyma.rewrite;
import java.net.URL;<|fim▁hole|>import javax.servlet.http.Cookie;
import m.c.m.proxyma.context.ProxyFolderBean;
import m.c.m.proxyma.context.ProxymaContext;
import m.c.m.proxyma.resource.ProxymaResource;
/**
* <p>
* This class implements the logic of the cookie rewriter engine.<br/>
* It is used by the plugins that perform cookie rewriting.
*
* </p><p>
* NOTE: this software is released under GPL License.
* See the LICENSE of this distribution for more information.
* </p>
*
* @author Marco Casavecchia Morganti (marcolinuz) [marcolinuz-at-gmail.com];
* @version $Id: CookieRewriteEngine.java 176 2010-07-03 09:02:14Z marcolinuz $
*/
public class CookieRewriteEngine {
public CookieRewriteEngine (ProxymaContext context) {
//initialize the logger for this class.
log = context.getLogger();
urlRewriter = new URLRewriteEngine(context);
}
/**
* Masquerades a cookie that comes from a remote host by setting
* its domain to the domain of proxyma and its path to the path
* of the current proxy-folder.
*
* @param cookie the cookie to masquerade
* @param aResource the resource that owns the Cookie
*/
public void masqueradeCookie(Cookie cookie, ProxymaResource aResource) {
//calculate the new values of the Set-Cookie header
URL proxymaRootURL = aResource.getProxymaRootURL();
//Calculate the new Cookie Domain
cookie.setDomain(proxymaRootURL.getHost());
// calculate new path of the cookie
if (cookie.getPath() == null) {
cookie.setPath(urlRewriter.masqueradeURL(aResource.getProxyFolder().getDestinationAsURL().getPath(), aResource));
} else {
String newPath = urlRewriter.masqueradeURL(cookie.getPath(), aResource);
if (newPath.startsWith("/")) {
cookie.setPath(newPath);
} else {
cookie.setPath(urlRewriter.masqueradeURL(aResource.getProxyFolder().getDestinationAsURL().getPath(), aResource));
}
}
//set the new value for the cookie
String newValue = PROXYMA_REWRITTEN_HEADER + cookie.getValue();
cookie.setValue(newValue);
log.finer("Masqueraded Cookie, new path=" + cookie.getPath() + "; new value=" + newValue);
}
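// Illustrative effect (hypothetical values, assuming a proxyma root of
// http://proxy.example.com/ and a proxy-folder "/mail"): a cookie arriving
// with domain=remote.host, path=/webmail, value="abc" leaves as
// domain=proxy.example.com, path=<masqueraded /mail path>,
// value=PROXYMA_REWRITTEN_HEADER + "abc".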
/**
* Rebuilds the original cookie value from a masqueraded one by
* stripping the rewrite marker; the cookie is modified in place.
* @param cookie the cookie to unmasquerade
*/
public void unmasqueradeCookie (Cookie cookie) {
String cookieValue = cookie.getValue();
if (cookieValue.startsWith(PROXYMA_REWRITTEN_HEADER)) {
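// 33 == PROXYMA_REWRITTEN_HEADER.length(): strip the rewrite marker prefix.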
String originalValue = cookieValue.substring(33);
cookie.setValue(originalValue);
log.finer("Unmasqueraded Cookie original value: " + originalValue);
}
}
/**
* The logger for this class
*/
private Logger log = null;
/**
* The url rewriter used to rewrite cookie paths
*/
private URLRewriteEngine urlRewriter = null;
/**
* The header added to the rewritten cookies that can be recognized by the
* preprocessor to restore the original values.
*/
public static final String PROXYMA_REWRITTEN_HEADER = "#%#PROXYMA-NG_REWRITTEN_COOKIE#%#";
}<|fim▁end|> | import java.util.logging.Logger; |
<|file_name|>htmltabledatacellelement.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLTableDataCellElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLTableDataCellElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::document::Document;
use dom::element::HTMLTableDataCellElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmltablecellelement::HTMLTableCellElement;
use dom::node::{Node, ElementNodeTypeId};
use servo_util::str::DOMString;
#[deriving(Encodable)]
pub struct HTMLTableDataCellElement {
pub htmltablecellelement: HTMLTableCellElement,
}
impl HTMLTableDataCellElementDerived for EventTarget {
fn is_htmltabledatacellelement(&self) -> bool {
self.type_id == NodeTargetTypeId(ElementNodeTypeId(HTMLTableDataCellElementTypeId))
}
}
impl HTMLTableDataCellElement {
pub fn new_inherited(localName: DOMString, document: &JSRef<Document>) -> HTMLTableDataCellElement {
HTMLTableDataCellElement {
htmltablecellelement: HTMLTableCellElement::new_inherited(HTMLTableDataCellElementTypeId, localName, document)
}
}
pub fn new(localName: DOMString, document: &JSRef<Document>) -> Temporary<HTMLTableDataCellElement> {
let element = HTMLTableDataCellElement::new_inherited(localName, document);
Node::reflect_node(box element, document, HTMLTableDataCellElementBinding::Wrap)
}<|fim▁hole|>
pub trait HTMLTableDataCellElementMethods {
}
impl Reflectable for HTMLTableDataCellElement {
fn reflector<'a>(&'a self) -> &'a Reflector {
self.htmltablecellelement.reflector()
}
}<|fim▁end|> | } |
<|file_name|>sbcsgroupprober.py<|end_file_name|><|fim▁begin|>######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants, sys
from charsetgroupprober import CharSetGroupProber
from sbcharsetprober import SingleByteCharSetProber
from langcyrillicmodel import Win1251CyrillicModel, Koi8rModel, Latin5CyrillicModel, MacCyrillicModel, Ibm866Model, Ibm855Model
from langgreekmodel import Latin7GreekModel, Win1253GreekModel
from langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
from langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
from langthaimodel import TIS620ThaiModel
from langhebrewmodel import Win1255HebrewModel
from hebrewprober import HebrewProber
class SBCSGroupProber(CharSetGroupProber):
def __init__(self):
CharSetGroupProber.__init__(self)
self._mProbers = [ \
SingleByteCharSetProber(Win1251CyrillicModel),
SingleByteCharSetProber(Koi8rModel),<|fim▁hole|> SingleByteCharSetProber(Ibm866Model),
SingleByteCharSetProber(Ibm855Model),
SingleByteCharSetProber(Latin7GreekModel),
SingleByteCharSetProber(Win1253GreekModel),
SingleByteCharSetProber(Latin5BulgarianModel),
SingleByteCharSetProber(Win1251BulgarianModel),
SingleByteCharSetProber(Latin2HungarianModel),
SingleByteCharSetProber(Win1250HungarianModel),
SingleByteCharSetProber(TIS620ThaiModel),
]
hebrewProber = HebrewProber()
logicalHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, constants.False, hebrewProber)
visualHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, constants.True, hebrewProber)
hebrewProber.set_model_probers(logicalHebrewProber, visualHebrewProber)
self._mProbers.extend([hebrewProber, logicalHebrewProber, visualHebrewProber])
self.reset()<|fim▁end|> | SingleByteCharSetProber(Latin5CyrillicModel),
SingleByteCharSetProber(MacCyrillicModel), |
<|file_name|>valve_acl.py<|end_file_name|><|fim▁begin|>"""Compose ACLs on ports."""
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2018 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from faucet import valve_of
from faucet.conf import InvalidConfigError
def push_vlan(vlan_vid):
"""Push a VLAN tag with optional selection of eth type."""
vid = vlan_vid
vlan_eth_type = None
if isinstance(vlan_vid, dict):
vid = vlan_vid['vid']
if 'eth_type' in vlan_vid:
vlan_eth_type = vlan_vid['eth_type']
if vlan_eth_type is None:
return valve_of.push_vlan_act(vid)
return valve_of.push_vlan_act(vid, eth_type=vlan_eth_type)
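# Usage sketch (illustrative; relies only on the two call forms above):
#   push_vlan(100)                               -> 802.1Q tag with VID 100
#   push_vlan({'vid': 100, 'eth_type': 0x88a8})  -> 802.1ad (QinQ) outer tag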
def rewrite_vlan(output_dict):
"""Implement actions to rewrite VLAN headers."""
vlan_actions = []
if 'pop_vlans' in output_dict:
for _ in range(output_dict['pop_vlans']):
vlan_actions.append(valve_of.pop_vlan())
# if vlan tag is specified, push it.
if 'vlan_vid' in output_dict:
vlan_actions.extend(push_vlan(output_dict['vlan_vid']))
# swap existing VID
elif 'swap_vid' in output_dict:
vlan_actions.append(
valve_of.set_vlan_vid(output_dict['swap_vid']))
# or, if a list, push them all (all with type Q).
elif 'vlan_vids' in output_dict:
for vlan_vid in output_dict['vlan_vids']:
vlan_actions.extend(push_vlan(vlan_vid))
return vlan_actions
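# Sketch of the accepted output_dict keys (mirrors the branches above):
#   {'pop_vlans': 2}           -> two pop_vlan() actions
#   {'vlan_vid': 100}          -> push a tag for VID 100
#   {'swap_vid': 200}          -> rewrite the existing VID in place
#   {'vlan_vids': [100, 200]}  -> push both tags (all 802.1Q)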
def build_output_actions(output_dict):
"""Implement actions to alter packet/output."""
output_actions = []
output_port = None
ofmsgs = []
# rewrite any VLAN headers first always
vlan_actions = rewrite_vlan(output_dict)
if vlan_actions:<|fim▁hole|> if 'set_fields' in output_dict:
for set_fields in output_dict['set_fields']:
output_actions.append(valve_of.set_field(**set_fields))
if 'port' in output_dict:
output_port = output_dict['port']
output_actions.append(valve_of.output_port(output_port))
if 'ports' in output_dict:
for output_port in output_dict['ports']:
output_actions.append(valve_of.output_port(output_port))
if 'failover' in output_dict:
failover = output_dict['failover']
group_id = failover['group_id']
buckets = []
for port in failover['ports']:
buckets.append(valve_of.bucket(
watch_port=port, actions=[valve_of.output_port(port)]))
ofmsgs.append(valve_of.groupdel(group_id=group_id))
ofmsgs.append(valve_of.groupadd_ff(group_id=group_id, buckets=buckets))
output_actions.append(valve_of.group_act(group_id=group_id))
return (output_port, output_actions, ofmsgs)
# TODO: change this, maybe this can be rewritten easily
# possibly replace with a class for ACLs
def build_acl_entry(rule_conf, meters,
acl_allow_inst, acl_force_port_vlan_inst,
port_num=None, vlan_vid=None):
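# Returns an (acl_match, acl_inst, acl_cookie, acl_ofmsgs) tuple for one rule dict.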
acl_inst = []
acl_act = []
acl_match_dict = {}
acl_ofmsgs = []
acl_cookie = None
allow_inst = acl_allow_inst
for attrib, attrib_value in list(rule_conf.items()):
if attrib == 'in_port':
continue
if attrib == 'cookie':
acl_cookie = attrib_value
continue
if attrib == 'description':
continue
if attrib == 'actions':
allow = False
allow_specified = False
if 'allow' in attrib_value:
allow_specified = True
if attrib_value['allow'] == 1:
allow = True
if 'force_port_vlan' in attrib_value:
if attrib_value['force_port_vlan'] == 1:
allow_inst = acl_force_port_vlan_inst
if 'meter' in attrib_value:
meter_name = attrib_value['meter']
acl_inst.append(valve_of.apply_meter(meters[meter_name].meter_id))
if 'mirror' in attrib_value:
port_no = attrib_value['mirror']
acl_act.append(valve_of.output_port(port_no))
if not allow_specified:
allow = True
if 'output' in attrib_value:
output_port, output_actions, output_ofmsgs = build_output_actions(
attrib_value['output'])
acl_act.extend(output_actions)
acl_ofmsgs.extend(output_ofmsgs)
# if port specified, output packet now and exit pipeline.
if output_port is not None:
continue
if allow:
acl_inst.append(allow_inst)
else:
acl_match_dict[attrib] = attrib_value
if port_num is not None:
acl_match_dict['in_port'] = port_num
if vlan_vid is not None:
acl_match_dict['vlan_vid'] = valve_of.vid_present(vlan_vid)
try:
acl_match = valve_of.match_from_dict(acl_match_dict)
except TypeError:
raise InvalidConfigError('invalid type in ACL')
if acl_act:
acl_inst.append(valve_of.apply_actions(acl_act))
return (acl_match, acl_inst, acl_cookie, acl_ofmsgs)
def build_acl_ofmsgs(acls, acl_table,
acl_allow_inst, acl_force_port_vlan_inst,
highest_priority, meters,
exact_match, port_num=None, vlan_vid=None):
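# Rules are installed at descending priorities starting from highest_priority;
# with exact_match, every rule shares highest_priority instead.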
ofmsgs = []
acl_rule_priority = highest_priority
for acl in acls:
for rule_conf in acl.rules:
acl_match, acl_inst, acl_cookie, acl_ofmsgs = build_acl_entry(
rule_conf, meters,
acl_allow_inst, acl_force_port_vlan_inst,
port_num, vlan_vid)
ofmsgs.extend(acl_ofmsgs)
if exact_match:
flowmod = acl_table.flowmod(
acl_match, priority=highest_priority, inst=acl_inst, cookie=acl_cookie)
else:
flowmod = acl_table.flowmod(
acl_match, priority=acl_rule_priority, inst=acl_inst, cookie=acl_cookie)
ofmsgs.append(flowmod)
acl_rule_priority -= 1
return ofmsgs<|fim▁end|> | output_actions.extend(vlan_actions) |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>pub mod tank;<|fim▁hole|><|fim▁end|> | pub mod terrain; |
<|file_name|>linereport_test.js<|end_file_name|><|fim▁begin|>if (typeof process !== "undefined") {
require("amd-loader");
require("../../test/setup_paths");
}
define(function(require, exports, module) {
var assert = require("assert");
var report = require("./linereport_base");
module.exports = {
"test parse line" : function(next) {
var results = report.parseOutput("1:2: 3");
console.log(results[0]);
assert.equal(results[0].pos.sl, 0);
assert.equal(results[0].pos.sc, 1);
assert.equal(results[0].message, "3");
next();
},<|fim▁hole|> var results = report.parseOutput("1:1: line 1\n1:2: line 2");
assert.equal(results.length, 2);
next();
},
"test ignore lines" : function(next) {
var results = report.parseOutput("1:1: line 1\n1:2: line 2\bmove zig");
assert.equal(results.length, 2);
next();
}
};
});
if (typeof module !== "undefined" && module === require.main) {
require("asyncjs").test.testcase(module.exports).exec();
}<|fim▁end|> |
"test parse two lines" : function(next) { |
<|file_name|>test.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
import pypm, sys, time
DEV_NAME = 'nanoPAD2 MIDI 1'
OUT_NAME = 'fs'
#OUT_NAME = 'MIDI IN'
#OUT_NAME = 'Synth input port (20116:0)'
FIRST_NOTE = 24 + 4 # E, not C
SECOND_NOTE = FIRST_NOTE + 5
PADS1 = range(51, 35, -2)
PADS2 = range(50, 34, -2)
shorten_bools = lambda bool_list: ''.join(('0' if b else '.' for b in bool_list))
def find_device(name_):
for i in range(pypm.CountDevices()):
interf,name,inp,outp,opened = pypm.GetDeviceInfo(i)
if name_ == name: return i + 1
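# NB: find_device() returns the portmidi index plus one, or (implicitly) None
# when no device matches.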
dev_num = find_device(DEV_NAME)
if dev_num == None:
print DEV_NAME, 'not found, aborting!'
sys.exit(1)
print DEV_NAME, 'found at number', dev_num
out_num = find_device(OUT_NAME)
if out_num == None:
    print OUT_NAME, 'not found, aborting!'
    sys.exit(1)
out_num -= 1  # compensate for find_device() returning the index plus one
print OUT_NAME, 'found at number', out_num
midi_in = pypm.Input(dev_num)
#midi_out = pypm.Output(pypm.GetDefaultOutputDeviceID(), 0)
midi_out = pypm.Output(out_num, 0)
def noteon(chan, note, vel):
midi_out.Write([[[0x90 + chan, note, vel], pypm.Time()]])
def noteoff(chan, note):
midi_out.Write([[[0x80 + chan, note, 0], pypm.Time()]])
def press(chan, base_note, vel):
noteon(chan, base_note, vel)
noteon(chan, base_note - 7, vel / 3)
noteon(chan, base_note + 7, vel / 4)
noteon(chan, base_note + 12, vel / 5)
def release(chan, base_note):
noteoff(chan, base_note)
noteon(chan, base_note, 24)
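# press()/release() layer a simple chord around the root: a fifth below and
# above plus an octave, each progressively quieter; release() re-sounds the
# root softly (velocity 24) as a tail instead of silencing it outright.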
pressed = False
pads1_pressed = [False] * 7
pads2_pressed = [False] * 7
note = 0
while True:
while not midi_in.Poll():
time.sleep(0.0001)
continue
midi_data = midi_in.Read(1) # read only 1 message at a time
t = midi_data[0][1]
a, b, c, d = midi_data[0][0][0], midi_data[0][0][1], midi_data[0][0][2], midi_data[0][0][3]
if a == 176:
# touchpad
if b == 16:
pressed = (c == 127)
if pressed:
pass
#midi_out.Write([[[0x90+0, FIRST_NOTE + 0, c], pypm.Time()]])
#print 'on'
else:
midi_out.Write([[[0x80+0, FIRST_NOTE + 0, c], pypm.Time()]])
midi_out.Write([[[0x80+1, SECOND_NOTE + 0, c], pypm.Time()]])
#midi_out.Write([[[0x90+0, FIRST_NOTE + 0, c], pypm.Time()]])
pass
#midi_out.Write([[[0x80+0, FIRST_NOTE + 0, c], pypm.Time()]])
#print 'off'
else:
continue
elif a == 144:
# pad pressed
if b == PADS1[-1]:
# noteon for the first pad row
press(0, FIRST_NOTE, c)
continue
if b == PADS2[-1]:
# noteon for the second pad row
press(1, SECOND_NOTE, c)
continue
if b in PADS1: pads1_pressed[PADS1.index(b)] = True
if b in PADS2: pads2_pressed[PADS2.index(b)] = True
elif a == 128:
if b == PADS1[-1]:
# noteoff for the first pad row
if not pressed:
release(0, FIRST_NOTE)
continue
if b == PADS2[-1]:
# noteoff for the second pad row
if not pressed:
release(1, SECOND_NOTE)
continue
if b in PADS1: pads1_pressed[PADS1.index(b)] = False
if b in PADS2: pads2_pressed[PADS2.index(b)] = False
#else:
# continue
note1 = max([i if p else 0 for i, p in zip(range(1, 8+2-1), pads1_pressed)])
note2 = max([i if p else 0 for i, p in zip(range(1, 8+2-1), pads2_pressed)])<|fim▁hole|> print t, a, b, c, d, '\t', 'X' if pressed else '_',
print note1, shorten_bools(pads1_pressed),
print note2, shorten_bools(pads2_pressed)
del midi_in<|fim▁end|> | midi_out.Write([[[0xe0+0, 0, 0x40 + note1 * 0x4, c], pypm.Time()]])
midi_out.Write([[[0xe0+1, 0, 0x40 + note2 * 0x4, c], pypm.Time()]])
#print [i if p else 0 for i, p in zip(range(1, 8+2), pads1_pressed)] |
<|file_name|>linalg.py<|end_file_name|><|fim▁begin|>"""Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast, atleast_2d, intp, asanyarray, isscalar, object_, ones
)
from numpy.core.multiarray import normalize_axis_index
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
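# Examples of the promotion above (traced from the branches; illustrative):
#   _commonType(float32_arr)                -> (double, single)
#   _commonType(float32_arr, complex64_arr) -> (cdouble, csingle)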
# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if a.ndim != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % a.ndim)
def _assertRankAtLeast2(*arrays):
for a in arrays:
if a.ndim < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % a.ndim)
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _isEmpty2d(arr):
# check size first for efficiency
return arr.size == 0 and product(arr.shape[-2:]) == 0
def _assertNoEmpty2d(*arrays):
for a in arrays:
if _isEmpty2d(a):
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=b.ndim)``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorinv, numpy.einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
if a.shape[-1] == 0 and b.shape[-1] == 0:
# Legal, but the ufunc cannot handle the 0-sized inner dims
# let the ufunc handle all wrong cases.
a = a.reshape(a.shape[:-1])
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve1
else:
if b.size == 0:
if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
a = a[:,:1].reshape(a.shape[:-1] + (1,))
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if a.shape[-1] == 0:
# The inner array is 0x0, the ufunc cannot handle this case
return wrap(empty_like(a, dtype=result_t))
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t, copy=False))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Upper or lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
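# Sketch of the two-step solve described in the Notes above (generic
# np.linalg.solve stands in for dedicated triangular solvers for brevity):
#   L = cholesky(A)
#   y = solve(L, b)           # solve L y = b
#   x = solve(L.conj().T, y)  # solve L.H x = y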
# QR decomposition
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
# 2013-04-01, 1.8
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning, stacklevel=2)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t :
a = a.astype(result_t, copy=False)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
if _isEmpty2d(a):
return empty(a.shape[-1:], dtype=result_t)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t, copy=False)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines _syevd, _heevd
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288, 5.82842712])
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[ 5.+2.j, 9.-2.j],
[ 0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eigvals()
>>> # with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[ 5.+0.j, 0.-2.j],
[ 0.+2.j, 2.+0.j]])
>>> wa = LA.eigvalsh(a)
>>> wb = LA.eigvals(b)
>>> wa; wb
array([ 1., 6.])
array([ 6.+0.j, 1.+0.j])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if _isEmpty2d(a):
return empty(a.shape[-1:], dtype=result_t)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be of complex type, unless the imaginary part is
zero in which case it will be cast to a real type. When `a`
is real the resulting eigenvalues will be real (0 imaginary
part) or occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a symmetric or Hermitian
(conjugate symmetric) array.
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
if _isEmpty2d(a):
w = empty(a.shape[-1:], dtype=result_t)
vt = empty(a.shape, dtype=result_t)
return w, wrap(vt)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
return w.astype(result_t, copy=False), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,
_heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[ 5.+2.j, 9.-2.j],
[ 0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eig() with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[ 5.+0.j, 0.-2.j],
[ 0.+2.j, 2.+0.j]])
>>> wa, va = LA.eigh(a)
>>> wb, vb = LA.eig(b)
>>> wa; wb
array([ 1., 6.])
array([ 6.+0.j, 1.+0.j])
>>> va; vb
array([[-0.44721360-0.j , -0.89442719+0.j ],
[ 0.00000000+0.89442719j, 0.00000000-0.4472136j ]])
array([[ 0.89442719+0.j , 0.00000000-0.4472136j],
[ 0.00000000-0.4472136j, 0.89442719+0.j ]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if _isEmpty2d(a):
w = empty(a.shape[-1:], dtype=result_t)
vt = empty(a.shape, dtype=result_t)
return w, wrap(vt)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return w, wrap(vt)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it<|fim▁hole|> The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
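As a quick numerical sketch of that correspondence, for some 2-D array
``a``::

    u, s, vt = np.linalg.svd(a)
    # a.H a is Hermitian, so eigvalsh applies; its eigenvalues equal
    # s**2, up to ordering and round-off
    evals = np.linalg.eigvalsh(np.dot(a.conj().T, a))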
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (..., M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
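As a sketch of that definition, for an invertible 2-D array ``x`` and the
Frobenius norm::

    c = np.linalg.norm(x, 'fro') * np.linalg.norm(np.linalg.inv(x), 'fro')

which matches ``cond(x, 'fro')``.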
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[..., 0]/s[..., -1]
else:
return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
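In that case the tolerance can be passed explicitly; e.g., with a
hypothetical absolute measurement uncertainty of ``1e-6``::

    rank = np.linalg.matrix_rank(M, tol=1e-6)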
References
----------
.. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * finfo(S.dtype).eps
return sum(S > tol)
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
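A minimal sketch of that construction, using the reduced SVD of a 2-D
array ``a`` and the same ``rcond`` cutoff::

    u, s, vt = np.linalg.svd(a, full_matrices=False)
    large = s > rcond * s.max()
    s_inv = np.zeros_like(s)
    s_inv[large] = 1. / s[large]     # reciprocals of the large values only
    B = np.dot(vt.conj().T * s_inv, u.conj().T)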
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
if _isEmpty2d(a):
res = empty(a.shape[:-2] + (a.shape[-1], a.shape[-2]), dtype=a.dtype)
return wrap(res)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
.. versionadded:: 1.6.0
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
if _isEmpty2d(a):
# determinant of empty matrix is 1
sign = ones(a.shape[:-2], dtype=result_t)
logdet = zeros(a.shape[:-2], dtype=real_t)
return sign, logdet
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
if isscalar(sign):
sign = sign.astype(result_t)
else:
sign = sign.astype(result_t, copy=False)
if isscalar(logdet):
logdet = logdet.astype(real_t)
else:
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way to represent the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
# 0x0 matrices have determinant 1
if _isEmpty2d(a):
return ones(a.shape[:-2], dtype=result_t)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
if isscalar(r):
r = r.astype(result_t)
else:
r = r.astype(result_t, copy=False)
return r
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over-determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
For the purposes of rank determination, singular values are treated
as zero if they are smaller than `rcond` times the largest singular
value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print(m, c)
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = b.ndim == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
_assertNoEmpty2d(a, b) # TODO: relax this constraint
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
# This line:
# * is incorrect, according to the LAPACK documentation
# * raises a ValueError if min(m,n) == 0
# * should not be calculated here anyway, as LAPACK should calculate
# `liwork` for us. But that only works if our version of lapack does
# not have this bug:
# http://icl.cs.utk.edu/lapack-forum/archives/lapack/msg00899.html
# Lapack_lite does have that bug...
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
st = s[:min(n, m)].astype(result_real_t, copy=True)
return wrap(x), wrap(resids), results['rank'], st
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax or numpy.sum.
Returns
-------
result : float or ndarray
    If `x` is 2-D, the return value is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
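As a sketch, for a 2-D array ``A``, ``norm(A, 'nuc')`` should agree (up
to round-off) with ``np.linalg.svd(A, compute_uv=False).sum()``.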
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4.0
>>> LA.norm(b, np.inf)
9.0
>>> LA.norm(a, -np.inf)
0.0
>>> LA.norm(b, -np.inf)
2.0
>>> LA.norm(a, 1)
20.0
>>> LA.norm(b, 1)
7.0
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6.0
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([ 6., 6.])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
if not issubclass(x.dtype.type, (inexact, object_)):
x = x.astype(float)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord in ('f', 'fro') and ndim == 2) or
(ord == 2 and ndim == 1)):
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
        except Exception:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).astype(float).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
row_axis = normalize_axis_index(row_axis, nd)
col_axis = normalize_axis_index(col_axis, nd)
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def multi_dot(arrays):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication a lot.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as row vector.
If the last argument is 1-D it is treated as column vector.
The other arguments must be 2-D.
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
>>> A = np.random.random((10000, 100))
>>> B = np.random.random((100, 1000))
>>> C = np.random.random((1000, 5))
>>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> multi_dot([A, B, C, D])
instead of::
>>> np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> A.dot(B).dot(C).dot(D)
Example: multiplication costs of different parenthesizations
------------------------------------------------------------
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1]
Let's assume we have three matrices
:math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
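As a sketch, the same numbers follow from the ``cost`` helper above,
using hypothetical all-ones arrays purely for their shapes::

    A = np.ones((10, 100)); B = np.ones((100, 5)); C = np.ones((5, 50))
    cost(A, B) + cost(np.dot(A, B), C)   # 5000 + 2500 = 7500
    cost(B, C) + cost(A, np.dot(B, C))   # 25000 + 50000 = 75000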
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1])
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assertRank2(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`.
"""
a0, a1b0 = A.shape
b1c0, c1 = C.shape
# cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
cost1 = a0 * b1c0 * (a1b0 + c1)
# cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
cost2 = a1b0 * c1 * (a0 + b1c0)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
Return an np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))<|fim▁end|> | satisfies ``U.H = inv(U)``.
|
<|file_name|>app.module.ts<|end_file_name|><|fim▁begin|>import { NgModule, ErrorHandler } from '@angular/core';
import { BrowserModule } from '@angular/platform-browser';
import { IonicApp, IonicModule, IonicErrorHandler } from 'ionic-angular';
import { MyApp } from './app.component';
import { ScanPage } from '../pages/scan/scan';
import { SettingsPage } from '../pages/settings/settings';
import { Enums } from '../providers/enums';
import { Scanner } from '../providers/scanner';
import { ScannerSettings } from '../providers/scanner-settings';
@NgModule({
declarations: [
MyApp,
ScanPage,
SettingsPage,
],
imports: [
BrowserModule,
IonicModule.forRoot(MyApp)
],
bootstrap: [IonicApp],
entryComponents: [
MyApp,
ScanPage,
SettingsPage,
],
providers: [
{ provide: ErrorHandler, useClass: IonicErrorHandler },
Enums,
Scanner,
ScannerSettings,
]
})<|fim▁hole|><|fim▁end|> | export class AppModule {} |
<|file_name|>route.rs<|end_file_name|><|fim▁begin|>use router::criterion::*;
use hyper::method::{Method as hyperMethod};
use hyper::server::{Request, Response};
<|fim▁hole|> criteria: Vec<Criterion>,
handler: BoxedHandler
}
impl Route {
pub fn new(handler: BoxedHandler) -> Self {
return Route{criteria: Vec::new(), handler: handler};
}
pub fn new_with_method_path(handler: BoxedHandler, method: hyperMethod, path: String) -> Self {
let mut route = Route::new(handler);
route.add_criterion(Criterion::Method(vec![method]));
route.add_criterion(Criterion::ExactPath(path));
return route;
}
pub fn add_criterion(&mut self, criterion: Criterion) {
self.criteria.push(criterion);
}
pub fn execute(&self, req: Request, res: Response) {
(self.handler)(req, res);
}
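    /// Returns `true` only when every criterion attached to this route
    /// accepts the given request.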
pub fn resolve(&self, request: &Request) -> bool {
        for criterion in self.criteria.iter() {
            if !check(criterion, request) {
                return false;
            }
        }
        return true;
}
}<|fim▁end|> | pub type BoxedHandler = Box<for <'a> Fn(Request, Response) + Send + Sync>;
pub struct Route { |
<|file_name|>APropertyTable.java<|end_file_name|><|fim▁begin|>package com.canoo.ant.table;
import com.canoo.ant.filter.AllEqualsFilter;
import com.canoo.ant.filter.AllFilter;
import com.canoo.ant.filter.ITableFilter;
import org.apache.log4j.Logger;
import java.io.File;
import java.io.IOException;
import java.util.*;
public abstract class APropertyTable implements IPropertyTable {
private static final Logger LOG = Logger.getLogger(APropertyTable.class);
private static final int MAX_DEPTH = 10; // max recursion depth
private static final ThreadLocal DEPTH = new ThreadLocal();
private File fContainer;
private String fTable;
private String fPrefix;
private ITableFilter fFilter;
private List fRawTable;
private List fMetaTable;
protected static final String EMPTY = "";
protected static final String KEY_JOIN = "JOIN";
protected APropertyTable() {
fFilter = new AllFilter();
if( DEPTH.get() == null ) {
setDepth(0);
}
}
private static void setDepth(int depth){
DEPTH.set(new Integer(depth));
}
private static int getDepth(){
return((Integer)DEPTH.get()).intValue();
}
/**
* @return columnName -> expander (Type IPropertyTable)
*/
public Map getColumnInfo() {
List meta = getMetaTable();
Map result = new HashMap(meta.size()); // smaller is likely
// find all properties for this table
List tableSpecificColumnInfo = new AllEqualsFilter(TableFactory.KEY_TABLE).filter(meta, getTable());
for (Iterator eachColumnInfo = tableSpecificColumnInfo.iterator(); eachColumnInfo.hasNext();) {
Properties colInfo = (Properties) eachColumnInfo.next();
try {
// tableClass defaults to the current class
IPropertyTable table = TableFactory.createTable(colInfo, getClass().getName());
ITableFilter filter = TableFactory.createFilter(colInfo);
final File container;
if (colInfo.getProperty(TableFactory.KEY_CONTAINER, "").length() > 0) {
container = new File(getContainer().getParentFile(), colInfo.getProperty(TableFactory.KEY_CONTAINER));
colInfo.remove(TableFactory.KEY_CONTAINER); // to be sure that it doesn't get used with wrong path
}
else {
container = getContainer();
}
String key = colInfo.getProperty(TableFactory.KEY_NAME); // no default possible
TableFactory.initOrDefault(table, filter, colInfo, container, key);
result.put(key, table);
} catch (Exception e) {
LOG.error("cannot work with Property: " + colInfo.toString(), e);
throw new RuntimeException("Cannot work with Property: " + colInfo.toString(), e);
}
}
return result;
}
public List getPropertiesList(final String filterValue, final String prefix) {
// start with copy of initial table
// if current filter concerns extension keys, filter before extending
// filtering in advance also lowers memory consumption in the average
List result = getFilter().filter(getRawTable(), filterValue);
if (getDepth() > MAX_DEPTH){
LOG.error("processing grounded due to excessive recursion calls: "+getDepth());
return result;
}
setDepth(getDepth()+1);
final Map colInfo = getColumnInfo();
// only go over entries in the colInfo.
// (property names without colInfo info are not expanded)
for (Iterator eachExpandable = colInfo.keySet().iterator(); eachExpandable.hasNext();) {
String expansionName = (String) eachExpandable.next();
expandName(result, expansionName, colInfo);
}
setDepth(getDepth()-1);
// filter a second time to allow filters to work on expansions
result = getFilter().filter(result, filterValue);
// prefix is processed after filtering
if (prefix!=null && prefix.length()>0){
result = mapPrefix(result, prefix);
}
return result;
}
// like a ruby map!
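    // Returns a new list in which every key of each Properties entry is
    // rewritten as "<prefix>.<key>"; the original entries are not mutated.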
private List mapPrefix(List result, final String prefix) {
List collect = new ArrayList(result.size());
for (Iterator eachProps = result.iterator(); eachProps.hasNext();) {
Properties props = (Properties) eachProps.next();
Properties mapped = new Properties();
for (Iterator eachKey = props.keySet().iterator(); eachKey.hasNext();) {
String key = (String) eachKey.next();
String value = props.getProperty(key);
mapped.setProperty(prefix+"."+key, value);
}
collect.add(mapped);
}
return collect;
}
protected void expandName(List result, String expansionName, Map colInfo) {
List expansions = new LinkedList(); // cannot add while iterating. store and add later
for (Iterator eachProperties = result.iterator(); eachProperties.hasNext();) {
Properties props = (Properties) eachProperties.next();
List newExpansions = expandProps(props, expansionName, colInfo);
// default behaviour: like OUTER join, we do not shrink if nothing found
if (newExpansions.size() > 0) {
eachProperties.remove();
expansions.addAll(newExpansions);
}
}
result.addAll(expansions);
}
protected List expandProps(Properties props, String expansionName, Map colInfo) {
String value = props.getProperty(expansionName);
List propExpansions = new LinkedList();
IPropertyTable expansionTable = (IPropertyTable) colInfo.get(expansionName);
// recursive call
List expandWith = expansionTable.getPropertiesList(value, expansionTable.getPrefix());
for (Iterator eachExpansion = expandWith.iterator(); eachExpansion.hasNext();) {
Properties expandProps = (Properties) eachExpansion.next();
// merge expansion with current line
expandProps.putAll(props);
// store for later adding
propExpansions.add(expandProps);
}
return propExpansions;
}
//-------------- field accessors ------------------
public File getContainer() {
return fContainer;
}
public void setContainer(File container) {
fContainer = container;
}
public String getTable() {
return fTable;
}
public void setTable(String table) {
fTable = table;
}
public ITableFilter getFilter() {
return fFilter;
}
public void setFilter(ITableFilter filter) {
fFilter = filter;
}
public String getPrefix() {
return fPrefix;
}
public void setPrefix(String prefix) {
fPrefix = prefix;
}
//-------------- how to read specifics ------------------
/** lazy getter, cached */
public List getRawTable() {
fRawTable = getCachedTable(getTable(), fRawTable);
return fRawTable;
}
/** lazy getter, cached */
public List getMetaTable() {
if (hasJoinTable()) {
fMetaTable = getCachedTable(KEY_JOIN, fMetaTable);
}
else {
fMetaTable = Collections.EMPTY_LIST;
}
return fMetaTable;
}
/**
* Indicates if the table container has a JOIN table.
* @return default is <code>true</code>
*/
protected boolean hasJoinTable() {
return true;
}
protected List getCachedTable(final String table, List tableCache) {
if (tableCache != null) {
return tableCache;
}<|fim▁hole|> try {
tableCache = read(table);
}
catch (final IOException e) {
LOG.error("Cannot read " + getContainer() + " " + table, e);
String message = "Cannot read container >" + getContainer() + "<";
if (table != null)
message += " (table " + table + ")";
message += ": " + e.getMessage();
throw new RuntimeException(message, e);
}
if (tableCache.isEmpty()) {
LOG.debug("no entry in " + getContainer() + "/" + table);
}
LOG.debug(tableCache.size()+" entries in "+getContainer()+ " " + table);
return tableCache;
}
protected abstract List read(String table) throws IOException;
}<|fim▁end|> | |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|># Mostly from http://peterdowns.com/posts/first-time-with-pypi.html
from distutils.core import setup
setup(
name = 'pmdp',<|fim▁hole|> version = '0.3',
description = 'A poor man\'s data pipeline',
author = 'Dan Goldin',
author_email = '[email protected]',
url = 'https://github.com/dangoldin/poor-mans-data-pipeline',
download_url = 'https://github.com/dangoldin/poor-mans-data-pipeline/tarball/0.3',
keywords = ['data', 'data-pipeline'],
classifiers = [],
)<|fim▁end|> | packages = ['pmdp'], |
<|file_name|>log.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
celery.utils.log
~~~~~~~~~~~~~~~~
Logging utilities.
"""
from __future__ import absolute_import, print_function
import logging
import numbers
import os
import sys
import threading
import traceback
from contextlib import contextmanager
from billiard import current_process, util as mputil
from kombu.five import values
from kombu.log import get_logger as _get_logger, LOG_LEVELS
from kombu.utils.encoding import safe_str
from celery.five import string_t, text_t
from .term import colored
__all__ = ['ColorFormatter', 'LoggingProxy', 'base_logger',
'set_in_sighandler', 'in_sighandler', 'get_logger',
'get_task_logger', 'mlevel', 'ensure_process_aware_logger',
'get_multiprocessing_logger', 'reset_multiprocessing_logger']
_process_aware = False
PY3 = sys.version_info[0] == 3
MP_LOG = os.environ.get('MP_LOG', False)
# Sets up our logging hierarchy.
#
# Every logger in the celery package inherits from the "celery"
# logger, and every task logger inherits from the "celery.task"
# logger.
base_logger = logger = _get_logger('celery')
mp_logger = _get_logger('multiprocessing')
_in_sighandler = False
def set_in_sighandler(value):
global _in_sighandler
_in_sighandler = value
def iter_open_logger_fds():
seen = set()
loggers = (list(values(logging.Logger.manager.loggerDict)) +
[logging.getLogger(None)])
for logger in loggers:
try:
for handler in logger.handlers:
try:
if handler not in seen:
yield handler.stream
seen.add(handler)
except AttributeError:
pass
except AttributeError: # PlaceHolder does not have handlers
pass
@contextmanager
def in_sighandler():
set_in_sighandler(True)
try:
yield
finally:
set_in_sighandler(False)
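# Return True if logger `p` is an ancestor of logger `l`, walking the
# parent chain and raising if a cycle is detected.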
def logger_isa(l, p):
this, seen = l, set()
while this:
if this == p:
return True
else:
if this in seen:
raise RuntimeError(
'Logger {0!r} parents recursive'.format(l),
)
seen.add(this)
this = this.parent
return False
def get_logger(name):
l = _get_logger(name)
if logging.root not in (l, l.parent) and l is not base_logger:
if not logger_isa(l, base_logger):
l.parent = base_logger
return l
task_logger = get_logger('celery.task')
worker_logger = get_logger('celery.worker')
def get_task_logger(name):
logger = get_logger(name)
if not logger_isa(logger, task_logger):
logger.parent = task_logger
return logger
def mlevel(level):
if level and not isinstance(level, numbers.Integral):
return LOG_LEVELS[level.upper()]
return level
class ColorFormatter(logging.Formatter):
#: Loglevel -> Color mapping.
COLORS = colored().names
colors = {'DEBUG': COLORS['blue'], 'WARNING': COLORS['yellow'],
'ERROR': COLORS['red'], 'CRITICAL': COLORS['magenta']}
def __init__(self, fmt=None, use_color=True):
logging.Formatter.__init__(self, fmt)
self.use_color = use_color
def formatException(self, ei):
if ei and not isinstance(ei, tuple):
ei = sys.exc_info()
r = logging.Formatter.formatException(self, ei)
if isinstance(r, str) and not PY3:
return safe_str(r)
return r
def format(self, record):
msg = logging.Formatter.format(self, record)
color = self.colors.get(record.levelname)
# reset exception info later for other handlers...
einfo = sys.exc_info() if record.exc_info == 1 else record.exc_info
if color and self.use_color:
try:
# safe_str will repr the color object
# and color will break on non-string objects
# so need to reorder calls based on type.
# Issue #427
try:
if isinstance(msg, string_t):
return text_t(color(safe_str(msg)))
return safe_str(color(msg))
except UnicodeDecodeError:
return safe_str(msg) # skip colors
except Exception as exc:
prev_msg, record.exc_info, record.msg = (
record.msg, 1, '<Unrepresentable {0!r}: {1!r}>'.format(
type(msg), exc
),
)
try:
return logging.Formatter.format(self, record)
finally:
record.msg, record.exc_info = prev_msg, einfo
else:
return safe_str(msg)
class LoggingProxy(object):
"""Forward file object to :class:`logging.Logger` instance.
:param logger: The :class:`logging.Logger` instance to forward to.
:param loglevel: Loglevel to use when writing messages.
"""
mode = 'w'
name = None
closed = False
loglevel = logging.ERROR
_thread = threading.local()
def __init__(self, logger, loglevel=None):
self.logger = logger
self.loglevel = mlevel(loglevel or self.logger.level or self.loglevel)
self._safewrap_handlers()
def _safewrap_handlers(self):
"""Make the logger handlers dump internal errors to
`sys.__stderr__` instead of `sys.stderr` to circumvent
infinite loops."""
def wrap_handler(handler): # pragma: no cover
class WithSafeHandleError(logging.Handler):
def handleError(self, record):
exc_info = sys.exc_info()
try:
try:
traceback.print_exception(exc_info[0],
exc_info[1],
exc_info[2],
None, sys.__stderr__)
except IOError:
pass # see python issue 5971
finally:
del(exc_info)
handler.handleError = WithSafeHandleError().handleError
return [wrap_handler(h) for h in self.logger.handlers]
def write(self, data):
"""Write message to logging object."""
if _in_sighandler:
return print(safe_str(data), file=sys.__stderr__)
if getattr(self._thread, 'recurse_protection', False):
# Logger is logging back to this file, so stop recursing.
return
data = data.strip()
if data and not self.closed:
self._thread.recurse_protection = True
try:
self.logger.log(self.loglevel, safe_str(data))
finally:
self._thread.recurse_protection = False<|fim▁hole|>
def writelines(self, sequence):
"""`writelines(sequence_of_strings) -> None`.
Write the strings to the file.
The sequence can be any iterable object producing strings.
This is equivalent to calling :meth:`write` for each string.
"""
for part in sequence:
self.write(part)
def flush(self):
"""This object is not buffered so any :meth:`flush` requests
are ignored."""
pass
def close(self):
"""When the object is closed, no write requests are forwarded to
the logging object anymore."""
self.closed = True
def isatty(self):
"""Always return :const:`False`. Just here for file support."""
return False
def ensure_process_aware_logger(force=False):
"""Make sure process name is recorded when loggers are used."""
global _process_aware
if force or not _process_aware:
logging._acquireLock()
try:
_process_aware = True
Logger = logging.getLoggerClass()
if getattr(Logger, '_process_aware', False): # pragma: no cover
return
class ProcessAwareLogger(Logger):
_signal_safe = True
_process_aware = True
def makeRecord(self, *args, **kwds):
record = Logger.makeRecord(self, *args, **kwds)
record.processName = current_process()._name
return record
def log(self, *args, **kwargs):
if _in_sighandler:
return
return Logger.log(self, *args, **kwargs)
logging.setLoggerClass(ProcessAwareLogger)
finally:
logging._releaseLock()
def get_multiprocessing_logger():
return mputil.get_logger() if mputil else None
def reset_multiprocessing_logger():
if mputil and hasattr(mputil, '_logger'):
mputil._logger = None
def current_process_index(base=1):
if current_process:
index = getattr(current_process(), 'index', None)
return index + base if index is not None else index
ensure_process_aware_logger()<|fim▁end|> | |
<|file_name|>test_progressbar.py<|end_file_name|><|fim▁begin|>import os.path as op
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from mne.parallel import parallel_func
from mne.utils import ProgressBar, array_split_idx, use_log_level
def test_progressbar():
"""Test progressbar class."""
a = np.arange(10)<|fim▁hole|> pbar = ProgressBar(10)
assert pbar.max_value == 10
assert pbar.iterable is None
# Make sure that non-iterable input raises an error
def iter_func(a):
for ii in a:
pass
pytest.raises(Exception, iter_func, ProgressBar(20))
def _identity(x):
return x
def test_progressbar_parallel_basic(capsys):
"""Test ProgressBar with parallel computing, basic version."""
assert capsys.readouterr().out == ''
parallel, p_fun, _ = parallel_func(_identity, total=10, n_jobs=1,
verbose=True)
with use_log_level(True):
out = parallel(p_fun(x) for x in range(10))
assert out == list(range(10))
cap = capsys.readouterr()
out = cap.err
assert '100%' in out
def _identity_block(x, pb):
for ii in range(len(x)):
pb.update(ii + 1)
return x
def test_progressbar_parallel_advanced(capsys):
"""Test ProgressBar with parallel computing, advanced version."""
assert capsys.readouterr().out == ''
# This must be "1" because "capsys" won't get stdout properly otherwise
parallel, p_fun, _ = parallel_func(_identity_block, n_jobs=1,
verbose=False)
arr = np.arange(10)
with use_log_level(True):
with ProgressBar(len(arr)) as pb:
out = parallel(p_fun(x, pb.subset(pb_idx))
for pb_idx, x in array_split_idx(arr, 2))
assert op.isfile(pb._mmap_fname)
sum_ = np.memmap(pb._mmap_fname, dtype='bool', mode='r',
shape=10).sum()
assert sum_ == len(arr)
assert not op.isfile(pb._mmap_fname), '__exit__ not called?'
out = np.concatenate(out)
assert_array_equal(out, arr)
cap = capsys.readouterr()
out = cap.err
assert '100%' in out
def _identity_block_wide(x, pb):
for ii in range(len(x)):
for jj in range(2):
pb.update(ii * 2 + jj + 1)
return x, pb.idx
def test_progressbar_parallel_more(capsys):
"""Test ProgressBar with parallel computing, advanced version."""
assert capsys.readouterr().out == ''
# This must be "1" because "capsys" won't get stdout properly otherwise
parallel, p_fun, _ = parallel_func(_identity_block_wide, n_jobs=1,
verbose=False)
arr = np.arange(10)
with use_log_level(True):
with ProgressBar(len(arr) * 2) as pb:
out = parallel(p_fun(x, pb.subset(pb_idx))
for pb_idx, x in array_split_idx(
arr, 2, n_per_split=2))
idxs = np.concatenate([o[1] for o in out])
assert_array_equal(idxs, np.arange(len(arr) * 2))
out = np.concatenate([o[0] for o in out])
assert op.isfile(pb._mmap_fname)
sum_ = np.memmap(pb._mmap_fname, dtype='bool', mode='r',
shape=len(arr) * 2).sum()
assert sum_ == len(arr) * 2
assert not op.isfile(pb._mmap_fname), '__exit__ not called?'
cap = capsys.readouterr()
out = cap.err
assert '100%' in out<|fim▁end|> | pbar = ProgressBar(a)
assert a is pbar.iterable
assert pbar.max_value == 10
|
<|file_name|>handler.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""signal handlers registered by the imager_profile app"""
from __future__ import unicode_literals
from django.conf import settings
from django.db.models.signals import post_save
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from imager_profile.models import ImagerProfile
import logging
logger = logging.getLogger(__name__)
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def ensure_imager_profile(sender, **kwargs):
"""Create and save an ImagerProfile after every new User is created."""
if kwargs.get('created', False):
try:
new_profile = ImagerProfile(user=kwargs['instance'])
new_profile.save()
except (KeyError, ValueError):
logger.error('Unable to create ImagerProfile for User instance.')
@receiver(pre_delete, sender=settings.AUTH_USER_MODEL)
def remove_imager_profile(sender, **kwargs):
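    """Delete the related ImagerProfile just before its User is deleted."""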
try:
kwargs['instance'].profile.delete()
except (KeyError, AttributeError):
msg = (<|fim▁hole|> )
logger.warn(msg.format(kwargs['instance']))<|fim▁end|> | "ImagerProfile instance not deleted for {}. "
"Perhaps it does not exist?" |
<|file_name|>experimentHandler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo of class data.ExperimentHandler
"""
from psychopy import data, logging
from numpy import random
logging.console.setLevel(logging.DEBUG)
exp = data.ExperimentHandler(name='testExp',
version='0.1',
extraInfo={'participant':'jwp', 'ori':45},
runtimeInfo=None,
originPath=None,
savePickle=True,
saveWideText=True,
dataFileName='testExp')
# a first loop (like training?)
conds = data.createFactorialTrialList(
{'faceExpression':['happy', 'sad'], 'presTime':[0.2, 0.3]})
training = data.TrialHandler(trialList=conds, nReps=3, name='train',
method='random',
seed=100) # this will set the global seed - for the whole exp
exp.addLoop(training)
# run those trials<|fim▁hole|> if random.random() > 0.5:
training.addData('training.key', 'left')
else:
training.addData('training.key', 'right')
exp.nextEntry()
# then run 3 repeats of a staircase
outerLoop = data.TrialHandler(trialList=[], nReps=3, name='stairBlock',
method='random')
exp.addLoop(outerLoop)
for thisRep in outerLoop: # the outer loop doesn't save any data
staircase = data.StairHandler(startVal=10, name='staircase', nTrials=5)
exp.addLoop(staircase)
for thisTrial in staircase:
        resp_id = random.random()  # renamed to avoid shadowing the builtin id()
if random.random() > 0.5:
staircase.addData(1)
else:
staircase.addData(0)
        exp.addData('id', resp_id)
exp.nextEntry()
for e in exp.entries:
print(e)
print("Done. 'exp' experimentHandler will now (end of script) save data to testExp.csv")
print(" and also to testExp.psydat, which is a pickled version of `exp`")
# The contents of this file are in the public domain.<|fim▁end|> | for trial in training:
training.addData('training.rt', random.random() * 0.5 + 0.5) |
<|file_name|>macros.rs<|end_file_name|><|fim▁begin|>// Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Generic macros used here and there to simplify and make code more
//! readable.
/// Eliminates some of the verbosity in having iter and collect
/// around every map call.
#[macro_export]
macro_rules! map_vec {
($thing:expr, $mapfn:expr) => {
$thing.iter().map($mapfn).collect::<Vec<_>>()
};
}
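// A minimal usage sketch for `map_vec!`; the values are illustrative and not
// taken from this crate:
//
//   let nums = vec![1u32, 2, 3];
//   let doubled = map_vec!(nums, |n| n * 2);
//   assert_eq!(doubled, vec![2, 4, 6]);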
/// Same as map_vec when the map closure returns Results. Makes sure the
/// results are "pushed up" and wraps with a try.
#[macro_export]
macro_rules! try_map_vec {
($thing:expr, $mapfn:expr) => {
try_iter_map_vec!($thing.iter(), $mapfn);
};
}
/// Same as try_map_vec when thing is an iterator
#[macro_export]
macro_rules! try_iter_map_vec {
($thing:expr, $mapfn:expr) => {
$thing.map($mapfn).collect::<Result<Vec<_>, _>>()?;
};
}
/// Eliminates some of the verbosity in having iter and collect
/// around every filter_map call.
#[macro_export]
macro_rules! filter_map_vec {
($thing:expr, $mapfn:expr) => {
$thing.iter().filter_map($mapfn).collect::<Vec<_>>();
};
}
/// Allows the conversion of an expression that doesn't return anything to one
/// that returns the provided identifier.
/// Example:
/// let mut foo = vec![1,2,3];
/// println!("{:?}", tee!(foo, foo.append(&mut vec![3,4,5])));
#[macro_export]
macro_rules! tee {
($thing:ident, $thing_expr:expr) => {{
$thing_expr;
$thing
}};
}
/// Eliminate some of the boilerplate of deserialization (package ser) by
/// passing just the list of reader function (with optional single param)
/// Example before:
/// let foo = reader.read_u64()?;
/// let bar = reader.read_u32()?;
/// let fixed_byte_var = reader.read_fixed_bytes(64)?;
/// Example after:
/// let (foo, bar, fixed_byte_var) = ser_multiread!(reader, read_u64,
/// read_u32, read_fixed_bytes(64));
#[macro_export]
macro_rules! ser_multiread {
($rdr:ident, $($read_call:ident $(($val:expr)),*),*) => {
( $($rdr.$read_call($($val),*)?),* )
}
}<|fim▁hole|>/// writer.write_u64(42)?;
/// writer.write_u32(100)?;
/// Example after:
/// ser_multiwrite!(writer, [write_u64, 42], [write_u32, 100]);
#[macro_export]
macro_rules! ser_multiwrite {
($wrtr:ident, $([ $write_call:ident, $val:expr ]),* ) => {
$($wrtr.$write_call($val)? );*
}
}
// don't seem to be able to define an Ord implementation for Hash due to
// Ord being defined on all pointers, resorting to a macro instead
macro_rules! hashable_ord {
($hashable:ident) => {
impl Ord for $hashable {
fn cmp(&self, other: &$hashable) -> Ordering {
self.hash().cmp(&other.hash())
}
}
impl PartialOrd for $hashable {
fn partial_cmp(&self, other: &$hashable) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for $hashable {
fn eq(&self, other: &$hashable) -> bool {
self.hash() == other.hash()
}
}
impl Eq for $hashable {}
};
}<|fim▁end|> |
/// Eliminate some of the boilerplate of serialization (package ser) by
/// passing directly pairs of writer function and data to write.
/// Example before: |
<|file_name|>daq_device_info.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import, division, print_function
from builtins import * # @UnusedWildImport
from mcculw import ul
from mcculw.ul import ULError
from mcculw.enums import (BoardInfo, InfoType, ErrorCode, EventType,
ExpansionInfo)
from .ai_info import AiInfo
from .ao_info import AoInfo
from .ctr_info import CtrInfo
from .daqi_info import DaqiInfo
from .daqo_info import DaqoInfo
from .dio_info import DioInfo
class DaqDeviceInfo:
"""Provides hardware information for the DAQ device configured with the
specified board number.
NOTE: This class is primarily used to provide hardware information for the
library examples and may change some hardware configuration values. It is
recommended that values provided by this class be hard-coded in production
code.
Parameters
----------
board_num : int
The board number associated with the device when created with
:func:`.create_daq_device` or configured with Instacal.
"""
def __init__(self, board_num):
self._board_num = board_num
self._board_type = ul.get_config(InfoType.BOARDINFO, board_num, 0,
BoardInfo.BOARDTYPE)
if self._board_type == 0:
raise ULError(ErrorCode.BADBOARD)
self._ai_info = AiInfo(self._board_num)
self._ao_info = AoInfo(self._board_num)
self._ctr_info = CtrInfo(self._board_num)
self._daqi_info = DaqiInfo(self._board_num)
self._daqo_info = DaqoInfo(self._board_num)
self._dio_info = DioInfo(self._board_num)
@property
def board_num(self): # -> int
return self._board_num
@property
def product_name(self): # -> str
return ul.get_board_name(self._board_num)
@property
def unique_id(self): # -> str
return ul.get_config_string(InfoType.BOARDINFO, self._board_num, 0,
BoardInfo.DEVUNIQUEID, 32)
@property
def supports_analog_input(self): # -> boolean
return self._ai_info.is_supported
@property
def supports_temp_input(self): # -> boolean
return self._ai_info.temp_supported
def get_ai_info(self): # -> AiInfo
return self._ai_info
@property
def supports_analog_output(self): # -> boolean
return self._ao_info.is_supported
def get_ao_info(self): # -> AoInfo
return self._ao_info
@property
def supports_counters(self): # -> boolean
return self._ctr_info.is_supported
def get_ctr_info(self): # -> CtrInfo
return self._ctr_info
@property
def supports_daq_input(self): # -> boolean
return self._daqi_info.is_supported
def get_daqi_info(self): # -> DaqiInfo
return self._daqi_info
@property
def supports_daq_output(self): # -> boolean
return self._daqo_info.is_supported
def get_daqo_info(self): # -> DaqoInfo
return self._daqo_info
@property
def supports_digital_io(self): # -> boolean
return self._dio_info.is_supported
def get_dio_info(self): # -> DioInfo<|fim▁hole|> @property
def supported_event_types(self): # -> list[EventType]
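        # Support is probed empirically: disabling an event type succeeds on
        # boards that support it and raises ULError (swallowed below) otherwise.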
event_types = []
for event_type in EventType:
try:
ul.disable_event(self._board_num, event_type)
event_types.append(event_type)
except ULError:
pass
return event_types
@property
def num_expansions(self): # -> int
return ul.get_config(InfoType.BOARDINFO, self.board_num, 0,
BoardInfo.NUMEXPS)
@property
def exp_info(self): # -> list[ExpInfo]
exp_info = []
for expansion_num in range(self.num_expansions):
exp_info.append(ExpInfo(self._board_num, expansion_num))
return exp_info
class ExpInfo:
def __init__(self, board_num, expansion_num):
self._board_num = board_num
self._expansion_num = expansion_num
@property
def board_type(self):
return ul.get_config(InfoType.EXPANSIONINFO, self._board_num,
self._expansion_num, ExpansionInfo.BOARDTYPE)
@property
def mux_ad_chan(self):
return ul.get_config(InfoType.EXPANSIONINFO, self._board_num,
self._expansion_num, ExpansionInfo.MUX_AD_CHAN1)<|fim▁end|> | return self._dio_info
|
<|file_name|>versions_test.go<|end_file_name|><|fim▁begin|>/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dns
import (
"testing"
<|fim▁hole|>)
func TestGetKubeDNSVersion(t *testing.T) {
var tests = []struct {
k8sVersion string
dns string
expected string
}{
{
k8sVersion: "v1.9.0",
dns: kubeadmconstants.KubeDNS,
expected: kubeDNSVersion,
},
{
k8sVersion: "v1.10.0",
dns: kubeadmconstants.KubeDNS,
expected: kubeDNSVersion,
},
{
k8sVersion: "v1.9.0",
dns: kubeadmconstants.CoreDNS,
expected: coreDNSVersion,
},
{
k8sVersion: "v1.10.0",
dns: kubeadmconstants.CoreDNS,
expected: coreDNSVersion,
},
}
for _, rt := range tests {
k8sVersion, err := version.ParseSemantic(rt.k8sVersion)
if err != nil {
t.Fatalf("couldn't parse kubernetes version %q: %v", rt.k8sVersion, err)
}
actualDNSVersion := GetDNSVersion(k8sVersion, rt.dns)
if actualDNSVersion != rt.expected {
t.Errorf(
"failed GetDNSVersion:\n\texpected: %s\n\t actual: %s",
rt.expected,
actualDNSVersion,
)
}
}
}<|fim▁end|> | kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/pkg/util/version" |
<|file_name|>0001_initial.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-03-18 10:05
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.contrib.taggit
import modelcluster.fields
import wagtail.contrib.routable_page.models
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.embeds.blocks
import wagtail.images.blocks
import wagtailmd.utils
class Migration(migrations.Migration):
initial = True
dependencies = [
('wagtailimages', '0019_delete_filter'),
('taggit', '0002_auto_20150616_2121'),
('wagtailcore', '0040_page_draft_title'),
]
operations = [
migrations.CreateModel(
name='BlogCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('slug', models.SlugField(max_length=80, unique=True)),
],
options={
'verbose_name_plural': 'Categories',
'verbose_name': 'Category',
},
),
migrations.CreateModel(
name='BlogPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('description', models.CharField(blank=True, max_length=255)),
],
options={
'abstract': False,
},
bases=(wagtail.contrib.routable_page.models.RoutablePageMixin, 'wagtailcore.page'),
),
migrations.CreateModel(
name='BlogPageTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='LandingPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtail.core.fields.StreamField((('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image')), ('two_columns', wagtail.core.blocks.StructBlock((('left_column', wagtail.core.blocks.StreamBlock((('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock())), icon='arrow-right', label='Left column content')), ('right_column', wagtail.core.blocks.StreamBlock((('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock())), icon='arrow-right', label='Right column content'))))), ('embedded_video', wagtail.embeds.blocks.EmbedBlock(icon='media'))), blank=True, null=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='PostPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtailmd.utils.MarkdownField()),
('date', models.DateTimeField(default=datetime.datetime.today, verbose_name='Post date')),
('excerpt', wagtailmd.utils.MarkdownField(blank=True, verbose_name='excerpt')),
('categories', modelcluster.fields.ParentalManyToManyField(blank=True, to='blog.BlogCategory')),
('header_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='Tag',
fields=[
],
options={
'indexes': [],
'proxy': True,
},
bases=('taggit.tag',),
),
migrations.AddField(
model_name='postpage',
name='tags',
field=modelcluster.contrib.taggit.ClusterTaggableManager(blank=True, help_text='A comma-separated list of tags.', through='blog.BlogPageTag', to='taggit.Tag', verbose_name='Tags'),
),
migrations.AddField(
model_name='blogpagetag',
name='content_object',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='post_tags', to='blog.PostPage'),
),<|fim▁hole|> field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_blogpagetag_items', to='taggit.Tag'),
),
]<|fim▁end|> | migrations.AddField(
model_name='blogpagetag',
name='tag', |
<|file_name|>plugin.py<|end_file_name|><|fim▁begin|># Copyright 2012 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Neutron REST Proxy Plug-in for Big Switch and FloodLight Controllers.
NeutronRestProxy provides a generic neutron plugin that translates all plugin
function calls to equivalent authenticated REST calls to a set of redundant
external network controllers. It also keeps a persistent store of all neutron
state to allow re-syncing the external controller(s), if required.
The local state kept by the plugin also allows for local responses and
fast-fail semantics wherever the outcome can be determined from the local
persistent store alone.
Network controller specific code is decoupled from this plugin and expected
to reside on the controller itself (via the REST interface).
This allows for:
- independent authentication and redundancy schemes between neutron and the
network controller
- independent upgrade/development cycles between neutron and the controller
as it limits the proxy code upgrade requirement to neutron release cycle
and the controller specific code upgrade requirement to controller code
- ability to sync the controller with neutron for independent recovery/reset
External REST API used by proxy is the same API as defined for neutron (JSON
subset) with some additional parameters (gateway on network-create and macaddr
on port-attach) on an additional PUT to do a bulk dump of all persistent data.
"""
import copy
import functools
import httplib
import re
import eventlet
from oslo.config import cfg
from sqlalchemy.orm import exc as sqlexc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api import extensions as neutron_extensions
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.handlers import dhcp_rpc
from neutron.api.rpc.handlers import securitygroups_rpc
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron import context as qcontext
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import extradhcpopt_db
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import external_net
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.bigswitch import config as pl_config
from neutron.plugins.bigswitch.db import porttracker_db
from neutron.plugins.bigswitch import extensions
from neutron.plugins.bigswitch import servermanager
from neutron.plugins.bigswitch import version
from neutron.plugins.common import constants as pconst
LOG = logging.getLogger(__name__)
SYNTAX_ERROR_MESSAGE = _('Syntax error in server config file, aborting plugin')
METADATA_SERVER_IP = '169.254.169.254'
class AgentNotifierApi(n_rpc.RpcProxy,
sg_rpc.SecurityGroupAgentRpcApiMixin):
BASE_RPC_API_VERSION = '1.1'
def __init__(self, topic):
super(AgentNotifierApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.topic_port_update = topics.get_topic_name(
topic, topics.PORT, topics.UPDATE)
def port_update(self, context, port):
self.fanout_cast(context,
self.make_msg('port_update',
port=port),
topic=self.topic_port_update)
class SecurityGroupServerRpcMixin(sg_db_rpc.SecurityGroupServerRpcMixin):
def get_port_from_device(self, device):
port_id = re.sub(r"^%s" % const.TAP_DEVICE_PREFIX, "", device)
port = self.get_port_and_sgs(port_id)
if port:
port['device'] = device
return port
def get_port_and_sgs(self, port_id):
"""Get port from database with security group info."""
LOG.debug(_("get_port_and_sgs() called for port_id %s"), port_id)
session = db.get_session()
sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
with session.begin(subtransactions=True):
query = session.query(
models_v2.Port,
sg_db.SecurityGroupPortBinding.security_group_id
)
query = query.outerjoin(sg_db.SecurityGroupPortBinding,
models_v2.Port.id == sg_binding_port)
query = query.filter(models_v2.Port.id.startswith(port_id))
port_and_sgs = query.all()
if not port_and_sgs:
return
port = port_and_sgs[0][0]
plugin = manager.NeutronManager.get_plugin()
port_dict = plugin._make_port_dict(port)
port_dict['security_groups'] = [
sg_id for port_, sg_id in port_and_sgs if sg_id]
port_dict['security_group_rules'] = []
port_dict['security_group_source_groups'] = []
port_dict['fixed_ips'] = [ip['ip_address']
for ip in port['fixed_ips']]
return port_dict
class NeutronRestProxyV2Base(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin):
supported_extension_aliases = ["binding"]
servers = None
@property
def l3_plugin(self):
return manager.NeutronManager.get_service_plugins().get(
pconst.L3_ROUTER_NAT)
<|fim▁hole|> admin_context = qcontext.get_admin_context()
networks = []
# this method is used by the ML2 driver so it can't directly invoke
# the self.get_(ports|networks) methods
plugin = manager.NeutronManager.get_plugin()
all_networks = plugin.get_networks(admin_context) or []
for net in all_networks:
mapped_network = self._get_mapped_network_with_subnets(net)
flips_n_ports = mapped_network
if get_floating_ips:
flips_n_ports = self._get_network_with_floatingips(
mapped_network)
if get_ports:
ports = []
net_filter = {'network_id': [net.get('id')]}
net_ports = plugin.get_ports(admin_context,
filters=net_filter) or []
for port in net_ports:
mapped_port = self._map_state_and_status(port)
mapped_port['attachment'] = {
'id': port.get('device_id'),
'mac': port.get('mac_address'),
}
mapped_port = self._extend_port_dict_binding(admin_context,
mapped_port)
ports.append(mapped_port)
flips_n_ports['ports'] = ports
if flips_n_ports:
networks.append(flips_n_ports)
data = {'networks': networks}
if get_routers and self.l3_plugin:
routers = []
all_routers = self.l3_plugin.get_routers(admin_context) or []
for router in all_routers:
interfaces = []
mapped_router = self._map_state_and_status(router)
router_filter = {
'device_owner': [const.DEVICE_OWNER_ROUTER_INTF],
'device_id': [router.get('id')]
}
router_ports = self.get_ports(admin_context,
filters=router_filter) or []
for port in router_ports:
net_id = port.get('network_id')
subnet_id = port['fixed_ips'][0]['subnet_id']
intf_details = self._get_router_intf_details(admin_context,
net_id,
subnet_id)
interfaces.append(intf_details)
mapped_router['interfaces'] = interfaces
routers.append(mapped_router)
data.update({'routers': routers})
return data
def _send_all_data(self, send_ports=True, send_floating_ips=True,
send_routers=True, timeout=None,
triggered_by_tenant=None):
"""Pushes all data to network ctrl (networks/ports, ports/attachments).
This gives the controller an option to re-sync it's persistent store
with neutron's current view of that data.
"""
data = self._get_all_data(send_ports, send_floating_ips, send_routers)
data['triggered_by_tenant'] = triggered_by_tenant
errstr = _("Unable to update remote topology: %s")
return self.servers.rest_action('PUT', servermanager.TOPOLOGY_PATH,
data, errstr, timeout=timeout)
def _get_network_with_floatingips(self, network, context=None):
if context is None:
context = qcontext.get_admin_context()
net_id = network['id']
net_filter = {'floating_network_id': [net_id]}
if self.l3_plugin:
fl_ips = self.l3_plugin.get_floatingips(context,
filters=net_filter) or []
network['floatingips'] = fl_ips
return network
def _get_all_subnets_json_for_network(self, net_id, context=None):
if context is None:
context = qcontext.get_admin_context()
# start a sub-transaction to avoid breaking parent transactions
with context.session.begin(subtransactions=True):
subnets = self._get_subnets_by_network(context,
net_id)
subnets_details = []
if subnets:
for subnet in subnets:
subnet_dict = self._make_subnet_dict(subnet)
mapped_subnet = self._map_state_and_status(subnet_dict)
subnets_details.append(mapped_subnet)
return subnets_details
def _get_mapped_network_with_subnets(self, network, context=None):
# if context is not provided, admin context is used
if context is None:
context = qcontext.get_admin_context()
network = self._map_state_and_status(network)
subnets = self._get_all_subnets_json_for_network(network['id'],
context)
network['subnets'] = subnets
for subnet in (subnets or []):
if subnet['gateway_ip']:
# FIX: For backward compatibility with wire protocol
network['gateway'] = subnet['gateway_ip']
break
else:
network['gateway'] = ''
network[external_net.EXTERNAL] = self._network_is_external(
context, network['id'])
# include ML2 segmentation types
network['segmentation_types'] = getattr(self, "segmentation_types", "")
return network
def _send_create_network(self, network, context=None):
tenant_id = network['tenant_id']
mapped_network = self._get_mapped_network_with_subnets(network,
context)
self.servers.rest_create_network(tenant_id, mapped_network)
def _send_update_network(self, network, context=None):
net_id = network['id']
tenant_id = network['tenant_id']
mapped_network = self._get_mapped_network_with_subnets(network,
context)
net_fl_ips = self._get_network_with_floatingips(mapped_network,
context)
self.servers.rest_update_network(tenant_id, net_id, net_fl_ips)
def _send_delete_network(self, network, context=None):
net_id = network['id']
tenant_id = network['tenant_id']
self.servers.rest_delete_network(tenant_id, net_id)
def _map_state_and_status(self, resource):
resource = copy.copy(resource)
resource['state'] = ('UP' if resource.pop('admin_state_up',
True) else 'DOWN')
resource.pop('status', None)
return resource
def _warn_on_state_status(self, resource):
if resource.get('admin_state_up', True) is False:
LOG.warning(_("Setting admin_state_up=False is not supported "
"in this plugin version. Ignoring setting for "
"resource: %s"), resource)
if 'status' in resource:
if resource['status'] != const.NET_STATUS_ACTIVE:
LOG.warning(_("Operational status is internally set by the "
"plugin. Ignoring setting status=%s."),
resource['status'])
def _get_router_intf_details(self, context, intf_id, subnet_id):
# we will use the network id as interface's id
net_id = intf_id
network = self.get_network(context, net_id)
subnet = self.get_subnet(context, subnet_id)
mapped_network = self._get_mapped_network_with_subnets(network)
mapped_subnet = self._map_state_and_status(subnet)
data = {
'id': intf_id,
"network": mapped_network,
"subnet": mapped_subnet
}
return data
def _extend_port_dict_binding(self, context, port):
cfg_vif_type = cfg.CONF.NOVA.vif_type.lower()
        if cfg_vif_type not in (portbindings.VIF_TYPE_OVS,
portbindings.VIF_TYPE_IVS):
LOG.warning(_("Unrecognized vif_type in configuration "
"[%s]. Defaulting to ovs."),
cfg_vif_type)
cfg_vif_type = portbindings.VIF_TYPE_OVS
# In ML2, the host_id is already populated
if portbindings.HOST_ID in port:
hostid = port[portbindings.HOST_ID]
elif 'id' in port:
hostid = porttracker_db.get_port_hostid(context, port['id'])
else:
hostid = None
if hostid:
port[portbindings.HOST_ID] = hostid
override = self._check_hostvif_override(hostid)
if override:
cfg_vif_type = override
port[portbindings.VIF_TYPE] = cfg_vif_type
port[portbindings.VIF_DETAILS] = {
# TODO(rkukura): Replace with new VIF security details
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases,
portbindings.OVS_HYBRID_PLUG: True
}
return port
def _check_hostvif_override(self, hostid):
for v in cfg.CONF.NOVA.vif_types:
if hostid in getattr(cfg.CONF.NOVA, "node_override_vif_" + v, []):
return v
return False
def _get_port_net_tenantid(self, context, port):
net = super(NeutronRestProxyV2Base,
self).get_network(context, port["network_id"])
return net['tenant_id']
def async_port_create(self, tenant_id, net_id, port):
try:
self.servers.rest_create_port(tenant_id, net_id, port)
except servermanager.RemoteRestError as e:
# 404 should never be received on a port create unless
# there are inconsistencies between the data in neutron
# and the data in the backend.
# Run a sync to get it consistent.
if (cfg.CONF.RESTPROXY.auto_sync_on_failure and
e.status == httplib.NOT_FOUND and
servermanager.NXNETWORK in e.reason):
LOG.error(_("Iconsistency with backend controller "
"triggering full synchronization."))
# args depend on if we are operating in ML2 driver
# or as the full plugin
topoargs = self.servers.get_topo_function_args
self._send_all_data(
send_ports=topoargs['get_ports'],
send_floating_ips=topoargs['get_floating_ips'],
send_routers=topoargs['get_routers'],
triggered_by_tenant=tenant_id
)
# If the full sync worked, the port will be created
# on the controller so it can be safely marked as active
else:
# Any errors that don't result in a successful auto-sync
# require that the port be placed into the error state.
LOG.error(
_("NeutronRestProxyV2: Unable to create port: %s"), e)
try:
self._set_port_status(port['id'], const.PORT_STATUS_ERROR)
except exceptions.PortNotFound:
# If port is already gone from DB and there was an error
# creating on the backend, everything is already consistent
pass
return
new_status = (const.PORT_STATUS_ACTIVE if port['state'] == 'UP'
else const.PORT_STATUS_DOWN)
try:
self._set_port_status(port['id'], new_status)
except exceptions.PortNotFound:
# This port was deleted before the create made it to the controller
# so it now needs to be deleted since the normal delete request
            # would have deleted a non-existent port.
self.servers.rest_delete_port(tenant_id, net_id, port['id'])
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock
@utils.synchronized('bsn-port-barrier')
def _set_port_status(self, port_id, status):
session = db.get_session()
try:
port = session.query(models_v2.Port).filter_by(id=port_id).one()
port['status'] = status
session.flush()
except sqlexc.NoResultFound:
raise exceptions.PortNotFound(port_id=port_id)
def put_context_in_serverpool(f):
@functools.wraps(f)
def wrapper(self, context, *args, **kwargs):
# core plugin: context is top level object
# ml2: keeps context in _plugin_context
self.servers.set_context(getattr(context, '_plugin_context', context))
return f(self, context, *args, **kwargs)
return wrapper
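
# put_context_in_serverpool is applied to the plugin's CRUD entry points below
# (e.g. create_network) so the ServerPool sees the caller's request context.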
class NeutronRestProxyV2(NeutronRestProxyV2Base,
addr_pair_db.AllowedAddressPairsMixin,
extradhcpopt_db.ExtraDhcpOptMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
SecurityGroupServerRpcMixin):
_supported_extension_aliases = ["external-net", "binding",
"extra_dhcp_opt", "quotas",
"dhcp_agent_scheduler", "agent",
"security-group", "allowed-address-pairs"]
@property
def supported_extension_aliases(self):
if not hasattr(self, '_aliases'):
aliases = self._supported_extension_aliases[:]
sg_rpc.disable_security_group_extension_by_config(aliases)
self._aliases = aliases
return self._aliases
def __init__(self):
super(NeutronRestProxyV2, self).__init__()
LOG.info(_('NeutronRestProxy: Starting plugin. Version=%s'),
version.version_string_with_vcs())
pl_config.register_config()
self.evpool = eventlet.GreenPool(cfg.CONF.RESTPROXY.thread_pool_size)
# Include the Big Switch Extensions path in the api_extensions
neutron_extensions.append_api_extensions_path(extensions.__path__)
self.add_meta_server_route = cfg.CONF.RESTPROXY.add_meta_server_route
# init network ctrl connections
self.servers = servermanager.ServerPool()
self.servers.get_topo_function = self._get_all_data
self.servers.get_topo_function_args = {'get_ports': True,
'get_floating_ips': True,
'get_routers': True}
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver
)
# setup rpc for security and DHCP agents
self._setup_rpc()
if cfg.CONF.RESTPROXY.sync_data:
self._send_all_data()
LOG.debug(_("NeutronRestProxyV2: initialization done"))
def _setup_rpc(self):
self.conn = n_rpc.create_connection(new=True)
self.topic = topics.PLUGIN
self.notifier = AgentNotifierApi(topics.AGENT)
# init dhcp agent support
self._dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
self.agent_notifiers[const.AGENT_TYPE_DHCP] = (
self._dhcp_agent_notifier
)
self.endpoints = [securitygroups_rpc.SecurityGroupServerRpcCallback(),
dhcp_rpc.DhcpRpcCallback(),
agents_db.AgentExtRpcCallback()]
self.conn.create_consumer(self.topic, self.endpoints,
fanout=False)
# Consume from all consumers in threads
self.conn.consume_in_threads()
@put_context_in_serverpool
def create_network(self, context, network):
"""Create a network.
Network represents an L2 network segment which can have a set of
subnets and ports associated with it.
:param context: neutron api request context
:param network: dictionary describing the network
:returns: a sequence of mappings with the following signature:
{
"id": UUID representing the network.
"name": Human-readable name identifying the network.
"tenant_id": Owner of network. NOTE: only admin user can specify
a tenant_id other than its own.
"admin_state_up": Sets admin state of network.
if down, network does not forward packets.
"status": Indicates whether network is currently operational
(values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
"subnets": Subnets associated with this network.
}
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: create_network() called"))
self._warn_on_state_status(network['network'])
with context.session.begin(subtransactions=True):
self._ensure_default_security_group(
context,
network['network']["tenant_id"]
)
# create network in DB
new_net = super(NeutronRestProxyV2, self).create_network(context,
network)
self._process_l3_create(context, new_net, network['network'])
# create network on the network controller
self._send_create_network(new_net, context)
# return created network
return new_net
@put_context_in_serverpool
def update_network(self, context, net_id, network):
"""Updates the properties of a particular Virtual Network.
:param context: neutron api request context
:param net_id: uuid of the network to update
:param network: dictionary describing the updates
:returns: a sequence of mappings with the following signature:
{
"id": UUID representing the network.
"name": Human-readable name identifying the network.
"tenant_id": Owner of network. NOTE: only admin user can
specify a tenant_id other than its own.
"admin_state_up": Sets admin state of network.
if down, network does not forward packets.
"status": Indicates whether network is currently operational
(values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
"subnets": Subnets associated with this network.
}
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2.update_network() called"))
self._warn_on_state_status(network['network'])
session = context.session
with session.begin(subtransactions=True):
new_net = super(NeutronRestProxyV2, self).update_network(
context, net_id, network)
self._process_l3_update(context, new_net, network['network'])
# update network on network controller
self._send_update_network(new_net, context)
return new_net
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock
@utils.synchronized('bsn-port-barrier')
@put_context_in_serverpool
def delete_network(self, context, net_id):
"""Delete a network.
:param context: neutron api request context
:param id: UUID representing the network to delete.
:returns: None
:raises: exceptions.NetworkInUse
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: delete_network() called"))
# Validate args
orig_net = super(NeutronRestProxyV2, self).get_network(context, net_id)
with context.session.begin(subtransactions=True):
self._process_l3_delete(context, net_id)
ret_val = super(NeutronRestProxyV2, self).delete_network(context,
net_id)
self._send_delete_network(orig_net, context)
return ret_val
@put_context_in_serverpool
def create_port(self, context, port):
"""Create a port, which is a connection point of a device
(e.g., a VM NIC) to attach an L2 Neutron network.
:param context: neutron api request context
:param port: dictionary describing the port
:returns:
{
"id": uuid representing the port.
"network_id": uuid of network.
"tenant_id": tenant_id
"mac_address": mac address to use on this port.
"admin_state_up": Sets admin state of port. if down, port
does not forward packets.
"status": dicates whether port is currently operational
(limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
"fixed_ips": list of subnet IDs and IP addresses to be used on
this port
"device_id": identifies the device (e.g., virtual server) using
this port.
}
:raises: exceptions.NetworkNotFound
:raises: exceptions.StateInvalid
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: create_port() called"))
# Update DB in new session so exceptions rollback changes
with context.session.begin(subtransactions=True):
self._ensure_default_security_group_on_port(context, port)
sgids = self._get_security_groups_on_port(context, port)
# non-router port status is set to pending. it is then updated
# after the async rest call completes. router ports are synchronous
if port['port']['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF:
port['port']['status'] = const.PORT_STATUS_ACTIVE
else:
port['port']['status'] = const.PORT_STATUS_BUILD
dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, [])
new_port = super(NeutronRestProxyV2, self).create_port(context,
port)
self._process_port_create_security_group(context, new_port, sgids)
if (portbindings.HOST_ID in port['port']
and 'id' in new_port):
host_id = port['port'][portbindings.HOST_ID]
porttracker_db.put_port_hostid(context, new_port['id'],
host_id)
new_port[addr_pair.ADDRESS_PAIRS] = (
self._process_create_allowed_address_pairs(
context, new_port,
port['port'].get(addr_pair.ADDRESS_PAIRS)))
self._process_port_create_extra_dhcp_opts(context, new_port,
dhcp_opts)
new_port = self._extend_port_dict_binding(context, new_port)
net = super(NeutronRestProxyV2,
self).get_network(context, new_port["network_id"])
if self.add_meta_server_route:
if new_port['device_owner'] == const.DEVICE_OWNER_DHCP:
destination = METADATA_SERVER_IP + '/32'
self._add_host_route(context, destination, new_port)
# create on network ctrl
mapped_port = self._map_state_and_status(new_port)
# ports have to be created synchronously when creating a router
# port since adding router interfaces is a multi-call process
if mapped_port['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF:
self.servers.rest_create_port(net["tenant_id"],
new_port["network_id"],
mapped_port)
else:
self.evpool.spawn_n(self.async_port_create, net["tenant_id"],
new_port["network_id"], mapped_port)
self.notify_security_groups_member_updated(context, new_port)
return new_port
def get_port(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
port = super(NeutronRestProxyV2, self).get_port(context, id,
fields)
self._extend_port_dict_binding(context, port)
return self._fields(port, fields)
def get_ports(self, context, filters=None, fields=None):
with context.session.begin(subtransactions=True):
ports = super(NeutronRestProxyV2, self).get_ports(context, filters,
fields)
for port in ports:
self._extend_port_dict_binding(context, port)
return [self._fields(port, fields) for port in ports]
@put_context_in_serverpool
def update_port(self, context, port_id, port):
"""Update values of a port.
:param context: neutron api request context
:param id: UUID representing the port to update.
:param port: dictionary with keys indicating fields to update.
:returns: a mapping sequence with the following signature:
{
"id": uuid representing the port.
"network_id": uuid of network.
"tenant_id": tenant_id
"mac_address": mac address to use on this port.
"admin_state_up": sets admin state of port. if down, port
does not forward packets.
"status": dicates whether port is currently operational
(limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
"fixed_ips": list of subnet IDs and IP addresses to be used on
this port
"device_id": identifies the device (e.g., virtual server) using
this port.
}
:raises: exceptions.StateInvalid
:raises: exceptions.PortNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: update_port() called"))
self._warn_on_state_status(port['port'])
# Validate Args
orig_port = super(NeutronRestProxyV2, self).get_port(context, port_id)
with context.session.begin(subtransactions=True):
# Update DB
new_port = super(NeutronRestProxyV2,
self).update_port(context, port_id, port)
ctrl_update_required = False
if addr_pair.ADDRESS_PAIRS in port['port']:
ctrl_update_required |= (
self.update_address_pairs_on_port(context, port_id, port,
orig_port, new_port))
self._update_extra_dhcp_opts_on_port(context, port_id, port,
new_port)
old_host_id = porttracker_db.get_port_hostid(context,
orig_port['id'])
if (portbindings.HOST_ID in port['port']
and 'id' in new_port):
host_id = port['port'][portbindings.HOST_ID]
porttracker_db.put_port_hostid(context, new_port['id'],
host_id)
if old_host_id != host_id:
ctrl_update_required = True
if (new_port.get("device_id") != orig_port.get("device_id") and
orig_port.get("device_id")):
ctrl_update_required = True
if ctrl_update_required:
# tenant_id must come from network in case network is shared
net_tenant_id = self._get_port_net_tenantid(context, new_port)
new_port = self._extend_port_dict_binding(context, new_port)
mapped_port = self._map_state_and_status(new_port)
self.servers.rest_update_port(net_tenant_id,
new_port["network_id"],
mapped_port)
agent_update_required = self.update_security_group_on_port(
context, port_id, port, orig_port, new_port)
agent_update_required |= self.is_security_group_member_updated(
context, orig_port, new_port)
# return new_port
return new_port
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock
@utils.synchronized('bsn-port-barrier')
@put_context_in_serverpool
def delete_port(self, context, port_id, l3_port_check=True):
"""Delete a port.
:param context: neutron api request context
:param id: UUID representing the port to delete.
:raises: exceptions.PortInUse
:raises: exceptions.PortNotFound
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: delete_port() called"))
        # if needed, check to see if this is a port owned by
        # an l3-router. If so, we should prevent deletion.
if l3_port_check and self.l3_plugin:
self.l3_plugin.prevent_l3_port_deletion(context, port_id)
with context.session.begin(subtransactions=True):
if self.l3_plugin:
router_ids = self.l3_plugin.disassociate_floatingips(
context, port_id, do_notify=False)
self._delete_port_security_group_bindings(context, port_id)
port = super(NeutronRestProxyV2, self).get_port(context, port_id)
# Tenant ID must come from network in case the network is shared
tenid = self._get_port_net_tenantid(context, port)
self._delete_port(context, port_id)
self.servers.rest_delete_port(tenid, port['network_id'], port_id)
if self.l3_plugin:
# now that we've left db transaction, we are safe to notify
self.l3_plugin.notify_routers_updated(context, router_ids)
@put_context_in_serverpool
def create_subnet(self, context, subnet):
LOG.debug(_("NeutronRestProxyV2: create_subnet() called"))
self._warn_on_state_status(subnet['subnet'])
with context.session.begin(subtransactions=True):
# create subnet in DB
new_subnet = super(NeutronRestProxyV2,
self).create_subnet(context, subnet)
net_id = new_subnet['network_id']
orig_net = super(NeutronRestProxyV2,
self).get_network(context, net_id)
# update network on network controller
self._send_update_network(orig_net, context)
return new_subnet
@put_context_in_serverpool
def update_subnet(self, context, id, subnet):
LOG.debug(_("NeutronRestProxyV2: update_subnet() called"))
self._warn_on_state_status(subnet['subnet'])
with context.session.begin(subtransactions=True):
# update subnet in DB
new_subnet = super(NeutronRestProxyV2,
self).update_subnet(context, id, subnet)
net_id = new_subnet['network_id']
orig_net = super(NeutronRestProxyV2,
self).get_network(context, net_id)
# update network on network controller
self._send_update_network(orig_net, context)
return new_subnet
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock
@utils.synchronized('bsn-port-barrier')
@put_context_in_serverpool
def delete_subnet(self, context, id):
LOG.debug(_("NeutronRestProxyV2: delete_subnet() called"))
orig_subnet = super(NeutronRestProxyV2, self).get_subnet(context, id)
net_id = orig_subnet['network_id']
with context.session.begin(subtransactions=True):
# delete subnet in DB
super(NeutronRestProxyV2, self).delete_subnet(context, id)
orig_net = super(NeutronRestProxyV2, self).get_network(context,
net_id)
# update network on network controller - exception will rollback
self._send_update_network(orig_net, context)
def _add_host_route(self, context, destination, port):
subnet = {}
for fixed_ip in port['fixed_ips']:
subnet_id = fixed_ip['subnet_id']
nexthop = fixed_ip['ip_address']
subnet['host_routes'] = [{'destination': destination,
'nexthop': nexthop}]
updated_subnet = self.update_subnet(context,
subnet_id,
{'subnet': subnet})
payload = {'subnet': updated_subnet}
self._dhcp_agent_notifier.notify(context, payload,
'subnet.update.end')
LOG.debug(_("Adding host route: "))
LOG.debug(_("Destination:%(dst)s nexthop:%(next)s"),
{'dst': destination, 'next': nexthop})<|fim▁end|> | def _get_all_data(self, get_ports=True, get_floating_ips=True,
get_routers=True): |
<|file_name|>backend.py<|end_file_name|><|fim▁begin|># gcp xml backend
# Copyright (C) 2012 Jesse van den Kieboom <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.<|fim▁hole|># MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from gi.repository import GObject, Gcp
from document import Document
class Backend(GObject.Object, Gcp.Backend):
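    """Python backend for gcp: tracks registered documents, exposes them by
    index through do_get, and calls update() on a document when it changes."""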
size = GObject.property(type=int, flags = GObject.PARAM_READABLE)
def __init__(self):
GObject.Object.__init__(self)
self.documents = []
def do_get_property(self, spec):
if spec.name == 'size':
return len(self.documents)
GObject.Object.do_get_property(self, spec)
def do_register_document(self, doc):
d = Document(document=doc)
self.documents.append(d)
d.connect('changed', self.on_document_changed)
return d
def do_unregister_document(self, doc):
doc.disconnect_by_func(self.on_document_changed)
self.documents.remove(doc)
def do_get(self, idx):
return self.documents[idx]
def on_document_changed(self, doc):
doc.update()
# ex:ts=4:et:<|fim▁end|> | #
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of |
<|file_name|>specialerrors.py<|end_file_name|><|fim▁begin|>import sys
from remoteserver import DirectResultRemoteServer
class SpecialErrors(object):
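    """Sample remote library whose keywords fail with results flagged as
    continuable and/or fatal; the flag semantics here are inferred from the
    result dicts built in _special_error below."""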
def continuable(self, message, traceback):
return self._special_error(message, traceback, continuable=True)
def fatal(self, message, traceback):
return self._special_error(message, traceback,
fatal='this wins', continuable=42)
def _special_error(self, message, traceback, continuable=False, fatal=False):<|fim▁hole|>
if __name__ == '__main__':
DirectResultRemoteServer(SpecialErrors(), *sys.argv[1:])<|fim▁end|> | return {'status': 'FAIL', 'error': message, 'traceback': traceback,
'continuable': continuable, 'fatal': fatal} |
<|file_name|>Production5628.java<|end_file_name|><|fim▁begin|>package org.gradle.test.performance.mediummonolithicjavaproject.p281;
public class Production5628 {
private String property0;
public String getProperty0() {
return property0;
}
public void setProperty0(String value) {
property0 = value;
}
private String property1;
public String getProperty1() {
return property1;
}
public void setProperty1(String value) {
property1 = value;
}<|fim▁hole|> public String getProperty2() {
return property2;
}
public void setProperty2(String value) {
property2 = value;
}
private String property3;
public String getProperty3() {
return property3;
}
public void setProperty3(String value) {
property3 = value;
}
private String property4;
public String getProperty4() {
return property4;
}
public void setProperty4(String value) {
property4 = value;
}
private String property5;
public String getProperty5() {
return property5;
}
public void setProperty5(String value) {
property5 = value;
}
private String property6;
public String getProperty6() {
return property6;
}
public void setProperty6(String value) {
property6 = value;
}
private String property7;
public String getProperty7() {
return property7;
}
public void setProperty7(String value) {
property7 = value;
}
private String property8;
public String getProperty8() {
return property8;
}
public void setProperty8(String value) {
property8 = value;
}
private String property9;
public String getProperty9() {
return property9;
}
public void setProperty9(String value) {
property9 = value;
}
}<|fim▁end|> |
private String property2;
|
<|file_name|>analyzeBundle.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
import json
import os
bundleFilesDir = 'tmp/bundleSizeDownloads'
yarnLockFile = 'yarn.lock'
packagesFile = 'package.json'
def isDividerLine(line):
# At least 80 chars, all slashes except the last (which is newline). The number is inconsistent for some reason.
return (len(line)>=80
and line.endswith("\n")
and all([c=='/' for c in line[0:-1]]))
def isSpacerLine(line):
# At least 80 chars, starting with "//", ending with "//\n", otherwise all spaces
return (len(line)>=80
and line.startswith("//") and line.endswith("//\n")
and all([c==' ' for c in line[2:-3]]))
assert isDividerLine("////////////////////////////////////////////////////////////////////////////////////\n")
assert isSpacerLine("// //\n")
def readFileLines(filename):
f = open(filename, 'r')
lines = f.readlines()
f.close()
return lines
def bundleFilesToSizeMap():
sizesByFilename = {}
for filename in os.listdir(bundleFilesDir):
lines = readFileLines('%s/%s' % (bundleFilesDir, filename))
sizesByFilename = {**unpackFile(lines), **sizesByFilename}
return sizesByFilename
def unpackFile(lines):
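    # The bundle listing separates files with a five-line banner:
    # divider / spacer / "// <filename> //" / spacer / divider. Scan for those
    # banners and credit the bytes between consecutive banners to the file
    # named in the banner.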
sizes = {}
currentFileStart = None
currentFileName = None
for i in range(0,len(lines)):
if i+4<len(lines) and isDividerLine(lines[i]) and isSpacerLine(lines[i+1]) and isSpacerLine(lines[i+3]) and isDividerLine(lines[i+4]):
if currentFileName:
fileContents = '\n'.join(lines[currentFileStart:i])
sizes[currentFileName] = len(fileContents)
currentFileStart = i+5
currentFileName = lines[i+2].strip()[2:-2].strip()
if currentFileName:
        fileContents = '\n'.join(lines[currentFileStart:])  # slice to the end, not to the last loop index
sizes[currentFileName] = len(fileContents)
return sizes
def ancestorPaths(filename):
pathComponents = filename.split('/')
return ['.']+['/'.join(pathComponents[0:i]) for i in range(1,len(pathComponents))]
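# e.g. ancestorPaths('node_modules/foo/index.js') -> ['.', 'node_modules', 'node_modules/foo']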
def sumSizesInDirectories(sizesByFilename):
sizesByDirectory = {}
for filename in sizesByFilename:
for path in ancestorPaths(filename):
            sizesByDirectory[path] = sizesByDirectory.get(path, 0) + sizesByFilename[filename]
return sizesByDirectory
# Given the name of a yarn lockfile (yarn.lock), produce a dictionary from
# package -> array of dependencies of that package.
# The idea of this is to be able to identify when a package is depended on by
# only one other package, so that we can attribute the size of the depended-on
# package to the package that imported it.
#
#def yarnLockToDependencyGraph(lockfileName):
# dependenciesByPackage = {}
# lockfileLines = readFileLines(lockfileName)
#
# def backtrackToPackageName(lines, i):
# #TODO
# pass
# def forwardOverDependencies(lines, i):
# #TODO
# pass
#
# for i in range(0,len(lines)):
# if lockfileLines[0].strip()=='dependencies:':
# packageName = backtrackToPackageName(lines, i)
# dependencies = forwardOverDependencies(lines, i)
# if packageName in dependencies:
# dependenciesByPackage[packageName] = {**dependencies[packageName], **dependencies}
# else:
# dependenciesByPackage[packageName] = dependencies
def packagesFileToDependencyRoots(packagesFileName):
f = open(packagesFileName, 'r')
packagesJson = json.loads(f.read())
f.close()
    return packagesJson['dependencies']
def rightalign(num, width):
return (' ' * (width-len(str(num)))) + str(num)
#def getAdjustedPackageSizes(sizesByDirectory, dependencyRoots, dependencyGraph):
# #TODO
# return {}<|fim▁hole|>sizesByFilename = bundleFilesToSizeMap()
sizesByDirectory = sumSizesInDirectories(sizesByFilename)
for path in sorted(list(sizesByDirectory.keys())):
print("%s %s" % (rightalign(sizesByDirectory[path], 10), path))<|fim▁end|> | |
<|file_name|>struct-literal-in-if.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.<|fim▁hole|>}
impl Foo {
fn hi(&self) -> bool {
true
}
}
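// A struct literal is ambiguous in `if` condition position (the `{` would be
// read as the start of the block), so `if Foo { x: 3 }.hi()` below fails to
// parse with the error annotated on the field.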
fn main() {
if Foo {
x: 3 //~ ERROR expected one of `!`, `.`, `::`, `;`, `{`, `}`, or an operator, found `:`
}.hi() {
println!("yo");
}
}<|fim▁end|> |
struct Foo {
x: int, |
<|file_name|>bitcoin_fa.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="fa" version="2.0">
<defaultcodec>UTF-8</defaultcodec>
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About Babylonian</source>
<translation>در مورد Babylonian</translation>
</message>
<message>
<location line="+39"/>
<source><b>Babylonian</b> version</source>
<translation>نسخه Babylonian</translation>
</message>
<message>
<location line="+57"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation>⏎ ⏎ این نسخه نرم افزار آزمایشی است⏎ ⏎ نرم افزار تحت لیسانس MIT/X11 منتشر شده است. به فایل coping یا آدرس http://www.opensource.org/licenses/mit-license.php. مراجعه شود⏎ ⏎ این محصول شامل نرم افزاری است که با OpenSSL برای استفاده از OpenSSL Toolkit (http://www.openssl.org/) و نرم افزار نوشته شده توسط اریک یانگ ([email protected] ) و UPnP توسط توماس برنارد طراحی شده است.</translation>
</message>
<message>
<location filename="../aboutdialog.cpp" line="+14"/>
<source>Copyright</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>The Babylonian developers</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation>فهرست آدرس</translation>
</message>
<message>
<location line="+19"/>
<source>Double-click to edit address or label</source>
<translation>برای ویرایش آدرس یا بر چسب دو بار کلیک کنید</translation>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>آدرس جدید ایجاد کنید</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>آدرس انتخاب شده در سیستم تخته رسم گیره دار کپی کنید</translation>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation>آدرس جدید</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+63"/>
<source>These are your Babylonian addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation>این آدرسها، آدرسهای Babylonian شما برای دریافت وجوه هستند. شما ممکن است آدرسهای متفاوت را به هر گیرنده اختصاص دهید که بتوانید مواردی که پرداخت می کنید را پیگیری نمایید</translation>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>&Copy Address</source>
<translation>کپی آدرس</translation>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation>نمایش &کد QR</translation>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a Babylonian address</source>
<translation>پیام را برای اثبات آدرس Babylonian خود امضا کنید</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation>&امضای پیام</translation>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation>حذف آدرس انتخاب شده از فهرست</translation>
</message>
<message>
<location line="+27"/>
<source>Export the data in the current tab to a file</source>
<translation>داده های برگه جاری را به یک فایل صادر کنید</translation>
</message>
<message>
<location line="+3"/>
<source>&Export</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-44"/>
<source>Verify a message to ensure it was signed with a specified Babylonian address</source>
<translation>پیامی را تایید کنید تا مطمئن شوید با آدرس Babylonian مشخص شده امضا شده است</translation>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation>شناسایی پیام</translation>
</message>
<message>
<location line="+14"/>
<source>&Delete</source>
<translation>حذف</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="-5"/>
<source>These are your Babylonian addresses for sending payments. Always check the amount and the receiving address before sending coins.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Copy &Label</source>
<translation>کپی و برچسب گذاری</translation>
</message>
<message>
<location line="+1"/>
<source>&Edit</source>
<translation>ویرایش</translation>
</message>
<message>
<location line="+1"/>
<source>Send &Coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+260"/>
<source>Export Address Book Data</source>
<translation>صدور داده های دفترچه آدرس</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Comma separated file (*.csv)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation>خطای صدور</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>نوشتن در فایل %1 ممکن نیست.</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+144"/>
<source>Label</source>
<translation>بر چسب</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>آدرس</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>بدون برچسب</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation>دیالوگ Passphrase </translation>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>عبارت عبور را وارد کنید</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>عبارت عبور نو</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>تکرار عبارت عبور نو</translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+33"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation>عبارت عبور جدید کیف پول را وارد کنید.<br/>لطفا از عبارت عبوری با <b>10 کاراکتر تصادفی یا بیشتر</b> یا <b>هشت واژه یا بیشتر</b> استفاده کنید.</translation>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation>رمزگذاری کیف پول</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>این عملیات برای باز کردن قفل کیف پول به عبارت عبور شما نیاز دارد.</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>باز کردن قفل کیف پول</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>این عملیات برای رمزگشایی کیف پول به عبارت عبور شما نیاز دارد.</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>رمزگشایی کیف پول</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>تغییر عبارت عبور</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>عبارت عبور قدیمی و جدید کیف پول را وارد کنید.</translation>
</message>
<message>
<location line="+46"/>
<source>Confirm wallet encryption</source>
<translation>تایید رمز گذاری</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR QUARKCOINS</b>!</source>
<translation>هشدار: اگر wallet رمزگذاری شود و شما passphrase را گم کنید شما همه اطلاعات Babylonian را از دست خواهید داد.</translation>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>آیا اطمینان دارید که می خواهید wallet رمزگذاری شود؟</translation>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+100"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation>هشدار: Caps lock key روشن است</translation>
</message>
<message>
<location line="-130"/>
<location line="+58"/>
<source>Wallet encrypted</source>
<translation>کیف پول رمزگذاری شد</translation>
</message>
<message>
<location line="-56"/>
<source>Babylonian will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your quarkcoins from being stolen by malware infecting your computer.</source>
<translation>Babylonian هم اکنون بسته می شود تا فرایند رمزگذاری را تمام کند. به خاطر داشته باشید که رمزگذاری کیف پول نمی تواند به طور کامل سکه های شما را در برابر سرقت توسط بدافزارهای آلوده کننده رایانه محافظت کند.</translation>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+42"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>رمزگذاری کیف پول ناموفق بود</translation>
</message>
<message>
<location line="-54"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>رمزگذاری کیف پول به دلیل یک خطای داخلی ناموفق بود. کیف پول شما رمزگذاری نشد.</translation>
</message>
<message>
<location line="+7"/>
<location line="+48"/>
<source>The supplied passphrases do not match.</source>
<translation>عبارت های عبور وارد شده مطابقت ندارند.</translation>
</message>
<message>
<location line="-37"/>
<source>Wallet unlock failed</source>
<translation>باز کردن قفل کیف پول ناموفق بود</translation>
</message>
<message>
<location line="+1"/>
<location line="+11"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>عبارت عبور وارد شده برای رمزگشایی کیف پول نادرست بود.</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>رمزگشایی کیف پول ناموفق بود</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation>wallet passphrase با موفقیت تغییر یافت</translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+233"/>
<source>Sign &message...</source>
<translation>&امضای پیام...</translation>
</message>
<message>
<location line="+280"/>
<source>Synchronizing with network...</source>
<translation>همگام سازی با شبکه ...</translation>
</message>
<message>
<location line="-349"/>
<source>&Overview</source>
<translation>بررسی اجمالی</translation>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation>نمایش نمای کلی کیف پول</translation>
</message>
<message>
<location line="+20"/>
<source>&Transactions</source>
<translation>&معاملات</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>نمایش تاریخ معاملات</translation>
</message>
<message>
<location line="+7"/>
<source>Edit the list of stored addresses and labels</source>
<translation>ویرایش لیست آدرسها و بر چسب های ذخیره ای</translation>
</message>
<message>
<location line="-14"/>
<source>Show the list of addresses for receiving payments</source>
<translation>نمایش لیست آدرس ها برای در یافت پر داخت ها</translation>
</message>
<message>
<location line="+31"/>
<source>E&xit</source>
<translation>خروج</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>خروج از برنامه </translation>
</message>
<message>
<location line="+4"/>
<source>Show information about Babylonian</source>
<translation>نمایش اطلاعات در مورد Babylonian</translation>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation>درباره &Qt</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation>نمایش اطلاعات درباره Qt</translation>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>تنظیمات...</translation>
</message>
<message>
<location line="+6"/>
<source>&Encrypt Wallet...</source>
<translation>رمزگذاری wallet</translation>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation>پشتیبان گیری از wallet</translation>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>تغییر Passphrase</translation>
</message>
<message>
<location line="+285"/>
<source>Importing blocks from disk...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Reindexing blocks on disk...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-347"/>
<source>Send coins to a Babylonian address</source>
<translation>ارسال سکه ها به یک آدرس Babylonian</translation>
</message>
<message>
<location line="+49"/>
<source>Modify configuration options for Babylonian</source>
<translation>انتخابهای پیکربندی را برای Babylonian اصلاح کن</translation>
</message>
<message>
<location line="+9"/>
<source>Backup wallet to another location</source>
<translation>نسخه پیشتیبان wallet را به محل دیگر انتقال دهید</translation>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>تغییر عبارت عبور مورد استفاده برای رمزگذاری کیف پول</translation>
</message>
<message>
<location line="+6"/>
<source>&Debug window</source>
<translation>اشکال زدایی از صفحه</translation>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation>کنسول اشکال زدایی و تشخیص را باز کنید</translation>
</message>
<message>
<location line="-4"/>
<source>&Verify message...</source>
<translation>بازبینی پیام</translation>
</message>
<message>
<location line="-165"/>
<location line="+530"/>
<source>Babylonian</source>
<translation>Babylonian</translation>
</message>
<message>
<location line="-530"/>
<source>Wallet</source>
<translation>wallet</translation>
</message>
<message>
<location line="+101"/>
<source>&Send</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Receive</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>&Addresses</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>&About Babylonian</source>
<translation>در مورد Babylonian</translation>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation>&نمایش/ عدم نمایش</translation>
</message>
<message>
<location line="+1"/>
<source>Show or hide the main Window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Encrypt the private keys that belong to your wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Sign messages with your Babylonian addresses to prove you own them</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Verify messages to ensure they were signed with specified Babylonian addresses</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+28"/>
<source>&File</source>
<translation>فایل</translation>
</message>
<message>
<location line="+7"/>
<source>&Settings</source>
<translation>تنظیمات</translation>
</message>
<message>
<location line="+6"/>
<source>&Help</source>
<translation>کمک</translation>
</message>
<message>
<location line="+9"/>
<source>Tabs toolbar</source>
<translation>نوار ابزار زبانه ها</translation>
</message>
<message>
<location line="+17"/>
<location line="+10"/>
<source>[testnet]</source>
<translation>[شبکه آزمایشی]</translation>
</message>
<message>
<location line="+47"/>
<source>Babylonian client</source>
<translation>مشتری Babylonian</translation>
</message>
<message numerus="yes">
<location line="+141"/>
<source>%n active connection(s) to Babylonian network</source>
<translation><numerusform>%n اتصال فعال به شبکه Babylonian</numerusform></translation>
</message>
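<!-- Note (descriptive annotation): in Qt Linguist .ts files, "%n" is the count placeholder substituted by tr(), and each <numerusform> element holds one plural form of the translation for the target language. -->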
<message>
<location line="+22"/>
<source>No block source available...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Processed %1 of %2 (estimated) blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Processed %1 blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+20"/>
<source>%n hour(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n week(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>%1 behind</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Last received block was generated %1 ago.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Transactions after this will not yet be visible.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>Error</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+70"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-140"/>
<source>Up to date</source>
<translation>به روز</translation>
</message>
<message>
<location line="+31"/>
<source>Catching up...</source>
<translation>در حال به روز رسانی...</translation>
</message>
<message>
<location line="+113"/>
<source>Confirm transaction fee</source>
<translation>هزینه تراکنش را تایید کنید</translation>
</message>
<message>
<location line="+8"/>
<source>Sent transaction</source>
<translation>معامله ارسال شده</translation>
</message>
<message>
<location line="+0"/>
<source>Incoming transaction</source>
<translation>معامله در یافت شده</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>تاریخ: %1
مبلغ: %2
نوع: %3
آدرس: %4
</translation>
</message>
<message>
<location line="+33"/>
<location line="+23"/>
<source>URI handling</source>
<translation>مدیریت URI</translation>
</message>
<message>
<location line="-23"/>
<location line="+23"/>
<source>URI can not be parsed! This can be caused by an invalid Babylonian address or malformed URI parameters.</source>
<translation>URI قابل تحلیل نیست. این خطا ممکن است به دلیل ادرس Babylonian اشتباه یا پارامترهای اشتباه URI رخ داده باشد</translation>
</message>
<message>
<location line="+17"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>کیف پول <b>رمزگذاری شده</b> و هم اکنون <b>باز</b> است</translation>
</message>
<message>
<location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>کیف پول <b>رمزگذاری شده</b> و هم اکنون <b>قفل</b> است</translation>
</message>
<message>
<location filename="../bitcoin.cpp" line="+111"/>
<source>A fatal error occurred. Babylonian can no longer continue safely and will quit.</source>
<translation>خطا روی داده است. Babylonian نمی تواند بدون مشکل ادامه دهد و باید بسته شود</translation>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+104"/>
<source>Network Alert</source>
<translation>پیام شبکه</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>اصلاح آدرس</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>بر چسب</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation>برچسب مرتبط با این مدخل دفترچه آدرس</translation>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>آدرس</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation>آدرس مرتبط با این مدخل دفترچه آدرس. این مورد تنها برای آدرس های ارسال قابل ویرایش است.</translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+21"/>
<source>New receiving address</source>
<translation>آدرس در یافت نو</translation>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation>آدرس ارسال نو</translation>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation>اصلاح آدرس در یافت</translation>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation>اصلاح آدرس ارسال</translation>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>آدرس وارد شده «%1» از قبل در دفترچه آدرس موجود است.</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid Babylonian address.</source>
<translation>آدرس وارد شده «%1» یک آدرس معتبر Babylonian نیست.</translation>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>باز کردن قفل کیف پول ممکن نیست.</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>تولید کلید جدید ناموفق بود.</translation>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+424"/>
<location line="+12"/>
<source>Babylonian-Qt</source>
<translation>Babylonian-Qt</translation>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation>نسخه</translation>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation>استفاده:</translation>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation>انتخابها برای خطوط دستور command line</translation>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation>انتخابهای UI </translation>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation>زبان را تنظیم کنید برای مثال "de_DE" (پیش فرض: system locale)</translation>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation>شروع به صورت کوچک شده</translation>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation>نمایش صفحه splash در STARTUP (پیش فرض:1)</translation>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>تنظیمات</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation>اصلی</translation>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation>پرداخت &هزینه تراکنش</translation>
</message>
<message>
<location line="+31"/>
<source>Automatically start Babylonian after logging in to the system.</source>
<translation>در زمان ورود به سیستم به صورت خودکار Babylonian را اجرا کن</translation>
</message>
<message>
<location line="+3"/>
<source>&Start Babylonian on system login</source>
<translation>اجرای Babylonian در زمان ورود به سیستم</translation>
</message>
<message>
<location line="+35"/>
<source>Reset all client options to default.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Reset Options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>&Network</source>
<translation>شبکه</translation>
</message>
<message>
<location line="+6"/>
<source>Automatically open the Babylonian client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation>درگاه کلاینت Babylonian را به طور خودکار روی روتر باز کن. این تنها زمانی کار می کند که روتر شما از UPnP پشتیبانی کند و این قابلیت فعال باشد.</translation>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation>نگاشت درگاه با استفاده از &UPnP</translation>
</message>
<message>
<location line="+7"/>
<source>Connect to the Babylonian network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation>اتصال به شبکه Babylonian از طریق پراکسی ساکس (برای مثال وقتی از طریق نرم افزار TOR متصل می شوید)</translation>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation>اتصال با پراکسی SOCKS</translation>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation>&آی پی پراکسی:</translation>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation>آدرس آی پی پراکسی (مثلا 127.0.0.1)</translation>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation>درگاه</translation>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation>درگاه پراکسی (مثال 9050)</translation>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation>&نسخه SOCKS:</translation>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation>نسخه SOCKS از پراکسی (مثال 5)</translation>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation>صفحه</translation>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation>tray icon را تنها بعد از کوچک کردن صفحه نمایش بده</translation>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation>کوچک کردن به سینی سیستم به جای نوار وظیفه</translation>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation>هنگام بستن پنجره، به جای خروج از برنامه آن را کوچک کن. وقتی این گزینه فعال است، برنامه تنها پس از انتخاب «خروج» در منو بسته می شود.</translation>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation>کوچک کردن صفحه در زمان بستن</translation>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation>نمایش</translation>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation>&زبان میانجی کاربر:</translation>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting Babylonian.</source>
<translation>زبان میانجی کاربر می تواند در اینجا تنظیم شود. این تنظیمات بعد از شروع دوباره RESTART در Babylonian اجرایی خواهند بود.</translation>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation>واحد برای نمایش میزان وجوه در:</translation>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation>بخش فرعی پیش فرض را برای نمایش میانجی و زمان ارسال سکه ها مشخص و انتخاب نمایید</translation>
</message>
<message>
<location line="+9"/>
<source>Whether to show Babylonian addresses in the transaction list or not.</source>
<translation>نمایش یا عدم نمایش آدرس های Babylonian در فهرست تراکنش.</translation>
</message>
<message>
<location line="+3"/>
<source>&Display addresses in transaction list</source>
<translation>نمایش آدرسها در فهرست تراکنش</translation>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>تایید</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>رد</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation>انجام</translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+53"/>
<source>default</source>
<translation>پیش فرض</translation>
</message>
<message>
<location line="+130"/>
<source>Confirm options reset</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Some settings may require a client restart to take effect.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Do you want to proceed?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+42"/>
<location line="+9"/>
<source>Warning</source>
<translation>هشدار</translation>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting Babylonian.</source>
<translation>این تنظیمات پس از اجرای دوباره Babylonian اعمال می شوند</translation>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation>آدرس پراکسی داده شده صحیح نیست</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>فرم</translation>
</message>
<message>
<location line="+50"/>
<location line="+166"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the Babylonian network after a connection is established, but this process has not completed yet.</source>
<translation>اطلاعات نمایش داده شده روزآمد نیستند.wallet شما به صورت خودکار با شبکه Babylonian بعد از برقراری اتصال روزآمد می شود اما این فرایند هنوز کامل نشده است.</translation>
</message>
<message>
<location line="-124"/>
<source>Balance:</source>
<translation>تراز:</translation>
</message>
<message>
<location line="+29"/>
<source>Unconfirmed:</source>
<translation>تایید نشده</translation>
</message>
<message>
<location line="-78"/>
<source>Wallet</source>
<translation>wallet</translation>
</message>
<message>
<location line="+107"/>
<source>Immature:</source>
<translation>نابالغ</translation>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation>بالانس/تتمه حساب استخراج شده، نابالغ است /تکمیل نشده است</translation>
</message>
<message>
<location line="+46"/>
<source><b>Recent transactions</b></source>
<translation><b>آخرین معاملات</b></translation>
</message>
<message>
<location line="-101"/>
<source>Your current balance</source>
<translation>تراز جاری شما</translation>
</message>
<message>
<location line="+29"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation>مجموع تراکنش هایی که هنوز تایید نشده اند و هنوز در تراز جاری به حساب نیامده اند</translation>
</message>
<message>
<location filename="../overviewpage.cpp" line="+116"/>
<location line="+1"/>
<source>out of sync</source>
<translation>روزآمد نشده</translation>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<location filename="../paymentserver.cpp" line="+107"/>
<source>Cannot start Babylonian: click-to-pay handler</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation>دیالوگ QR CODE</translation>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation>درخواست پرداخت</translation>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation>مقدار:</translation>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation>برچسب:</translation>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation>پیام</translation>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation>&ذخیره به عنوان...</translation>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation>خطا در زمان رمزدار کردن URI در کد QR</translation>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation>میزان وجه وارد شده صحیح نیست، لطفا بررسی نمایید</translation>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation>URI ذکر شده بسیار طولانی است، متن برچسب/پیام را کوتاه کنید</translation>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation>ذخیره کد QR</translation>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation>تصاویر با فرمت PNG (*.png)</translation>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation>نام مشتری</translation>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+339"/>
<source>N/A</source>
<translation>-</translation>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation>نسخه مشتری</translation>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation>اطلاعات</translation>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation>استفاده از نسخه OPENSSL</translation>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation>زمان آغاز STARTUP</translation>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation>شبکه</translation>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation>تعداد اتصالات</translation>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation>روی testnet</translation>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation>زنجیره بلاک</translation>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation>تعداد کنونی بلاکها</translation>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation>تعداد تخمینی بلاکها</translation>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
<translation>زمان آخرین بلاک</translation>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation>باز کردن</translation>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation>گزینه های command-line</translation>
</message>
<message>
<location line="+7"/>
<source>Show the Babylonian-Qt help message to get a list with possible Babylonian command-line options.</source>
<translation>پیام راهنمای Babylonian-Qt را برای گرفتن فهرست گزینه های command-line نشان بده</translation>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation>نمایش</translation>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation>کنسول</translation>
</message>
<message>
<location line="-260"/>
<source>Build date</source>
<translation>تاریخ ساخت</translation>
</message>
<message>
<location line="-104"/>
<source>Babylonian - Debug window</source>
<translation>صفحه اشکال زدایی Babylonian </translation>
</message>
<message>
<location line="+25"/>
<source>Babylonian Core</source>
<translation> هسته Babylonian </translation>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation>فایلِ لاگِ اشکال زدایی</translation>
</message>
<message>
<location line="+7"/>
<source>Open the Babylonian debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation>فایلِ لاگِ اشکال زدایی Babylonian را از دایرکتوری جاری داده ها باز کنید. این عملیات ممکن است برای فایلهای لاگِ حجیم طولانی شود.</translation>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation>پاکسازی کنسول</translation>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-30"/>
<source>Welcome to the Babylonian RPC console.</source>
<translation>به کنسول Babylonian RPC خوش آمدید</translation>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation>دکمه های بالا و پایین برای مرور تاریخچه و Ctrl-L برای پاکسازی صفحه</translation>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation>با تایپ عبارت HELP دستورهای در دسترس را مرور خواهید کرد</translation>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+124"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>ارسال سکه ها</translation>
</message>
<message>
<location line="+50"/>
<source>Send to multiple recipients at once</source>
<translation>ارسال همزمان به چندین گیرنده</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation>اضافه کردن دریافت کننده</translation>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation>پاک کردن تمام ستونهای تراکنش</translation>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation>پاکسازی همه</translation>
</message>
<message>
<location line="+22"/>
<source>Balance:</source>
<translation>تراز:</translation>
</message>
<message>
<location line="+10"/>
<source>123.456 BTC</source>
<translation>123.456 BTC</translation>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation>عملیات ارسال را تایید کنید</translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>&ارسال</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-59"/>
<source><b>%1</b> to %2 (%3)</source>
<translation><b>%1</b> به %2 (%3)</translation>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation>ارسال سکه ها را تایید کنید</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation>آیا مطمئن هستید که می خواهید %1 را ارسال کنید؟</translation>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation> و </translation>
</message>
<message>
<location line="+23"/>
<source>The recipient address is not valid, please recheck.</source>
<translation>آدرس گیرنده نادرست است، لطفا دوباره بررسی کنید.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>مبلغ پر داخت باید از 0 بیشتر باشد </translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation>میزان وجه از بالانس/تتمه حساب شما بیشتر است</translation>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation>با افزودن هزینه تراکنش %1، مجموع از تراز شما بیشتر می شود.</translation>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation>آدرس تکراری یافت شده است، در زمان انجام عملیات به هر آدرس تنها یکبار می توانید اطلاعات ارسال کنید</translation>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>خطا: تراکنش تایید نشد. این پیام زمانی روی می دهد که مقداری از سکه های WALLET شما استفاده شده اند برای مثال اگر شما از WALLET.DAT استفاده کرده اید، ممکن است سکه ها استفاده شده باشند اما در اینجا نمایش داده نشوند</translation>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation>فرم</translation>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>&مبلغ:</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>پرداخت &به:</translation>
</message>
<message>
<location line="+34"/>
<source>The address to send the payment to (e.g. Qi1NooNjQySQLDJ643HWfZZ7UN2EmLEvix)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+60"/>
<location filename="../sendcoinsentry.cpp" line="+26"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>برای این آدرس برچسبی وارد کنید تا به دفترچه آدرس شما اضافه شود</translation>
</message>
<message>
<location line="-78"/>
<source>&Label:</source>
<translation>&برچسب:</translation>
</message>
<message>
<location line="+28"/>
<source>Choose address from address book</source>
<translation>آدرس را از دفترچه آدرس انتخاب کنید</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>چسباندن آدرس از کلیپ بورد</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation>حذف این گیرنده</translation>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a Babylonian address (e.g. Qi1NooNjQySQLDJ643HWfZZ7UN2EmLEvix)</source>
<translation>یک آدرس Babylonian وارد کنید (مثلا Qi1NooNjQySQLDJ643HWfZZ7UN2EmLEvix)</translation>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation>امضا - امضا کردن /شناسایی یک پیام</translation>
</message>
<message>
<location line="+13"/>
<source>&Sign Message</source>
<translation>&امضای پیام</translation>
</message>
<message>
<location line="+6"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation>شما می توانید پیامها را با آدرس خودتان امضا نمایید تا ثابت شود متعلق به شما هستند. مواظب باشید تا چیزی که بدان مطمئن نیستنید را امضا نکنید زیرا حملات فیشینگ در زمان ورود شما به سیستم فریبنده هستند. تنها مواردی را که حاوی اطلاعات دقیق و قابل قبول برای شما هستند را امضا کنید</translation>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. Qi1NooNjQySQLDJ643HWfZZ7UN2EmLEvix)</source>
<translation>آدرس برای امضا کردن پیام با (برای مثال Qi1NooNjQySQLDJ643HWfZZ7UN2EmLEvix)</translation>
</message>
<message>
<location line="+10"/>
<location line="+213"/>
<source>Choose an address from the address book</source>
<translation>یک آدرس را از فهرست آدرسها انتخاب کنید</translation>
</message>
<message>
<location line="-203"/>
<location line="+213"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-203"/>
<source>Paste address from clipboard</source>
<translation>چسباندن آدرس از کلیپ بورد</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation>پیامی را که میخواهید امضا کنید در اینجا وارد کنید</translation>
</message>
<message>
<location line="+7"/>
<source>Signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Copy the current signature to the system clipboard</source>
<translation>این امضا را در system clipboard کپی کن</translation>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this Babylonian address</source>
<translation>پیام را برای اثبات آدرس Babylonian خود امضا کنید</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Reset all sign message fields</source>
<translation>تنظیم دوباره تمامی فیلدهای پیام</translation>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation>پاکسازی همه</translation>
</message>
<message>
<location line="-87"/>
<source>&Verify Message</source>
<translation>تایید پیام</translation>
</message>
<message>
<location line="+6"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation>آدرس امضاکننده، پیام (مطمئن شوید فاصله بین خطوط، فاصله ها، تب ها و غیره را دقیقا کپی می کنید) و امضا را در زیر وارد کنید تا پیام تایید شود. مراقب باشید بیش از آنچه در خود پیام امضا شده آمده است از امضا برداشت نکنید تا فریب حمله شخص میانی را نخورید.</translation>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. Qi1NooNjQySQLDJ643HWfZZ7UN2EmLEvix)</source>
<translation>آدرس برای امضا کردن پیام با (برای مثال Qi1NooNjQySQLDJ643HWfZZ7UN2EmLEvix)</translation>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified Babylonian address</source>
<translation>پیام را تایید کنید تا مطمئن شوید با آدرس Babylonian مشخص شده امضا شده است</translation>
</message>
<message>
<location line="+3"/>
<source>Verify &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Reset all verify message fields</source>
<translation>تنظیم دوباره تمامی فیلدهای پیام تایید شده</translation>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a Babylonian address (e.g. Qi1NooNjQySQLDJ643HWfZZ7UN2EmLEvix)</source>
<translation>یک آدرس Babylonian وارد کنید (مثلا Qi1NooNjQySQLDJ643HWfZZ7UN2EmLEvix)</translation>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation>با کلیک بر "امضای پیام" شما یک امضای جدید درست می کنید</translation>
</message>
<message>
<location line="+3"/>
<source>Enter Babylonian signature</source>
<translation>امضای Babylonian را وارد کنید</translation>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation>آدرس وارد شده صحیح نیست</translation>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation>لطفا آدرس را بررسی کرده و دوباره امتحان کنید.</translation>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation>آدرس وارد شده با کلید وارد شده مرتبط نیست</translation>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation>باز کردن قفل کیف پول لغو شد.</translation>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation>کلید شخصی برای آدرس وارد شده در دسترس نیست</translation>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation>امضای پیام ناموفق بود.</translation>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation>پیام امضا شد</translation>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation>امضا نمی تواند رمزگشایی شود</translation>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation>لطفا امضا را بررسی و دوباره تلاش نمایید</translation>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation>امضا با تحلیلِ پیام مطابقت ندارد</translation>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation>عملیات شناسایی پیام انجام نشد</translation>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation>پیام شناسایی شد</translation>
</message>
</context>
<context>
<name>SplashScreen</name>
<message>
<location filename="../splashscreen.cpp" line="+22"/>
<source>The Babylonian developers</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>[testnet]</source>
<translation>[شبکه آزمایشی]</translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+20"/>
<source>Open until %1</source>
<translation>باز تا %1</translation>
</message>
<message>
<location line="+6"/>
<source>%1/offline</source>
<translation>%1 آفلاین</translation>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1 تایید نشده </translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 تاییدیه</translation>
</message>
<message>
<location line="+18"/>
<source>Status</source>
<translation>وضعیت</translation>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation><numerusform>، انتشار از طریق %n گره</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>تاریخ </translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation>منبع</translation>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation>تولید شده</translation>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation>فرستنده</translation>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation>گیرنده</translation>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation>آدرس شما</translation>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation>برچسب</translation>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation>اعتبار</translation>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation><numerusform>پس از %n بلاک دیگر بالغ می شود</numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation>غیرقابل قبول</translation>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation>بدهی</translation>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation>هزینه تراکنش</translation>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation>هزینه خالص</translation>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation>پیام</translation>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation>نظر</translation>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation>شناسه کاربری برای تراکنش</translation>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 240 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation>سکه های تولید شده باید پیش از استفاده 240 بلاک بالغ شوند. هنگامی که شما این بلاک را تولید کردید، بلاک در شبکه منتشر شد تا به زنجیره بلاک ها بپیوندد. اگر نتواند وارد زنجیره شود، وضعیت آن به «غیرقابل قبول» تغییر می یابد و قابل خرج کردن نخواهد بود. این حالت گاهی زمانی پیش می آید که گره دیگری در فاصله چند ثانیه از شما بلاکی تولید کند.</translation>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation>اطلاعات اشکال زدایی</translation>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation>تراکنش</translation>
</message>
<message>
<location line="+3"/>
<source>Inputs</source>
<translation>درونداد</translation>
</message>
<message>
<location line="+23"/>
<source>Amount</source>
<translation>مبلغ</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation>صحیح</translation>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation>نادرست</translation>
</message>
<message>
<location line="-209"/>
<source>, has not been successfully broadcast yet</source>
<translation>، هنوز با موفقیت منتشر نشده است</translation>
</message>
<message numerus="yes">
<location line="-35"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+70"/>
<source>unknown</source>
<translation>مشخص نیست </translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>جزییات معاملات</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>این بخش شرح کاملی از تراکنش را نشان می دهد</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+225"/>
<source>Date</source>
<translation>تاریخ</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>نوع</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>آدرس</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>مبلغ</translation>
</message>
<message numerus="yes">
<location line="+57"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+3"/>
<source>Open until %1</source>
<translation>باز تا %1</translation>
</message>
<message>
<location line="+3"/>
<source>Offline (%1 confirmations)</source>
<translation>آفلاین (%1 تاییدیه)</translation>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed (%1 of %2 confirmations)</source>
<translation>تایید نشده (%1/%2)</translation>
</message>
<message>
<location line="+3"/>
<source>Confirmed (%1 confirmations)</source>
<translation>تایید شده (%1)</translation>
</message>
<message numerus="yes">
<location line="+8"/>
<source>Mined balance will be available when it matures in %n more block(s)</source>
<translation><numerusform>تراز استخراج شده پس از بالغ شدن در %n بلاک دیگر در دسترس خواهد بود</numerusform></translation>
</message>
<message>
<location line="+5"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>این بلاک توسط هیچ گره دیگری دریافت نشده است و احتمالا پذیرفته نخواهد شد!</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>تولید شده ولی قبول نشده</translation>
</message>
<message>
<location line="+43"/>
<source>Received with</source>
<translation>در یافت با :</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>دریافتی از</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>ارسال به :</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>پر داخت به خودتان</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>استخراج</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>(کاربرد ندارد)</translation>
</message>
<message>
<location line="+199"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>وضعیت تراکنش. برای نمایش تعداد تاییدیه ها نشانگر را روی این فیلد نگه دارید.</translation>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation>تاریخ و ساعت در یافت معامله</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>نوع معاملات</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation>آدرس مقصود معاملات </translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>مبلغ کسر شده از تراز یا افزوده شده به آن.</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+52"/>
<location line="+16"/>
<source>All</source>
<translation>همه</translation>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation>امروز</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>این هفته</translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>این ماه</translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>ماه گذشته</translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>امسال</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation>محدوده...</translation>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation>دریافت با</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>ارسال به</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>به خودتان </translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>استخراج</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>دیگر</translation>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation>برای جستوجو نشانی یا برچسب را وارد کنید</translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>حداقل مبلغ</translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>کپی آدرس </translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>کپی برچسب</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>روگرفت مقدار</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>اصلاح برچسب</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation>جزئیات تراکنش را نمایش بده</translation>
</message>
<message>
<location line="+139"/>
<source>Export Transaction Data</source>
<translation>صدور داده های تراکنش</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>فایل جداشده با کاما (*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>تایید شده</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>تاریخ </translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>نوع </translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>برچسب</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>نشانی</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>مبلغ</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation>آی دی</translation>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation>خطای صادرات</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>نوشتن در فایل %1 ممکن نیست</translation>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation>محدوده:</translation>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation>به</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+193"/>
<source>Send Coins</source>
<translation>ارسال سکه ها</translation>
</message>
</context>
<context>
<name>WalletView</name>
<message>
<location filename="../walletview.cpp" line="+42"/>
<source>&Export</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Export the data in the current tab to a file</source>
<translation>داده های نوار جاری را به فایل انتقال دهید</translation>
</message>
<message>
<location line="+193"/>
<source>Backup Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Backup Successful</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>The wallet data was successfully saved to the new location.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>Babylonian-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+94"/>
<source>Babylonian version</source>
<translation>نسخه Babylonian</translation>
</message>
<message>
<location line="+102"/>
<source>Usage:</source>
<translation>استفاده:</translation>
</message>
<message>
<location line="-29"/>
<source>Send command to -server or quarkcoind</source>
<translation>ارسال فرمان به -server یا quarkcoind</translation>
</message>
<message>
<location line="-23"/>
<source>List commands</source>
<translation>لیست فرمان ها</translation>
</message>
<message>
<location line="-12"/>
<source>Get help for a command</source>
<translation>کمک برای فرمان </translation>
</message>
<message>
<location line="+24"/>
<source>Options:</source>
<translation>تنظیمات</translation>
</message>
<message>
<location line="+24"/>
<source>Specify configuration file (default: Babylonian.conf)</source>
<translation>(: Babylonian.confپیش فرض: )فایل تنظیمی خاص </translation>
</message>
<message>
<location line="+3"/>
<source>Specify pid file (default: quarkcoind.pid)</source>
<translation>(quarkcoind.pidپیش فرض : ) فایل پید خاص</translation>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation>دایرکتوری داده را مشخص کنید</translation>
</message>
<message>
<location line="-9"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation>سایز کَش بانک داده را بر حسب مگابایت تنظیم کنید (پیش فرض:25)</translation>
</message>
<message>
<location line="-28"/>
<source>Listen for connections on <port> (default: 8333 or testnet: 18333)</source>
<translation>برای اتصالات به <port> (پیشفرض: 8333 یا تستنت: 18333) گوش کنید</translation>
</message>
<message>
<location line="+5"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation>حداکثر <n> اتصال با همکاران برقرار داشته باشید (پیشفرض: 125)</translation>
</message>
<message>
<location line="-48"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation>اتصال به گره برای دریافت آدرسهای قرینه و قطع اتصال</translation>
</message>
<message>
<location line="+82"/>
<source>Specify your own public address</source>
<translation>آدرس عمومی خود را ذکر کنید</translation>
</message>
<message>
<location line="+3"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation>آستانه برای قطع ارتباط با همکاران بدرفتار (پیشفرض: 100)</translation>
</message>
<message>
<location line="-134"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation>مدت زمان به ثانیه برای جلوگیری از همکاران بدرفتار برای اتصال دوباره (پیشفرض: 86400)</translation>
</message>
<message>
<location line="-29"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation>در زمان تنظیم درگاه RPC %u برای شنیدن روی IPv4 اشکالی رخ داده است: %s</translation>
</message>
<message>
<location line="+27"/>
<source>Listen for JSON-RPC connections on <port> (default: 8332 or testnet: 18332)</source>
<translation>( 8332پیش فرض :) &lt;port&gt; JSON-RPC شنوایی برای ارتباطات</translation>
</message>
<message>
<location line="+37"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>پذیرش فرمان های خط فرمان و JSON-RPC</translation>
</message>
<message>
<location line="+76"/>
<source>Run in the background as a daemon and accept commands</source>
<translation>اجرا در پس زمینه به عنوان شبح و پذیرش فرمان ها</translation>
</message>
<message>
<location line="+37"/>
<source>Use the test network</source>
<translation>استفاده از شبکه آزمایشی</translation>
</message>
<message>
<location line="-112"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation>پذیرش اتصالات از بیرون (پیش فرض:1 بدون پراکسی یا اتصال)</translation>
</message>
<message>
<location line="-80"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=quarkcoinrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "Babylonian Alert" [email protected]
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Cannot obtain a lock on data directory %s. Babylonian is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation>حداکثر حجم تراکنش های با اولویت بالا/کارمزد کم را به بایت تنظیم کنید (پیش فرض: 27000)</translation>
</message>
<message>
<location line="+6"/>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>هشدار: -paytxfee بسیار بالا تنظیم شده است! این هزینه تراکنشی است که در صورت ارسال تراکنش می پردازید</translation>
</message>
<message><|fim▁hole|> <source>Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.</source>
<translation>هشدار: تراکنش نمایش داده شده ممکن است صحیح نباشد! شما/یا یکی از گره ها به روزآمد سازی نیاز دارید </translation>
</message>
<message>
<location line="+3"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong Babylonian will not work properly.</source>
<translation>هشدار: لطفا زمان و تاریخ رایانه خود را تصحیح نمایید! اگر ساعت رایانه شما اشتباه باشد Babylonian ممکن است صحیح کار نکند</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Block creation options:</source>
<translation>گزینه های ایجاد بلاک:</translation>
</message>
<message>
<location line="+5"/>
<source>Connect only to the specified node(s)</source>
<translation>تنها در گره (های) مشخص شده متصل شوید</translation>
</message>
<message>
<location line="+3"/>
<source>Corrupted block database detected</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation>آدرس آی پی خود را شناسایی کنید (پیش فرض: 1 در زمان شنیدن و بدون -externalip)</translation>
</message>
<message>
<location line="+1"/>
<source>Do you want to rebuild the block database now?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error initializing block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error initializing wallet database environment %s!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error loading block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error opening block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error: Disk space is low!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error: Wallet locked, unable to create transaction!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error: system error: </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation>شنیدن روی هیچ درگاهی ممکن نیست. اگر می خواهید از -listen=0 استفاده کنید.</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to read block info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to read block</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to sync block index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write file info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write to coin database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write transaction index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write undo data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Find peers using DNS lookup (default: 1 unless -connect)</source>
<translation>قرینه ها را برای جستجوی DNS بیاب (پیش فرض: 1 مگر در زمان اتصال)</translation>
</message>
<message>
<location line="+1"/>
<source>Generate coins (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 288, 0 = all)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-4, default: 3)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Not enough file descriptors available.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Rebuild block chain index from current blk000??.dat files</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Set the number of threads to service RPC calls (default: 4)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+26"/>
<source>Verifying blocks...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Verifying wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-69"/>
<source>Imports blocks from external blk000??.dat file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-76"/>
<source>Set the number of script verification threads (up to 16, 0 = auto, <0 = leave that many cores free, default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+77"/>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Invalid -tor address: '%s'</source>
<translation>آدرس نرم افزار تور غلط است %s</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -minrelaytxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -mintxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Maintain a full transaction index (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation>حداکثر بافر دریافت شده بر اساس اتصال <n>* 1000 بایت (پیش فرض:5000)</translation>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation>حداکثر بافر دریافت شده بر اساس اتصال <n>* 1000 بایت (پیش فرض:1000)</translation>
</message>
<message>
<location line="+2"/>
<source>Only accept block chain matching built-in checkpoints (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation>تنها به گره ها در شبکه <net> متصل شوید (IPv4, IPv6 or Tor)</translation>
</message>
<message>
<location line="+2"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation>برونداد اطلاعات اشکال زدایی اضافی. شامل همه گزینه های -debug* دیگر می شود</translation>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation>برونداد اطلاعات اشکال زدایی اضافی برای شبکه</translation>
</message>
<message>
<location line="+2"/>
<source>Prepend debug output with timestamp</source>
<translation>به خروجی اشکالزدایی برچسب زمان بزنید</translation>
</message>
<message>
<location line="+5"/>
<source>SSL options: (see the Babylonian Wiki for SSL setup instructions)</source>
<translation>گزینه های SSL: (برای راهنمای راه اندازی SSL به ویکی Babylonian مراجعه کنید)</translation>
</message>
<message>
<location line="+1"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation>نسخه ای از پراکسی ساکس را برای استفاده انتخاب کنید (4-5 پیش فرض:5)</translation>
</message>
<message>
<location line="+3"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>اطلاعات ردگیری/اشکالزدایی را به جای فایل لاگ اشکالزدایی به کنسول بفرستید</translation>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation>اطلاعات ردگیری/اشکالزدایی را به اشکالزدا بفرستید</translation>
</message>
<message>
<location line="+5"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation>حداکثر سایز بلاک بر اساس بایت تنظیم شود (پیش فرض: 250000)</translation>
</message>
<message>
<location line="+1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation>حداقل سایز بلاک بر اساس بایت تنظیم شود (پیش فرض: 0)</translation>
</message>
<message>
<location line="+2"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation>فایل debug.log را در startup مشتری کوچک کن (پیش فرض:1 اگر اشکال زدایی روی نداد)</translation>
</message>
<message>
<location line="+1"/>
<source>Signing transaction failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation>زمان انقضای اتصال را به میلی ثانیه مشخص کنید (پیش فرض: 5000)</translation>
</message>
<message>
<location line="+4"/>
<source>System error: </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Transaction amount too small</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transaction amounts must be positive</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transaction too large</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation>از UPnP برای شناسایی درگاه شنیداری استفاده کنید (پیش فرض:0)</translation>
</message>
<message>
<location line="+1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation>از UPnP برای شناسایی درگاه شنیداری استفاده کنید (پیش فرض:1 در زمان شنیدن)</translation>
</message>
<message>
<location line="+1"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation>برای دستیابی به سرویس مخفی نرم افزار تور از پراکسی استفاده کنید (پیش فرض: همانند -proxy)</translation>
</message>
<message>
<location line="+2"/>
<source>Username for JSON-RPC connections</source>
<translation>JSON-RPC شناسه برای ارتباطات</translation>
</message>
<message>
<location line="+4"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation>هشدار: این نسخه قدیمی است، روزآمدسازی مورد نیاز است</translation>
</message>
<message>
<location line="+1"/>
<source>You need to rebuild the databases using -reindex to change -txindex</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-50"/>
<source>Password for JSON-RPC connections</source>
<translation>JSON-RPC عبارت عبور برای ارتباطات</translation>
</message>
<message>
<location line="-67"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation>از آدرس آی پی خاص JSON-RPC قبول ارتباطات</translation>
</message>
<message>
<location line="+76"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation>(127.0.0.1پیش فرض: ) &lt;ip&gt; دادن فرمانها برای استفاده گره ها روی</translation>
</message>
<message>
<location line="-120"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>زمانی که بهترین بلاک تغییر کرد، دستور را اجرا کن (%s در cmd با block hash جایگزین شده است)</translation>
</message>
<message>
<location line="+147"/>
<source>Upgrade wallet to latest format</source>
<translation>wallet را به جدیدترین فرمت روزآمد کنید</translation>
</message>
<message>
<location line="-21"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation> (100پیش فرض:)&lt;n&gt; گذاشتن اندازه کلید روی </translation>
</message>
<message>
<location line="-12"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>اسکن مجدد زنجیره بلاک ها برای یافتن تراکنش های گمشده کیف پول</translation>
</message>
<message>
<location line="+35"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation>JSON-RPCبرای ارتباطات استفاده کنید OpenSSL (https)</translation>
</message>
<message>
<location line="-26"/>
<source>Server certificate file (default: server.cert)</source>
<translation> (server.certپیش فرض: )گواهی نامه سرور</translation>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation>(server.pemپیش فرض: ) کلید خصوصی سرور</translation>
</message>
<message>
<location line="-151"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation>رمز های قابل قبول( TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</translation>
</message>
<message>
<location line="+165"/>
<source>This help message</source>
<translation>پیام کمکی</translation>
</message>
<message>
<location line="+6"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation>امکان اتصال به %s از این رایانه وجود ندارد ( bind returned error %d, %s)</translation>
</message>
<message>
<location line="-91"/>
<source>Connect through socks proxy</source>
<translation>اتصال از طریق پراکسی ساکس</translation>
</message>
<message>
<location line="-10"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>اجازه جستجوی DNS برای -addnode، -seednode و -connect</translation>
</message>
<message>
<location line="+55"/>
<source>Loading addresses...</source>
<translation>بارگیری آدرس ها...</translation>
</message>
<message>
<location line="-35"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation>خطا در بارگیری wallet.dat: کیف پول خراب شده است</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat: Wallet requires newer version of Babylonian</source>
<translation>خطا در بارگیری wallet.dat: کیف پول به ویرایش جدیدتری از Babylonian نیاز دارد</translation>
</message>
<message>
<location line="+93"/>
<source>Wallet needed to be rewritten: restart Babylonian to complete</source>
<translation>کیف پول نیاز به بازنویسی داشت: برای تکمیل، Babylonian را دوباره راه اندازی کنید</translation>
</message>
<message>
<location line="-95"/>
<source>Error loading wallet.dat</source>
<translation>خطا در بارگیری wallet.dat</translation>
</message>
<message>
<location line="+28"/>
<source>Invalid -proxy address: '%s'</source>
<translation>آدرس پراکسی اشتباه %s</translation>
</message>
<message>
<location line="+56"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation>شبکه مشخص شده غیرقابل شناسایی در onlynet: '%s'</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation>نسخه پراکسی ساکس غیرقابل شناسایی درخواست شده است: %i</translation>
</message>
<message>
<location line="-96"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation>آدرس قابل اتصال- شناسایی نیست %s</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation>آدرس -externalip قابل تشخیص نیست: '%s'</translation>
</message>
<message>
<location line="+44"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation>میزان وجه اشتباه برای paytxfee=<میزان وجه>: %s</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount</source>
<translation>میزان وجه اشتباه</translation>
</message>
<message>
<location line="-6"/>
<source>Insufficient funds</source>
<translation>بودجه ناکافی</translation>
</message>
<message>
<location line="+10"/>
<source>Loading block index...</source>
<translation>بارگیری شاخص بلوک...</translation>
</message>
<message>
<location line="-57"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>به اتصال یک گره اضافه کنید و اتصال را باز نگاه دارید</translation>
</message>
<message>
<location line="-25"/>
<source>Unable to bind to %s on this computer. Babylonian is probably already running.</source>
<translation>اتصال به %s از این رایانه امکان پذیر نیست. Babylonian احتمالا در حال اجراست.</translation>
</message>
<message>
<location line="+64"/>
<source>Fee per KB to add to transactions you send</source>
<translation>کارمزد به ازای هر کیلوبایت برای افزودن به تراکنش های ارسالی</translation>
</message>
<message>
<location line="+19"/>
<source>Loading wallet...</source>
<translation>بارگیری کیف پول...</translation>
</message>
<message>
<location line="-52"/>
<source>Cannot downgrade wallet</source>
<translation>امکان تنزل نسخه در wallet وجود ندارد</translation>
</message>
<message>
<location line="+3"/>
<source>Cannot write default address</source>
<translation>آدرس پیش فرض قابل ذخیره نیست</translation>
</message>
<message>
<location line="+64"/>
<source>Rescanning...</source>
<translation>اسکن مجدد...</translation>
</message>
<message>
<location line="-57"/>
<source>Done loading</source>
<translation>بارگیری انجام شد</translation>
</message>
<message>
<location line="+82"/>
<source>To use the %s option</source>
<translation>برای استفاده از گزینه %s</translation>
</message>
<message>
<location line="-74"/>
<source>Error</source>
<translation>خطا</translation>
</message>
<message>
<location line="-31"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation>شما باید rpcpassword=<password> را در فایل پیکربندی تنظیم کنید:
%s
اگر فایل وجود ندارد، آن را با مجوز فقط خواندنی برای مالک ایجاد کنید.</translation>
</message>
</context>
</TS><|fim▁end|> | <location line="+3"/> |
<|file_name|>TillDeaf.java<|end_file_name|><|fim▁begin|>package org.bitspilani.pearl;
import android.app.Activity;
import android.os.Bundle;
import android.view.Window;
import android.widget.TextView;
public class TillDeaf extends Activity {
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
getWindow().requestFeature(Window.FEATURE_ACTION_BAR);
getActionBar().hide();
setContentView(R.layout.event1);
TextView tv = (TextView)findViewById(R.id.title);
tv.setText(R.string.tillDeaf);
TextView tv1 = (TextView)findViewById(R.id.description);<|fim▁hole|><|fim▁end|> | tv1.setText(R.string.tilldeaf_ds);
}
} |
<|file_name|>platform.ts<|end_file_name|><|fim▁begin|>/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
let _isWindows = false;
let _isMacintosh = false;
let _isLinux = false;
let _isNative = false;<|fim▁hole|>let _language: string | undefined = undefined;
let _translationsConfigFile: string | undefined = undefined;
interface NLSConfig {
locale: string;
availableLanguages: { [key: string]: string; };
_translationsConfigFile: string;
}
export interface IProcessEnvironment {
[key: string]: string;
}
interface INodeProcess {
platform: string;
env: IProcessEnvironment;
getuid(): number;
nextTick: Function;
}
declare let process: INodeProcess;
declare let global: any;
interface INavigator {
userAgent: string;
language: string;
}
declare let navigator: INavigator;
declare let self: any;
export const LANGUAGE_DEFAULT = 'en';
// OS detection
if (typeof process === 'object' && typeof process.nextTick === 'function' && typeof process.platform === 'string') {
_isWindows = (process.platform === 'win32');
_isMacintosh = (process.platform === 'darwin');
_isLinux = (process.platform === 'linux');
_locale = LANGUAGE_DEFAULT;
_language = LANGUAGE_DEFAULT;
const rawNlsConfig = process.env['VSCODE_NLS_CONFIG'];
if (rawNlsConfig) {
try {
const nlsConfig: NLSConfig = JSON.parse(rawNlsConfig);
const resolved = nlsConfig.availableLanguages['*'];
_locale = nlsConfig.locale;
// VSCode's default language is 'en'
_language = resolved ? resolved : LANGUAGE_DEFAULT;
_translationsConfigFile = nlsConfig._translationsConfigFile;
} catch (e) {
}
}
_isNative = true;
} else if (typeof navigator === 'object') {
const userAgent = navigator.userAgent;
_isWindows = userAgent.indexOf('Windows') >= 0;
_isMacintosh = userAgent.indexOf('Macintosh') >= 0;
_isLinux = userAgent.indexOf('Linux') >= 0;
_isWeb = true;
_locale = navigator.language;
_language = _locale;
}
export const enum Platform {
Web,
Mac,
Linux,
Windows
}
export function PlatformToString(platform: Platform) {
switch (platform) {
case Platform.Web: return 'Web';
case Platform.Mac: return 'Mac';
case Platform.Linux: return 'Linux';
case Platform.Windows: return 'Windows';
}
}
let _platform: Platform = Platform.Web;
if (_isNative) {
if (_isMacintosh) {
_platform = Platform.Mac;
} else if (_isWindows) {
_platform = Platform.Windows;
} else if (_isLinux) {
_platform = Platform.Linux;
}
}
export const isWindows = _isWindows;
export const isMacintosh = _isMacintosh;
export const isLinux = _isLinux;
export const isNative = _isNative;
export const isWeb = _isWeb;
export const platform = _platform;
export function isRootUser(): boolean {
return _isNative && !_isWindows && (process.getuid() === 0);
}
/**
* The language used for the user interface. The format of
* the string is all lower case (e.g. zh-tw for Traditional
* Chinese)
*/
export const language = _language;
/**
* The OS locale or the locale specified by --locale. The format of
* the string is all lower case (e.g. zh-tw for Traditional
* Chinese). The UI is not necessarily shown in the provided locale.
*/
export const locale = _locale;
/**
* The translations that are available through language packs.
*/
export const translationsConfigFile = _translationsConfigFile;
const _globals = (typeof self === 'object' ? self : typeof global === 'object' ? global : {} as any);
export const globals: any = _globals;
let _setImmediate: ((callback: (...args: any[]) => void) => number) | null = null;
export function setImmediate(callback: (...args: any[]) => void): number {
if (_setImmediate === null) {
if (globals.setImmediate) {
_setImmediate = globals.setImmediate.bind(globals);
} else if (typeof process !== 'undefined' && typeof process.nextTick === 'function') {
_setImmediate = process.nextTick.bind(process);
} else {
_setImmediate = globals.setTimeout.bind(globals);
}
}
return _setImmediate!(callback);
}
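// Illustrative usage of the shim above (kept as a comment so this module
// stays side-effect free): the callback runs asynchronously regardless of
// whether the host provides setImmediate, process.nextTick, or setTimeout.
// setImmediate(() => console.log('runs on the next tick/turn'));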
export const enum OperatingSystem {
Windows = 1,
Macintosh = 2,
Linux = 3
}
export const OS = (_isMacintosh ? OperatingSystem.Macintosh : (_isWindows ? OperatingSystem.Windows : OperatingSystem.Linux));
export const enum AccessibilitySupport {
/**
* This should be the browser case where it is not known if a screen reader is attached or not.
*/
Unknown = 0,
Disabled = 1,
Enabled = 2
}<|fim▁end|> | let _isWeb = false;
let _locale: string | undefined = undefined; |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>//! Trait Resolution. See the [rustc-dev-guide] for more information on how this works.
//!
//! [rustc-dev-guide]: https://rustc-dev-guide.rust-lang.org/traits/resolution.html
mod engine;
pub mod error_reporting;
mod project;
mod structural_impls;
pub mod util;
use rustc_hir as hir;
use rustc_middle::ty::error::{ExpectedFound, TypeError};
use rustc_middle::ty::{self, Const, Ty};
use rustc_span::Span;
pub use self::FulfillmentErrorCode::*;
pub use self::ImplSource::*;
pub use self::ObligationCauseCode::*;
pub use self::SelectionError::*;
pub use self::engine::{TraitEngine, TraitEngineExt};
pub use self::project::MismatchedProjectionTypes;
pub(crate) use self::project::UndoLog;
pub use self::project::{
Normalized, NormalizedTy, ProjectionCache, ProjectionCacheEntry, ProjectionCacheKey,
ProjectionCacheStorage, Reveal,
};
pub use rustc_middle::traits::*;
/// An `Obligation` represents some trait reference (e.g., `i32: Eq`) for
/// which the "impl_source" must be found. The process of finding an "impl_source" is
/// called "resolving" the `Obligation`. This process consists of
/// either identifying an `impl` (e.g., `impl Eq for i32`) that
/// satisfies the obligation, or else finding a bound that is in
/// scope. The eventual result is usually a `Selection` (defined below).
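/// A minimal sketch of building one obligation by hand (illustrative
/// only; assumes a `cause`, `param_env`, and `predicate` are in scope):
///
/// ```ignore
/// let obligation = Obligation::new(cause, param_env, predicate);
/// assert_eq!(obligation.recursion_depth, 0); // `new` starts at depth 0
/// ```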
#[derive(Clone, PartialEq, Eq, Hash)]
pub struct Obligation<'tcx, T> {
/// The reason we have to prove this thing.
pub cause: ObligationCause<'tcx>,
/// The environment in which we should prove this thing.
pub param_env: ty::ParamEnv<'tcx>,
/// The thing we are trying to prove.
pub predicate: T,
/// If we started proving this as a result of trying to prove
/// something else, track the total depth to ensure termination.
/// If this goes over a certain threshold, we abort compilation --
/// in such cases, we cannot say whether or not the predicate
/// holds for certain. Stupid halting problem; such a drag.
pub recursion_depth: usize,
}
pub type PredicateObligation<'tcx> = Obligation<'tcx, ty::Predicate<'tcx>>;
pub type TraitObligation<'tcx> = Obligation<'tcx, ty::PolyTraitPredicate<'tcx>>;
// `PredicateObligation` is used a lot. Make sure it doesn't unintentionally get bigger.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(PredicateObligation<'_>, 32);
pub type PredicateObligations<'tcx> = Vec<PredicateObligation<'tcx>>;
pub type Selection<'tcx> = ImplSource<'tcx, PredicateObligation<'tcx>>;
pub struct FulfillmentError<'tcx> {
pub obligation: PredicateObligation<'tcx>,
pub code: FulfillmentErrorCode<'tcx>,
/// Diagnostics only: we opportunistically change the `code.span` when we encounter an
/// obligation error caused by a call argument. When this is the case, we also signal that in
/// this field to ensure accuracy of suggestions.
pub points_at_arg_span: bool,
/// Diagnostics only: the 'root' obligation which resulted in
/// the failure to process `obligation`. This is the obligation
/// that was initially passed to `register_predicate_obligation`
pub root_obligation: PredicateObligation<'tcx>,
}
#[derive(Clone)]
pub enum FulfillmentErrorCode<'tcx> {
CodeSelectionError(SelectionError<'tcx>),
CodeProjectionError(MismatchedProjectionTypes<'tcx>),
CodeSubtypeError(ExpectedFound<Ty<'tcx>>, TypeError<'tcx>), // always comes from a SubtypePredicate
CodeConstEquateError(ExpectedFound<&'tcx Const<'tcx>>, TypeError<'tcx>),
CodeAmbiguity,
}
impl<'tcx, O> Obligation<'tcx, O> {
pub fn new(
cause: ObligationCause<'tcx>,
param_env: ty::ParamEnv<'tcx>,
predicate: O,
) -> Obligation<'tcx, O> {
Obligation { cause, param_env, recursion_depth: 0, predicate }
}
pub fn with_depth(
cause: ObligationCause<'tcx>,
recursion_depth: usize,
param_env: ty::ParamEnv<'tcx>,
predicate: O,
) -> Obligation<'tcx, O> {
Obligation { cause, param_env, recursion_depth, predicate }
}<|fim▁hole|> body_id: hir::HirId,
param_env: ty::ParamEnv<'tcx>,
trait_ref: O,
) -> Obligation<'tcx, O> {
Obligation::new(ObligationCause::misc(span, body_id), param_env, trait_ref)
}
pub fn with<P>(&self, value: P) -> Obligation<'tcx, P> {
Obligation {
cause: self.cause.clone(),
param_env: self.param_env,
recursion_depth: self.recursion_depth,
predicate: value,
}
}
}
impl<'tcx> FulfillmentError<'tcx> {
pub fn new(
obligation: PredicateObligation<'tcx>,
code: FulfillmentErrorCode<'tcx>,
root_obligation: PredicateObligation<'tcx>,
) -> FulfillmentError<'tcx> {
FulfillmentError { obligation, code, points_at_arg_span: false, root_obligation }
}
}
impl<'tcx> TraitObligation<'tcx> {
pub fn self_ty(&self) -> ty::Binder<'tcx, Ty<'tcx>> {
self.predicate.map_bound(|p| p.self_ty())
}
}<|fim▁end|> |
pub fn misc(
span: Span, |
<|file_name|>comp-1119.component.spec.ts<|end_file_name|><|fim▁begin|>/**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import { async, ComponentFixture, TestBed } from '@angular/core/testing';
import { Comp1119Component } from './comp-1119.component';
describe('Comp1119Component', () => {
let component: Comp1119Component;
let fixture: ComponentFixture<Comp1119Component>;
beforeEach(async(() => {
TestBed.configureTestingModule({
declarations: [ Comp1119Component ]
})
.compileComponents();
}));
beforeEach(() => {
fixture = TestBed.createComponent(Comp1119Component);
component = fixture.componentInstance;
fixture.detectChanges();
});
it('should create', () => {
expect(component).toBeTruthy();
});<|fim▁hole|>});<|fim▁end|> | |
<|file_name|>zelos.js<|end_file_name|><|fim▁begin|>/**
* @license
* Copyright 2018 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @fileoverview Zelos theme.
*/
'use strict';
/**
* Zelos theme.
* @namespace Blockly.Themes.Zelos
*/
goog.module('Blockly.Themes.Zelos');
const {Theme} = goog.require('Blockly.Theme');
const defaultBlockStyles = {
'colour_blocks': {
'colourPrimary': '#CF63CF',
'colourSecondary': '#C94FC9',
'colourTertiary': '#BD42BD',
},
'list_blocks': {
'colourPrimary': '#9966FF',
'colourSecondary': '#855CD6',
'colourTertiary': '#774DCB',
},
'logic_blocks': {
'colourPrimary': '#4C97FF',
'colourSecondary': '#4280D7',
'colourTertiary': '#3373CC',
},
'loop_blocks': {
'colourPrimary': '#0fBD8C',<|fim▁hole|> 'math_blocks': {
'colourPrimary': '#59C059',
'colourSecondary': '#46B946',
'colourTertiary': '#389438',
},
'procedure_blocks': {
'colourPrimary': '#FF6680',
'colourSecondary': '#FF4D6A',
'colourTertiary': '#FF3355',
},
'text_blocks': {
'colourPrimary': '#FFBF00',
'colourSecondary': '#E6AC00',
'colourTertiary': '#CC9900',
},
'variable_blocks': {
'colourPrimary': '#FF8C1A',
'colourSecondary': '#FF8000',
'colourTertiary': '#DB6E00',
},
'variable_dynamic_blocks': {
'colourPrimary': '#FF8C1A',
'colourSecondary': '#FF8000',
'colourTertiary': '#DB6E00',
},
'hat_blocks': {
'colourPrimary': '#4C97FF',
'colourSecondary': '#4280D7',
'colourTertiary': '#3373CC',
'hat': 'cap',
},
};
const categoryStyles = {
'colour_category': {'colour': '#CF63CF'},
'list_category': {'colour': '#9966FF'},
'logic_category': {'colour': '#4C97FF'},
'loop_category': {'colour': '#0fBD8C'},
'math_category': {'colour': '#59C059'},
'procedure_category': {'colour': '#FF6680'},
'text_category': {'colour': '#FFBF00'},
'variable_category': {'colour': '#FF8C1A'},
'variable_dynamic_category': {'colour': '#FF8C1A'},
};
/**
* Zelos theme.
* @type {Theme}
* @alias Blockly.Themes.Zelos
*/
const Zelos = new Theme('zelos', defaultBlockStyles, categoryStyles);
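// Illustrative usage (assumes a host page with a 'blocklyDiv' element;
// Blockly.inject is the standard entry point for embedding a workspace):
// Blockly.inject('blocklyDiv', {'theme': Zelos});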
exports.Zelos = Zelos;<|fim▁end|> | 'colourSecondary': '#0DA57A',
'colourTertiary': '#0B8E69',
}, |
<|file_name|>s3.py<|end_file_name|><|fim▁begin|>import os
import mimetypes
import warnings
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.core.files.base import File
from django.core.files.storage import Storage
from django.core.exceptions import ImproperlyConfigured
try:
from S3 import AWSAuthConnection, QueryStringAuthGenerator, CallingFormat
except ImportError:
raise ImproperlyConfigured("Could not load amazon's S3 bindings.\nSee "
"http://developer.amazonwebservices.com/connect/entry.jspa?externalID=134")
ACCESS_KEY_NAME = getattr(settings, 'AWS_S3_ACCESS_KEY_ID', getattr(settings, 'AWS_ACCESS_KEY_ID', None))
SECRET_KEY_NAME = getattr(settings, 'AWS_S3_SECRET_ACCESS_KEY', getattr(settings, 'AWS_SECRET_ACCESS_KEY', None))
HEADERS = getattr(settings, 'AWS_HEADERS', {})
DEFAULT_ACL = getattr(settings, 'AWS_DEFAULT_ACL', 'public-read') #access control policy (private, or public-read)
QUERYSTRING_ACTIVE = getattr(settings, 'AWS_QUERYSTRING_ACTIVE', False)
QUERYSTRING_EXPIRE = getattr(settings, 'AWS_QUERYSTRING_EXPIRE', 60)
SECURE_URLS = getattr(settings, 'AWS_S3_SECURE_URLS', False)
BUCKET_PREFIX = getattr(settings, 'AWS_BUCKET_PREFIX', '')
CALLING_FORMAT = getattr(settings, 'AWS_CALLING_FORMAT', CallingFormat.PATH)
PRELOAD_METADATA = getattr(settings, 'AWS_PRELOAD_METADATA', False)
<|fim▁hole|> 'text/css',
'application/javascript',
'application/x-javascript'
))
if IS_GZIPPED:
from gzip import GzipFile
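# Illustrative settings.py snippet for this backend (the module path and
# values below are placeholders, not part of this file):
# DEFAULT_FILE_STORAGE = 'storages.backends.s3.S3Storage'
# AWS_STORAGE_BUCKET_NAME = 'my-bucket'
# AWS_ACCESS_KEY_ID = '...'
# AWS_SECRET_ACCESS_KEY = '...'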
class S3Storage(Storage):
"""Amazon Simple Storage Service"""
def __init__(self, bucket=settings.AWS_STORAGE_BUCKET_NAME,
access_key=None, secret_key=None, acl=DEFAULT_ACL,
calling_format=CALLING_FORMAT, encrypt=False,
gzip=IS_GZIPPED, gzip_content_types=GZIP_CONTENT_TYPES,
preload_metadata=PRELOAD_METADATA):
warnings.warn(
"The s3 backend is deprecated and will be removed in version 1.2. "
"Use the s3boto backend instead.",
PendingDeprecationWarning
)
self.bucket = bucket
self.acl = acl
self.encrypt = encrypt
self.gzip = gzip
self.gzip_content_types = gzip_content_types
self.preload_metadata = preload_metadata
if encrypt:
try:
import ezPyCrypto
except ImportError:
raise ImproperlyConfigured("Could not load ezPyCrypto.\nSee "
"http://www.freenet.org.nz/ezPyCrypto/ to install it.")
self.crypto_key = ezPyCrypto.key
if not access_key and not secret_key:
access_key, secret_key = self._get_access_keys()
self.connection = AWSAuthConnection(access_key, secret_key,
calling_format=calling_format)
self.generator = QueryStringAuthGenerator(access_key, secret_key,
calling_format=calling_format,
is_secure=SECURE_URLS)
self.generator.set_expires_in(QUERYSTRING_EXPIRE)
self.headers = HEADERS
self._entries = {}
def _get_access_keys(self):
access_key = ACCESS_KEY_NAME
secret_key = SECRET_KEY_NAME
if (access_key or secret_key) and (not access_key or not secret_key):
access_key = os.environ.get(ACCESS_KEY_NAME)
secret_key = os.environ.get(SECRET_KEY_NAME)
if access_key and secret_key:
# Both were provided, so use them
return access_key, secret_key
return None, None
@property
def entries(self):
if self.preload_metadata and not self._entries:
self._entries = dict((entry.key, entry)
for entry in self.connection.list_bucket(self.bucket).entries)
return self._entries
def _get_connection(self):
return AWSAuthConnection(*self._get_access_keys())
def _clean_name(self, name):
# Useful for windows' paths
return os.path.join(BUCKET_PREFIX, os.path.normpath(name).replace('\\', '/'))
def _compress_string(self, s):
"""Gzip a given string."""
zbuf = StringIO()
zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
zfile.write(s)
zfile.close()
return zbuf.getvalue()
def _put_file(self, name, content):
if self.encrypt:
# Create a key object
key = self.crypto_key()
# Read in a public key
fd = open(settings.CRYPTO_KEYS_PUBLIC, "rb")
public_key = fd.read()
fd.close()
# import this public key
key.importKey(public_key)
# Now encrypt some text against this public key
content = key.encString(content)
content_type = mimetypes.guess_type(name)[0] or "application/x-octet-stream"
if self.gzip and content_type in self.gzip_content_types:
content = self._compress_string(content)
self.headers.update({'Content-Encoding': 'gzip'})
self.headers.update({
'x-amz-acl': self.acl,
'Content-Type': content_type,
'Content-Length' : str(len(content)),
})
response = self.connection.put(self.bucket, name, content, self.headers)
if response.http_response.status not in (200, 206):
raise IOError("S3StorageError: %s" % response.message)
def _open(self, name, mode='rb'):
name = self._clean_name(name)
remote_file = S3StorageFile(name, self, mode=mode)
return remote_file
def _read(self, name, start_range=None, end_range=None):
name = self._clean_name(name)
if start_range is None:
headers = {}
else:
headers = {'Range': 'bytes=%s-%s' % (start_range, end_range)}
response = self.connection.get(self.bucket, name, headers)
if response.http_response.status not in (200, 206):
raise IOError("S3StorageError: %s" % response.message)
headers = response.http_response.msg
if self.encrypt:
# Read in a private key
fd = open(settings.CRYPTO_KEYS_PRIVATE, "rb")
private_key = fd.read()
fd.close()
# Create a key object, and auto-import private key
key = self.crypto_key(private_key)
# Decrypt this file
response.object.data = key.decString(response.object.data)
return response.object.data, headers.get('etag', None), headers.get('content-range', None)
def _save(self, name, content):
name = self._clean_name(name)
content.open()
if hasattr(content, 'chunks'):
content_str = ''.join(chunk for chunk in content.chunks())
else:
content_str = content.read()
self._put_file(name, content_str)
return name
def delete(self, name):
name = self._clean_name(name)
response = self.connection.delete(self.bucket, name)
if response.http_response.status != 204:
raise IOError("S3StorageError: %s" % response.message)
def exists(self, name):
name = self._clean_name(name)
if self.entries:
return name in self.entries
response = self.connection._make_request('HEAD', self.bucket, name)
return response.status == 200
def size(self, name):
name = self._clean_name(name)
if self.entries:
entry = self.entries.get(name)
if entry:
return entry.size
return 0
response = self.connection._make_request('HEAD', self.bucket, name)
content_length = response.getheader('Content-Length')
return content_length and int(content_length) or 0
def url(self, name):
name = self._clean_name(name)
if QUERYSTRING_ACTIVE:
return self.generator.generate_url('GET', self.bucket, name)
else:
return self.generator.make_bare_url(self.bucket, name)
def modified_time(self, name):
try:
from dateutil import parser, tz
except ImportError:
raise NotImplementedError()
name = self._clean_name(name)
if self.entries:
last_modified = self.entries.get(name).last_modified
else:
response = self.connection._make_request('HEAD', self.bucket, name)
last_modified = response.getheader('Last-Modified')
# convert to string to date
last_modified_date = parser.parse(last_modified)
# if the date has no timzone, assume UTC
if last_modified_date.tzinfo == None:
last_modified_date = last_modified_date.replace(tzinfo=tz.tzutc())
# convert date to local time w/o timezone
return last_modified_date.astimezone(tz.tzlocal()).replace(tzinfo=None)
## UNCOMMENT BELOW IF NECESSARY
#def get_available_name(self, name):
# """ Overwrite existing file with the same name. """
# name = self._clean_name(name)
# return name
class PreloadingS3Storage(S3Storage):
pass
class S3StorageFile(File):
def __init__(self, name, storage, mode):
self._name = name
self._storage = storage
self._mode = mode
self._is_dirty = False
self.file = StringIO()
self.start_range = 0
@property
def size(self):
if not hasattr(self, '_size'):
self._size = self._storage.size(self._name)
return self._size
def read(self, num_bytes=None):
if num_bytes is None:
args = []
self.start_range = 0
else:
args = [self.start_range, self.start_range+num_bytes-1]
data, etags, content_range = self._storage._read(self._name, *args)
if content_range is not None:
current_range, size = content_range.split(' ', 1)[1].split('/', 1)
start_range, end_range = current_range.split('-', 1)
self._size, self.start_range = int(size), int(end_range)+1
self.file = StringIO(data)
return self.file.getvalue()
def write(self, content):
if 'w' not in self._mode:
raise AttributeError("File was opened for read-only access.")
self.file = StringIO(content)
self._is_dirty = True
def close(self):
if self._is_dirty:
self._storage._put_file(self._name, self.file.getvalue())
self.file.close()<|fim▁end|> | IS_GZIPPED = getattr(settings, 'AWS_IS_GZIPPED', False)
GZIP_CONTENT_TYPES = getattr(settings, 'GZIP_CONTENT_TYPES', ( |
<|file_name|>ClipboardTest.js<|end_file_name|><|fim▁begin|>asynctest(
'browser.tinymce.plugins.table.ClipboardTest',
[
'ephox.agar.api.Pipeline',
'ephox.mcagar.api.LegacyUnit',
'ephox.mcagar.api.TinyLoader',
'tinymce.core.util.Tools',
'tinymce.plugins.table.Plugin',
'tinymce.themes.modern.Theme'
],
function (Pipeline, LegacyUnit, TinyLoader, Tools, Plugin, Theme) {
var success = arguments[arguments.length - 2];
var failure = arguments[arguments.length - 1];
var suite = LegacyUnit.createSuite();
Plugin();
Theme();
var cleanTableHtml = function (html) {
return html.replace(/<p>( |<br[^>]+>)<\/p>$/, '');
};
var selectRangeXY = function (editor, start, end) {
start = editor.$(start)[0];
end = editor.$(end)[0];
editor.fire('mousedown', { target: start });
editor.fire('mouseover', { target: end });
editor.fire('mouseup', { target: end });
};
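// Illustrative: selectRangeXY(editor, 'td:first-child', 'td:last-child')
// simulates the mouse drag the table plugin uses to build a multi-cell
// selection (selector arguments here are hypothetical examples).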
suite.test("mceTablePasteRowBefore command", function (editor) {
editor.setContent(
'<table>' +
'<tr><td>1</td><td>2</td></tr>' +
'<tr><td>2</td><td>3</td></tr>' +
'</table>'
);
LegacyUnit.setSelection(editor, 'tr:nth-child(1) td', 0);
editor.execCommand('mceTableCopyRow');
LegacyUnit.setSelection(editor, 'tr:nth-child(2) td', 0);
editor.execCommand('mceTablePasteRowBefore');
LegacyUnit.equal(
cleanTableHtml(editor.getContent()),
'<table>' +
'<tbody>' +
'<tr><td>1</td><td>2</td></tr>' +
'<tr><td>1</td><td>2</td></tr>' +
'<tr><td>2</td><td>3</td></tr>' +
'</tbody>' +
'</table>'
);
LegacyUnit.setSelection(editor, 'tr:nth-child(2) td', 0);
editor.execCommand('mceTablePasteRowBefore');
LegacyUnit.equal(
cleanTableHtml(editor.getContent()),
'<table>' +
'<tbody>' +
'<tr><td>1</td><td>2</td></tr>' +
'<tr><td>1</td><td>2</td></tr>' +
'<tr><td>1</td><td>2</td></tr>' +
'<tr><td>2</td><td>3</td></tr>' +
'</tbody>' +
'</table>'
);
});
suite.test("mceTablePasteRowAfter command", function (editor) {
editor.setContent(
'<table>' +
'<tr><td>1</td><td>2</td></tr>' +
'<tr><td>2</td><td>3</td></tr>' +
'</table>'
);
LegacyUnit.setSelection(editor, 'tr:nth-child(1) td', 0);
editor.execCommand('mceTableCopyRow');
LegacyUnit.setSelection(editor, 'tr:nth-child(2) td', 0);
editor.execCommand('mceTablePasteRowAfter');
LegacyUnit.equal(
cleanTableHtml(editor.getContent()),
'<table>' +
'<tbody>' +
'<tr><td>1</td><td>2</td></tr>' +
'<tr><td>2</td><td>3</td></tr>' +
'<tr><td>1</td><td>2</td></tr>' +
'</tbody>' +
'</table>'
);
LegacyUnit.setSelection(editor, 'tr:nth-child(2) td', 0);
editor.execCommand('mceTablePasteRowAfter');
LegacyUnit.equal(<|fim▁hole|>
'<table>' +
'<tbody>' +
'<tr><td>1</td><td>2</td></tr>' +
'<tr><td>2</td><td>3</td></tr>' +
'<tr><td>1</td><td>2</td></tr>' +
'<tr><td>1</td><td>2</td></tr>' +
'</tbody>' +
'</table>'
);
});
suite.test("mceTablePasteRowAfter from merged row source", function (editor) {
editor.setContent(
'<table>' +
'<tbody>' +
'<tr><td colspan="2">1 2</td><td rowspan="2">3</td></tr>' +
'<tr><td>1</td><td>2</td></tr>' +
'</tbody>' +
'</table>'
);
LegacyUnit.setSelection(editor, 'tr:nth-child(1) td', 0);
editor.execCommand('mceTableCopyRow');
LegacyUnit.setSelection(editor, 'tr:nth-child(2) td:nth-child(2)', 0);
editor.execCommand('mceTablePasteRowAfter');
LegacyUnit.equal(
cleanTableHtml(editor.getContent()),
'<table>' +
'<tbody>' +
'<tr><td colspan="2">1 2</td><td rowspan="2">3</td></tr>' +
'<tr><td>1</td><td>2</td></tr>' +
'<tr><td colspan="2">1 2</td><td>3</td></tr>' +
'</tbody>' +
'</table>'
);
});
suite.test("mceTablePasteRowAfter from merged row source to merged row target", function (editor) {
editor.setContent(
'<table>' +
'<tbody>' +
'<tr><td colspan="2">1 2</td><td rowspan="2">3</td></tr>' +
'<tr><td>1</td><td>2</td></tr>' +
'</tbody>' +
'</table>'
);
LegacyUnit.setSelection(editor, 'tr:nth-child(1) td', 0);
editor.execCommand('mceTableCopyRow');
LegacyUnit.setSelection(editor, 'tr:nth-child(1) td', 0);
editor.execCommand('mceTablePasteRowAfter');
LegacyUnit.equal(
cleanTableHtml(editor.getContent()),
'<table>' +
'<tbody>' +
'<tr><td colspan="2">1 2</td><td>3</td></tr>' +
'<tr><td colspan="2">1 2</td><td>3</td></tr>' +
'<tr><td>1</td><td>2</td><td> </td></tr>' +
'</tbody>' +
'</table>'
);
});
suite.test("mceTablePasteRowAfter to wider table", function (editor) {
editor.setContent(
'<table>' +
'<tbody>' +
'<tr><td>1a</td><td>2a</td><td>3a</td></tr>' +
'</tbody>' +
'</table>' +
'<table>' +
'<tbody>' +
'<tr><td>1b</td><td>2b</td><td>3b</td><td>4b</td></tr>' +
'</tbody>' +
'</table>'
);
LegacyUnit.setSelection(editor, 'table:nth-child(1) tr:nth-child(1) td', 0);
editor.execCommand('mceTableCopyRow');
LegacyUnit.setSelection(editor, 'table:nth-child(2) td', 0);
editor.execCommand('mceTablePasteRowAfter');
LegacyUnit.equal(
cleanTableHtml(editor.getContent()),
'<table>' +
'<tbody>' +
'<tr><td>1a</td><td>2a</td><td>3a</td></tr>' +
'</tbody>' +
'</table>' +
'<table>' +
'<tbody>' +
'<tr><td>1b</td><td>2b</td><td>3b</td><td>4b</td></tr>' +
'<tr><td>1a</td><td>2a</td><td>3a</td><td> </td></tr>' +
'</tbody>' +
'</table>'
);
});
suite.test("mceTablePasteRowAfter to narrower table", function (editor) {
editor.setContent(
'<table>' +
'<tbody>' +
'<tr><td>1a</td><td>2a</td><td>3a</td><td>4a</td><td>5a</td></tr>' +
'<tr><td>1a</td><td colspan="3">2a</td><td>5a</td></tr>' +
'</tbody>' +
'</table>' +
'<table>' +
'<tbody>' +
'<tr><td>1b</td><td>2b</td><td>3b</td></tr>' +
'</tbody>' +
'</table>'
);
selectRangeXY(editor, 'table:nth-child(1) tr:nth-child(1) td:nth-child(1)', 'table:nth-child(1) tr:nth-child(2) td:nth-child(3)');
editor.execCommand('mceTableCopyRow');
LegacyUnit.setSelection(editor, 'table:nth-child(2) tr td', 0);
editor.execCommand('mceTablePasteRowAfter');
LegacyUnit.equal(
cleanTableHtml(editor.getContent()),
'<table>' +
'<tbody>' +
'<tr><td>1a</td><td>2a</td><td>3a</td><td>4a</td><td>5a</td></tr>' +
'<tr><td>1a</td><td colspan="3">2a</td><td>5a</td></tr>' +
'</tbody>' +
'</table>' +
'<table>' +
'<tbody>' +
'<tr><td>1b</td><td>2b</td><td>3b</td></tr>' +
'<tr><td>1a</td><td>2a</td><td>3a</td></tr>' +
'<tr><td>1a</td><td colspan="2">2a</td></tr>' +
'</tbody>' +
'</table>'
);
});
suite.test("Copy/paste several rows with multiple rowspans", function (editor) {
editor.setContent(
'<table>' +
'<tbody>' +
'<tr><td rowspan="2">1</td><td>2</td><td>3</td></tr>' +
'<tr><td>2</td><td rowspan="3">3</td></tr>' +
'<tr><td>1</td><td>2</td></tr>' +
'<tr><td>1</td><td>2</td></tr>' +
'</tbody>' +
'</table>'
);
selectRangeXY(editor, 'table tr:nth-child(1) td:nth-child(1)', 'table tr:nth-child(3) td:nth-child(2)');
editor.execCommand('mceTableCopyRow');
LegacyUnit.setSelection(editor, 'table tr:nth-child(4) td', 0);
editor.execCommand('mceTablePasteRowAfter');
LegacyUnit.equal(
cleanTableHtml(editor.getContent()),
'<table>' +
'<tbody>' +
'<tr><td rowspan="2">1</td><td>2</td><td>3</td></tr>' +
'<tr><td>2</td><td rowspan="3">3</td></tr>' +
'<tr><td>1</td><td>2</td></tr>' +
'<tr><td>1</td><td>2</td></tr>' +
'<tr><td rowspan="2">1</td><td>2</td><td>3</td></tr>' +
'<tr><td>2</td><td rowspan="2">3</td></tr>' +
'<tr><td>1</td><td>2</td></tr>' +
'</tbody>' +
'</table>'
);
});
suite.test("row clipboard api", function (editor) {
var clipboardRows;
function createRow(cellContents) {
var tr = editor.dom.create('tr');
Tools.each(cellContents, function (html) {
tr.appendChild(editor.dom.create('td', null, html));
});
return tr;
}
editor.setContent(
'<table>' +
'<tr><td>1</td><td>2</td></tr>' +
'<tr><td>2</td><td>3</td></tr>' +
'</table>'
);
LegacyUnit.setSelection(editor, 'tr:nth-child(1) td', 0);
editor.execCommand('mceTableCopyRow');
clipboardRows = editor.plugins.table.getClipboardRows();
LegacyUnit.equal(clipboardRows.length, 1);
LegacyUnit.equal(clipboardRows[0].tagName, 'TR');
editor.plugins.table.setClipboardRows(clipboardRows.concat([
createRow(['a', 'b']),
createRow(['c', 'd'])
]));
LegacyUnit.setSelection(editor, 'tr:nth-child(2) td', 0);
editor.execCommand('mceTablePasteRowAfter');
LegacyUnit.equal(
cleanTableHtml(editor.getContent()),
'<table>' +
'<tbody>' +
'<tr><td>1</td><td>2</td></tr>' +
'<tr><td>2</td><td>3</td></tr>' +
'<tr><td>1</td><td>2</td></tr>' +
'<tr><td>a</td><td>b</td></tr>' +
'<tr><td>c</td><td>d</td></tr>' +
'</tbody>' +
'</table>'
);
});
TinyLoader.setup(function (editor, onSuccess, onFailure) {
Pipeline.async({}, suite.toSteps(editor), onSuccess, onFailure);
}, {
plugins: 'table',
indent: false,
valid_styles: {
'*': 'width,height,vertical-align,text-align,float,border-color,background-color,border,padding,border-spacing,border-collapse'
},
skin_url: '/project/src/skins/lightgray/dist/lightgray'
}, success, failure);
}
);
# ===== ovirt_quota_facts.py =====
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_quota_facts
short_description: Retrieve facts about one or more oVirt/RHV quotas
version_added: "2.3"
author: "Red Hat"
description:
- "Retrieve facts about one or more oVirt/RHV quotas."
notes:
- "This module creates a new top-level C(ovirt_quotas) fact, which
contains a list of quotas."
options:
data_center:
description:
- "Name of the datacenter where quota resides."
required: true
name:
description:
- "Name of the quota, can be used as glob expression."
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about quota named C(myquota) in Default datacenter:
- ovirt_quota_facts:
data_center: Default
name: myquota
- debug:
var: ovirt_quotas
'''
RETURN = '''
ovirt_quotas:
description: "List of dictionaries describing the quotas. Quota attributes are mapped to dictionary keys,
all quotas attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/quota."
returned: On success.
type: list
'''
import fnmatch
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
search_by_name,
)
def main():
argument_spec = ovirt_facts_full_argument_spec(
data_center=dict(required=True),
name=dict(default=None),
)
module = AnsibleModule(argument_spec)
if module._name == 'ovirt_quotas_facts':
module.deprecate("The 'ovirt_quotas_facts' module is being renamed 'ovirt_quota_facts'", version=2.8)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
datacenters_service = connection.system_service().data_centers_service()
dc_name = module.params['data_center']
dc = search_by_name(datacenters_service, dc_name)
if dc is None:
raise Exception("Datacenter '%s' was not found." % dc_name)
quotas_service = datacenters_service.service(dc.id).quotas_service()
if module.params['name']:
quotas = [
e for e in quotas_service.list()
if fnmatch.fnmatch(e.name, module.params['name'])
]
else:
quotas = quotas_service.list()
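        # fnmatch gives the `name` option shell-style globbing, e.g. a
        # hypothetical `name: dev*` would match quotas named `dev` and
        # `dev-gold`.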
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_quotas=[
get_dict_of_struct(
struct=c,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for c in quotas
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
    main()
# ===== sale_order.py =====
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import timedelta
from odoo import api, fields, models, _
from odoo.exceptions import UserError, ValidationError
class SaleOrder(models.Model):
_inherit = 'sale.order'
@api.model
def default_get(self, fields_list):
default_vals = super(SaleOrder, self).default_get(fields_list)
if "sale_order_template_id" in fields_list and not default_vals.get("sale_order_template_id"):
company_id = default_vals.get('company_id', False)
company = self.env["res.company"].browse(company_id) if company_id else self.env.company
default_vals['sale_order_template_id'] = company.sale_order_template_id.id
return default_vals
sale_order_template_id = fields.Many2one(
'sale.order.template', 'Quotation Template',
readonly=True, check_company=True,
states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]")
sale_order_option_ids = fields.One2many(
'sale.order.option', 'order_id', 'Optional Products Lines',
copy=True, readonly=True,
states={'draft': [('readonly', False)], 'sent': [('readonly', False)]})
@api.constrains('company_id', 'sale_order_option_ids')
def _check_optional_product_company_id(self):
for order in self:
companies = order.sale_order_option_ids.product_id.company_id
if companies and companies != order.company_id:
bad_products = order.sale_order_option_ids.product_id.filtered(lambda p: p.company_id and p.company_id != order.company_id)
raise ValidationError(_(
"Your quotation contains products from company %(product_company)s whereas your quotation belongs to company %(quote_company)s. \n Please change the company of your quotation or remove the products from other companies (%(bad_products)s).",
product_company=', '.join(companies.mapped('display_name')),
quote_company=order.company_id.display_name,
bad_products=', '.join(bad_products.mapped('display_name')),
))
@api.returns('self', lambda value: value.id)
def copy(self, default=None):
if self.sale_order_template_id and self.sale_order_template_id.number_of_days > 0:
default = dict(default or {})
default['validity_date'] = fields.Date.context_today(self) + timedelta(self.sale_order_template_id.number_of_days)
return super(SaleOrder, self).copy(default=default)
@api.onchange('partner_id')
def onchange_partner_id(self):
super(SaleOrder, self).onchange_partner_id()
template = self.sale_order_template_id.with_context(lang=self.partner_id.lang)
self.note = template.note or self.note
def _compute_line_data_for_template_change(self, line):
return {
'display_type': line.display_type,
'name': line.name,
'state': 'draft',
}
def _compute_option_data_for_template_change(self, option):
price = option.product_id.lst_price
discount = 0
if self.pricelist_id:
pricelist_price = self.pricelist_id.with_context(uom=option.uom_id.id).get_product_price(option.product_id, 1, False)
if self.pricelist_id.discount_policy == 'without_discount' and price:
discount = max(0, (price - pricelist_price) * 100 / price)
else:
price = pricelist_price
return {
'product_id': option.product_id.id,
'name': option.name,
'quantity': option.quantity,
'uom_id': option.uom_id.id,
'price_unit': price,
'discount': discount
}
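    # Worked example of the discount math above (hypothetical numbers): with a
    # list price of 100.0 and a pricelist price of 80.0, the option keeps
    # price_unit=100.0 and gets discount = max(0, (100 - 80) * 100 / 100) = 20.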
def update_prices(self):
self.ensure_one()
res = super().update_prices()
for line in self.sale_order_option_ids:
line.price_unit = self.pricelist_id.get_product_price(line.product_id, line.quantity, self.partner_id, uom_id=line.uom_id.id)
return res
@api.onchange('sale_order_template_id')
def onchange_sale_order_template_id(self):
if not self.sale_order_template_id:
self.require_signature = self._get_default_require_signature()
self.require_payment = self._get_default_require_payment()
return
template = self.sale_order_template_id.with_context(lang=self.partner_id.lang)
        # --- first, process the list of products from the template
        order_lines = [(5, 0, 0)]
        for line in template.sale_order_template_line_ids:
            data = self._compute_line_data_for_template_change(line)
            if line.product_id:
                price = line.product_id.lst_price
discount = 0
if self.pricelist_id:
pricelist_price = self.pricelist_id.with_context(uom=line.product_uom_id.id).get_product_price(line.product_id, 1, False)
if self.pricelist_id.discount_policy == 'without_discount' and price:
discount = max(0, (price - pricelist_price) * 100 / price)
else:
price = pricelist_price
data.update({
'price_unit': price,
'discount': discount,
'product_uom_qty': line.product_uom_qty,
'product_id': line.product_id.id,
'product_uom': line.product_uom_id.id,
'customer_lead': self._get_customer_lead(line.product_id.product_tmpl_id),
})
order_lines.append((0, 0, data))
self.order_line = order_lines
self.order_line._compute_tax_id()
# then, process the list of optional products from the template
option_lines = [(5, 0, 0)]
for option in template.sale_order_template_option_ids:
data = self._compute_option_data_for_template_change(option)
option_lines.append((0, 0, data))
self.sale_order_option_ids = option_lines
if template.number_of_days > 0:
self.validity_date = fields.Date.context_today(self) + timedelta(template.number_of_days)
self.require_signature = template.require_signature
self.require_payment = template.require_payment
if template.note:
self.note = template.note
def action_confirm(self):
res = super(SaleOrder, self).action_confirm()
for order in self:
if order.sale_order_template_id and order.sale_order_template_id.mail_template_id:
                order.sale_order_template_id.mail_template_id.send_mail(order.id)
return res
def get_access_action(self, access_uid=None):
""" Instead of the classic form view, redirect to the online quote if it exists. """
self.ensure_one()
user = access_uid and self.env['res.users'].sudo().browse(access_uid) or self.env.user
if not self.sale_order_template_id or (not user.share and not self.env.context.get('force_website')):
return super(SaleOrder, self).get_access_action(access_uid)
return {
'type': 'ir.actions.act_url',
'url': self.get_portal_url(),
'target': 'self',
'res_id': self.id,
}
class SaleOrderLine(models.Model):
_inherit = "sale.order.line"
_description = "Sales Order Line"
sale_order_option_ids = fields.One2many('sale.order.option', 'line_id', 'Optional Products Lines')
# Take the description on the order template if the product is present in it
@api.onchange('product_id')
def product_id_change(self):
domain = super(SaleOrderLine, self).product_id_change()
if self.product_id and self.order_id.sale_order_template_id:
for line in self.order_id.sale_order_template_id.sale_order_template_line_ids:
if line.product_id == self.product_id:
self.name = line.with_context(lang=self.order_id.partner_id.lang).name + self._get_sale_order_line_multiline_description_variants()
break
return domain
class SaleOrderOption(models.Model):
_name = "sale.order.option"
_description = "Sale Options"
_order = 'sequence, id'
is_present = fields.Boolean(string="Present on Quotation",
help="This field will be checked if the option line's product is "
"already present in the quotation.",
compute="_compute_is_present", search="_search_is_present")
order_id = fields.Many2one('sale.order', 'Sales Order Reference', ondelete='cascade', index=True)
line_id = fields.Many2one('sale.order.line', ondelete="set null", copy=False)
name = fields.Text('Description', required=True)
product_id = fields.Many2one('product.product', 'Product', required=True, domain=[('sale_ok', '=', True)])
price_unit = fields.Float('Unit Price', required=True, digits='Product Price')
discount = fields.Float('Discount (%)', digits='Discount')
uom_id = fields.Many2one('uom.uom', 'Unit of Measure ', required=True, domain="[('category_id', '=', product_uom_category_id)]")
product_uom_category_id = fields.Many2one(related='product_id.uom_id.category_id', readonly=True)
quantity = fields.Float('Quantity', required=True, digits='Product Unit of Measure', default=1)
sequence = fields.Integer('Sequence', help="Gives the sequence order when displaying a list of optional products.")
@api.depends('line_id', 'order_id.order_line', 'product_id')
def _compute_is_present(self):
# NOTE: this field cannot be stored as the line_id is usually removed
# through cascade deletion, which means the compute would be false
for option in self:
option.is_present = bool(option.order_id.order_line.filtered(lambda l: l.product_id == option.product_id))
def _search_is_present(self, operator, value):
if (operator, value) in [('=', True), ('!=', False)]:
return [('line_id', '=', False)]
return [('line_id', '!=', False)]
@api.onchange('product_id', 'uom_id', 'quantity')
def _onchange_product_id(self):
if not self.product_id:
return
product = self.product_id.with_context(
lang=self.order_id.partner_id.lang,
partner=self.order_id.partner_id,
quantity=self.quantity,
date=self.order_id.date_order,
pricelist=self.order_id.pricelist_id.id,
uom=self.uom_id.id,
fiscal_position=self.env.context.get('fiscal_position')
)
self.name = product.get_product_multiline_description_sale()
self.uom_id = self.uom_id or product.uom_id
# To compute the discount a so line is created in cache
values = self._get_values_to_add_to_order()
new_sol = self.env['sale.order.line'].new(values)
new_sol._onchange_discount()
self.discount = new_sol.discount
if self.order_id.pricelist_id and self.order_id.partner_id:
self.price_unit = new_sol._get_display_price(product)
def button_add_to_order(self):
self.add_option_to_order()
def add_option_to_order(self):
self.ensure_one()
sale_order = self.order_id
if sale_order.state not in ['draft', 'sent']:
raise UserError(_('You cannot add options to a confirmed order.'))
values = self._get_values_to_add_to_order()
order_line = self.env['sale.order.line'].create(values)
order_line._compute_tax_id()
self.write({'line_id': order_line.id})
if sale_order:
sale_order.add_option_to_order_with_taxcloud()
def _get_values_to_add_to_order(self):
self.ensure_one()
return {
'order_id': self.order_id.id,
'price_unit': self.price_unit,
'name': self.name,
'product_id': self.product_id.id,
'product_uom_qty': self.quantity,
'product_uom': self.uom_id.id,
'discount': self.discount,
'company_id': self.order_id.company_id.id,
        }
// ===== MeshBuilder.cpp =====
/**
@file MeshBuilder.cpp
@maintainer Morgan McGuire, [email protected]
@created 2002-02-27
@edited 2005-02-24
*/
#include "G3D/MeshBuilder.h"
#include "G3D/MeshAlg.h"
namespace G3D {
void MeshBuilder::setName(const std::string& n) {
name = n;
}
void MeshBuilder::commit(std::string& n, Array<int>& indexArray, Array<Vector3>& outvertexArray) {
n = name;
// Make the data fit in a unit cube
centerTriList();
Array<int> toNew, toOld;
if (close == MeshBuilder::AUTO_WELD) {
Array<int> index;
MeshAlg::createIndexArray(triList.size(), index);
double minEdgeLen, maxEdgeLen, meanEdgeLen, medianEdgeLen;
double minFaceArea, maxFaceArea, meanFaceArea, medianFaceArea;
MeshAlg::computeAreaStatistics(triList, index,
minEdgeLen, meanEdgeLen, medianEdgeLen, maxEdgeLen,
minFaceArea, meanFaceArea, medianFaceArea, maxFaceArea);
close = minEdgeLen * 0.1;
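        // Heuristic: weld vertices that lie within 10% of the shortest edge
        // length, so near-duplicate vertices collapse without merging
        // genuinely distinct corners.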
}
MeshAlg::computeWeld(triList, outvertexArray, toNew, toOld, close);
// Construct triangles
for (int t = 0; t < triList.size(); t += 3) {
int index[3];
for (int i = 0; i < 3; ++i) {
index[i] = toNew[t + i];
}
// Throw out zero size triangles
if ((index[0] != index[1]) &&
(index[1] != index[2]) &&
(index[2] != index[0])) {
indexArray.append(index[0], index[1], index[2]);
}
}
}
void MeshBuilder::centerTriList() {
// Compute the range of the vertices
Vector3 vmin, vmax;
computeBounds(vmin, vmax);
Vector3 diagonal = vmax - vmin;
double scale = max(max(diagonal.x, diagonal.y), diagonal.z) / 2;
debugAssert(scale > 0);
Vector3 translation = vmin + diagonal / 2;
// Center and scale all vertices in the input list
int v;
//Matrix3 rot90 = Matrix3::fromAxisAngle(Vector3::UNIT_Y, toRadians(180)) * Matrix3::fromAxisAngle(Vector3::UNIT_X, toRadians(90));
for (v = 0; v < triList.size(); ++v) {
triList[v] = (triList[v] - translation) / scale;
//triList[v] = rot90 * triList[v];
}
}
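// Worked example of the normalization above (hypothetical bounds): vertices
// spanning [0,4] on x and [0,2] on y and z give scale = 4/2 = 2 and
// translation = (2,1,1), so every centered vertex lands inside [-1,1]^3.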
void MeshBuilder::computeBounds(Vector3& min, Vector3& max) {
min = Vector3::inf();
max = -min;
int v;
for (v = 0; v < triList.size(); ++v) {
min = min.min(triList[v]);
max = max.max(triList[v]);
}
}
void MeshBuilder::addTriangle(const Vector3& a, const Vector3& b, const Vector3& c) {
triList.append(a, b, c);
    if (_twoSided) {
        triList.append(c, b, a);
    }
}
void MeshBuilder::addQuad(const Vector3& a, const Vector3& b, const Vector3& c, const Vector3& d) {
addTriangle(a, b, c);
addTriangle(a, c, d);
}
void MeshBuilder::addTriangle(const Triangle& t) {
addTriangle(t.vertex(0), t.vertex(1), t.vertex(2));
}
} // namespace
# ===== polishNotation.py =====
#The new version is in polishNotation2.py. Use that version instead of this one.
#To do:
#Find out how to split a string using matches of a regular expression as the separator.
#Test everything in polyglotCodeGenerator.py
#Use re.match(expr, stringToSplit).groups() to split a string with its parameters:
#http://stackoverflow.com/questions/18903923/how-to-split-a-string-in-python-without-redundant-output
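#A minimal sketch of that idea (hypothetical pattern and input):
#  re.match(r"(\w+) (\w+)", "hello world").groups() returns ('hello', 'world')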
from pyparsing import OneOrMore, nestedExpr
import re
def splitParameterString(theString):
toFilter = re.compile("(<<(?:[^\s]+)>>)").split(theString)
return filter(lambda a: a != '', toFilter)
def getRegexFromString(theString):
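    #Each <<name>> placeholder becomes a ([^\s]+) capture group; all other
    #text in the pattern string is kept verbatim.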
theSplitString = splitParameterString(theString)
for x in range(0, len(theSplitString)):
if theSplitString[x].startswith("<<") and theSplitString[x].endswith(">>"):
theSplitString[x] = "([^\s]+)"
return re.compile("".join(theSplitString))
def splitStatement(theRegex, stringToSplit):
return re.match(theRegex, stringToSplit).groups()
def getThingToCheckAgainstRegex(theArray):
theCounter = 0
toReturn = ""
for idx, current in enumerate(theArray):
if(idx != 0):
toReturn += " "
if (type(current) != str or (type(current) == str) and (("'" in current) or ('"' in current))):
theCounter += 1
toReturn += "<<" + str(theCounter) + ">>"
else:
toReturn += current
return toReturn
stringToTest = "(replace(?: each| every|)) <<foo>> (in|inside(?: of)|within) <<bar>> (with) <<baz>>"
theRegex = getRegexFromString(stringToTest)
print(splitParameterString(stringToTest))
print(splitStatement(theRegex, "replace (a) in b with c"))
print(splitStatement(theRegex, "replace a within b with c"))
print(splitStatement(theRegex, "replace a inside of b with c"))
print(splitStatement(theRegex, "replace every a in b with c"))
#I'm still working on crossLanguageParser.py, but I'm trying to see if I can get this new syntax to work.
#This is supposed to be a re-write of crossLanguageParser.py, using Polish notation.
#evaluateMacro is the main function here.
#print(getThingToCheckAgainstRegex(["the", "type", "of", ["foo", "goo"], "is", "'bar'"]))
def isParameter(theString):
if theString.startswith("<<") and theString.endswith(">>"):
return True
arrayOfOutputs = [
[["<<type>> [ <<dimensions>> ] <<name>> = <<initialValue>>", "<<type>> <<name>> [ <<dimensions>> ] = <<initialValue>>"], "initializeVar('<<name>>', '<<type>>', <<initialValue>>, <<dimensions>>)", "final"],
[["<<type>> <<name>> = <<initialValue>>"], "(<<type>> [ None ] <<name>> = <<initialValue>>)"],
#def initializeVar(variableName, variableType, initialValue, arrayDimensions):
[["def <<isStatic>> <<returnType>> <<functionName>> <<parameterNames>> <<parameterTypes>> <<body>>"], "getFunction('<<functionName>>', '<<isStatic>>', <<parameterNames>>, <<parameterTypes>>, '<<returnType>>', <<body>>)", "final"],
[["return <<toReturn>>",], "Return(<<toReturn>>)", "final"],
[["while <<condition>> <<action>>"], "whileLoop([<<action>>], <<condition>>)", "final"],
[["switch <<condition>> <<action>>",], "switch(<<condition>>, [<<action>>])", "final"],
[["case <<condition>> <<action>>"], "case(<<condition>>, [<<action>>])", "final"],
[["else <<action>>", "else { <<action>> }"], "Else([<<action>>])", "final"],
[["if <<condition>> then <<output>>", "<<output>> unless <<condition>> is false", "if <<condition>> { <<output>> }", "<<output>> if <<condition>>", "<<output>> if and only if <<condition>>", "if <<condition>> <<output>>"], "If(<<condition>>, [<<output>>])", "final"],
[["elif <<condition>> <<action>>", "else if <<condition>> then <<action>>"], "Elif(<<condition>>, [<<action>>])", "final"],
[["<<param1>> ; <<param2>>", "<<param1>> , <<param2>>"], "<<param1>>,<<param2>>", "final"],
[["<<param1>> ;", "<<param1>> ,"], "<<param1>>,", "final"],
[["module <<body>>"], "module([<<body>>])", "final"],
[["main <<body>>"], "main([<<body>>])", "final"],
[["<<value1>> == <<value2>>", "<<value1>> is <<value2>>", "<<value1>> equals <<value2>>", "<<value1>> is equal to <<value2>>"], "equals(<<value1>>, <<value2>>, 'int')", "final"],<|fim▁hole|>#If there are only 3 items in the array, then the output is translated into another macro
[["unless <<condition>> <<action>>", "<<action>> unless <<condition>>"], "(if (not <<condition>>) then <<action>>)"],
[["while <<condition>> <<action>>", "<<action>> while <<condition>>", "do <<action>> while <<condition>> is true", "<<action>> until <<condition>> becomes false"], "while(<<condition>>){<<action>>}", "final"],
#"eval" means the output string will be directly evaluated.
[["<<thing1>> means <<thing2>>"], "addToArray(<<thing1>>, <<thing2>>)", "eval"],
[["<<functionName>> { <<parameterList>> }"], "callFunction('<<functionName>>', None, [<<parameterList>>])", "final"],
[["<<param1>> + <<param2>>", "<<param1>> plus <<param2>>"], "add([<<param1>>, <<param2>>])", "final"],
[["<<param1>> - <<param2>>"], "subtract(<<param1>>, <<param2>>)", "final"],
[["<<param1>> * <<param2>>"], "multiply(<<param1>>, <<param2>>)", "final"],
[["<<param1>> / <<param2>>", "<<param1>> divided by <<param2>>"], "divide(<<param1>>, <<param2>>)", "final"],
[["<<param1>> % <<param2>>"], "Mod([<<param1>>, <<param2>>])", "final"],
[["<<param1>> or <<param2>>", "<<param1>> || <<param2>>"], "Or(<<param1>>, <<param2>>)", "final"],
[["<<param1>> > <<param2>>", "<<param1>> is greater than <<param2>>"], "greaterThan(<<param1>>, <<param2>>)", "final"],
[["<<param1>> < <<param2>>", "<<param1>> is less than <<param2>>>>"], "lessThan(<<param1>>, <<param2>>)", "final"],
[["<<param1>> <= <<param2>>"], "lessThanOrEqualTo(<<param1>>, <<param2>>)", "final"],
[["<<param1>> >= <<param2>>"], "greaterThanOrEqualTo(<<param1>>, <<param2>>)", "final"],
[["<<param1>> and <<param2>>", "<<param1>> && <<param2>>" "<<param1>> & <<param2>>"], "And(<<param1>>, <<param2>>)", "final"],
[["class <<className>> { <<body>> }",], "getClass(<<className>>, <<body>>)", "final"],
#def getClass(className, body):
[["<<param>> ++"], "(<<param>> += 1)"],
[["<<param>> --"], "(<<param>> -= 1)"],
[["seriesOfStatements <<param>>", "series of statements <<param>>"], "seriesOfStatements([<<param>>])", "final"],
[["<<param1>> += <<param2>>"], "(<<param1>> = (<<param1>> + <<param2>>))"],
[["<<param1>> -= <<param2>>"], "(<<param1>> = (<<param1>> - <<param2>>))"],
[["<<param1>> *= <<param2>>"], "(<<param1>> = (<<param1>> * <<param2>>))"],
[["<<param1>> ^= <<param2>>"], "(<<param1>> = (<<param1>> ^ <<param2>>))"],
[["<<param1>> = <<param2>>"], "setVar(<<param2>>, <<param1>>)", "final"],
#def setVar(valueToGet, valueToChange):
[["for <<initializer>> <<condition>> <<increment>> <<action>>", "for <<initializer>> ; <<condition>> ; <<increment>> { <<action>> }"], "forLoop(<<action>>, <<initializer>>, <<condition>>, <<increment>>)", "final"],
#def forLoop(body, initializer, condition, increment):
[["for <<variable>> from <<start>> to <<end>> <<action>>"], "(for [ (<<variable>> = <<start>>) ; (<<variable>> < <<end>>) ; (<<variable>> ++) ] { <<action>> })"],
[["<<param1>> ^ <<param2>>", "<<param1>> to the power of <<param2>>", "param1 ** param2"], "<<param1>>^<<param2>>", "final"],
[["[ <<param>> ]"], "[<<param>>]", "final"],
[["<<className>> . <<methodName>> { <<methodParameters>> }"], "<<className>>.<<methodName>>(<<methodParameters>>)", "final"]
]
def addToArray(thing1, thing2):
global arrayOfOutputs
thing2 = ("(" + thing2 + ")")
thing2 = list(OneOrMore(nestedExpr()).parseString(thing2)[0])
thing1 = thing1.split(" ")
arrayOfOutputs += [[thing1, thing2]]
for idx1, current1 in enumerate(arrayOfOutputs):
currentStringOutput = current1[1]
for idx2, current2 in enumerate(current1[0]):
current1[0][idx2] = current1[0][idx2].split(" ")
if(len(current1) == 2):
current1[1] = OneOrMore(nestedExpr()).parseString(currentStringOutput)[0]
#print(arrayOfOutputs)
def compareStringLists(listWithParameters, listDefiningThePattern):
if(len(listWithParameters) != len(listDefiningThePattern)):
return False
for idx, current in enumerate(listWithParameters):
if(not isParameter(listDefiningThePattern[idx])):
if(not (listWithParameters[idx] == listDefiningThePattern[idx])):
return False
return True
def replaceInMultiDimensionalArray(theArray, toReplace, newReplace):
for idx, current in enumerate(theArray):
if current == toReplace:
theArray[idx] = newReplace
if type(current) != str:
theArray[idx] = replaceInMultiDimensionalArray(current, toReplace, newReplace)
return theArray
#print(replaceInMultiDimensionalArray(['hello', 'dude', ['lol', 'lol', 'hello', ['woo', 'hello', 'woo']]], 'hello', 'hoohoo'))
#print(getRegexStringFromArray(["Hi", ["lol", 1, 2, "what is this"]]))
def putInsideArray(theArray, startingIndex, endingIndex):
theSubArray = theArray[startingIndex:(endingIndex+1)]
theArray[startingIndex:endingIndex] = []
theArray[startingIndex] = theSubArray
return theArray
#print(putInsideArray([1,3,3,4,5], 1, 3))
#requires putInsideArray and compareStringLists
#This surrounds the last match of the pattern with parentheses.
def putPatternInsideArray(theArray, thePattern):
for current in reversed(range(0,len(theArray))):
theTestArray = theArray[current:(current+len(thePattern))]
if(compareStringLists(theTestArray, thePattern) == True):
return putInsideArray(theArray, current, current+len(thePattern)-1)
#print(putPatternInsideArray(["hello", "hello", ["woop", "woop"]], ["hello", "<<derp>>"]))
#print(range(0, 5))
arrayToCheck = ["lol", "wut", "hello", "world", "lol"]
#print(putPatternInsideArray(arrayToCheck, ["hello", "world"]))
#print(putPatternInsideArray(arrayToCheck, ["wut", "<<testingThing>>", "lol"]))
def putFirstPatternInsideArray(theArray, thePatterns):
firstLength = len(theArray)
for current in thePatterns:
putPatternInsideArray(theArray, current)
if(len(theArray) != firstLength):
return theArray
def putEveryPatternInsideArray(theArray):
arrayOfPatterns = []
for current in arrayOfOutputs:
arrayOfPatterns += current[0]
arrayOfPatterns.sort(key=len)
arrayOfPatterns = arrayOfPatterns[::-1]
#print(arrayOfPatterns)
while True:
oldArrayLength = len(theArray)
putFirstPatternInsideArray(theArray, arrayOfPatterns)
if(len(theArray) == oldArrayLength):
break;
if(len(theArray) == 1):
return theArray[0]
else:
return theArray
putEveryPatternInsideArray(["hi", "lol"])
def evaluateMacro(theList):
theList = list(theList)
theList = putEveryPatternInsideArray(theList)
#print(theList)
if (len(theList) == 1):
return evaluateMacro(theList[0])
#print regexString
for idx, currentOutputArray in enumerate(arrayOfOutputs):
currentOutputString = currentOutputArray[1]
currentSplitStringList = currentOutputArray[0]
for idx1, currentSplitStringList1 in enumerate(currentSplitStringList):
if(compareStringLists(theList, currentSplitStringList1)):
currentSplitStringList = currentSplitStringList[idx1]
toReturn = currentOutputString
if((len(currentOutputArray) == 3)):
for idx2, currentParameter in enumerate(theList):
if type(currentParameter) != str:
theList[idx2] = evaluateMacro(currentParameter)
if isParameter(currentSplitStringList[idx2]):
toReturn = toReturn.replace(currentSplitStringList[idx2], theList[idx2])
if currentOutputArray[2] == "final":
return toReturn
elif currentOutputArray[2] == "eval":
exec toReturn;
return ""
else:
for idx2, currentParameter in enumerate(theList):
if isParameter(currentSplitStringList[idx2]):
toReturn = replaceInMultiDimensionalArray(toReturn, currentSplitStringList[idx2], currentParameter)
#print(toReturn)
return evaluateMacro(toReturn)
raise Exception(str(theList) + " does not match any pattern.")
#evaluateMacro(OneOrMore(nestedExpr()).parseString("('gorp <<toPrint>>' means 'print <<toPrint>>')")[0])
#evaluateMacro(OneOrMore(nestedExpr()).parseString("('<<action>> unless <<condition>>' means 'if (not <<condition>>) then <<action>>')")[0])
#print("The output is " + evaluateMacro(OneOrMore(nestedExpr()).parseString("(gorp 1)")[0]))
#print(arrayOfOutputs[len(arrayOfOutputs) - 1])
#print(arrayOfOutputs[4])
def printOutput(theInput):
print("The output of "+theInput+" is " + evaluateMacro(OneOrMore(nestedExpr()).parseString(theInput)[0]))
def getStringOutput(theInput):
return evaluateMacro(OneOrMore(nestedExpr()).parseString("(" + theInput + ")")[0])
#printOutput("(x is in arr1)")
#printOutput("(arr1 contains 'a string')")
#printOutput("(if (x equals true) then (print { goodbye }) (else print { hello }))")
#printOutput(
'''
(
(if (x == 3)
(print { x + 1 })
(else if (x == 4) then
(print { x ^ 5 })
(else
print { x + 3 + 4 }
)
)
)
)
'''#)
#printOutput("(foo { ([ 3 , 4 , 5 , 6 , 5 , 4 ]) })")
#printOutput("(print { sum { product { lol * derp } } })")
#printOutput(
'''
((print { x }) if and only if
(x is true)
(else
print { hello }
)
)
'''
#)
#printOutput("([ 1 , ([ 2 , 7 , 9 ]), 3 ])")
#printOutput("(function paramNames paramTypes (return 3))")
#printOutput("(class HelloWorld { print { hello } })")
#printOutput("(foo plus bar plus baz)")
#printOutput("(foo < bar < baz)")
#printOutput("(foo is greater than bar)")
#printOutput("(foo >= bar >= baz)")
#printOutput("(foo <= bar <= baz)")
#printOutput("(foo ++)")
#printOutput("(foo --)")
#printOutput("(foo to the power of 3)")
#printOutput("([ 1 , ([ 1 , 2 , 3 , ([ lol , derp , hee , hoo ]) ]) , 1 ])")
#printOutput(
'''
(for (i = 0) ; (i < 10) ; (i ++) {
(print { i })
})
'''
#)
#printOutput(
'''
(for i from 1 to 10 {
(print { i } ; print { (i + 1) } ; print { (i to the power of 2) } ;)
})
'''
#)
#printOutput("(lol . derp { hi })")
#printOutput(
'''(
main
print { hello } ; print { derp } ; print { i + 1 + 3 } ;
)'''#)
#semicolon(semicolon(print(hello)), semicolon(print(derp)), print(add(i, add(1, 3))))
# ===== bdgbroadcall_cmd.py =====
# Time-stamp: <2019-09-25 10:04:48 taoliu>
"""Description: Fine-tuning script to call broad peaks from a single
bedGraph track for scores.
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License (see the file LICENSE included with
the distribution).
"""
# ------------------------------------
# python modules
# ------------------------------------
import sys
import os
import logging
from MACS2.IO import BedGraphIO
# ------------------------------------
# constants
# ------------------------------------
logging.basicConfig(level=20,
format='%(levelname)-5s @ %(asctime)s: %(message)s ',
datefmt='%a, %d %b %Y %H:%M:%S',
stream=sys.stderr,
filemode="w"
)
# ------------------------------------
# Misc functions
# ------------------------------------
error = logging.critical # function alias
warn = logging.warning
debug = logging.debug
info = logging.info
# ------------------------------------
# Classes
# ------------------------------------
# ------------------------------------
# Main function
# ------------------------------------
def run( options ):
info("Read and build bedGraph...")
bio = BedGraphIO.bedGraphIO(options.ifile)
btrack = bio.build_bdgtrack(baseline_value=0)
info("Call peaks from bedGraph...")
bpeaks = btrack.call_broadpeaks (lvl1_cutoff=options.cutoffpeak, lvl2_cutoff=options.cutofflink, min_length=options.minlen, lvl1_max_gap=options.lvl1maxgap, lvl2_max_gap=options.lvl2maxgap)
info("Write peaks...")
if options.ofile:
bf = open( os.path.join( options.outdir, options.ofile ), "w" )
options.oprefix = options.ofile
    else:
        bf = open ( os.path.join( options.outdir, "%s_c%.1f_C%.2f_l%d_g%d_G%d_broad.bed12" % (options.oprefix,options.cutoffpeak,options.cutofflink,options.minlen,options.lvl1maxgap,options.lvl2maxgap)), "w" )
    bpeaks.write_to_gappedPeak(bf, name_prefix=(options.oprefix+"_broadRegion").encode(), score_column="score", trackline=options.trackline)
    info("Done")
// ===== calc2.rs =====
// Copyright 2014 Pierre Talbot (IRCAM)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use oak::oak;
use self::Expression::*;
use self::BinOp::*;
use std::str::FromStr;
pub type PExpr = Box<Expression>;
#[derive(Debug)]
pub enum Expression {
Variable(String),
Number(u32),
BinaryExpr(BinOp, PExpr, PExpr),
LetIn(String, PExpr, PExpr)
}
#[derive(Debug)]
pub enum BinOp {
Add, Sub, Mul, Div, Exp
}
oak! {
// Optional stream declaration.
type Stream<'a> = StrStream<'a>;
program = spacing expression
expression
= term (term_op term)* > fold_left
term
= exponent (factor_op exponent)* > fold_left
exponent
= (factor exponent_op)* factor > fold_right
factor: PExpr
= number > box Number
/ identifier > box Variable
/ let_expr > box LetIn
/ lparen expression rparen
let_expr = let_kw let_binding in_kw expression
let_binding = identifier bind_op expression
term_op: BinOp
= add_op > Add
/ sub_op > Sub
factor_op: BinOp
= mul_op > Mul
/ div_op > Div
exponent_op: BinOp = exp_op > Exp
identifier = !digit !keyword ident_char+ spacing > to_string
ident_char = ["a-zA-Z0-9_"]
digit = ["0-9"]
number = digit+ spacing > to_number
spacing = [" \n\r\t"]*:(^)
kw_tail = !ident_char spacing
keyword = let_kw / in_kw
let_kw = "let" kw_tail
in_kw = "in" kw_tail
bind_op = "=" spacing
add_op = "+" spacing
sub_op = "-" spacing
mul_op = "*" spacing
div_op = "/" spacing
exp_op = "^" spacing
lparen = "(" spacing
rparen = ")" spacing
fn to_number(raw_text: Vec<char>) -> u32 {
u32::from_str(&*to_string(raw_text)).unwrap()
}
fn to_string(raw_text: Vec<char>) -> String {
raw_text.into_iter().collect()
}
fn fold_left(head: PExpr, rest: Vec<(BinOp, PExpr)>) -> PExpr {
rest.into_iter().fold(head,
|accu, (op, expr)| Box::new(BinaryExpr(op, accu, expr)))
}
fn fold_right(front: Vec<(PExpr, BinOp)>, last: PExpr) -> PExpr {
front.into_iter().rev().fold(last,
|accu, (expr, op)| Box::new(BinaryExpr(op, expr, accu)))
}
}
# ===== test_smtplib.py =====
import asyncore
import email.mime.text
from email.message import EmailMessage
from email.base64mime import body_encode as encode_base64
import email.utils
import socket
import smtpd
import smtplib
import io
import re
import sys
import time
import select
import errno
import textwrap
import unittest
from test import support, mock_socket
try:
import threading
except ImportError:
threading = None
HOST = support.HOST
if sys.platform == 'darwin':
# select.poll returns a select.POLLHUP at the end of the tests
# on darwin, so just ignore it
def handle_expt(self):
pass
smtpd.SMTPChannel.handle_expt = handle_expt
def server(evt, buf, serv):
serv.listen()
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 500
while buf and n > 0:
r, w, e = select.select([], [conn], [])
if w:
sent = conn.send(buf)
buf = buf[sent:]
n -= 1
conn.close()
finally:
serv.close()
evt.set()
class GeneralTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
self.port = 25
def tearDown(self):
smtplib.socket = socket
# This method is no longer used but is retained for backward compatibility,
# so test to make sure it still works.
def testQuoteData(self):
teststr = "abc\n.jkl\rfoo\r\n..blue"
expected = "abc\r\n..jkl\r\nfoo\r\n...blue"
self.assertEqual(expected, smtplib.quotedata(teststr))
def testBasic1(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
smtp = smtplib.SMTP(HOST, self.port)
smtp.close()
def testSourceAddress(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
smtp = smtplib.SMTP(HOST, self.port,
source_address=('127.0.0.1',19876))
self.assertEqual(smtp.source_address, ('127.0.0.1', 19876))
smtp.close()
def testBasic2(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects, include port in host name
smtp = smtplib.SMTP("%s:%s" % (HOST, self.port))
smtp.close()
def testLocalHostName(self):
mock_socket.reply_with(b"220 Hola mundo")
# check that supplied local_hostname is used
smtp = smtplib.SMTP(HOST, self.port, local_hostname="testhost")
self.assertEqual(smtp.local_hostname, "testhost")
smtp.close()
def testTimeoutDefault(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(mock_socket.getdefaulttimeout())
mock_socket.setdefaulttimeout(30)
self.assertEqual(mock_socket.getdefaulttimeout(), 30)
try:
smtp = smtplib.SMTP(HOST, self.port)
finally:
mock_socket.setdefaulttimeout(None)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def testTimeoutNone(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
smtp = smtplib.SMTP(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(smtp.sock.gettimeout())
smtp.close()
def testTimeoutValue(self):
mock_socket.reply_with(b"220 Hola mundo")
smtp = smtplib.SMTP(HOST, self.port, timeout=30)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def test_debuglevel(self):
mock_socket.reply_with(b"220 Hello world")
smtp = smtplib.SMTP()
smtp.set_debuglevel(1)
with support.captured_stderr() as stderr:
smtp.connect(HOST, self.port)
smtp.close()
expected = re.compile(r"^connect:", re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
def test_debuglevel_2(self):
mock_socket.reply_with(b"220 Hello world")
smtp = smtplib.SMTP()
smtp.set_debuglevel(2)
with support.captured_stderr() as stderr:
smtp.connect(HOST, self.port)
smtp.close()
expected = re.compile(r"^\d{2}:\d{2}:\d{2}\.\d{6} connect: ",
re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
serv_evt.set()
try:
if hasattr(select, 'poll'):
poll_fun = asyncore.poll2
else:
poll_fun = asyncore.poll
n = 1000
while asyncore.socket_map and n > 0:
poll_fun(0.01, asyncore.socket_map)
# when the client conversation is finished, it will
# set client_evt, and it's then ok to kill the server
if client_evt.is_set():
serv.close()
break
n -= 1
except socket.timeout:
pass
finally:
if not client_evt.is_set():
# allow some time for the client to read the result
time.sleep(0.5)
serv.close()
asyncore.close_all()
serv_evt.set()
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
@unittest.skipUnless(threading, 'Threading required for this test.')
class DebuggingServerTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
# temporarily replace sys.stdout to capture DebuggingServer output
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Capture SMTPChannel debug output
self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM
smtpd.DEBUGSTREAM = io.StringIO()
# Pick a random unused port by passing 0 for the port number
self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1),
decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
# restore sys.stdout
sys.stdout = self.old_stdout
# restore DEBUGSTREAM
smtpd.DEBUGSTREAM.close()
smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM
def testBasic(self):
# connect
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.quit()
def testSourceAddress(self):
# connect
port = support.find_unused_port()
try:
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3, source_address=('127.0.0.1', port))
self.assertEqual(smtp.source_address, ('127.0.0.1', port))
self.assertEqual(smtp.local_hostname, 'localhost')
smtp.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def testNOOP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'OK')
self.assertEqual(smtp.noop(), expected)
smtp.quit()
def testRSET(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'OK')
self.assertEqual(smtp.rset(), expected)
smtp.quit()
def testELHO(self):
# EHLO isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'\nSIZE 33554432\nHELP')
self.assertEqual(smtp.ehlo(), expected)
smtp.quit()
def testEXPNNotImplemented(self):
# EXPN isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (502, b'EXPN not implemented')
smtp.putcmd('EXPN')
self.assertEqual(smtp.getreply(), expected)
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (252, b'Cannot VRFY user, but will accept message ' + \
b'and attempt delivery')
self.assertEqual(smtp.vrfy('[email protected]'), expected)
self.assertEqual(smtp.verify('[email protected]'), expected)
smtp.quit()
def testSecondHELO(self):
# check that a second HELO returns a message that it's a duplicate
# (this behavior is specific to smtpd.SMTPChannel)
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.helo()
expected = (503, b'Duplicate HELO/EHLO')
self.assertEqual(smtp.helo(), expected)
smtp.quit()
def testHELP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.assertEqual(smtp.help(), b'Supported commands: EHLO HELO MAIL ' + \
b'RCPT DATA RSET NOOP QUIT VRFY')
smtp.quit()
def testSend(self):
# connect and send mail
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
# in asyncore. This sleep might help, but should really be fixed
# properly by using an Event variable.
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendBinary(self):
m = b'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNeedingDotQuote(self):
# Issue 12283
m = '.A test\n.mes.sage.'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNullSender(self):
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('<>', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: <>$", re.MULTILINE)
self.assertRegex(debugout, sender)
def testSendMessage(self):
m = email.mime.text.MIMEText('A test message')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m, from_addr='John', to_addrs='Sally')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendMessageWithAddresses(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = '[email protected]'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <[email protected]>'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
# make sure the Bcc header is still in the message.
self.assertEqual(m['Bcc'], 'John Root <root@localhost>, "Dinsdale" '
'<[email protected]>')
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
# The Bcc header should not be transmitted.
del m['Bcc']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: [email protected]$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Sally', 'Fred', 'root@localhost',
'[email protected]'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSomeAddresses(self):
# Make sure nothing breaks if not all of the three 'to' headers exist
m = email.mime.text.MIMEText('A test message')
m['From'] = '[email protected]'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: [email protected]$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSpecifiedAddresses(self):
# Make sure addresses specified in call override those in message.
m = email.mime.text.MIMEText('A test message')
m['From'] = '[email protected]'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m, from_addr='[email protected]', to_addrs='[email protected]')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: [email protected]$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertNotRegex(debugout, to_addr)
recip = re.compile(r"^recips: .*'[email protected]'.*$", re.MULTILINE)
self.assertRegex(debugout, recip)
def testSendMessageWithMultipleFrom(self):
# Sender overrides To
m = email.mime.text.MIMEText('A test message')
m['From'] = 'Bernard, Bianca'
m['Sender'] = '[email protected]'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: [email protected]$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageResent(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = '[email protected]'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <[email protected]>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = '[email protected]'
m['Resent-To'] = 'Martha <[email protected]>, Jeff'
m['Resent-Bcc'] = '[email protected]'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# The Resent-Bcc headers are deleted before serialization.
del m['Bcc']
del m['Resent-Bcc']
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: [email protected]$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('[email protected]', 'Jeff', '[email protected]'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageMultipleResentRaises(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = '[email protected]'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <[email protected]>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = '[email protected]'
m['Resent-To'] = 'Martha <[email protected]>, Jeff'
m['Resent-Bcc'] = '[email protected]'
m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000'
m['Resent-To'] = '[email protected]'
m['Resent-From'] = 'Martha <[email protected]>, Jeff'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
with self.assertRaises(ValueError):
smtp.send_message(m)
smtp.close()
class NonConnectingTests(unittest.TestCase):
def testNotConnected(self):
# Test various operations on an unconnected SMTP object that
# should raise exceptions (at present the attempt in SMTP.send
# to reference the nonexistent 'sock' attribute of the SMTP object
# causes an AttributeError)
smtp = smtplib.SMTP()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo)
self.assertRaises(smtplib.SMTPServerDisconnected,
smtp.send, 'test msg')
def testNonnumericPort(self):
# check that non-numeric port raises OSError
self.assertRaises(OSError, smtplib.SMTP,
"localhost", "bogus")
self.assertRaises(OSError, smtplib.SMTP,
"localhost:bogus")
# test response of client to a non-successful HELO message
@unittest.skipUnless(threading, 'Threading required for this test.')
class BadHELOServerTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
mock_socket.reply_with(b"199 no hello for you!")
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.port = 25
def tearDown(self):
smtplib.socket = socket
sys.stdout = self.old_stdout
def testFailingHELO(self):
self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
@unittest.skipUnless(threading, 'Threading required for this test.')
class TooLongLineTests(unittest.TestCase):
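    # smtplib._MAXLINE caps how long a single response line from the server
    # may be; replying with a line about twice that length should make
    # getreply() raise SMTPResponseException, checked in testLineTooLong.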
respdata = b'250 OK' + (b'.' * smtplib._MAXLINE * 2) + b'\n'
def setUp(self):
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = support.bind_port(self.sock)
servargs = (self.evt, self.respdata, self.sock)
threading.Thread(target=server, args=servargs).start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
sys.stdout = self.old_stdout
def testLineTooLong(self):
self.assertRaises(smtplib.SMTPResponseException, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
sim_users = {'[email protected]':'John A',
'[email protected]':'Sally B',
'[email protected]':'Ruth C',
}
sim_auth = ('[email protected]', 'somepassword')
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
sim_auth_credentials = {
'login': 'TXIuQUBzb21ld2hlcmUuY29t',
'plain': 'AE1yLkFAc29tZXdoZXJlLmNvbQBzb21lcGFzc3dvcmQ=',
'cram-md5': ('TXIUQUBZB21LD2HLCMUUY29TIDG4OWQ0MJ'
'KWZGQ4ODNMNDA4NTGXMDRLZWMYZJDMODG1'),
}
sim_auth_login_user = 'TXIUQUBZB21LD2HLCMUUY29T'
sim_auth_plain = 'AE1YLKFAC29TZXDOZXJLLMNVBQBZB21LCGFZC3DVCMQ='
sim_lists = {'list-1':['[email protected]','[email protected]'],
'list-2':['[email protected]',],
}
# Simulated SMTP channel & server
class SimSMTPChannel(smtpd.SMTPChannel):
quit_response = None
mail_response = None
rcpt_response = None
data_response = None
rcpt_count = 0
rset_count = 0
disconnect = 0
def __init__(self, extra_features, *args, **kw):
self._extrafeatures = ''.join(
[ "250-{0}\r\n".format(x) for x in extra_features ])
super(SimSMTPChannel, self).__init__(*args, **kw)
def smtp_EHLO(self, arg):
resp = ('250-testhost\r\n'
'250-EXPN\r\n'
'250-SIZE 20000000\r\n'
'250-STARTTLS\r\n'
'250-DELIVERBY\r\n')
resp = resp + self._extrafeatures + '250 HELP'
self.push(resp)
self.seen_greeting = arg
self.extended_smtp = True
def smtp_VRFY(self, arg):
# For max compatibility smtplib should be sending the raw address.
if arg in sim_users:
self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
else:
self.push('550 No such user: %s' % arg)
def smtp_EXPN(self, arg):
list_name = arg.lower()
if list_name in sim_lists:
user_list = sim_lists[list_name]
for n, user_email in enumerate(user_list):
quoted_addr = smtplib.quoteaddr(user_email)
if n < len(user_list) - 1:
self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('550 No access for you!')
def smtp_AUTH(self, arg):
mech = arg.strip().lower()
if mech=='cram-md5':
self.push('334 {}'.format(sim_cram_md5_challenge))
elif mech not in sim_auth_credentials:
self.push('504 auth type unimplemented')
return
elif mech=='plain':
self.push('334 ')
elif mech=='login':
self.push('334 ')
else:
self.push('550 No access for you!')
def smtp_QUIT(self, arg):
if self.quit_response is None:
super(SimSMTPChannel, self).smtp_QUIT(arg)
else:
self.push(self.quit_response)
self.close_when_done()
def smtp_MAIL(self, arg):
if self.mail_response is None:
super().smtp_MAIL(arg)
else:
self.push(self.mail_response)
if self.disconnect:
self.close_when_done()
def smtp_RCPT(self, arg):
if self.rcpt_response is None:
super().smtp_RCPT(arg)
return
self.rcpt_count += 1
self.push(self.rcpt_response[self.rcpt_count-1])
def smtp_RSET(self, arg):
self.rset_count += 1
super().smtp_RSET(arg)
def smtp_DATA(self, arg):
if self.data_response is None:
super().smtp_DATA(arg)
else:
self.push(self.data_response)
def handle_error(self):
raise
class SimSMTPServer(smtpd.SMTPServer):
channel_class = SimSMTPChannel
def __init__(self, *args, **kw):
self._extra_features = []
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data)
def process_message(self, peer, mailfrom, rcpttos, data):
pass
def add_feature(self, feature):
self._extra_features.append(feature)
def handle_error(self):
raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPSimTests(unittest.TestCase):
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
def testBasic(self):
# smoke test
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.quit()
def testEHLO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
# no features should be present before the EHLO
self.assertEqual(smtp.esmtp_features, {})
# features expected from the test server
expected_features = {'expn':'',
'size': '20000000',
'starttls': '',
'deliverby': '',
'help': '',
}
smtp.ehlo()
self.assertEqual(smtp.esmtp_features, expected_features)
for k in expected_features:
self.assertTrue(smtp.has_extn(k))
self.assertFalse(smtp.has_extn('unsupported-feature'))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for addr_spec, name in sim_users.items():
expected_known = (250, bytes('%s %s' %
(name, smtplib.quoteaddr(addr_spec)),
"ascii"))
self.assertEqual(smtp.vrfy(addr_spec), expected_known)
u = '[email protected]'
expected_unknown = (550, ('No such user: %s' % u).encode('ascii'))
self.assertEqual(smtp.vrfy(u), expected_unknown)
smtp.quit()
def testEXPN(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for listname, members in sim_lists.items():
users = []
for m in members:
users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
expected_known = (250, bytes('\n'.join(users), "ascii"))
self.assertEqual(smtp.expn(listname), expected_known)
u = 'PSU-Members-List'
expected_unknown = (550, b'No access for you!')
self.assertEqual(smtp.expn(u), expected_unknown)
smtp.quit()
# SimSMTPChannel doesn't fully support AUTH because it requires a
# synchronous read to obtain the credentials...so instead smtpd
# sees the credential sent by smtplib's login method as an unknown command,
# which results in smtplib raising an auth error. Fortunately the error
# message contains the encoded credential, so we can partially check that it
# was generated correctly (partially, because the 'word' is uppercased in
# the error message).
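# For reference, the credentials above can be reproduced with the stdlib:
# base64.b64encode(b'\[email protected]\0somepassword') yields the
# sim_auth_credentials['plain'] value, and sim_auth_login_user is just
# base64.b64encode(b'[email protected]') uppercased, matching the
# mangling described above.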
def testAUTH_PLAIN(self):
self.serv.add_feature("AUTH PLAIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1], initial_response_ok=False)
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_plain, str(err))
smtp.close()
def testAUTH_LOGIN(self):
self.serv.add_feature("AUTH LOGIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_login_user, str(err))
smtp.close()
def testAUTH_CRAM_MD5(self):
self.serv.add_feature("AUTH CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_credentials['cram-md5'], str(err))
smtp.close()
def testAUTH_multiple(self):
# Test that multiple authentication methods are tried.
self.serv.add_feature("AUTH BOGUS PLAIN LOGIN CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_login_user, str(err))
smtp.close()
def test_auth_function(self):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost', timeout=15)
self.serv.add_feature("AUTH CRAM-MD5")
smtp.user, smtp.password = sim_auth[0], sim_auth[1]
supported = {'CRAM-MD5': smtp.auth_cram_md5,
'PLAIN': smtp.auth_plain,
'LOGIN': smtp.auth_login,
}
for mechanism, method in supported.items():
try: smtp.auth(mechanism, method, initial_response_ok=False)
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_credentials[mechanism.lower()].upper(),
str(err))
smtp.close()
def test_quit_resets_greeting(self):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=15)
code, message = smtp.ehlo()
self.assertEqual(code, 250)
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
self.assertNotIn('size', smtp.esmtp_features)
smtp.connect(HOST, self.port)
self.assertNotIn('size', smtp.esmtp_features)
smtp.ehlo_or_helo_if_needed()
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
def test_with_statement(self):
with smtplib.SMTP(HOST, self.port) as smtp:
code, message = smtp.noop()
self.assertEqual(code, 250)
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.close()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
def test_with_statement_QUIT_failure(self):
with self.assertRaises(smtplib.SMTPResponseException) as error:
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.noop()
self.serv._SMTPchannel.quit_response = '421 QUIT FAILED'
self.assertEqual(error.exception.smtp_code, 421)
self.assertEqual(error.exception.smtp_error, b'QUIT FAILED')
#TODO: add tests for correct AUTH method fallback now that the
#test infrastructure can support it.
# Issue 17498: make sure _rset does not raise SMTPServerDisconnected exception
def test__rset_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.mail_response = '451 Requested action aborted'
self.serv._SMTPchannel.disconnect = True
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
# Issue 5713: make sure close, not rset, is called if we get a 421 error
def test_421_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.mail_response = '421 closing connection'
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
def test_421_from_rcpt_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing']
with self.assertRaises(smtplib.SMTPRecipientsRefused) as r:
smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')})
def test_421_from_data_cmd(self):
class MySimSMTPChannel(SimSMTPChannel):
def found_terminator(self):
if self.smtp_state == self.DATA:
self.push('421 closing')
else:
super().found_terminator()
self.serv.channel_class = MySimSMTPChannel
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
with self.assertRaises(smtplib.SMTPDataError):
smtp.sendmail('[email protected]', ['[email protected]'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0)
def test_smtputf8_NotSupportedError_if_no_server_support(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertFalse(smtp.has_extn('smtputf8'))
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.sendmail,
'John', 'Sally', '', mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.mail, 'John', options=['BODY=8BITMIME', 'SMTPUTF8'])
def test_send_unicode_without_SMTPUTF8(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
self.assertRaises(UnicodeEncodeError, smtp.sendmail, 'Alice', 'Böb', '')
self.assertRaises(UnicodeEncodeError, smtp.mail, 'Älice')
class SimSMTPUTF8Server(SimSMTPServer):
def __init__(self, *args, **kw):
# The base SMTP server turns these on automatically, but our test
# server is set up to munge the EHLO response, so we need to provide
# them as well. And yes, the call is to SMTPServer not SimSMTPServer.
self._extra_features = ['SMTPUTF8', '8BITMIME']
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data,
enable_SMTPUTF8=self.enable_SMTPUTF8,
)<|fim▁hole|>
def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None,
rcpt_options=None):
self.last_peer = peer
self.last_mailfrom = mailfrom
self.last_rcpttos = rcpttos
self.last_message = data
self.last_mail_options = mail_options
self.last_rcpt_options = rcpt_options
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPUTF8SimTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPUTF8Server((HOST, 0), ('nowhere', -1),
decode_data=False,
enable_SMTPUTF8=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
def test_test_server_supports_extensions(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertTrue(smtp.has_extn('smtputf8'))
def test_send_unicode_with_SMTPUTF8_via_sendmail(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.sendmail('Jőhn', 'Sálly', m,
mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertEqual(self.serv.last_mailfrom, 'Jőhn')
self.assertEqual(self.serv.last_rcpttos, ['Sálly'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_unicode_with_SMTPUTF8_via_low_level_API(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertEqual(
smtp.mail('Jő', options=['BODY=8BITMIME', 'SMTPUTF8']),
(250, b'OK'))
self.assertEqual(smtp.rcpt('János'), (250, b'OK'))
self.assertEqual(smtp.data(m), (250, b'OK'))
self.assertEqual(self.serv.last_mailfrom, 'Jő')
self.assertEqual(self.serv.last_rcpttos, ['János'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_message_uses_smtputf8_if_addrs_non_ascii(self):
msg = EmailMessage()
msg['From'] = "Páolo <fő[email protected]>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
# XXX I don't know why I need two \n's here, but this is an existing
# bug (if it is one) and not a problem with the new functionality.
msg.set_content("oh là là, know what I mean, know what I mean?\n\n")
# XXX smtpd converts received \r\n to \n, so we can't easily test that
# we are successfully sending \r\n :(.
expected = textwrap.dedent("""\
From: Páolo <fő[email protected]>
To: Dinsdale
Subject: Nudge nudge, wink, wink \u1F609
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
MIME-Version: 1.0
oh là là, know what I mean, know what I mean?
""")
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
self.assertEqual(smtp.send_message(msg), {})
self.assertEqual(self.serv.last_mailfrom, 'fő[email protected]')
self.assertEqual(self.serv.last_rcpttos, ['Dinsdale'])
self.assertEqual(self.serv.last_message.decode(), expected)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_message_error_on_non_ascii_addrs_if_no_smtputf8(self):
msg = EmailMessage()
msg['From'] = "Páolo <fő[email protected]>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
self.assertRaises(smtplib.SMTPNotSupportedError,
                          smtp.send_message, msg)
EXPECTED_RESPONSE = encode_base64(b'\0psu\0doesnotexist', eol='')
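# For the credentials used below ('psu' / 'doesnotexist') this works out to
# base64(b'\0psu\0doesnotexist') == 'AHBzdQBkb2Vzbm90ZXhpc3Q='.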
class SimSMTPAUTHInitialResponseChannel(SimSMTPChannel):
def smtp_AUTH(self, arg):
# RFC 4954's AUTH command allows for an optional initial-response.
# Not all AUTH methods support this; some require a challenge. AUTH
# PLAIN does support it, so we test that case here. See issue #15014.
args = arg.split()
if args[0].lower() == 'plain':
if len(args) == 2:
# AUTH PLAIN <initial-response> with the response base 64
# encoded. Hard code the expected response for the test.
if args[1] == EXPECTED_RESPONSE:
self.push('235 Ok')
return
self.push('571 Bad authentication')
class SimSMTPAUTHInitialResponseServer(SimSMTPServer):
channel_class = SimSMTPAUTHInitialResponseChannel
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPAUTHInitialResponseSimTests(unittest.TestCase):
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPAUTHInitialResponseServer(
(HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
def testAUTH_PLAIN_initial_response_login(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost', timeout=15)
smtp.login('psu', 'doesnotexist')
smtp.close()
def testAUTH_PLAIN_initial_response_auth(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost', timeout=15)
smtp.user = 'psu'
smtp.password = 'doesnotexist'
code, response = smtp.auth('plain', smtp.auth_plain)
smtp.close()
self.assertEqual(code, 235)
@support.reap_threads
def test_main(verbose=None):
support.run_unittest(
BadHELOServerTests,
DebuggingServerTests,
GeneralTests,
NonConnectingTests,
SMTPAUTHInitialResponseSimTests,
SMTPSimTests,
TooLongLineTests,
)
if __name__ == '__main__':
test_main()<|fim▁end|> | |
<|file_name|>solutions.py<|end_file_name|><|fim▁begin|>__problem_title__ = "Comfortable distance"
__problem_url___ = "https://projecteuler.net/problem=364"
__problem_description__ = "There are N seats in a row. N people come after each other to fill the " \
"seats according to the following rules: We can verify that T(10) = " \
"61632 and T(1 000) mod 100 000 007 = 47255094. Find T(1 000 000) mod " \
"100 000 007."
import timeit
class Solution():
@staticmethod
def solution1():
pass
@staticmethod
def time_solutions():<|fim▁hole|> setup = 'from __main__ import Solution'
print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1))
if __name__ == '__main__':
s = Solution()
print(s.solution1())
s.time_solutions()<|fim▁end|> | |
<|file_name|>fields.py<|end_file_name|><|fim▁begin|>import bleach
from django.db.models.fields import TextField
from django.utils.encoding import smart_text
from .widgets import RichTextareaWidget
class RichTextarea(TextField):
"""
"""<|fim▁hole|> """
"""
if value:
html = value.replace(' ', ' ')
html = smart_text(html.encode('utf-8'))
ALLOWED_TAGS = [
'p',
'br',
'i',
'strong',
'b',
'ul',
'li',
'ol',
'table',
'tr',
'th',
'td',
]
ALLOWED_ATTRIBUTES = {
}
html = bleach.clean(html, tags=ALLOWED_TAGS, attributes=ALLOWED_ATTRIBUTES, strip=True)
return html
else:
return value
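# A quick illustration of the clean() call above (behaviour per bleach's
# documented strip semantics): disallowed tags are removed but their inner
# text is kept, e.g.
# bleach.clean('<p><em>hi</em></p>', tags=['p'], attributes={}, strip=True)
# returns '<p>hi</p>'.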
def formfield(self, **kwargs):
kwargs['widget'] = RichTextareaWidget
return super(RichTextarea, self).formfield(**kwargs)<|fim▁end|> |
def to_python(self, value): |
<|file_name|>ISpiritHealer.java<|end_file_name|><|fim▁begin|>package li.cryx.minecraft.death;
import java.util.logging.Logger;
import li.cryx.minecraft.death.i18n.ITranslator;
import li.cryx.minecraft.death.persist.AbstractPersistManager;
import org.bukkit.Location;
import org.bukkit.Material;
import org.bukkit.entity.Player;
public interface ISpiritHealer {
void addAltarLocation(Location loc);
Material getAltarBaseMaterial();
Material getAltarMaterial();
// FileConfiguration getConfig();
Logger getLogger();
AbstractPersistManager getPersist();
<|fim▁hole|> boolean isAltar(Location loc);
void restoreItems(Player player);
}<|fim▁end|> | ITranslator getTranslator();
|
<|file_name|>start.ts<|end_file_name|><|fim▁begin|>import { navigator, window } from 'global';
import addons, { DecorateStoryFunction, Channel } from '@storybook/addons';
import createChannel from '@storybook/channel-postmessage';
import { ClientApi, ConfigApi, StoryStore } from '@storybook/client-api';
import Events from '@storybook/core-events';
import { getSelectionSpecifierFromPath, setPath } from './url';
import { RenderStoryFunction } from './types';
import { loadCsf } from './loadCsf';
import { StoryRenderer } from './StoryRenderer';
const isBrowser =
navigator &&
navigator.userAgent &&
navigator.userAgent !== 'storyshots' &&
!(navigator.userAgent.indexOf('Node.js') > -1) &&
!(navigator.userAgent.indexOf('jsdom') > -1);
function getOrCreateChannel() {
let channel = null;
if (isBrowser) {
try {
channel = addons.getChannel();
} catch (e) {
channel = createChannel({ page: 'preview' });
addons.setChannel(channel);
}
}
return channel;
}
function getClientApi(decorateStory: DecorateStoryFunction, channel?: Channel) {
let storyStore: StoryStore;
let clientApi: ClientApi;
if (
typeof window !== 'undefined' &&
window.__STORYBOOK_CLIENT_API__ &&
window.__STORYBOOK_STORY_STORE__
) {
clientApi = window.__STORYBOOK_CLIENT_API__;
storyStore = window.__STORYBOOK_STORY_STORE__;
} else {
storyStore = new StoryStore({ channel });
clientApi = new ClientApi({ storyStore, decorateStory });
}
return { clientApi, storyStore };
}
function focusInInput(event: Event) {
const target = event.target as Element;
return /input|textarea/i.test(target.tagName) || target.getAttribute('contenteditable') !== null;
}
// todo improve typings
export default function start(
render: RenderStoryFunction,
{ decorateStory }: { decorateStory?: DecorateStoryFunction } = {}
) {
const channel = getOrCreateChannel();
const { clientApi, storyStore } = getClientApi(decorateStory, channel);
const configApi = new ConfigApi({ storyStore });
const storyRenderer = new StoryRenderer({ render, channel, storyStore });
// Only try and do URL/event based stuff in a browser context (i.e. not in storyshots)
if (isBrowser) {
const selectionSpecifier = getSelectionSpecifierFromPath();
if (selectionSpecifier) {
storyStore.setSelectionSpecifier(selectionSpecifier);
}
channel.on(Events.CURRENT_STORY_WAS_SET, setPath);<|fim▁hole|> window.onkeydown = (event: KeyboardEvent) => {
if (!focusInInput(event)) {
// We have to pick off the keys of the event that we need on the other side
const { altKey, ctrlKey, metaKey, shiftKey, key, code, keyCode } = event;
channel.emit(Events.PREVIEW_KEYDOWN, {
event: { altKey, ctrlKey, metaKey, shiftKey, key, code, keyCode },
});
}
};
}
if (typeof window !== 'undefined') {
window.__STORYBOOK_CLIENT_API__ = clientApi;
window.__STORYBOOK_STORY_STORE__ = storyStore;
window.__STORYBOOK_ADDONS_CHANNEL__ = channel; // may not be defined
}
const configure = loadCsf({ clientApi, storyStore, configApi });
return {
configure,
clientApi,
configApi,
channel,
forceReRender: () => storyRenderer.forceReRender(),
};
}<|fim▁end|> |
// Handle keyboard shortcuts |
<|file_name|>conferencing.py<|end_file_name|><|fim▁begin|>"Conferencing code"
# XXX A relatively simple enhancement to this would be to store the
# volumes for each source in the conference, and use an exponential
# decay type algorithm to determine the "loudest".
from shtoom.doug.source import Source
from twisted.internet.task import LoopingCall
from twisted.python import log
from sets import Set
class ConferenceError(Exception): pass
class ConferenceClosedError(ConferenceError): pass
class ConferenceMemberNotFoundError(ConferenceError): pass
CONFDEBUG = True
CONFDEBUG = False
class ConfSource(Source):
"A ConfSource connects a voiceapp, and via that, a leg, to a room"
def __init__(self, room, leg):
self._user = leg.getDialog().getRemoteTag().getURI()
self._room = room
self._room.addMember(self)
self._quiet = False
self.makeBuffer()
super(ConfSource, self).__init__()
def makeBuffer(self):
try:
from collections import deque
except ImportError:
# not optimal, but the queue isn't large
self.deque = list()
self.popleft = lambda: self.deque.pop(0)
else:
self.deque = deque()
self.popleft = self.deque.popleft
def truncBuffer(self):
while len(self.deque) > 3:
self.popleft()
def isPlaying(self):
return True
def isRecording(self):
return True
def read(self):
try:
ret = self._room.readAudio(self)
except ConferenceClosedError:
return self.app._va_sourceDone(self)
if not ret:
if not self._quiet:
log.msg("%r is now receiving silence"%(self))
self._quiet = True
elif self._quiet:
log.msg("%r has stopped receiving silence"%(self))
self._quiet = False
return ret
def close(self):
self._room.removeMember(self)
def write(self, bytes):
self.deque.append(bytes)
self.truncBuffer()
if not self._room.isOpen():
self.app._va_sourceDone(self)<|fim▁hole|> "get audio into the room"
# XXX tofix - might not have enough data (short packets). rock on.
if len(self.deque):
bytes = self.popleft()
return bytes
def __repr__(self):
return "<ConferenceUser %s in room %s at %x>"%(self._user,
self._room.getName(), id(self))
class Room:
"""A room is a conference. Everyone in the room hears everyone else
(well, kinda)
"""
# Theory of operation. Rather than rely on the individual sources
# timer loops (which would be, well, horrid), we trigger off our
# own timer.
# This means we don't have to worry about the end systems not
# contributing during a window.
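# Concretely, start() below arms a LoopingCall at 0.020 s, i.e. one
# mixAudio() pass per 20 ms (a typical RTP packetization interval), so
# each tick consumes roughly one packet of audio per member.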
_open = False
def __init__(self, name, MaxSpeakers=4):
self._name = name
self._members = Set()
self._audioOut = {}
self._audioOutDefault = ''
self._maxSpeakers = MaxSpeakers
self.start()
def start(self):
self._audioCalcLoop = LoopingCall(self.mixAudio)
self._audioCalcLoop.start(0.020)
self._open = True
def getName(self):
return self._name
def __repr__(self):
if self._open:
o = ''
else:
o = ' (closed)'
return "<ConferenceRoom %s%s with %d members>"%(self._name, o,
len(self._members))
def shutdown(self):
if hasattr(self._audioCalcLoop, 'cancel'):
self._audioCalcLoop.cancel()
else:
self._audioCalcLoop.stop()
# XXX close down any running sources!
self._members = Set()
del self._audioOut
self._open = False
removeRoom(self._name)
def addMember(self, confsource):
self._members.add(confsource)
if CONFDEBUG:
print "added", confsource, "to room", self
if not self._open:
self.start()
def removeMember(self, confsource):
if len(self._members) and confsource in self._members:
self._members.remove(confsource)
if CONFDEBUG:
print "removed", confsource, "from", self
else:
raise ConferenceMemberNotFoundError(confsource)
if not len(self._members):
if CONFDEBUG:
print "No members left, shutting down"
self.shutdown()
def isMember(self, confsource):
return confsource in self._members
def isOpen(self):
return self._open
def memberCount(self):
return len(self._members)
def readAudio(self, confsource):
if self._open:
return self._audioOut.get(confsource, self._audioOutDefault)
else:
raise ConferenceClosedError()
def mixAudio(self):
# XXX see the comment above about storing a decaying number for the
# volume. For instance, each time round the loop, take the calculated
# volume, and the stored volume, and do something like:
# newStoredVolume = (oldStoredVolume * 0.33) + (thisPacketVolume * 0.66)
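# A sketch of that idea (the _volumes dict is hypothetical, not an
# existing attribute):
# rms = audioop.rms(bytes, 2)
# self._volumes[m] = 0.33 * self._volumes.get(m, 0) + 0.66 * rms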
import audioop
self._audioOut = {}
if not self._open:
log.msg('mixing closed room %r'%(self,), system='doug')
return
audioIn = {}
for m in self._members:
bytes = m.getAudioForRoom()
if bytes: audioIn[m] = bytes
if CONFDEBUG:
print "room %r has %d members"%(self, len(self._members))
print "got %d samples this time"%len(audioIn)
print "samples: %r"%(audioIn.items(),)
# short-circuit this case
if len(self._members) < 2:
if CONFDEBUG:
print "less than 2 members, no sound"
self._audioOutDefault = ''
return
# Samples is (confsource, audio)
samples = audioIn.items()
# power is three-tuples of (rms,audio,confsource)
power = [ (audioop.rms(x[1],2),x[1], x[0]) for x in samples ]
power.sort(); power.reverse()
if CONFDEBUG:
for rms,audio,confsource in power:
print confsource, rms
# Speakers is a list of the _maxSpeakers loudest speakers
speakers = Set([x[2] for x in power[:self._maxSpeakers]])
# First we calculate the 'default' audio. Used for everyone who's
# not a speaker in the room.
samples = [ x[1] for x in power[:self._maxSpeakers] ]
scaledsamples = [ audioop.mul(x, 2, 1.0/len(samples)) for x in samples ]
if scaledsamples:
# ooo. a use of reduce. first time for everything...
try:
combined = reduce(lambda x,y: audioop.add(x, y, 2), scaledsamples)
except audioop.error, exc:
# XXX tofix!
print "combine got error %s"%(exc,)
print "lengths", [len(x) for x in scaledsamples]
combined = ''
else:
combined = ''
self._audioOutDefault = combined
# Now calculate output for each speaker.
allsamples = {}
for p,sample,speaker in power:
allsamples[speaker] = p, sample
for s in speakers:
# For each speaker, take the set of (other speakers), grab
# the top N speakers, and combine them. Add to the _audioOut
# dictionary
all = allsamples.copy()
del all[s]
power = all.values()
power.sort() ; power.reverse()
samples = [ x[1] for x in power[:self._maxSpeakers] ]
if samples:
scaled = [ audioop.mul(x, 2, 1.0/len(samples)) for x in samples]
try:
out = reduce(lambda x,y: audioop.add(x, y, 2), scaled)
except audioop.error, exc:
# XXX tofix!
print "combine got error %s"%(exc,)
print "lengths", [len(x) for x in scaled]
out = ''
else:
out = ''
if CONFDEBUG:
print "calc for", s, "is", audioop.rms(out, 2)
self._audioOut[s] = out
_RegisterOfAllRooms = {}
_StickyRoomNames = {}
def removeRoom(roomname):
global _RegisterOfAllRooms
if roomname in _RegisterOfAllRooms and roomname not in _StickyRoomNames:
del _RegisterOfAllRooms[roomname]
def newConferenceMember(roomname, leg):
global _RegisterOfAllRooms
if not roomname in _RegisterOfAllRooms:
_RegisterOfAllRooms[roomname] = Room(roomname)
room = _RegisterOfAllRooms[roomname]
return ConfSource(room, leg)<|fim▁end|> |
def getAudioForRoom(self): |
<|file_name|>ring.py<|end_file_name|><|fim▁begin|>from bisect import bisect
from uhashring.ring_ketama import KetamaRing
from uhashring.ring_meta import MetaRing
class HashRing:
"""Implement a consistent hashing ring."""
def __init__(self, nodes=[], **kwargs):
"""Create a new HashRing given the implementation.
:param nodes: nodes used to create the continuum (see doc for format).
:param hash_fn: use this callable function to hash keys, can be set to
'ketama' to use the ketama compatible implementation.
:param vnodes: default number of vnodes per node.
:param weight_fn: use this function to calculate the node's weight.
"""
hash_fn = kwargs.get("hash_fn", None)
vnodes = kwargs.get("vnodes", None)
weight_fn = kwargs.get("weight_fn", None)
if hash_fn == "ketama":
ketama_args = {k: v for k, v in kwargs.items() if k in ("replicas",)}
if vnodes is None:
vnodes = 40
self.runtime = KetamaRing(**ketama_args)
else:
if vnodes is None:
vnodes = 160
self.runtime = MetaRing(hash_fn)
self._default_vnodes = vnodes
self.hashi = self.runtime.hashi
if weight_fn and not hasattr(weight_fn, "__call__"):
raise TypeError("weight_fn should be a callable function")
self._weight_fn = weight_fn
if self._configure_nodes(nodes):
self.runtime._create_ring(self.runtime._nodes.items())
def _configure_nodes(self, nodes):
"""Parse and set up the given nodes.
:param nodes: nodes used to create the continuum (see doc for format).
"""
if isinstance(nodes, str):
nodes = [nodes]
elif not isinstance(nodes, (dict, list)):
raise ValueError(
"nodes configuration should be a list or a dict,"
" got {}".format(type(nodes))
)
conf_changed = False
for node in nodes:
conf = {
"hostname": node,
"instance": None,
"nodename": node,
"port": None,
"vnodes": self._default_vnodes,
"weight": 1,
}
current_conf = self.runtime._nodes.get(node, {})
nodename = node
# new node, trigger a ring update
if not current_conf:
conf_changed = True
# complex config
if isinstance(nodes, dict):
node_conf = nodes[node]
if isinstance(node_conf, int):
conf["weight"] = node_conf
elif isinstance(node_conf, dict):
for k, v in node_conf.items():
if k in conf:
conf[k] = v
# changing those config trigger a ring update
if k in ["nodename", "vnodes", "weight"]:
if current_conf.get(k) != v:
conf_changed = True
else:
raise ValueError(
"node configuration should be a dict or an int,"
" got {}".format(type(node_conf))
)
if self._weight_fn:
conf["weight"] = self._weight_fn(**conf)
# changing the weight of a node trigger a ring update
if current_conf.get("weight") != conf["weight"]:
conf_changed = True<|fim▁hole|>
def __delitem__(self, nodename):
"""Remove the given node.
:param nodename: the node name.
"""
self.runtime._remove_node(nodename)
remove_node = __delitem__
def __getitem__(self, key):
"""Returns the instance of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "instance")
get_node_instance = __getitem__
def __setitem__(self, nodename, conf={"weight": 1}):
"""Add the given node with its associated configuration.
:param nodename: the node name.
:param conf: the node configuration.
"""
if self._configure_nodes({nodename: conf}):
self.runtime._create_ring([(nodename, self._nodes[nodename])])
add_node = __setitem__
def _get_pos(self, key):
"""Get the index of the given key in the sorted key list.
We return the position with the nearest hash based on
the provided key unless we reach the end of the continuum/ring
in which case we return the 0 (beginning) index position.
:param key: the key to hash and look for.
"""
p = bisect(self.runtime._keys, self.hashi(key))
if p == len(self.runtime._keys):
return 0
else:
return p
def _get(self, key, what):
"""Generic getter magic method.
The node with the nearest but not less hash value is returned.
:param key: the key to look for.
:param what: the information to look for in, allowed values:
- instance (default): associated node instance
- nodename: node name
- pos: index of the given key in the ring
- tuple: ketama compatible (pos, name) tuple
- weight: node weight
"""
if not self.runtime._ring:
return None
pos = self._get_pos(key)
if what == "pos":
return pos
nodename = self.runtime._ring[self.runtime._keys[pos]]
if what in ["hostname", "instance", "port", "weight"]:
return self.runtime._nodes[nodename][what]
elif what == "dict":
return self.runtime._nodes[nodename]
elif what == "nodename":
return nodename
elif what == "tuple":
return (self.runtime._keys[pos], nodename)
def get(self, key):
"""Returns the node object dict matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "dict")
def get_instances(self):
"""Returns a list of the instances of all the configured nodes."""
return [
c.get("instance") for c in self.runtime._nodes.values() if c.get("instance")
]
def get_key(self, key):
"""Alias of ketama hashi method, returns the hash of the given key.
This method is present for hash_ring compatibility.
:param key: the key to look for.
"""
return self.hashi(key)
def get_node(self, key):
"""Returns the node name of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "nodename")
def get_node_hostname(self, key):
"""Returns the hostname of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "hostname")
def get_node_port(self, key):
"""Returns the port of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "port")
def get_node_pos(self, key):
"""Returns the index position of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "pos")
def get_node_weight(self, key):
"""Returns the weight of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "weight")
def get_nodes(self):
"""Returns a list of the names of all the configured nodes."""
return self.runtime._nodes.keys()
def get_points(self):
"""Returns a ketama compatible list of (position, nodename) tuples."""
return [(k, self.runtime._ring[k]) for k in self.runtime._keys]
def get_server(self, key):
"""Returns a ketama compatible (position, nodename) tuple.
:param key: the key to look for.
"""
return self._get(key, "tuple")
def iterate_nodes(self, key, distinct=True):
"""hash_ring compatibility implementation.
Given a string key it returns the nodes as a generator that
can hold the key.
The generator iterates one time through the ring
starting at the correct position.
if `distinct` is set, then the nodes returned will be unique,
i.e. no virtual copies will be returned.
"""
if not self.runtime._ring:
yield None
else:
for node in self.range(key, unique=distinct):
yield node["nodename"]
def print_continuum(self):
"""Prints a ketama compatible continuum report."""
numpoints = len(self.runtime._keys)
if numpoints:
print(f"Numpoints in continuum: {numpoints}")
else:
print("Continuum empty")
for p in self.get_points():
point, node = p
print(f"{node} ({point})")
def range(self, key, size=None, unique=True):
"""Returns a generator of nodes' configuration available
in the continuum/ring.
:param key: the key to look for.
:param size: limit the list to at most this number of nodes.
:param unique: a node may only appear once in the list (default True).
"""
all_nodes = set()
if unique:
size = size or len(self.runtime._nodes)
else:
all_nodes = []
pos = self._get_pos(key)
for key in self.runtime._keys[pos:]:
nodename = self.runtime._ring[key]
if unique:
if nodename in all_nodes:
continue
all_nodes.add(nodename)
else:
all_nodes.append(nodename)
yield self.runtime._nodes[nodename]
if len(all_nodes) == size:
break
else:
for i, key in enumerate(self.runtime._keys):
if i < pos:
nodename = self.runtime._ring[key]
if unique:
if nodename in all_nodes:
continue
all_nodes.add(nodename)
else:
all_nodes.append(nodename)
yield self.runtime._nodes[nodename]
if len(all_nodes) == size:
break
def regenerate(self):
self.runtime._create_ring(self.runtime._nodes.items())
@property
def conf(self):
return self.runtime._nodes
nodes = conf
@property
def distribution(self):
return self.runtime._distribution
@property
def ring(self):
return self.runtime._ring
continuum = ring
@property
def size(self):
return len(self.runtime._ring)
@property
def _ring(self):
return self.runtime._ring
@property
def _nodes(self):
return self.runtime._nodes
@property
def _keys(self):
return self.runtime._keys<|fim▁end|> | self.runtime._nodes[nodename] = conf
return conf_changed |
<|file_name|>0011_drop_old_slug.py<|end_file_name|><|fim▁begin|>from south.db import db
from django.db import models
from albums.models import *
class Migration:
def forwards(self, orm):
# Deleting field 'AlbumItem.slug'
db.delete_column('albums_albumitem', 'slug')
def backwards(self, orm):
# Adding field 'AlbumItem.slug'
db.add_column('albums_albumitem', 'slug', orm['albums.albumitem:slug'])
models = {
'albums.album': {
'album_slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'albumitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['albums.AlbumItem']", 'unique': 'True', 'primary_key': 'True'}),
'highlight': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'highlight_parent'", 'null': 'True', 'to': "orm['albums.AlbumItem']"}),
'owners': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']"})
},
'albums.albumconvertableitem': {
'Meta': {'unique_together': "(('item_slug', 'parent'),)"},
'albumitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['albums.AlbumItem']", 'unique': 'True', 'primary_key': 'True'}),
'allow_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'allow_ratings': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'appearances': ('django.contrib.contenttypes.generic.GenericRelation', [], {'to': "orm['appearances.Appearance']"}),
'converted': ('django.db.models.fields.NullBooleanField', [], {'null': 'True'}),
'item_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': "orm['albums.Album']"}),
'rating_score': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'rating_votes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}),
'submitter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),<|fim▁hole|> 'thumbFilename': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
},
'albums.albumitem': {
'created': ('django.db.models.fields.DateTimeField', [], {}),
'edited': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'albums.image': {
'albumconvertableitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['albums.AlbumConvertableItem']", 'unique': 'True', 'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'imageFilename': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
},
'albums.video': {
'albumconvertableitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['albums.AlbumConvertableItem']", 'unique': 'True', 'primary_key': 'True'}),
'flvFilename': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'video': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
},
'appearances.appearance': {
'Meta': {'unique_together': "(('user', 'content_type', 'object_id'),)"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created_date': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['albums']<|fim▁end|> | |
<|file_name|>root.py<|end_file_name|><|fim▁begin|>'''
Puck: FreeBSD virtualization guest configuration server
Copyright (C) 2011 The Hotel Communication Network inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os.path
import cherrypy
from libs.controller import *
import models
from models import Users
class RootController(Controller):
crumbs = [Crumb("/", "Home")]
def __init__(self, lookup):
Controller.__init__(self, lookup)
self._lookup = lookup
self._routes = {}
@cherrypy.expose
@cherrypy.tools.myauth()
def index(self):
return self.render("index.html", self.crumbs[:-1])
@cherrypy.expose
def login(self, **post):
if post:
self._login(post)
return self.render("login.html", self.crumbs[:-1])
@cherrypy.expose
def logout(self, **post):
cherrypy.session.delete()
raise cherrypy.HTTPRedirect("/login")
def add(self, route, cls):
self._routes[route] = cls
def load(self):
[setattr(self, route, self._routes[route](self._lookup)) for route in self._routes]
def _login(self, post):
fields = ['user.username', 'user.password']
for f in fields:
if not f in post:
cherrypy.session['flash'] = "Invalid form data."
return False
hash_password = Users.hash_password(post['user.password'])<|fim▁hole|> if not user:
cherrypy.session['flash'] = 'Invalid username or password.'
return False
creds = user.generate_auth()
cherrypy.session['user.id'] = user.id
cherrypy.session['user.group'] = user.user_group
cherrypy.session['credentials'] = creds
raise cherrypy.HTTPRedirect('/index')<|fim▁end|> | user = Users.first(username=post['user.username'], password=hash_password)
|
<|file_name|>pm.js<|end_file_name|><|fim▁begin|>/*
Copyright 2019-2022 Michael Pozhidaev <[email protected]>
This file is part of LUWRAIN.
<|fim▁hole|> modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.
LUWRAIN is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
*/
Luwrain.addCommand("suspend", function(){
if (!Luwrain.popups.confirmDefaultYes(Luwrain.i18n.static.powerSuspendPopupName, Luwrain.i18n.static.powerSuspendPopupText))
return;
Luwrain.os.suspend();
});<|fim▁end|> | LUWRAIN is free software; you can redistribute it and/or |
<|file_name|>rpcbind_test.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test for -rpcbind, as well as -rpcallowip and -rpcconnect
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
import json
import shutil
import subprocess
import tempfile
import traceback
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
from netutil import *
def run_bind_test(tmpdir, allow_ips, connect_to, addresses, expected):
'''
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
'''
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
base_args = ['-disablewallet', '-nolisten']
if allow_ips:
base_args += ['-rpcallowip=' + x for x in allow_ips]
binds = ['-rpcbind='+addr for addr in addresses]
nodes = start_nodes(1, tmpdir, [base_args + binds], connect_to)
try:
pid = dogecoind_processes[0].pid<|fim▁hole|> stop_nodes(nodes)
wait_dogecoinds()
def run_allowip_test(tmpdir, allow_ips, rpchost, rpcport):
'''
Start a node with rpcwallow IP, and request getinfo
at a non-localhost IP.
'''
base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
nodes = start_nodes(1, tmpdir, [base_args])
try:
# connect to node through non-loopback interface
url = "http://wowsuchtest:3kt4yEUdDJ4YGzsGNADvjYwubwaFhEEYjotPJDU2XMgG@%s:%d" % (rpchost, rpcport,)
node = AuthServiceProxy(url)
node.getinfo()
finally:
node = None # make sure connection will be garbage collected and closed
stop_nodes(nodes)
wait_dogecoinds()
def run_test(tmpdir):
assert(sys.platform == 'linux2') # due to OS-specific network stats queries, this test works only on Linux
# find the first non-loopback interface for testing
non_loopback_ip = None
for name,ip in all_interfaces():
if ip != '127.0.0.1':
non_loopback_ip = ip
break
if non_loopback_ip is None:
assert(not 'This test requires at least one non-loopback IPv4 interface')
print("Using interface %s for testing" % non_loopback_ip)
defaultport = rpc_port(0)
# check default without rpcallowip (IPv4 and IPv6 localhost)
run_bind_test(tmpdir, None, '127.0.0.1', [],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check default with rpcallowip (IPv6 any)
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', [],
[('::0', defaultport)])
# check only IPv4 localhost (explicit)
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
[('127.0.0.1', defaultport)])
# check only IPv4 localhost (explicit) with alternative port
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
[('127.0.0.1', 32171)])
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
# check only IPv6 localhost (explicit)
run_bind_test(tmpdir, ['[::1]'], '[::1]', ['[::1]'],
[('::1', defaultport)])
# check both IPv4 and IPv6 localhost (explicit)
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check only non-loopback interface
run_bind_test(tmpdir, [non_loopback_ip], non_loopback_ip, [non_loopback_ip],
[(non_loopback_ip, defaultport)])
# Check that with invalid rpcallowip, we are denied
run_allowip_test(tmpdir, [non_loopback_ip], non_loopback_ip, defaultport)
try:
run_allowip_test(tmpdir, ['1.1.1.1'], non_loopback_ip, defaultport)
assert(not 'Connection not denied by rpcallowip as expected')
except ValueError:
pass
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave dogecoinds and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing dogecoind/dogecoin-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
(options, args) = parser.parse_args()
os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
nodes = []
try:
print("Initializing test directory "+options.tmpdir)
if not os.path.isdir(options.tmpdir):
os.makedirs(options.tmpdir)
initialize_chain(options.tmpdir)
run_test(options.tmpdir)
success = True
except AssertionError as e:
print("Assertion failed: "+e.message)
except Exception as e:
print("Unexpected exception caught during testing: "+str(e))
traceback.print_tb(sys.exc_info()[2])
if not options.nocleanup:
print("Cleaning up")
wait_dogecoinds()
shutil.rmtree(options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
if __name__ == '__main__':
main()<|fim▁end|> | assert_equal(set(get_bind_addrs(pid)), set(expected))
finally: |
<|file_name|>registry_init.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
"""Load all flows so that they are visible in the registry."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
# pylint: disable=unused-import
# These imports populate the Flow registry
from grr_response_server.flows.general import administrative
from grr_response_server.flows.general import apple_firmware
from grr_response_server.flows.general import artifact_fallbacks
from grr_response_server.flows.general import ca_enroller
from grr_response_server.flows.general import checks
from grr_response_server.flows.general import collectors
from grr_response_server.flows.general import discovery
from grr_response_server.flows.general import export
from grr_response_server.flows.general import file_finder
from grr_response_server.flows.general import filesystem
from grr_response_server.flows.general import filetypes
from grr_response_server.flows.general import find
from grr_response_server.flows.general import fingerprint
from grr_response_server.flows.general import hardware
from grr_response_server.flows.general import memory<|fim▁hole|>from grr_response_server.flows.general import osquery
from grr_response_server.flows.general import processes
from grr_response_server.flows.general import registry
from grr_response_server.flows.general import timeline
from grr_response_server.flows.general import transfer
from grr_response_server.flows.general import webhistory
from grr_response_server.flows.general import windows_vsc<|fim▁end|> | from grr_response_server.flows.general import network |
<|file_name|>loadbalancerloadbalancingrules.go<|end_file_name|><|fim▁begin|>package network
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// LoadBalancerLoadBalancingRulesClient is the network Client
type LoadBalancerLoadBalancingRulesClient struct {
BaseClient
}
// NewLoadBalancerLoadBalancingRulesClient creates an instance of the LoadBalancerLoadBalancingRulesClient client.
func NewLoadBalancerLoadBalancingRulesClient(subscriptionID string) LoadBalancerLoadBalancingRulesClient {
return NewLoadBalancerLoadBalancingRulesClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
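// Illustrative usage sketch (an assumption for readers, not part of the
// generated code): the client is typically given an authorizer before
// calling Get or List. All names below are placeholders.
//
//	client := NewLoadBalancerLoadBalancingRulesClient("<subscriptionID>")
//	client.Authorizer = autorest.NewBearerAuthorizer(tokenProvider)
//	rule, err := client.Get(ctx, "<resourceGroup>", "<loadBalancer>", "<rule>")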
// NewLoadBalancerLoadBalancingRulesClientWithBaseURI creates an instance of the LoadBalancerLoadBalancingRulesClient
// client.
func NewLoadBalancerLoadBalancingRulesClientWithBaseURI(baseURI string, subscriptionID string) LoadBalancerLoadBalancingRulesClient {
return LoadBalancerLoadBalancingRulesClient{NewWithBaseURI(baseURI, subscriptionID)}<|fim▁hole|>
// Get gets the specified load balancer load balancing rule.
// Parameters:
// resourceGroupName - the name of the resource group.
// loadBalancerName - the name of the load balancer.
// loadBalancingRuleName - the name of the load balancing rule.
func (client LoadBalancerLoadBalancingRulesClient) Get(ctx context.Context, resourceGroupName string, loadBalancerName string, loadBalancingRuleName string) (result LoadBalancingRule, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/LoadBalancerLoadBalancingRulesClient.Get")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.GetPreparer(ctx, resourceGroupName, loadBalancerName, loadBalancingRuleName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.LoadBalancerLoadBalancingRulesClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.LoadBalancerLoadBalancingRulesClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.LoadBalancerLoadBalancingRulesClient", "Get", resp, "Failure responding to request")
}
return
}
// GetPreparer prepares the Get request.
func (client LoadBalancerLoadBalancingRulesClient) GetPreparer(ctx context.Context, resourceGroupName string, loadBalancerName string, loadBalancingRuleName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"loadBalancerName": autorest.Encode("path", loadBalancerName),
"loadBalancingRuleName": autorest.Encode("path", loadBalancingRuleName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2018-01-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/loadBalancingRules/{loadBalancingRuleName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client LoadBalancerLoadBalancingRulesClient) GetSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client LoadBalancerLoadBalancingRulesClient) GetResponder(resp *http.Response) (result LoadBalancingRule, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// List gets all the load balancing rules in a load balancer.
// Parameters:
// resourceGroupName - the name of the resource group.
// loadBalancerName - the name of the load balancer.
func (client LoadBalancerLoadBalancingRulesClient) List(ctx context.Context, resourceGroupName string, loadBalancerName string) (result LoadBalancerLoadBalancingRuleListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/LoadBalancerLoadBalancingRulesClient.List")
defer func() {
sc := -1
if result.lblbrlr.Response.Response != nil {
sc = result.lblbrlr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx, resourceGroupName, loadBalancerName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.LoadBalancerLoadBalancingRulesClient", "List", nil, "Failure preparing request")
return
}
resp, err := client.ListSender(req)
if err != nil {
result.lblbrlr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.LoadBalancerLoadBalancingRulesClient", "List", resp, "Failure sending request")
return
}
result.lblbrlr, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.LoadBalancerLoadBalancingRulesClient", "List", resp, "Failure responding to request")
}
return
}
// ListPreparer prepares the List request.
func (client LoadBalancerLoadBalancingRulesClient) ListPreparer(ctx context.Context, resourceGroupName string, loadBalancerName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"loadBalancerName": autorest.Encode("path", loadBalancerName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2018-01-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/loadBalancingRules", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client LoadBalancerLoadBalancingRulesClient) ListSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client LoadBalancerLoadBalancingRulesClient) ListResponder(resp *http.Response) (result LoadBalancerLoadBalancingRuleListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listNextResults retrieves the next set of results, if any.
func (client LoadBalancerLoadBalancingRulesClient) listNextResults(ctx context.Context, lastResults LoadBalancerLoadBalancingRuleListResult) (result LoadBalancerLoadBalancingRuleListResult, err error) {
req, err := lastResults.loadBalancerLoadBalancingRuleListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.LoadBalancerLoadBalancingRulesClient", "listNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.LoadBalancerLoadBalancingRulesClient", "listNextResults", resp, "Failure sending next results request")
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.LoadBalancerLoadBalancingRulesClient", "listNextResults", resp, "Failure responding to next results request")
}
return
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client LoadBalancerLoadBalancingRulesClient) ListComplete(ctx context.Context, resourceGroupName string, loadBalancerName string) (result LoadBalancerLoadBalancingRuleListResultIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/LoadBalancerLoadBalancingRulesClient.List")
defer func() {
sc := -1
if result.Response().Response.Response != nil {
sc = result.page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.page, err = client.List(ctx, resourceGroupName, loadBalancerName)
return
}<|fim▁end|> | } |
<|file_name|>mesh.py<|end_file_name|><|fim▁begin|>#import matplotlib.pyplot as plt
import numpy as np
from collections import deque
import numbers
"""
Created on Jun 29, 2016
@author: hans-werner
"""
def convert_to_array(x, dim=None, return_is_singleton=False):
"""
Convert point or list of points to a numpy array.
Inputs:
x: (list of) point(s) to be converted to an array. Allowable inputs are
1. a list of Vertices,
2. a list of tuples,
3. a list of numbers or (2,) arrays
            4. a numpy array of the appropriate size
dim: int, (1 or 2) optional number used to adjudicate ambiguous cases.
return_is_singleton: bool, if True, return whether the input x is a
singleton.
Outputs:
x: double, numpy array containing the points in x.
If x is one-dimensional (i.e. a list of 1d Vertices, 1-tuples, or
a 1d vector), convert to an (n,1) array.
            If x is two-dimensional (i.e. a list of 2d Vertices, 2-tuples, or
a 2d array), return an (n,2) array.
"""
is_singleton = False
if type(x) is list:
#
# Points in list
#
if all(isinstance(xi, Vertex) for xi in x):
#
# All points are of type vertex
#
x = [xi.coordinates() for xi in x]
x = np.array(x)
elif all(type(xi) is tuple for xi in x):
#
# All points are tuples
#
x = np.array(x)
        elif all(isinstance(xi, numbers.Real) for xi in x):
#
# List of real numbers -> turn into (n,1) array
#
x = np.array(x)
x = x[:,np.newaxis]
elif all(type(xi) is np.ndarray for xi in x):
#
# list of (2,) arrays
#
x = np.array(x)
else:
            raise Exception('For x, use arrays or lists of tuples or vertices.')
elif isinstance(x, Vertex):
#
# A single vertex
#
x = np.array([x.coordinates()])
is_singleton = True
elif isinstance(x, numbers.Real):
if dim is not None:
assert dim==1, 'Dimension should be 1.'
x = np.array([[x]])
is_singleton = True
elif type(x) is tuple:
#
# A tuple
#
if len(x)==1:
#
# A oneple
#
x, = x
x = np.array([[x]])
is_singleton = True
elif len(x)==2:
#
# A tuple
#
x,y = x
x = np.array([[x,y]])
is_singleton = True
elif type(x) is np.ndarray:
#
# Points in numpy array
#
if len(x.shape)==1:
#
# x is a one-dimensional vector
if len(x)==1:
#
# x is a vector with one entry
#
if dim is not None:
assert dim==1, 'Incompatible dimensions'
x = x[:,np.newaxis]
            elif len(x) == 2:
#
# x is a vector 2 entries: ambiguous
#
if dim == 2:
#
# Turn 2-vector into a (1,2) array
#
x = x[np.newaxis,:]
else:
#
# Turn vector into (2,1) array
#
x = x[:,np.newaxis]
else:
#
# Turn vector into (n,1) array
#
x = x[:,np.newaxis]
elif len(x.shape)==2:
assert x.shape[1]<=2,\
'Dimension of array should be at most 2'
else:
raise Exception('Only 1- or 2 dimensional arrays allowed.')
if return_is_singleton:
# Specify whether x is a singleton
return x, is_singleton
else:
return x
class Markable(object):
"""
Description: Any object that can be assigned a flag
"""
    def __init__(self):
        """
        Constructor
        """
        self._flags = set()
    def mark(self, flag=None):
        """
        Mark object with given flag (True if no flag is specified)
        """
        self._flags.add(True if flag is None else flag)
    def unmark(self, flag=None):
        """
        Remove flag (remove all flags if none is specified)
        """
        if flag is None:
            self._flags.clear()
        else:
            self._flags.discard(flag)
    def is_marked(self, flag=None):
        """
        Determine whether object is marked (with the given flag)
        """
        if flag is None:
            return bool(self._flags)
        else:
            return flag in self._flags
class Tree(object):
"""
Description: Tree object for storing and manipulating adaptively
refined quadtree meshes.
Attributes:
node_type: str, specifying node's relation to parents and/or children
'ROOT' (no parent node),
'BRANCH' (parent & children), or
'LEAF' (parent but no children)
address: int, list allowing access to node's location within the tree
General form [k0, k1, ..., kd], d=depth, ki in [0,...,n_children_i]
address = [] if ROOT node.
depth: int, depth within the tree (ROOT nodes are at depth 0).
parent: Tree/Mesh whose child this is
children: list of child nodes.
flag: set, of str/int/bool allowing tree nodes to be marked.
"""
def __init__(self, n_children=None, regular=True, flag=None,
parent=None, position=None, forest=None):
"""
Constructor
"""
#
# Set some attributes
#
self._is_regular = regular
self._parent = parent
self._forest = None
self._in_forest = False
self._node_position = position
#
# Set flags
#
self._flags = set()
if flag is not None:
if type(flag) is set:
# Add all flags in set
for f in flag:
self.mark(f)
else:
# Add single flag
self.mark(flag)
if parent is None:
#
# ROOT Tree
#
self._node_type = 'ROOT'
self._node_depth = 0
self._node_address = []
if self.is_regular():
# Ensure that the number of ROOT children is specified
assert n_children is not None, \
'ROOT node: Specify number of children.'
else:
# Not a regular tree: number of children 0 initially
n_children = 0
if forest is not None:
#
# Tree contained in a Forest
#
assert isinstance(forest, Forest), \
                'Input "forest" must be an instance of the Forest class.'
#
# Add tree to forest
#
forest.add_tree(self)
self._in_forest = True
self._forest = forest
self._node_address = [self.get_node_position()]
else:
#
# Free standing ROOT cell
#
assert self.get_node_position() is None, \
'Unattached ROOT cell has no position.'
#
# Assign space for children
#
self._children = [None]*n_children
self._n_children = n_children
else:
#
# LEAF Node
#
position_missing = 'Position within parent cell must be specified.'
assert self.get_node_position() is not None, position_missing
self._node_type = 'LEAF'
# Determine cell's depth and address
self._node_depth = parent.get_depth() + 1
self._node_address = parent.get_node_address() + [position]
if regular:
#
# Regular tree -> same number of children in every generation
#
if n_children is not None:
                    assert n_children == self.get_parent().n_children(),\
                        'Regular tree: node should have the same ' + \
                        'number of children as its parent.'
else:
n_children = self.get_parent().n_children()
else:
n_children = 0
#
# Assign space for children
#
self._children = [None]*n_children
self._n_children = n_children
# Change parent type (from LEAF)
if parent.get_node_type() == 'LEAF':
parent.set_node_type('BRANCH')
def info(self):
"""
Display essential information about Tree
"""
print('')
print('-'*50)
print('Tree Info')
print('-'*50)
print('{0:10}: {1}'.format('Address', self._node_address))
print('{0:10}: {1}'.format('Type', self._node_type))
if self._node_type != 'ROOT':
print('{0:10}: {1}'.format('Parent', \
self.get_parent().get_node_address()))
print('{0:10}: {1}'.format('Position', self._node_position))
print('{0:10}: {1}'.format('Flags', self._flags))
if self.has_children():
child_string = ''
for i in range(len(self._children)):
child = self.get_child(i)
if child is not None:
child_string += str(i) + ': 1, '
else:
child_string += str(i) + ': 0, '
print('{0:10}: {1}'.format('Children',child_string))
else:
child_string = 'None'
print('{0:10}: {1}'.format('Children',child_string))
print('')
def get_node_type(self):
"""
Returns whether node is a ROOT, a BRANCH, or a LEAF
"""
return self._node_type
def get_node_position(self):
"""
Returns position of current node within parent/forest
"""
return self._node_position
def set_node_type(self, node_type):
"""
Sets a node's type
"""
assert node_type in ['ROOT', 'BRANCH', 'LEAF'], \
'Input "node_type" should be "ROOT", "BRANCH", or "LEAF".'
if node_type == 'ROOT':
assert not self.has_parent(), \
'ROOT nodes should not have a parent.'
elif node_type == 'LEAF':
assert not self.has_children(), \
'LEAF nodes should not have children.'
elif node_type == 'BRANCH':
assert self.has_parent(),\
'BRANCH nodes should have a parent.'
self._node_type = node_type
def get_node_address(self):
"""
Return address of the node
"""
return self._node_address
def get_depth(self):
"""
Return depth of current node
"""
return self._node_depth
def tree_depth(self, flag=None):
"""
Return the maximum depth of the tree
"""
depth = self.get_depth()
if self.has_children():
for child in self.get_children(flag=flag):
d = child.tree_depth()
if d > depth:
depth = d
return depth
def in_forest(self):
"""
Determine whether a (ROOT)cell lies within a forest
"""
return self._in_forest
def get_forest(self):
"""
Returns the forest containing the node
"""
return self._forest
def plant_in_forest(self, forest, position):
"""
Modify own attributes to reflect node's containment within a forest
"""
assert self.get_node_type() == 'ROOT', \
'Only ROOT nodes are in the forest.'
self._node_position = position
self._node_address = [position]
self._in_forest = True
self._forest = forest
def remove_from_forest(self):
"""
Remove node from forest
"""
self._in_forest = False
self._node_position = None
self._node_address = []
self._forest = None
def is_regular(self):
"""
Determine whether node is a regular tree, that is all subnodes
have the same number of children.
"""
return self._is_regular
def mark(self, flag=None, recursive=False, reverse=False):
"""
Mark Tree and its progeny/ancestors
Inputs:
flag: int, optional label used to mark node
recursive: bool, also mark all sub-/super nodes
"""
if flag is None:
#
# No flag specified: add "True" flag
#
self._flags.add(True)
else:
#
# Add given flag
#
self._flags.add(flag)
#
# Add flag to progeny/parents
#
if recursive:
if reverse:
#
# Mark ancestors
#
if self.has_parent():
parent = self.get_parent()
parent.mark(flag=flag, recursive=recursive, \
reverse=reverse)
else:
#
# Mark progeny
#
if self.has_children():
for child in self.get_children():
child.mark(flag=flag, recursive=recursive)
def unmark(self, flag=None, recursive=False, reverse=False):
"""
Unmark Cell
Inputs:
flag: label to be removed
recursive: bool, also unmark all subcells
"""
#
# Remove label from own list
#
if flag is None:
# No flag specified -> delete all
self._flags.clear()
else:
# Remove specified flag (if present)
if flag in self._flags: self._flags.remove(flag)
#
# Remove label from children if applicable
#
if recursive:
if reverse:
#
# Unmark ancestors
#
if self.has_parent():
parent = self.get_parent()
parent.unmark(flag=flag, recursive=recursive, \
reverse=reverse)
else:
#
# Unmark progeny
#
if self.has_children():
for child in self.get_children():
child.unmark(flag=flag, recursive=recursive)
def is_marked(self,flag=None):
"""
Check whether cell is marked
Input: flag, label for QuadCell: usually one of the following:
True (catchall), 'split' (split cell), 'count' (counting)
"""
if flag is None:
# No flag -> check whether set is empty
if self._flags:
return True
else:
return False
else:
            # Check whether given label is contained in cell's set
return flag in self._flags
def has_parent(self, flag=None):
"""
Returns True if node has (flagged) parent node, False otherwise
"""
if flag is not None:
return self._parent is not None and self._parent.is_marked(flag)
else:
return self._parent is not None
def get_parent(self, flag=None):
"""
Return cell's parent, or first ancestor with given flag (None if there
are none).
"""
if flag is None:
if self.has_parent():
return self._parent
else:
if self.has_parent(flag):
parent = self._parent
if parent.is_marked(flag):
return parent
else:
return parent.get_parent(flag=flag)
def get_root(self):
"""
Find the ROOT cell for a given cell
"""
if self._node_type == 'ROOT':
return self
else:
return self._parent.get_root()
def has_children(self, position=None, flag=None):
"""
Determine whether node has children
Inputs:
position: int, position of the child node within self
flag: str/int/bool, required marker for positive answer
Output:
has_children: bool, true if self has (marked) children, false
otherwise.
"""
if position is None:
#
# Check for any children
#
if flag is None:
return any(child is not None for child in self._children)
else:
#
# Check for flagged children
#
for child in self._children:
if child is not None and child.is_marked(flag):
return True
return False
else:
#
# Check for child in specific position
#
# Ensure position is valid
assert position < self._n_children, \
'Position exceeds the number of children.'
if flag is None:
#
# No flag specified
#
return self._children[position] is not None
else:
#
# With flag
#
return (self._children[position] is not None) and \
self._children[position].is_marked(flag)
def get_child(self, position):
"""
Return the child in a given position
"""
assert position<self.n_children() and position>-self.n_children(), \
'Input "position" exceeds number of children.'
return self._children[position]
def get_children(self, flag=None, reverse=False):
"""
Iterator: Returns (flagged) children, in (reverse) order
Inputs:
flag: [None], optional marker
reverse: [False], option to list children in reverse order
(useful for the 'traverse' function).
Note: Only returns children that are not None
Use this to obtain a consistent iteration of children
"""
if self.has_children(flag=flag):
if not reverse:
#
# Go in usual order
#
for child in self._children:
if child is not None:
if flag is None:
yield child
elif child.is_marked(flag):
yield child
else:
#
# Go in reverse order
#
for child in reversed(self._children):
if child is not None:
if flag is None:
yield child
elif child.is_marked(flag):
yield child
def n_children(self):
"""
Returns the number of children
"""
return self._n_children
def remove(self):
"""
Remove node (self) from parent's list of children
"""
assert self.get_node_type() != 'ROOT', 'Cannot delete ROOT node.'
self.get_parent()._children[self._node_position] = None
def add_child(self):
"""
Add a child to current node (only works if node is not regular).
"""
assert not self.is_regular(),\
'Regular tree: add children by method "split".'
child = Tree(parent=self, regular=False, position=self.n_children())
self._children.append(child)
self._n_children += 1
def delete_children(self, position=None):
"""
Delete all sub-nodes of given node
"""
#
# Change children to None
#
if position is None:
for child in self.get_children():
child.remove()
else:
assert position < self.n_children(), \
'Position exceeds number of children '
child = self._children[position]
child.remove()
#
# Change node type from LEAF to BRANCH
#
if self._node_type == 'BRANCH' and not self.has_children():
self._node_type = 'LEAF'
def split(self, n_children=None):
"""
Split node into subnodes
"""
if self.is_regular():
#
# Regular tree: Number of grandchildren inherited
#
for i in range(self.n_children()):
#
# Instantiate Children
#
self._children[i] = Tree(parent=self, position=i)
else:
#
# Not a regular tree: Must specify number of children
#
assert self.n_children() == 0, \
'Cannot split irregular tree with children. ' + \
'Use "add_child" method.'
for i in range(n_children):
#
# Instantiate Children
#
self.add_child()
def traverse(self, queue=None, flag=None, mode='depth-first'):
"""
Iterator: Return current cell and all its (flagged) sub-cells
Inputs:
flag [None]: cell flag
mode: str, type of traversal
'depth-first' [default]: Each cell's progeny is visited before
proceeding to next cell.
'breadth-first': All cells at a given depth are returned before
proceeding to the next level.
Output:
all_nodes: list, of all nodes in tree (marked with flag).
"""
if queue is None:
queue = deque([self])
while len(queue) != 0:
if mode == 'depth-first':
node = queue.pop()
elif mode == 'breadth-first':
node = queue.popleft()
else:
raise Exception('Input "mode" must be "depth-first"'+\
' or "breadth-first".')
if node.has_children():
reverse = True if mode=='depth-first' else False
for child in node.get_children(reverse=reverse):
queue.append(child)
if flag is not None:
if node.is_marked(flag):
yield node
else:
yield node
def get_leaves(self, flag=None, subtree_flag=None, mode='breadth-first'):
"""
Return all marked LEAF nodes (nodes with no children) of current subtree
Inputs:
*flag: If flag is specified, return all leaf nodes within rooted
subtree marked with flag (or an empty list if there are none).
*subtree_flag: Label specifying the rooted subtree (rs) within which
to search for (flagged) leaves.
*mode: Method by which to traverse the tree ('breadth-first' or
'depth-first').
Outputs:
leaves: list, of LEAF nodes.
Note:
The rooted subtree must contain all ancestors of a marked node
"""
#
# Get all leaves of the subtree
#
leaves = []
for node in self.traverse(flag=subtree_flag, mode=mode):
#
# Iterate over all sub-nodes within subtree
#
if not node.has_children(flag=subtree_flag):
#
# Nodes without marked children are the subtree leaves
#
leaves.append(node)
#
# Return marked leaves
#
if flag is None:
return leaves
else:
return [leaf for leaf in leaves if leaf.is_marked(flag)]
def make_rooted_subtree(self, flag):
"""
Mark all ancestors of flagged node with same flag, to turn flag into
a subtree marker.
"""
#
# Search through all nodes
#
for node in self.get_root().traverse(mode='breadth-first'):
if node.is_marked(flag):
#
# If node is flagged, mark all its ancestors
#
ancestor = node
while ancestor.has_parent():
ancestor = ancestor.get_parent()
ancestor.mark(flag)
def is_rooted_subtree(self, flag):
"""
Determine whether a given flag defines a rooted subtree
Note: This takes roughly the same amount of work as make_rooted_subtree
"""
#
# Search through all nodes
#
for node in self.get_root().traverse(mode='breadth-first'):
if node.is_marked(flag):
#
# Check that ancestors of flagged node are also marked
#
ancestor = node
while ancestor.has_parent():
ancestor = ancestor.get_parent()
if not ancestor.is_marked(flag):
#
# Ancestor not marked: not a rooted subtree
#
return False
#
# No problems: it's a rooted subtree
#
return True
def find_node(self, address):
"""
Locate node by its address
"""
node = self.get_root()
if address != []:
#
# Not the ROOT node
#
for a in address:
if node.has_children() and a in range(node.n_children()):
node = node.get_child(a)
else:
return None
return node
def nearest_ancestor(self, flag):
"""
Returns the nearest ancestor with given flag
"""
if flag is None:
return self
candidate = self
while not candidate.is_marked(flag):
if candidate.get_depth()==0:
return None
else:
candidate = candidate.get_parent()
return candidate
def contains(self, tree):
"""
Determine whether self contains a given node
"""
if tree.get_depth() < self.get_depth():
return False
elif tree == self:
return True
else:
while tree.get_depth() > self.get_depth():
tree = tree.get_parent()
if self == tree:
return True
#
# Reached the end
#
return False
def coarsen(self, subforest_flag=None, coarsening_flag=None,
new_label=None, clean_up=True, debug=False):
"""
        Coarsen tree by deleting (or unlabeling) the children of nodes
        marked with coarsening_flag or, if no flag is given, of nodes
        whose children are all LEAF nodes within the subforest.
"""
if subforest_flag is not None:
#
# Subforest specified
#
if not self.is_marked(subforest_flag):
#
# Tree not in subforest, nothing to coarsen
#
return
#
# Check whether to coarsen
#
coarsen = False
if coarsening_flag is not None:
#
# Check whether tree is flagged (if applicable)
#
if self.is_marked(coarsening_flag):
coarsen = True
else:
#
# Are children LEAF nodes?
#
if self.has_children(flag=subforest_flag):
#
# Check if children are in subforest
#
for child in self.get_children():
#
# All children have to be LEAF nodes
#
coarsen = True
if child.get_node_type()!='LEAF':
coarsen = False
break
if new_label is not None:
#
# Apply new label to node (regardless of whether to coarsen)
#
self.mark(new_label)
if coarsen:
#
# Coarsen tree
#
if new_label is not None:
#
# If new_label specified, don't mess with children
#
pass
elif subforest_flag is not None:
#
# Remove subforest flag from children
#
for child in self.get_children():
child.unmark(subforest_flag)
else:
#
# Delete children
#
self.delete_children()
if coarsening_flag is not None and clean_up:
#
# Remove coarsening flag if necessary
#
self.unmark(coarsening_flag)
else:
#
# Recursion step, check children
#
if self.has_children(flag=subforest_flag):
for child in self.get_children():
child.coarsen(subforest_flag=subforest_flag,
coarsening_flag=coarsening_flag,
new_label=new_label,
clean_up=clean_up,
debug=debug)
class Forest(object):
"""
Collection of Trees
"""
def __init__(self, trees=None, n_trees=None):
"""
Constructor
"""
if trees is not None:
#
# List of trees specified
#
assert type(trees) is list, 'Trees should be passed as a list.'
self._trees = []
for tree in trees:
self.add_tree(tree)
elif n_trees is not None:
#
# No trees specified, only the number of slots
#
            assert type(n_trees) is int and n_trees > 0,\
                'Input "n_trees" should be a positive integer.'
self._trees = [None]*n_trees
else:
#
# No trees specified: create an empty list.
#
self._trees = []
def n_children(self):
"""
Return the number of trees
"""
return len(self._trees)
def is_regular(self):
"""
Determine whether the forest contains only regular trees
"""
for tree in self._trees:
if not tree.is_regular():
return False
return True
def depth(self):
"""
Determine the depth of the largest tree in the forest
"""
current_depth = 0
for tree in self.get_children():
new_depth = tree.tree_depth()
if new_depth > current_depth:
current_depth = new_depth
return current_depth
def traverse(self, flag=None, mode='depth-first'):
"""
Iterator: Visit every (flagged) node in the forest
Inputs:
flag [None]: node flag
mode: str, type of traversal
'depth-first' [default]: Each node's progeny is visited before
proceeding to next cell.
'breadth-first': All nodes at a given depth are returned before
proceeding to the next level.
Output:
all_nodes: list, of all nodes in tree (marked with flag).
"""
if mode=='depth-first':
queue = deque(reversed(self._trees))
elif mode=='breadth-first':
queue = deque(self._trees)
else:
raise Exception('Input "mode" must be "depth-first"'+\
' or "breadth-first".')
while len(queue) != 0:
if mode == 'depth-first':
node = queue.pop()
elif mode == 'breadth-first':
node = queue.popleft()
if node.has_children():
reverse = True if mode=='depth-first' else False
for child in node.get_children(reverse=reverse):
queue.append(child)
if flag is not None:
if node.is_marked(flag):
yield node
else:
yield node
def get_leaves(self, flag=None, subforest_flag=None, mode='breadth-first'):
"""
Return all marked LEAF nodes (nodes with no children) of current subtree
Inputs:
*flag: If flag is specified, return all leaf nodes within rooted
subtree marked with flag (or an empty list if there are none).
*subforest_flag: Label specifying the rooted subtrees (rs) within which
to search for (flagged) leaves.
Outputs:
leaves: list, of LEAF nodes.
Note:
The rooted subtree must contain all ancestors of a marked node
"""
#
# Get all leaves of the subtree
#
leaves = []
for node in self.traverse(flag=subforest_flag, mode=mode):
if not node.has_children(flag=subforest_flag):
leaves.append(node)
#
# Return marked leaves
#
if flag is None:
return leaves
else:
return [leaf for leaf in leaves if leaf.is_marked(flag)]
def root_subtrees(self, flag):
"""
Mark all ancestors of flagged node with same flag, to turn flag into
a subtree marker.
Note: If no node is flagged, then only flag the root nodes.
"""
#
# Search through all nodes
#
for root_node in self.get_children():
#
# Mark all root nodes with flag
#
root_node.mark(flag)
for node in root_node.traverse():
#
# Look for marked subnodes
#
if node.is_marked(flag):
#
# If node is flagged, mark all its ancestors & siblings
#
ancestor = node
while ancestor.has_parent():
ancestor = ancestor.get_parent()
# Mark ancestor
ancestor.mark(flag)
for child in ancestor.get_children():
# Mark siblings
child.mark(flag)
def subtrees_rooted(self, flag):
"""
Determine whether a given flag defines a rooted subtree
        Note: This takes roughly the same amount of work as root_subtrees
"""
if flag is None:
#
# Forest itself is always one of rooted subtrees
#
return True
#
# Search through all nodes
#
for root_node in self.get_children():
#
# Check if root nodes are marked
#
if not root_node.is_marked(flag):
return False
else:
for node in root_node.traverse():
if node.is_marked(flag):
#
                        # Check that ancestors and siblings of flagged node
# are also marked
#
ancestor = node
while ancestor.has_parent():
ancestor = ancestor.get_parent()
if not ancestor.is_marked(flag):
#
# Ancestor not marked: not a rooted subtree
#
return False
for child in ancestor.get_children():
if not child.is_marked(flag):
#
# Sibling not marked
#
return False
#
# No problems: it's a forest of rooted subtrees
#
return True
def find_node(self, address):
"""
Locate a tree node by its address
Inputs:
address: list of branches along which to find node in tree
"""
# Reverse address
address = address[::-1]
node = self
while len(address)>0:
a = address.pop()
if node.has_children():
if a not in range(node.n_children()):
return None
else:
node = node.get_child(a)
return node
def has_children(self, flag=None):
"""
Determine whether the forest contains any trees
"""
if len(self._trees) > 0:
if flag is None:
return True
else:
return any(tree for tree in self.get_children(flag=flag))
else:
return False
def get_child(self, position):
"""
Returns the tree at a given position
"""
assert position < len(self._trees),\
'Input "position" exceeds number of trees.'
        assert type(position) is int, \
'Input "position" should be a nonnegative integer. '
return self._trees[position]
def get_children(self, flag=None, reverse=False):
"""
Iterate over (all) (flagged) trees in the forest
"""
if not reverse:
if flag is None:
return self._trees
else:
children = []
for tree in self._trees:
if tree.is_marked(flag):
children.append(tree)
return children
else:
if flag is None:
return self._trees[::-1]
else:
children = []
for tree in reversed(self._trees):
                    if tree.is_marked(flag):
                        children.append(tree)
                return children
def add_tree(self, tree):
"""
Add a new tree to the current forest
"""
assert isinstance(tree, Tree), \
'Can only add trees to the forest.'
self._trees.append(tree)
tree.plant_in_forest(self, self.n_children()-1)
def remove_tree(self, position):
"""
Remove a tree from the forest.
"""
        assert type(position) is int, \
            'Input "position" should be an integer.'
assert position < len(self._trees), \
'Input "position" exceeds number of trees.'
tree = self.get_child(position)
tree.remove_from_forest()
del self._trees[position]
def record(self, flag):
"""
Mark all trees in current forest with flag
"""
for tree in self.get_children():
tree.mark(flag, recursive=True)
def coarsen(self, subforest_flag=None, coarsening_flag=None,
new_label=None, clean_up=True, debug=False):
"""
Coarsen (sub)forest (delimited by 'subforest_flag', by (possibly)
merging (=deleting or unlabeling the siblings of) children of nodes
marked with 'coarsening_flag' and labeling said nodes with new_label.
If subforest_flag is None, coarsen all nodes
If new_label is None, then:
- either remove subforest flag (if there is one), or
- delete child nodes
Inputs:
*subforest_flag: flag, specifying the subforest being coarsened.
            *coarsening_flag: flag, specifying nodes in subforest whose children
are to be deleted/unmarked.
*new_label: flag, specifying the new subforest.
*clean_up: bool, remove coarsening_flag after use.
"""
#
# Ensure the subforest is rooted
#
if subforest_flag is not None:
self.root_subtrees(subforest_flag)
for tree in self.get_children():
tree.coarsen(subforest_flag=subforest_flag,
coarsening_flag=coarsening_flag,
new_label=new_label,
clean_up=clean_up, debug=debug)
"""
if coarsening_flag is not None:
#
# Coarsen
#
for tree in self.get_children():
coarsened = False
if tree.is_marked(coarsening_flag):
#
# Coarsen tree
#
if new_label is not None:
#
# Mark tree with new label and move on
#
tree.mark(new_label)
continue
elif subforest_flag is not None:
#
# Remove subforest flag from progeny
#
for child in tree.get_children(subforest_flag):
child.unmark(subforest_flag, recursive=True)
else:
#
# Delete children
#
tree.delete_children()
# Record coarsened
coarsened = True
elif tree.get_node_type()=='LEAF':
#
# Already a leaf: no need to coarsen
#
coarsened = True
while not coarsened:
if subforest_flag is not None:
if tree.has_children(subforest_flag):
for child in tree.get_children():
else:
pass
for child in tree.get_children():
pass
else:
#
# Don't coarsen yet, go to children
#
if tree.has_children(subforest_flag):
for child in tree.get_children():
pass
if clean_up:
to_clean = []
#
# Look for marked leaves within the submesh
#
for leaf in self.get_leaves(subforest_flag=subforest_flag):
#
# During coarsening, some leaves may already be unmarked
#
if debug:
print('leaf info')
leaf.info()
if subforest_flag is not None:
if not leaf.is_marked(subforest_flag):
continue
#
# Find nodes that must be coarsened
#
if not leaf.has_parent():
if debug:
print('ROOT Node')
#
# Leaf without parent is a ROOT: must be part of the new mesh.
#
if new_label is not None:
#
# Mark leaf with new_label
#
leaf.mark(new_label)
if clean_up and coarsening_flag is not None:
#
# Remove coarsening flag
#
to_clean.append(leaf)
# On to the next leaf
continue
#
# Can get parent
#
parent = leaf.get_parent()
if debug:
print('LEAF has parent')
parent.info()
#
# Determine whether to coarsen
#
if coarsening_flag is None:
coarsen = True
elif parent.is_marked(coarsening_flag):
coarsen = True
if clean_up:
#
# Remove coarsening flag
#
parent.unmark(coarsening_flag, recursive=True)
else:
coarsen = False
if debug:
print('Coarsen', coarsen)
if not coarsen:
#
# Don't coarsen
#
if new_label is not None:
#
# Apply new label to leaf and siblings
#
for child in parent.get_children():
child.mark(new_label)
# Move to the next LEAF
continue
else:
#
# Coarsen
#
if subforest_flag is None and new_label is None:
#
# Delete marked node's children
#
parent.delete_children()
if debug:
print('Deleting children')
parent.info()
elif new_label is None:
#
# Remove 'subforest_label' from leaf and siblings
#
for child in parent.get_children():
child.unmark(subforest_flag)
if debug:
print('Removing subforest_flag')
for child in parent.get_children():
print(child.is_marked(subforest_flag))
else:
#
# Mark parents with new_label
#
parent.mark(new_label)
if debug:
print('Marking parent with new label')
parent.info()
if clean_up and coarsening_flag is not None:
#
# Remove coarsening flag
#
parent.unmark(coarsening_flag)
if debug:
print('removing flag', coarsening_flag)
parent.info()
#
# Apply new label to coarsened submesh if necessary
#
if new_label is not None:
self.root_subtrees(new_label)
"""
def refine(self, subforest_flag=None, refinement_flag=None, new_label=None,
clean_up=True):
"""
Refine (sub)forest (delimited by 'subforest_flag'), by (possibly)
splitting (subforest)nodes with refinement_flag and marking their
children (with new_label).
Inputs:
subforest_flag: flag, used to specify the subforest being refined
refinement_flag: flag, specifying the nodes within the submesh that
are being refined.
new_label: flag, new label to be applied to refined submesh
clean_up: bool, remove the "refinement_flag" once the cell is split.
"""
#
# Ensure that the subforest is rooted
#
if subforest_flag is not None:
self.root_subtrees(subforest_flag)
#
# Look for marked leaves within the submesh
#
for leaf in self.get_leaves(subforest_flag=subforest_flag):
#
# Mark tree with new label to ensure new forest contains old subforest
#
if new_label is not None:
leaf.mark(new_label)
#
# If the refinement flag is used, ensure that the node is marked
# before continuing.
#
if refinement_flag is not None:
if not leaf.is_marked(refinement_flag):
continue
#
# Add new children if necessary
#
if not leaf.has_children():
leaf.split()
#
# Label each (new) child
#
for child in leaf.get_children():
if new_label is None and subforest_flag is None:
#
# No labels specified: do nothing
#
continue
elif new_label is None:
#
# No new label given, use the subforest label
#
child.mark(subforest_flag)
else:
#
# New label given, mark child with new label
#
child.mark(new_label)
#
# Remove refinement flag
#
if refinement_flag is not None and clean_up:
leaf.unmark(refinement_flag)
#
# Label ancestors of newly labeled children
#
if new_label is not None:
self.root_subtrees(new_label)
class Vertex(object):
"""
Description:
Attributes:
coordinates: double, tuple (x,y)
flag: boolean
Methods:
"""
def __init__(self, coordinates):
"""
Description: Constructor
Inputs:
coordinates: double tuple, x- and y- coordinates of vertex
on_boundary: boolean, true if on boundary
"""
if isinstance(coordinates, numbers.Real):
#
# Coordinate passed as a real number 1D
#
dim = 1
coordinates = (coordinates,) # recast coordinates as tuple
elif type(coordinates) is tuple:
#
# Coordinate passed as a tuple
#
dim = len(coordinates)
assert dim <= 2, 'Only 1D and 2D meshes supported.'
else:
raise Exception('Enter coordinates as a number or a tuple.')
self.__coordinate = coordinates
self._flags = set()
self.__dim = dim
self.__periodic_pair = set()
self.__is_periodic = False
def coordinates(self):
"""
Return coordinates tuple
"""
return self.__coordinate
def dim(self):
"""
Return the dimension of the vertex
"""
return self.__dim
def mark(self, flag=None):
"""
Mark Vertex
Inputs:
flag: int, optional label
"""
if flag is None:
self._flags.add(True)
else:
self._flags.add(flag)
def unmark(self, flag=None):
"""
Unmark Vertex
Inputs:
flag: label to be removed
"""
#
# Remove label from own list
#
if flag is None:
# No flag specified -> delete all
self._flags.clear()
else:
# Remove specified flag (if present)
if flag in self._flags: self._flags.remove(flag)
def is_marked(self,flag=None):
"""
Check whether Vertex is marked
Input: flag, label for QuadCell: usually one of the following:
True (catchall), 'split' (split cell), 'count' (counting)
"""
if flag is None:
# No flag -> check whether set is empty
if self._flags:
return True
else:
return False
else:
            # Check whether given label is contained in cell's set
return flag in self._flags
def is_periodic(self):
"""
Determine whether a Vertex lies on a periodic boundary
"""
return self.__is_periodic
def set_periodic(self, periodic=True):
"""
Label vertex periodic
"""
self.__is_periodic = periodic
def set_periodic_pair(self, cell_vertex_pair):
"""
Pair a periodic vertex with its periodic counterpart. The periodic
pair can be accessed by specifying the neighboring interval (in 1D)
or cell (in 2D).
Inputs:
            cell_vertex_pair: tuple (cell, vertex), where
                In 1D: cell is the Interval on which the paired vertex resides
                In 2D: cell is the Cell in which the paired vertex resides
                and vertex is the Vertex periodically paired with self
See also: get_periodic_pair
"""
assert self.is_periodic(), 'Vertex should be periodic.'
if self.dim()==1:
#
# 1D: There is only one pairing for the entire mesh
#
interval, vertex = cell_vertex_pair
assert isinstance(vertex, Vertex), \
'Input "vertex" should be of class "Vertex".'
assert isinstance(interval, Interval), \
'Input "interval" should be of class "Interval".'
assert vertex.is_periodic(), \
'Input "vertex" should be periodic.'
#
# 1D: Store periodic pair
#
self.__periodic_pair.add((interval, vertex))
elif self.dim()==2:
#
# 2D
#
c_nb, v_nb = cell_vertex_pair
assert isinstance(v_nb, Vertex), \
'Input "cell_vertex_pair[1]" should be of class "Vertex".'
assert isinstance(c_nb, Cell), \
'Input "cell_vertex_pair[0]" should be of class "HalfEdge".'
assert v_nb.is_periodic(), \
'Input "cell_vertex_pair[1]" should be periodic.'
#
# Collect all possible c/v pairs in a set
#
cell_vertex_pairs = v_nb.get_periodic_pair().union(set([cell_vertex_pair]))
assert len(cell_vertex_pairs)!=0, 'Set of pairs should be nonempty'
for c_nb, v_nb in cell_vertex_pairs:
#
# Check whether v_nb already in list
#
in_list = False
for c, v in self.get_periodic_pair():
if v==v_nb and c.contains(c_nb):
#
# Vertex already appears in list
#
in_list = True
break
if not in_list:
#
# Not in list, add it
#
self.__periodic_pair.add((c_nb, v_nb))
def get_periodic_pair(self, cell=None):
"""
Returns the other vertex that is mapped onto self through periodicity
Input:
cell: Cell/HalfEdge in which paired vertex resides
"""
if cell is None:
#
# Return all cell, vertex pairs
#
return self.__periodic_pair
else:
#
# Return all paired vertices within a given cell
#
vertices = [v for c, v in self.__periodic_pair if c==cell]
return vertices
class HalfEdge(Tree):
"""
Description: Half-Edge in Quadtree mesh
Attributes:
base: Vertex, at base
head: Vertex, at head
twin: HalfEdge, in adjoining cell pointing from head to base
cell: QuadCell, lying to half edge's left
Methods:
"""
def __init__(self, base, head, cell=None, previous=None, nxt=None,
twin=None, parent=None, position=None, n_children=2,
regular=True, forest=None, flag=None, periodic=False):
"""
Constructor
Inputs:
base: Vertex, at beginning
head: Vertex, at end
parent: HalfEdge, parental
cell: QuadCell, lying to the left of half edge
previous: HalfEdge, whose head is self's base
nxt: HalfEdge, whose base is self's head
twin: Half-Edge, in adjoining cell pointing from head to base
position: int, position within parental HalfEdge
n_children: int, number of sub-HalfEdges
regular: bool, do all tree subnodes have the same no. of children?
forest: Forest, clever list of trees containing self
flag: (set of) int/string/bool, used to mark half-edge
periodic: bool, True if HalfEdge lies on a periodic boundary
"""
#
# Initialize Tree structure
#
Tree.__init__(self, n_children=n_children, regular=regular,
parent=parent, position=position, forest=forest, flag=flag)
#
# Assign head and base
#
self.set_vertices(base, head)
#
# Check parent
#
if parent is not None:
assert isinstance(parent, HalfEdge), \
'Parent should be a HalfEdge.'
#
# Assign incident cell
#
if cell is not None:
assert isinstance(cell, Cell), \
'Input "cell" should be a Cell object.'
self.__cell = cell
#
# Assign previous half-edge
#
if previous is not None:
assert isinstance(previous, HalfEdge), \
'Input "previous" should be a HalfEdge object.'
assert self.base()==previous.head(),\
'Own base should equal previous head.'
self.__previous = previous
#
# Assign next half-edge
#
if nxt is not None:
assert isinstance(nxt, HalfEdge), \
'Input "nxt" should be a HalfEdge object.'
assert self.head()==nxt.base(), \
'Own head should equal base of next.'
self.__next = nxt
#
# Mark periodic
#
self.__is_periodic = periodic
#
# Assign twin half-edge
#
if twin is not None:
assert isinstance(twin, HalfEdge), \
'Input "twin" should be a HalfEdge object.'
self.assign_twin(twin)
else:
self.__twin = None
def is_periodic(self):
"""
Returns True is the HalfEdge lies on a periodic boundary
"""
return self.__is_periodic
def set_periodic(self, periodic=True):
"""
Flag HalfEdge as periodic
"""
self.__is_periodic = periodic
def pair_periodic_vertices(self):
"""
Pair up HalfEdge vertices that are periodic
"""
if self.is_periodic():
#
# Pair up periodic vertices along half_edge
#
cell = self.cell()
cell_nb = self.twin().cell()
assert cell_nb is not None,\
'Periodic HalfEdge: Neighboring cell should not be None.'
#
# Pair up adjacent vertices
#
for v, v_nb in [(self.base(), self.twin().head()),
(self.head(), self.twin().base())]:
# Label vertices 'periodic'
v.set_periodic()
v_nb.set_periodic()
# Add own vertex-cell pair to own set of periodic pairs
v.set_periodic_pair((cell, v))
v_nb.set_periodic_pair((cell_nb, v_nb))
# Add adjoining vertex-cell pair to set of periodic pairs
v.set_periodic_pair((cell_nb, v_nb))
v_nb.set_periodic_pair((cell, v))
def base(self):
"""
Returns half-edge's base vertex
"""
return self.__base
def head(self):
"""
Returns half-edge's head vertex
"""
return self.__head
def get_vertices(self):
"""
Returns all half-edge vertices
"""
return [self.__base, self.__head]
def set_vertices(self, base, head):
"""
Define base and head vertices
"""
assert isinstance(base, Vertex) and isinstance(head, Vertex),\
'Inputs "base" and "head" should be Vertex objects.'
self.__base = base
self.__head = head
def cell(self):
"""
Returns the cell containing half-edge
"""
return self.__cell
def assign_cell(self, cell):
"""
Assign cell to half-edge
"""
self.__cell = cell
def twin(self):
"""
Returns the half-edge's twin
"""
return self.__twin
def assign_twin(self, twin):
"""
Assigns twin to half-edge
"""
if not self.is_periodic():
assert self.base()==twin.head() and self.head()==twin.base(),\
'Own head vertex should be equal to twin base vertex & vice versa.'
self.__twin = twin
def delete_twin(self):
"""
Deletes half-edge's twin
"""
self.__twin = None
def make_twin(self):
"""
Construct a twin HalfEdge
"""
assert not self.is_periodic(), \
'Twin HalfEdge of a periodic HalfEdge may have different vertices.'
if self.has_parent() and self.get_parent().twin() is not None:
twin_parent = self.get_parent().twin()
twin_position = 1-self.get_node_position()
else:
twin_parent = None
twin_position = None
twin = HalfEdge(self.head(), self.base(), parent=twin_parent,
position=twin_position)
self.assign_twin(twin)
twin.assign_twin(self)
return twin
def next(self):
"""
Returns the next half-edge, whose base is current head
"""
return self.__next
def assign_next(self, nxt):
"""
Assigns half edge to next
"""
if nxt is None:
return
else:
if not self.is_periodic():
assert self.head() == nxt.base(), \
'Own head vertex is not equal to next base vertex.'
self.__next = nxt
if nxt.previous() != self:
nxt.assign_previous(self)
def previous(self):
"""
Returns previous half-edge, whose head is current base
"""
return self.__previous
def assign_previous(self, previous):
"""
Assigns half-edge to previous
"""
if previous is None:
return
else:
if not self.is_periodic():
assert self.base() == previous.head(), \
'Own base vertex is not equal to previous head vertex.'
self.__previous = previous
if previous.next()!=self:
previous.assign_next(self)
def split(self):
"""
Refine current half-edge (overwrite Tree.split)
Note:
This function could potentially be generalized to HalfEdges with
multiple children (already implemented for Intervals).
"""
#
# Check if twin has been split
#
twin_split = False
twin = self.twin()
if twin is not None and twin.has_children():
t0, t1 = twin.get_children()
twin_split = True
else:
t0, t1 = None, None
#
# Determine whether to inherit midpoint vertex
#
if twin_split and not self.is_periodic():
#
# Share twin's midpoint Vertex
#
vm = t0.head()
else:
#
# Compute new midpoint vertex
#
x = convert_to_array([self.base().coordinates(),\
self.head().coordinates()])
xm = 0.5*(x[0,:]+x[1,:])
vm = Vertex(tuple(xm))
#
# Define own children and combine with twin children
#
c0 = HalfEdge(self.base(), vm, parent=self, twin=t1, position=0, periodic=self.is_periodic())
c1 = HalfEdge(vm, self.head(), parent=self, twin=t0, position=1, periodic=self.is_periodic())
#
# Assign new HalfEdges to twins if necessary
#
if twin_split:
t0.assign_twin(c1)
t1.assign_twin(c0)
#
# Save the babies
#
self._children[0] = c0
self._children[1] = c1
def to_vector(self):
"""
Returns the vector associated with the HalfEdge
"""
x = convert_to_array([self.base().coordinates(),\
self.head().coordinates()])
return x[1,:] - x[0,:]
def length(self):
"""
Returns the HalfEdge's length
"""
return np.linalg.norm(self.to_vector())
def unit_normal(self):
"""
Returns the unit normal vector of HalfEdge, pointing to the right
Note: This only works in 2D
"""
x0, y0 = self.base().coordinates()
x1, y1 = self.head().coordinates()
u = np.array([y1-y0, x0-x1])
return u/np.linalg.norm(u, 2)
def contains_points(self, points):
"""
Determine whether points lie on a HalfEdge
Inputs:
            points: double, (n, dim) array or list of points to be tested
        Outputs:
            in_half_edge: bool, (n,) array, True for each point on the HalfEdge
        """
tol = 1e-10
x0 = convert_to_array(self.base().coordinates())
v = self.to_vector()
dim = x0.shape[1]
p = convert_to_array(points, dim)
n_points = p.shape[0]
        in_half_edge = np.ones(n_points, dtype=bool)
if np.abs(v[0])<tol:
#
# Vertical line
#
assert np.abs(v[1])>tol, 'Half-edge is too short'
# Locate y-coordinate along segment
t = (p[:,1]-x0[:,1])/v[1]
# Discard points whose location parameter t is not in [0,1]
in_half_edge[np.abs(t-0.5)>0.5] = False
# Discard points whose x-values don't lie on Edge
in_half_edge[np.abs(p[:,0]-x0[0,0])>tol] = False
        elif dim==1 or np.abs(v[1])<1e-14:
#
# Horizontal line
#
assert np.abs(v[0])>tol, 'Half-edge is too short'
# Locate x-coordinate along line
t = (p[:,0]-x0[:,0])/v[0]
# Check that t in [0,1]
in_half_edge[np.abs(t-0.5)>0.5] = False
if dim > 1:
# Check distance between y-values
in_half_edge[np.abs(p[:,1]-x0[0,1])>tol] = False
else:
#
# Skew line
#
s = (p[:,0]-x0[:,0])/v[0]
t = (p[:,1]-x0[:,1])/v[1]
# Check coordinates have same location parameters
in_half_edge[np.abs(t-s)>tol] = False
# Check that location parameter lies in [0,1]
in_half_edge[np.abs(t-0.5)>0.5] = False
return in_half_edge
def intersects_line_segment(self, line):
"""
Determine whether the HalfEdge intersects with a given line segment
Input:
line: double, list of two tuples
Output:
boolean, true if intersection, false otherwise.
Note: This only works in 2D
"""
# Express edge as p + t*r, t in [0,1]
p = np.array(self.base().coordinates())
r = np.array(self.head().coordinates()) - p
# Express line as q + u*s, u in [0,1]
q = np.array(line[0])
s = np.array(line[1]) - q
if abs(np.cross(r,s)) < 1e-14:
#
# Lines are parallel
#
if abs(np.cross(q-p,r)) < 1e-14:
#
# Lines are collinear
#
t0 = np.dot(q-p,r)/np.dot(r,r)
t1 = t0 + np.dot(s,r)/np.dot(r,r)
if (max(t0,t1) >= 0) and (min(t0,t1) <= 1):
#
# Line segments overlap
#
return True
else:
return False
else:
#
# Lines not collinear
#
return False
else:
#
# Lines not parallel
#
t = np.cross(q-p,s)/np.cross(r,s)
u = np.cross(p-q,r)/np.cross(s,r)
if 0 <= t <= 1 and 0 <= u <= 1:
#
# Line segments meet
#
return True
else:
return False
def reference_map(self, x_in, mapsto='physical',
jac_p2r=False, jac_r2p=False,
hess_p2r=False, hess_r2p=False):
"""
Map points x from the reference interval to the physical HalfEdge or
vice versa.
Inputs:
x_in: double, (n,) array or a list of points to be mapped.
jac_p2r: bool, return jacobian of mapping from physical to
reference domain
jac_r2p: bool, return jacobian of mapping from reference to
physical domain
hess_p2r: bool, return hessian of mapping from physical to
reference domain
            hess_r2p: bool, return hessian of mapping from reference to
                physical domain
mapsto: str, 'physical' (map from reference to physical), or
'reference' (map from physical to reference).
Outputs:
x_trg: double, (n,) array of mapped points
mg: dictionary, of jacobians and hessians associated with the
mapping.
jac_p2r: double, n-list of physical-to-reference jacobians
jac_r2p: double, n-list of reference-to-physical jacobians
hess_p2r: double, n-list of physical-to-reference hessians
                hess_r2p: double, n-list of reference-to-physical hessians
"""
#
# Preprocessing
#
if mapsto=='physical':
#
# Check that input is an array
#
assert type(x_in) is np.ndarray, \
'If "mapsto" is "physical", then input should '+\
'be an array.'
#
# Check that points contained in [0,1]
#
            assert x_in.min()>=0 and x_in.max()<=1, \
                'Reference points should be between 0 and 1.'
elif mapsto=='reference':
x_in = convert_to_array(x_in, dim=self.head().dim())
#
# Check that points lie on the HalfEdge
#
assert all(self.contains_points(x_in)), \
'Some points are not contained in the HalfEdge.'
#
# Compute mapped points
#
n = x_in.shape[0]
x0, y0 = self.base().coordinates()
x1, y1 = self.head().coordinates()
if mapsto == 'physical':
x_trg = [(x0 + (x1-x0)*xi, y0 + (y1-y0)*xi) for xi in x_in]
elif mapsto == 'reference':
if not np.isclose(x0, x1):
#
# Not a vertical line
#
x_trg = list((x_in[:,0]-x0)/(x1-x0))
elif not np.isclose(y0, y1):
#
# Not a horizontal line
#
x_trg = list((x_in[:,1]-y0)/(y1-y0))
#
# Compute the Jacobians and Hessians (if asked for)
#
if any([jac_r2p, jac_p2r, hess_r2p, hess_p2r]):
#
# Gradients of the mapping sought
#
# Initialize map gradients (mg) dictionary
mg = {}
if jac_r2p:
#
# Jacobian of mapping from reference to physical region
#
mg['jac_r2p'] = [np.array([[x1-x0],[y1-y0]])]*n
if jac_p2r:
#
# Jacobian of mapping from physical to reference region
# TODO: Shouldn't this also be a list?
mg['jac_p2r'] = np.array([[1/(x1-x0), 1/(y1-y0)]])
if hess_r2p:
#
# Hessian of mapping from reference to physical region
#
mg['hess_r2p'] = [np.zeros((2,2))]*n
if hess_p2r:
#
                # Hessian of mapping from physical to reference region
#
mg['hess_p2r'] = [np.zeros((2,2))]*n
return x_trg, mg
else:
#
# No gradients of the mapping sought
#
return x_trg
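    # Hedged usage sketch: mapping reference coordinates t in [0,1] along a
    # HalfEdge. Assumes the classes in this module; for mapsto='physical' the
    # input must be an ndarray, and the output is a list of coordinate tuples.
    #
    #   he = HalfEdge(Vertex((0,0)), Vertex((2,1)))
    #   he.reference_map(np.array([0, 0.5, 1]))
    #   # -> [(0.0, 0.0), (1.0, 0.5), (2.0, 1.0)]
    #
    #   x, mg = he.reference_map(np.array([0.5]), jac_r2p=True)
    #   # mg['jac_r2p'][0] is the column vector [[2], [1]] (head minus base)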
"""
# TODO: Remove this...
#
# Compute the Jacobian
#
if jacobian:
if mapsto == 'physical':
#
# Derivative of mapping from refence to physical cell
#
jac = [np.array([[x1-x0],[y1-y0]])]*n
elif mapsto == 'reference':
#
# Derivative of inverse map
#
jac = np.array([[1/(x1-x0), 1/(y1-y0)]])
#
# Compute the Hessian (linear mapping, so Hessian = 0)
#
hess = np.zeros((2,2))
#
# Return output
#
if jacobian and hessian:
return x_trg, jac, hess
elif jacobian and not hessian:
return x_trg, jac
elif hessian and not jacobian:
return x_trg, hess
else:
return x_trg
"""
class Interval(HalfEdge):
"""
Interval Class (1D equivalent of a Cell)
"""
def __init__(self, vertex_left, vertex_right, n_children=2, \
regular=True, parent=None, position=None, forest=None, \
periodic=False):
"""
Constructor
"""
assert vertex_left.dim()==1 and vertex_right.dim()==1, \
'Input "half_edge" should be one dimensional.'
HalfEdge.__init__(self, vertex_left, vertex_right, \
n_children=n_children, regular=regular,\
parent=parent, position=position, forest=forest,\
periodic=periodic)
def get_vertices(self):
"""
Return interval endpoints
"""
return [self.base(), self.head()]
def get_vertex(self, position):
"""
Return a given vertex
"""
assert position in [0,1], 'Position should be 0 or 1.'
return self.base() if position==0 else self.head()
def assign_previous(self, prev):
"""
Assign a previous interval
"""
if prev is not None:
assert isinstance(prev, Interval), \
'Input "prev" should be an Interval.'
HalfEdge.assign_previous(self, prev)
def assign_next(self, nxt):
"""
Assign the next interval
"""
if nxt is not None:
assert isinstance(nxt, Interval), \
'Input "nxt" should be an Interval.'
HalfEdge.assign_next(self,nxt)
def get_neighbor(self, pivot, subforest_flag=None, mode='physical'):
"""
Returns the neighboring interval
Input:
pivot: int, 0 (=left) or 1 (=right) or Vertex
subforest_flag (optional): marker to specify submesh
mode: str, specify the type of neighbor search. When intervals are
arranged within a forest, two adjoining intervals may be on
different refinement levels.
mode='physical': return the interval adjoining input interval
on the mesh
mode='level-wise': return the neighboring interval on the same
level in the forest.
"""
#
# Pivot is a vertex
#
if isinstance(pivot, Vertex):
if pivot==self.base():
pivot = 0
elif pivot==self.head():
pivot = 1
else:
raise Exception('Vertex not an interval endpoint')
if mode=='level-wise':
# =================================================================
# Return Level-wise Neighbor
# =================================================================
if pivot == 0:
#
# Left neighbor
#
nbr = self.previous()
if nbr is None:
#
# No previous, may still be periodic
#
v = self.base()
if v.is_periodic():
#
# Get coarsest cell periodically associated with v
#
for pair in v.get_periodic_pair():
nbr, dummy = pair
while nbr.get_depth()<self.get_depth():
#
# Search children until depth matches
#
if nbr.has_children(flag=subforest_flag):
nbr = nbr.get_child(0)
else:
#
# There are no children at same depth as interval
#
return None
#
# Found nbr at correct depth
#
return nbr
else:
#
# Return previous interval
#
return nbr
elif pivot == 1:
#
# Right neighbor
#
nbr = self.next()
if nbr is None:
#
# No next, may still be periodic
#
v = self.head()
if v.is_periodic():
#
# Get coarsest cell periodically associated with v
#
for pair in v.get_periodic_pair():
nbr, dummy = pair
while nbr.get_depth()<self.get_depth():
#
# Iterate through children until depth matches
#
if nbr.has_children(flag=subforest_flag):
nbr = nbr.get_child(1)
else:
#
# There are no cells matching cell's depth
#
return None
#
# Found nbr at correct depth
#
return nbr
else:
#
# Return next interval
#
return nbr
elif mode=='physical':
# =================================================================
# Return Physical Neighbor
# =================================================================
#
# Move left or right
#
if pivot == 0:
#
# Left neighbor
#
itv = self
prev = itv.previous()
#
# Go up the tree until there is a "previous"
#
while prev is None:
if itv.has_parent():
#
# Go up one level and check
#
itv = itv.get_parent()
prev = itv.previous()
else:
#
# No parent: check whether vertex is periodic
#
if itv.base().is_periodic():
for pair in itv.base().get_periodic_pair():
prev, dummy = pair
else:
return None
#
# Go down tree (to the right) as far as you can
#
nxt = prev
while nxt.has_children(flag=subforest_flag):
nxt = nxt.get_child(nxt.n_children()-1)
return nxt
elif pivot==1:
#
# Right neighbor
#
itv = self
nxt = itv.next()
#
# Go up the tree until there is a "next"
#
while nxt is None:
if itv.has_parent():
#
# Go up one level and check
#
itv = itv.get_parent()
nxt = itv.next()
else:
#
# No parent: check whether vertex is periodic
#
if itv.head().is_periodic():
for nxt, dummy in itv.head().get_periodic_pair():
pass
else:
return None
#
# Go down tree (to the left) as far as you can
#
prev = nxt
while prev.has_children(flag=subforest_flag):
prev = prev.get_child(0)
return prev
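    # Hedged usage sketch: physical vs. level-wise neighbor lookup. Assumes an
    # interval bisected with split() below; both calls are expected to return
    # the right-hand sibling, since the children adjoin at the midpoint and
    # live on the same refinement level.
    #
    #   I = Interval(Vertex(0.0), Vertex(1.0))
    #   I.split()
    #   left = I.get_child(0)
    #   left.get_neighbor(1)                      # physical neighbor
    #   left.get_neighbor(1, mode='level-wise')   # same-level neighbor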
def split(self, n_children=None):
"""
Split a given interval into subintervals
"""
#
# Determine interval endpoints
#
x0, = self.base().coordinates()
x1, = self.head().coordinates()
n = self.n_children()
#
# Loop over children
#
for i in range(n):
#
# Determine children base and head Vertices
#
if i==0:
base = self.base()
if i==n-1:
head = self.head()
else:
head = Vertex(x0+(i+1)*(x1-x0)/n)
#
# Define new child interval
#
subinterval = Interval(base, head, parent=self, \
regular=self.is_regular(),\
position=i, n_children=n_children)
#
# Store in children
#
self._children[i] = subinterval
#
# The head of the current subinterval
# becomes the base of the next one
base = subinterval.head()
#
# Assign previous/next
#
for child in self.get_children():
i = child.get_node_position()
#
# Assign previous
#
if i != 0:
# Middle children
child.assign_previous(self.get_child(i-1))
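    # Hedged usage sketch: bisecting an interval (n_children defaults to 2).
    # Assumes Vertex accepts a scalar coordinate, as used elsewhere in this
    # module.
    #
    #   I = Interval(Vertex(0.0), Vertex(1.0))
    #   I.split()
    #   [c.base().coordinates() for c in I.get_children()]
    #   # -> [(0.0,), (0.5,)]  (the midpoint vertex is shared by both children)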
def bin_points(self, points, i_points=None, subforest_flag=None):
"""
Determine the set of smallest subintervals (within submesh) that
contain the set of points, as well as the indices of these.
Inputs:
points: set of points
i_points: indices of these points
subforest_flag: submesh flag
Outputs:
bins: (cell, index) tuples of cells containing subsets of the
points, and the points' indices.
"""
assert all(self.contains_points(points)), \
'Not all points contained in cell'
sf = subforest_flag
# Convert points to array
x = convert_to_array(points)
if i_points is None:
i_points = np.arange(x.shape[0])
bins = []
#
# Cell is not in submesh
#
if not (sf is None or self.is_marked(flag=sf)):
#
# Move up tree until in submesh
#
if self.has_parent():
cell = self.get_parent()
bins.extend(cell.bin_points(x, i_points, subforest_flag=sf))
return bins
#
# Cell in submesh
#
if self.has_children(flag=sf):
#
# Points must be contained in some child cells
#
for child in self.get_children(flag=sf):
in_cell = child.contains_points(x)
if any(in_cell):
# Extract the points in child and bin
y = x[in_cell]
i_y = i_points[in_cell]
c_bin = child.bin_points(y,i_y, subforest_flag=sf)
bins.extend(c_bin)
# Remove points contained in child from list
x = x[~in_cell]
i_points = i_points[~in_cell]
else:
#
# Base case
#
bins.append((self, i_points))
return bins
return bins
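    # Hedged usage sketch: binning points into the leaves of a refined
    # interval. With the bisected interval from the split() example, the
    # expected output pairs each child with the indices of its points:
    #
    #   I = Interval(Vertex(0.0), Vertex(1.0))
    #   I.split()
    #   I.bin_points([0.1, 0.9])
    #   # -> [(I.get_child(0), array([0])), (I.get_child(1), array([1]))]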
def contains_points(self, points):
"""
Determine which of the points in x are contained in the interval.
Inputs:
points: double, collection of 1D points
Outputs:
in_cell: bool, (n_points,) array whose ith entry is True if point i
is contained in interval, False otherwise.
"""
        # Get interval endpoints
x0, = self.base().coordinates()
x1, = self.head().coordinates()
# Convert points to (n_points,1) array
x = convert_to_array(points,1)
in_cell = np.ones(x.shape, dtype=bool)
in_cell[x<x0] = False
in_cell[x>x1] = False
return in_cell.ravel()
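    # Hedged usage sketch: endpoints count as inside, since only strict
    # inequalities mark points as outside.
    #
    #   I = Interval(Vertex(0.0), Vertex(1.0))
    #   I.contains_points([-0.5, 0.3, 1.0])
    #   # -> array([False,  True,  True])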
def reference_map(self, x_in, mapsto='physical',
jac_r2p=False, jac_p2r=False,
hess_r2p=False, hess_p2r=False,
jacobian=False, hessian=False):
"""
Map points x from the reference to the physical Interval or vice versa
Inputs:
x_in: double, (n,) array or a list of points to be mapped
jac_p2r: bool, return jacobian of mapping from physical to
reference domain
jac_r2p: bool, return jacobian of mapping from reference to
physical domain
hess_p2r: bool, return hessian of mapping from physical to
reference domain
            hess_r2p: bool, return hessian of mapping from reference to
                physical domain
mapsto: str, 'physical' (map from reference to physical), or
'reference' (map from physical to reference).
Outputs:
x_trg: double, (n,) array of mapped points
mg: dictionary, of jacobians and hessians associated with the
mapping.
jac_p2r: double, n-list of physical-to-reference jacobians
jac_r2p: double, n-list of reference-to-physical jacobians
hess_p2r: double, n-list of physical-to-reference hessians
            hess_r2p: double, n-list of reference-to-physical hessians
"""
#
# Convert input to array
#
x_in = convert_to_array(x_in,dim=1)
#
# Compute mapped points
#
n = len(x_in)
x0, = self.get_vertex(0).coordinates()
x1, = self.get_vertex(1).coordinates()
#
# Compute target point
#
if mapsto == 'physical':
x_trg = x0 + (x1-x0)*x_in
elif mapsto == 'reference':
x_trg = (x_in-x0)/(x1-x0)
#
# Compute the Jacobians and Hessians (if asked for)
#
if any([jac_r2p, jac_p2r, hess_r2p, hess_p2r]):
#
# Gradients of the mapping sought
#
# Initialize map gradients (mg) dictionary
mg = {}
if jac_r2p:
#
# Jacobian of mapping from reference to physical region
#
mg['jac_r2p'] = [(x1-x0)]*n
if jac_p2r:
#
# Jacobian of mapping from physical to reference region
#
mg['jac_p2r'] = [1/(x1-x0)]*n
if hess_r2p:
#
# Hessian of mapping from reference to physical region
#
mg['hess_r2p'] = list(np.zeros(n))
if hess_p2r:
#
                # Hessian of mapping from physical to reference region
#
mg['hess_p2r'] = list(np.zeros(n))
return x_trg, mg
else:
#
# No gradients of the mapping sought
#
return x_trg
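    # Hedged usage sketch: the affine map x = x0 + (x1-x0)*t and its inverse.
    #
    #   I = Interval(Vertex(1.0), Vertex(3.0))
    #   x, mg = I.reference_map(np.array([0, 0.5, 1]), jac_r2p=True)
    #   # x ~ [1., 2., 3.] and mg['jac_r2p'] == [2.0, 2.0, 2.0] (the length)
    #   I.reference_map(x, mapsto='reference')   # ~ [0., 0.5, 1.]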
class Cell(Tree):
"""
Cell object: A two dimensional polygon
"""
def __init__(self, half_edges, n_children=0, parent=None, position=None, grid=None):
"""
Constructor
Inputs:
half_edges: HalfEdge, list of half-edges that determine the cell
n_children: int, number of sub-cells within cell
"""
Tree.__init__(self, n_children=n_children, parent=parent, \
position=position, forest=grid)
# =====================================================================
# Half-Edges
# =====================================================================
assert type(half_edges) is list, 'Input "half_edges" should be a list.'
#
# 2D Cells are constructed from lists of HalfEdges
#
for he in half_edges:
assert isinstance(he, HalfEdge), 'Not a HalfEdge.'
self._half_edges = half_edges
for he in self._half_edges:
# Assign self as incident cell
he.assign_cell(self)
#
# String half-edges together
#
n_hes = self.n_half_edges()
for i in range(n_hes):
he_nxt = self._half_edges[(i+1)%n_hes]
he_cur = self._half_edges[i]
he_cur.assign_next(he_nxt)
he_nxt.assign_previous(he_cur)
#
# Check that base of first halfedge coincides with head of last
#
assert half_edges[0].base()==half_edges[-1].head(),\
'HalfEdges should form a closed loop.'
#
# Check winding order
#
self.check_winding_order()
def n_half_edges(self):
"""
Return the number of half_edges
"""
return len(self._half_edges)
def get_half_edge(self, position):
"""
Return specific half_edge
"""
assert position>=0 and position<self.n_half_edges(),\
'Input "position" incompatible with number of HalfEdges'
return self._half_edges[position]
def get_half_edges(self):
"""
Iterate over half-edges
"""
return self._half_edges
def incident_half_edge(self, vertex, reverse=False):
"""
        Returns the half-edge whose head (or, if reverse is True, whose
        base) coincides with the given vertex
"""
assert isinstance(vertex, Vertex), \
'Input "vertex" should be of type Vertex.'
for half_edge in self.get_half_edges():
if reverse:
#
# HalfEdge's base coincides with vertex
#
if half_edge.base()==vertex:
return half_edge
else:
#
# HalfEdge's head coincides with vertex
#
if half_edge.head()==vertex:
return half_edge
#
# No such HalfEdge
#
return None
def area(self):
"""
Determine the area of the polygon
"""
area = 0
for half_edge in self.get_half_edges():
x0, y0 = half_edge.base().coordinates()
x1, y1 = half_edge.head().coordinates()
area += (x0+x1)*(y1-y0)
return 0.5*area
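    # Hedged usage sketch: the loop above is the shoelace formula; vertices
    # must wind counter-clockwise for a positive area. For the unit square,
    # 0.5*(0 + 2 + 0 + 0) = 1.
    #
    #   vs = [Vertex((0,0)), Vertex((1,0)), Vertex((1,1)), Vertex((0,1))]
    #   cell = Cell([HalfEdge(vs[i], vs[(i+1) % 4]) for i in range(4)])
    #   cell.area()   # -> 1.0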
def bounding_box(self):
"""
Returns the cell's bounding box in the form of a tuple (x0,x1,y0,y1),
so that the cell is contained in the rectangle [x0,x1]x[y0,y1]
"""
xy = convert_to_array(self.get_vertices(), 2)
x0 = np.min(xy[:,0], axis=0)
x1 = np.max(xy[:,0], axis=0)
y0 = np.min(xy[:,1], axis=0)
y1 = np.max(xy[:,1], axis=0)
return x0, x1, y0, y1
def check_winding_order(self):
"""
Check whether the winding order is correct
"""
winding_error = 'Cell vertices not ordered correctly.'
area = self.area()
assert area > 0, winding_error
def n_vertices(self):
"""
Return the number of vertices
"""
return self.n_half_edges()
def get_vertex(self, position):
"""
Return a specific vertex
"""
assert position < self.n_vertices(), 'Input "position" incorrect.'
half_edge = self.get_half_edge(position)
return half_edge.base()
def get_vertices(self):
"""
Returns the vertices of the current cell.
Outputs:
vertices: list of vertices
"""
return [half_edge.base() for half_edge in self.get_half_edges()]
def get_neighbors(self, pivot, flag=None):
"""
Returns all neighboring cells about a given pivot
Input:
pivot: Vertex/HalfEdge,
- If the pivot is a HalfEdge, then neighbors are cells
containing the twin HalfEdge
- If it's a Vertex, then the neighbors are all cells (of
the "same" size) that contain the vertex
flag: marker - only return neighbors with given marker
Output:
neighbor(s):
- If the pivot is a HalfEdge, then return a Cell/None
- If the pivot is a Vertex, then return a list of Cells
Note: Neighbors are chosen via shared edges, which means
Not OK, Ok + is a neighbor of o, but x is not
----- ----- -------------
| x | | x | | + | |
---*---- ----*---- ----- x
| x | | x | x | | o | |
----- --------- -------------
"""
if isinstance(pivot, HalfEdge):
# =================================================================
# Direction is given by a HalfEdge
# =================================================================
twin = pivot.twin()
if twin is not None:
#
# Halfedge has a twin
#
neighbor = twin.cell()
if flag is not None:
if neighbor.is_marked(flag):
return neighbor
else:
return None
else:
return neighbor
elif isinstance(pivot, Vertex):
# =================================================================
# Direction is determined by a Vertex
# =================================================================
#
# Anti-clockwise
#
neighbors = []
cell = self
while True:
#
# Get neighbor
#
half_edge = cell.incident_half_edge(pivot)
neighbor = cell.get_neighbors(half_edge)
#
# Move on
#
if neighbor is None:
break
elif neighbor==self:
#
# Full rotation or no neighbors
#
return neighbors
else:
#
                    # Got a neighbor!
#
neighbors.append(neighbor)
cell = neighbor
if pivot.is_periodic() and len(pivot.get_periodic_pair(cell))!=0:
pivot = pivot.get_periodic_pair(cell)[0]
#
# Clockwise
#
neighbors_clockwise = []
cell = self
while True:
#
# Get neighbor
#
half_edge = cell.incident_half_edge(pivot, reverse=True)
neighbor = cell.get_neighbors(half_edge)
#
# Move on
#
if neighbor is None:
break
elif neighbor==self:
#
# Full rotation or no neighbors
#
return neighbors
else:
#
# Got a neighbor
#
neighbors_clockwise.append(neighbor)
cell = neighbor
if pivot.is_periodic() and len(pivot.get_periodic_pair(cell))!=0:
pivot = pivot.get_periodic_pair(cell)[0]
#
# Combine clockwise and anticlockwise neighbors
#
neighbors.extend(reversed(neighbors_clockwise))
if flag is not None:
return [nb for nb in neighbors if nb.is_marked(flag)]
else:
return neighbors
def contains_points(self, points, tol=1e-10):
"""
        Determine which of the given points lie inside the cell
Input:
point: tuple (x,y), list of tuples, or (n,2) array
Output:
in_cell: boolean array (n,1), True if cell contains points,
False otherwise
"""
xy = convert_to_array(points, 2)
x,y = xy[:,0], xy[:,1]
n_points = len(x)
        in_cell = np.ones(n_points, dtype=bool)
for half_edge in self.get_half_edges():
#
# Traverse vertices in counter-clockwise order
#
x0, y0 = half_edge.base().coordinates()
x1, y1 = half_edge.head().coordinates()
# Determine which points lie outside cell
            pos_means_left = (y-y0)*(x1-x0) - (x-x0)*(y1-y0)
in_cell[pos_means_left<-tol] = False
"""
if len(in_cell)==1:
return in_cell[0]
else:
return in_cell
"""
return in_cell
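    # Hedged usage sketch: each HalfEdge of the CCW polygon defines a
    # half-plane; a point is inside iff it lies (weakly) to the left of
    # every edge.
    #
    #   vs = [Vertex((0,0)), Vertex((1,0)), Vertex((1,1)), Vertex((0,1))]
    #   cell = Cell([HalfEdge(vs[i], vs[(i+1) % 4]) for i in range(4)])
    #   cell.contains_points([(0.5, 0.5), (1.5, 0.5)])
    #   # -> array([ True, False])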
def intersects_line_segment(self, line):
"""
Determine whether cell intersects with a given line segment
Input:
line: double, list of two tuples (x0,y0) and (x1,y1)
Output:
intersects: bool, true if line segment and cell intersect
Modified: 06/04/2016
"""
#
# Check whether line is contained in rectangle
#
if all(self.contains_points([line[0], line[1]])):
return True
#
# Check whether line intersects with any cell half_edge
#
for half_edge in self.get_half_edges():
if half_edge.intersects_line_segment(line):
return True
#
# If function has not terminated yet, there is no intersection
#
return False
class QuadCell(Cell, Tree):
"""
Quadrilateral cell
"""
def __init__(self, half_edges, parent=None, position=None, grid=None):
"""
Constructor
"""
assert len(half_edges)==4, 'QuadCells contain only 4 HalfEdges.'
Cell.__init__(self, half_edges, n_children=4, parent=parent,
position=position, grid=grid)
#
# Check whether cell's parent is a rectangle
#
if self.has_parent():
is_rectangle = self.get_parent().is_rectangle()
elif self.in_forest() and self.get_forest().is_rectangular:
is_rectangle = True
else:
is_rectangle = True
for i in range(4):
he = half_edges[i].to_vector()
he_nxt = half_edges[(i+1)%4].to_vector()
on_axis = min(abs(he)) <1e-12
perpendicular = abs(np.dot(he, he_nxt)) < 1e-12
if not (perpendicular and on_axis):
is_rectangle = False
break
self._is_rectangle = is_rectangle
def is_rectangle(self):
"""
Is the cell a rectangle?
"""
return self._is_rectangle
def split(self, flag=None):
"""
Split QuadCell into 4 subcells (and mark children with flag)
"""
assert not self.has_children(), 'Cell already split.'
#
# Middle Vertex
#
xx = convert_to_array(self.get_vertices())
v_m = Vertex((np.mean(xx[:,0]),np.mean(xx[:,1])))
interior_half_edges = []
for half_edge in self.get_half_edges():
#
# Split each half_edge
#
if not half_edge.has_children():
half_edge.split()
#
# Form new HalfEdges to and from the center
#
h_edge_up = HalfEdge(half_edge.get_child(0).head(),v_m)
h_edge_down = h_edge_up.make_twin()
# Add to list
interior_half_edges.append([h_edge_up, h_edge_down])
#
# Form new cells using new half_edges
#
i = 0
for half_edge in self.get_half_edges():
#
# Define Child's HalfEdges
#
h1 = half_edge.get_child(0)
h2 = interior_half_edges[i][0]
h3 = interior_half_edges[(i-1)%self.n_half_edges()][1]
h4 = half_edge.previous().get_child(1)
hes = deque([h1, h2, h3, h4])
hes.rotate(i)
hes = list(hes)
#
# Define new QuadCell
#
self._children[i] = QuadCell(hes, parent=self, position=i)
# Increment counter
i += 1
if flag is not None:
for child in self.get_children():
child.mark(flag)
#
# Pair up periodic vertices
#
for half_edge in self.get_half_edges():
for he_child in half_edge.get_children():
if he_child.is_periodic() and he_child.twin() is not None:
he_child.pair_periodic_vertices()
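    # Hedged usage sketch: splitting a unit-square QuadCell. All four edge
    # midpoints and the center vertex are created; child 0 is the SW subcell.
    #
    #   vs = [Vertex((0,0)), Vertex((1,0)), Vertex((1,1)), Vertex((0,1))]
    #   qc = QuadCell([HalfEdge(vs[i], vs[(i+1) % 4]) for i in range(4)])
    #   qc.split()
    #   qc.n_children()                              # -> 4
    #   qc.get_child(0).get_vertex(2).coordinates()  # -> (0.5, 0.5), the center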
def bin_points(self, points, i_points=None, subforest_flag=None):
"""
        Returns a list of the smallest flagged subcells containing at least
        one point, together with the indices of the included points.
Inputs:
points: points in cell, to be categorized
i_points: point indices (if contained within a larger array).
subforest_flag: submesh indicator
Outputs:
bins: list of (cell, i_points) pairs enumerating all cells
that contain points, and the indices of these.
"""
#
# Check that cell contains points
#
assert all(self.contains_points(points)), \
'Not all points contained in cell'
sf = subforest_flag
# Convert points to array
x = convert_to_array(points)
if i_points is None:
i_points = np.arange(x.shape[0])
bins = []
#
# Cell is not in submesh
#
if not (sf is None or self.is_marked(flag=sf)):
#
# Move up tree until in submesh
#
if self.has_parent():
cell = self.get_parent()
bins.extend(cell.bin_points(x, i_points, subforest_flag=sf))
return bins
#
# Cell is in submesh
#
if self.has_children(flag=sf):
#
# Points must be contained in some child cells
#
for child in self.get_children(flag=sf):
in_cell = child.contains_points(x)
if any(in_cell):
# Extract the points in child and bin
y = x[in_cell]
i_y = i_points[in_cell]
c_bin = child.bin_points(y,i_y, subforest_flag=sf)
bins.extend(c_bin)
# Remove points contained in child from list
x = x[~in_cell]
i_points = i_points[~in_cell]
else:
#
# Base case
#
bins.append((self, i_points))
return bins
return bins
def reference_map(self, x_in, mapsto='physical',
jac_p2r=False, jac_r2p=False,
hess_p2r=False, hess_r2p=False,
jacobian=False, hessian=False):
"""
Bilinear map between reference cell [0,1]^2 and physical QuadCell
Inputs:
x_in: double, (n,) array or a list of points.
jac_p2r: bool, return jacobian of mapping from physical to
reference domain
jac_r2p: bool, return jacobian of mapping from reference to
physical domain
hess_p2r: bool, return hessian of mapping from physical to
reference domain
            hess_r2p: bool, return hessian of mapping from reference to
                physical domain
mapsto: str, 'physical' (map from reference to physical), or
'reference' (map from physical to reference).
Outputs:
x_trg: double, (n,) array of mapped points
mg: dictionary, of jacobians and hessians associated with the
mapping.
jac_p2r: double, n-list of (2,2) physical-to-reference
jacobians.
jac_r2p: double, n-list of (2,2) reference-to-physical
jacobians.
hess_p2r: double, n-list of (2,2,2) physical-to-reference
hessians.
            hess_r2p: double, n-list of (2,2,2) reference-to-physical
hessians.
"""
#
# Convert input to array
#
x_in = convert_to_array(x_in, dim=2)
n = x_in.shape[0]
        assert x_in.shape[1]==2, 'Input "x_in" has incorrect dimension.'
#
# Get cell corner vertices
#
x_verts = convert_to_array(self.get_vertices())
p_sw_x, p_sw_y = x_verts[0,:]
p_se_x, p_se_y = x_verts[1,:]
p_ne_x, p_ne_y = x_verts[2,:]
p_nw_x, p_nw_y = x_verts[3,:]
if mapsto=='physical':
#
# Map points from [0,1]^2 to the physical cell, using bilinear
# nodal basis functions
#
# Points in reference domain
s, t = x_in[:,0], x_in[:,1]
# Mapped points
x = p_sw_x*(1-s)*(1-t) + p_se_x*s*(1-t) +\
p_ne_x*s*t + p_nw_x*(1-s)*t
y = p_sw_y*(1-s)*(1-t) + p_se_y*s*(1-t) +\
p_ne_y*s*t + p_nw_y*(1-s)*t
# Store points in an array
x_trg = np.array([x,y]).T
elif mapsto=='reference':
#
            # Map from the physical to the reference domain (explicit
            # inverse for rectangles, Newton iteration otherwise)
#
# Points in physical domain
x, y = x_in[:,0], x_in[:,1]
if self.is_rectangle():
#
# Cell is a rectangle - the inverse mapping is explicit
#
s = (x-p_sw_x)/(p_se_x-p_sw_x)
t = (y-p_sw_y)/(p_nw_y-p_sw_y)
x_trg = np.array([s,t]).T
else:
#
# Cell is quadrilateral - the inverse mapping must be estimated
#
# Initialize points in reference domain
s, t = 0.5*np.ones(n), 0.5*np.ones(n)
n_iterations = 5
for dummy in range(n_iterations):
#
# Compute residual
#
rx = p_sw_x*(1-s)*(1-t) + p_se_x*s*(1-t) \
+ p_ne_x*s*t + p_nw_x*(1-s)*t - x
ry = p_sw_y*(1-s)*(1-t) + p_se_y*s*(1-t) \
+ p_ne_y*s*t + p_nw_y*(1-s)*t - y
#
# Compute jacobian
#
drx_ds = -p_sw_x*(1-t) + p_se_x*(1-t) + p_ne_x*t - p_nw_x*t # J11
dry_ds = -p_sw_y*(1-t) + p_se_y*(1-t) + p_ne_y*t - p_nw_y*t # J21
drx_dt = -p_sw_x*(1-s) - p_se_x*s + p_ne_x*s + p_nw_x*(1-s) # J12
dry_dt = -p_sw_y*(1-s) - p_se_y*s + p_ne_y*s + p_nw_y*(1-s) # J22
#
# Newton Update:
#
Det = drx_ds*dry_dt - drx_dt*dry_ds
s -= ( dry_dt*rx - drx_dt*ry)/Det
t -= (-dry_ds*rx + drx_ds*ry)/Det
#
# Project onto [0,1]^2
#
s = np.minimum(np.maximum(s,0),1)
t = np.minimum(np.maximum(t,0),1)
x_trg = np.array([s,t]).T
#
# Compute the Jacobians and Hessians (if asked for)
#
if any([jac_r2p, jac_p2r, hess_r2p, hess_p2r]):
#
# Gradients of the mapping sought
#
# Initialize map gradients (mg) dictionary
mg = {}
if jac_r2p or jac_p2r:
#
# Compute Jacobian of the forward mapping
#
xs = -p_sw_x*(1-t) + p_se_x*(1-t) + p_ne_x*t - p_nw_x*t # J11
ys = -p_sw_y*(1-t) + p_se_y*(1-t) + p_ne_y*t - p_nw_y*t # J21
xt = -p_sw_x*(1-s) - p_se_x*s + p_ne_x*s + p_nw_x*(1-s) # J12
yt = -p_sw_y*(1-s) - p_se_y*s + p_ne_y*s + p_nw_y*(1-s) # J22
if jac_r2p:
#
# Jacobian of mapping from reference to physical region
#
mg['jac_r2p'] = [np.array([[xs[i], xt[i]], [ys[i], yt[i]]])\
for i in range(n)]
if jac_p2r:
#
# Jacobian of mapping from physical to reference region
#
# Compute matrix inverse of jacobian for backward mapping
Det = xs*yt-xt*ys
sx = yt/Det
sy = -xt/Det
tx = -ys/Det
ty = xs/Det
mg['jac_p2r'] = [np.array([[sx[i], sy[i]],[tx[i], ty[i]]])\
for i in range(n)]
if hess_r2p:
#
# Hessian of mapping from reference to physical region
#
if self.is_rectangle():
# Linear mapping (no curvature)
hr2p = [np.zeros((2,2,2)) for dummy in range(n)]
else:
hr2p = []
# Nonlinear mapping: compute curvature for each point
for i in range(n):
h = np.zeros((2,2,2))
xts = p_sw_x - p_se_x + p_ne_x - p_nw_x
yts = p_sw_y - p_se_y + p_ne_y - p_nw_y
h[:,:,0] = np.array([[0, xts], [xts, 0]])
h[:,:,1] = np.array([[0, yts], [yts, 0]])
hr2p.append(h)
# Store result
mg['hess_r2p'] = hr2p
if hess_p2r:
#
# Hessian of mapping from physical to reference region
#
if self.is_rectangle():
# Linear mapping (no curvature)
hp2r = [np.zeros((2,2,2)) for dummy in range(n)]
else:
# Nonlinear mapping: compute curvature for each point
hp2r = []
Dx = p_sw_x - p_se_x + p_ne_x - p_nw_x
Dy = p_sw_y - p_se_y + p_ne_y - p_nw_y
dxt_dx = Dx*sx
dxt_dy = Dx*sy
dyt_dx = Dy*sx
dyt_dy = Dy*sy
dxs_dx = Dx*tx
dxs_dy = Dx*ty
dys_dx = Dy*tx
dys_dy = Dy*ty
dDet_dx = dxs_dx*yt + dyt_dx*xs - dys_dx*xt - dxt_dx*ys
dDet_dy = dxs_dy*yt + dyt_dy*xs - dys_dy*xt - dxt_dy*ys
sxx = dyt_dx/Det - yt*dDet_dx/Det**2
sxy = dyt_dy/Det - yt*dDet_dy/Det**2
syy = -dxt_dy/Det + xt*dDet_dy/Det**2
txx = -dys_dx/Det + ys*dDet_dx/Det**2
txy = -dys_dy/Det + ys*dDet_dy/Det**2
tyy = dxs_dy/Det - xs*dDet_dy/Det**2
for i in range(n):
h = np.zeros((2,2,2))
h[:,:,0] = np.array([[sxx[i], sxy[i]],
[sxy[i], syy[i]]])
h[:,:,1] = np.array([[txx[i], txy[i]],
[txy[i], tyy[i]]])
hp2r.append(h)
# Store result
mg['hess_p2r'] = hp2r
#
# Return points and gradients
#
return x_trg, mg
else:
#
# No gradients of the mapping sought
#
return x_trg
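    # Hedged usage sketch: round trip through the bilinear map on a rectangle
    # (rectangles use the explicit inverse; general quadrilaterals fall back
    # on the Newton iteration above).
    #
    #   vs = [Vertex((0,0)), Vertex((2,0)), Vertex((2,1)), Vertex((0,1))]
    #   qc = QuadCell([HalfEdge(vs[i], vs[(i+1) % 4]) for i in range(4)])
    #   x_phys = qc.reference_map(np.array([[0.5, 0.5]]))   # -> [[1. , 0.5]]
    #   qc.reference_map(x_phys, mapsto='reference')        # -> [[0.5, 0.5]]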
class RVertex(Vertex):
"""
Vertex on the reference cell
"""
def __init__(self, coordinates):
"""
Constructor
"""
Vertex.__init__(self, coordinates)
self.__pos = {0: None, 1: {0: None, 1: None, 2: None, 3: None}}
self.__basis_index = None
def set_pos(self, pos, level=0, child=None):
"""
Set the position of the Dof Vertex
Inputs:
pos: int, a number not exceeding the element's number of dofs
level: int in {0,1}, number specifying the refinement level
( 0 = coarse, 1 = fine ).
child: int in {0,1,2,3}, number specifying the child cell
"""
assert level in [0,1], 'Level should be either 0 or 1.'
if level==0:
self.__pos[level] = pos
if level==1:
assert child in [0,1,2,3], 'Level=1. Child should be specified.'
self.__pos[level][child] = pos
def get_pos(self, level, child=None, debug=False):
"""
Return the dof vertex's position at a given level for a given child
"""
if debug:
print(self.__pos)
if level==1:
assert child is not None, 'On fine level, child must be specified.'
return self.__pos[level][child]
else:
return self.__pos[level]
    def set_basis_index(self, idx):
        """
        Store the index of the basis function associated with this vertex.
        """
        self.__basis_index = idx
class RHalfEdge(HalfEdge):
"""
HalfEdge for reference element
"""
def __init__(self, base, head, dofs_per_edge,
parent=None, position=None, twin=None):
"""
Constructor
"""
HalfEdge.__init__(self, base, head, parent=parent, \
position=position, twin=twin)
#
# Assign edge dof vertices
#
self.__dofs_per_edge = dofs_per_edge
self.assign_edge_dof_vertices()
def get_edge_dof_vertices(self, pos=None):
"""
Returns all dof vertices associated with HalfEdge
"""
if pos is None:
return self.__edge_dof_vertices
else:
return self.__edge_dof_vertices[pos]
    def assign_edge_dof_vertices(self):
        """
        Create (or inherit from the twin) the dof vertices on the HalfEdge.
        """
        if self.twin() is not None:
            #
            # Use RHalfEdge's twin's dof vertices (copy the list before
            # reversing, so the twin's own ordering is left intact)
            #
            assert isinstance(self.twin(), RHalfEdge), \
                'Twin should also be an RHalfEdge'
            edge_dofs = list(self.twin().get_edge_dof_vertices())
            edge_dofs.reverse()
else:
#
# Make new dof Vertices
#
dofs_per_edge = self.n_dofs()
x0, y0 = self.base().coordinates()
x1, y1 = self.head().coordinates()
edge_dofs = []
if dofs_per_edge!=0:
h = 1/(dofs_per_edge+1)
for i in range(dofs_per_edge):
#
# Compute coordinates for dof vertex
#
t = (i+1)*h
x = x0 + t*(x1-x0)
y = y0 + t*(y1-y0)
v = RVertex((x,y))
if self.has_parent():
#
# Check if vertex already exists
#
for v_p in self.get_parent().get_edge_dof_vertices():
if np.allclose(v.coordinates(),v_p.coordinates()):
v = v_p
edge_dofs.append(v)
#
# Store edge dof vertices
#
self.__edge_dof_vertices = edge_dofs
def make_twin(self):
"""
Returns the twin RHalfEdge
"""
return RHalfEdge(self.head(), self.base(), self.n_dofs(), twin=self)
def n_dofs(self):
"""
Returns the number of dofs associated with the HalfEdge
"""
return self.__dofs_per_edge
def split(self):
"""
Refine current half-edge (overwrite Tree.split)
"""
#
# Compute new midpoint vertex
#
x = convert_to_array([self.base().coordinates(),\
self.head().coordinates()])
xm = 0.5*(x[0,:]+x[1,:])
vm = RVertex(tuple(xm))
for v in self.get_edge_dof_vertices():
if np.allclose(vm.coordinates(), v.coordinates()):
vm = v
#
# Define own children independently of neighbor
#
c0 = RHalfEdge(self.base(), vm, self.n_dofs(), parent=self, position=0)
c1 = RHalfEdge(vm, self.head(), self.n_dofs(), parent=self, position=1)
#
# Save the babies
#
self._children[0] = c0
self._children[1] = c1
class RQuadCell(QuadCell):
"""
Quadrilateral Reference Cell
"""
def __init__(self, element, half_edges=None, parent=None, position=None):
"""
Constructor
"""
#
        # Store the reference element and check that it is supported
#
self.element = element
# Extract numbers of degrees of freedom
dofs_per_vertex = element.n_dofs('vertex')
assert dofs_per_vertex<=1, \
'Only elements with at most one dof per vertex supported'
#
# Determine Cell's RHalfEdges
#
if parent is None:
#
# Corner Vertices
#
vertices = [RVertex((0,0)), RVertex((1,0)),
RVertex((1,1)), RVertex((0,1))]
#
# Reference HalfEdges
#
dofs_per_edge = element.n_dofs('edge')
half_edges = []
for i in range(4):
he = RHalfEdge(vertices[i], vertices[(i+1)%4], dofs_per_edge)
half_edges.append(he)
else:
assert half_edges is not None, 'Cell has parent. Specify RefHalfEdges.'
# Define Quadcell
QuadCell.__init__(self, half_edges, parent=parent, position=position)
#
# Assign cell dof vertices
#
self.assign_cell_dof_vertices()
if not self.has_parent():
#
# Assign positions on coarse level
#
self.assign_dof_positions(0)
#
# Split
#
self.split()
#
# Assign positions
#
self.assign_dof_positions(1)
def split(self):
"""
        Split the RQuadCell into 4 subcells
"""
assert not self.has_children(), 'Cell already split.'
#
# Middle Vertex
#
xx = convert_to_array(self.get_vertices())
v_m = RVertex((np.mean(xx[:,0]),np.mean(xx[:,1])))
# Check if this vertex is contained in cell
for v_p in self.get_dof_vertices():
if np.allclose(v_m.coordinates(), v_p.coordinates()):
# Vertex already exists
v_m = v_p
break<|fim▁hole|>
dofs_per_edge = self.element.n_dofs('edge')
interior_half_edges = []
for half_edge in self.get_half_edges():
#
# Split each half_edge
#
if not half_edge.has_children():
half_edge.split()
#
# Form new HalfEdges to and from the center
#
h_edge_up = RHalfEdge(half_edge.get_child(0).head(),v_m, dofs_per_edge)
h_edge_down = h_edge_up.make_twin()
# Add to list
interior_half_edges.append([h_edge_up, h_edge_down])
#
# Form new cells using new half_edges
#
i = 0
for half_edge in self.get_half_edges():
#
# Define Child's HalfEdges
            #
h1 = half_edge.get_child(0)
h2 = interior_half_edges[i][0]
h3 = interior_half_edges[(i-1)%self.n_half_edges()][1]
h4 = half_edge.previous().get_child(1)
hes = deque([h1, h2, h3, h4])
hes.rotate(i)
hes = list(hes)
#
# Define new QuadCell
#
self._children[i] = RQuadCell(self.element, hes, parent=self, position=i)
# Increment counter
i += 1
def assign_cell_dof_vertices(self):
"""
Assign interior dof vertices to cell
"""
dofs_per_cell = self.element.n_dofs('cell')
cell_dofs = []
if dofs_per_cell!=0:
n = int(np.sqrt(dofs_per_cell)) # number of dofs per direction
x0, x1, y0, y1 = self.bounding_box()
h = 1/(n+1) # subcell width
for i in range(n): # y-coordinates
for j in range(n): # x-coordinates
#
# Compute new Vertex
#
v_c = RVertex((x0+(j+1)*h*(x1-x0),y0+(i+1)*h*(y1-y0)))
#
# Check if vertex exists within parent cell
#
inherits_dof_vertex = False
if self.has_parent():
for v_p in self.get_parent().get_cell_dof_vertices():
if np.allclose(v_c.coordinates(), v_p.coordinates()):
cell_dofs.append(v_p)
inherits_dof_vertex = True
break
if not inherits_dof_vertex:
cell_dofs.append(v_c)
self.__cell_dof_vertices = cell_dofs
def get_cell_dof_vertices(self, pos=None):
"""
Return the interior dof vertices
"""
if pos is None:
return self.__cell_dof_vertices
else:
return self.__cell_dof_vertices[pos]
def assign_dof_positions(self, level):
"""
"""
if level==0:
#
# Level 0: Assign positions to vertices on coarse level
#
self.__dof_vertices = {0: [], 1: {0: [], 1: [], 2: [], 3: []}}
count = 0
# Corner dof vertices
for vertex in self.get_vertices():
if self.element.n_dofs('vertex')!=0:
vertex.set_pos(count, level)
self.__dof_vertices[level].append(vertex)
count += 1
# HalfEdge dof vertices
for half_edge in self.get_half_edges():
for vertex in half_edge.get_edge_dof_vertices():
vertex.set_pos(count, level)
self.__dof_vertices[level].append(vertex)
count += 1
# Cell dof vertices
for vertex in self.get_cell_dof_vertices():
vertex.set_pos(count, level)
self.__dof_vertices[level].append(vertex)
count += 1
elif level==1:
#
# Assign positions to child vertices
#
coarse_dofs = [i for i in range(self.element.n_dofs())]
for i_child in range(4):
#
# Add all dof vertices to one list
#
child = self.get_child(i_child)
child_dof_vertices = []
# Dofs at Corners
for vertex in child.get_vertices():
if self.element.n_dofs('vertex')!=0:
child_dof_vertices.append(vertex)
# Dofs on HalfEdges
for half_edge in child.get_half_edges():
for vertex in half_edge.get_edge_dof_vertices():
child_dof_vertices.append(vertex)
# Dofs in Cell
for vertex in child.get_cell_dof_vertices():
child_dof_vertices.append(vertex)
count = 0
for vertex in child_dof_vertices:
if not self.element.torn_element():
#
# Continuous Element (Dof Vertex can be inherited multiple times)
#
vertex.set_pos(count, level=level, child=i_child)
self.__dof_vertices[level][i_child].append(vertex)
count += 1
else:
#
# Discontinuous Element (Dof Vertex can be inherited once)
#
if vertex in self.__dof_vertices[0]:
i_vertex = self.__dof_vertices[0].index(vertex)
if i_vertex in coarse_dofs:
#
# Use vertex within child cell
#
vertex.set_pos(count, level=level, child=i_child)
self.__dof_vertices[level][i_child].append(vertex)
count += 1
# Delete the entry (preventing reuse).
coarse_dofs.pop(coarse_dofs.index(i_vertex))
else:
#
# Vertex has already been used, make a new one
#
vcopy = RVertex(vertex.coordinates())
vcopy.set_pos(count, level=level, child=i_child)
self.__dof_vertices[level][i_child].append(vcopy)
count += 1
else:
#
# Not contained in coarse vertex set
#
vertex.set_pos(count, level=level, child=i_child)
self.__dof_vertices[level][i_child].append(vertex)
count += 1
def get_dof_vertices(self, level=0, child=None, pos=None):
"""
Returns all dof vertices in cell
"""
if level==0:
return self.__dof_vertices[0]
elif level==1:
assert child is not None, 'On level 1, child must be specified.'
if pos is None:
return self.__dof_vertices[1][child]
else:
return self.__dof_vertices[1][child][pos]
class RInterval(Interval):
def __init__(self, element, base=None, head=None,
parent=None, position=None):
"""
Constructor
"""
assert element.dim()==1, 'Element must be one dimensional'
self.element = element
if parent is None:
base = RVertex(0)
head = RVertex(1)
else:
assert isinstance(head, RVertex), 'Input "head" must be an RVertex.'
assert isinstance(base, RVertex), 'Input "base" must be an RVertex.'
Interval.__init__(self, base, head, parent=parent, position=position)
#
# Assign cell dof vertices
#
self.assign_cell_dof_vertices()
if not self.has_parent():
#
# Assign positions on coarse level
#
self.assign_dof_positions(0)
#
# Split
#
self.split()
#
# Assign positions
#
self.assign_dof_positions(1)
def split(self):
"""
Split a given interval into 2 subintervals
"""
#
# Determine interval endpoints
#
x0, = self.base().coordinates()
x1, = self.head().coordinates()
n = self.n_children()
#
# Loop over children
#
for i in range(n):
#
# Determine children base and head Vertices
#
if i==0:
base = self.base()
if i==n-1:
head = self.head()
else:
head = RVertex(x0+(i+1)*(x1-x0)/n)
#
# Check whether Vertex appears in parent
#
for v_p in self.get_dof_vertices():
if np.allclose(head.coordinates(), v_p.coordinates()):
head = v_p
#
# Define new child interval
#
subinterval = RInterval(self.element, base, head, \
parent=self, position=i)
#
# Store in children
#
self._children[i] = subinterval
#
# The head of the current subinterval
# becomes the base of the next one
base = subinterval.head()
#
# Assign previous/next
#
for child in self.get_children():
i = child.get_node_position()
#
# Assign previous
#
if i==0:
                # Leftmost child: inherit the parent's previous
child.assign_previous(self.previous())
else:
# Child in the middle
child.assign_previous(self.get_child(i-1))
#
# Assign next
#
if i==n-1:
                # Rightmost child: inherit the parent's next
child.assign_next(self.next())
    def assign_cell_dof_vertices(self):
        """
        Assign dof vertices to the interior of the interval (the element's
        'edge' dofs in 1D).
        """
        dofs_per_cell = self.element.n_dofs('edge')
cell_dofs = []
if dofs_per_cell !=0:
#
# Compute coordinates for cell dof vertices
#
x0, = self.base().coordinates()
x1, = self.head().coordinates()
h = 1/(dofs_per_cell+1)
for i in range(dofs_per_cell):
x = x0 + (i+1)*h*(x1-x0)
v_c = RVertex(x)
#
# Check if vertex exists within parent cell
#
inherits_dof_vertex = False
if self.has_parent():
for v_p in self.get_parent().get_cell_dof_vertices():
if np.allclose(v_c.coordinates(), v_p.coordinates()):
cell_dofs.append(v_p)
inherits_dof_vertex = True
break
if not inherits_dof_vertex:
cell_dofs.append(v_c)
self.__cell_dof_vertices = cell_dofs
def get_cell_dof_vertices(self, pos=None):
"""
Returns the Dofs associated with the interior of the cell
Note: This function is only used during construction
"""
if pos is None:
return self.__cell_dof_vertices
else:
return self.__cell_dof_vertices[pos]
def get_dof_vertices(self, level=0, child=None, pos=None):
"""
Returns all dof vertices in cell
Inputs:
level: int 0/1, 0=coarse, 1=fine
child: int, child node position within parent (0/1)
pos: int, 0,...n_dofs-1, dof number within cell
"""
if level==0:
return self.__dof_vertices[0]
elif level==1:
assert child is not None, 'On level 1, child must be specified.'
if pos is None:
return self.__dof_vertices[1][child]
else:
return self.__dof_vertices[1][child][pos]
def assign_dof_positions(self, level):
"""
Assigns a number to each dof vertex in the interval.
Note: We only deal with bisection
"""
if level==0:
#
# Level 0: Assign position to vertices on coarse level
#
self.__dof_vertices = {0: [], 1: {0: [], 1: []}}
count = 0
#
# Add endpoints
#
dpv = self.element.n_dofs('vertex')
if dpv != 0:
for vertex in self.get_vertices():
vertex.set_pos(count, level)
self.__dof_vertices[level].append(vertex)
count += 1
#
# Add cell dof vertices
#
for vertex in self.get_cell_dof_vertices():
vertex.set_pos(count, level)
self.__dof_vertices[level].append(vertex)
count += 1
elif level==1:
#
# Assign positions to child vertices
#
coarse_dofs = [i for i in range(self.element.n_dofs())]
for i_child in range(2):
#
# Add all dof vertices to a list
#
child = self.get_child(i_child)
child_dof_vertices = []
# Dofs at corners
for vertex in child.get_vertices():
if self.element.n_dofs('vertex')!=0:
child_dof_vertices.append(vertex)
# Dofs in Interval
for vertex in child.get_cell_dof_vertices():
child_dof_vertices.append(vertex)
#
# Inspect each vertex in the child, to see
# whether it is duplicated in the parent.
#
count = 0
for vertex in child_dof_vertices:
if not self.element.torn_element():
#
# Continuous Element (Dof Vertex can be inherited multiple times)
#
vertex.set_pos(count, level=level, child=i_child)
self.__dof_vertices[level][i_child].append(vertex)
count += 1
else:
#
# Discontinuous Element (Dof Vertex can be inherited once)
#
if vertex in self.__dof_vertices[0]:
i_vertex = self.__dof_vertices[0].index(vertex)
if i_vertex in coarse_dofs:
#
# Use vertex within child cell
#
vertex.set_pos(count, level=level, child=i_child)
self.__dof_vertices[level][i_child].append(vertex)
count += 1
# Delete the entry (preventing reuse).
coarse_dofs.pop(coarse_dofs.index(i_vertex))
else:
#
# Vertex has already been used, make a new one
#
vcopy = RVertex(vertex.coordinates())
vcopy.set_pos(count, level=level, child=i_child)
self.__dof_vertices[level][i_child].append(vcopy)
count += 1
else:
#
# Not contained in coarse vertex set
#
vertex.set_pos(count, level=level, child=i_child)
self.__dof_vertices[level][i_child].append(vertex)
count += 1
'''
class Mesh(object):
"""
Mesh class, consisting of a grid (a doubly connected edge list), as well
as a list of root cells, -half-edges and vertices.
Attributes:
Methods:
"""
def __init__(self, grid):
"""
Constructor
Inputs:
grid: DCEL object, doubly connected edge list specifying
the mesh topology.
"""
self.__grid = grid
# =====================================================================
# Vertices
# =====================================================================
n_vertices = grid.points['n']
vertices = []
for i in range(n_vertices):
vertices.append(Vertex(grid.points['coordinates'][i]))
# =====================================================================
# Half-edges
# =====================================================================
n_he = grid.half_edges['n']
#
# Define Half-Edges via base and head vertices
#
half_edges = []
for i in range(n_he):
i_base, i_head = grid.half_edges['connectivity'][i]
v_base = grid.points['coordinates'][i_base]
v_head = grid.points['coordinates'][i_head]
half_edges.append(HalfEdge(Vertex(v_base), Vertex(v_head)))
#
# Specify relations among Half-Edges
#
for i in range(n_he):
he = half_edges[i]
i_prev = grid.half_edges['prev'][i]
i_next = grid.half_edges['next'][i]
i_twin = grid.half_edges['twin'][i]
he.assign_next(half_edges[i_next])
he.assign_prev(half_edges[i_prev])
if i_twin != -1:
he.assign_twin(half_edges[i_twin])
# =====================================================================
# Cells
# =====================================================================
n_faces = grid.faces['n']
cells = []
for i in range(n_faces):
cell_type = grid.faces['type'][i]
if cell_type == 'interval':
cell = BCell()
pass
elif cell_type == 'triangle':
#cell = TriCell()
pass
elif cell_type == 'quadrilateral':
cell = QuadCell()
else:
unknown_cell_type = 'Unknown cell type. Use "interval", '+\
'"triangle", or "quadrilateral".'
raise Exception(unknown_cell_type)
cells.append(cell)
if grid is not None:
#
# grid specified
#
#assert all(i is None for i in [node, cell, dim]),\
#'Grid specified: All other inputs should be None.'
#
# ROOT node
#
dim = grid.dim()
if dim == 1:
node = BiNode(grid=grid)
elif dim == 2:
node = QuadNode(grid=grid)
else:
raise Exception('Only dimensions 1 and 2 supported.')
#
# Cells
#
node.split()
for pos in node._child_positions:
#
# ROOT cells
#
if dim == 1:
cell = BiCell(grid=grid, position=pos)
elif dim == 2:
cell = QuadCell(grid=grid, position=pos)
child = node.children[pos]
child.link(cell)
#
# Mark nodes, edges, and vertices
#
elif cell is not None:
#
# Cell specified
#
assert all(i is None for i in [node, grid, dim]),\
'Cell specified: All other inputs should be None.'
#
# ROOT node linked to cell
#
dim = cell.dim()
if dim == 1:
node = BiNode(bicell=cell)
elif dim == 2:
node = QuadNode(quadcell=cell)
else:
raise Exception('Only dimensions 1 and 2 supported.')
elif node is not None:
#
# Tree specified
#
assert all(i is None for i in [cell, grid, dim]),\
'Tree specified: All other inputs should be None.'
#
# Default cell
#
dim = node.dim()
if dim == 1:
cnr_vtcs = [0,1]
cell = BiCell(corner_vertices=cnr_vtcs)
elif dim == 2:
cnr_vtcs = [0,1,0,1]
cell = QuadCell(corner_vertices=cnr_vtcs)
node.link(cell)
elif dim is not None:
#
# Dimension specified
#
assert all(i is None for i in [node, cell, grid]),\
'Dimension specified: All other inputs should be None.'
#
# Default cell
#
if dim == 1:
cnr_vtcs = [0,1]
cell = BiCell(corner_vertices=cnr_vtcs)
elif dim == 2:
cnr_vtcs = [0,1,0,1]
cell = QuadCell(corner_vertices=cnr_vtcs)
#
# Default node, linked to cell
#
if dim == 1:
node = BiNode(bicell=cell)
elif dim==2:
node = QuadNode(quadcell=cell)
else:
raise Exception('Only dimensions 1 or 2 supported.')
else:
#
# Default cell
#
cnr_vtcs = [0,1,0,1]
cell = QuadCell(corner_vertices=cnr_vtcs)
node = QuadNode(quadcell=cell)
dim = 2
self.__root_node = node
self.grid = grid
self.__mesh_count = 0
self.__dim = dim
def dim(self):
"""
Return the spatial dimension of the region
"""
return self.__dim
def depth(self):
"""
Return the maximum refinement level
"""
return self.root_node().tree_depth()
def n_nodes(self, flag=None):
"""
Return the number of cells
"""
if hasattr(self, '__n_cells'):
return self.__n_cells
else:
self.__n_cells = len(self.__root_node.get_leaves(flag=flag))
return self.__n_cells
def root_node(self):
"""
Return tree node used for mesh
"""
return self.__root_node
def boundary(self, entity, flag=None):
"""
Returns a set of all boundary entities (vertices/edges)
Input:
entity: str, 'vertices', 'edges', or 'quadcells'
flag:
TODO: Add support for tricells
"""
boundary = set()
print(entity)
print(len(boundary))
for node in self.root_node().get_leaves(flag=flag):
cell = node.cell()
for direction in ['W','E','S','N']:
#
# Look in 4 directions
#
if node.get_neighbor(direction) is None:
if entity=='quadcells':
boundary.add(cell)
break
edge = cell.get_edges(direction)
if entity=='edges':
boundary.add(edge)
if entity=='vertices':
for v in edge.vertices():
boundary.add(np.array(v.coordinates()))
return boundary
def bounding_box(self):
"""
Returns the mesh's bounding box
Output:
box: double, [x_min, x_max, y_min, y_max] if mesh is 2d
and [x_min, x_max] if mesh is 1d.
"""
root = self.root_node()
if root.grid is not None:
#
# DCEL on coarsest level
#
grid = root.grid
if self.dim() == 1:
x_min, x_max = grid.points['coordinates'][[0,-1]]
return [x_min, x_max]
elif self.dim() == 2:
#
# Determine bounding box from boundary points
#
i_vbnd = grid.get_boundary_points()
v_bnd = []
for k in i_vbnd:
v_bnd.append( \
grid.points['coordinates'][i_vbnd[k]].coordinates())
v_bnd = np.array(v_bnd)
x_min, x_max = v_bnd[:,0].min(), v_bnd[:,0].max()
y_min, y_max = v_bnd[:,1].min(), v_bnd[:,1].max()
return [x_min, x_max, y_min, y_max]
else:
#
# No DCEL: Use Cell
#
cell = root.cell()
if cell.dim()==1:
x_min, x_max = cell.get_vertices(pos='corners', as_array=True)
return [x_min, x_max]
elif cell.dim()==2:
vbnd = cell.get_vertices(pos='corners', as_array=True)
x_min, x_max = vbnd[:,0].min(), vbnd[:,0].max()
y_min, y_max = vbnd[:,1].min(), vbnd[:,1].max()
return [x_min, x_max, y_min, y_max]
else:
raise Exception('Only 1D and 2D supported.')
def unmark_all(self, flag=None, nodes=False, cells=False, edges=False,
vertices=False, all_entities=False):
"""
Unmark all nodes, cells, edges, or vertices.
"""
if all_entities:
#
# Unmark everything
#
nodes = True
cells = True
edges = True
vertices = True
for node in self.root_node().traverse():
if nodes:
#
# Unmark node
#
node.unmark(flag=flag, recursive=True)
if cells:
#
# Unmark quad cell
#
node.cell().unmark(flag=flag, recursive=True)
if edges:
#
# Unmark quad edges
#
for edge in node.cell().edges.values():
edge.unmark(flag=flag)
if vertices:
#
# Unmark quad vertices
#
for vertex in node.cell().vertices.values():
vertex.unmark(flag=flag)
def iter_quadedges(self, flag=None, nested=False):
"""
Iterate over cell edges
Output:
quadedge_list, list of all active cell edges
"""
quadedge_list = []
#
# Unmark all edges
#
self.unmark_all(quadedges=True)
for cell in self.iter_quadcells(flag=flag, nested=nested):
for edge_key in [('NW','SW'),('SE','NE'),('SW','SE'),('NE','NW')]:
edge = cell.edges[edge_key]
if not(edge.is_marked()):
#
# New edge: add it to the list
#
quadedge_list.append(edge)
edge.mark()
#
# Unmark all edges again
#
self.unmark_all(quadedges=True)
return quadedge_list
def quadvertices(self, coordinate_array=True, flag=None, nested=False):
"""
Iterate over quad cell vertices
Inputs:
coordinate_array: bool, if true, return vertices as arrays
nested: bool, traverse tree depthwise
Output:
quadvertex_list, list of all active cell vertices
"""
quadvertex_list = []
#
# Unmark all vertices
#
self.unmark_all(quadvertices=True)
for cell in self.iter_quadcells(flag=flag, nested=nested):
for direction in ['SW','SE','NW','NE']:
vertex = cell.vertices[direction]
if not(vertex.is_marked()):
#
# New vertex: add it to the list
#
quadvertex_list.append(vertex)
vertex.mark()
self.unmark_all(quadvertices=True)
if coordinate_array:
return np.array([v.coordinates() for v in quadvertex_list])
else:
return quadvertex_list
def refine(self, flag=None):
"""
Refine mesh by splitting marked LEAF nodes
"""
for leaf in self.root_node().get_leaves(flag=flag):
leaf.split()
def coarsen(self, flag=None):
"""
Coarsen mesh by merging marked LEAF nodes.
Inputs:
flag: str/int, marker flag.
If flag is specified, merge a node if all
of its children are flagged.
If no flag is specified, merge nodes so that
mesh depth is reduced by 1.
"""
root = self.root_node()
if flag is None:
tree_depth = root.tree_depth()
for leaf in root.get_leaves():
if leaf.depth == tree_depth:
leaf.parent.merge()
else:
for leaf in root.get_leaves(flag=flag):
parent = leaf.parent
if all(child.is_marked(flag=flag) \
for child in parent.get_children()):
parent.merge()
def record(self,flag=None):
"""
Mark all mesh nodes with flag
"""
count = self.__mesh_count
for node in self.root_node().traverse(mode='breadth-first'):
if flag is None:
node.mark(count)
else:
node.mark(flag)
self.__mesh_count += 1
def n_meshes(self):
"""
Return the number of recorded meshes
"""
return self.__mesh_count
'''
class DCEL(object):
"""
Description: Doubly connected edge list
Attributes:
__dim: int, dimension of grid
format: str, version of mesh file
is_rectangular: bool, specifying whether 2D grid has rectangular faces
subregions: struct, encoding the mesh's subregions, with fields:
n: int, number of subregions
dim: int, dimension of subregion
tags: int, tags of subregions
names: str, names of subregions
points: struct, encoding the mesh's vertices, with fields:
n: int, number of points
n_dofs: int, number of dofs associated with point
tags: tags associated with vertices
phys: int list, indicating membership to one of the
physical subregions listed above.
geom: int list, indicating membership to certain
geometric entities.
partition: int, list indicating membership to certain
mesh partitions.
half_edge: int array, pointing to a half-edge based at
point.
coordinates: double, list of tuples
edges: struct, encoding the mesh's edges associated with
            specific subregions, with fields:
n: int, number of edges
n_dofs: int, number of dofs associated with edge
tags: struct, tags associated with edges (see points)
connectivity: int, list of sets containing edge vertices
half_edge: int, array pointing to associated half-edge
Edges: Edge list in same order as connectivity
half_edges: struct, encoding the mesh's half-edges
n: int, number of half-edges
n_dofs: int, number of dofs associated with half_edge
tags: struct, tags associated with half-edges (see points)
connectivity: int, list pointing to initial and final
vertices [v1,v2].
prev: int, array pointing to the preceding half-edge
next: int, array pointing to the next half-edge
twin: int, array pointing to the reversed half-edge
edge: int, array pointing to an associated edge
face: int, array pointing to an incident face
        faces: struct, encoding the mesh's faces, with fields:
n: int, number of faces
n_dofs: int, list containing number of dofs per face
type: str, type of face (interval, triangle, or quadrilateral)
tags: tags associated with faces (same as for points)
connectivity: int, list of indices of vertices that make
up faces.
half_edge: int, array pointing to a half-edge on the boundary
Methods:
__init__
initialize_grid_structure
rectangular_grid
grid_from_gmsh
determine_half_edges
dim
get_neighbor
contains_node
Note: The grid can be used to describe the connectivity associated with a
ROOT Tree.
"""
def __init__(self, box=None, resolution=None, periodic=None, dim=None,
x=None, connectivity=None, file_path=None, file_format='gmsh'):
"""
Constructor
Inputs:
box: list of endpoints for rectangular mesh
1D [x_min, x_max]
2D [x_min, x_max, y_min, y_max]
resolution: tuple, with number of cells in each direction
dim: int, spatial dimension of the grid
x: double, (n,) array of points in for constructing a grid
connectivity: int, list of cell connectivities
file_path: str, path to mesh file
file_format: str, type of mesh file (currently only gmsh)
periodic: int, set containing integers 0 and/or 1.
0 in periodic: make periodic in x-direction
1 in periodic: make periodic in y-direction
"""
#
# Initialize struct
#
self.is_rectangular = False
self.is_periodic = False
self.resolution = resolution
self.initialize_grid_structure()
if file_path is not None:
# =================================================================
# Import grid from gmsh
# =================================================================
assert file_format=='gmsh', \
'For input file_format, use "gmsh".'
#
# Import grid from gmsh
#
self.grid_from_gmsh(file_path)
elif x is not None:
# =================================================================
# Generate grid from connectivity
# =================================================================
self.grid_from_connectivity(x, connectivity)
else:
# =================================================================
# Rectangular Grid
# =================================================================
#
# Determine dimension
#
if dim is None:
if resolution is not None:
assert type(resolution) is tuple, \
'Input "resolution" should be a tuple.'
dim = len(resolution)
elif box is not None:
assert type(box) is list, 'Input "box" should be a list.'
if len(box) == 2:
dim = 1
elif len(box) == 4:
dim = 2
else:
box_length = 'Box should be a list of length 2 or 4.'
raise Exception(box_length)
else:
raise Exception('Unable to verify dimension of grid')
self.__dim = dim
#
# Specify box
#
if box is None:
#
# Default boundary box
#
if dim==1:
box = [0,1]
elif dim==2:
box = [0,1,0,1]
#
# Specify resolution
#
if resolution is None:
#
# Default resolution
#
if dim==1:
resolution = (1,)
elif dim==2:
resolution = (1,1)
self.is_rectangular = True
self.rectangular_grid(box=box, resolution=resolution)
# =====================================================================
# Generate doubly connected edge list
# =====================================================================
self.determine_half_edges()
#
# Add periodicity
#
self.periodic_coordinates = {}
if periodic is not None:
if self.dim()==2:
assert self.is_rectangular, \
'Only rectangular meshes can be made periodic'
self.make_periodic(periodic, box)
self.is_periodic = True
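    # Hedged usage sketch: a 2x2 rectangular grid on the unit square. The
    # counts below follow from rectangular_grid(); the number of half-edges
    # (expected: four per face) is filled in by determine_half_edges().
    #
    #   dcel = DCEL(box=[0, 1, 0, 1], resolution=(2, 2))
    #   dcel.points['n']   # -> 9  (3x3 vertex lattice)
    #   dcel.faces['n']    # -> 4  (quadrilateral faces)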
def initialize_grid_structure(self):
"""
Initialize empty grid.
"""
self.format = None
# Subregions
self.subregions = {'dim': [], 'n': None, 'names': [], 'tags': []}
# Points
self.points = {'half_edge': [], 'n': None, 'tags': {}, 'n_dofs': None,
'coordinates': []}
# Edges
# TODO: Remove
self.edges = {'n': None, 'tags': {}, 'n_dofs': None, 'connectivity': []}
# Half-Edges
self.half_edges = {'n': None, 'tags': {}, 'n_dofs': None,
'connectivity': [], 'prev': [], 'next': [],
'twin': [], 'edge': [], 'face': [], 'position': []}
# Faces
self.faces = {'n': None, 'type': [], 'tags': {}, 'n_dofs': [],
'connectivity': []}
def rectangular_grid(self, box, resolution):
"""
Construct a grid on a rectangular region
Inputs:
box: int, tuple giving bounding vertices of rectangular domain:
(x_min, x_max) in 1D, (x_min, x_max, y_min, y_max) in 2D.
resolution: int, tuple giving the number of cells in each direction
"""
assert type(resolution) is tuple, \
'Input "resolution" should be a tuple.'
dim = len(resolution)
if dim == 1:
# =================================================================
# One dimensional grid
# =================================================================
# Generate DCEL
x_min, x_max = box
n_points = resolution[0] + 1
x = np.linspace(x_min, x_max, n_points)
# Store grid information
self.__dim = 1
self.points['coordinates'] = [(xi,) for xi in x]
self.points['n'] = n_points
elif dim == 2:
# =================================================================
# Two dimensional grid
# =================================================================
self.__dim = 2
x_min, x_max, y_min, y_max = box
nx, ny = resolution
n_points = (nx+1)*(ny+1)
self.points['n'] = n_points
#
# Record vertices
#
x = np.linspace(x_min, x_max, nx+1)
y = np.linspace(y_min, y_max, ny+1)
for i_y in range(ny+1):
for i_x in range(nx+1):
self.points['coordinates'].append((x[i_x],y[i_y]))
#
# Face connectivities
#
# Vertex indices
idx = np.arange((nx+1)*(ny+1)).reshape(ny+1,nx+1).T
for i_y in range(ny):
for i_x in range(nx):
fv = [idx[i_x,i_y], idx[i_x+1,i_y],
idx[i_x+1,i_y+1], idx[i_x,i_y+1]]
self.faces['connectivity'].append(fv)
self.faces['n'] = nx*ny
self.faces['type'] = ['quadrilateral']*self.faces['n']
else:
raise Exception('Only 1D/2D supported.')
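    # Example (illustrative): for box=[0, 1, 0, 1] and resolution=(2, 2),
    # the 9 points are numbered row by row (x varying fastest), and each of
    # the 4 faces lists its corner indices counter-clockwise, e.g. face 0
    # is [0, 1, 4, 3].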
def grid_from_connectivity(self, x, connectivity):
"""
Construct grid from connectivity information
"""
points = self.points
x = convert_to_array(x, dim=1)
dim = x.shape[1]
if dim==1:
#
# 1D
#
self.__dim = 1
#
# Store points
#
x = np.sort(x, axis=0) # ensure the vector is sorted
points['coordinates'] = [(xi[0],) for xi in x]
points['n'] = len(x)
elif dim==2:
#
# 2D
#
self.__dim = 2
#
# Store points
#
n_points = x.shape[0]
points['coordinates'] = [(x[i,0],x[i,1]) for i in range(n_points)]
points['n'] = n_points
#
# Store faces
#
faces = self.faces
assert connectivity is not None, 'Specify connectivity.'
assert type(connectivity) is list, \
'Connectivity should be passed as a list.'
n_faces = len(connectivity)
faces['n'] = n_faces
for i in range(n_faces):
assert type(connectivity[i]) is list, \
'Connectivity entries should be lists'
faces['connectivity'].append(connectivity[i])
faces['n_dofs'].append(len(connectivity[i]))
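    # Example (illustrative): two triangles sharing the diagonal of the
    # unit square.
    #
    #   x = [(0, 0), (1, 0), (1, 1), (0, 1)]
    #   dcel = DCEL(x=x, connectivity=[[0, 1, 2], [0, 2, 3]])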
def grid_from_gmsh(self, file_path):
"""
Import computational mesh from a .gmsh file and store it in the grid.
Input:
file_path: str, path to gmsh file
"""
points = self.points
edges = self.edges
faces = self.faces
subregions = self.subregions
#
# Initialize tag categories
#
for entity in [points, edges, faces]:
entity['tags'] = {'phys': [], 'geom': [], 'partition': []}
with open(file_path, 'r') as infile:
while True:
line = infile.readline()
#
# Mesh format
#
if line == '$MeshFormat\n':
# Read next line
line = infile.readline()
self.format = line.rstrip()
# TODO: Put an assert statement here to check version
while line != '$EndMeshFormat\n':
line = infile.readline()
line = infile.readline()
#
# Subregions
#
if line == '$PhysicalNames\n':
#
# Record number of subregions
#
line = infile.readline()
subregions['n'] = int(line.rstrip())
line = infile.readline()
while True:
if line == '$EndPhysicalNames\n':
line = infile.readline()
break
#
# Record names, dimensions, and tags of subregions
#
words = line.split()
name = words[2].replace('"','')
subregions['names'].append(name)
subregions['dim'].append(int(words[0]))
subregions['tags'].append(int(words[1]))
line = infile.readline()
# TODO: Is this necessary?
# =============================================================
# Cell Vertices
# =============================================================
if line == '$Nodes\n':
#
# Record number of nodes
#
line = infile.readline()
points['n'] = int(line.rstrip())
line = infile.readline()
while True:
if line == '$EndNodes\n':
line = infile.readline()
break
#
# Record vertex coordinates
#
words = line.split()
vtx = (float(words[1]),float(words[2]))
points['coordinates'].append(vtx)
line = infile.readline()
# =============================================================
# Faces
# =============================================================
if line == '$Elements\n':
next(infile) # skip 'number of elements' line
line = infile.readline()
n_faces = 0 # count number of faces
while True:
"""
General format for elements
$Elements
n_elements
el_number | el_type* | num_tags** | ...
tag1 .. tag_num_tags |...
node_number_list
*el_type: element type
points: 15 (1 node point)
lines: 1 (2 node line), 0 --------- 1
8 (3 node 2nd order line), 0 --- 2 --- 1
26 (4 node 3rd order line) 0 - 2 - 3 - 1
triangles: 2 (3 node 1st order triangle)
9 (6 node 2nd order triangle)
21 (9 node 3rd order triangle)
quadrilateral: 3 (4 node first order quadrilateral)
10 (9 node second order quadrilateral)
**num_tags:
1st tag - physical entity to which element belongs
(often 0)
2nd tag - number of elementary geometrical entity to
which element belongs (as defined in the
.geo file).
3rd tag - number of the mesh partition to which the
element belongs.
"""
if line == '$EndElements\n':
faces['n'] = n_faces
line = infile.readline()
break
words = line.split()
#
# Identify entity
#
element_type = int(words[1])
if element_type==15:
#
# Point (1 node)
#
dofs_per_entity = 1
entity = points
                        elif element_type==1:
#
# Linear edge (2 nodes)
#
dofs_per_entity = 2
entity = edges
elif element_type==8:
#
# Quadratic edge (3 nodes)
#
dofs_per_entity = 3
entity = edges
elif element_type==26:
#
# Cubic edge (4 nodes)
#
dofs_per_entity = 4
entity = edges
elif element_type==2:
#
# Linear triangular element (3 nodes)
#
dofs_per_entity = 3
entity = faces
entity['type'].append('triangle')
n_faces += 1
elif element_type==9:
#
# Quadratic triangular element (6 nodes)
#
dofs_per_entity = 6
entity = faces
entity['type'].append('triangle')
n_faces += 1
elif element_type==21:
#
# Cubic triangle (10 nodes)
#
dofs_per_entity = 10
entity = faces
entity['type'].append('triangle')
n_faces += 1
elif element_type==3:
#
# Linear quadrilateral (4 nodes)
#
dofs_per_entity = 4
entity = faces
entity['type'].append('quadrilateral')
n_faces += 1
elif element_type==10:
#
# Quadratic quadrilateral (9 nodes)
#
dofs_per_entity = 9
entity = faces
entity['type'].append('quadrilateral')
n_faces += 1
entity['n_dofs'] = dofs_per_entity
#
# Record tags
#
num_tags = int(words[2])
if num_tags > 0:
#
# Record Physical Entity tag
#
entity['tags']['phys'].append(int(words[3]))
else:
#
# Tag not included ... delete
#
entity['tags'].pop('phys', None)
if num_tags > 1:
#
# Record Geometrical Entity tag
#
entity['tags']['geom'].append(int(words[4]))
else:
#
# Tag not included ... delete
#
entity['tags'].pop('geom', None)
if num_tags > 2:
#
# Record Mesh Partition tag
#
entity['tags']['partition'].append(int(words[5]))
else:
#
# Tag not included ... delete
#
entity['tags'].pop('partition', None)
if dofs_per_entity > 1:
#
# Connectivity
#
i_begin = 3 + num_tags
i_end = 3 + num_tags + dofs_per_entity
                            connectivity = [int(words[i]) - 1
                                            for i in range(i_begin, i_end)]
entity['connectivity'].append(connectivity)
line = infile.readline()
if line == '':
break
#
# Check for mixed Faces
#
if len(set(faces['type']))>1:
raise Warning('Face types are mixed')
#
# Turn Edge connectivities into sets
#
for i in range(len(edges['connectivity'])):
edges['connectivity'][i] = frozenset(edges['connectivity'][i])
#
# There are faces, dimension = 2
#
if n_faces > 0:
self.__dim = 2
def determine_half_edges(self):
"""
        Construct the doubly connected edge list for the grid (in place).
The grid should already have the following specified:
1D: points
2D: points, faces
        Currently, only 1D and 2D grids are supported.
"""
#
# Update Point Fields
#
n_points = self.points['n']
        # Note: np.int was removed from NumPy; use the builtin int instead
        self.points['half_edge'] = np.full((n_points,), -1, dtype=int)
# =====================================================================
# Initialize Half-Edges
# =====================================================================
if self.dim()==1:
#
# 1D mesh
#
n_he = self.points['n']-1
elif self.dim()==2:
#
# 2D mesh
#
n_faces = self.faces['n']
n_he = 0
for i in range(n_faces):
n_he += len(self.faces['connectivity'][i])
self.half_edges['n'] = n_he
        self.half_edges['connectivity'] = np.full((n_he,2), -1, dtype=int)
        self.half_edges['prev'] = np.full((n_he,), -1, dtype=int)
        self.half_edges['next'] = np.full((n_he,), -1, dtype=int)
        self.half_edges['twin'] = np.full((n_he,), -1, dtype=int)
        self.half_edges['edge'] = np.full((n_he,), -1, dtype=int)
        self.half_edges['face'] = np.full((n_he,), -1, dtype=int)
# =====================================================================
# Define Half-Edges
# =====================================================================
if self.dim()==1:
#
# 1D: Define HE's and link with others and points
#
n_points = self.points['n']
for i in range(n_points-1):
# Connectivity
self.half_edges['connectivity'][i] = [i,i+1]
# Previous and next half_edge in the DCEL
# NOTE: Here (unlike 2D), prev and next are used to
# navigate in the grid.
self.half_edges['prev'][i] = i-1
self.half_edges['next'][i] = i+1 if i+1<n_points-1 else -1
# Incident half_edge to left endpoint
self.points['half_edge'][i] = i
'''
#
# Twin
#
# Define twin half-edge
self.half_edges['connectivity'][n_points-1+i] = [i+1,i]
self.half_edges['twin'][i] = n_points-1+i
self.half_edges['twin'][n_points-1+i] = i
# Incident half-edge to right endpoint
self.points['half_edge'][i+1] = n_points + i
# Next and previous
self.half_edges['next'][n_points-1+i] = i-1
self.half_edges['prev'][n_points-1+i] = \
i+1 if i+1<n_points else -1
'''
elif self.dim()==2:
#
# 2D: Define HE's and link with others, faces, and points
#
n_faces = self.faces['n']
            self.faces['half_edge'] = np.full((n_faces,), -1, dtype=int)
#
# Loop over faces
#
half_edge_count = 0
for i_fce in range(n_faces):
fc = self.faces['connectivity'][i_fce]
n_sides = len(fc)
#
# Face's half-edge numbers
#
fhe = [half_edge_count + j for j in range(n_sides)]
#
# Update face information
#
self.faces['half_edge'][i_fce] = fhe[0]
for i in range(n_sides):
#
# Update half-edge information
#
#
# Connectivity
#
hec = [fc[i%n_sides], fc[(i+1)%n_sides]]
self.half_edges['connectivity'][fhe[i],:] = hec
'''
DEBUG
if fhe[i] >= n_he:
print('Half-edge index exceeds matrix dimensions.')
print('Number of faces: {0}'.format(self.faces['n']))
print('Number of half-edges: 3x#faces =' + \
' {0}'.format(3*self.faces['n']))
print('#Half-Edges recorded: {0}'+\
''.format(self.half_edges['n']))
'''
#
# Previous Half-Edge
#
self.half_edges['prev'][fhe[i]] = fhe[(i-1)%n_sides]
#
# Next Half-Edge
#
self.half_edges['next'][fhe[i]] = fhe[(i+1)%n_sides]
#
# Face
#
self.half_edges['face'][fhe[i]] = i_fce
#
# Points
#
self.points['half_edge'][fc[i%n_sides]] = fhe[i]
#
# Update half-edge count
#
half_edge_count += n_sides
hec = self.half_edges['connectivity']
# =====================================================================
# Determine twin half_edges
# =====================================================================
for i in range(n_he):
#
# Find the row whose reversed entries match current entry
#
row = np.argwhere((hec[:,0]==hec[i,1]) & (hec[:,1]==hec[i,0]))
if len(row) == 1:
#
# Update twin field
#
                self.half_edges['twin'][i] = int(row.item())
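        # Note (illustrative sketch): the twin search above is O(n_he^2);
        # a hashed lookup over vertex pairs would be linear, e.g.
        #
        #   pair_to_he = {tuple(pair): i for i, pair in enumerate(hec)}
        #   for i in range(n_he):
        #       twin = pair_to_he.get((hec[i, 1], hec[i, 0]))
        #       if twin is not None:
        #           self.half_edges['twin'][i] = twin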
"""
# =====================================================================
# Link with Edges
# =====================================================================
#
# Update Edge Fields
#
# TODO: Delete when safe to do so!!
edge_set = set(self.edges['connectivity'])
self.edges['half_edge'] = [None]*len(edge_set)
for i_he in range(n_he):
#
# Loop over half-edges
#
hec = self.half_edges['connectivity'][i_he]
'''
DEBUG
#print('Size of edge_set: {0}'.format(len(edge_set)))
#print('Size of edge connectivity: {0}'.format(len(self.edges['connectivity'])))
'''
if set(hec) in edge_set:
'''
DEBUG
print('Set {0} is in edge_set. Locating it'.format(hec))
'''
#
# Edge associated with Half-Edge exists
#
i_edge = self.edges['connectivity'].index(set(hec))
'''
DEBUG
print('Location: {0}'.format(i_edge))
print('Here it is: {0}'.format(self.edges['connectivity'][i_edge]))
#print('Linking half edge with edge:')
#print('Half-edge: {0}'.format(self.edges['connectivity'][i_edge]))
#print('Edge: {0}'.format(self.half_edges['connectivity'][fhe[i]]))
#print(len(self.edges['half_edge']))
#print('Length of edge_set {0}'.format(len(edge_set)))
#print(edge_set)
'''
#
# Link edge to half edge
#
self.edges['half_edge'][i_edge] = i_he
else:
#print('Set {0} is not in edge_set \n '.format(hec))
#
# Add edge
#
new_edge = frozenset(hec)
self.edges['connectivity'].append(new_edge)
edge_set.add(new_edge)
i_edge =len(self.edges['connectivity'])-1
#
# Assign empty tags
#
for tag in self.edges['tags'].values():
tag.append(None)
#
# Link edge to half-edge
#
self.edges['half_edge'].append(i)
#
# Link half-edge to edge
#
self.half_edges['edge'][i] = i_edge
#
# Update size of edge list
#
self.edges['n'] = len(self.edges['connectivity'])
"""
def dim(self):
"""
Returns the underlying dimension of the grid
"""
return self.__dim
def get_neighbor(self, i_entity, i_direction):
"""
Returns the neighbor of an entity in a given direction
Inputs:
i_entity: int, index of the entity whose neighbor we seek
In 1D: i_entity indexes a half_edge
In 2D: i_entity indexes a face
i_direction: int, index of an entity specifying a direction
In 1D: i_direction indexes an interval endpoint
In 2D: i_direction indexes a half_edge
"""
if self.dim() == 1:
#
# 1D grid
#
hec = self.half_edges['connectivity'][i_entity]
assert i_direction in hec, \
'Point index not in connectivity of this Half-Edge.'
if i_direction == hec[0]:
#
# Left endpoint: go to previous half-edge
#
i_nbr = self.half_edges['prev'][i_entity]
elif i_direction == hec[1]:
#
# Right endpoint: go to next Half-Edge
#
i_nbr = self.half_edges['next'][i_entity]
elif self.dim() == 2:
#
# 2D grid: use half_edges
#
assert self.half_edges['face'][i_direction] == i_entity,\
'Cell not incident to Half-Edge.'
i_nbr_he = self.half_edges['twin'][i_direction]
i_nbr = self.half_edges['face'][i_nbr_he]
if i_nbr != -1:
return i_nbr
else:
return None
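    # Example (illustrative, 1D): for DCEL(box=[0, 1], resolution=(3,)),
    # half-edge 1 connects points [1, 2]; get_neighbor(1, 2) returns
    # half-edge 2, while get_neighbor(1, 1) returns half-edge 0.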
def get_boundary_half_edges(self):
"""
Returns a list of the boundary half_edge indices
"""
assert self.dim()==2, 'Half edges only present in 2D grids.'
bnd_hes_conn = []
bnd_hes = []
#
# Locate half-edges on the boundary
#
for i_he in range(self.half_edges['n']):
if self.half_edges['twin'][i_he] == -1:
bnd_hes.append(i_he)
bnd_hes_conn.append(self.half_edges['connectivity'][i_he])
#
# Group and sort half-edges
#
bnd_hes_sorted = [deque([he]) for he in bnd_hes]
        while True:
            merger_activity = False
            for g1 in bnd_hes_sorted:
                #
                # Check whether g1 can absorb another deque in bnd_hes_sorted
                #
for g2 in bnd_hes_sorted:
#
# Does g1's head align with g2's tail?
#
if self.half_edges['connectivity'][g1[-1]][1]==\
self.half_edges['connectivity'][g2[0]][0]:
# Remove g2 from list
if len(bnd_hes_sorted) > 1:
g2 = bnd_hes_sorted.pop(bnd_hes_sorted.index(g2))
g1.extend(g2)
merger_activity = True
#
# Does g1's tail align with g2's head?
#
elif self.half_edges['connectivity'][g1[0]][0]==\
self.half_edges['connectivity'][g2[-1]][1]:
if len(bnd_hes_sorted) > 1:
g2 = bnd_hes_sorted.pop(bnd_hes_sorted.index(g2))
g1.extendleft(g2)
merger_activity = True
            if not merger_activity:
                break
#
# Multiple boundary segments
#
return [list(segment) for segment in bnd_hes_sorted]
"""
bnd_hes_sorted = []
i_he_left = bnd_hes.pop()
i_he_right = i_he_left
he_conn_left = bnd_hes_conn.pop()
he_conn_right = he_conn_left
subbnd_hes_sorted = deque([i_he])
while len(bnd_hes)>0:
added_to_left = False
added_to_right = False
for i in range(len(bnd_hes)):
if bnd_hes_conn[i][0] == he_conn_right[1]:
#
# Base vertex of he in list matches
# head vertex of popped he.
#
i_he_right = bnd_hes.pop(i)
he_conn_right = bnd_hes_conn.pop(i)
subbnd_hes_sorted.append(i_he_right)
added_to_right = True
elif bnd_hes_conn[i][1] == he_conn_left[0]:
#
# Head vertex of he in list matches
# base vertex of popped he.
#
i_he_left = bnd_hes_conn.pop(i)
he_conn_left = bnd_hes_conn.pop(i)
subbnd_hes_sorted.appendleft(i_he_left)
added_to_left = True
if added_to_left and added_to_right:
break
if not added_to_left and not added_to_right:
# Could not find any half-edges to add
#
# Add boundary segment to sorted hes
#
bnd_hes_sorted.extend(ihe for ihe in subbnd_hes_sorted)
#
# Reinitialize subbnd_hes_sorted
#
i_he_left = bnd_hes.pop()
i_he_right = i_he_left
he_conn_left = bnd_hes_conn.pop()
he_conn_right = he_conn_left
subbnd_hes_sorted = deque([i_he])
return bnd_hes_sorted
"""
'''
def get_boundary_edges(self):
"""
Returns a list of the boundary edge indices
TODO: Get rid of this
"""
bnd_hes_sorted = self.get_boundary_half_edges()
#
# Extract boundary edges
#
bnd_edges = [self.half_edges['edge'][i] for i in bnd_hes_sorted]
return bnd_edges
'''
def get_boundary_points(self):
"""
Returns a list of boundary point indices
"""
if self.dim() == 1:
#
# One dimensional grid (assume sorted)
#
bnd_points = [0, self.points['n']-1]
elif self.dim() == 2:
#
# Two dimensional grid
#
bnd_points = []
for i_he in self.get_boundary_half_edges():
#
# Add initial point of each boundary half edge
#
bnd_points.append(self.half_edges['connectivity'][i_he][0])
else:
raise Exception('Only dimensions 1 and 2 supported.')
return bnd_points
def make_periodic(self, coordinates, box):
"""
Make a rectangular DCEL periodic by assigning the correct twins to
HalfEdges on the boundary.
Inputs:
            coordinates: set, containing 0 (x-direction) and/or 1 (y-direction).
TODO: Cannot make periodic (1,1) DCEL objects
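        Example (illustrative): DCEL(box=[0, 1, 0, 1], resolution=(2, 2),
        periodic={0}) calls this method to pair the left and right boundary
        half-edges as twins.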
"""
if self.dim()==1:
#
# In 1D, first half-edge becomes "next" of last half-edge
#
self.half_edges['next'][-1] = 0
self.half_edges['prev'][0] = self.half_edges['n']-1
elif self.dim()==2:
#
            # In 2D, must align vertices on both sides of the box
#
x_min, x_max, y_min, y_max = box
if 0 in coordinates:
#
# Make periodic in the x-direction
#
left_hes = []
right_hes = []
for segment in self.get_boundary_half_edges():
for he in segment:
#
# Record coordinates of half-edge's base and head
#
i_base, i_head = self.half_edges['connectivity'][he][:]
                        x_base, y_base = self.points['coordinates'][i_base]
                        x_head, y_head = self.points['coordinates'][i_head]
if np.isclose(x_base,x_max) and np.isclose(x_head,x_max):
#
# If x-values are near x_max, it's on the right
#
right_hes.append((he, y_base, y_head))
elif np.isclose(x_base,x_min) and np.isclose(x_head,x_min):
#
# If x-values are near x_min, it's on the left
#
left_hes.append((he, y_base, y_head))
#
# Look for twin half-edges
#
                n_left = len(left_hes)
                n_right = len(right_hes)
assert n_right==n_left, \
'Number of half-edges on either side of domain differ.'+\
'Cannot make periodic.'
while len(left_hes)>0:
l_he, l_ybase, l_yhead = left_hes.pop()
for ir in range(len(right_hes)):
#
# For each halfedge on the left, check if there is a
# corresponding one on the right.
#
r_he, r_ybase, r_yhead = right_hes[ir]
if np.isclose(l_ybase, r_yhead) and np.isclose(l_yhead, r_ybase):
self.half_edges['twin'][l_he] = r_he
self.half_edges['twin'][r_he] = l_he
del right_hes[ir]
break
assert len(right_hes)==0, \
'All HalfEdges on the left should be matched with '+\
'one on the right.'
if 1 in coordinates:
#
# Make periodic in the y-direction
                #
top_hes = []
bottom_hes = []
for segment in self.get_boundary_half_edges():
for he in segment:
#
# Record coordinates of half-edge's base and head
#
i_base, i_head = self.half_edges['connectivity'][he]
                        x_base, y_base = self.points['coordinates'][i_base]
                        x_head, y_head = self.points['coordinates'][i_head]
if np.isclose(y_base,y_max) and np.isclose(y_head,y_max):
#
# If y-values are near y_max, it's on the top
#
top_hes.append((he, x_base, x_head))
elif np.isclose(y_base,y_min) and np.isclose(y_head,y_min):
#
# If y-values are near y_min, it's on the bottom
#
bottom_hes.append((he, x_base, x_head))
#
# Look for twin half-edges
#
while len(bottom_hes)>0:
b_he, b_xbase, b_xhead = bottom_hes.pop()
for it in range(len(top_hes)):
#
                        # For each halfedge on the bottom, check if there is
                        # a corresponding one on the top.
#
t_he, t_xbase, t_xhead = top_hes[it]
if np.isclose(t_xbase, b_xhead) and np.isclose(t_xhead, b_xbase):
self.half_edges['twin'][b_he] = t_he
self.half_edges['twin'][t_he] = b_he
del top_hes[it]
break
assert len(top_hes)==0, \
                    'All HalfEdges on the bottom should be matched with '+\
                    'one on the top.'
self.periodic_coordinates = coordinates
class Mesh(object):
"""
Mesh class
"""
def __init__(self, dcel=None, box=None, resolution=None, periodic=None,
dim=None, x=None, connectivity=None, file_path=None,
file_format='gmsh'):
# =====================================================================
# Doubly connected Edge List
# =====================================================================
if dcel is None:
#
# Initialize doubly connected edge list if None
#
dcel = DCEL(box=box, resolution=resolution, periodic=periodic,
dim=dim, x=x, connectivity=connectivity,
file_path=file_path, file_format=file_format)
else:
assert isinstance(dcel,DCEL)
self.dcel = dcel
#
# Determine mesh dimension
#
dim = dcel.dim()
self._dim = dim
# =====================================================================
# Vertices
# =====================================================================
vertices = []
n_points = dcel.points['n']
for i in range(n_points):
vertices.append(Vertex(dcel.points['coordinates'][i]))
self.vertices = vertices
def dim(self):
"""
Returns the dimension of the mesh (1 or 2)
"""
return self._dim
class Mesh1D(Mesh):
"""
1D Mesh Class
"""
def __init__(self, dcel=None, box=None, resolution=None, periodic=False,
x=None, connectivity=None, file_path=None, file_format='gmsh'):
#
# Convert input "periodic" to something intelligible for DCEL
#
if periodic is True:
periodic = {0}
else:
periodic = None
Mesh.__init__(self, dcel=dcel, box=box, resolution=resolution,
periodic=periodic, dim=1, x=x, connectivity=connectivity,
file_path=file_path, file_format=file_format)
assert self.dim()==1, 'Mesh dimension not 1.'
# =====================================================================
# Intervals
# =====================================================================
intervals = []
n_intervals = self.dcel.half_edges['n']
for i in range(n_intervals):
#
# Make list of intervals
#
i_vertices = self.dcel.half_edges['connectivity'][i]
v_base = self.vertices[i_vertices[0]]
v_head = self.vertices[i_vertices[1]]
interval = Interval(v_base, v_head)
intervals.append(interval)
#
# Align intervals (assign next)
#
for i in range(n_intervals):
i_nxt = self.dcel.half_edges['next'][i]
if i_nxt!=-1:
if intervals[i].head() != intervals[i_nxt].base():
assert self.dcel.is_periodic, 'DCEL should be periodic'
#
# Intervals linked by periodicity
#
itv_1, vtx_1 = intervals[i], intervals[i].head()
itv_2, vtx_2 = intervals[i_nxt], intervals[i_nxt].base()
# Mark intervals periodic
itv_1.set_periodic()
itv_2.set_periodic()
# Mark vertices periodic
vtx_1.set_periodic()
vtx_2.set_periodic()
# Associate vertices with one another
vtx_1.set_periodic_pair((itv_2, vtx_2))
vtx_2.set_periodic_pair((itv_1, vtx_1))
else:
intervals[i].assign_next(intervals[i_nxt])
#
# Store intervals in Forest
#
self.cells = Forest(intervals)
self.__periodic_coordinates = self.dcel.periodic_coordinates
def is_periodic(self):
"""
Returns true if the mesh is periodic
"""
return 0 in self.__periodic_coordinates
def bin_points(self, points, i_points=None, subforest_flag=None):
"""
Determine a list of LEAF cells in the submesh, each of which contains
at least one point in points. Return the list of tuples of LEAF cells
and point indices.
Inputs:
points: Set of admissible points
subforest_flag: submesh flag
Outputs:
bins: tuple of (cell, index) pairs detailing the bins and indices
of points.
"""
x = convert_to_array(points)
n_points = x.shape[0]
if i_points is None:
i_points = np.arange(n_points)
else:
assert n_points==len(i_points)
bins = []
for cell in self.cells.get_children(flag=subforest_flag):
in_cell = cell.contains_points(x)
if any(in_cell):
#
# Cell contains (some) points
#
# Isolate points in cell and their indices
y = x[in_cell] # subset of points
y_idx = i_points[in_cell] # index of subset
# Recursion step
c_bin = cell.bin_points(y, y_idx, subforest_flag)
bins.extend(c_bin)
# Eliminate points from list
x = x[~in_cell]
i_points = i_points[~in_cell]
assert len(x)==0, 'Some points are not in domain.'
return bins
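    # Example (illustrative sketch; assumes numpy is imported as np):
    #
    #   mesh = Mesh1D(box=[0, 1], resolution=(2,))
    #   bins = mesh.bin_points(np.array([[0.25], [0.75]]))
    #
    # Each entry in bins pairs a LEAF interval with the indices of the
    # points it contains.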
def get_boundary_vertices(self):
"""
Returns the mesh endpoint vertices
"""
if self.is_periodic():
return None
else:
v0 = self.cells.get_child(0).base()
v1 = self.cells.get_child(-1).head()
return v0, v1
def get_boundary_cells(self, subforest_flag=None):
"""
Returns the mesh endpoint cells
"""
if self.is_periodic():
#
# Periodic Mesh: No cells on the boundary
#
return None
else:
for cell in self.cells.get_leaves(subforest_flag=subforest_flag):
#
# Iterate over cells
#
if cell.get_neighbor(0, subforest_flag=subforest_flag) is None:
#
# Cannot find a left neighbor: found left boundary cell
#
cell_left = cell
if cell.get_neighbor(1, subforest_flag=subforest_flag) is None:
#
# Cannot find a right neighbor: found right boundary cell
#
cell_right = cell
return cell_left, cell_right
def bounding_box(self):
"""
Returns the interval endpoints
"""
if self.is_periodic():
#
# Periodic meshes have no boundary vertices, get them explicitly
#
v0 = self.cells.get_child(0).base()
v1 = self.cells.get_child(-1).head()
else:
v0, v1 = self.get_boundary_vertices()
x0, = v0.coordinates()
x1, = v1.coordinates()
return x0, x1
def mark_region(self, flag, f, entity_type='vertex', strict_containment=True,
on_boundary=False, subforest_flag=None):
"""
Flags all entities of specified type within specified 1D region in mesh
Inputs:
flag: str/int/tuple, marker
f: boolean function whose input is a number x and whose
output is True if the point is contained in the region to be
marked, False otherwise.
entity_type: str, entity to be marked ('cell', 'vertex')
strict_containment: bool, if True, an entity is marked only
if all its vertices are contained in the region. If False,
one vertex suffices
on_boundary: bool, if True, consider only entities on the boundary
subforest_flag: str/int/tuple, mesh marker.
"""
if on_boundary:
#
# Entity adjacent to boundary
#
if entity_type=='vertex':
#
# Vertices
#
for v in self.get_boundary_vertices():
x, = v.coordinates()
if f(x):
#
# Vertex in region -> mark it
#
v.mark(flag)
elif entity_type=='cell':
#
# Intervals
#
for cell in self.get_boundary_cells(subforest_flag=subforest_flag):
#
# Iterate over boundary cells
#
if strict_containment:
#
# Only mark interval if all vertices are in region
#
mark = True
for v in cell.get_vertices():
x, = v.coordinates()
if not f(x):
#
                                # One vertex outside region -> don't mark interval
#
mark = False
break
else:
#
# Mark interval if any vertex is in region
#
mark = False
for v in cell.get_vertices():
x, = v.coordinates()
if f(x):
#
# One vertex in region -> mark interval
#
mark = True
break
if mark:
#
# Mark interval if necessary
#
cell.mark(flag)
else:
#
# Region not adjacent to boundary
#
for cell in self.cells.get_leaves(subforest_flag=subforest_flag):
if entity_type=='vertex':
#
# Mark vertices
#
for v in cell.get_vertices():
x, = v.coordinates()
if f(x):
#
# Vertex is in region -> mark it
#
v.mark(flag)
elif entity_type=='cell':
#
# Mark intervals
#
if strict_containment:
mark = True
for v in cell.get_vertices():
x, = v.coordinates()
if not f(x):
#
# One cell vertex outside region -> don't mark
#
mark = False
break
else:
mark = False
for v in cell.get_vertices():
x, = v.coordinates()
if f(x):
#
# One vertex in region -> mark interval
#
mark = True
break
if mark:
#
# Mark interval if necessary
#
cell.mark(flag)
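    # Example (illustrative): flag all vertices in the left half of the
    # domain.
    #
    #   mesh.mark_region('left', lambda x: x <= 0.5, entity_type='vertex')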
def get_region(self, flag=None, entity_type='vertex', on_boundary=False,
subforest_flag=None, return_cells=False):
"""
Returns a list of entities marked with the specified flag in 1D mesh
Inputs:
flag: str/int/tuple, entity marker
entity_type: str, type of entity to be returned
('vertex', 'cell', or 'half_edge')
on_boundary: bool, if True, seek region only along boundary
subforest_flag: str/int/tuple, submesh flag
return_cells: bool, if True, return tuples of the form
(entity, cell), i.e. include the cell containing the entity.
Outputs:
region_entities: list, or Cells/Intervals/HalfEdges/Vertices
located within region.
"""
region_entities = set()
if on_boundary:
#
# Restrict region to boundary
#
cells = self.get_boundary_cells(subforest_flag=subforest_flag)
bnd_vertices = self.get_boundary_vertices()
else:
#
# Region within 1D domain
#
cells = self.cells.get_leaves(subforest_flag=subforest_flag)
for cell in cells:
#
# Iterate over cells
#
if entity_type=='vertex':
#
# Vertex
#
for v in cell.get_vertices():
add_entity = flag is None or v.is_marked(flag)
if on_boundary:
#
# Additional check when on boundary
#
add_entity = add_entity and v in bnd_vertices
if add_entity:
#
# Add vertex to set
#
if return_cells:
#
# Add (vertex, cell) tuple
#
region_entities.add((v,cell))
else:
#
# Add only vertex
#
region_entities.add(v)
elif entity_type=='cell':
#
# Intervals
#
add_entity = flag is None or cell.is_marked(flag)
if add_entity:
#
# Add cell to set
#
if return_cells:
#
# Add (cell, cell) tuple
#
region_entities.add((cell, cell))
else:
#
# Add only cell
#
region_entities.add(cell)
return list(region_entities)
def record(self, subforest_flag):
"""
Record current mesh (intervals)
Input:
subforest_flag: str/int/tuple, name of mesh
"""
self.cells.record(subforest_flag)
class Mesh2D(Mesh):
"""
2D Mesh class
"""
def __init__(self, dcel=None, box=None, resolution=None, x=None,
periodic=None, connectivity=None, file_path=None,
file_format='gmsh'):
Mesh.__init__(self, dcel=dcel, box=box, resolution=resolution,
periodic=periodic, dim=2, x=x, connectivity=connectivity,
file_path=file_path, file_format=file_format)
self._is_rectangular = self.dcel.is_rectangular
self._periodic_coordinates = self.dcel.periodic_coordinates
# ====================================================================
# HalfEdges
# ====================================================================
half_edges = []
n_hes = self.dcel.half_edges['n']
for i in range(n_hes):
i_vertices = self.dcel.half_edges['connectivity'][i]
v_base = self.vertices[i_vertices[0]]
v_head = self.vertices[i_vertices[1]]
half_edge = HalfEdge(v_base, v_head)
half_edges.append(half_edge)
#
# Assign twins (2D)
#
for i_he in range(n_hes):
i_twin = self.dcel.half_edges['twin'][i_he]
if i_twin!=-1:
#
# HalfEdge has twin
#
he_nodes = self.dcel.half_edges['connectivity'][i_he]
twin_nodes = self.dcel.half_edges['connectivity'][i_twin]
if not all(he_nodes == list(reversed(twin_nodes))):
#
# Heads and Bases don't align, periodic boundary
#
                    assert self.is_periodic(), 'Mesh is not periodic. '\
                        'All HalfEdges should align.'
half_edges[i_he].set_periodic()
half_edges[i_twin].set_periodic()
half_edges[i_he].assign_twin(half_edges[i_twin])
half_edges[i_twin].assign_twin(half_edges[i_he])
#
# Store HalfEdges in Forest.
#
self.half_edges = Forest(half_edges)
# =====================================================================
# Cells
# =====================================================================
cells = []
n_cells = self.dcel.faces['n']
is_quadmesh = True
for ic in range(n_cells):
i_he_pivot = self.dcel.faces['half_edge'][ic]
i_he = i_he_pivot
one_rotation = False
i_hes = []
while not one_rotation:
i_hes.append(i_he)
i_he = self.dcel.half_edges['next'][i_he]
if i_he==i_he_pivot:
one_rotation = True
if len(i_hes)==4:
cells.append(QuadCell([half_edges[i] for i in i_hes]))
else:
cells.append(Cell([half_edges[i] for i in i_hes]))
is_quadmesh = False
self._is_quadmesh = is_quadmesh
self.cells = Forest(cells)
# =====================================================================
# Pair Periodic Vertices
# =====================================================================
for half_edge in self.half_edges.get_children():
# Pair periodic vertices
#
if half_edge.is_periodic():
half_edge.pair_periodic_vertices()
def is_rectangular(self):
"""
Check whether the Mesh is rectangular
"""
return self._is_rectangular
def is_periodic(self, coordinates=None):
"""
Check whether the Mesh is periodic in the x- and/or the y direction
Input:
*coordinates: int, set containing 0 (x-direction) and/or 1 (y-direction)
            if coordinates is None, check for periodicity in any direction
"""
if coordinates is None:
return 0 in self._periodic_coordinates or 1 in self._periodic_coordinates
else:
is_periodic = True
for i in coordinates:
if i not in self._periodic_coordinates:
return False
return is_periodic
def is_quadmesh(self):
"""
Check if the mesh is a quadmesh
"""
return self._is_quadmesh
def locate_point(self, point, flag=None):
"""
        Returns the first (flagged) cell containing a given point,
        or None if no (flagged) cell contains the point
        Input:
            point: Vertex
        Output:
            cell: cell containing the point
"""
for cell in self.cells.get_children():
if flag is None:
if cell.contains_points(point):
return cell
else:
if cell.is_marked(flag) and cell.contains_points(point):
return cell
def get_boundary_segments(self, subforest_flag=None, flag=None):
"""
Returns a list of segments of boundary half edges
Inputs:
subforest_flag: optional flag (int/str) specifying the submesh
within which boundary segments are sought.
Note: This flag is applied to the cells in the submesh, not the edges
flag: optional flag (int/str) specifying boundary segments
Notes:
- The subforest flag specified above refers to the mesh cells,
not to the half-edges
- This implementation assumes that the boundary edges on the coarsest
mesh are a good representation of the computational region.
"""
bnd_hes = []
#
# Locate half-edges on the boundary (coarsest level)
#
for he in self.half_edges.get_children():
if he.twin() is None:
bnd_hes.append(he)
#
# Group and sort half-edges
#
bnd_hes_sorted = [deque([he]) for he in bnd_hes]
while True:
merger_activity = False
for g1 in bnd_hes_sorted:
#
# Check if g1 can add a deque in bnd_hes_sorted
#
for g2 in bnd_hes_sorted:
#
# Does g1's head align with g2's tail?
#
if g1[-1].head()==g2[0].base():
# Remove g2 from list
g2 = bnd_hes_sorted.pop(bnd_hes_sorted.index(g2))
g1.extend(g2)
merger_activity = True
#
# Does g1's tail align with g2's head?
#
elif g1[0].base()==g2[-1].head():
g2 = bnd_hes_sorted.pop(bnd_hes_sorted.index(g2))
g2.reverse()
g1.extendleft(g2)
merger_activity = True
            if not merger_activity or len(bnd_hes_sorted)==1:
                break
#
# Multiple boundary segments
#
bnd = [list(segment) for segment in bnd_hes_sorted]
#
# Get edges on finest level (allowed by submesh)
#
for segment in bnd:
hes_todo = [he for he in segment]
while len(hes_todo)>0:
#
# Pop out first half-edge in list
#
he = hes_todo.pop(0)
if he.cell().has_children(flag=subforest_flag):
#
# Half-Edge has valid sub-edges:
# Replace he in list with these.
#
i_he = segment.index(he)
del segment[i_he]
for che in he.get_children():
segment.insert(i_he, che)
i_he += 1
#
# Add che's to the list of he's to do
#
hes_todo.append(che)
#
# Throw out he's that are not flagged
#
            if flag is not None:
                #
                # Keep only flagged half-edges (filter in place; deleting
                # entries while iterating would skip elements)
                #
                segment[:] = [he for he in segment if he.is_marked(flag)]
return bnd
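    # Example (illustrative): walk the boundary of the coarsest mesh.
    #
    #   for segment in mesh.get_boundary_segments():
    #       for he in segment:
    #           print(he.base().coordinates(), he.head().coordinates())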
def get_boundary_vertices(self, flag=None, subforest_flag=None):
"""
Returns the Vertices on the boundary
"""
vertices = []
for segment in self.get_boundary_segments(subforest_flag=subforest_flag,
flag=flag):
for he in segment:
vertices.append(he.base())
return vertices
def mark_region(self, flag, f, entity_type='vertex', strict_containment=True,
on_boundary=False, subforest_flag=None):
"""
This method marks all entities within a 2D region.
Inputs:
flag: str, int, tuple marker
            f: boolean function whose inputs are coordinates x and y, and
                whose output is True if the point is contained in the region
                to be marked, False otherwise.
entity_type: str, entity to be marked ('cell', 'half_edge', 'vertex')
strict_containment: bool, if True, an entity is marked only
if all its vertices are contained in the region. If False,
one vertex suffices
on_boundary: bool, if True, consider only entities on the boundary
subforest_flag: str/int/tuple, mesh marker.
"""
if on_boundary:
#
# Iterate only over boundary segments
#
for segment in self.get_boundary_segments(subforest_flag=subforest_flag):
#
# Iterate over boundary segments
#
for he in segment:
#
# Iterate over half_edges within each segment
#
if entity_type=='vertex':
#
# Mark vertices
#
for v in he.get_vertices():
#
# Iterate over half-edge vertices
#
x,y = v.coordinates()
if f(x,y):
#
# Mark
#
v.mark(flag)
elif entity_type=='half_edge':
#
# Mark Half-Edges
#
if strict_containment:
#
# All vertices must be within region
#
mark = True
for v in he.get_vertices():
x,y = v.coordinates()
if not f(x,y):
#
# One vertex not in region, don't mark edge
#
mark = False
break
else:
#
# Only one vertex need be in the region
#
mark = False
for v in he.get_vertices():
x,y = v.coordinates()
if f(x,y):
#
# One vertex in region is enough
#
mark = True
break
if mark:
#
# Mark half_edge
#
he.mark(flag)
elif entity_type=='cell':
#
# Mark Cells
#
cell = he.cell()
if strict_containment:
mark = True
for v in cell.get_vertices():
x,y = v.coordinates()
if not f(x,y):
#
# One vertex not in region -> don't mark
#
mark = False
break
else:
mark = False
for v in cell.get_vertices():
x,y = v.coordinates()
if f(x,y):
#
# One vertex in region -> mark
#
mark = True
break
if mark:
#
# Mark cell
#
cell.mark(flag)
else:
raise Exception('Entity %s not supported'%(entity_type))
else:
#
# Region may lie within interior of the domain
#
for cell in self.cells.get_leaves(subforest_flag=subforest_flag):
#
# Iterate over mesh cells
#
if entity_type=='vertex':
#
# Mark vertices
#
for v in cell.get_vertices():
x,y = v.coordinates()
if f(x,y):
#
# Mark vertex
#
v.mark(flag)
elif entity_type=='half_edge':
#
# Mark half-edges
#
for he in cell.get_half_edges():
if strict_containment:
mark = True
for v in he.get_vertices():
x,y = v.coordinates()
if not f(x,y):
#
# Single vertex outside region disqualifies half_edge
#
mark = False
break
else:
mark = False
for v in he.get_vertices():
x,y = v.coordinates()
if f(x,y):
#
# Single vertex in region -> mark half_edge
#
mark = True
break
if mark:
#
# Mark half_edge
#
he.mark(flag)
elif entity_type=='cell':
#
# Mark cells
#
if strict_containment:
#
# All vertices must be in region
#
mark = True
for v in cell.get_vertices():
x,y = v.coordinates()
if not f(x,y):
mark = False
break
else:
#
# Only one vertex need be in region
#
mark = False
for v in cell.get_vertices():
x,y = v.coordinates()
if f(x,y):
mark = True
break
if mark:
#
# Mark cell
#
cell.mark(flag)
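    # Example (illustrative sketch; assumes numpy is imported as np):
    # mark the boundary half-edges on the right side of a unit square.
    #
    #   mesh.mark_region('right', lambda x, y: np.isclose(x, 1),
    #                    entity_type='half_edge', on_boundary=True)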
def tear_region(self, flag, subforest_flag=None):
"""
Tear the domain along an interior half-edge region.
As a consequence,
- Vertices on either side of the half-edge are separate
(although they still have the same coordinates).
- Adjoining half-edges along the region will no longer be
neighbors of each other.
Inputs:
flag: str/int/tuple, flag specifying the region of half-edges
subforest_flag: str/int/tuple, flag specifying the submesh
"""
#
# Iterate over half-edges along region
#
for he in self.get_region(flag=flag, entity_type='half_edge',
subforest_flag=subforest_flag):
#
# Assign New Vertices to half-edge
#
base = Vertex(he.base().coordinates())
head = Vertex(he.head().coordinates())
he.set_vertices(base, head)
#
# Disassociate from neighboring half-edge
#
twin = he.twin()
twin.delete_twin()
he.delete_twin()
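    # Example (illustrative): mark an interior line of half-edges and tear
    # the mesh along it.
    #
    #   mesh.mark_region('crack', lambda x, y: np.isclose(y, 0.5),
    #                    entity_type='half_edge')
    #   mesh.tear_region('crack')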
def get_region(self, flag=None, entity_type='vertex', on_boundary=False,
subforest_flag=None, return_cells=False):
"""
Returns a list of entities marked with the specified flag
Inputs:
flag: str/int/tuple, entity marker
entity_type: str, type of entity to be returned
('vertex', 'cell', or 'half_edge')
on_boundary: bool, if True, seek region only along boundary
subforest_flag: str/int/tuple, submesh flag
return_cells: bool, if True, return a list of tuples of the form
(entity, cell)
Outputs:
region_entities: list, or Cells/Intervals/HalfEdges/Vertices
located within region.
"""
debug = False
region_entities = set()
if on_boundary:
if debug: print('On boundary')
#
# Region is a subset of the boundary
#
for segment in self.get_boundary_segments(subforest_flag=subforest_flag):
#
# Iterate over boundary segments
#
for he in segment:
#
# Iterate over boundary edges
#
if entity_type=='cell':
#
# Get cell associated with half-edge
#
cell = he.cell()
#
# Add cell to set
#
add_entity = flag is None or cell.is_marked(flag)
if add_entity:
if return_cells:
#
# Return containing cell as cell
#
region_entities.add((cell, cell))
else:
#
# Return only entity
#
region_entities.add(cell)
elif entity_type=='half_edge':
#
# Half-edge
#
add_entity = flag is None or he.is_marked(flag)
if add_entity:
if return_cells:
#
# Return half-edge and cell
#
cell = he.cell()
region_entities.add((he, cell))
else:
#
# Return only entity
#
region_entities.add(he)
elif entity_type=='vertex':
#
# Vertices
#
for v in he.get_vertices():
if debug:
print('considering vertex', v.coordinates())
add_entity = flag is None or v.is_marked(flag)
if debug:
print('to add?', add_entity)
print('marked?', v.is_marked(flag))
if add_entity:
if return_cells:
#
# Return containing cell and entity
#
cell = he.cell()
region_entities.add((v, cell))
else:
#
# Return only entity
#
region_entities.add(v)
else:
#
# Iterate over entire mesh.
#
for cell in self.cells.get_leaves(subforest_flag=subforest_flag):
#
# Iterate over mesh cells
#
if entity_type=='cell':
#
# Cells
#
add_entity = flag is None or cell.is_marked(flag)
if add_entity:
if return_cells:
#
# Return containing cell as cell
#
region_entities.add((cell, cell))
else:
#
# Return only entity
#
region_entities.add(cell)
elif entity_type=='half_edge':
#
# Half-Edges
#
for he in cell.get_half_edges():
add_entity = flag is None or he.is_marked(flag)
if add_entity:
if return_cells:
#
# Return half-edge and cell
#
region_entities.add((he, cell))
else:
#
# Return only entity
#
region_entities.add(he)
elif entity_type=='vertex':
#
# Vertices
#
for he in cell.get_half_edges():
for v in he.get_vertices():
add_entity = flag is None or v.is_marked(flag)
if add_entity:
if return_cells:
#
# Return containing cell and entity
#
region_entities.add((v, cell))
else:
#
# Return only entity
#
region_entities.add(v)
        return list(region_entities)
def bounding_box(self):
"""
Returns the bounding box of the mesh
"""
xy = convert_to_array(self.vertices, dim=2)
x0, x1 = xy[:,0].min(), xy[:,0].max()
y0, y1 = xy[:,1].min(), xy[:,1].max()
return x0, x1, y0, y1
def record(self, subforest_flag):
"""
Mark all cells and half-edges within current mesh with subforest_flag
"""
self.cells.record(subforest_flag)
self.half_edges.record(subforest_flag)
'''
def get_boundary_edges(self, flag=None):
"""
Returns the half-nodes on the boundary
"""
bnd_hes_unsorted = []
#
# Locate ROOT half-edges on the boundary
#
for he in self.half_edges.get_children():
if he.twin() is None:
bnd_hes_unsorted.append(he)
n_bnd = len(bnd_hes_unsorted)
#
# Sort half-edges
#
he = bnd_hes_unsorted.pop()
bnd_hes_sorted = [he]
while n_bnd>0:
for i in range(n_bnd):
nxt_he = bnd_hes_unsorted[i]
if he.head()==nxt_he.base():
bnd_hes_sorted.append(nxt_he)
he = bnd_hes_unsorted.pop(i)
n_bnd -= 1
break
#
# Get LEAF half-edges
#
bnd_hes = []
for he in bnd_hes_sorted:
bnd_hes.extend(he.get_leaves(flag=flag))
'''
class QuadMesh(Mesh2D):
"""
Two dimensional mesh with quadrilateral cells.
Note:
        When coarsening and refining a QuadMesh, the HalfEdges are not
        deleted. Rather, use submeshes.
"""
def __init__(self, dcel=None, box=None, resolution=None, x=None,
periodic=None, connectivity=None, file_path=None,
file_format='gmsh'):
#
# Initialize 2D Mesh.
#
Mesh2D.__init__(self, dcel=dcel, box=box, resolution=resolution,
periodic=periodic, x=x, connectivity=connectivity,
file_path=file_path, file_format=file_format)
self.cells = Forest(self.cells.get_children())
def bin_points(self, points, i_points=None, subforest_flag=None):
"""
Determine a list of LEAF cells in the submesh, each of which contains
at least one point in points. Return the list of tuples of LEAF cells
and point indices.
Inputs:
points: Set of admissible points
subforest_flag: submesh flag
Outputs:
bins: tuple of (cell, index) pairs detailing the bins and indices
of points.
"""
x = convert_to_array(points)
n_points = x.shape[0]
if i_points is None:
i_points = np.arange(n_points)
else:
assert n_points==len(i_points)
bins = []
for cell in self.cells.get_children(flag=subforest_flag):
in_cell = cell.contains_points(x)
if any(in_cell):
#
# Cell contains (some) points
#
# Isolate points in cell and their indices
y = x[in_cell] # subset of points
y_idx = i_points[in_cell] # index of subset
# Recursion step
c_bin = cell.bin_points(y, y_idx, subforest_flag)
bins.extend(c_bin)
# Eliminate points from list
x = x[~in_cell]
i_points = i_points[~in_cell]
assert len(x)==0, 'Some points are not in domain.'
return bins
def is_balanced(self, subforest_flag=None):
"""
Check whether the mesh is balanced
Inputs:
flag (optional): marker, allowing for the restriction to
a submesh.
"""
for cell in self.cells.get_leaves(subforest_flag=subforest_flag):
for half_edge in cell.get_half_edges():
nb = cell.get_neighbors(half_edge, flag=subforest_flag)
if nb is not None and nb.has_children(flag=subforest_flag):
twin = half_edge.twin()
for the_child in twin.get_children():
if the_child.cell().has_children(flag=subforest_flag):
return False
return True
def balance(self, subforest_flag=None):
"""
Ensure that subcells of current cell conform to the 2:1 rule
"""
assert self.cells.subtrees_rooted(subforest_flag)
#
# Get all LEAF cells
#
leaves = set(self.cells.get_leaves(subforest_flag=subforest_flag)) # set: no duplicates
while len(leaves)>0:
leaf = leaves.pop()
#
# For each Cell
#
is_split = False
for half_edge in leaf.get_half_edges():
#
# Look for neighbors in each direction
#
nb = leaf.get_neighbors(half_edge, flag=subforest_flag)
if nb is not None and nb.has_children(flag=subforest_flag):
#
# Check if neighbor has children (still fine)
#
twin = half_edge.twin()
for the_child in twin.get_children():
if the_child.cell().has_children(flag=subforest_flag):
#
# Neighbor has grandchildren
#
if not leaf.has_children(flag=subforest_flag):
#
# LEAF does not have any flagged children
#
if leaf.has_children():
#
# LEAF has children (just not flagged)
#
for child in leaf.get_children():
child.mark(subforest_flag)
else:
#
# LEAF needs new children.
#
leaf.split(flag=subforest_flag)
#
# Add children to the leaf nodes to be considered
#
for child in leaf.get_children():
leaves.add(child)
#
# If LEAF is split, add all its neighbors to leaves
# to be considered for splitting.
#
for half_edge in leaf.get_half_edges():
hep = half_edge.get_parent()
if hep is not None:
hep_twin = hep.twin()
if hep_twin is not None:
leaves.add(hep_twin.cell())
#
# Current LEAF cell has been split, move on to next one
#
is_split = True
break
if is_split:
#
# LEAF already split, no need to check other directions
#
break
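    # Example (illustrative sketch; assumes Cell.split() refines a cell
    # in place): refine one cell, then restore the 2:1 rule.
    #
    #   mesh = QuadMesh(resolution=(2, 2))
    #   mesh.cells.get_child(0).split()
    #   mesh.balance()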
def remove_supports(self, subforest_flag=None, coarsening_flag=None):
"""
Given a submesh (subforest_flag) and a coarsening_flag,
Input:
subforest_flag: flag specifying the submesh to be considered
coarsening_flag: flag specifying the cells to be removed
during coarsening
TODO: Unfinished. Loop over cells to be coarsened. Check if it's
safe to coarsen neighbors.
"""
#
# Get all flagged LEAF nodes
#
leaves = self.get_leaves(subforest_flag=subforest_flag,
coarsening_flag=coarsening_flag)
while len(leaves) > 0:
#
# For each LEAF
#
leaf = leaves.pop()
#
# Check if leaf is a support leaf
#
if subforest_flag is None:
is_support = leaf.is_marked('support')
else:
is_support = leaf.is_marked((subforest_flag, 'support'))
if is_support:
#
# Check whether its safe to delete the support cell
#
safe_to_coarsen = True
for half_edge in leaf.get_half_edges():
nb = leaf.get_neighbor(half_edge, flag=subforest_flag)
if nb is not None and nb.has_children(flag=subforest_flag):
#
# Neighbor has (flagged) children, coarsening will lead
# to an unbalanced tree
#
safe_to_coarsen = False
break
if safe_to_coarsen:
                    #
                    # Remove support by marking the leaf with coarsening flag
                    #
                    leaf.mark(coarsening_flag)
leaves.append(leaf.get_parent())
'''
class TriCell(object):
"""
TriCell object
Attributes:
Methods:
"""
def __init__(self, vertices, parent=None):
"""
Inputs:
vertices: Vertex, list of three vertices (ordered counter-clockwise)
parent: QuadCell that contains triangle
"""
v = []
e = []
assert len(vertices) == 3, 'Must have exactly 3 vertices.'
for i in range(3):
#
            # Define vertices and Half-Edges with minimum information
#
v.append(Vertex(vertices[i],2))
#
# Some edge on outerboundary
#
self.outer_component = e[0]
for i in range(3):
#
# Half edge originating from v[i]
#
v[i].incident_edge = e[i]
#
# Edges preceding/following e[i]
#
j = np.remainder(i+1,3)
e[i].next = e[j]
e[j].previous = e[i]
#
# Incident face
#
e[i].incident_face = self
self.parent_node = parent
self.__vertices = v
self.__edges = [
Edge(vertices[0], vertices[1], parent=self), \
Edge(vertices[1], vertices[2], parent=self), \
Edge(vertices[2], vertices[0], parent=self)
]
self.__element_no = None
self._flags = set()
def vertices(self,n):
return self.__vertices[n]
def edges(self):
return self.__edges
def area(self):
"""
Compute the area of the triangle
"""
v = self.__vertices
a = [v[1].coordinates()[i] - v[0].coordinates()[i] for i in range(2)]
b = [v[2].coordinates()[i] - v[0].coordinates()[i] for i in range(2)]
return 0.5*abs(a[0]*b[1]-a[1]*b[0])
def unit_normal(self, edge):
#p = ((y1-y0)/nnorm,(x0-x1)/nnorm)
pass
def number(self, num, overwrite=False):
"""
Assign a number to the triangle
"""
        if self.__element_no is None or overwrite:
self.__element_no = num
else:
raise Warning('Element already numbered. Overwrite disabled.')
return
def get_neighbor(self, edge, tree):
"""
Find neighboring triangle across edge wrt a given tree
"""
pass
def mark(self, flag=None):
"""
Mark TriCell
Inputs:
flag: optional label used to mark cell
"""
if flag is None:
self._flags.add(True)
else:
self._flags.add(flag)
def unmark(self, flag=None, recursive=False):
"""
Remove label from TriCell
Inputs:
flag: label to be removed
recursive: bool, also unmark all subcells
"""
#
# Remove label from own list
#
if flag is None:
# No flag specified -> delete all
self._flags.clear()
else:
# Remove specified flag (if present)
if flag in self._flags: self._flags.remove(flag)
#
# Remove label from children if applicable
#
if recursive and self.has_children():
for child in self.children.values():
child.unmark(flag=flag, recursive=recursive)
def is_marked(self,flag=None):
"""
Check whether cell is marked
Input: flag, label for QuadCell: usually one of the following:
True (catchall), 'split' (split cell), 'count' (counting)
TODO: Possible to add/remove set? Useful?
"""
if flag is None:
# No flag -> check whether set is empty
if self._flags:
return True
else:
return False
else:
            # Check whether the given label is contained in the cell's set
return flag in self._flags
'''
# File: test_docs.py
# -*- coding: utf-8 -*-
"""
Doctest runner for 'birdhousebuilder.recipe.adagucserver'.
"""
__docformat__ = 'restructuredtext'
import os
import sys
import unittest
import zc.buildout.tests
import zc.buildout.testing
from zope.testing import doctest, renormalizing

optionflags = (doctest.ELLIPSIS |
               doctest.NORMALIZE_WHITESPACE |
               doctest.REPORT_ONLY_FIRST_FAILURE)
def setUp(test):
zc.buildout.testing.buildoutSetUp(test)
# Install the recipe in develop mode
zc.buildout.testing.install_develop('birdhousebuilder.recipe.adagucserver', test)
test.globs['os'] = os
test.globs['sys'] = sys
test.globs['test_dir'] = os.path.dirname(__file__)
def test_suite():
suite = unittest.TestSuite((
doctest.DocFileSuite(
'../../../../README.rst',
setUp=setUp,
tearDown=zc.buildout.testing.buildoutTearDown,
optionflags=optionflags,
checker=renormalizing.RENormalizing([
# If want to clean up the doctest output you
# can register additional regexp normalizers
# here. The format is a two-tuple with the RE
# as the first item and the replacement as the
# second item, e.g.
# (re.compile('my-[rR]eg[eE]ps'), 'my-regexps')
zc.buildout.testing.normalize_path,
]),
),
))
return suite
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
# File: make.py
#!/usr/bin/env python
import subprocess
import os
class MakeException(Exception):
pass
def swapExt(path, current, replacement):
path, ext = os.path.splitext(path)
if ext == current:
path += replacement
return path
else:
raise MakeException(
"swapExt: expected file name ending in %s, got file name ending in %s" % \
(current, replacement))
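# Example (illustrative): swapExt('benc_int.c', '.c', '.o') -> 'benc_int.o'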
headerFiles = [
'benc.h',
'bencode.h',
]
codeFiles = [
'benc_int.c',
'benc_bstr.c',
'benc_list.c',
'benc_dict.c',
'bencode.c',
'bcopy.c',
]
cflags = ['-g']
programFile = 'bcopy'
def gcc(*packedArgs):
args = []
for arg in packedArgs:
if isinstance(arg, list):
args += arg
elif isinstance(arg, tuple):
args += list(arg)
else:
args.append(arg)
    subprocess.check_call(['gcc'] + args)

def compile(codeFile, cflags=[]):
objectFile = swapExt(codeFile, '.c', '.o')
gcc(cflags, '-c', ('-o', objectFile), codeFile)
return objectFile
def link(programFile, objectFiles, cflags=[]):
gcc(cflags, ('-o', programFile), objectFiles)
if __name__ == '__main__':
objectFiles = [compile(codeFile, cflags) for codeFile in codeFiles]
    link(programFile, objectFiles, cflags)
// File: LookupPool.java
package com.openthinks.libs.utilities.lookup;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import com.openthinks.libs.utilities.Checker;
import com.openthinks.libs.utilities.InstanceUtilities;
import com.openthinks.libs.utilities.InstanceUtilities.InstanceWrapper;
import com.openthinks.libs.utilities.exception.CheckerNoPassException;
import com.openthinks.libs.utilities.pools.object.ObjectPool;
/**
* ClassName: LookupPool <br>
 * Function: It is used to store shared object instances and to look up SPI implementations<br>
 * Reason: follows the flyweight design pattern to reduce the instantiation of new objects. <br>
 * Notice: avoid storing or looking up objects that carry mutable state;<br>
* Usage:<br>
*
* <pre>
* <code>
* //get gloabl instance of LookupPool
* LookupPool lookupPool = LookupPools.gloabl();
*
* //get a named instance of LookupPool
* lookupPool = LookupPools.get("pool-1");
*
* // instance by class directly
* LookupInterfaceImpl instanceImpl1 = lookupPool.lookup(LookupInterfaceImpl.class);
*
* // instance by customized implementation class
* LookupInterface instanceImpl2 = lookupPool.lookup(LookupInterface.class,InstanceWrapper.build(LookupInterfaceImpl.class));
*
* // lookup shared object already existed
* LookupInterface instanceImpl3 = lookupPool.lookup(LookupInterface.class);
* assert instanceImpl2==instanceImpl3 ;
*
* // register a shared object by name
* lookupPool.register("beanName1",instanceImpl1);
* // lookup shared object by name
* LookupInterface instanceImpl4 = lookupPool.lookup("beanName1");
*
* assert instanceImpl1==instanceImpl4 ;
*
* // clear all shared objects and mappings
* lookupPool.cleanUp();
*
* </code>
* </pre>
*
* date: Sep 8, 2017 3:15:29 PM <br>
*
* @author [email protected]
* @version 1.0
* @since JDK 1.8
*/
public abstract class LookupPool {
protected final ObjectPool objectPool;
protected final ReadWriteLock lock;
public abstract String name();
protected LookupPool() {
objectPool = new ObjectPool();
lock = new ReentrantReadWriteLock();
}
/**
* lookup a object by its type, if not find firstly, try to instance by instanceType and
* constructor parameters args
*
* @param <T> lookup object type
* @param type Class lookup key type
* @param args Object[] instance constructor parameters
* @return T lookup object or null
*/
public <T> T lookup(Class<T> type, Object... args) {
return lookup(type, null, args);
}
/**
* lookup a object by its type, if not find firstly, try to instance by instanceType and
* constructor parameters args
*
* @param <T> lookup object type
* @param <E> lookup object type
* @param searchType Class lookup key type
 * @param instancewrapper wrapper describing the implementation type to instantiate when the key is not found
* @param args Object[] instance constructor parameters
* @return T lookup object or null
*/
public <T, E extends T> T lookup(final Class<T> searchType, InstanceWrapper<E> instancewrapper,
Object... args) {
T object = null;
lock.readLock().lock();
try {
object = objectPool.get(searchType);
} finally {
lock.readLock().unlock();
}
if (object == null) {
lock.writeLock().lock();
try {
object = InstanceUtilities.create(searchType, instancewrapper, args);
register(searchType, object);
} finally {
lock.writeLock().unlock();
}
}
return object;
}
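  // Note on the locking above: ReentrantReadWriteLock cannot upgrade a read
  // lock to a write lock, so the read lock is released before the write lock
  // is acquired. In that window another thread may register the same type;
  // both threads would then create an instance and the later register()
  // would overwrite the earlier one. A conventional tightening (a sketch,
  // not the original code) is to re-check inside the write lock:
  //
  //   lock.writeLock().lock();
  //   try {
  //     object = objectPool.get(searchType);
  //     if (object == null) {
  //       object = InstanceUtilities.create(searchType, instancewrapper, args);
  //       register(searchType, object);
  //     }
  //   } finally {
  //     lock.writeLock().unlock();
  //   }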
/**
 * look up an object by its bean name
*
* @param <T> lookup object type
* @param beanName String lookup object mapping name
* @return T lookup object or null
*/
public <T> T lookup(String beanName) {
lock.readLock().lock();
try {
return objectPool.get(beanName);
} finally {
lock.readLock().unlock();
}
}
/**
 * look up an optional object by its type; if it is not found, try to instantiate it with the
 * constructor parameters args
*
* @param <T> lookup object type
* @param type Class lookup key type
* @param args Object[] instance constructor parameters
* @return Optional of lookup object
*/
public <T> Optional<T> lookupIf(Class<T> type, Object... args) {
return Optional.ofNullable(lookup(type, args));
}
/**
 * look up an optional object by its type; if it is not found, try to instantiate it via the
 * given instance wrapper and constructor parameters args
*
* @param <T> lookup object type
* @param <E> lookup object type
* @param searchType Class lookup key type
 * @param instancewrapper wrapper describing the implementation type to instantiate when the key is not found
* @param args Object[] instance constructor parameters
* @return Optional of lookup object
*/
public <T, E extends T> Optional<T> lookupIf(final Class<T> searchType,
InstanceWrapper<E> instancewrapper, Object... args) {
return Optional.ofNullable(lookup(searchType, instancewrapper, args));
}
/**
 * look up an optional object by its bean name
*
* @param <T> lookup object type
* @param beanName String lookup object mapping name
* @return Optional of lookup object
*/
public <T> Optional<T> lookupIf(String beanName) {
return Optional.ofNullable(lookup(beanName));
}
/**
*
 * lookupSPI: uses {@link ServiceLoader} to load the SPI defined under the folder
 * <B>META-INF/services</B>. <br>
 * It first tries to load the instance from the cached {@link ObjectPool}; if not found, it then
 * loads the SPI class and instantiates it.<br>
 * Notice: only the first SPI class in the definition file is loaded and instantiated<br>
*
* @param <T> lookup SPI interface class
* @param spiInterface SPI interface or abstract class type
* @param args constructor arguments
* @return implementation of parameter spiInterface
*/
public <T> T lookupSPI(Class<T> spiInterface, Object... args) {
return lookupFocusSPI(spiInterface, null, args);
}
/**
*
 * lookupFocusSPI: uses {@link ServiceLoader} to load the SPI defined under the folder
 * <B>META-INF/services</B>. <br>
 * It first tries to load the instance from the cached {@link ObjectPool}; if not found, it then
 * loads the SPI class and instantiates it.<br>
 * Notice: only the focused SPI class in the definition file is loaded and instantiated<br>
*
* @param <T> lookup SPI interface class
* @param spiInterface SPI interface or abstract class type
 * @param focusClassName focused SPI implementation class name
* @param args constructor arguments
* @return implementation of parameter spiInterface
*
*/
public <T> T lookupFocusSPI(Class<T> spiInterface, String focusClassName, Object... args) {
T object = null;
lock.readLock().lock();
try {
object = objectPool.get(spiInterface);
} finally {
lock.readLock().unlock();
}
if (object == null) {
lock.writeLock().lock();
try {
ServiceLoader<T> serviceLoader = ServiceLoader.load(spiInterface, focusClassName, args);
object = serviceLoader.iterator().next();
      Checker.require(object).notNull("Cannot find SPI implementation for " + spiInterface);
register(spiInterface, object);
} finally {
lock.writeLock().unlock();
}
}
return object;
}
/**
*
 * lookupSPISkipCache: uses {@link ServiceLoader} to load the SPI defined under the folder
 * <B>META-INF/services</B>. <br>
 * It loads the SPI afresh each time, skipping the cache rather than consulting it first.<br>
 * Notice: only the first SPI class in the definition file is loaded and instantiated<br>
*
* @param <T> lookup SPI interface class
* @param spiInterface SPI interface or abstract class type
* @param args constructor arguments
* @return implementation of parameter spiInterface
* @throws CheckerNoPassException when not found implementation SPI
*/
public <T> T lookupSPISkipCache(Class<T> spiInterface, Object... args) {
return lookupFocusSPISkipCache(spiInterface, null, args);
}
/**
*
 * lookupFocusSPISkipCache: uses {@link ServiceLoader} to load the SPI defined under the
 * folder <B>META-INF/services</B>. <br>
 * It loads the SPI afresh each time, skipping the cache rather than consulting it first.<br>
 * Notice: only the focused SPI class in the definition file is loaded and instantiated<br>
*
* @param <T> lookup SPI interface class
* @param spiInterface SPI interface or abstract class type
* @param focusClassName focused SPI implementation class name
* @param args constructor arguments
* @return implementation of parameter spiInterface
<|fim▁hole|> */
public <T> T lookupFocusSPISkipCache(Class<T> spiInterface, String focusClassName,
Object... args) {
T object = null;
ServiceLoader<T> serviceLoader = ServiceLoader.load(spiInterface, focusClassName, args);
object = serviceLoader.iterator().next();
    Checker.require(object).notNull("Cannot find SPI implementation for " + spiInterface);
return object;
}
/**
*
 * lookupAllSPI: find all instances of the SPI implementation. <br>
* Notice:<BR>
* <ul>
* <li>all implementation need default constructor.</li>
* <li>do not search from cache</li>
* </ul>
*
* @param <T> SPI type
* @param spiInterface SPI interface or abstract class type
* @return list of all SPI implementation instance
*/
public <T> List<T> lookupAllSPI(Class<T> spiInterface) {
List<T> list = new ArrayList<>();
ServiceLoader<T> serviceLoader = ServiceLoader.load(spiInterface);
Iterator<T> iterator = serviceLoader.iterator();
while (iterator.hasNext()) {
try {
list.add(iterator.next());
} catch (Exception e) {
// ignore
}
}
return list;
}
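  // Illustrative SPI wiring for the lookup*SPI methods (the names below are
  // hypothetical, not part of this project):
  //
  //   # META-INF/services/com.example.Codec
  //   com.example.JsonCodec
  //   com.example.XmlCodec
  //
  //   Codec first = lookupPool.lookupSPI(Codec.class);        // JsonCodec, then cached
  //   Codec xml = lookupPool.lookupFocusSPISkipCache(
  //       Codec.class, "com.example.XmlCodec");               // bypasses that cache
  //   List<Codec> all = lookupPool.lookupAllSPI(Codec.class); // both entries, no cache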
/**
*
 * register an instance, whose key is object.getClass(). <br>
 *
 * @param <T> registered object class type
 * @param object instance to be registered
*/
public <T> void register(T object) {
if (object != null) {
      lock.writeLock().lock();
try {
objectPool.put(object.getClass(), object);
} finally {
lock.writeLock().unlock();
}
}
}
/**
 * register an instance, whose key is the given parameter classType
 *
 * @param <T> registered object class type
 * @param classType Class used as the key for the registered instance
 * @param object instance to be registered
*/
public <T> void register(Class<T> classType, T object) {
if (object != null) {
      lock.writeLock().lock();
try {
objectPool.put(classType, object);
} finally {
lock.writeLock().unlock();
}
}
}
/**
 * register an object and map it to the given bean name
 *
 * @param <T> registered object type
 * @param beanName String bean name
 * @param object object to register
*/
public <T> void register(String beanName, T object) {
if (object != null) {
lock.writeLock().lock();
try {
objectPool.put(beanName, object);
} finally {
lock.writeLock().unlock();
}
}
}
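  // The three register() overloads above key the same pool differently: by
  // the object's runtime class, by an explicit class (e.g. an interface, so
  // lookup(Interface.class) finds the implementation), or by a free-form
  // bean name for lookup(String).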
protected void cleanUp() {
lock.writeLock().lock();
try {
objectPool.cleanUp();
} finally {
lock.writeLock().unlock();
}
}
}<|fim▁end|> | *
* @throws CheckerNoPassException when not found implementation SPI
|
<|file_name|>scatter.py<|end_file_name|><|fim▁begin|>"""
Generic, configurable scatterplot
"""
import collections
import warnings
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
class PlottingAttribute(object):
__slots__ = 'groupby', 'title', 'palette', 'group_to_attribute'
def __init__(self, groupby, title, palette, order):
"""An attribute that you want to visualize with a specific visual cue
Parameters
----------
groupby : mappable
A series or dict or list to groupby on the rows of the data
title : str
Title of this part of the legend
        palette : list-like
            What to plot for each group
        order : list-like or None
            Group labels, in order, to pair with ``palette``; None when there
            is only a single group
"""
self.groupby = groupby
self.title = title
self.palette = palette
if order is not None:
# there's more than one attribute
self.group_to_attribute = dict(zip(order, palette))
else:
# There's only one attribute
self.group_to_attribute = {None: palette[0]}
def __getitem__(self, item):
return self.group_to_attribute[item]
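# Illustrative use of PlottingAttribute (made-up data, not from the module):
#
#   hue = pd.Series(['a', 'b', 'a'], index=['s1', 's2', 's3'])
#   attr = PlottingAttribute(hue, title='group',
#                            palette=['red', 'blue'], order=['a', 'b'])
#   attr['b']                                               # -> 'blue'
#
# With order=None the single palette entry is keyed by None:
#   PlottingAttribute(hue, 'group', ['red'], None)[None]    # -> 'red'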
class PlotterMixin(object):
"""
Must be mixed with something that creates the ``self.plot_data`` attribute
Attributes
----------
    color : PlottingAttribute
        Mapping from hue group to plot color, created by ``establish_colors``
"""
# Markers that can be filled, in a reasonable order so things that can be
# confused with each other (e.g. triangles pointing to the left or right) are
# not next to each other
filled_markers = (u'o', u'v', u's', u'*', u'h', u'<', u'H', u'x', u'8',
u'>', u'D', u'd', u'^')
linewidth_min, linewidth_max = 0.1, 5
alpha_min, alpha_max = 0.1, 1
size_min, size_max = 3, 30
legend_order = 'color', 'symbol', 'linewidth', 'edgecolor', 'alpha', 'size'
def establish_colors(self, color, hue, hue_order, palette):
"""Get a list of colors for the main component of the plots."""
n_colors = None
current_palette = sns.utils.get_color_cycle()
color_labels = None
color_title = None
if color is not None and palette is not None:
error = 'Cannot interpret colors to plot when both "color" and ' \
'"palette" are specified'
raise ValueError(error)
# Force "hue" to be a mappable
if hue is not None:
try:
# Check if "hue" is a column in the data
color_title = str(hue)
hue = self.data[hue]
except (ValueError, KeyError):
# Hue is already a mappable
if isinstance(hue, pd.Series):
color_title = hue.name
else:
color_title = None
# This will give the proper number of categories even if there are
# more categories in "hue_order" than represented in "hue"
hue_order = sns.utils.categorical_order(hue, hue_order)
color_labels = hue_order
hue = pd.Categorical(hue, hue_order)
n_colors = len(self.plot_data.groupby(hue))
else:
if hue_order is not None:
# Check if "hue_order" specifies rows in the data
samples_to_plot = self.plot_data.index.intersection(hue_order)
n_colors = len(samples_to_plot)
if n_colors > 0:
# Different color for every sample (row name)
hue = pd.Series(self.plot_data.index,
index=self.plot_data.index)
else:
error = "When 'hue=None' and 'hue_order' is specified, " \
"'hue_order' must overlap with the data row " \
"names (index)"
raise ValueError(error)
else:
# Same color for everything
hue = pd.Series('hue', index=self.plot_data.index)
n_colors = 1
if palette is not None:
colors = sns.color_palette(palette, n_colors=n_colors)
elif color is not None:
colors = sns.light_palette(color, n_colors=n_colors)
else:
colors = sns.light_palette(current_palette[0],
n_colors=n_colors)
self.color = PlottingAttribute(hue, color_title, colors, hue_order)
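    # Hue resolution implemented above, in order of precedence (illustrative):
    #   hue='col'                 -> one color per category of data['col']
    #   hue=<Series or mappable>  -> one color per category of the mappable
    #   hue=None, hue_order=[...] -> one color per listed row name
    #   hue=None, hue_order=None  -> the same color for every sample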
def _maybe_make_grouper(self, attribute, palette_maker, order=None,
func=None, default=None):
"""Create a Series from a single attribute, else make categorical
Checks if the attribute is in the data provided, or is an external
mapper
Parameters
----------
attribute : object
Either a single item to create into a series, or a series mapping
each sample to an attribute (e.g. the plotting symbol 'o' or
linewidth 1)
palette_maker : function
Function which takes an integer and creates the appropriate
palette for the attribute, e.g. shades of grey for edgecolor or
linearly spaced sizes
order : list
The order to create the attributes into
func : function
A function which returns true if the attribute is a single valid
instance, e.g. "black" for color or 0.1 for linewidth. Otherwise,
we assume that "attribute" is a mappable
Returns
-------
grouper : pandas.Series
A mapping of the high dimensional data samples to the attribute
"""
title = None
if func is None or func(attribute):
# Use this single attribute for everything
return PlottingAttribute(pd.Series(None, index=self.samples),
title, (attribute,), order)
else:
try:
# Check if this is a column in the data
attribute = self.data[attribute]
except (ValueError, KeyError):
pass
if isinstance(attribute, pd.Series):
title = attribute.name
order = sns.utils.categorical_order(attribute, order)
palette = palette_maker(len(order))
attribute = pd.Categorical(attribute, categories=order,
ordered=True)
return PlottingAttribute(pd.Series(attribute, index=self.samples),
title, palette, order)
def establish_symbols(self, marker, marker_order, text, text_order):
"""Figure out what symbol put on the axes for each data point"""
symbol_title = None
if isinstance(text, bool):
# Option 1: Text is a boolean
if text:
# 1a: text=True, so use the sample names of data as the
# plotting symbol
symbol_title = 'Samples'
symbols = [str(x) for x in self.samples]
symbol = pd.Series(self.samples, index=self.samples)
else:
# 1b: text=False, so use the specified marker for each sample
                # The palette maker and single-marker check below are
                # best-effort inferences: a lone filled marker is used as-is,
                # anything else is treated as a groupable mapping
                symbol = self._maybe_make_grouper(
                    marker, lambda n: list(self.filled_markers)[:n],
                    marker_order,
                    func=lambda m: isinstance(m, str) and m in self.filled_markers)
if marker is not None:
try:
symbol_title = marker
symbol = self.data[marker]
                    symbols = sns.utils.categorical_order(symbol, marker_order)
except (ValueError, KeyError):
# Marker is a single marker, or already a groupable
if marker in self.filled_markers:
# Single marker so make a tuple so it's indexable
symbols = (marker,)
else:
# already a groupable object
if isinstance(marker, pd.Series):
symbol_title = marker.name
n_symbols = len(self.plot_data.groupby(symbol))
if n_symbols > len(self.filled_markers):
# If there's too many categories, then
# auto-expand the existing list of filled
# markers
                            multiplier = int(np.ceil(
                                n_symbols / float(len(self.filled_markers))))
                            filled_markers = list(self.filled_markers) \
                                * multiplier
symbols = filled_markers[:n_symbols]
else:
symbols = self.filled_markers[:n_symbols]
symbol = PlottingAttribute(symbol, symbol_title, symbols,
marker_order)
else:
# Assume "text" is a mapping from row names (sample ids) of the
# data to text labels
text_order = sns.utils.categorical_order(text, text_order)
symbols = text_order
symbol = pd.Series(pd.Categorical(text, categories=text_order,
ordered=True),
index=self.samples)
symbol = PlottingAttribute(symbol, symbol_title, symbols,
text_order)
if marker is not None:
warnings.warn('Overriding plotting symbol from "marker" with '
'values in "text"')
# Turn text into a boolean
text = True
self.symbol = symbol
self.text = text
    def establish_symbol_attributes(self, linewidth, linewidth_order, edgecolor,
edgecolor_order, alpha, alpha_order, size,
size_order):
self.edgecolor = self._maybe_make_grouper(
edgecolor, self._edgecolor_palette, edgecolor_order,
mpl.colors.is_color_like)
self.linewidth = self._maybe_make_grouper(
linewidth, self._linewidth_palette, linewidth_order, np.isfinite)
self.alpha = self._maybe_make_grouper(<|fim▁hole|> alpha, self._alpha_palette, alpha_order, np.isfinite)
self.size = self._maybe_make_grouper(
size, self._size_palette, size_order, np.isfinite)
    def _edgecolor_palette(self, n_groups):
return sns.color_palette('Greys', n_colors=n_groups)
def _linewidth_palette(self, n_groups):
return np.linspace(self.linewidth_min, self.linewidth_max, n_groups)
def _alpha_palette(self, n_groups):
return np.linspace(self.alpha_min, self.alpha_max, n_groups)
def _size_palette(self, n_groups):
return np.linspace(self.size_min, self.size_max, n_groups)
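    # For example, with three groups:
    #   self._linewidth_palette(3) -> array([0.1, 2.55, 5.0])
    #   self._size_palette(3)      -> array([3.0, 16.5, 30.0])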
def symbolplotter(self, xs, ys, ax, symbol, linewidth, edgecolor, **kwargs):
"""Plots either a matplotlib marker or a string at each data position
Wraps plt.text and plt.plot
Parameters
----------
xs : array-like
List of x positions for data
ys : array-like
List of y-positions for data
        symbol : str or matplotlib marker
            What to plot at each (x, y) data position. If ``self.text`` is
            true, "symbol" is assumed to be a string and is drawn at each
            data point individually with plt.text; otherwise it is treated
            as a matplotlib marker and drawn with plt.plot.
kwargs
Any other keyword arguments to plt.text or plt.plot
"""
# If both the x- and y- positions don't have data, don't do anything
if xs.empty and ys.empty:
return
if self.text:
# Add dummy plot to make the axes in the right window
ax.plot(xs, ys, color=None)
# Plot each (x, y) position as text
for x, y in zip(xs, ys):
ax.text(x, y, symbol, **kwargs)
else:
# use plt.plot instead of plt.scatter for speed, since plotting all
# the same marker shape and color and linestyle
ax.plot(xs, ys, 'o', marker=symbol, markeredgewidth=linewidth,
markeredgecolor=edgecolor, **kwargs)
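    # Example call with self.text False (keyword form, values illustrative):
    #   self.symbolplotter(xs, ys, ax=ax, symbol='s', linewidth=0.5,
    #                      edgecolor='k', color='red')
    # draws filled squares; with self.text True the same call writes the
    # "symbol" string at each point via ax.text instead.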
def annotate_axes(self, ax):
"""Add descriptive labels to an Axes object."""
if self.xlabel is not None:
ax.set_xlabel(self.xlabel)
if self.ylabel is not None:
ax.set_ylabel(self.ylabel)
def establish_legend_data(self):
self.legend_data = pd.DataFrame(dict(color=self.color.groupby,
symbol=self.symbol.groupby,
size=self.size.groupby,
linewidth=self.linewidth.groupby,
edgecolor=self.edgecolor.groupby,
alpha=self.alpha.groupby),
index=self.samples)
self.legend_data = self.legend_data.reindex(columns=self.legend_order)
def draw_symbols(self, ax, plot_kws):
"""Plot each sample in the data"""
plot_kws = {} if plot_kws is None else plot_kws
for labels, df in self.legend_data.groupby(self.legend_order):
# Get the attributes in order, using the group label to get the
# attribute
for name, label in zip(self.legend_order, labels):
plot_kws[name] = getattr(self, name)[label]
            # "color", "alpha" and "size" pass through **plot_kws to
            # symbolplotter; alpha/size handling is still incomplete
            # (see the TODO below)
            self.symbolplotter(df.iloc[:, 0], df.iloc[:, 1], ax=ax,
                               **plot_kws)
# Iterate over all the possible modifications of the points
# TODO: add alpha and size
# for i, (color_label, df1) in enumerate(self.plot_data.groupby(self.color.groupby)):
# color = self.color.palette[i]
# for j, (marker_label, df2) in enumerate(df1.groupby(self.symbol.groupby)):
# symbol = self.symbol.palette[j]
# for k, (lw_label, df3) in enumerate(df2.groupby(self.linewidth.groupby)):
# linewidth = self.linewidth.palette[k]
# for l, (ec_label, df4) in df3.groupby(self.edgecolor):
# edgecolor = self.edgecolor.palette[l]
# # and finally ... actually plot the data!
# for m
# self.symbolplotter(df4.iloc[:, 0], df4.iloc[:, 1],
# symbol=symbol, color=color,
# ax=ax, linewidth=linewidth,
# edgecolor=edgecolor, **plot_kws)
#
class ScatterPlotter(PlotterMixin):
def __init__(self, data, x, y, color, hue, hue_order, palette, marker,
marker_order, text, text_order, linewidth, linewidth_order,
edgecolor, edgecolor_order, alpha, alpha_order, size,
size_order):
self.establish_data(data, x, y)
self.establish_symbols(marker, marker_order, text, text_order)
self.establish_symbol_attributes(linewidth, linewidth_order, edgecolor,
edgecolor_order, alpha, alpha_order, size, size_order)
self.establish_colors(color, hue, hue_order, palette)
self.establish_legend_data()
def establish_data(self, data, x, y):
if isinstance(data, pd.DataFrame):
xlabel = data.columns[x]
ylabel = data.columns[y]
else:
data = pd.DataFrame(data)
xlabel = None
ylabel = None
self.data = data
self.plot_data = self.data.iloc[:, [x, y]]
self.xlabel = xlabel
self.ylabel = ylabel
self.samples = self.plot_data.index
self.features = self.plot_data.columns
self.n_samples = len(self.samples)
self.n_features = len(self.features)
def plot(self, ax, kwargs):
self.draw_symbols(ax, kwargs)
self.annotate_axes(ax)
def scatterplot(data, x=0, y=1, color=None, hue=None, hue_order=None,
palette=None, marker='o', marker_order=None, text=False,
text_order=None, linewidth=1, linewidth_order=None,
edgecolor='k', edgecolor_order=None, alpha=1, alpha_order=None,
size=7, size_order=None, ax=None, **kwargs):
plotter = ScatterPlotter(data, x, y, color, hue, hue_order, palette,
marker, marker_order, text, text_order, linewidth,
linewidth_order, edgecolor, edgecolor_order,
alpha, alpha_order, size, size_order)
if ax is None:
ax = plt.gca()
plotter.plot(ax, kwargs)
return ax<|fim▁end|> | |
<|file_name|>test_server.py<|end_file_name|><|fim▁begin|># Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
import os
import mock
import unittest
from contextlib import contextmanager
from shutil import rmtree
from StringIO import StringIO
from tempfile import mkdtemp
from xml.dom import minidom
from eventlet import spawn, Timeout, listen
import simplejson
from swift.common.swob import Request, HeaderKeyDict
import swift.container
from swift.container import server as container_server
from swift.common.utils import mkdirs, public, replication
from swift.common.ondisk import normalize_timestamp
from test.unit import fake_http_connect
@contextmanager
def save_globals():
orig_http_connect = getattr(swift.container.server, 'http_connect',
None)
try:
yield True
finally:
swift.container.server.http_connect = orig_http_connect
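# save_globals() lets a test temporarily swap
# swift.container.server.http_connect for a fake (see the fake_http_connect
# use in test_account_update_account_override_deleted) and guarantees the
# real reference is restored even if the test body raises.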
class TestContainerController(unittest.TestCase):
"""Test swift.container.server.ContainerController"""
def setUp(self):
"""Set up for testing swift.object_server.ObjectController"""
self.testdir = os.path.join(mkdtemp(),
'tmp_test_object_server_ObjectController')
mkdirs(self.testdir)
rmtree(self.testdir)
mkdirs(os.path.join(self.testdir, 'sda1'))
mkdirs(os.path.join(self.testdir, 'sda1', 'tmp'))
self.controller = container_server.ContainerController(
{'devices': self.testdir, 'mount_check': 'false'})
def tearDown(self):
"""Tear down for testing swift.object_server.ObjectController"""
rmtree(os.path.dirname(self.testdir), ignore_errors=1)
def test_acl_container(self):
# Ensure no acl by default
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '0'})
resp = req.get_response(self.controller)
self.assert_(resp.status.startswith('201'))
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
response = req.get_response(self.controller)
self.assert_(response.status.startswith('204'))
self.assert_('x-container-read' not in response.headers)
self.assert_('x-container-write' not in response.headers)
# Ensure POSTing acls works
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': '1', 'X-Container-Read': '.r:*',
'X-Container-Write': 'account:user'})
resp = req.get_response(self.controller)
self.assert_(resp.status.startswith('204'))
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
response = req.get_response(self.controller)
self.assert_(response.status.startswith('204'))
self.assertEquals(response.headers.get('x-container-read'), '.r:*')
self.assertEquals(response.headers.get('x-container-write'),
'account:user')
# Ensure we can clear acls on POST
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': '3', 'X-Container-Read': '',
'X-Container-Write': ''})
resp = req.get_response(self.controller)
self.assert_(resp.status.startswith('204'))
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
response = req.get_response(self.controller)
self.assert_(response.status.startswith('204'))
self.assert_('x-container-read' not in response.headers)
self.assert_('x-container-write' not in response.headers)
# Ensure PUTing acls works
req = Request.blank(
'/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '4', 'X-Container-Read': '.r:*',
'X-Container-Write': 'account:user'})
resp = req.get_response(self.controller)
self.assert_(resp.status.startswith('201'))
req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'HEAD'})
response = req.get_response(self.controller)
self.assert_(response.status.startswith('204'))
self.assertEquals(response.headers.get('x-container-read'), '.r:*')
self.assertEquals(response.headers.get('x-container-write'),
'account:user')
def test_HEAD(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD',
'HTTP_X_TIMESTAMP': '0'})
response = req.get_response(self.controller)
self.assert_(response.status.startswith('204'))
self.assertEquals(int(response.headers['x-container-bytes-used']), 0)<|fim▁hole|> 'HTTP_X_TIMESTAMP': '1', 'HTTP_X_SIZE': 42,
'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x'})
req2.get_response(self.controller)
response = req.get_response(self.controller)
self.assertEquals(int(response.headers['x-container-bytes-used']), 42)
self.assertEquals(int(response.headers['x-container-object-count']), 1)
def test_HEAD_not_found(self):
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_HEAD_invalid_partition(self):
req = Request.blank('/sda1/./a/c', environ={'REQUEST_METHOD': 'HEAD',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_HEAD_insufficient_storage(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'HEAD',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 507)
def test_HEAD_invalid_content_type(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'},
headers={'Accept': 'application/plain'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 406)
def test_HEAD_invalid_format(self):
format = '%D1%BD%8A9' # invalid UTF-8; should be %E1%BD%8A9 (E -> D)
req = Request.blank(
'/sda1/p/a/c?format=' + format,
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_PUT(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 202)
def test_PUT_obj_not_found(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '1', 'X-Size': '0',
'X-Content-Type': 'text/plain', 'X-ETag': 'e'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_PUT_GET_metadata(self):
# Set metadata header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(1),
'X-Container-Meta-Test': 'Value'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value')
# Set another metadata header, ensuring old one doesn't disappear
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(1),
'X-Container-Meta-Test2': 'Value2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value')
self.assertEquals(resp.headers.get('x-container-meta-test2'), 'Value2')
# Update metadata header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(3),
'X-Container-Meta-Test': 'New Value'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-container-meta-test'),
'New Value')
# Send old update to metadata header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(2),
'X-Container-Meta-Test': 'Old Value'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-container-meta-test'),
'New Value')
# Remove metadata header (by setting it to empty)
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(4),
'X-Container-Meta-Test': ''})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assert_('x-container-meta-test' not in resp.headers)
def test_PUT_invalid_partition(self):
req = Request.blank('/sda1/./a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_PUT_timestamp_not_float(self):
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 'not-float'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_PUT_insufficient_storage(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 507)
def test_POST_HEAD_metadata(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(1)})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
# Set metadata header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(1),
'X-Container-Meta-Test': 'Value'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value')
# Update metadata header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(3),
'X-Container-Meta-Test': 'New Value'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-container-meta-test'),
'New Value')
# Send old update to metadata header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(2),
'X-Container-Meta-Test': 'Old Value'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-container-meta-test'),
'New Value')
# Remove metadata header (by setting it to empty)
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(4),
'X-Container-Meta-Test': ''})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assert_('x-container-meta-test' not in resp.headers)
def test_POST_invalid_partition(self):
req = Request.blank('/sda1/./a/c', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_POST_timestamp_not_float(self):
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': 'not-float'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_POST_insufficient_storage(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 507)
def test_POST_invalid_container_sync_to(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '1'},
headers={'x-container-sync-to': '192.168.0.1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_POST_after_DELETE_not_found(self):
req = Request.blank('/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '1'})
resp = req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': '2'})
resp = req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c/',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': '3'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_DELETE_obj_not_found(self):
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_DELETE_container_not_found(self):
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_PUT_utf8(self):
snowman = u'\u2603'
container_name = snowman.encode('utf-8')
req = Request.blank(
'/sda1/p/a/%s' % container_name, environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
def test_account_update_mismatched_host_device(self):
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'},
headers={'X-Timestamp': '0000000001.00000',
'X-Account-Host': '127.0.0.1:0',
'X-Account-Partition': '123',
'X-Account-Device': 'sda1,sda2'})
broker = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
resp = self.controller.account_update(req, 'a', 'c', broker)
self.assertEquals(resp.status_int, 400)
def test_account_update_account_override_deleted(self):
bindsock = listen(('127.0.0.1', 0))
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'},
headers={'X-Timestamp': '0000000001.00000',
'X-Account-Host': '%s:%s' %
bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1',
'X-Account-Override-Deleted': 'yes'})
with save_globals():
new_connect = fake_http_connect(200, count=123)
swift.container.server.http_connect = new_connect
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
def test_PUT_account_update(self):
bindsock = listen(('127.0.0.1', 0))
def accept(return_code, expected_timestamp):
try:
with Timeout(3):
sock, addr = bindsock.accept()
inc = sock.makefile('rb')
out = sock.makefile('wb')
out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
return_code)
out.flush()
self.assertEquals(inc.readline(),
'PUT /sda1/123/a/c HTTP/1.1\r\n')
headers = {}
line = inc.readline()
while line and line != '\r\n':
headers[line.split(':')[0].lower()] = \
line.split(':')[1].strip()
line = inc.readline()
self.assertEquals(headers['x-put-timestamp'],
expected_timestamp)
except BaseException as err:
return err
return None
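        # accept() plays a one-shot fake account server: it takes a single
        # connection on bindsock, replies with the canned status code, and
        # checks that the container server forwarded the expected
        # X-Put-Timestamp. Failures are returned rather than raised so the
        # main thread can re-raise them after the request completes.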
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '0000000001.00000',
'X-Account-Host': '%s:%s' % bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1'})
event = spawn(accept, 201, '0000000001.00000')
try:
with Timeout(3):
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
finally:
err = event.wait()
if err:
raise Exception(err)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': '2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '0000000003.00000',
'X-Account-Host': '%s:%s' % bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1'})
event = spawn(accept, 404, '0000000003.00000')
try:
with Timeout(3):
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
finally:
err = event.wait()
if err:
raise Exception(err)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '0000000005.00000',
'X-Account-Host': '%s:%s' % bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1'})
event = spawn(accept, 503, '0000000005.00000')
got_exc = False
try:
with Timeout(3):
resp = req.get_response(self.controller)
except BaseException as err:
got_exc = True
finally:
err = event.wait()
if err:
raise Exception(err)
self.assert_(not got_exc)
def test_PUT_reset_container_sync(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'x-timestamp': '1',
'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
info = db.get_info()
self.assertEquals(info['x_container_sync_point1'], -1)
self.assertEquals(info['x_container_sync_point2'], -1)
db.set_x_container_sync_points(123, 456)
info = db.get_info()
self.assertEquals(info['x_container_sync_point1'], 123)
self.assertEquals(info['x_container_sync_point2'], 456)
# Set to same value
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'x-timestamp': '1',
'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 202)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
info = db.get_info()
self.assertEquals(info['x_container_sync_point1'], 123)
self.assertEquals(info['x_container_sync_point2'], 456)
# Set to new value
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'x-timestamp': '1',
'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 202)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
info = db.get_info()
self.assertEquals(info['x_container_sync_point1'], -1)
self.assertEquals(info['x_container_sync_point2'], -1)
def test_POST_reset_container_sync(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'x-timestamp': '1',
'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
info = db.get_info()
self.assertEquals(info['x_container_sync_point1'], -1)
self.assertEquals(info['x_container_sync_point2'], -1)
db.set_x_container_sync_points(123, 456)
info = db.get_info()
self.assertEquals(info['x_container_sync_point1'], 123)
self.assertEquals(info['x_container_sync_point2'], 456)
# Set to same value
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'x-timestamp': '1',
'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
info = db.get_info()
self.assertEquals(info['x_container_sync_point1'], 123)
self.assertEquals(info['x_container_sync_point2'], 456)
# Set to new value
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'x-timestamp': '1',
'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
info = db.get_info()
self.assertEquals(info['x_container_sync_point1'], -1)
self.assertEquals(info['x_container_sync_point2'], -1)
def test_DELETE(self):
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': '2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'GET'}, headers={'X-Timestamp': '3'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_DELETE_not_found(self):
# Even if the container wasn't previously heard of, the container
# server will accept the delete and replicate it to where it belongs
# later.
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE', 'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_DELETE_object(self):
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': '2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0',
'HTTP_X_SIZE': 1, 'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': '3'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 409)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': '4'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': '5'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'GET'}, headers={'X-Timestamp': '6'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_DELETE_account_update(self):
bindsock = listen(('127.0.0.1', 0))
def accept(return_code, expected_timestamp):
try:
with Timeout(3):
sock, addr = bindsock.accept()
inc = sock.makefile('rb')
out = sock.makefile('wb')
out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
return_code)
out.flush()
self.assertEquals(inc.readline(),
'PUT /sda1/123/a/c HTTP/1.1\r\n')
headers = {}
line = inc.readline()
while line and line != '\r\n':
headers[line.split(':')[0].lower()] = \
line.split(':')[1].strip()
line = inc.readline()
self.assertEquals(headers['x-delete-timestamp'],
expected_timestamp)
except BaseException as err:
return err
return None
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': '0000000002.00000',
'X-Account-Host': '%s:%s' % bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1'})
event = spawn(accept, 204, '0000000002.00000')
try:
with Timeout(3):
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
finally:
err = event.wait()
if err:
raise Exception(err)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': '0000000003.00000',
'X-Account-Host': '%s:%s' % bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1'})
event = spawn(accept, 404, '0000000003.00000')
try:
with Timeout(3):
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
finally:
err = event.wait()
if err:
raise Exception(err)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '4'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': '0000000005.00000',
'X-Account-Host': '%s:%s' % bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1'})
event = spawn(accept, 503, '0000000005.00000')
got_exc = False
try:
with Timeout(3):
resp = req.get_response(self.controller)
except BaseException as err:
got_exc = True
finally:
err = event.wait()
if err:
raise Exception(err)
self.assert_(not got_exc)
def test_DELETE_invalid_partition(self):
req = Request.blank(
'/sda1/./a/c', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_DELETE_timestamp_not_float(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': 'not-float'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_DELETE_insufficient_storage(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 507)
def test_GET_over_limit(self):
req = Request.blank(
'/sda1/p/a/c?limit=%d' %
(container_server.CONTAINER_LISTING_LIMIT + 1),
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 412)
def test_GET_json(self):
# make a container
req = Request.blank(
'/sda1/p/a/jsonc', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
# test an empty container
req = Request.blank(
'/sda1/p/a/jsonc?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 200)
self.assertEquals(simplejson.loads(resp.body), [])
# fill the container
for i in range(3):
req = Request.blank(
'/sda1/p/a/jsonc/%s' % i, environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
# test format
json_body = [{"name": "0",
"hash": "x",
"bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"},
{"name": "1",
"hash": "x",
"bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"},
{"name": "2",
"hash": "x",
"bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"}]
req = Request.blank(
'/sda1/p/a/jsonc?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/json')
self.assertEquals(simplejson.loads(resp.body), json_body)
self.assertEquals(resp.charset, 'utf-8')
req = Request.blank(
'/sda1/p/a/jsonc?format=json',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/json')
for accept in ('application/json', 'application/json;q=1.0,*/*;q=0.9',
'*/*;q=0.9,application/json;q=1.0', 'application/*'):
req = Request.blank(
'/sda1/p/a/jsonc',
environ={'REQUEST_METHOD': 'GET'})
req.accept = accept
resp = req.get_response(self.controller)
self.assertEquals(
simplejson.loads(resp.body), json_body,
'Invalid body for Accept: %s' % accept)
self.assertEquals(
resp.content_type, 'application/json',
'Invalid content_type for Accept: %s' % accept)
req = Request.blank(
'/sda1/p/a/jsonc',
environ={'REQUEST_METHOD': 'HEAD'})
req.accept = accept
resp = req.get_response(self.controller)
self.assertEquals(
resp.content_type, 'application/json',
'Invalid content_type for Accept: %s' % accept)
def test_GET_plain(self):
# make a container
req = Request.blank(
'/sda1/p/a/plainc', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
# test an empty container
req = Request.blank(
'/sda1/p/a/plainc', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
# fill the container
for i in range(3):
req = Request.blank(
'/sda1/p/a/plainc/%s' % i, environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
plain_body = '0\n1\n2\n'
req = Request.blank('/sda1/p/a/plainc',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'text/plain')
self.assertEquals(resp.body, plain_body)
self.assertEquals(resp.charset, 'utf-8')
req = Request.blank('/sda1/p/a/plainc',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'text/plain')
for accept in ('', 'text/plain', 'application/xml;q=0.8,*/*;q=0.9',
'*/*;q=0.9,application/xml;q=0.8', '*/*',
'text/plain,application/xml'):
req = Request.blank(
'/sda1/p/a/plainc',
environ={'REQUEST_METHOD': 'GET'})
req.accept = accept
resp = req.get_response(self.controller)
self.assertEquals(
resp.body, plain_body,
'Invalid body for Accept: %s' % accept)
self.assertEquals(
resp.content_type, 'text/plain',
'Invalid content_type for Accept: %s' % accept)
req = Request.blank(
'/sda1/p/a/plainc',
environ={'REQUEST_METHOD': 'GET'})
req.accept = accept
resp = req.get_response(self.controller)
self.assertEquals(
resp.content_type, 'text/plain',
'Invalid content_type for Accept: %s' % accept)
# test conflicting formats
req = Request.blank(
'/sda1/p/a/plainc?format=plain',
environ={'REQUEST_METHOD': 'GET'})
req.accept = 'application/json'
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'text/plain')
self.assertEquals(resp.body, plain_body)
# test unknown format uses default plain
req = Request.blank(
'/sda1/p/a/plainc?format=somethingelse',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.content_type, 'text/plain')
self.assertEquals(resp.body, plain_body)
def test_GET_json_last_modified(self):
# make a container
req = Request.blank(
'/sda1/p/a/jsonc', environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i, d in [(0, 1.5), (1, 1.0), ]:
req = Request.blank(
'/sda1/p/a/jsonc/%s' % i, environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': d,
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
# test format
# last_modified format must be uniform, even when there are not msecs
json_body = [{"name": "0",
"hash": "x",
"bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.500000"},
{"name": "1",
"hash": "x",
"bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"}, ]
req = Request.blank(
'/sda1/p/a/jsonc?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/json')
self.assertEquals(simplejson.loads(resp.body), json_body)
self.assertEquals(resp.charset, 'utf-8')
def test_GET_xml(self):
# make a container
req = Request.blank(
'/sda1/p/a/xmlc', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
# fill the container
for i in range(3):
req = Request.blank(
'/sda1/p/a/xmlc/%s' % i,
environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
xml_body = '<?xml version="1.0" encoding="UTF-8"?>\n' \
'<container name="xmlc">' \
'<object><name>0</name><hash>x</hash><bytes>0</bytes>' \
'<content_type>text/plain</content_type>' \
'<last_modified>1970-01-01T00:00:01.000000' \
'</last_modified></object>' \
'<object><name>1</name><hash>x</hash><bytes>0</bytes>' \
'<content_type>text/plain</content_type>' \
'<last_modified>1970-01-01T00:00:01.000000' \
'</last_modified></object>' \
'<object><name>2</name><hash>x</hash><bytes>0</bytes>' \
'<content_type>text/plain</content_type>' \
'<last_modified>1970-01-01T00:00:01.000000' \
'</last_modified></object>' \
'</container>'
# tests
req = Request.blank(
'/sda1/p/a/xmlc?format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/xml')
self.assertEquals(resp.body, xml_body)
self.assertEquals(resp.charset, 'utf-8')
req = Request.blank(
'/sda1/p/a/xmlc?format=xml',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/xml')
for xml_accept in (
'application/xml', 'application/xml;q=1.0,*/*;q=0.9',
'*/*;q=0.9,application/xml;q=1.0', 'application/xml,text/xml'):
req = Request.blank(
'/sda1/p/a/xmlc',
environ={'REQUEST_METHOD': 'GET'})
req.accept = xml_accept
resp = req.get_response(self.controller)
self.assertEquals(
resp.body, xml_body,
'Invalid body for Accept: %s' % xml_accept)
self.assertEquals(
resp.content_type, 'application/xml',
'Invalid content_type for Accept: %s' % xml_accept)
req = Request.blank(
'/sda1/p/a/xmlc',
environ={'REQUEST_METHOD': 'HEAD'})
req.accept = xml_accept
resp = req.get_response(self.controller)
self.assertEquals(
resp.content_type, 'application/xml',
'Invalid content_type for Accept: %s' % xml_accept)
req = Request.blank(
'/sda1/p/a/xmlc',
environ={'REQUEST_METHOD': 'GET'})
req.accept = 'text/xml'
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'text/xml')
self.assertEquals(resp.body, xml_body)
def test_GET_marker(self):
# make a container
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
# fill the container
for i in range(3):
req = Request.blank(
'/sda1/p/a/c/%s' % i, environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x', 'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
# test limit with marker
req = Request.blank('/sda1/p/a/c?limit=2&marker=1',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
result = resp.body.split()
self.assertEquals(result, ['2', ])
def test_weird_content_types(self):
snowman = u'\u2603'
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i, ctype in enumerate((snowman.encode('utf-8'),
'text/plain; charset="utf-8"')):
req = Request.blank(
'/sda1/p/a/c/%s' % i, environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1', 'HTTP_X_CONTENT_TYPE': ctype,
'HTTP_X_ETAG': 'x', 'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
result = [x['content_type'] for x in simplejson.loads(resp.body)]
self.assertEquals(result, [u'\u2603', 'text/plain;charset="utf-8"'])
def test_GET_accept_not_valid(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
req.accept = 'application/xml*'
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 406)
def test_GET_limit(self):
# make a container
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
# fill the container
for i in range(3):
req = Request.blank(
'/sda1/p/a/c/%s' % i,
environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
# test limit
req = Request.blank(
'/sda1/p/a/c?limit=2', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
result = resp.body.split()
self.assertEquals(result, ['0', '1'])
def test_GET_prefix(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i in ('a1', 'b1', 'a2', 'b2', 'a3', 'b3'):
req = Request.blank(
'/sda1/p/a/c/%s' % i,
environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c?prefix=a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.body.split(), ['a1', 'a2', 'a3'])
def test_GET_delimiter_too_long(self):
req = Request.blank('/sda1/p/a/c?delimiter=xx',
environ={'REQUEST_METHOD': 'GET',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 412)
def test_GET_delimiter(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i in ('US-TX-A', 'US-TX-B', 'US-OK-A', 'US-OK-B', 'US-UT-A'):
req = Request.blank(
'/sda1/p/a/c/%s' % i,
environ={
'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c?prefix=US-&delimiter=-&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(
simplejson.loads(resp.body),
[{"subdir": "US-OK-"},
{"subdir": "US-TX-"},
{"subdir": "US-UT-"}])
def test_GET_delimiter_xml(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i in ('US-TX-A', 'US-TX-B', 'US-OK-A', 'US-OK-B', 'US-UT-A'):
req = Request.blank(
'/sda1/p/a/c/%s' % i,
environ={
'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c?prefix=US-&delimiter=-&format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(
resp.body, '<?xml version="1.0" encoding="UTF-8"?>'
'\n<container name="c"><subdir name="US-OK-">'
'<name>US-OK-</name></subdir>'
'<subdir name="US-TX-"><name>US-TX-</name></subdir>'
'<subdir name="US-UT-"><name>US-UT-</name></subdir></container>')
def test_GET_delimiter_xml_with_quotes(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
req = Request.blank(
'/sda1/p/a/c/<\'sub\' "dir">/object',
environ={
'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c?delimiter=/&format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
dom = minidom.parseString(resp.body)
self.assert_(len(dom.getElementsByTagName('container')) == 1)
container = dom.getElementsByTagName('container')[0]
self.assert_(len(container.getElementsByTagName('subdir')) == 1)
subdir = container.getElementsByTagName('subdir')[0]
self.assertEquals(unicode(subdir.attributes['name'].value),
u'<\'sub\' "dir">/')
self.assert_(len(subdir.getElementsByTagName('name')) == 1)
name = subdir.getElementsByTagName('name')[0]
self.assertEquals(unicode(name.childNodes[0].data),
u'<\'sub\' "dir">/')
def test_GET_path(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i in ('US/TX', 'US/TX/B', 'US/OK', 'US/OK/B', 'US/UT/A'):
req = Request.blank(
'/sda1/p/a/c/%s' % i,
environ={
'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c?path=US&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(
simplejson.loads(resp.body),
[{"name": "US/OK", "hash": "x", "bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"},
{"name": "US/TX", "hash": "x", "bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"}])
def test_GET_insufficient_storage(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'GET',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 507)
def test_through_call(self):
inbuf = StringIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEquals(errbuf.getvalue(), '')
self.assertEquals(outbuf.getvalue()[:4], '404 ')
def test_through_call_invalid_path(self):
inbuf = StringIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'PATH_INFO': '/bob',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEquals(errbuf.getvalue(), '')
self.assertEquals(outbuf.getvalue()[:4], '400 ')
def test_through_call_invalid_path_utf8(self):
inbuf = StringIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'PATH_INFO': '\x00',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEquals(errbuf.getvalue(), '')
self.assertEquals(outbuf.getvalue()[:4], '412 ')
def test_invalid_method_doesnt_exist(self):
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': 'method_doesnt_exist',
'PATH_INFO': '/sda1/p/a/c'},
start_response)
self.assertEquals(errbuf.getvalue(), '')
self.assertEquals(outbuf.getvalue()[:4], '405 ')
def test_invalid_method_is_not_public(self):
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': '__init__',
'PATH_INFO': '/sda1/p/a/c'},
start_response)
self.assertEquals(errbuf.getvalue(), '')
self.assertEquals(outbuf.getvalue()[:4], '405 ')
def test_params_format(self):
req = Request.blank(
'/sda1/p/a/c',
headers={'X-Timestamp': normalize_timestamp(1)},
environ={'REQUEST_METHOD': 'PUT'})
req.get_response(self.controller)
for format in ('xml', 'json'):
req = Request.blank('/sda1/p/a/c?format=%s' % format,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 200)
def test_params_utf8(self):
# Bad UTF8 sequence, all parameters should cause 400 error
for param in ('delimiter', 'limit', 'marker', 'path', 'prefix',
'end_marker', 'format'):
req = Request.blank('/sda1/p/a/c?%s=\xce' % param,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400,
"%d on param %s" % (resp.status_int, param))
# Good UTF8 sequence for delimiter, too long (1 byte delimiters only)
req = Request.blank('/sda1/p/a/c?delimiter=\xce\xa9',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 412,
"%d on param delimiter" % (resp.status_int))
req = Request.blank('/sda1/p/a/c',
headers={'X-Timestamp': normalize_timestamp(1)},
environ={'REQUEST_METHOD': 'PUT'})
req.get_response(self.controller)
# Good UTF8 sequence, ignored for limit, doesn't affect other queries
for param in ('limit', 'marker', 'path', 'prefix', 'end_marker',
'format'):
req = Request.blank('/sda1/p/a/c?%s=\xce\xa9' % param,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204,
"%d on param %s" % (resp.status_int, param))
def test_put_auto_create(self):
headers = {'x-timestamp': normalize_timestamp(1),
'x-size': '0',
'x-content-type': 'text/plain',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e'}
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
req = Request.blank('/sda1/p/.a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank('/sda1/p/a/.c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
req = Request.blank('/sda1/p/a/c/.o',
environ={'REQUEST_METHOD': 'PUT'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_delete_auto_create(self):
headers = {'x-timestamp': normalize_timestamp(1)}
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
req = Request.blank('/sda1/p/.a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/.c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
req = Request.blank('/sda1/p/a/.c/.o',
environ={'REQUEST_METHOD': 'DELETE'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_content_type_on_HEAD(self):
Request.blank('/sda1/p/a/o',
headers={'X-Timestamp': normalize_timestamp(1)},
environ={'REQUEST_METHOD': 'PUT'}).get_response(
self.controller)
env = {'REQUEST_METHOD': 'HEAD'}
req = Request.blank('/sda1/p/a/o?format=xml', environ=env)
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/xml')
self.assertEquals(resp.charset, 'utf-8')
req = Request.blank('/sda1/p/a/o?format=json', environ=env)
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/json')
self.assertEquals(resp.charset, 'utf-8')
req = Request.blank('/sda1/p/a/o', environ=env)
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'text/plain')
self.assertEquals(resp.charset, 'utf-8')
req = Request.blank(
'/sda1/p/a/o', headers={'Accept': 'application/json'}, environ=env)
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/json')
self.assertEquals(resp.charset, 'utf-8')
req = Request.blank(
'/sda1/p/a/o', headers={'Accept': 'application/xml'}, environ=env)
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/xml')
self.assertEquals(resp.charset, 'utf-8')
def test_updating_multiple_container_servers(self):
http_connect_args = []
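        # Capture every backend connection the controller opens so the test
        # can assert below on the exact headers sent to each account host.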
def fake_http_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None, ssl=False):
class SuccessfulFakeConn(object):
@property
def status(self):
return 200
def getresponse(self):
return self
def read(self):
return ''
captured_args = {'ipaddr': ipaddr, 'port': port,
'device': device, 'partition': partition,
'method': method, 'path': path, 'ssl': ssl,
'headers': headers, 'query_string': query_string}
http_connect_args.append(
dict((k, v) for k, v in captured_args.iteritems()
if v is not None))
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '12345',
'X-Account-Partition': '30',
'X-Account-Host': '1.2.3.4:5, 6.7.8.9:10',
'X-Account-Device': 'sdb1, sdf1'})
orig_http_connect = container_server.http_connect
try:
container_server.http_connect = fake_http_connect
req.get_response(self.controller)
finally:
container_server.http_connect = orig_http_connect
http_connect_args.sort(key=operator.itemgetter('ipaddr'))
self.assertEquals(len(http_connect_args), 2)
self.assertEquals(
http_connect_args[0],
{'ipaddr': '1.2.3.4',
'port': '5',
'path': '/a/c',
'device': 'sdb1',
'partition': '30',
'method': 'PUT',
'ssl': False,
'headers': HeaderKeyDict({
'x-bytes-used': 0,
'x-delete-timestamp': '0',
'x-object-count': 0,
'x-put-timestamp': '0000012345.00000',
'referer': 'PUT http://localhost/sda1/p/a/c',
'user-agent': 'container-server %d' % os.getpid(),
'x-trans-id': '-'})})
self.assertEquals(
http_connect_args[1],
{'ipaddr': '6.7.8.9',
'port': '10',
'path': '/a/c',
'device': 'sdf1',
'partition': '30',
'method': 'PUT',
'ssl': False,
'headers': HeaderKeyDict({
'x-bytes-used': 0,
'x-delete-timestamp': '0',
'x-object-count': 0,
'x-put-timestamp': '0000012345.00000',
'referer': 'PUT http://localhost/sda1/p/a/c',
'user-agent': 'container-server %d' % os.getpid(),
'x-trans-id': '-'})})
def test_serv_reserv(self):
        # Test that the replication_server flag is set from the configuration file.
container_controller = container_server.ContainerController
conf = {'devices': self.testdir, 'mount_check': 'false'}
self.assertEquals(container_controller(conf).replication_server, None)
for val in [True, '1', 'True', 'true']:
conf['replication_server'] = val
self.assertTrue(container_controller(conf).replication_server)
for val in [False, 0, '0', 'False', 'false', 'test_string']:
conf['replication_server'] = val
self.assertFalse(container_controller(conf).replication_server)
def test_list_allowed_methods(self):
# Test list of allowed_methods
obj_methods = ['DELETE', 'PUT', 'HEAD', 'GET', 'POST']
repl_methods = ['REPLICATE']
for method_name in obj_methods:
method = getattr(self.controller, method_name)
self.assertFalse(hasattr(method, 'replication'))
for method_name in repl_methods:
method = getattr(self.controller, method_name)
self.assertEquals(method.replication, True)
def test_correct_allowed_method(self):
        # Test that an allowed method is dispatched correctly through
        # swift.container.server.ContainerController.__call__
inbuf = StringIO()
errbuf = StringIO()
outbuf = StringIO()
self.controller = container_server.ContainerController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false'})
def start_response(*args):
"""Sends args to outbuf"""
outbuf.writelines(args)
method = 'PUT'
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
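        # Replace the controller's PUT with a stub wrapped in @public so that
        # __call__ treats it as an allowed, publicly accessible method.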
method_res = mock.MagicMock()
mock_method = public(lambda x: mock.MagicMock(return_value=method_res))
with mock.patch.object(self.controller, method, new=mock_method):
response = self.controller.__call__(env, start_response)
self.assertEqual(response, method_res)
def test_not_allowed_method(self):
        # Test that a method not allowed in this configuration is rejected by
        # swift.container.server.ContainerController.__call__
inbuf = StringIO()
errbuf = StringIO()
outbuf = StringIO()
self.controller = container_server.ContainerController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false'})
def start_response(*args):
"""Sends args to outbuf"""
outbuf.writelines(args)
method = 'PUT'
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
answer = ['<html><h1>Method Not Allowed</h1><p>The method is not '
'allowed for this resource.</p></html>']
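        # A replication-only method must be rejected with this 405 body when
        # the server is configured with replication_server = false.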
mock_method = replication(public(lambda x: mock.MagicMock()))
with mock.patch.object(self.controller, method, new=mock_method):
response = self.controller.__call__(env, start_response)
self.assertEqual(response, answer)
if __name__ == '__main__':
unittest.main()<|fim▁end|> | self.assertEquals(int(response.headers['x-container-object-count']), 0)
req2 = Request.blank(
'/sda1/p/a/c/o', environ={
'REQUEST_METHOD': 'PUT', |
<|file_name|>de.js<|end_file_name|><|fim▁begin|>/*
Copyright (c) 2003-2016, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or http://ckeditor.com/license
*/
CKEDITOR.plugins.setLang( 'flash', 'de', {
access: 'Skriptzugriff',
accessAlways: 'Immer',
accessNever: 'Nie',
accessSameDomain: 'Gleiche Domain',
alignAbsBottom: 'Abs Unten',
alignAbsMiddle: 'Abs Mitte',
alignBaseline: 'Basislinie',<|fim▁hole|> chkFull: 'Vollbildmodus erlauben',
chkLoop: 'Endlosschleife',
chkMenu: 'Flash-Menü aktivieren',
chkPlay: 'Automatisch Abspielen',
flashvars: 'Variablen für Flash',
hSpace: 'Horizontal-Abstand',
properties: 'Flash-Eigenschaften',
propertiesTab: 'Eigenschaften',
quality: 'Qualität',
qualityAutoHigh: 'Auto Hoch',
qualityAutoLow: 'Auto Niedrig',
qualityBest: 'Beste',
qualityHigh: 'Hoch',
qualityLow: 'Niedrig',
qualityMedium: 'Mittel',
scale: 'Skalierung',
scaleAll: 'Alles anzeigen',
scaleFit: 'Passgenau',
scaleNoBorder: 'Ohne Rand',
title: 'Flash-Eigenschaften',
vSpace: 'Vertikal-Abstand',
validateHSpace: 'HSpace muss eine Zahl sein.',
validateSrc: 'URL darf nicht leer sein.',
validateVSpace: 'VSpace muss eine Zahl sein.',
windowMode: 'Fenstermodus',
windowModeOpaque: 'Deckend',
windowModeTransparent: 'Transparent',
windowModeWindow: 'Fenster'
} );<|fim▁end|> | alignTextTop: 'Text oben',
bgcolor: 'Hintergrundfarbe', |
<|file_name|>event_listeners.js<|end_file_name|><|fim▁begin|>var AWS = require('./core');
var SequentialExecutor = require('./sequential_executor');
/**
* The namespace used to register global event listeners for request building
* and sending.
*/
AWS.EventListeners = {
/**
* @!attribute VALIDATE_CREDENTIALS
* A request listener that validates whether the request is being
* sent with credentials.
* Handles the {AWS.Request~validate 'validate' Request event}
* @example Sending a request without validating credentials
* var listener = AWS.EventListeners.Core.VALIDATE_CREDENTIALS;
* request.removeListener('validate', listener);
* @readonly
* @return [Function]
* @!attribute VALIDATE_REGION
* A request listener that validates whether the region is set
* for a request.
* Handles the {AWS.Request~validate 'validate' Request event}
* @example Sending a request without validating region configuration
* var listener = AWS.EventListeners.Core.VALIDATE_REGION;
* request.removeListener('validate', listener);
* @readonly
* @return [Function]
* @!attribute VALIDATE_PARAMETERS
* A request listener that validates input parameters in a request.
* Handles the {AWS.Request~validate 'validate' Request event}
* @example Sending a request without validating parameters
* var listener = AWS.EventListeners.Core.VALIDATE_PARAMETERS;
* request.removeListener('validate', listener);
* @example Disable parameter validation globally
* AWS.EventListeners.Core.removeListener('validate',
* AWS.EventListeners.Core.VALIDATE_REGION);
* @readonly
* @return [Function]
* @!attribute SEND
* A request listener that initiates the HTTP connection for a
* request being sent. Handles the {AWS.Request~send 'send' Request event}
* @example Replacing the HTTP handler
* var listener = AWS.EventListeners.Core.SEND;
* request.removeListener('send', listener);
* request.on('send', function(response) {
* customHandler.send(response);
* });
* @return [Function]
* @readonly
* @!attribute HTTP_DATA
* A request listener that reads data from the HTTP connection in order
* to build the response data.
* Handles the {AWS.Request~httpData 'httpData' Request event}.
* Remove this handler if you are overriding the 'httpData' event and
* do not want extra data processing and buffering overhead.
* @example Disabling default data processing
* var listener = AWS.EventListeners.Core.HTTP_DATA;
* request.removeListener('httpData', listener);
* @return [Function]
* @readonly
*/
Core: {} /* doc hack */
};
AWS.EventListeners = {
Core: new SequentialExecutor().addNamedListeners(function(add, addAsync) {
addAsync('VALIDATE_CREDENTIALS', 'validate',
function VALIDATE_CREDENTIALS(req, done) {
if (!req.service.api.signatureVersion) return done(); // none
req.service.config.getCredentials(function(err) {
if (err) {
req.response.error = AWS.util.error(err,
{code: 'CredentialsError', message: 'Missing credentials in config'});
}
done();
});
});
add('VALIDATE_REGION', 'validate', function VALIDATE_REGION(req) {
if (!req.service.config.region && !req.service.isGlobalEndpoint) {
req.response.error = AWS.util.error(new Error(),
{code: 'ConfigError', message: 'Missing region in config'});
}
});
add('BUILD_IDEMPOTENCY_TOKENS', 'validate', function BUILD_IDEMPOTENCY_TOKENS(req) {
var operation = req.service.api.operations[req.operation];
if (!operation) {
return;
}
var idempotentMembers = operation.idempotentMembers;
if (!idempotentMembers.length) {
return;
}
// creates a copy of params so user's param object isn't mutated
var params = AWS.util.copy(req.params);
for (var i = 0, iLen = idempotentMembers.length; i < iLen; i++) {
if (!params[idempotentMembers[i]]) {
// add the member
params[idempotentMembers[i]] = AWS.util.uuid.v4();
}
}
req.params = params;
});
add('VALIDATE_PARAMETERS', 'validate', function VALIDATE_PARAMETERS(req) {
var rules = req.service.api.operations[req.operation].input;
var validation = req.service.config.paramValidation;
new AWS.ParamValidator(validation).validate(rules, req.params);
});
addAsync('COMPUTE_SHA256', 'afterBuild', function COMPUTE_SHA256(req, done) {
req.haltHandlersOnError();
var operation = req.service.api.operations[req.operation];
var authtype = operation ? operation.authtype : '';
if (!req.service.api.signatureVersion && !authtype) return done(); // none
if (req.service.getSignerClass(req) === AWS.Signers.V4) {
var body = req.httpRequest.body || '';
if (authtype.indexOf('unsigned-body') >= 0) {
req.httpRequest.headers['X-Amz-Content-Sha256'] = 'UNSIGNED-PAYLOAD';
return done();
}
AWS.util.computeSha256(body, function(err, sha) {
if (err) {
done(err);
}
else {
req.httpRequest.headers['X-Amz-Content-Sha256'] = sha;
done();
}
});
} else {
done();
}
});
add('SET_CONTENT_LENGTH', 'afterBuild', function SET_CONTENT_LENGTH(req) {
if (req.httpRequest.headers['Content-Length'] === undefined) {
var length = AWS.util.string.byteLength(req.httpRequest.body);
req.httpRequest.headers['Content-Length'] = length;
}
});
add('SET_HTTP_HOST', 'afterBuild', function SET_HTTP_HOST(req) {
req.httpRequest.headers['Host'] = req.httpRequest.endpoint.host;
});
add('RESTART', 'restart', function RESTART() {
var err = this.response.error;
if (!err || !err.retryable) return;
this.httpRequest = new AWS.HttpRequest(<|fim▁hole|>
if (this.response.retryCount < this.service.config.maxRetries) {
this.response.retryCount++;
} else {
this.response.error = null;
}
});
addAsync('SIGN', 'sign', function SIGN(req, done) {
var service = req.service;
var operation = req.service.api.operations[req.operation];
var authtype = operation ? operation.authtype : '';
if (!service.api.signatureVersion && !authtype) return done(); // none
service.config.getCredentials(function (err, credentials) {
if (err) {
req.response.error = err;
return done();
}
try {
var date = AWS.util.date.getDate();
var SignerClass = service.getSignerClass(req);
var signer = new SignerClass(req.httpRequest,
service.api.signingName || service.api.endpointPrefix,
{
signatureCache: service.config.signatureCache,
operation: operation
});
signer.setServiceClientId(service._clientId);
// clear old authorization headers
delete req.httpRequest.headers['Authorization'];
delete req.httpRequest.headers['Date'];
delete req.httpRequest.headers['X-Amz-Date'];
// add new authorization
signer.addAuthorization(credentials, date);
req.signedAt = date;
} catch (e) {
req.response.error = e;
}
done();
});
});
add('VALIDATE_RESPONSE', 'validateResponse', function VALIDATE_RESPONSE(resp) {
if (this.service.successfulResponse(resp, this)) {
resp.data = {};
resp.error = null;
} else {
resp.data = null;
resp.error = AWS.util.error(new Error(),
{code: 'UnknownError', message: 'An unknown error occurred.'});
}
});
addAsync('SEND', 'send', function SEND(resp, done) {
resp.httpResponse._abortCallback = done;
resp.error = null;
resp.data = null;
function callback(httpResp) {
resp.httpResponse.stream = httpResp;
httpResp.on('headers', function onHeaders(statusCode, headers, statusMessage) {
resp.request.emit(
'httpHeaders',
[statusCode, headers, resp, statusMessage]
);
if (!resp.httpResponse.streaming) {
if (AWS.HttpClient.streamsApiVersion === 2) { // streams2 API check
httpResp.on('readable', function onReadable() {
var data = httpResp.read();
if (data !== null) {
resp.request.emit('httpData', [data, resp]);
}
});
} else { // legacy streams API
httpResp.on('data', function onData(data) {
resp.request.emit('httpData', [data, resp]);
});
}
}
});
httpResp.on('end', function onEnd() {
resp.request.emit('httpDone');
done();
});
}
function progress(httpResp) {
httpResp.on('sendProgress', function onSendProgress(value) {
resp.request.emit('httpUploadProgress', [value, resp]);
});
httpResp.on('receiveProgress', function onReceiveProgress(value) {
resp.request.emit('httpDownloadProgress', [value, resp]);
});
}
function error(err) {
resp.error = AWS.util.error(err, {
code: 'NetworkingError',
region: resp.request.httpRequest.region,
hostname: resp.request.httpRequest.endpoint.hostname,
retryable: true
});
resp.request.emit('httpError', [resp.error, resp], function() {
done();
});
}
function executeSend() {
var http = AWS.HttpClient.getInstance();
var httpOptions = resp.request.service.config.httpOptions || {};
try {
var stream = http.handleRequest(resp.request.httpRequest, httpOptions,
callback, error);
progress(stream);
} catch (err) {
error(err);
}
}
var timeDiff = (AWS.util.date.getDate() - this.signedAt) / 1000;
if (timeDiff >= 60 * 10) { // if we signed 10min ago, re-sign
this.emit('sign', [this], function(err) {
if (err) done(err);
else executeSend();
});
} else {
executeSend();
}
});
add('HTTP_HEADERS', 'httpHeaders',
function HTTP_HEADERS(statusCode, headers, resp, statusMessage) {
resp.httpResponse.statusCode = statusCode;
resp.httpResponse.statusMessage = statusMessage;
resp.httpResponse.headers = headers;
resp.httpResponse.body = new AWS.util.Buffer('');
resp.httpResponse.buffers = [];
resp.httpResponse.numBytes = 0;
var dateHeader = headers.date || headers.Date;
if (dateHeader) {
var serverTime = Date.parse(dateHeader);
if (resp.request.service.config.correctClockSkew
&& AWS.util.isClockSkewed(serverTime)) {
AWS.util.applyClockOffset(serverTime);
}
}
});
add('HTTP_DATA', 'httpData', function HTTP_DATA(chunk, resp) {
if (chunk) {
if (AWS.util.isNode()) {
resp.httpResponse.numBytes += chunk.length;
var total = resp.httpResponse.headers['content-length'];
var progress = { loaded: resp.httpResponse.numBytes, total: total };
resp.request.emit('httpDownloadProgress', [progress, resp]);
}
resp.httpResponse.buffers.push(new AWS.util.Buffer(chunk));
}
});
add('HTTP_DONE', 'httpDone', function HTTP_DONE(resp) {
// convert buffers array into single buffer
if (resp.httpResponse.buffers && resp.httpResponse.buffers.length > 0) {
var body = AWS.util.buffer.concat(resp.httpResponse.buffers);
resp.httpResponse.body = body;
}
delete resp.httpResponse.numBytes;
delete resp.httpResponse.buffers;
});
add('FINALIZE_ERROR', 'retry', function FINALIZE_ERROR(resp) {
if (resp.httpResponse.statusCode) {
resp.error.statusCode = resp.httpResponse.statusCode;
if (resp.error.retryable === undefined) {
resp.error.retryable = this.service.retryableError(resp.error, this);
}
}
});
add('INVALIDATE_CREDENTIALS', 'retry', function INVALIDATE_CREDENTIALS(resp) {
if (!resp.error) return;
switch (resp.error.code) {
case 'RequestExpired': // EC2 only
case 'ExpiredTokenException':
case 'ExpiredToken':
resp.error.retryable = true;
resp.request.service.config.credentials.expired = true;
}
});
add('EXPIRED_SIGNATURE', 'retry', function EXPIRED_SIGNATURE(resp) {
var err = resp.error;
if (!err) return;
if (typeof err.code === 'string' && typeof err.message === 'string') {
if (err.code.match(/Signature/) && err.message.match(/expired/)) {
resp.error.retryable = true;
}
}
});
add('CLOCK_SKEWED', 'retry', function CLOCK_SKEWED(resp) {
if (!resp.error) return;
if (this.service.clockSkewError(resp.error)
&& this.service.config.correctClockSkew
&& AWS.config.isClockSkewed) {
resp.error.retryable = true;
}
});
add('REDIRECT', 'retry', function REDIRECT(resp) {
if (resp.error && resp.error.statusCode >= 300 &&
resp.error.statusCode < 400 && resp.httpResponse.headers['location']) {
this.httpRequest.endpoint =
new AWS.Endpoint(resp.httpResponse.headers['location']);
this.httpRequest.headers['Host'] = this.httpRequest.endpoint.host;
resp.error.redirect = true;
resp.error.retryable = true;
}
});
add('RETRY_CHECK', 'retry', function RETRY_CHECK(resp) {
if (resp.error) {
if (resp.error.redirect && resp.redirectCount < resp.maxRedirects) {
resp.error.retryDelay = 0;
} else if (resp.retryCount < resp.maxRetries) {
resp.error.retryDelay = this.service.retryDelays(resp.retryCount) || 0;
}
}
});
addAsync('RESET_RETRY_STATE', 'afterRetry', function RESET_RETRY_STATE(resp, done) {
var delay, willRetry = false;
if (resp.error) {
delay = resp.error.retryDelay || 0;
if (resp.error.retryable && resp.retryCount < resp.maxRetries) {
resp.retryCount++;
willRetry = true;
} else if (resp.error.redirect && resp.redirectCount < resp.maxRedirects) {
resp.redirectCount++;
willRetry = true;
}
}
if (willRetry) {
resp.error = null;
setTimeout(done, delay);
} else {
done();
}
});
}),
CorePost: new SequentialExecutor().addNamedListeners(function(add) {
add('EXTRACT_REQUEST_ID', 'extractData', AWS.util.extractRequestId);
add('EXTRACT_REQUEST_ID', 'extractError', AWS.util.extractRequestId);
add('ENOTFOUND_ERROR', 'httpError', function ENOTFOUND_ERROR(err) {
if (err.code === 'NetworkingError' && err.errno === 'ENOTFOUND') {
var message = 'Inaccessible host: `' + err.hostname +
'\'. This service may not be available in the `' + err.region +
'\' region.';
this.response.error = AWS.util.error(new Error(message), {
code: 'UnknownEndpoint',
region: err.region,
hostname: err.hostname,
retryable: true,
originalError: err
});
}
});
}),
Logger: new SequentialExecutor().addNamedListeners(function(add) {
add('LOG_REQUEST', 'complete', function LOG_REQUEST(resp) {
var req = resp.request;
var logger = req.service.config.logger;
if (!logger) return;
function buildMessage() {
var time = AWS.util.date.getDate().getTime();
var delta = (time - req.startTime.getTime()) / 1000;
var ansi = logger.isTTY ? true : false;
var status = resp.httpResponse.statusCode;
var params = require('util').inspect(req.params, true, null);
var message = '';
if (ansi) message += '\x1B[33m';
message += '[AWS ' + req.service.serviceIdentifier + ' ' + status;
message += ' ' + delta.toString() + 's ' + resp.retryCount + ' retries]';
if (ansi) message += '\x1B[0;1m';
message += ' ' + AWS.util.string.lowerFirst(req.operation);
message += '(' + params + ')';
if (ansi) message += '\x1B[0m';
return message;
}
var line = buildMessage();
if (typeof logger.log === 'function') {
logger.log(line);
} else if (typeof logger.write === 'function') {
logger.write(line + '\n');
}
});
}),
Json: new SequentialExecutor().addNamedListeners(function(add) {
var svc = require('./protocol/json');
add('BUILD', 'build', svc.buildRequest);
add('EXTRACT_DATA', 'extractData', svc.extractData);
add('EXTRACT_ERROR', 'extractError', svc.extractError);
}),
Rest: new SequentialExecutor().addNamedListeners(function(add) {
var svc = require('./protocol/rest');
add('BUILD', 'build', svc.buildRequest);
add('EXTRACT_DATA', 'extractData', svc.extractData);
add('EXTRACT_ERROR', 'extractError', svc.extractError);
}),
RestJson: new SequentialExecutor().addNamedListeners(function(add) {
var svc = require('./protocol/rest_json');
add('BUILD', 'build', svc.buildRequest);
add('EXTRACT_DATA', 'extractData', svc.extractData);
add('EXTRACT_ERROR', 'extractError', svc.extractError);
}),
RestXml: new SequentialExecutor().addNamedListeners(function(add) {
var svc = require('./protocol/rest_xml');
add('BUILD', 'build', svc.buildRequest);
add('EXTRACT_DATA', 'extractData', svc.extractData);
add('EXTRACT_ERROR', 'extractError', svc.extractError);
}),
Query: new SequentialExecutor().addNamedListeners(function(add) {
var svc = require('./protocol/query');
add('BUILD', 'build', svc.buildRequest);
add('EXTRACT_DATA', 'extractData', svc.extractData);
add('EXTRACT_ERROR', 'extractError', svc.extractError);
})
};<|fim▁end|> | this.service.endpoint,
this.service.region
); |
<|file_name|>exception.py<|end_file_name|><|fim▁begin|>#
# exception.py - general exception formatting and saving
#
# Copyright (C) 2000-2013 Red Hat, Inc.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Chris Lumens <[email protected]>
# David Cantrell <[email protected]>
# Vratislav Podzimek <[email protected]>
#
from meh import Config
from meh.handler import ExceptionHandler
from meh.dump import ReverseExceptionDump
from pyanaconda import iutil, kickstart
import sys
import os
import shutil
import time
import re
import errno
import glob
import traceback
import blivet.errors
from pyanaconda.errors import CmdlineError
from pyanaconda.ui.communication import hubQ
from pyanaconda.constants import THREAD_EXCEPTION_HANDLING_TEST, IPMI_FAILED
from pyanaconda.threads import threadMgr
from pyanaconda.i18n import _
from pyanaconda import flags
from pyanaconda import startup_utils
from gi.repository import GLib
import logging
log = logging.getLogger("anaconda")
class AnacondaExceptionHandler(ExceptionHandler):
def __init__(self, confObj, intfClass, exnClass, tty_num, gui_lock, interactive):
"""
:see: python-meh's ExceptionHandler
:param tty_num: the number of tty the interface is running on
"""
ExceptionHandler.__init__(self, confObj, intfClass, exnClass)
self._gui_lock = gui_lock
self._intf_tty_num = tty_num
self._interactive = interactive
def _main_loop_handleException(self, dump_info):
"""
Helper method with one argument only so that it can be registered
with GLib.idle_add() to run on idle or called from a handler.
:type dump_info: an instance of the meh.DumpInfo class
"""
ty = dump_info.exc_info.type
value = dump_info.exc_info.value
if (issubclass(ty, blivet.errors.StorageError) and value.hardware_fault) \
or (issubclass(ty, OSError) and value.errno == errno.EIO):
# hardware fault or '[Errno 5] Input/Output error'
hw_error_msg = _("The installation was stopped due to what "
"seems to be a problem with your hardware. "
"The exact error message is:\n\n%s.\n\n "
"The installer will now terminate.") % str(value)
            self.intf.messageWindow(_("Hardware error occurred"), hw_error_msg)
sys.exit(0)
elif isinstance(value, blivet.errors.UnusableConfigurationError):
sys.exit(0)
else:
super(AnacondaExceptionHandler, self).handleException(dump_info)
return False
def handleException(self, dump_info):
"""
Our own handleException method doing some additional stuff before
calling the original python-meh's one.
:type dump_info: an instance of the meh.DumpInfo class
:see: python-meh's ExceptionHandler.handleException
"""
log.debug("running handleException")
exception_lines = traceback.format_exception(*dump_info.exc_info)
log.critical("\n".join(exception_lines))
ty = dump_info.exc_info.type
value = dump_info.exc_info.value
try:
from gi.repository import Gtk
# XXX: Gtk stopped raising RuntimeError if it fails to
            # initialize. Hooray! But will it stay like this? Let's be
# cautious and raise the exception on our own to work in both
# cases
initialized = Gtk.init_check(None)[0]
if not initialized:
raise RuntimeError()
# Attempt to grab the GUI initializing lock, do not block
if not self._gui_lock.acquire(False):
# the graphical interface is running, don't crash it by
# running another one potentially from a different thread
log.debug("Gtk running, queuing exception handler to the "
"main loop")
GLib.idle_add(self._main_loop_handleException, dump_info)
else:
log.debug("Gtk not running, starting Gtk and running "
"exception handler in it")
self._main_loop_handleException(dump_info)
except (RuntimeError, ImportError):
log.debug("Gtk cannot be initialized")
# X not running (Gtk cannot be initialized)
if threadMgr.in_main_thread():
log.debug("In the main thread, running exception handler")
if issubclass(ty, CmdlineError) or not self._interactive:
if issubclass(ty, CmdlineError):
cmdline_error_msg = _("\nThe installation was stopped due to "
"incomplete spokes detected while running "
"in non-interactive cmdline mode. Since there "
"cannot be any questions in cmdline mode, "
"edit your kickstart file and retry "
"installation.\nThe exact error message is: "
"\n\n%s.\n\nThe installer will now terminate.") % str(value)
else:
cmdline_error_msg = _("\nRunning in cmdline mode, no interactive debugging "
"allowed.\nThe exact error message is: "
"\n\n%s.\n\nThe installer will now terminate.") % str(value)
# since there is no UI in cmdline mode and it is completely
# non-interactive, we can't show a message window asking the user
# to acknowledge the error; instead, print the error out and sleep
# for a few seconds before exiting the installer
print(cmdline_error_msg)
time.sleep(10)
sys.exit(1)
else:
print("\nAn unknown error has occured, look at the "
"/tmp/anaconda-tb* file(s) for more details")
# in the main thread, run exception handler
self._main_loop_handleException(dump_info)
else:
log.debug("In a non-main thread, sending a message with "
"exception data")
# not in the main thread, just send message with exception
# data and let message handler run the exception handler in
# the main thread
exc_info = dump_info.exc_info
hubQ.send_exception((exc_info.type,
exc_info.value,
exc_info.stack))
def postWriteHook(self, dump_info):
anaconda = dump_info.object
# See if there is a /root present in the root path and put exception there as well
if os.access(iutil.getSysroot() + "/root", os.X_OK):
try:
dest = iutil.getSysroot() + "/root/%s" % os.path.basename(self.exnFile)
shutil.copyfile(self.exnFile, dest)
except (shutil.Error, IOError):
log.error("Failed to copy %s to %s/root", self.exnFile, iutil.getSysroot())
# run kickstart traceback scripts (if necessary)
try:
kickstart.runTracebackScripts(anaconda.ksdata.scripts)
# pylint: disable=bare-except
except:
pass
iutil.ipmi_report(IPMI_FAILED)
def runDebug(self, exc_info):
if flags.can_touch_runtime_system("switch console") \
and self._intf_tty_num != 1:
iutil.vtActivate(1)
iutil.eintr_retry_call(os.open, "/dev/console", os.O_RDWR) # reclaim stdin
iutil.eintr_retry_call(os.dup2, 0, 1) # reclaim stdout
iutil.eintr_retry_call(os.dup2, 0, 2) # reclaim stderr
# ^
# |
# +------ dup2 is magic, I tells ya!
# bring back the echo
import termios
si = sys.stdin.fileno()
attr = termios.tcgetattr(si)
        attr[3] = attr[3] | termios.ECHO  # set the ECHO bit; '&' here would clear every other local flag
termios.tcsetattr(si, termios.TCSADRAIN, attr)
print("\nEntering debugger...")
print("Use 'continue' command to quit the debugger and get back to "\
"the main window")
import pdb
pdb.post_mortem(exc_info.stack)
if flags.can_touch_runtime_system("switch console") \
and self._intf_tty_num != 1:
iutil.vtActivate(self._intf_tty_num)
def initExceptionHandling(anaconda):
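    """Set up python-meh based exception handling for this anaconda instance.

    Collects the log files to attach to exception dumps, configures the
    attribute/local skip lists so passwords and passphrases never appear in
    tracebacks, registers the attachment callbacks defined below, installs an
    AnacondaExceptionHandler and returns the resulting meh Config object.
    """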
fileList = ["/tmp/anaconda.log", "/tmp/packaging.log",
"/tmp/program.log", "/tmp/storage.log", "/tmp/ifcfg.log",
"/tmp/dnf.log", "/tmp/dnf.rpm.log",
"/tmp/yum.log", iutil.getSysroot() + "/root/install.log",
"/proc/cmdline"]
if os.path.exists("/tmp/syslog"):
fileList.extend(["/tmp/syslog"])
if anaconda.opts and anaconda.opts.ksfile:
fileList.extend([anaconda.opts.ksfile])
conf = Config(programName="anaconda",
programVersion=startup_utils.get_anaconda_version_string(),
programArch=os.uname()[4],
attrSkipList=["_intf._actions",
"_intf._currentAction._xklwrapper",
"_intf._currentAction._spokes[\"KeyboardSpoke\"]._xkl_wrapper",
"_intf._currentAction._storage_playground",<|fim▁hole|> "_intf._currentAction.language.translations",
"_intf._currentAction.language.locales",
"_intf._currentAction._spokes[\"PasswordSpoke\"]._oldweak",
"_intf._currentAction._spokes[\"PasswordSpoke\"]._password",
"_intf._currentAction._spokes[\"UserSpoke\"]._password",
"_intf._currentAction._spokes[\"UserSpoke\"]._oldweak",
"_intf.storage.bootloader.password",
"_intf.storage.data",
"_intf.storage.encryptionPassphrase",
"_bootloader.encrypted_password",
"_bootloader.password",
"payload._groups"],
localSkipList=["passphrase", "password", "_oldweak", "_password"],
fileList=fileList)
conf.register_callback("lsblk_output", lsblk_callback, attchmnt_only=True)
conf.register_callback("nmcli_dev_list", nmcli_dev_list_callback,
attchmnt_only=True)
conf.register_callback("type", lambda: "anaconda", attchmnt_only=True)
conf.register_callback("addons", list_addons_callback, attchmnt_only=False)
if "/tmp/syslog" not in fileList:
# no syslog, grab output from journalctl and put it also to the
# anaconda-tb file
conf.register_callback("journalctl", journalctl_callback, attchmnt_only=False)
interactive = not anaconda.displayMode == 'c'
handler = AnacondaExceptionHandler(conf, anaconda.intf.meh_interface,
ReverseExceptionDump, anaconda.intf.tty_num,
anaconda.gui_initialized, interactive)
handler.install(anaconda)
return conf
def lsblk_callback():
"""Callback to get info about block devices."""
return iutil.execWithCapture("lsblk", ["--perms", "--fs", "--bytes"])
def nmcli_dev_list_callback():
"""Callback to get info about network devices."""
return iutil.execWithCapture("nmcli", ["device", "show"])
def journalctl_callback():
"""Callback to get logs from journalctl."""
# regex to filter log messages from anaconda's process (we have that in our
# logs)
anaconda_log_line = re.compile(r"\[%d\]:" % os.getpid())
ret = ""
for line in iutil.execReadlines("journalctl", ["-b"]):
if anaconda_log_line.search(line) is None:
# not an anaconda's message
ret += line + "\n"
return ret
def list_addons_callback():
"""
Callback to get info about the addons potentially affecting Anaconda's
behaviour.
"""
# list available addons and take their package names
addon_pkgs = glob.glob("/usr/share/anaconda/addons/*")
return ", ".join(addon.rsplit("/", 1)[1] for addon in addon_pkgs)
def test_exception_handling():
"""
Function that can be used for testing exception handling in anaconda. It
tries to prepare a worst case scenario designed from bugs seen so far.
"""
# XXX: this is a huge hack, but probably the only way, how we can get
# "unique" stack and thus unique hash and new bugreport
def raise_exception(msg, non_ascii):
timestamp = str(time.time()).split(".", 1)[0]
code = """
def f%s(msg, non_ascii):
raise RuntimeError(msg)
f%s(msg, non_ascii)
""" % (timestamp, timestamp)
eval(compile(code, "str_eval", "exec"))
# test non-ascii characters dumping
non_ascii = u'\u0159'
msg = "NOTABUG: testing exception handling"
# raise exception from a separate thread
from pyanaconda.threads import AnacondaThread
threadMgr.add(AnacondaThread(name=THREAD_EXCEPTION_HANDLING_TEST,
target=raise_exception,
args=(msg, non_ascii)))<|fim▁end|> | "_intf._currentAction._spokes[\"CustomPartitioningSpoke\"]._storage_playground", |
<|file_name|>hr_academic.py<|end_file_name|><|fim▁begin|># -*- encoding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Management Solution
# This module copyright (C) 2014 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the<|fim▁hole|># along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class hr_academic(models.Model):
_name = 'hr.academic'
_inherit = 'hr.curriculum'
diploma = fields.Char(string='Diploma', translate=True)
study_field = fields.Char(string='Field of study', translate=True,)
activities = fields.Text(string='Activities and associations',
translate=True)<|fim▁end|> | # GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License |
<|file_name|>gae_test.go<|end_file_name|><|fim▁begin|>// Copyright 2021 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gae
import (
"context"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"google.golang.org/protobuf/proto"
gaebasepb "go.chromium.org/luci/server/internal/gae/base"
remotepb "go.chromium.org/luci/server/internal/gae/remote_api"
. "github.com/smartystreets/goconvey/convey"
. "go.chromium.org/luci/common/testing/assertions"
)
func TestCall(t *testing.T) {
t.Parallel()
call := func(in, out proto.Message, handler http.HandlerFunc) error {
srv := httptest.NewServer(handler)
defer srv.Close()
apiURL, _ := url.Parse(srv.URL + "/call")
ctx := WithTickets(context.Background(), &Tickets{
api: "api-ticket",
dapperTrace: "dapper-ticket",
cloudTrace: "cloud-ticket",
apiURL: apiURL,
})
return Call(ctx, "service", "SomeMethod", in, out)
}<|fim▁hole|> panic(err)
}
return b
}
unmarshal := func(b []byte, m proto.Message) proto.Message {
if err := proto.Unmarshal(b, m); err != nil {
panic(err)
}
return m
}
respond := func(rw http.ResponseWriter, resp *remotepb.Response) {
if _, err := rw.Write(marshal(resp)); err != nil {
panic(err)
}
}
req := &gaebasepb.StringProto{Value: proto.String("req")}
res := &gaebasepb.StringProto{}
Convey("Happy path", t, func() {
var body []byte
var headers http.Header
So(call(req, res, func(rw http.ResponseWriter, req *http.Request) {
body, _ = ioutil.ReadAll(req.Body)
headers = req.Header.Clone()
respond(rw, &remotepb.Response{
Response: marshal(&gaebasepb.StringProto{Value: proto.String("res")}),
})
}), ShouldBeNil)
So(res, ShouldResembleProto, &gaebasepb.StringProto{
Value: proto.String("res"),
})
So(unmarshal(body, &remotepb.Request{}), ShouldResembleProto, &remotepb.Request{
ServiceName: proto.String("service"),
Method: proto.String("SomeMethod"),
Request: marshal(req),
RequestId: proto.String("api-ticket"),
})
So(headers.Get("X-Google-Rpc-Service-Endpoint"), ShouldEqual, "app-engine-apis")
So(headers.Get("X-Google-Rpc-Service-Method"), ShouldEqual, "/VMRemoteAPI.CallRemoteAPI")
So(headers.Get("X-Google-Rpc-Service-Deadline"), ShouldNotBeEmpty)
So(headers.Get("Content-Type"), ShouldEqual, "application/octet-stream")
So(headers.Get("X-Google-Dappertraceinfo"), ShouldEqual, "dapper-ticket")
So(headers.Get("X-Cloud-Trace-Context"), ShouldEqual, "cloud-ticket")
})
Convey("Bad HTTP status code", t, func() {
err := call(req, res, func(rw http.ResponseWriter, req *http.Request) {
rw.WriteHeader(404)
})
So(err, ShouldErrLike, "unexpected HTTP 404")
})
Convey("RPC error", t, func() {
err := call(req, res, func(rw http.ResponseWriter, req *http.Request) {
respond(rw, &remotepb.Response{
RpcError: &remotepb.RpcError{
Code: proto.Int32(int32(remotepb.RpcError_CALL_NOT_FOUND)),
Detail: proto.String("boo"),
},
})
})
So(err, ShouldErrLike, "RPC error CALL_NOT_FOUND calling service.SomeMethod: boo")
})
Convey("Application error", t, func() {
err := call(req, res, func(rw http.ResponseWriter, req *http.Request) {
respond(rw, &remotepb.Response{
ApplicationError: &remotepb.ApplicationError{
Code: proto.Int32(123),
Detail: proto.String("boo"),
},
})
})
So(err, ShouldErrLike, "API error 123 calling service.SomeMethod: boo")
})
Convey("Exception error", t, func() {
err := call(req, res, func(rw http.ResponseWriter, req *http.Request) {
respond(rw, &remotepb.Response{
Exception: []byte{1},
})
})
So(err, ShouldErrLike, "service bridge returned unexpected exception from service.SomeMethod")
})
}<|fim▁end|> |
marshal := func(m proto.Message) []byte {
b, err := proto.Marshal(m)
if err != nil { |
<|file_name|>GetVgroupsAddonsStaffResponse.java<|end_file_name|><|fim▁begin|>package ru.lanbilling.webservice.wsdl;
import java.util.ArrayList;
import java.util.List;
import javax.annotation.Generated;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlType;
/**
* <p>Java class for anonymous complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType><|fim▁hole|> * <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element name="ret" type="{urn:api3}soapVgroupsAddonsStaff" maxOccurs="unbounded"/>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "", propOrder = {
"ret"
})
@XmlRootElement(name = "getVgroupsAddonsStaffResponse")
@Generated(value = "com.sun.tools.xjc.Driver", date = "2015-10-25T05:29:34+06:00", comments = "JAXB RI v2.2.11")
public class GetVgroupsAddonsStaffResponse {
@XmlElement(required = true)
@Generated(value = "com.sun.tools.xjc.Driver", date = "2015-10-25T05:29:34+06:00", comments = "JAXB RI v2.2.11")
protected List<SoapVgroupsAddonsStaff> ret;
/**
* Gets the value of the ret property.
*
* <p>
* This accessor method returns a reference to the live list,
* not a snapshot. Therefore any modification you make to the
* returned list will be present inside the JAXB object.
* This is why there is not a <CODE>set</CODE> method for the ret property.
*
* <p>
* For example, to add a new item, do as follows:
* <pre>
* getRet().add(newItem);
* </pre>
*
*
* <p>
* Objects of the following type(s) are allowed in the list
* {@link SoapVgroupsAddonsStaff }
*
*
*/
@Generated(value = "com.sun.tools.xjc.Driver", date = "2015-10-25T05:29:34+06:00", comments = "JAXB RI v2.2.11")
public List<SoapVgroupsAddonsStaff> getRet() {
if (ret == null) {
ret = new ArrayList<SoapVgroupsAddonsStaff>();
}
return this.ret;
}
}<|fim▁end|> | |
<|file_name|>WebCLGLVertexFragmentProgram.class.js<|end_file_name|><|fim▁begin|>/**
* WebCLGLVertexFragmentProgram Object
* @class
* @constructor
*/
WebCLGLVertexFragmentProgram = function(gl, vertexSource, vertexHeader, fragmentSource, fragmentHeader) {
this.gl = gl;
var highPrecisionSupport = this.gl.getShaderPrecisionFormat(this.gl.FRAGMENT_SHADER, this.gl.HIGH_FLOAT);
this.precision = (highPrecisionSupport.precision != 0) ? 'precision highp float;\n\nprecision highp int;\n\n' : 'precision lowp float;\n\nprecision lowp int;\n\n';
this.utils = new WebCLGLUtils();
this.vertexSource;
this.fragmentSource;
this.in_vertex_values = [];
this.in_fragment_values = [];
this.vertexAttributes = []; // {location,value}
this.vertexUniforms = []; // {location,value}
this.fragmentSamplers = []; // {location,value}
this.fragmentUniforms = []; // {location,value}
if(vertexSource != undefined) this.setVertexSource(vertexSource, vertexHeader);
if(fragmentSource != undefined) this.setFragmentSource(fragmentSource, fragmentHeader);
};
/**
* Update the vertex source
* @type Void
* @param {String} vertexSource
* @param {String} vertexHeader
*/
WebCLGLVertexFragmentProgram.prototype.setVertexSource = function(vertexSource, vertexHeader) {
this.vertexHead =(vertexHeader!=undefined)?vertexHeader:'';
this.in_vertex_values = [];//{value,type,name,idPointer}
// value: argument value
// type: 'buffer_float4_fromKernel'(4 packet pointer4), 'buffer_float_fromKernel'(1 packet pointer4), 'buffer_float4'(1 pointer4), 'buffer_float'(1 pointer1)
// name: argument name
// idPointer to: this.vertexAttributes or this.vertexUniforms (according to type)
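    // e.g. a parsed "float4* aPos" argument yields (illustrative values only):
    //   {value: undefined, type: 'buffer_float4', name: 'aPos', idPointer: undefined}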
var argumentsSource = vertexSource.split(')')[0].split('(')[1].split(','); // "float* A", "float* B", "float C", "float4* D"
//console.log(argumentsSource);
for(var n = 0, f = argumentsSource.length; n < f; n++) {
if(argumentsSource[n].match(/\*kernel/gm) != null) {
if(argumentsSource[n].match(/float4/gm) != null) {
this.in_vertex_values[n] = {value:undefined,
type:'buffer_float4_fromKernel',
name:argumentsSource[n].split('*kernel')[1].trim()};
} else if(argumentsSource[n].match(/float/gm) != null) {
this.in_vertex_values[n] = {value:undefined,
type:'buffer_float_fromKernel',
name:argumentsSource[n].split('*kernel')[1].trim()};
}
} else if(argumentsSource[n].match(/\*/gm) != null) {
if(argumentsSource[n].match(/float4/gm) != null) {
this.in_vertex_values[n] = {value:undefined,
type:'buffer_float4',
name:argumentsSource[n].split('*')[1].trim()};
} else if(argumentsSource[n].match(/float/gm) != null) {
this.in_vertex_values[n] = {value:undefined,
type:'buffer_float',
name:argumentsSource[n].split('*')[1].trim()};
}
} else {
if(argumentsSource[n].match(/float4/gm) != null) {
this.in_vertex_values[n] = {value:undefined,
type:'float4',
name:argumentsSource[n].split(' ')[1].trim()};
} else if(argumentsSource[n].match(/float/gm) != null) {
this.in_vertex_values[n] = {value:undefined,
type:'float',
name:argumentsSource[n].split(' ')[1].trim()};
} else if(argumentsSource[n].match(/mat4/gm) != null) {
this.in_vertex_values[n] = {value:undefined,
type:'mat4',
name:argumentsSource[n].split(' ')[1].trim()};
}
}
}
//console.log(this.in_vertex_values);
//console.log('original source: '+vertexSource);
this.vertexSource = vertexSource.replace(/\r\n/gi, '').replace(/\r/gi, '').replace(/\n/gi, '');
this.vertexSource = this.vertexSource.replace(/^\w* \w*\([\w\s\*,]*\) {/gi, '').replace(/}(\s|\t)*$/gi, '');
//console.log('minified source: '+this.vertexSource);
this.vertexSource = this.parseVertexSource(this.vertexSource);
if(this.fragmentSource != undefined) this.compileVertexFragmentSource();
};
/** @private **/
WebCLGLVertexFragmentProgram.prototype.parseVertexSource = function(source) {
//console.log(source);
for(var n = 0, f = this.in_vertex_values.length; n < f; n++) { // for each in_vertex_values (in argument)
var regexp = new RegExp(this.in_vertex_values[n].name+'\\[\\w*\\]',"gm");
    var varMatches = source.match(regexp); // search occurrences of "in_vertex_values.name[xxx]" in source and store them in varMatches
//console.log(varMatches);
if(varMatches != null) {
for(var nB = 0, fB = varMatches.length; nB < fB; nB++) { // for each varMatches ("A[x]", "A[x]")
    var regexpNativeGL = new RegExp('```(\\s|\\t)*gl.*'+varMatches[nB]+'.*```[^```(\\s|\\t)*gl]',"gm");
var regexpNativeGLMatches = source.match(regexpNativeGL);
if(regexpNativeGLMatches == null) {
var name = varMatches[nB].split('[')[0];
var vari = varMatches[nB].split('[')[1].split(']')[0];
var regexp = new RegExp(name+'\\['+vari.trim()+'\\]',"gm");
if(this.in_vertex_values[n].type == 'buffer_float4_fromKernel')
source = source.replace(regexp, 'buffer_float4_fromKernel_data('+name+'0,'+name+'1,'+name+'2,'+name+'3)');
if(this.in_vertex_values[n].type == 'buffer_float_fromKernel')
source = source.replace(regexp, 'buffer_float_fromKernel_data('+name+'0)');
if(this.in_vertex_values[n].type == 'buffer_float4')
source = source.replace(regexp, name);
if(this.in_vertex_values[n].type == 'buffer_float')
source = source.replace(regexp, name);
}
}
}
}
source = source.replace(/```(\s|\t)*gl/gi, "").replace(/```/gi, "");
//console.log('%c translated source:'+source, "background-color:#000;color:#FFF");
return source;
};
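// Illustrative rewrite example (assumed): for an argument declared as
// "float4*kernel aPos", an expression "aPos[n]" in the user source becomes
// "buffer_float4_fromKernel_data(aPos0,aPos1,aPos2,aPos3)", while for a plain
// "float4* aVertex" buffer the expression "aVertex[n]" collapses to "aVertex".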
/**
* Update the fragment source
* @type Void
* @param {String} fragmentSource
* @param {String} fragmentHeader
*/
WebCLGLVertexFragmentProgram.prototype.setFragmentSource = function(fragmentSource, fragmentHeader) {
this.fragmentHead =(fragmentHeader!=undefined)?fragmentHeader:'';
this.in_fragment_values = [];//{value,type,name,idPointer}
// value: argument value
// type: 'buffer_float4'(RGBA channels), 'buffer_float'(Red channel)
// name: argument name
// idPointer to: this.fragmentSamplers or this.fragmentUniforms (according to type)
var argumentsSource = fragmentSource.split(')')[0].split('(')[1].split(','); // "float* A", "float* B", "float C", "float4* D"
//console.log(argumentsSource);
for(var n = 0, f = argumentsSource.length; n < f; n++) {
if(argumentsSource[n].match(/\*/gm) != null) {
if(argumentsSource[n].match(/float4/gm) != null) {
this.in_fragment_values[n] = {value:undefined,
type:'buffer_float4',
name:argumentsSource[n].split('*')[1].trim()};
} else if(argumentsSource[n].match(/float/gm) != null) {
this.in_fragment_values[n] = {value:undefined,
type:'buffer_float',
name:argumentsSource[n].split('*')[1].trim()};
}
} else {
if(argumentsSource[n].match(/float4/gm) != null) {
this.in_fragment_values[n] = {value:undefined,
type:'float4',
name:argumentsSource[n].split(' ')[1].trim()};
} else if(argumentsSource[n].match(/float/gm) != null) {
this.in_fragment_values[n] = {value:undefined,
type:'float',
name:argumentsSource[n].split(' ')[1].trim()};
} else if(argumentsSource[n].match(/mat4/gm) != null) {
this.in_fragment_values[n] = {value:undefined,
type:'mat4',
name:argumentsSource[n].split(' ')[1].trim()};
}
}
}
//console.log(this.in_fragment_values);
    //console.log('original source: '+fragmentSource);
this.fragmentSource = fragmentSource.replace(/\r\n/gi, '').replace(/\r/gi, '').replace(/\n/gi, '');
this.fragmentSource = this.fragmentSource.replace(/^\w* \w*\([\w\s\*,]*\) {/gi, '').replace(/}(\s|\t)*$/gi, '');
//console.log('minified source: '+this.fragmentSource);
this.fragmentSource = this.parseFragmentSource(this.fragmentSource);
if(this.vertexSource != undefined) this.compileVertexFragmentSource();
};
/** @private **/
WebCLGLVertexFragmentProgram.prototype.parseFragmentSource = function(source) {
//console.log(source);
for(var n = 0, f = this.in_fragment_values.length; n < f; n++) { // for each in_fragment_values (in argument)
var regexp = new RegExp(this.in_fragment_values[n].name+'\\[\\w*\\]',"gm");
    var varMatches = source.match(regexp); // search occurrences of "in_fragment_values.name[xxx]" in source and store them in varMatches
//console.log(varMatches);
if(varMatches != null) {
for(var nB = 0, fB = varMatches.length; nB < fB; nB++) { // for each varMatches ("A[x]", "A[x]")
    var regexpNativeGL = new RegExp('```(\\s|\\t)*gl.*'+varMatches[nB]+'.*```[^```(\\s|\\t)*gl]',"gm");
var regexpNativeGLMatches = source.match(regexpNativeGL);
if(regexpNativeGLMatches == null) {
var name = varMatches[nB].split('[')[0];
var vari = varMatches[nB].split('[')[1].split(']')[0];
var regexp = new RegExp(name+'\\['+vari.trim()+'\\]',"gm");
if(this.in_fragment_values[n].type == 'buffer_float4')
source = source.replace(regexp, 'buffer_float4_data('+name+','+vari+')');
if(this.in_fragment_values[n].type == 'buffer_float')
source = source.replace(regexp, 'buffer_float_data('+name+','+vari+')');
}
}
}
}
source = source.replace(/```(\s|\t)*gl/gi, "").replace(/```/gi, "");
//console.log('%c translated source:'+source, "background-color:#000;color:#FFF");
return source;
};
/** @private **/
WebCLGLVertexFragmentProgram.prototype.compileVertexFragmentSource = function() {
    var lines_vertex_attrs = (function() {
        var str = '';
for(var n = 0, f = this.in_vertex_values.length; n < f; n++) {
if(this.in_vertex_values[n].type == 'buffer_float4_fromKernel') {
str += 'attribute vec4 '+this.in_vertex_values[n].name+'0;\n';
str += 'attribute vec4 '+this.in_vertex_values[n].name+'1;\n';
str += 'attribute vec4 '+this.in_vertex_values[n].name+'2;\n';
str += 'attribute vec4 '+this.in_vertex_values[n].name+'3;\n';
} else if(this.in_vertex_values[n].type == 'buffer_float_fromKernel') {
str += 'attribute vec4 '+this.in_vertex_values[n].name+'0;\n';
} else if(this.in_vertex_values[n].type == 'buffer_float4') {
str += 'attribute vec4 '+this.in_vertex_values[n].name+';\n';
} else if(this.in_vertex_values[n].type == 'buffer_float') {
str += 'attribute float '+this.in_vertex_values[n].name+';\n';
} else if(this.in_vertex_values[n].type == 'float') {
str += 'uniform float '+this.in_vertex_values[n].name+';\n';
} else if(this.in_vertex_values[n].type == 'float4') {
str += 'uniform vec4 '+this.in_vertex_values[n].name+';\n';
} else if(this.in_vertex_values[n].type == 'mat4') {
str += 'uniform mat4 '+this.in_vertex_values[n].name+';\n';
}
}
return str;
}).bind(this);
    var lines_fragment_attrs = (function() {
        var str = '';
for(var n = 0, f = this.in_fragment_values.length; n < f; n++) {
if(this.in_fragment_values[n].type == 'buffer_float4' || this.in_fragment_values[n].type == 'buffer_float') {
str += 'uniform sampler2D '+this.in_fragment_values[n].name+';\n';
} else if(this.in_fragment_values[n].type == 'float') {
str += 'uniform float '+this.in_fragment_values[n].name+';\n';
} else if(this.in_fragment_values[n].type == 'float4') {
str += 'uniform vec4 '+this.in_fragment_values[n].name+';\n';
} else if(this.in_fragment_values[n].type == 'mat4') {
str += 'uniform mat4 '+this.in_fragment_values[n].name+';\n';
}
}
return str;
}).bind(this);
var sourceVertex = this.precision+
'uniform float uOffset;\n'+
lines_vertex_attrs()+
this.utils.unpackGLSLFunctionString()+
'vec4 buffer_float4_fromKernel_data(vec4 arg0, vec4 arg1, vec4 arg2, vec4 arg3) {\n'+
'float argX = (unpack(arg0)*(uOffset*2.0))-uOffset;\n'+
'float argY = (unpack(arg1)*(uOffset*2.0))-uOffset;\n'+
'float argZ = (unpack(arg2)*(uOffset*2.0))-uOffset;\n'+
'float argW = (unpack(arg3)*(uOffset*2.0))-uOffset;\n'+
'return vec4(argX, argY, argZ, argW);\n'+
'}\n'+
'float buffer_float_fromKernel_data(vec4 arg0) {\n'+
'float argX = (unpack(arg0)*(uOffset*2.0))-uOffset;\n'+
'return argX;\n'+
'}\n'+
'vec2 get_global_id() {\n'+
'return vec2(0.0, 0.0);\n'+
'}\n'+
this.vertexHead+
'void main(void) {\n'+
this.vertexSource+
'}\n';
//console.log(sourceVertex);
var sourceFragment = this.precision+
lines_fragment_attrs()+
'vec4 buffer_float4_data(sampler2D arg, vec2 coord) {\n'+
'vec4 textureColor = texture2D(arg, coord);\n'+
'return textureColor;\n'+
'}\n'+
'float buffer_float_data(sampler2D arg, vec2 coord) {\n'+
'vec4 textureColor = texture2D(arg, coord);\n'+
'return textureColor.x;\n'+
'}\n'+<|fim▁hole|>
this.fragmentHead+
'void main(void) {\n'+
this.fragmentSource+
'}\n';
//console.log(sourceFragment);
this.vertexFragmentProgram = this.gl.createProgram();
this.utils.createShader(this.gl, "WEBCLGL VERTEX FRAGMENT PROGRAM", sourceVertex, sourceFragment, this.vertexFragmentProgram);
this.vertexAttributes = []; // {location,value}
this.vertexUniforms = []; // {location,value}
this.fragmentSamplers = []; // {location,value}
this.fragmentUniforms = []; // {location,value}
this.uOffset = this.gl.getUniformLocation(this.vertexFragmentProgram, "uOffset");
// vertexAttributes & vertexUniforms
for(var n = 0, f = this.in_vertex_values.length; n < f; n++) {
if( this.in_vertex_values[n].type == 'buffer_float4_fromKernel') {
this.vertexAttributes.push({location:[this.gl.getAttribLocation(this.vertexFragmentProgram, this.in_vertex_values[n].name+"0"),
this.gl.getAttribLocation(this.vertexFragmentProgram, this.in_vertex_values[n].name+"1"),
this.gl.getAttribLocation(this.vertexFragmentProgram, this.in_vertex_values[n].name+"2"),
this.gl.getAttribLocation(this.vertexFragmentProgram, this.in_vertex_values[n].name+"3")],
value:this.in_vertex_values[n].value,
type: this.in_vertex_values[n].type});
this.in_vertex_values[n].idPointer = this.vertexAttributes.length-1;
} else if(this.in_vertex_values[n].type == 'buffer_float_fromKernel') {
this.vertexAttributes.push({location:[this.gl.getAttribLocation(this.vertexFragmentProgram, this.in_vertex_values[n].name+"0")],
value:this.in_vertex_values[n].value,
type: this.in_vertex_values[n].type});
this.in_vertex_values[n].idPointer = this.vertexAttributes.length-1;
} else if(this.in_vertex_values[n].type == 'buffer_float4' || this.in_vertex_values[n].type == 'buffer_float') {
this.vertexAttributes.push({location:[this.gl.getAttribLocation(this.vertexFragmentProgram, this.in_vertex_values[n].name)],
value:this.in_vertex_values[n].value,
type: this.in_vertex_values[n].type});
this.in_vertex_values[n].idPointer = this.vertexAttributes.length-1;
} else if(this.in_vertex_values[n].type == 'float' || this.in_vertex_values[n].type == 'float4' || this.in_vertex_values[n].type == 'mat4') {
this.vertexUniforms.push({location:[this.gl.getUniformLocation(this.vertexFragmentProgram, this.in_vertex_values[n].name)],
value:this.in_vertex_values[n].value,
type: this.in_vertex_values[n].type});
this.in_vertex_values[n].idPointer = this.vertexUniforms.length-1;
}
}
// fragmentSamplers & fragmentUniforms
for(var n = 0, f = this.in_fragment_values.length; n < f; n++) {
if(this.in_fragment_values[n].type == 'buffer_float4' || this.in_fragment_values[n].type == 'buffer_float') {
this.fragmentSamplers.push({location:[this.gl.getUniformLocation(this.vertexFragmentProgram, this.in_fragment_values[n].name)],
value:this.in_fragment_values[n].value,
type: this.in_fragment_values[n].type});
this.in_fragment_values[n].idPointer = this.fragmentSamplers.length-1;
} else if(this.in_fragment_values[n].type == 'float' || this.in_fragment_values[n].type == 'float4' || this.in_fragment_values[n].type == 'mat4') {
this.fragmentUniforms.push({location:[this.gl.getUniformLocation(this.vertexFragmentProgram, this.in_fragment_values[n].name)],
value:this.in_fragment_values[n].value,
type: this.in_fragment_values[n].type});
this.in_fragment_values[n].idPointer = this.fragmentUniforms.length-1;
}
}
return true;
};
/**
* Bind float, mat4 or a WebCLGLBuffer to a vertex argument
* @type Void
* @param {Int|String} argument Id of argument or name of this
* @param {Float|Int|WebCLGLBuffer} data
*/
WebCLGLVertexFragmentProgram.prototype.setVertexArg = function(argument, data) {
if(data == undefined) alert("Error: setVertexArg("+argument+", undefined)");
var numArg;
if(typeof argument != "string") {
numArg = argument;
} else {
for(var n=0, fn = this.in_vertex_values.length; n < fn; n++) {
if(this.in_vertex_values[n].name == argument) {
numArg = n;
break;
}
}
}
if(this.in_vertex_values[numArg] == undefined) {
console.log("argument "+argument+" not exist in this vertex program");
return;
}
this.in_vertex_values[numArg].value = data;
if( this.in_vertex_values[numArg].type == 'buffer_float4_fromKernel' ||
this.in_vertex_values[numArg].type == 'buffer_float_fromKernel' ||
this.in_vertex_values[numArg].type == 'buffer_float4' ||
this.in_vertex_values[numArg].type == 'buffer_float') {
this.vertexAttributes[this.in_vertex_values[numArg].idPointer].value = this.in_vertex_values[numArg].value;
} else if(this.in_vertex_values[numArg].type == 'float' || this.in_vertex_values[numArg].type == 'float4' || this.in_vertex_values[numArg].type == 'mat4') {
this.vertexUniforms[this.in_vertex_values[numArg].idPointer].value = this.in_vertex_values[numArg].value;
}
};
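// Illustrative binding example (assumed; the argument names are hypothetical):
//   program.setVertexArg("aVertex", vertexBuffer); // buffer types go to vertexAttributes
//   program.setVertexArg("uPV", pvMatrix);         // float/float4/mat4 go to vertexUniforms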
/**
* Bind float or a WebCLGLBuffer to a fragment argument
* @type Void
* @param {Int|String} argument Id of argument or name of this
* @param {Float|Int|WebCLGLBuffer} data
*/
WebCLGLVertexFragmentProgram.prototype.setFragmentArg = function(argument, data) {
if(data == undefined) alert("Error: setFragmentArg("+argument+", undefined)");
var numArg;
if(typeof argument != "string") {
numArg = argument;
} else {
for(var n=0, fn = this.in_fragment_values.length; n < fn; n++) {
if(this.in_fragment_values[n].name == argument) {
numArg = n;
break;
}
}
}
if(this.in_fragment_values[numArg] == undefined) {
console.log("argument "+argument+" not exist in this fragment program");
return;
}
this.in_fragment_values[numArg].value = data;
if(this.in_fragment_values[numArg].type == 'buffer_float4' || this.in_fragment_values[numArg].type == 'buffer_float') {
this.fragmentSamplers[this.in_fragment_values[numArg].idPointer].value = this.in_fragment_values[numArg].value;
} else if(this.in_fragment_values[numArg].type == 'float' || this.in_fragment_values[numArg].type == 'float4' || this.in_fragment_values[numArg].type == 'mat4') {
this.fragmentUniforms[this.in_fragment_values[numArg].idPointer].value = this.in_fragment_values[numArg].value;
}
};<|fim▁end|> |
'vec2 get_global_id() {\n'+
'return vec2(0.0, 0.0);\n'+
'}\n'+ |
<|file_name|>bitfield.hh<|end_file_name|><|fim▁begin|>#pragma once
#include <type_traits>
#include <limits>
#include <utility>
namespace sio {
template<typename T>
struct is_bit_enum: public std::false_type {};
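// Illustrative opt-in sketch (assumed, not part of this header): an enum joins
// the bitfield machinery by specializing is_bit_enum, e.g.
//   enum class open_mode : unsigned { read = 1, write = 2, append = 4 };
//   namespace sio { template<> struct is_bit_enum<open_mode> : std::true_type {}; }
// after which the free operators at the end of this header allow expressions
// such as: open_mode::read | sio::bitfield<open_mode>(open_mode::write)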
template<typename Enum, std::enable_if_t<std::is_enum<Enum>{}, int> = 0>
class bitfield {
public:
using integer = std::underlying_type_t<Enum>;
private:
integer bits;
public:
constexpr bitfield() noexcept: bits(0) {}
constexpr bitfield(Enum bit) noexcept: bits(static_cast<integer>(bit)) {}
explicit constexpr bitfield(integer value) noexcept: bits(value) {}
bitfield &operator|=(bitfield rhs) noexcept {
bits |= rhs.bits;
return *this;
}
bitfield &operator&=(bitfield rhs) noexcept {
bits &= rhs.bits;
return *this;
}
bitfield &operator^=(bitfield rhs) noexcept {
bits ^= rhs.bits;
return *this;
}
constexpr bitfield operator|(bitfield rhs) const noexcept {
return bitfield { bits | rhs.bits };
}
constexpr bitfield operator&(bitfield rhs) const noexcept {
return bitfield { bits & rhs.bits };
}
constexpr bitfield operator^(bitfield rhs) const noexcept {
return bitfield { bits ^ rhs.bits };
}
constexpr bitfield operator~() const noexcept {
return bitfield { ~bits };
}
constexpr operator bool() const noexcept {
return !!bits;
}
constexpr bool operator==(bitfield rhs) const noexcept {
return bits == rhs.bits;
}
<|fim▁hole|>};
template<typename Writer, typename Enum>
void
write(Writer &&w, bitfield<Enum> field) {
bool first = true;
for (int i = std::numeric_limits<typename bitfield<Enum>::integer>::digits-1; i >= 0; --i) {
Enum bit = static_cast<Enum>(1 << i);
if (field & bit) {
if (!first) {
write(std::forward<Writer>(w), " | ");
} else {
first = false;
}
write(std::forward<Writer>(w), bit);
}
}
}
} // namespace sio
template<typename Enum,
std::enable_if_t<sio::is_bit_enum<Enum>{}, int> = 0>
sio::bitfield<Enum> operator|(Enum lhs, sio::bitfield<decltype(lhs)> rhs) {
return rhs | lhs;
}
template<typename Enum,
std::enable_if_t<sio::is_bit_enum<Enum>{}, int> = 0>
sio::bitfield<Enum> operator&(Enum lhs, sio::bitfield<decltype(lhs)> rhs) {
return rhs & lhs;
}
template<typename Enum,
std::enable_if_t<sio::is_bit_enum<Enum>{}, int> = 0>
sio::bitfield<Enum> operator^(Enum lhs, sio::bitfield<decltype(lhs)> rhs) {
return rhs ^ lhs;
}
template<typename Enum,
std::enable_if_t<sio::is_bit_enum<Enum>{}, int> = 0>
sio::bitfield<Enum> operator~(Enum lhs) {
return ~sio::bitfield<Enum>(lhs);
}<|fim▁end|> | constexpr bool operator!=(bitfield rhs) const noexcept {
return bits != rhs.bits;
} |
<|file_name|>simulated.rs<|end_file_name|><|fim▁begin|>use std::collections::VecDeque;
use std::io;
use std::io::{Read, Write};
use std_streams::StdStreams;
/// Simulated handles for the standard input streams of a process.
///
/// Simulated input can be provided using
/// [`write_input()`](std_streams/struct.SimulatedStdStreams.html#method.write_input), and output
/// can be observed using [`read_output()`](std_streams/struct.SimulatedStdStreams.html#method.read_output)
/// and [`read_error()`](std_streams/struct.SimulatedStdStreams.html#method.read_error).
#[derive(Default)]
pub struct SimulatedStdStreams {
inputs: ChunkPipe,
output: Vec<u8>,
error: Vec<u8>,
}
impl SimulatedStdStreams {
/// Creates a new `SimulatedStdStreams`.
pub fn new() -> SimulatedStdStreams {
SimulatedStdStreams {
inputs: ChunkPipe::new(),
output: Vec::new(),
error: Vec::new(),
}
}
/// Writes the provided buffer to the queue of buffers to be used when input is requested
/// using [`StdStreams::input()`].
///
/// In particular, this method does NOT append data to a continuous buffer which is consumed
/// by [`StdStreams::input()`]; rather, it enqueues a buffer which will be used for a SINGLE
/// call to [`StdStreams::input()`]. The buffer is then discarded, regardless of how much of it
/// was (or was not) read.
///
/// This enables precise control over the length of data returned from a call to
/// [`StdStreams::input()`].
///
/// [`StdStreams::input()`]: trait.StdStreams.html#tymethod.input
///
/// ## Example
///
/// ```
/// use io_providers::{StdStreams, SimulatedStdStreams};
///
/// let mut streams = SimulatedStdStreams::new();
/// streams.write_input("foo".as_bytes());
/// streams.write_input("bar".as_bytes());
/// // The first read on `streams.input()` will read from "foo"<|fim▁hole|> /// // The second read on `streams.input()` will read from "bar"
/// ```
pub fn write_input(&mut self, input: &[u8]) {
self.inputs.write_all(input).unwrap();
}
/// Gets the data which has been written to the output stream.
///
/// ## Example
///
/// ```
/// use std::io::Write;
/// use io_providers::{StdStreams, SimulatedStdStreams};
///
/// let mut streams = SimulatedStdStreams::new();
/// writeln!(streams.output(), "test1");
/// write!(streams.output(), "test2");
/// assert_eq!("test1\ntest2", ::std::str::from_utf8(streams.read_output()).unwrap());
/// ```
pub fn read_output(&self) -> &[u8] {
&self.output[..]
}
/// Gets the data which has been written to the error stream.
///
/// ## Example
///
/// ```
/// use std::io::Write;
/// use io_providers::{StdStreams, SimulatedStdStreams};
///
/// let mut streams = SimulatedStdStreams::new();
/// writeln!(streams.error(), "test1");
/// write!(streams.error(), "test2");
/// assert_eq!("test1\ntest2", ::std::str::from_utf8(streams.read_error()).unwrap());
/// ```
pub fn read_error(&self) -> &[u8] {
&self.error[..]
}
}
impl StdStreams for SimulatedStdStreams {
fn input(&mut self) -> &mut Read {
&mut self.inputs
}
fn output(&mut self) -> &mut Write {
&mut self.output
}
fn error(&mut self) -> &mut Write {
&mut self.error
}
}
/// A `Read` and `Write` implementer where data is written in chunks and each read consumes a
/// single chunk.
#[derive(Default)]
struct ChunkPipe {
items: VecDeque<Vec<u8>>,
}
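// Illustrative behaviour (assumed): every write() stores one chunk and every
// read() drains at most one chunk, so after write(b"abc") and write(b"defg"),
// the first read yields "abc" and the second "defg" (see the tests below).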
impl ChunkPipe {
/// Creates a new, empty `ChunkPipe`.
pub fn new() -> ChunkPipe {
ChunkPipe {
items: VecDeque::new(),
}
}
}
impl Read for ChunkPipe {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if let Some(item) = self.items.pop_front() {
io::Cursor::new(item).read(buf)
} else {
Ok(0)
}
}
}
impl Write for ChunkPipe {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let mut vec = Vec::new();
let result = vec.write(buf);
self.items.push_back(vec);
result
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
#[cfg(test)]
#[allow(non_snake_case)]
mod tests {
use super::{ChunkPipe, SimulatedStdStreams, StdStreams};
use std::io::{Read, Write};
#[test]
fn chunk_pipe__no_writes__reads_successfully() {
let mut buf: Vec<u8> = vec![0; 8];
let mut pipe = ChunkPipe::new();
pipe.write(&[]).unwrap();
let result = pipe.read(&mut buf);
assert_eq!(0, result.unwrap());
}
#[test]
fn chunk_pipe__one_write__reads_successfully() {
let data = vec![1, 2, 3];
let mut buf1 = vec![0; 4];
let mut buf2 = vec![0; 4];
let mut pipe = ChunkPipe::new();
pipe.write(&data[..]).unwrap();
let result1 = pipe.read(&mut buf1).unwrap();
let result2 = pipe.read(&mut buf2).unwrap();
assert_eq!(data.len(), result1);
assert_eq!(vec![1, 2, 3, 0], buf1);
assert_eq!(0, result2);
}
#[test]
fn chunk_pipe__two_writes__reads_successfully() {
let data1 = vec![1, 2, 3];
let data2 = vec![4, 5, 6, 7];
let mut buf1 = vec![0; 4];
let mut buf2 = vec![0; 3];
let mut buf3 = vec![0; 3];
let mut pipe = ChunkPipe::new();
pipe.write(&data1[..]).unwrap();
let result1 = pipe.read(&mut buf1).unwrap();
pipe.write(&data2[..]).unwrap();
let result2 = pipe.read(&mut buf2).unwrap();
let result3 = pipe.read(&mut buf3).unwrap();
assert_eq!(data1.len(), result1);
assert_eq!(vec![1, 2, 3, 0], buf1);
assert_eq!(buf2.len(), result2);
assert_eq!(vec![4, 5, 6], buf2);
assert_eq!(0, result3);
}
#[test]
fn provider__empty_input__length_zero_read() {
let mut provider = SimulatedStdStreams::new();
let mut buf = vec![0; 4];
let result = provider.input().read(&mut buf).unwrap();
assert_eq!(0, result);
}
#[test]
fn provider__write_and_read_input__success() {
let mut provider = SimulatedStdStreams::new();
let expected = "test";
let mut actual = String::new();
let mut buf = vec![0; 4];
provider.write_input(expected.as_bytes());
let result = provider.input().read_to_string(&mut actual).unwrap();
assert_eq!(expected.len(), result);
assert_eq!(expected, actual);
let result = provider.input().read(&mut buf).unwrap();
assert_eq!(0, result);
}
#[test]
fn provider__two_input_writes__two_reads() {
let mut provider = SimulatedStdStreams::new();
let (expected1, expected2) = (vec![1, 2, 3], vec![4, 5, 6]);
let (mut actual1, mut actual2) = (vec![0; 3], vec![0; 3]);
provider.write_input(&expected1[..]);
provider.write_input(&expected2[..]);
let result1 = provider.input().read(&mut actual1).unwrap();
let result2 = provider.input().read(&mut actual2).unwrap();
assert_eq!(expected1.len(), result1);
assert_eq!(expected1, actual1);
assert_eq!(expected2.len(), result2);
assert_eq!(expected2, actual2);
}
#[test]
fn provider__write_read_output__success() {
let mut provider = SimulatedStdStreams::new();
let result1 = provider.output().write(&[1, 2]).unwrap();
let result2 = provider.output().write(&[3, 4]).unwrap();
let actual = provider.read_output();
assert_eq!(2, result1);
assert_eq!(2, result2);
assert_eq!(&[1, 2, 3, 4], actual);
}
#[test]
fn provider__write_read_error__success() {
let mut provider = SimulatedStdStreams::new();
let result1 = provider.error().write(&[1, 2]).unwrap();
let result2 = provider.error().write(&[3, 4]).unwrap();
let actual = provider.read_error();
assert_eq!(2, result1);
assert_eq!(2, result2);
assert_eq!(&[1, 2, 3, 4], actual);
}
}<|fim▁end|> | |
<|file_name|>synchronizing_consumer.cpp<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2011 Sveriges Television AB <[email protected]>
*
* This file is part of CasparCG (www.casparcg.com).
*
* CasparCG is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* CasparCG is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
*
* Author: Helge Norberg, [email protected]
*/
#include "../../StdAfx.h"
#include "synchronizing_consumer.h"
<|fim▁hole|>#include <common/log/log.h>
#include <common/diagnostics/graph.h>
#include <common/concurrency/future_util.h>
#include <core/video_format.h>
#include <boost/range/adaptor/transformed.hpp>
#include <boost/range/algorithm/min_element.hpp>
#include <boost/range/algorithm/max_element.hpp>
#include <boost/range/algorithm/for_each.hpp>
#include <boost/range/algorithm/count_if.hpp>
#include <boost/range/numeric.hpp>
#include <boost/algorithm/string/join.hpp>
#include <boost/thread/future.hpp>
#include <functional>
#include <vector>
#include <queue>
#include <utility>
#include <tbb/atomic.h>
namespace caspar { namespace core {
using namespace boost::adaptors;
class delegating_frame_consumer : public frame_consumer
{
safe_ptr<frame_consumer> consumer_;
public:
delegating_frame_consumer(const safe_ptr<frame_consumer>& consumer)
: consumer_(consumer)
{
}
frame_consumer& get_delegate()
{
return *consumer_;
}
const frame_consumer& get_delegate() const
{
return *consumer_;
}
virtual void initialize(
const video_format_desc& format_desc, int channel_index) override
{
get_delegate().initialize(format_desc, channel_index);
}
virtual int64_t presentation_frame_age_millis() const
{
return get_delegate().presentation_frame_age_millis();
}
virtual boost::unique_future<bool> send(
const safe_ptr<read_frame>& frame) override
{
return get_delegate().send(frame);
}
virtual std::wstring print() const override
{
return get_delegate().print();
}
virtual boost::property_tree::wptree info() const override
{
return get_delegate().info();
}
virtual bool has_synchronization_clock() const override
{
return get_delegate().has_synchronization_clock();
}
virtual size_t buffer_depth() const override
{
return get_delegate().buffer_depth();
}
virtual int index() const override
{
return get_delegate().index();
}
};
const std::vector<int>& diag_colors()
{
static std::vector<int> colors = boost::assign::list_of<int>
(diagnostics::color(0.0f, 0.6f, 0.9f))
(diagnostics::color(0.6f, 0.3f, 0.3f))
(diagnostics::color(0.3f, 0.6f, 0.3f))
(diagnostics::color(0.4f, 0.3f, 0.8f))
(diagnostics::color(0.9f, 0.9f, 0.5f))
(diagnostics::color(0.2f, 0.9f, 0.9f));
return colors;
}
class buffering_consumer_adapter : public delegating_frame_consumer
{
std::queue<safe_ptr<read_frame>> buffer_;
tbb::atomic<size_t> buffered_;
tbb::atomic<int64_t> duplicate_next_;
public:
buffering_consumer_adapter(const safe_ptr<frame_consumer>& consumer)
: delegating_frame_consumer(consumer)
{
buffered_ = 0;
duplicate_next_ = 0;
}
boost::unique_future<bool> consume_one()
{
if (!buffer_.empty())
{
buffer_.pop();
--buffered_;
}
return get_delegate().send(buffer_.front());
}
virtual boost::unique_future<bool> send(
const safe_ptr<read_frame>& frame) override
{
if (duplicate_next_)
{
--duplicate_next_;
}
else if (!buffer_.empty())
{
buffer_.pop();
--buffered_;
}
buffer_.push(frame);
++buffered_;
return get_delegate().send(buffer_.front());
}
void duplicate_next(int64_t to_duplicate)
{
duplicate_next_ += to_duplicate;
}
size_t num_buffered() const
{
return buffered_ - 1;
}
virtual std::wstring print() const override
{
return L"buffering[" + get_delegate().print() + L"]";
}
virtual boost::property_tree::wptree info() const override
{
boost::property_tree::wptree info;
info.add(L"type", L"buffering-consumer-adapter");
info.add_child(L"consumer", get_delegate().info());
info.add(L"buffered-frames", num_buffered());
return info;
}
};
static const uint64_t MAX_BUFFERED_OUT_OF_MEMORY_GUARD = 5;
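// Illustrative resync example (assumed): with 25 fps output (40 ms frames), an
// age difference of 80 ms between the slowest and fastest consumer gives
// age_diff / frame_duration = 2, so two frames are duplicated on the consumer
// with the smallest presentation age, capped so that its buffer never exceeds
// MAX_BUFFERED_OUT_OF_MEMORY_GUARD frames.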
struct synchronizing_consumer::implementation
{
private:
std::vector<safe_ptr<buffering_consumer_adapter>> consumers_;
size_t buffer_depth_;
bool has_synchronization_clock_;
std::vector<boost::unique_future<bool>> results_;
boost::promise<bool> promise_;
video_format_desc format_desc_;
safe_ptr<diagnostics::graph> graph_;
int64_t grace_period_;
tbb::atomic<int64_t> current_diff_;
public:
implementation(const std::vector<safe_ptr<frame_consumer>>& consumers)
: grace_period_(0)
{
BOOST_FOREACH(auto& consumer, consumers)
consumers_.push_back(make_safe<buffering_consumer_adapter>(consumer));
current_diff_ = 0;
auto buffer_depths = consumers | transformed(std::mem_fn(&frame_consumer::buffer_depth));
std::vector<size_t> depths(buffer_depths.begin(), buffer_depths.end());
buffer_depth_ = *boost::max_element(depths);
has_synchronization_clock_ = boost::count_if(consumers, std::mem_fn(&frame_consumer::has_synchronization_clock)) > 0;
diagnostics::register_graph(graph_);
}
boost::unique_future<bool> send(const safe_ptr<read_frame>& frame)
{
results_.clear();
BOOST_FOREACH(auto& consumer, consumers_)
results_.push_back(consumer->send(frame));
promise_ = boost::promise<bool>();
promise_.set_wait_callback(std::function<void(boost::promise<bool>&)>([this](boost::promise<bool>& promise)
{
BOOST_FOREACH(auto& result, results_)
{
result.get();
}
auto frame_ages = consumers_ | transformed(std::mem_fn(&frame_consumer::presentation_frame_age_millis));
std::vector<int64_t> ages(frame_ages.begin(), frame_ages.end());
auto max_age_iter = boost::max_element(ages);
auto min_age_iter = boost::min_element(ages);
int64_t min_age = *min_age_iter;
if (min_age == 0)
{
            // One of the consumers has no measurement yet; wait until the next
            // frame before making any assumptions.
promise.set_value(true);
return;
}
int64_t max_age = *max_age_iter;
int64_t age_diff = max_age - min_age;
current_diff_ = age_diff;
for (unsigned i = 0; i < ages.size(); ++i)
graph_->set_value(
narrow(consumers_[i]->print()),
static_cast<double>(ages[i]) / *max_age_iter);
bool grace_period_over = grace_period_ == 1;
if (grace_period_)
--grace_period_;
if (grace_period_ == 0)
{
int64_t frame_duration = static_cast<int64_t>(1000 / format_desc_.fps);
if (age_diff >= frame_duration)
{
CASPAR_LOG(info) << print() << L" Consumers not in sync. min: " << min_age << L" max: " << max_age;
auto index = min_age_iter - ages.begin();
auto to_duplicate = age_diff / frame_duration;
auto& consumer = *consumers_.at(index);
auto currently_buffered = consumer.num_buffered();
if (currently_buffered + to_duplicate > MAX_BUFFERED_OUT_OF_MEMORY_GUARD)
{
CASPAR_LOG(info) << print() << L" Protecting from out of memory. Duplicating less frames than calculated";
to_duplicate = MAX_BUFFERED_OUT_OF_MEMORY_GUARD - currently_buffered;
}
consumer.duplicate_next(to_duplicate);
grace_period_ = 10 + to_duplicate + buffer_depth_;
}
else if (grace_period_over)
{
CASPAR_LOG(info) << print() << L" Consumers resynced. min: " << min_age << L" max: " << max_age;
}
}
blocking_consume_unnecessarily_buffered();
promise.set_value(true);
}));
return promise_.get_future();
}
void blocking_consume_unnecessarily_buffered()
{
auto buffered = consumers_ | transformed(std::mem_fn(&buffering_consumer_adapter::num_buffered));
std::vector<size_t> num_buffered(buffered.begin(), buffered.end());
auto min_buffered = *boost::min_element(num_buffered);
if (min_buffered)
CASPAR_LOG(info) << print() << L" " << min_buffered
<< L" frames unnecessarily buffered. Consuming and letting channel pause during that time.";
while (min_buffered)
{
std::vector<boost::unique_future<bool>> results;
BOOST_FOREACH(auto& consumer, consumers_)
results.push_back(consumer->consume_one());
BOOST_FOREACH(auto& result, results)
result.get();
--min_buffered;
}
}
void initialize(const video_format_desc& format_desc, int channel_index)
{
for (size_t i = 0; i < consumers_.size(); ++i)
{
auto& consumer = consumers_.at(i);
consumer->initialize(format_desc, channel_index);
graph_->set_color(
narrow(consumer->print()),
diag_colors().at(i % diag_colors().size()));
}
graph_->set_text(print());
format_desc_ = format_desc;
}
int64_t presentation_frame_age_millis() const
{
int64_t result = 0;
BOOST_FOREACH(auto& consumer, consumers_)
result = std::max(result, consumer->presentation_frame_age_millis());
return result;
}
std::wstring print() const
{
return L"synchronized[" + boost::algorithm::join(consumers_ | transformed(std::mem_fn(&frame_consumer::print)), L"|") + L"]";
}
boost::property_tree::wptree info() const
{
boost::property_tree::wptree info;
info.add(L"type", L"synchronized-consumer");
BOOST_FOREACH(auto& consumer, consumers_)
info.add_child(L"consumer", consumer->info());
info.add(L"age-diff", current_diff_);
return info;
}
bool has_synchronization_clock() const
{
return has_synchronization_clock_;
}
size_t buffer_depth() const
{
return buffer_depth_;
}
int index() const
{
return boost::accumulate(consumers_ | transformed(std::mem_fn(&frame_consumer::index)), 10000);
}
};
synchronizing_consumer::synchronizing_consumer(const std::vector<safe_ptr<frame_consumer>>& consumers)
: impl_(new implementation(consumers))
{
}
boost::unique_future<bool> synchronizing_consumer::send(const safe_ptr<read_frame>& frame)
{
return impl_->send(frame);
}
void synchronizing_consumer::initialize(const video_format_desc& format_desc, int channel_index)
{
impl_->initialize(format_desc, channel_index);
}
int64_t synchronizing_consumer::presentation_frame_age_millis() const
{
return impl_->presentation_frame_age_millis();
}
std::wstring synchronizing_consumer::print() const
{
return impl_->print();
}
boost::property_tree::wptree synchronizing_consumer::info() const
{
return impl_->info();
}
bool synchronizing_consumer::has_synchronization_clock() const
{
return impl_->has_synchronization_clock();
}
size_t synchronizing_consumer::buffer_depth() const
{
return impl_->buffer_depth();
}
int synchronizing_consumer::index() const
{
return impl_->index();
}
}}<|fim▁end|> | |
<|file_name|>combat.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Tuxemon
# Copyright (C) 2014, William Edwards <[email protected]>,
# Benjamin Bean <[email protected]>
#
# This file is part of Tuxemon.
#
# Tuxemon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tuxemon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Tuxemon. If not, see <http://www.gnu.org/licenses/>.
#
# Contributor(s):
#
# Benjamin Bean <[email protected]>
# Leif Theden <[email protected]>
#
#
# core.states.combat Combat Start module
#
#
from __future__ import division
import logging
from collections import namedtuple, defaultdict
from functools import partial
from itertools import chain
from operator import attrgetter
import pygame
from core import tools, state
from core.components.locale import translator
from core.components.pyganim import PygAnimation
from core.components.sprite import Sprite
from core.components.technique import Technique
from core.components.ui.draw import GraphicBox
from core.components.ui.text import TextArea
from .combat_animations import CombatAnimations
trans = translator.translate
# Create a logger for optional handling of debug messages.
logger = logging.getLogger(__name__)
logger.debug("%s successfully imported" % __name__)
EnqueuedAction = namedtuple("EnqueuedAction", "user technique target")
faint = Technique("status_faint")
def check_status(monster, status_name):
return any(t for t in monster.status if t.slug == status_name)
def fainted(monster):
return check_status(monster, "status_faint")
def get_awake_monsters(player):
""" Iterate all non-fainted monsters in party
:param player:
:return:
"""
for monster in player.monsters:
if not fainted(monster):
yield monster
def fainted_party(party):
return all(map(fainted, party))
def defeated(player):
return fainted_party(player.monsters)
class WaitForInputState(state.State):
""" Just wait for input blocking everything
"""
def process_event(self, event):
if event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
self.game.pop_state(self)
class CombatState(CombatAnimations):
""" The state-menu responsible for all combat related tasks and functions.
.. image:: images/combat/monster_drawing01.png
General description of this class:
* implements a simple state machine
* various phases are executed using a queue of actions
* "decision queue" is used to queue player interactions/menus
* this class holds mostly logic, though some graphical functions exist
* most graphical functions are contained in "CombatAnimations" class
Currently, status icons are implemented as follows:
each round, all status icons are destroyed
status icons are created for each status on each monster
obvs, not ideal, maybe someday make it better? (see transition_phase)
"""
background_filename = "gfx/ui/combat/battle_bg03.png"
draw_borders = False
escape_key_exits = False
def startup(self, **kwargs):
        self.max_positions = 1  # TODO: make dependent on match type
self.phase = None
self.monsters_in_play = defaultdict(list)
self._damage_map = defaultdict(set) # track damage so experience can be awarded later
self._technique_cache = dict() # cache for technique animations
self._decision_queue = list() # queue for monsters that need decisions
self._position_queue = list() # queue for asking players to add a monster into play (subject to change)
self._action_queue = list() # queue for techniques, items, and status effects
self._status_icons = list() # list of sprites that are status icons
self._monster_sprite_map = dict() # monster => sprite
self._hp_bars = dict() # monster => hp bar
self._layout = dict() # player => home areas on screen
self._animation_in_progress = False # if true, delay phase change
self._winner = None # when set, combat ends
self._round = 0
super(CombatState, self).startup(**kwargs)
self.players = list(self.players)
self.show_combat_dialog()
self.transition_phase("begin")
self.task(partial(setattr, self, "phase", "ready"), 3)
def update(self, time_delta):
""" Update the combat state. State machine is checked.
General operation:
* determine what phase to execute
* if new phase, then run transition into new one
* update the new phase, or the current one
"""
super(CombatState, self).update(time_delta)
if not self._animation_in_progress:
new_phase = self.determine_phase(self.phase)
if new_phase:
self.phase = new_phase
self.transition_phase(new_phase)
self.update_phase()
def draw(self, surface):
super(CombatState, self).draw(surface)
self.draw_hp_bars()
def draw_hp_bars(self):
""" Go through the HP bars and redraw them
:returns: None
"""
for monster, hud in self.hud.items():
rect = pygame.Rect(0, 0, tools.scale(70), tools.scale(8))
rect.right = hud.image.get_width() - tools.scale(8)
rect.top += tools.scale(12)
self._hp_bars[monster].draw(hud.image, rect)
def determine_phase(self, phase):
""" Determine the next phase and set it
Part of state machine
Only test and set new phase.
* Do not execute phase actions
* Try not to modify any values
* Return a phase name and phase will change
* Return None and phase will not change
:returns: None or String
"""<|fim▁hole|> if phase == "ready":
return "housekeeping phase"
elif phase == "housekeeping phase":
# this will wait for players to fill battleground positions
for player in self.active_players:
positions_available = self.max_positions - len(self.monsters_in_play[player])
if positions_available:
return
return "decision phase"
elif phase == "decision phase":
# assume each monster executes one action
# if number of actions == monsters, then all monsters are ready
if len(self._action_queue) == len(self.active_monsters):
return "pre action phase"
# TODO: change check so that it doesn't change state
# (state is changed because check_match_status will modify _winner)
# if a player runs, it will be known here
self.determine_winner()
if self._winner:
return "ran away"
elif phase == "pre action phase":
return "action phase"
if phase == "action phase":
if not self._action_queue:
return "post action phase"
elif phase == "post action phase":
if not self._action_queue:
return "resolve match"
elif phase == "ran away":
return "end combat"
elif phase == "has winner":
return "end combat"
elif phase == "resolve match":
if self._winner:
return "has winner"
else:
return "housekeeping phase"
def transition_phase(self, phase):
""" Change from one phase from another.
Part of state machine
* Will be run just -once- when phase changes.
* Do not change phase.
* Execute code only to change into new phase.
* The phase's update will be executed -after- this
:param phase:
:return:
"""
if phase == "housekeeping phase":
self._round += 1
# fill all battlefield positions, but on round 1, don't ask
self.fill_battlefield_positions(ask=self._round > 1)
if phase == "decision phase":
self.reset_status_icons()
if not self._decision_queue:
for player in self.human_players:
# the decision queue tracks human players who need to choose an
# action
self._decision_queue.extend(self.monsters_in_play[player])
for trainer in self.ai_players:
for monster in self.monsters_in_play[trainer]:
opponents = self.monsters_in_play[self.players[0]]
action, target = monster.ai.make_decision(monster, opponents)
self.enqueue_action(monster, action, target)
elif phase == "action phase":
self._action_queue.sort(key=attrgetter("user.speed"))
            # TODO: Running happens somewhere else; it should be moved here, I think.
            # TODO: Sort other items, not just healing. Swap/Run?
            # Create a new list for items, possibly running/swap.
            # Sort items by the speed of the monster they are applied to,
            # then remove them from the action queue and insert them at their new location.
precedent = []
for action in self._action_queue:
if action.technique.effect == 'heal':
precedent.append(action)
            # sort items by fastest target
precedent.sort(key=attrgetter("target.speed"))
for action in precedent:
self._action_queue.remove(action)
self._action_queue.insert(0,action)
elif phase == "post action phase":
# apply status effects to the monsters
for monster in self.active_monsters:
for technique in monster.status:
self.enqueue_action(None, technique, monster)
elif phase == "resolve match":
self.determine_winner()
elif phase == "ran away":
# after 3 seconds, push a state that blocks until enter is pressed
# after the state is popped, the combat state will clean up and close
# if you run in PvP, you need "defeated message"
self.task(partial(self.game.push_state, "WaitForInputState"), 1)
self.suppress_phase_change(1)
elif phase == "has winner":
if self._winner:
# TODO: proper match check, etc
if self._winner.name == "Maple":
self.alert(trans('combat_defeat'))
else:
self.alert(trans('combat_victory'))
# after 3 seconds, push a state that blocks until enter is pressed
# after the state is popped, the combat state will clean up and close
self.task(partial(self.game.push_state, "WaitForInputState"), 1)
self.suppress_phase_change(1)
elif phase == "end combat":
self.end_combat()
def update_phase(self):
""" Execute/update phase actions
Part of state machine
* Do not change phase.
* Will be run each iteration phase is active.
* Do not test conditions to change phase.
:return: None
"""
if self.phase == "decision phase":
# show monster action menu for human players
if self._decision_queue:
monster = self._decision_queue.pop()
self.show_monster_action_menu(monster)
elif self.phase == "action phase":
self.handle_action_queue()
elif self.phase == "post action phase":
self.handle_action_queue()
def handle_action_queue(self):
""" Take one action from the queue and do it
:return: None
"""
if self._action_queue:
action = self._action_queue.pop()
self.perform_action(*action)
self.check_party_hp()
self.task(self.animate_party_status, 3)
def ask_player_for_monster(self, player):
""" Open dialog to allow player to choose a TXMN to enter into play
:param player:
:return:
"""
def add(menuitem):
monster = menuitem.game_object
if monster.current_hp == 0:
tools.open_dialog(self.game, [trans("combat_fainted", parameters={"name":monster.name})])
elif monster in self.active_monsters:
tools.open_dialog(self.game, [trans("combat_isactive", parameters={"name":monster.name})])
msg = trans("combat_replacement_is_fainted")
tools.open_dialog(self.game, [msg])
else:
self.add_monster_into_play(player, monster)
self.game.pop_state()
state = self.game.push_state("MonsterMenuState")
        # must use a partial because alert relies on a text box that may not exist
        # until after the state has been started up
state.task(partial(state.alert, trans("combat_replacement")), 0)
state.on_menu_selection = add
def fill_battlefield_positions(self, ask=False):
""" Check the battlefield for unfilled positions and send out monsters
:param ask: bool. if True, then open dialog for human players
:return:
"""
# TODO: let work for trainer battles
humans = list(self.human_players)
# TODO: integrate some values for different match types
released = False
for player in self.active_players:
positions_available = self.max_positions - len(self.monsters_in_play[player])
if positions_available:
available = get_awake_monsters(player)
for i in range(positions_available):
released = True
if player in humans and ask:
self.ask_player_for_monster(player)
else:
self.add_monster_into_play(player, next(available))
if released:
self.suppress_phase_change()
def add_monster_into_play(self, player, monster):
"""
:param player:
:param monster:
:return:
"""
# TODO: refactor some into the combat animations
feet = list(self._layout[player]['home'][0].center)
feet[1] += tools.scale(11)
self.animate_monster_release_bottom(feet, monster)
self.build_hud(self._layout[player]['hud'][0], monster)
self.monsters_in_play[player].append(monster)
# TODO: not hardcode
if player is self.players[0]:
self.alert(trans('combat_call_tuxemon', {"name": monster.name.upper()}))
else:
self.alert(trans('combat_wild_appeared', {"name": monster.name.upper()}))
def reset_status_icons(self):
""" Update/reset status icons for monsters
TODO: caching, etc
"""
# remove all status icons
for s in self._status_icons:
self.sprites.remove(s)
# add status icons
for monster in self.active_monsters:
for status in monster.status:
if status.icon:
# get the rect of the monster
rect = self._monster_sprite_map[monster].rect
# load the sprite and add it to the display
self.load_sprite(status.icon, layer=200, center=rect.topleft)
def show_combat_dialog(self):
""" Create and show the area where battle messages are displayed
"""
# make the border and area at the bottom of the screen for messages
x, y, w, h = self.game.screen.get_rect()
rect = pygame.Rect(0, 0, w, h // 4)
rect.bottomright = w, h
border = tools.load_and_scale(self.borders_filename)
self.dialog_box = GraphicBox(border, None, self.background_color)
self.dialog_box.rect = rect
self.sprites.add(self.dialog_box, layer=100)
# make a text area to show messages
self.text_area = TextArea(self.font, self.font_color)
self.text_area.rect = self.dialog_box.calc_inner_rect(self.dialog_box.rect)
self.sprites.add(self.text_area, layer=100)
def show_monster_action_menu(self, monster):
""" Show the main window for choosing player actions
:param monster: Monster to choose an action for
:type monster: core.components.monster.Monster
:returns: None
"""
message = trans('combat_monster_choice', {"name": monster.name})
self.alert(message)
x, y, w, h = self.game.screen.get_rect()
rect = pygame.Rect(0, 0, w // 2.5, h // 4)
rect.bottomright = w, h
state = self.game.push_state("MainCombatMenuState", columns=2)
state.monster = monster
state.rect = rect
def skip_phase_change(self):
""" Skip phase change animations
Useful if player wants to skip a battle animation
"""
for ani in self.animations:
ani.finish()
def enqueue_action(self, user, technique, target=None):
""" Add some technique or status to the action queue
:param user:
:param technique:
:param target:
:returns: None
"""
self._action_queue.append(EnqueuedAction(user, technique, target))
def remove_monster_actions_from_queue(self, monster):
""" Remove all queued actions for a particular monster
        This is used mainly for removing actions after a monster has fainted
:type monster: core.components.monster.Monster
:returns: None
"""
to_remove = set()
for action in self._action_queue:
if action.user is monster or action.target is monster:
to_remove.add(action)
        for action in to_remove:
            self._action_queue.remove(action)
def suppress_phase_change(self, delay=3):
""" Prevent the combat phase from changing for a limited time
Use this function to prevent the phase from changing. When
animating elements of the phase, call this to prevent player
input as well as phase changes.
:param delay:
:return:
"""
if self._animation_in_progress:
logger.debug("double suppress: bug?")
else:
self._animation_in_progress = True
self.task(partial(setattr, self, "_animation_in_progress", False), delay)
def perform_action(self, user, technique, target=None):
""" Do something with the thing: animated
:param user:
:param technique: Not a dict: a Technique or Item
:param target:
:returns:
"""
technique.advance_round()
# This is the time, in seconds, that the animation takes to finish.
action_time = 3.0
result = technique.use(user, target)
try:
tools.load_sound(technique.sfx).play()
except AttributeError:
pass
# action is performed, so now use sprites to animate it
# this value will be None if the target is off screen
target_sprite = self._monster_sprite_map.get(target, None)
# slightly delay the monster shake, so technique animation
# is synchronized with the damage shake motion
hit_delay = 0
if user:
message = trans('combat_used_x', {"user": user.name, "name": technique.name})
# TODO: a real check or some params to test if should tackle, etc
if result["should_tackle"]:
hit_delay += .5
user_sprite = self._monster_sprite_map[user]
self.animate_sprite_tackle(user_sprite)
if target_sprite:
self.task(partial(self.animate_sprite_take_damage, target_sprite), hit_delay + .2)
self.task(partial(self.blink, target_sprite), hit_delay + .6)
# Track damage
self._damage_map[target].add(user)
else: # assume this was an item used
if result["name"] == "capture":
message += "\n" + trans('attempting_capture')
self.task(partial(self.animate_capture_monster, result["success"], result["num_shakes"], target))
action_time = result["num_shakes"] + 1.8
if result["success"]: # end combat right here
self.task(self.end_combat, action_time + 0.5) # Display 'Gotcha!' first.
self.task(partial(self.alert, trans('gotcha')), action_time)
self.alert(message)
self._animation_in_progress = True
return
if result["success"]:
message += "\n" + trans('item_success')
else:
message += "\n" + trans('item_failure')
self.alert(message)
self.suppress_phase_change(action_time)
else:
if result["success"]:
self.suppress_phase_change()
self.alert(trans('combat_status_damage', {"name": target.name, "status": technique.name}))
if result["success"] and target_sprite and hasattr(technique, "images"):
tech_sprite = self.get_technique_animation(technique)
tech_sprite.rect.center = target_sprite.rect.center
self.task(tech_sprite.image.play, hit_delay)
self.task(partial(self.sprites.add, tech_sprite, layer=50), hit_delay)
self.task(tech_sprite.kill, 3)
def faint_monster(self, monster):
""" Instantly make the monster faint (will be removed later)
:type monster: core.components.monster.Monster
:returns: None
"""
monster.current_hp = 0
monster.status = [faint]
"""
Experience is earned when the target monster is fainted.
Any monsters who contributed any amount of damage will be awarded
Experience is distributed evenly to all participants
"""
if monster in self._damage_map:
# Award Experience
awarded_exp = monster.total_experience / monster.level / len(self._damage_map[monster])
for winners in self._damage_map[monster]:
winners.give_experience(awarded_exp)
# Remove monster from damage map
del self._damage_map[monster]
def animate_party_status(self):
""" Animate monsters that need to be fainted
* Animation to remove monster is handled here
TODO: check for faint status, not HP
:returns: None
"""
for player in self.monsters_in_play.keys():
for monster in self.monsters_in_play[player]:
if fainted(monster):
self.alert(trans('combat_fainted', {"name": monster.name}))
self.animate_monster_faint(monster)
self.suppress_phase_change(3)
def check_party_hp(self):
""" Apply status effects, then check HP, and party status
* Monsters will be removed from play here
:returns: None
"""
for player in self.monsters_in_play.keys():
for monster in self.monsters_in_play[player]:
self.animate_hp(monster)
if monster.current_hp <= 0 and not fainted(monster):
self.remove_monster_actions_from_queue(monster)
self.faint_monster(monster)
def get_technique_animation(self, technique):
""" Return a sprite usable as a technique animation
TODO: move to some generic animation loading thingy
:type technique: core.components.technique.Technique
:rtype: core.components.sprite.Sprite
"""
try:
return self._technique_cache[technique]
except KeyError:
sprite = self.load_technique_animation(technique)
self._technique_cache[technique] = sprite
return sprite
@staticmethod
def load_technique_animation(technique):
"""
TODO: move to some generic animation loading thingy
:param technique:
:rtype: core.components.sprite.Sprite
"""
frame_time = .09
images = list()
for fn in technique.images:
image = tools.load_and_scale(fn)
images.append((image, frame_time))
tech = PygAnimation(images, False)
sprite = Sprite()
sprite.image = tech
sprite.rect = tech.get_rect()
return sprite
@property
def active_players(self):
""" Generator of any non-defeated players/trainers
:rtype: collections.Iterable[core.components.player.Player]
"""
for player in self.players:
if not defeated(player):
yield player
@property
def human_players(self):
for player in self.players:
if player.isplayer:
yield player
@property
def ai_players(self):
for player in set(self.active_players) - set(self.human_players):
yield player
@property
def active_monsters(self):
""" List of any non-defeated monsters on battlefield
:rtype: list
"""
return list(chain.from_iterable(self.monsters_in_play.values()))
def remove_player(self, player):
# TODO: non SP things
self.players.remove(player)
self.suppress_phase_change()
self.alert(trans('combat_player_run'))
def determine_winner(self):
""" Determine if match should continue or not
:return:
"""
if self._winner:
return
players = list(self.active_players)
if len(players) == 1:
self._winner = players[0]
def end_combat(self):
""" End the combat
"""
# TODO: End combat differently depending on winning or losing
# clear action queue
self._action_queue = list()
event_engine = self.game.event_engine
fadeout_action = namedtuple("action", ["type", "parameters"])
fadeout_action.type = "fadeout_music"
fadeout_action.parameters = [1000]
event_engine.actions["fadeout_music"]["method"](self.game, fadeout_action)
# remove any menus that may be on top of the combat state
while self.game.current_state is not self:
self.game.pop_state()
self.game.push_state("FadeOutTransition", caller=self)<|fim▁end|> | |
<|file_name|>Transformer.java<|end_file_name|><|fim▁begin|>/*******************************************************************************
* Copyright 2006 - 2012 Vienna University of Technology,
* Department of Software Technology and Interactive Systems, IFS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This work originates from the Planets project, co-funded by the European Union under the Sixth Framework Programme.
<|fim▁hole|>import java.io.Serializable;
import java.util.List;
import javax.persistence.CascadeType;
import javax.persistence.DiscriminatorColumn;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.Inheritance;
import javax.persistence.ManyToOne;
import eu.scape_project.planning.model.ChangeLog;
import eu.scape_project.planning.model.IChangesHandler;
import eu.scape_project.planning.model.ITouchable;
import eu.scape_project.planning.model.Values;
import eu.scape_project.planning.model.values.INumericValue;
import eu.scape_project.planning.model.values.IOrdinalValue;
import eu.scape_project.planning.model.values.TargetValues;
import eu.scape_project.planning.model.values.Value;
import eu.scape_project.planning.validation.ValidationError;
/**
* Implements basic transformation functionality, i.e. aggregation over {@link Values} and
* common properties of transformers.
* @author Hannes Kulovits
*/
@Entity
@Inheritance
@DiscriminatorColumn(name = "type")
public abstract class Transformer implements ITransformer, Serializable, ITouchable
{
private static final long serialVersionUID = -3708795251848706848L;
@Id
@GeneratedValue
protected int id;
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
@ManyToOne(cascade=CascadeType.ALL)
private ChangeLog changeLog = new ChangeLog();
/**
* Transforms all the values in the list of the provided {@link Values}.
* According to the type of each {@link Value}, either
* {@link ITransformer#transform(INumericValue)} or {@link ITransformer#transform(IOrdinalValue)}
* is called.
* @param values List of values to be transformed
* @return {@link TargetValues}, which contains a list of all transformed values corresponding to the provided input
*/
public TargetValues transformValues(Values values) {
TargetValues result = new TargetValues();
for (Value v : values.getList()) {
if (v instanceof INumericValue) {
result.add(transform((INumericValue) v));
} else {
result.add(transform((IOrdinalValue) v));
}
}
return result;
}
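	// Usage sketch (LinearTransformer and measuredValues are hypothetical
	// names): a concrete subclass only supplies the two transform() overloads;
	// aggregation over a whole Values list is inherited from this class.
	//
	//   Transformer t = new LinearTransformer();
	//   TargetValues transformed = t.transformValues(measuredValues);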
public ChangeLog getChangeLog() {
return this.changeLog;
}
public void setChangeLog(ChangeLog value) {
changeLog = value;
}
public boolean isChanged() {
return changeLog.isAltered();
}
public void touch(String username) {
getChangeLog().touch(username);
}
public void touch() {
getChangeLog().touch();
}
/**
* @see ITouchable#handleChanges(IChangesHandler)
*/
public void handleChanges(IChangesHandler h){
h.visit(this);
}
/**
* If this Transformer is not correctly configured, this method adds
* an appropriate error-message to the given list and returns false.
*
* @return true if this transformer is correctly configured
*/
public abstract boolean isTransformable(List<ValidationError> errors);
public abstract Transformer clone();
}<|fim▁end|> | ******************************************************************************/
package eu.scape_project.planning.model.transform;
|
<|file_name|>MissionPlanner.java<|end_file_name|><|fim▁begin|>/*
* Kerbal Space App
*
* Copyright (C) 2014 Jim Pekarek (Amagi82)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.amagi82.kerbalspaceapp;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.OptionalDataException;
import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Locale;
import android.animation.Animator;
import android.animation.AnimatorListenerAdapter;
import android.app.ActionBar;
import android.app.Activity;
import android.content.Intent;
import android.content.SharedPreferences;
import android.content.res.Configuration;
import android.os.Build;
import android.os.Bundle;
import android.os.Environment;
import android.os.Parcelable;
import android.support.v4.app.NavUtils;
import android.view.Menu;
import android.view.MenuItem;
import android.view.MotionEvent;
import android.view.View;
import android.view.ViewConfiguration;
import android.view.ViewTreeObserver;
import android.widget.ImageView;
import android.widget.ListView;
import android.widget.TextView;
public class MissionPlanner extends Activity {
StableArrayAdapter mAdapter;
ListView mListView;
BackgroundContainer mBackgroundContainer;
ArrayList<MissionData> missionData = new ArrayList<MissionData>();
HashMap<Long, Integer> mItemIdTopMap = new HashMap<Long, Integer>();
boolean mSwiping = false, mItemPressed = false;
int totalDeltaV;
TextView tvTotalDeltaV;
private static final int SWIPE_DURATION = 250;
private static final int MOVE_DURATION = 150;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_mission_planner);
ActionBar actionBar = getActionBar();
actionBar.setDisplayHomeAsUpEnabled(true);
actionBar.setDisplayShowTitleEnabled(true);
getActionBar().setTitle(R.string.title_activity_mission_planner);
if (savedInstanceState == null) {
// Load saved missionData if available.
try {
FileInputStream inStream = new FileInputStream(Environment.getExternalStorageDirectory() + File.separator + "MissionData");
ObjectInputStream objectInStream = new ObjectInputStream(inStream);
int count = objectInStream.readInt();
for (int i = 0; i < count; i++)
missionData.add((MissionData) objectInStream.readObject());
objectInStream.close();
} catch (OptionalDataException e) {
e.printStackTrace();
} catch (ClassNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
// if the list is empty, add the default planet
if (missionData.size() == 0) {
missionData = setFirstMissionData();
}
} else {
missionData = savedInstanceState.getParcelableArrayList("key");
}
mBackgroundContainer = (BackgroundContainer) findViewById(R.id.listViewBackground);
mListView = (ListView) findViewById(R.id.list);
tvTotalDeltaV = (TextView) findViewById(R.id.tvTotalDeltaV);
mAdapter = new StableArrayAdapter(this, missionData, mTouchListener);
// add the newDestination button as a footer below the listview
ImageView newDestination = new ImageView(this);
newDestination.setImageResource(R.drawable.ic_plus);
mListView.addFooterView(newDestination);
newDestination.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
int possibleIconState = 0; // Lets MissionDestination know which icons it's allowed to use
if (missionData.size() < 1) {
possibleIconState = 1;
}
Intent intent = new Intent(MissionPlanner.this, MissionDestination.class);
intent.putExtra("possibleIconState", possibleIconState);
intent.putExtra("isNewItem", true); // Places the result as a new item in the listview
startActivityForResult(intent, 0);
}
});
mListView.setAdapter(mAdapter);
}
@Override
protected void onSaveInstanceState(Bundle outState) {
outState.putParcelableArrayList("key", missionData);
super.onSaveInstanceState(outState);
}
@Override
public void onResume() {
super.onResume();
// This is in onResume so it refreshes deltaV when the user returns from adjusting settings
SharedPreferences prefs = getSharedPreferences("settings", MODE_PRIVATE);
int mClearanceValue = prefs.getInt("mClearanceValue", 1000);
int mMarginsValues = prefs.getInt("mMarginsValue", 10);
int mInclinationValues = prefs.getInt("mInclinationValue", 30);
float mMarginsValue = (float) mMarginsValues / 100 + 1;
float mInclinationValue = (float) mInclinationValues / 100;
// Update OrbitalMechanics with the new values
OrbitalMechanics.mClearanceValue = mClearanceValue;
OrbitalMechanics.mMarginsValue = mMarginsValue;
OrbitalMechanics.mInclinationValue = mInclinationValue;
refreshDeltaV();
}
// This method calculates the total delta V and displays it at the bottom
private void refreshDeltaV() {
totalDeltaV = 0;
for (int i = 0; i < missionData.size(); i++) {
int takeoffDeltaV = 0, transferDeltaV = 0, landingDeltaV = 0;
if (missionData.get(i).getIconStatus() == 3) {
takeoffDeltaV = OrbitalMechanics.getToOrbit(missionData.get(i).getPlanetId(), missionData.get(i).getTakeoffAltitude(),
missionData.get(i).getOrbitAltitude());
}
if (missionData.get(i).getLanding()) {
landingDeltaV = OrbitalMechanics.getLandingDeltaV(missionData.get(i).getPlanetId(),
missionData.get(i).getTakeoffAltitude(), missionData.get(i).getOrbitAltitude());
}
if (missionData.get(i).getToOrbit() && missionData.get(i).getLanding()) {
takeoffDeltaV = OrbitalMechanics.getToOrbit(missionData.get(i).getPlanetId(), missionData.get(i).getTakeoffAltitude(),
missionData.get(i).getOrbitAltitude());
}
if (i != 0) {
transferDeltaV = OrbitalMechanics.getTransferDeltaV(missionData.get(i - 1).getPlanetId(), missionData.get(i).getPlanetId(),
missionData.get(i - 1).getOrbitAltitude(), missionData.get(i).getOrbitAltitude());
}
totalDeltaV = totalDeltaV + takeoffDeltaV + transferDeltaV + landingDeltaV;
}
String value = NumberFormat.getNumberInstance(Locale.getDefault()).format(totalDeltaV);
tvTotalDeltaV.setText(value + " m/s");
}
// Save missionData on pause
@Override
public void onPause() {
super.onPause();
try {
File file = new File(Environment.getExternalStorageDirectory() + File.separator + "MissionData");
file.createNewFile();
FileOutputStream outStream = new FileOutputStream(file);
ObjectOutputStream objectOutStream = new ObjectOutputStream(outStream);
objectOutStream.writeInt(missionData.size());
for (MissionData r : missionData)
objectOutStream.writeObject(r);
objectOutStream.close();
} catch (IOException e) {
e.printStackTrace();
}
}
// Handles touch events to fade/move dragged items as they are swiped out
private final View.OnTouchListener mTouchListener = new View.OnTouchListener() {
float mDownX;
private int mSwipeSlop = -1;
@Override
public boolean onTouch(final View v, MotionEvent event) {
if (mSwipeSlop < 0) {
mSwipeSlop = ViewConfiguration.get(MissionPlanner.this).getScaledTouchSlop();
}
switch (event.getAction()) {
case MotionEvent.ACTION_DOWN:
if (mItemPressed) {
// Multi-item swipes not handled
return false;
}
mItemPressed = true;
mDownX = event.getX();
break;
case MotionEvent.ACTION_CANCEL:
v.setAlpha(1);
v.setTranslationX(0);
mItemPressed = false;
break;
case MotionEvent.ACTION_MOVE: {
float x = event.getX() + v.getTranslationX();
float deltaX = x - mDownX;
float deltaXAbs = Math.abs(deltaX);
if (!mSwiping) {
if (deltaXAbs > mSwipeSlop) {
mSwiping = true;
mListView.requestDisallowInterceptTouchEvent(true);
mBackgroundContainer.showBackground(v.getTop(), v.getHeight());
}
}
if (mSwiping) {
v.setTranslationX((x - mDownX));
v.setAlpha(1 - deltaXAbs / v.getWidth());
}
}
break;
case MotionEvent.ACTION_UP: {
// User let go - figure out whether to animate the view out, or back into place
if (mSwiping) {
float x = event.getX() + v.getTranslationX();
float deltaX = x - mDownX;
float deltaXAbs = Math.abs(deltaX);
float fractionCovered = 0;
float endX;
float endAlpha;
final boolean remove;
if (deltaXAbs > v.getWidth() / 4) {
// Greater than a quarter of the width - animate it out
fractionCovered = deltaXAbs / v.getWidth();
endX = deltaX < 0 ? -v.getWidth() : v.getWidth();
endAlpha = 0;
remove = true;
} else {
// Not far enough - animate it back
fractionCovered = 1 - (deltaXAbs / v.getWidth());
endX = 0;
endAlpha = 1;
remove = false;
}
// Animate position and alpha of swiped item
long duration = (int) ((1 - fractionCovered) * SWIPE_DURATION);
mListView.setEnabled(false);
if (Build.VERSION.SDK_INT < Build.VERSION_CODES.JELLY_BEAN) {
v.animate().setDuration(duration).alpha(endAlpha).translationX(endX).setListener(new AnimatorListenerAdapter() {
@Override
public void onAnimationEnd(Animator animation) {
// Restore animated values
v.setAlpha(1);
v.setTranslationX(0);
if (remove) {
animateRemoval(mListView, v);
} else {
mBackgroundContainer.hideBackground();
mSwiping = false;
mListView.setEnabled(true);
}
}
});
} else {
v.animate().setDuration(duration).alpha(endAlpha).translationX(endX).withEndAction(new Runnable() {
@Override
public void run() {
// Restore animated values
v.setAlpha(1);
v.setTranslationX(0);
if (remove) {
animateRemoval(mListView, v);
} else {
mBackgroundContainer.hideBackground();
mSwiping = false;
mListView.setEnabled(true);
}
}
});
}
mItemPressed = false;
break;
}
}
// Item was clicked - allow user to edit list item
mItemPressed = false;
int position = mListView.getPositionForView(v);
int possibleIconState = 0;
if (position == 0) {
possibleIconState = 1;
}
Intent intent = new Intent(MissionPlanner.this, MissionDestination.class);
intent.putExtra("listItem", (Parcelable) missionData.get(position));
intent.putExtra("possibleIconState", possibleIconState);
intent.putExtra("isNewItem", false);
startActivityForResult(intent, position);
break;
default:
return false;
}
return true;
}
};
/**
* This method animates all other views in the ListView container (not including ignoreView) into their final positions. It is called
* after ignoreView has been removed from the adapter, but before layout has been run. The approach here is to figure out where
* everything is now, then allow layout to run, then figure out where everything is after layout, and then to run animations between all
* of those start/end positions.
*/
private void animateRemoval(final ListView listview, View viewToRemove) {
int firstVisiblePosition = listview.getFirstVisiblePosition();
for (int i = 0; i < listview.getChildCount(); ++i) {
View child = listview.getChildAt(i);
if (child != viewToRemove) {
int position = firstVisiblePosition + i;<|fim▁hole|> // Delete the item from the adapter
int position = mListView.getPositionForView(viewToRemove);
mAdapter.remove(mAdapter.getItem(position));
mAdapter.notifyDataSetChanged();
refreshDeltaV();
final ViewTreeObserver observer = listview.getViewTreeObserver();
observer.addOnPreDrawListener(new ViewTreeObserver.OnPreDrawListener() {
@Override
public boolean onPreDraw() {
observer.removeOnPreDrawListener(this);
boolean firstAnimation = true;
int firstVisiblePosition = listview.getFirstVisiblePosition();
for (int i = 0; i < listview.getChildCount(); ++i) {
final View child = listview.getChildAt(i);
int position = firstVisiblePosition + i;
long itemId = mAdapter.getItemId(position);
Integer startTop = mItemIdTopMap.get(itemId);
int top = child.getTop();
if (startTop != null) {
if (startTop != top) {
int delta = startTop - top;
child.setTranslationY(delta);
child.animate().setDuration(MOVE_DURATION).translationY(0);
if (firstAnimation) {
if (Build.VERSION.SDK_INT < Build.VERSION_CODES.JELLY_BEAN) {
child.animate().setListener(new AnimatorListenerAdapter() {
@Override
public void onAnimationEnd(Animator animation) {
mBackgroundContainer.hideBackground();
mSwiping = false;
mListView.setEnabled(true);
}
});
} else {
child.animate().withEndAction(new Runnable() {
@Override
public void run() {
mBackgroundContainer.hideBackground();
mSwiping = false;
mListView.setEnabled(true);
}
});
                                }
                                // Clear the flag on both API paths, not just post-Jelly Bean
                                firstAnimation = false;
                            }
}
} else {
// Animate new views along with the others. The catch is that they did not
// exist in the start state, so we must calculate their starting position
// based on neighboring views.
int childHeight = child.getHeight() + listview.getDividerHeight();
startTop = top + (i > 0 ? childHeight : -childHeight);
int delta = startTop - top;
child.setTranslationY(delta);
child.animate().setDuration(MOVE_DURATION).translationY(0);
if (firstAnimation) {
if (Build.VERSION.SDK_INT < Build.VERSION_CODES.JELLY_BEAN) {
child.animate().setListener(new AnimatorListenerAdapter() {
@Override
public void onAnimationEnd(Animator animation) {
mBackgroundContainer.hideBackground();
mSwiping = false;
mListView.setEnabled(true);
}
});
} else {
child.animate().withEndAction(new Runnable() {
@Override
public void run() {
mBackgroundContainer.hideBackground();
mSwiping = false;
mListView.setEnabled(true);
}
});
								}
								// Clear the flag on both API paths, not just post-Jelly Bean
								firstAnimation = false;
							}
}
}
mItemIdTopMap.clear();
return true;
}
});
}
// Inflate the menu; this adds items to the action bar if it is present.
@Override
public boolean onCreateOptionsMenu(Menu menu) {
getMenuInflater().inflate(R.menu.mission_planner, menu);
return true;
}
// Action bar functionality
@Override
public boolean onOptionsItemSelected(MenuItem item) {
switch (item.getItemId()) {
case android.R.id.home:
NavUtils.navigateUpFromSameTask(this);
break;
case R.id.action_settings:
startActivity(new Intent(this, Settings.class));
break;
case R.id.action_delete:
missionData.clear();
setFirstMissionData();
mAdapter.notifyDataSetChanged();
refreshDeltaV();
break;
default:
break;
}
return super.onOptionsItemSelected(item);
}
@Override
public void onConfigurationChanged(Configuration config) {
super.onConfigurationChanged(config);
if (Settings.language == null) {
Settings.language = Locale.getDefault();
} else if (!config.locale.equals(Settings.language) && !Locale.getDefault().equals(Settings.language)) {
config.locale = Settings.language;
Locale.setDefault(config.locale);
getBaseContext().getResources().updateConfiguration(config, getResources().getDisplayMetrics());
recreate();
}
}
// Grab the parcel from MissionDestination, and either add a new listview item or update an existing one
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
super.onActivityResult(requestCode, resultCode, data);
if (resultCode == RESULT_OK && data != null) {
MissionData result = data.getParcelableExtra("returnItem");
if (data.getBooleanExtra("isNewItem", true)) {
missionData.add(result);
} else {
missionData.set(requestCode, result);
}
mAdapter.notifyDataSetChanged();
refreshDeltaV();
}
        // Nothing to do on RESULT_CANCELED
}
// Adds Kerbin as the default departure planet
private ArrayList<MissionData> setFirstMissionData() {
MissionData a = new MissionData(4, false, 0, true, 100000, 3);
missionData.add(a);
return missionData;
}
}<|fim▁end|> | long itemId = mAdapter.getItemId(position);
mItemIdTopMap.put(itemId, child.getTop());
}
} |
<|file_name|>bank.py<|end_file_name|><|fim▁begin|># Copyright (C) 2012 Alex Nitz, Josh Willis, Andrew Miller
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides classes that describe banks of waveforms
"""
import types
import logging
import os.path
import h5py
from copy import copy
import numpy as np
from ligo.lw import table, lsctables, utils as ligolw_utils
import pycbc.waveform
import pycbc.pnutils
import pycbc.waveform.compress
from pycbc import DYN_RANGE_FAC
from pycbc.types import FrequencySeries, zeros
import pycbc.io
from pycbc.io.ligolw import LIGOLWContentHandler
import hashlib
def sigma_cached(self, psd):
""" Cache sigma calculate for use in tandem with the FilterBank class
"""
if not hasattr(self, '_sigmasq'):
from pycbc.opt import LimitedSizeDict
self._sigmasq = LimitedSizeDict(size_limit=2**5)
key = id(psd)
if not hasattr(psd, '_sigma_cached_key'):
psd._sigma_cached_key = {}
if key not in self._sigmasq or id(self) not in psd._sigma_cached_key:
psd._sigma_cached_key[id(self)] = True
# If possible, we precalculate the sigmasq vector for all possible waveforms
if pycbc.waveform.waveform_norm_exists(self.approximant):
if not hasattr(psd, 'sigmasq_vec'):
psd.sigmasq_vec = {}
if self.approximant not in psd.sigmasq_vec:
psd.sigmasq_vec[self.approximant] = \
pycbc.waveform.get_waveform_filter_norm(
self.approximant,
psd,
len(psd),
psd.delta_f,
self.min_f_lower
)
if not hasattr(self, 'sigma_scale'):
# Get an amplitude normalization (mass dependant constant norm)
amp_norm = pycbc.waveform.get_template_amplitude_norm(
self.params, approximant=self.approximant)
amp_norm = 1 if amp_norm is None else amp_norm
self.sigma_scale = (DYN_RANGE_FAC * amp_norm) ** 2.0
curr_sigmasq = psd.sigmasq_vec[self.approximant]
kmin = int(self.f_lower / psd.delta_f)
self._sigmasq[key] = self.sigma_scale * \
(curr_sigmasq[self.end_idx-1] - curr_sigmasq[kmin])
else:
if not hasattr(self, 'sigma_view'):
from pycbc.filter.matchedfilter import get_cutoff_indices
N = (len(self) -1) * 2
kmin, kmax = get_cutoff_indices(
self.min_f_lower or self.f_lower, self.end_frequency,
self.delta_f, N)
self.sslice = slice(kmin, kmax)
self.sigma_view = self[self.sslice].squared_norm() * 4.0 * self.delta_f
if not hasattr(psd, 'invsqrt'):
psd.invsqrt = 1.0 / psd
self._sigmasq[key] = self.sigma_view.inner(psd.invsqrt[self.sslice])
return self._sigmasq[key]
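# Usage sketch: the filter bank classes below bind this function as a method on
# each generated template (see "Add sigmasq as a method of this instance"), so
# given a template and a psd (names assumed):
#
#   htilde = bank[0]
#   norm = htilde.sigmasq(psd)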
# helper function for parsing approximant strings
def boolargs_from_apprxstr(approximant_strs):
"""Parses a list of strings specifying an approximant and where that
approximant should be used into a list that can be understood by
FieldArray.parse_boolargs.
Parameters
----------
    approximant_strs : (list of) string(s)
The strings to parse. Each string should be formatted `APPRX:COND`,
where `APPRX` is the approximant and `COND` is a string specifying
where it should be applied (see `FieldArgs.parse_boolargs` for examples
of conditional strings). The last string in the list may exclude a
conditional argument, which is the same as specifying ':else'.
Returns
-------
boolargs : list
A list of tuples giving the approximant and where to apply them. This
can be passed directly to `FieldArray.parse_boolargs`.
"""
if not isinstance(approximant_strs, list):
approximant_strs = [approximant_strs]
return [tuple(arg.split(':')) for arg in approximant_strs]
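# Example of the parsing above (values are illustrative). A trailing entry
# without a conditional comes back as a 1-tuple, which parse_boolargs treats
# the same as ':else':
#
#   >>> boolargs_from_apprxstr(['TaylorF2:mtotal < 4', 'IMRPhenomD'])
#   [('TaylorF2', 'mtotal < 4'), ('IMRPhenomD',)]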
def add_approximant_arg(parser, default=None, help=None):
"""Adds an approximant argument to the given parser.
Parameters
----------
parser : ArgumentParser
The argument parser to add the argument to.
default : {None, str}
Specify a default for the approximant argument. Defaults to None.
help : {None, str}
Provide a custom help message. If None, will use a descriptive message
on how to specify the approximant.
"""
if help is None:
help=str("The approximant(s) to use. Multiple approximants to use "
"in different regions may be provided. If multiple "
"approximants are provided, every one but the last must be "
"be followed by a conditional statement defining where that "
"approximant should be used. Conditionals can be any boolean "
"test understood by numpy. For example, 'Apprx:(mtotal > 4) & "
"(mchirp <= 5)' would use approximant 'Apprx' where total mass "
"is > 4 and chirp mass is <= 5. "
"Conditionals are applied in order, with each successive one "
"only applied to regions not covered by previous arguments. "
"For example, `'TaylorF2:mtotal < 4' 'IMRPhenomD:mchirp < 3'` "
"would result in IMRPhenomD being used where chirp mass is < 3 "
"and total mass is >= 4. The last approximant given may use "
"'else' as the conditional or include no conditional. In either "
"case, this will cause the last approximant to be used in any "
"remaning regions after all the previous conditionals have been "
"applied. For the full list of possible parameters to apply "
"conditionals to, see WaveformArray.default_fields(). Math "
"operations may also be used on parameters; syntax is python, "
"with any operation recognized by numpy.")
parser.add_argument("--approximant", nargs='+', type=str, default=default,
metavar='APPRX[:COND]',
help=help)
def parse_approximant_arg(approximant_arg, warray):
"""Given an approximant arg (see add_approximant_arg) and a field
array, figures out what approximant to use for each template in the array.
Parameters
----------
approximant_arg : list
The approximant argument to parse. Should be the thing returned by
ArgumentParser when parsing the argument added by add_approximant_arg.
warray : FieldArray
The array to parse. Must be an instance of a FieldArray, or a class
that inherits from FieldArray.
Returns
-------
array
A numpy array listing the approximants to use for each element in
the warray.
"""
return warray.parse_boolargs(boolargs_from_apprxstr(approximant_arg))[0]
def tuple_to_hash(tuple_to_be_hashed):
"""
    Return a hash for a numpy array, avoiding the native (unsafe) python3 hash function
Parameters
----------
tuple_to_be_hashed: tuple
The tuple which is being hashed
Must be convertible to a numpy array
Returns
-------
int
an integer representation of the hashed array
"""
h = hashlib.blake2b(np.array(tuple_to_be_hashed).tobytes('C'),
digest_size=8)
    # frombuffer replaces the deprecated np.fromstring for raw-bytes input
    return np.frombuffer(h.digest(), dtype=int)[0]
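# Usage sketch: unlike the built-in hash(), the digest-based value above is
# deterministic across processes and runs, e.g.
#
#   >>> tuple_to_hash((1.4, 1.38, 0., 0.))  # same integer on every run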
class TemplateBank(object):
"""Class to provide some basic helper functions and information
about elements of a template bank.
Parameters
----------
filename : string
The name of the file to load. Must end in '.xml[.gz]' or '.hdf'. If an
hdf file, it should have a 'parameters' in its `attrs` which gives a
list of the names of fields to load from the file. If no 'parameters'
are found, all of the top-level groups in the file will assumed to be
parameters (a warning will be printed to stdout in this case). If an
xml file, it must have a `SnglInspiral` table.
approximant : {None, (list of) string(s)}
Specify the approximant(s) for each template in the bank. If None
provided, will try to load the approximant from the file. The
approximant may either be a single string (in which case the same
approximant will be used for all templates) or a list of strings and
conditionals specifying where to use the approximant. See
`boolargs_from_apprxstr` for syntax.
    parameters : {None, (list of) string(s)}
Specify what parameters to load from the file. If None, all of the
parameters in the file (if an xml file, this is all of the columns in
the SnglInspiral table, if an hdf file, this is given by the
parameters attribute in the file). The list may include parameters that
are derived from the file's parameters, or functions thereof. For a
full list of possible parameters, see `WaveformArray.default_fields`.
If a derived parameter is specified, only the parameters needed to
compute that parameter will be loaded from the file. For example, if
`parameters='mchirp'`, then only `mass1, mass2` will be loaded from
the file. Note that derived parameters can only be used if the
needed parameters are in the file; e.g., you cannot use `chi_eff` if
`spin1z`, `spin2z`, `mass1`, and `mass2` are in the input file.
\**kwds :
Any additional keyword arguments are stored to the `extra_args`
attribute.
Attributes
----------
table : WaveformArray
An instance of a WaveformArray containing all of the information about
the parameters of the bank.
has_compressed_waveforms : {False, bool}
        True if compressed waveforms are present in the (hdf) file; False
otherwise.
parameters : tuple
The parameters loaded from the input file. Same as `table.fieldnames`.
indoc : {None, xmldoc}
If an xml file was provided, an in-memory representation of the xml.
Otherwise, None.
filehandler : {None, h5py.File}
If an hdf file was provided, the file handler pointing to the hdf file
(left open after initialization). Otherwise, None.
extra_args : {None, dict}
Any extra keyword arguments that were provided on initialization.
"""
def __init__(self, filename, approximant=None, parameters=None,
**kwds):
self.has_compressed_waveforms = False
ext = os.path.basename(filename)
if ext.endswith(('.xml', '.xml.gz', '.xmlgz')):
self.filehandler = None
self.indoc = ligolw_utils.load_filename(
filename, False, contenthandler=LIGOLWContentHandler)
self.table = table.get_table(
self.indoc, lsctables.SnglInspiralTable.tableName)
self.table = pycbc.io.WaveformArray.from_ligolw_table(self.table,
columns=parameters)
# inclination stored in xml alpha3 column
names = list(self.table.dtype.names)
names = tuple([n if n != 'alpha3' else 'inclination' for n in names])
# low frequency cutoff in xml alpha6 column
            names = tuple([n if n != 'alpha6' else 'f_lower' for n in names])
self.table.dtype.names = names
elif ext.endswith(('hdf', '.h5')):
self.indoc = None
f = h5py.File(filename, 'r')
self.filehandler = f
try:
fileparams = list(f.attrs['parameters'])
except KeyError:
# just assume all of the top-level groups are the parameters
fileparams = list(f.keys())
logging.info("WARNING: no parameters attribute found. "
"Assuming that %s " %(', '.join(fileparams)) +
"are the parameters.")
tmp_params = []
# At this point fileparams might be bytes. Fix if it is
for param in fileparams:
try:
param = param.decode()
tmp_params.append(param)
except AttributeError:
tmp_params.append(param)
fileparams = tmp_params
# use WaveformArray's syntax parser to figure out what fields
# need to be loaded
if parameters is None:
parameters = fileparams
common_fields = list(pycbc.io.WaveformArray(1,
names=parameters).fieldnames)
add_fields = list(set(parameters) &
(set(fileparams) - set(common_fields)))
# load
dtype = []
data = {}
for key in common_fields+add_fields:
data[key] = f[key][:]
dtype.append((key, data[key].dtype))
num = f[fileparams[0]].size
self.table = pycbc.io.WaveformArray(num, dtype=dtype)
for key in data:
self.table[key] = data[key]
# add the compressed waveforms, if they exist
self.has_compressed_waveforms = 'compressed_waveforms' in f
else:
raise ValueError("Unsupported template bank file extension %s" %(
ext))
# if approximant is specified, override whatever was in the file
# (if anything was in the file)
if approximant is not None:
# get the approximant for each template
dtype = h5py.string_dtype(encoding='utf-8')
apprxs = np.array(self.parse_approximant(approximant),
dtype=dtype)
if 'approximant' not in self.table.fieldnames:
self.table = self.table.add_fields(apprxs, 'approximant')
else:
self.table['approximant'] = apprxs
self.extra_args = kwds
self.ensure_hash()
@property
def parameters(self):
return self.table.fieldnames
def ensure_hash(self):
"""Ensure that there is a correctly populated template_hash.
Check for a correctly populated template_hash and create if it doesn't
already exist.
"""
fields = self.table.fieldnames
if 'template_hash' in fields:
return
# The fields to use in making a template hash
hash_fields = ['mass1', 'mass2', 'inclination',
'spin1x', 'spin1y', 'spin1z',
'spin2x', 'spin2y', 'spin2z',]
fields = [f for f in hash_fields if f in fields]
template_hash = np.array([tuple_to_hash(v) for v in zip(*[self.table[p]
for p in fields])])
if not np.unique(template_hash).size == template_hash.size:
raise RuntimeError("Some template hashes clash. This should not "
"happen.")
self.table = self.table.add_fields(template_hash, 'template_hash')
def write_to_hdf(self, filename, start_index=None, stop_index=None,
force=False, skip_fields=None,
write_compressed_waveforms=True):
"""Writes self to the given hdf file.
Parameters
----------
filename : str
The name of the file to write to. Must end in '.hdf'.
start_index : If a specific slice of the template bank is to be
written to the hdf file, this would specify the index of the
first template in the slice
stop_index : If a specific slice of the template bank is to be
written to the hdf file, this would specify the index of the
last template in the slice
force : {False, bool}
If the file already exists, it will be overwritten if True.
Otherwise, an OSError is raised if the file exists.
skip_fields : {None, (list of) strings}
Do not write the given fields to the hdf file. Default is None,
in which case all fields in self.table.fieldnames are written.
write_compressed_waveforms : {True, bool}
Write compressed waveforms to the output (hdf) file if this is
True, which is the default setting. If False, do not write the
compressed waveforms group, but only the template parameters to
the output file.
Returns
-------
h5py.File
The file handler to the output hdf file (left open).
"""
if not filename.endswith('.hdf'):
raise ValueError("Unrecoginized file extension")
if os.path.exists(filename) and not force:
raise IOError("File %s already exists" %(filename))
f = h5py.File(filename, 'w')
parameters = self.parameters
if skip_fields is not None:
if not isinstance(skip_fields, list):
skip_fields = [skip_fields]
parameters = [p for p in parameters if p not in skip_fields]
# save the parameters
f.attrs['parameters'] = parameters
write_tbl = self.table[start_index:stop_index]
for p in parameters:
f[p] = write_tbl[p]
if write_compressed_waveforms and self.has_compressed_waveforms:
for tmplt_hash in write_tbl.template_hash:
compressed_waveform = pycbc.waveform.compress.CompressedWaveform.from_hdf(
self.filehandler, tmplt_hash,
load_now=True)
compressed_waveform.write_to_hdf(f, tmplt_hash)
return f
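    # Usage sketch (file names are illustrative): write the first 100 templates
    # of an existing bank, overwriting any previous output:
    #
    #   bank = TemplateBank('full_bank.hdf')
    #   fout = bank.write_to_hdf('sub_bank.hdf', start_index=0, stop_index=100,
    #                            force=True)
    #   fout.close()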
def end_frequency(self, index):
""" Return the end frequency of the waveform at the given index value
"""
if hasattr(self.table[index], 'f_final'):
return self.table[index].f_final
return pycbc.waveform.get_waveform_end_frequency(
self.table[index],
approximant=self.approximant(index),
**self.extra_args)
def parse_approximant(self, approximant):
"""Parses the given approximant argument, returning the approximant to
use for each template in self. This is done by calling
`parse_approximant_arg` using self's table as the array; see that
function for more details."""
return parse_approximant_arg(approximant, self.table)
def approximant(self, index):
""" Return the name of the approximant ot use at the given index
"""
if 'approximant' not in self.table.fieldnames:
raise ValueError("approximant not found in input file and no "
"approximant was specified on initialization")
apx = self.table["approximant"][index]
if hasattr(apx, 'decode'):
apx = apx.decode()
return apx
def __len__(self):
return len(self.table)
def template_thinning(self, inj_filter_rejector):
"""Remove templates from bank that are far from all injections."""
if not inj_filter_rejector.enabled or \
inj_filter_rejector.chirp_time_window is None:
# Do nothing!
return
injection_parameters = inj_filter_rejector.injection_params.table
fref = inj_filter_rejector.f_lower
threshold = inj_filter_rejector.chirp_time_window
        m1 = self.table['mass1']
        m2 = self.table['mass2']
tau0_temp, _ = pycbc.pnutils.mass1_mass2_to_tau0_tau3(m1, m2, fref)
indices = []
sort = tau0_temp.argsort()
tau0_temp = tau0_temp[sort]
for inj in injection_parameters:
tau0_inj, _ = \
pycbc.pnutils.mass1_mass2_to_tau0_tau3(inj.mass1, inj.mass2,
fref)
lid = np.searchsorted(tau0_temp, tau0_inj - threshold)
rid = np.searchsorted(tau0_temp, tau0_inj + threshold)
inj_indices = sort[lid:rid]
indices.append(inj_indices)
indices_combined = np.concatenate(indices)
        indices_unique = np.unique(indices_combined)
self.table = self.table[indices_unique]
def ensure_standard_filter_columns(self, low_frequency_cutoff=None):
""" Initialize FilterBank common fields
Parameters
----------
low_frequency_cutoff: {float, None}, Optional
A low frequency cutoff which overrides any given within the
template bank file.
"""
# Make sure we have a template duration field
if not hasattr(self.table, 'template_duration'):
self.table = self.table.add_fields(np.zeros(len(self.table),
dtype=np.float32), 'template_duration')
# Make sure we have a f_lower field
if low_frequency_cutoff is not None:
if not hasattr(self.table, 'f_lower'):
vec = np.zeros(len(self.table), dtype=np.float32)
self.table = self.table.add_fields(vec, 'f_lower')
self.table['f_lower'][:] = low_frequency_cutoff
self.min_f_lower = min(self.table['f_lower'])
if self.f_lower is None and self.min_f_lower == 0.:
raise ValueError('Invalid low-frequency cutoff settings')
class LiveFilterBank(TemplateBank):
def __init__(self, filename, sample_rate, minimum_buffer,
approximant=None, increment=8, parameters=None,
low_frequency_cutoff=None,
**kwds):
self.increment = increment
self.filename = filename
self.sample_rate = sample_rate
self.minimum_buffer = minimum_buffer
self.f_lower = low_frequency_cutoff
super(LiveFilterBank, self).__init__(filename, approximant=approximant,
parameters=parameters, **kwds)
self.ensure_standard_filter_columns(low_frequency_cutoff=low_frequency_cutoff)
self.param_lookup = {}
for i, p in enumerate(self.table):
key = (p.mass1, p.mass2, p.spin1z, p.spin2z)
assert(key not in self.param_lookup) # Uh, oh, template confusion!
self.param_lookup[key] = i
def round_up(self, num):
"""Determine the length to use for this waveform by rounding.
Parameters
----------
        num : int
            Proposed length of the waveform in samples
        Returns
        -------
        size: int
            The rounded size to use for the waveform buffer in samples. This
            is calculated using an internal `increment` attribute, which determines
            the discreteness of the rounding.
"""
inc = self.increment
size = np.ceil(num / self.sample_rate / inc) * self.sample_rate * inc
return size
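    # Worked example (numbers are illustrative): with sample_rate=2048 and
    # increment=8, round_up(100000) = ceil(100000 / 2048 / 8) * 2048 * 8
    # = 7 * 16384 = 114688 samples, i.e. the buffer is padded up to a whole
    # multiple of 8 seconds.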
def getslice(self, sindex):
instance = copy(self)
instance.table = self.table[sindex]
return instance
def id_from_param(self, param_tuple):
"""Get the index of this template based on its param tuple
Parameters
----------
param_tuple : tuple
Tuple of the parameters which uniquely identify this template
Returns
--------
index : int
The ordered index that this template has in the template bank.
"""
return self.param_lookup[param_tuple]
def __getitem__(self, index):
if isinstance(index, slice):
return self.getslice(index)
return self.get_template(index)
def get_template(self, index, min_buffer=None):
approximant = self.approximant(index)
f_end = self.end_frequency(index)
flow = self.table[index].f_lower
# Determine the length of time of the filter, rounded up to
# nearest power of two
if min_buffer is None:
min_buffer = self.minimum_buffer
min_buffer += 0.5
from pycbc.waveform.waveform import props
p = props(self.table[index])
p.pop('approximant')
buff_size = pycbc.waveform.get_waveform_filter_length_in_time(approximant, **p)
tlen = self.round_up((buff_size + min_buffer) * self.sample_rate)
flen = int(tlen / 2 + 1)
delta_f = self.sample_rate / float(tlen)
if f_end is None or f_end >= (flen * delta_f):
f_end = (flen-1) * delta_f
logging.info("Generating %s, %ss, %i, starting from %s Hz",
approximant, 1.0/delta_f, index, flow)
# Get the waveform filter
distance = 1.0 / DYN_RANGE_FAC
htilde = pycbc.waveform.get_waveform_filter(
zeros(flen, dtype=np.complex64), self.table[index],
approximant=approximant, f_lower=flow, f_final=f_end,
delta_f=delta_f, delta_t=1.0/self.sample_rate, distance=distance,
**self.extra_args)
# If available, record the total duration (which may
# include ringdown) and the duration up to merger since they will be
# erased by the type conversion below.
ttotal = template_duration = -1
time_offset = None
if hasattr(htilde, 'length_in_time'):
ttotal = htilde.length_in_time
if hasattr(htilde, 'chirp_length'):
template_duration = htilde.chirp_length
if hasattr(htilde, 'time_offset'):
time_offset = htilde.time_offset
self.table[index].template_duration = template_duration
htilde = htilde.astype(np.complex64)
htilde.f_lower = flow
htilde.min_f_lower = self.min_f_lower
htilde.end_idx = int(f_end / htilde.delta_f)
htilde.params = self.table[index]
htilde.chirp_length = template_duration
htilde.length_in_time = ttotal
htilde.approximant = approximant
htilde.end_frequency = f_end
if time_offset:
htilde.time_offset = time_offset
# Add sigmasq as a method of this instance
htilde.sigmasq = types.MethodType(sigma_cached, htilde)
htilde.id = self.id_from_param((htilde.params.mass1,
htilde.params.mass2,
htilde.params.spin1z,
htilde.params.spin2z))
return htilde
class FilterBank(TemplateBank):
def __init__(self, filename, filter_length, delta_f, dtype,
out=None, max_template_length=None,
approximant=None, parameters=None,
enable_compressed_waveforms=True,
low_frequency_cutoff=None,
waveform_decompression_method=None,
**kwds):
self.out = out
self.dtype = dtype
self.f_lower = low_frequency_cutoff
self.filename = filename
self.delta_f = delta_f
self.N = (filter_length - 1 ) * 2
self.delta_t = 1.0 / (self.N * self.delta_f)
self.filter_length = filter_length
self.max_template_length = max_template_length
self.enable_compressed_waveforms = enable_compressed_waveforms
self.waveform_decompression_method = waveform_decompression_method
super(FilterBank, self).__init__(filename, approximant=approximant,
parameters=parameters, **kwds)
self.ensure_standard_filter_columns(low_frequency_cutoff=low_frequency_cutoff)
def get_decompressed_waveform(self, tempout, index, f_lower=None,
approximant=None, df=None):
"""Returns a frequency domain decompressed waveform for the template
in the bank corresponding to the index taken in as an argument. The
decompressed waveform is obtained by interpolating in frequency space,
the amplitude and phase points for the compressed template that are
read in from the bank."""
from pycbc.waveform.waveform import props
from pycbc.waveform import get_waveform_filter_length_in_time
# Get the template hash corresponding to the template index taken in as argument
tmplt_hash = self.table.template_hash[index]
# Read the compressed waveform from the bank file
compressed_waveform = pycbc.waveform.compress.CompressedWaveform.from_hdf(
self.filehandler, tmplt_hash,
load_now=True)
# Get the interpolation method to be used to decompress the waveform
if self.waveform_decompression_method is not None :
decompression_method = self.waveform_decompression_method
else :
decompression_method = compressed_waveform.interpolation
logging.info("Decompressing waveform using %s", decompression_method)
if df is not None :
delta_f = df
else :
delta_f = self.delta_f
# Create memory space for writing the decompressed waveform
decomp_scratch = FrequencySeries(tempout[0:self.filter_length], delta_f=delta_f, copy=False)
# Get the decompressed waveform
hdecomp = compressed_waveform.decompress(out=decomp_scratch, f_lower=f_lower, interpolation=decompression_method)
p = props(self.table[index])
p.pop('approximant')
try:
tmpltdur = self.table[index].template_duration
except AttributeError:
tmpltdur = None
if tmpltdur is None or tmpltdur==0.0 :
tmpltdur = get_waveform_filter_length_in_time(approximant, **p)
hdecomp.chirp_length = tmpltdur
hdecomp.length_in_time = hdecomp.chirp_length
return hdecomp
def generate_with_delta_f_and_max_freq(self, t_num, max_freq, delta_f,
low_frequency_cutoff=None,
cached_mem=None):
"""Generate the template with index t_num using custom length."""
approximant = self.approximant(t_num)
# Don't want to use INTERP waveforms in here
if approximant.endswith('_INTERP'):
approximant = approximant.replace('_INTERP', '')
# Using SPAtmplt here is bad as the stored cbrt and logv get
# recalculated as we change delta_f values. Fall back to TaylorF2
# in lalsimulation.
if approximant == 'SPAtmplt':
approximant = 'TaylorF2'
if cached_mem is None:
wav_len = int(max_freq / delta_f) + 1
cached_mem = zeros(wav_len, dtype=np.complex64)
if self.has_compressed_waveforms and self.enable_compressed_waveforms:
htilde = self.get_decompressed_waveform(cached_mem, t_num,
f_lower=low_frequency_cutoff,
approximant=approximant,
df=delta_f)
else :
htilde = pycbc.waveform.get_waveform_filter(
cached_mem, self.table[t_num], approximant=approximant,
f_lower=low_frequency_cutoff, f_final=max_freq, delta_f=delta_f,
distance=1./DYN_RANGE_FAC, delta_t=1./(2.*max_freq))
return htilde
def __getitem__(self, index):
# Make new memory for templates if we aren't given output memory
if self.out is None:
tempout = zeros(self.filter_length, dtype=self.dtype)
else:
tempout = self.out
approximant = self.approximant(index)
f_end = self.end_frequency(index)
if f_end is None or f_end >= (self.filter_length * self.delta_f):
f_end = (self.filter_length-1) * self.delta_f
# Find the start frequency, if variable
f_low = find_variable_start_frequency(approximant,
self.table[index],
self.f_lower,
self.max_template_length)
        logging.info('%s: generating %s from %s Hz', index, approximant, f_low)
        # Touch .data to force the storage to be instantiated, then clear it
        poke = tempout.data  # pylint:disable=unused-variable
tempout.clear()
# Get the waveform filter
distance = 1.0 / DYN_RANGE_FAC
if self.has_compressed_waveforms and self.enable_compressed_waveforms:
htilde = self.get_decompressed_waveform(tempout, index, f_lower=f_low,
approximant=approximant, df=None)
else :
htilde = pycbc.waveform.get_waveform_filter(
tempout[0:self.filter_length], self.table[index],
approximant=approximant, f_lower=f_low, f_final=f_end,
delta_f=self.delta_f, delta_t=self.delta_t, distance=distance,
**self.extra_args)
# If available, record the total duration (which may
# include ringdown) and the duration up to merger since they will be
# erased by the type conversion below.
ttotal = template_duration = None
if hasattr(htilde, 'length_in_time'):
ttotal = htilde.length_in_time
if hasattr(htilde, 'chirp_length'):
template_duration = htilde.chirp_length
self.table[index].template_duration = template_duration
htilde = htilde.astype(self.dtype)
htilde.f_lower = f_low
htilde.min_f_lower = self.min_f_lower
htilde.end_idx = int(f_end / htilde.delta_f)
htilde.params = self.table[index]
htilde.chirp_length = template_duration
htilde.length_in_time = ttotal
htilde.approximant = approximant
htilde.end_frequency = f_end
# Add sigmasq as a method of this instance
htilde.sigmasq = types.MethodType(sigma_cached, htilde)
htilde._sigmasq = {}
return htilde
def find_variable_start_frequency(approximant, parameters, f_start, max_length,
delta_f = 1):
""" Find a frequency value above the starting frequency that results in a
waveform shorter than max_length.
"""
if (f_start is None):
f = parameters.f_lower
elif (max_length is not None):
l = max_length + 1
f = f_start - delta_f
while l > max_length:
f += delta_f
l = pycbc.waveform.get_waveform_filter_length_in_time(approximant,
parameters, f_lower=f)
else :
f = f_start
return f
class FilterBankSkyMax(TemplateBank):
def __init__(self, filename, filter_length, delta_f,
dtype, out_plus=None, out_cross=None,
max_template_length=None, parameters=None,
low_frequency_cutoff=None, **kwds):
self.out_plus = out_plus
self.out_cross = out_cross
self.dtype = dtype
self.f_lower = low_frequency_cutoff
self.filename = filename
self.delta_f = delta_f
self.N = (filter_length - 1 ) * 2
self.delta_t = 1.0 / (self.N * self.delta_f)
self.filter_length = filter_length
self.max_template_length = max_template_length
super(FilterBankSkyMax, self).__init__(filename, parameters=parameters,
**kwds)
self.ensure_standard_filter_columns(low_frequency_cutoff=low_frequency_cutoff)
def __getitem__(self, index):
# Make new memory for templates if we aren't given output memory
if self.out_plus is None:
tempoutplus = zeros(self.filter_length, dtype=self.dtype)
else:
tempoutplus = self.out_plus
if self.out_cross is None:
tempoutcross = zeros(self.filter_length, dtype=self.dtype)
else:
tempoutcross = self.out_cross
approximant = self.approximant(index)
# Get the end of the waveform if applicable (only for SPAtmplt atm)
f_end = self.end_frequency(index)
if f_end is None or f_end >= (self.filter_length * self.delta_f):
f_end = (self.filter_length-1) * self.delta_f
# Find the start frequency, if variable
f_low = find_variable_start_frequency(approximant,
self.table[index],
self.f_lower,
self.max_template_length)
logging.info('%s: generating %s from %s Hz', index, approximant, f_low)
        # Touching .data appears to force the storage to be instantiated
        # (same poke pattern as in FilterBank.__getitem__ above)
poke1 = tempoutplus.data # pylint:disable=unused-variable
poke2 = tempoutcross.data # pylint:disable=unused-variable
# Clear the storage memory
tempoutplus.clear()
tempoutcross.clear()
# Get the waveform filter
distance = 1.0 / DYN_RANGE_FAC
hplus, hcross = pycbc.waveform.get_two_pol_waveform_filter(<|fim▁hole|> tempoutcross[0:self.filter_length], self.table[index],
approximant=approximant, f_lower=f_low,
f_final=f_end, delta_f=self.delta_f, delta_t=self.delta_t,
distance=distance, **self.extra_args)
if hasattr(hplus, 'chirp_length') and hplus.chirp_length is not None:
self.table[index].template_duration = hplus.chirp_length
hplus = hplus.astype(self.dtype)
hcross = hcross.astype(self.dtype)
hplus.f_lower = f_low
hcross.f_lower = f_low
hplus.min_f_lower = self.min_f_lower
hcross.min_f_lower = self.min_f_lower
hplus.end_frequency = f_end
hcross.end_frequency = f_end
hplus.end_idx = int(hplus.end_frequency / hplus.delta_f)
hcross.end_idx = int(hplus.end_frequency / hplus.delta_f)
hplus.params = self.table[index]
hcross.params = self.table[index]
hplus.approximant = approximant
hcross.approximant = approximant
# Add sigmasq as a method of this instance
hplus.sigmasq = types.MethodType(sigma_cached, hplus)
hplus._sigmasq = {}
hcross.sigmasq = types.MethodType(sigma_cached, hcross)
hcross._sigmasq = {}
return hplus, hcross
__all__ = ('sigma_cached', 'boolargs_from_apprxstr', 'add_approximant_arg',
'parse_approximant_arg', 'tuple_to_hash', 'TemplateBank',
'LiveFilterBank', 'FilterBank', 'find_variable_start_frequency',
'FilterBankSkyMax')<|fim▁end|> | tempoutplus[0:self.filter_length], |
<|file_name|>programming_interface.hpp<|end_file_name|><|fim▁begin|>/*
* Nana GUI Programming Interface Implementation
* Nana C++ Library(http://www.nanapro.org)
* Copyright(C) 2003-2015 Jinhao([email protected])
*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* @file: nana/gui/programming_interface.hpp
*/
#ifndef NANA_GUI_PROGRAMMING_INTERFACE_HPP
#define NANA_GUI_PROGRAMMING_INTERFACE_HPP
#include <nana/config.hpp>
#include "detail/bedrock.hpp"
#include "effects.hpp"
#include "detail/general_events.hpp"
#include <nana/paint/image.hpp>
#include <memory>
namespace nana
{
class drawer_trigger;
class widget;
namespace dev
{
/// Traits for widget classes
template<typename Widget>
struct widget_traits
{
using event_type = ::nana::general_events;
using scheme_type = ::nana::widget_colors;
};
}
namespace API
{
void effects_edge_nimbus(window, effects::edge_nimbus);
effects::edge_nimbus effects_edge_nimbus(window);
void effects_bground(window, const effects::bground_factory_interface&, double fade_rate);
bground_mode effects_bground_mode(window);
void effects_bground_remove(window);
//namespace dev
//@brief: The interfaces defined in namespace dev are used for developing the nana.gui
namespace dev
{
bool set_events(window, const std::shared_ptr<general_events>&);
template<typename Scheme>
std::unique_ptr<Scheme> make_scheme()
{
return std::unique_ptr<Scheme>(
static_cast<Scheme*>(::nana::detail::bedrock::instance().make_scheme(::nana::detail::scheme_factory<Scheme>()).release()));
}
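			//Usage sketch (my_scheme is a hypothetical widget_colors subclass):
			//	auto scm = API::dev::make_scheme<my_scheme>();
			//	API::dev::set_scheme(wd, scm.get());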
void set_scheme(window, widget_colors*);
widget_colors* get_scheme(window);
void attach_drawer(widget&, drawer_trigger&);
nana::string window_caption(window) throw();
void window_caption(window, nana::string);
window create_window(window, bool nested, const rectangle&, const appearance&, widget* attached);
window create_widget(window, const rectangle&, widget* attached);
window create_lite_widget(window, const rectangle&, widget* attached);
window create_frame(window, const rectangle&, widget* attached);
paint::graphics* window_graphics(window);
void delay_restore(bool);
}//end namespace dev
namespace detail
{
general_events* get_general_events(window);
}//end namespace detail
void exit();
nana::string transform_shortkey_text(nana::string text, nana::string::value_type &shortkey, nana::string::size_type *skpos);
bool register_shortkey(window, unsigned long);
void unregister_shortkey(window);
nana::point cursor_position();
rectangle make_center(unsigned width, unsigned height); ///< Retrieves a rectangle which is in the center of the screen.
rectangle make_center(window, unsigned width, unsigned height); ///< Retrieves a rectangle which is in the center of the window
template<typename Widget=::nana::widget, typename EnumFunction>
void enum_widgets(window wd, bool recursive, EnumFunction && ef)
{<|fim▁hole|> internal_scope_guard lock;
auto children = brock.wd_manager.get_children(reinterpret_cast<core_window_t*>(wd));
for (auto child : children)
{
auto wgt = dynamic_cast<Widget*>(brock.wd_manager.get_widget(child));
if (nullptr == wgt)
continue;
ef(*wgt);
if (recursive)
					enum_widgets<Widget>(reinterpret_cast<window>(child), recursive, std::forward<EnumFunction>(ef));	//recurse into the child's subtree, not into wd again
}
}
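		//Usage sketch (fm and the lambda are illustrative): visit every button
		//under a form, descending into nested containers:
		//	nana::form fm;
		//	API::enum_widgets<nana::button>(fm.handle(), true, [](nana::button& btn){
		//		btn.caption(STR("visited"));
		//	});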
void window_icon_default(const paint::image& small_icon, const paint::image& big_icon = {});
void window_icon(window, const paint::image& small_icon, const paint::image& big_icon = {});
bool empty_window(window); ///< Determines whether a window is existing.
bool is_window(window); ///< Determines whether a window is existing, equal to !empty_window.
bool is_destroying(window); ///< Determines whether a window is destroying
void enable_dropfiles(window, bool);
/// \brief Retrieves the native window of a Nana.GUI window.
///
/// The native window type is platform-dependent. Under Microsoft Windows, a conversion can be employed between
/// nana::native_window_type and HWND through reinterpret_cast operator. Under X System, a conversion can
/// be employed between nana::native_window_type and Window through reinterpret_cast operator.
/// \return If the function succeeds, the return value is the native window handle to the Nana.GUI window. If fails return zero.
native_window_type root(window);
		window root(native_window_type);			///< Retrieves the Nana.GUI window of a native window.
void fullscreen(window, bool);
bool enabled_double_click(window, bool);
bool insert_frame(window frame, native_window_type);
native_window_type frame_container(window frame);
native_window_type frame_element(window frame, unsigned index);
void close_window(window);
void show_window(window, bool show); ///< Sets a window visible state.
void restore_window(window);
void zoom_window(window, bool ask_for_max);
bool visible(window);
window get_parent_window(window);
window get_owner_window(window);
bool set_parent_window(window, window new_parent);
template<typename Widget=::nana::widget>
typename ::nana::dev::widget_traits<Widget>::event_type & events(window wd)
{
using event_type = typename ::nana::dev::widget_traits<Widget>::event_type;
internal_scope_guard lock;
auto * general_evt = detail::get_general_events(wd);
if (nullptr == general_evt)
throw std::invalid_argument("API::events(): bad parameter window handle, no events object or invalid window handle.");
if (std::is_same<::nana::general_events, event_type>::value)
return *static_cast<event_type*>(general_evt);
auto * widget_evt = dynamic_cast<event_type*>(general_evt);
if (nullptr == widget_evt)
throw std::invalid_argument("API::events(): bad template parameter Widget, the widget type and window handle do not match.");
return *widget_evt;
}
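		//Usage sketch (btn is an illustrative nana::button): fetch the typed
		//event set through a window handle and attach a handler:
		//	API::events<nana::button>(btn.handle()).click([]{ /*...*/ });
		//A mismatched Widget type throws std::invalid_argument, as above.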
template<typename EventArg, typename std::enable_if<std::is_base_of< ::nana::event_arg, EventArg>::value>::type* = nullptr>
bool emit_event(event_code evt_code, window wd, const EventArg& arg)
{
auto & brock = ::nana::detail::bedrock::instance();
return brock.emit(evt_code, reinterpret_cast< ::nana::detail::bedrock::core_window_t*>(wd), arg, true, brock.get_thread_context());
}
void umake_event(event_handle);
template<typename Widget = ::nana::widget>
typename ::nana::dev::widget_traits<Widget>::scheme_type & scheme(window wd)
{
using scheme_type = typename ::nana::dev::widget_traits<Widget>::scheme_type;
internal_scope_guard lock;
auto * wdg_colors = dev::get_scheme(wd);
if (nullptr == wdg_colors)
throw std::invalid_argument("API::scheme(): bad parameter window handle, no events object or invalid window handle.");
if (std::is_same<::nana::widget_colors, scheme_type>::value)
return *static_cast<scheme_type*>(wdg_colors);
auto * comp_wdg_colors = dynamic_cast<scheme_type*>(wdg_colors);
if (nullptr == comp_wdg_colors)
throw std::invalid_argument("API::scheme(): bad template parameter Widget, the widget type and window handle do not match.");
return *comp_wdg_colors;
}
point window_position(window);
void move_window(window, int x, int y);
void move_window(window wd, const rectangle&);
void bring_top(window, bool activated);
bool set_window_z_order(window wd, window wd_after, z_order_action action_if_no_wd_after);
void draw_through(window, std::function<void()>);
void map_through_widgets(window, native_drawable_type);
size window_size(window);
void window_size(window, const size&);
size window_outline_size(window);
void window_outline_size(window, const size&);
bool get_window_rectangle(window, rectangle&);
bool track_window_size(window, const size&, bool true_for_max); ///< Sets the minimum or maximum tracking size of a window.
void window_enabled(window, bool);
bool window_enabled(window);
/** @brief A widget drawer draws the widget surface in answering an event.
 *
 * This function tells Nana.GUI to copy the graphics buffer of the event window to the
 * screen after the event has been processed. It only works for a drawer_trigger: when a
 * drawer_trigger receives an event and draws, it should call lazy_refresh so that the
 * window is refreshed on screen once the event processing has finished.
 */
void lazy_refresh();
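// Sketch of the documented pattern inside a drawer_trigger (hypothetical trigger
// class and event signature):
//   void my_trigger::mouse_down(graph_reference graph, const nana::arg_mouse&)
//   {
//       graph.rectangle(true, nana::colors::white); // draw in answer to the event
//       nana::API::lazy_refresh(); // paste the buffer once the event has finished
//   }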
/** @brief Calls refresh() of a widget's drawer. If the current state is lazy_refresh, Nana.GUI may paste the drawing onto the window after the event processing.
 * @param window: specifies the window to be refreshed.
 */
void refresh_window(window); ///< Refreshes the window and displays it immediately by calling the refresh method of its drawer_trigger.
void refresh_window_tree(window); ///< Refreshes the specified window and all its child windows, then displays them immediately.
void update_window(window); ///< Copies the off-screen buffer to the screen for immediate display.
void window_caption(window, const std::string& title_utf8);
void window_caption(window, const nana::string& title);
nana::string window_caption(window);
void window_cursor(window, cursor);
cursor window_cursor(window);
void activate_window(window);
bool is_focus_ready(window);
window focus_window();
void focus_window(window);
window capture_window();
window capture_window(window, bool); ///< Enables or disables mouse capture for the window.
void capture_ignore_children(bool ignore); ///< Sets whether the captured window redirects mouse input to its children when the mouse is over them.
void modal_window(window); ///< Blocks the routine until the specified window is closed.
void wait_for(window);
color fgcolor(window);
color fgcolor(window, const color&);
color bgcolor(window);
color bgcolor(window, const color&);
color activated_color(window);
color activated_color(window, const color&);
void create_caret(window, unsigned width, unsigned height);
void destroy_caret(window);
void caret_effective_range(window, const rectangle&);
void caret_pos(window, const ::nana::point&);
nana::point caret_pos(window);
nana::size caret_size(window);
void caret_size(window, const size&);
void caret_visible(window, bool is_show);
bool caret_visible(window);
void tabstop(window); ///< Sets the window that owns the tabstop.
/// true: The focus is not changed when the Tab key is pressed; instead a key_char event carrying tab is generated.
void eat_tabstop(window, bool);
window move_tabstop(window, bool next); ///< Moves the focus to the tabstop window adjacent to the specified window (next or previous).
bool glass_window(window); /// \deprecated
bool glass_window(window, bool); /// \deprecated
/// Sets the window active state. If a window's active state is false, the window will not obtain the focus when a mouse clicks on it; the focus will instead be obtained by take_if_has_active_false.
void take_active(window, bool has_active, window take_if_has_active_false);
bool window_graphics(window, nana::paint::graphics&);
bool root_graphics(window, nana::paint::graphics&);
bool get_visual_rectangle(window, nana::rectangle&);
void typeface(window, const nana::paint::font&);
paint::font typeface(window);
bool calc_screen_point(window, point&); ///<Converts window coordinates to screen coordinates
bool calc_window_point(window, point&); ///<Converts screen coordinates to window coordinates.
window find_window(const nana::point& mspos);
void register_menu_window(window, bool has_keyboard);
bool attach_menubar(window menubar);
void detach_menubar(window menubar);
bool is_window_zoomed(window, bool ask_for_max); ///< Tests whether a window is maximized or minimized.
void widget_borderless(window, bool); ///< Enables or disables a borderless widget.
bool widget_borderless(window); ///< Tests whether a widget is borderless.
nana::mouse_action mouse_action(window);
nana::element_state element_state(window);
bool ignore_mouse_focus(window, bool ignore); ///< Enables/disables the mouse focus, it returns the previous state
bool ignore_mouse_focus(window); ///< Determines whether the mouse focus is enabled
}//end namespace API
}//end namespace nana
#endif<|fim▁end|> | static_assert(std::is_convertible<Widget, ::nana::widget>::value, "enum_widgets<Widget>: The specified Widget is not a widget type.");
typedef ::nana::detail::basic_window core_window_t;
auto & brock = ::nana::detail::bedrock::instance(); |
<|file_name|>test_CVE_2017_17724.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import system_tests
class TestFuzzedPoC(metaclass=system_tests.CaseMeta):
url = [
"https://github.com/Exiv2/exiv2/issues/210",
"https://github.com/Exiv2/exiv2/issues/209"
]
filename = system_tests.path("$data_path/2018-01-09-exiv2-crash-002.tiff")
commands = [
"$exiv2 -pR $filename",<|fim▁hole|> ]
retval = [1, 1, 0]
compare_stderr = system_tests.check_no_ASAN_UBSAN_errors
def compare_stdout(self, i, command, got_stdout, expected_stdout):
""" We don't care about the stdout, just don't crash """
pass<|fim▁end|> | "$exiv2 -pS $filename",
"$exiv2 $filename" |
<|file_name|>application_gateway_http_listener_py3.py<|end_file_name|><|fim▁begin|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayHttpListener(SubResource):
"""Http listener of an application gateway.
:param id: Resource ID.
:type id: str
:param frontend_ip_configuration: Frontend IP configuration resource of an
application gateway.
:type frontend_ip_configuration:
~azure.mgmt.network.v2017_09_01.models.SubResource
:param frontend_port: Frontend port resource of an application gateway.
:type frontend_port: ~azure.mgmt.network.v2017_09_01.models.SubResource
:param protocol: Protocol. Possible values include: 'Http', 'Https'
:type protocol: str or
~azure.mgmt.network.v2017_09_01.models.ApplicationGatewayProtocol
:param host_name: Host name of HTTP listener.
:type host_name: str
:param ssl_certificate: SSL certificate resource of an application
gateway.
:type ssl_certificate: ~azure.mgmt.network.v2017_09_01.models.SubResource
:param require_server_name_indication: Applicable only if protocol is
https. Enables SNI for multi-hosting.
:type require_server_name_indication: bool
:param provisioning_state: Provisioning state of the HTTP listener
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Name of the resource that is unique within a resource group.
This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
:param type: Type of the resource.
:type type: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'frontend_ip_configuration': {'key': 'properties.frontendIPConfiguration', 'type': 'SubResource'},
'frontend_port': {'key': 'properties.frontendPort', 'type': 'SubResource'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'host_name': {'key': 'properties.hostName', 'type': 'str'},
'ssl_certificate': {'key': 'properties.sslCertificate', 'type': 'SubResource'},
'require_server_name_indication': {'key': 'properties.requireServerNameIndication', 'type': 'bool'},<|fim▁hole|> 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, *, id: str=None, frontend_ip_configuration=None, frontend_port=None, protocol=None, host_name: str=None, ssl_certificate=None, require_server_name_indication: bool=None, provisioning_state: str=None, name: str=None, etag: str=None, type: str=None, **kwargs) -> None:
super(ApplicationGatewayHttpListener, self).__init__(id=id, **kwargs)
self.frontend_ip_configuration = frontend_ip_configuration
self.frontend_port = frontend_port
self.protocol = protocol
self.host_name = host_name
self.ssl_certificate = ssl_certificate
self.require_server_name_indication = require_server_name_indication
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
self.type = type<|fim▁end|> | |
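# Usage sketch (illustrative values; field names follow the docstring above):
#   listener = ApplicationGatewayHttpListener(
#       name='listener1',
#       protocol='Https',
#       host_name='www.contoso.com',
#       frontend_ip_configuration=SubResource(id='<frontend-ip-config-id>'),
#       frontend_port=SubResource(id='<frontend-port-id>'),
#       require_server_name_indication=True,
#   )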
<|file_name|>Utils.java<|end_file_name|><|fim▁begin|>package com.kromracing.runningroute.client;
import com.google.gwt.dom.client.Element;
import com.google.gwt.user.client.DOM;
import com.google.gwt.user.client.ui.CheckBox;
import com.google.gwt.user.client.ui.Widget;
final public class Utils {
private Utils() {
}
/**
* Sets the HTML id for a widget.
* @param widget The widget to have the id set, ex: TextBox
* @param id ID in HTML, ex: textbox-location
*/
static void setId(final Widget widget, final String id) {
if (widget instanceof CheckBox) {
final Element checkBoxElement = widget.getElement();
// The first element is the actual box to check. That is the one we care about.
final Element inputElement = DOM.getChild(checkBoxElement, 0);
inputElement.setAttribute("id", id);
//DOM.setElementAttribute(inputElement, "id", id); deprecated!
}
else {
widget.getElement().setAttribute("id", id);
//DOM.setElementAttribute(widget.getElement(), "id", id); deprecated!<|fim▁hole|>}<|fim▁end|> | }
} |
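// Usage sketch (hypothetical caller in the same package; TextBox is the GWT widget
// from com.google.gwt.user.client.ui): matches the javadoc example above.
//   final TextBox locationBox = new TextBox();
//   Utils.setId(locationBox, "textbox-location");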
<|file_name|>exchange_errors.py<|end_file_name|><|fim▁begin|>import sys
import traceback
from catalyst.errors import ZiplineError
def silent_except_hook(exctype, excvalue, exctraceback):
if exctype in [PricingDataBeforeTradingError, PricingDataNotLoadedError,
SymbolNotFoundOnExchange, NoDataAvailableOnExchange,
ExchangeAuthEmpty]:
fn = traceback.extract_tb(exctraceback)[-1][0]
ln = traceback.extract_tb(exctraceback)[-1][1]
print("Error traceback: {1} (line {2})\n"
"{0.__name__}: {3}".format(exctype, fn, ln, excvalue))
else:
sys.__excepthook__(exctype, excvalue, exctraceback)
sys.excepthook = silent_except_hook
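# Behavior sketch: with the hook installed, raising one of the whitelisted error
# types prints a short summary instead of a full traceback, e.g. (values invented):
#   raise SymbolNotFoundOnExchange(symbol='btc_usdt', exchange='poloniex',
#                                  supported_symbols=['eth_btc'])
# would print roughly:
#   Error traceback: algo.py (line 12)
#   SymbolNotFoundOnExchange: Symbol btc_usdt not found on exchange poloniex. ...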
class ExchangeRequestError(ZiplineError):
msg = (
'Request failed: {error}'
).strip()
class ExchangeRequestErrorTooManyAttempts(ZiplineError):
msg = (
'Request failed: {error}, giving up after {attempts} attempts'
).strip()
class ExchangeBarDataError(ZiplineError):
msg = (
'Unable to retrieve bar data: {data_type}, ' +
'giving up after {attempts} attempts: {error}'
).strip()
class ExchangePortfolioDataError(ZiplineError):
msg = (
'Unable to retrieve portfolio data: {data_type}, ' +
'giving up after {attempts} attempts: {error}'
).strip()
class ExchangeTransactionError(ZiplineError):
msg = (
'Unable to execute transaction: {transaction_type}, ' +
'giving up after {attempts} attempts: {error}'
).strip()
class ExchangeNotFoundError(ZiplineError):
msg = (
'Exchange {exchange_name} not found. Please specify exchanges '
'supported by Catalyst and verify spelling for accuracy.'
).strip()
class ExchangeAuthNotFound(ZiplineError):
msg = (
'Please create an auth.json file containing the api token and key for '
'exchange {exchange}. Place the file here: {filename}'
).strip()
class ExchangeAuthEmpty(ZiplineError):
msg = (
'Please enter your API token key and secret for exchange {exchange} '
'in the following file: {filename}'
).strip()
class RemoteAuthEmpty(ZiplineError):
msg = (
'Please enter your API token key and secret for the remote server '
'in the following file: {filename}'
).strip()
class ExchangeSymbolsNotFound(ZiplineError):
msg = (
'Unable to download or find a local copy of symbols.json for exchange '
'{exchange}. The file should be here: {filename}'
).strip()
class AlgoPickleNotFound(ZiplineError):
msg = (
'Pickle not found for algo {algo} in path {filename}'
).strip()
class InvalidHistoryFrequencyAlias(ZiplineError):
msg = (
'Invalid frequency alias {freq}. Valid suffixes are M (minute) '
'and D (day). For example, these aliases would be valid '
'1M, 5M, 1D.'
).strip()
class InvalidHistoryFrequencyError(ZiplineError):
msg = (
'Frequency {frequency} not supported by the exchange.'
).strip()
class UnsupportedHistoryFrequencyError(ZiplineError):
msg = (
'{exchange} does not support candle frequency {freq}, please choose '
'from: {freqs}.'
).strip()
class InvalidHistoryTimeframeError(ZiplineError):
msg = (
'CCXT timeframe {timeframe} not supported by the exchange.'
).strip()
class MismatchingFrequencyError(ZiplineError):
msg = (
'Bar aggregate frequency {frequency} not compatible with '
'data frequency {data_frequency}.'
).strip()
class InvalidSymbolError(ZiplineError):
msg = (
'Invalid trading pair symbol: {symbol}. '
'Catalyst symbols must follow this convention: '
'[Base Currency]_[Quote Currency]. For example: eth_usd, btc_usd, '
'neo_eth, ubq_btc. Error details: {error}'
).strip()
class InvalidOrderStyle(ZiplineError):
msg = (
'Order style {style} not supported by exchange {exchange}.'
).strip()
class CreateOrderError(ZiplineError):
msg = (
'Unable to create order on exchange {exchange} {error}.'
).strip()
class OrderNotFound(ZiplineError):
msg = (
'Order {order_id} not found on exchange {exchange}.'
).strip()
class OrphanOrderError(ZiplineError):
msg = (
'Order {order_id} found in exchange {exchange} but not tracked by '
'the algorithm.'
).strip()
class OrphanOrderReverseError(ZiplineError):
msg = (
'Order {order_id} tracked by algorithm, but not found in exchange '
'{exchange}.'
).strip()
class OrderCancelError(ZiplineError):
msg = (
'Unable to cancel order {order_id} on exchange {exchange} {error}.'
).strip()
class SidHashError(ZiplineError):
msg = (
'Unable to hash sid from symbol {symbol}.'
).strip()
class QuoteCurrencyNotFoundError(ZiplineError):
msg = (
'Algorithm quote currency {quote_currency} not found in account '
'balances on {exchange}: {balances}'
).strip()
class MismatchingQuoteCurrencies(ZiplineError):
msg = (
'Unable to trade with quote currency {quote_currency} when the '
'algorithm uses {algo_currency}.'
).strip()
class MismatchingQuoteCurrenciesExchanges(ZiplineError):
msg = (
'Unable to trade with quote currency {quote_currency} when the '
'exchange {exchange_name} uses {exchange_currency}.'
).strip()
class SymbolNotFoundOnExchange(ZiplineError):
"""
Raised when a symbol() call contains a non-existent symbol.
"""
msg = ('Symbol {symbol} not found on exchange {exchange}. '
'Choose from: {supported_symbols}').strip()
class BundleNotFoundError(ZiplineError):
msg = ('Unable to find bundle data for exchange {exchange} and '
'data frequency {data_frequency}.'
'Please ingest some price data.'
'See `catalyst ingest-exchange --help` for details.').strip()
class TempBundleNotFoundError(ZiplineError):
msg = ('Temporary bundle not found in: {path}.').strip()
class EmptyValuesInBundleError(ZiplineError):
msg = ('{name} with end minute {end_minute} has empty rows '
'in ranges: {dates}').strip()
class PricingDataBeforeTradingError(ZiplineError):
msg = ('Pricing data for trading pairs {symbols} on exchange {exchange} '
'starts on {first_trading_day}, but you are either trying to trade '
'or retrieve pricing data on {dt}. Adjust your dates accordingly.'
).strip()
<|fim▁hole|> '\nPlease run: `catalyst ingest-exchange -x {exchange} -f '
'{data_frequency} -i {symbol_list}`. See catalyst documentation '
'for details.').strip()
class PricingDataValueError(ZiplineError):
msg = ('Unable to retrieve pricing data for {exchange} {symbol} '
'[{start_dt} - {end_dt}]: {error}').strip()
class DataCorruptionError(ZiplineError):
msg = (
'Unable to validate data for {exchange} {symbols} in date range '
'[{start_dt} - {end_dt}]. The data is either corrupted or '
'unavailable. Please try deleting this bundle:'
'\n`catalyst clean-exchange -x {exchange}\n'
'Then, ingest the data again. Please contact the Catalyst team if '
'the issue persists.'
).strip()
class ApiCandlesError(ZiplineError):
msg = (
'Unable to fetch candles from the remote API: {error}.'
).strip()
class NoDataAvailableOnExchange(ZiplineError):
msg = (
'Requested data for trading pair {symbol} is not available on '
'exchange {exchange} '
'in `{data_frequency}` frequency at this time. '
'Check `http://enigma.co/catalyst/status` for market coverage.'
).strip()
class NoValueForField(ZiplineError):
msg = (
'Value not found for field: {field}.'
).strip()
class OrderTypeNotSupported(ZiplineError):
msg = (
'Order type `{order_type}` currently not supported by Catalyst. '
'Please use `limit` or `market` orders only.'
).strip()
class NotEnoughCapitalError(ZiplineError):
msg = (
'Not enough capital on exchange {exchange} for trading. Each '
'exchange should contain at least as much {quote_currency} '
'as the specified `capital_base`. The current balance {balance} is '
'lower than the `capital_base`: {capital_base}'
).strip()
class NotEnoughCashError(ZiplineError):
msg = (
'Total {currency} amount on {exchange} is lower than the cash '
'reserved for this algo: {total} < {cash}. While trades can be made '
'on the exchange accounts outside of the algo, the exchange must have '
'enough free {currency} to cover the algo cash.'
).strip()
class LastCandleTooEarlyError(ZiplineError):
msg = (
'The trade date of the last candle {last_traded} is before the '
'specified end date minus one candle {end_dt}. Please verify how '
'{exchange} calculates the start date of OHLCV candles.'
).strip()
class TickerNotFoundError(ZiplineError):
msg = (
'Unable to fetch ticker for {symbol} on {exchange}.'
).strip()
class BalanceNotFoundError(ZiplineError):
msg = (
'{currency} not found in account balance on {exchange}: {balances}.'
).strip()
class BalanceTooLowError(ZiplineError):
msg = (
'Balance for {currency} on {exchange} too low: {free} < {amount}. '
'Positions have likely been sold outside of this algorithm. Please '
'add positions to hold a free amount greater than {amount}, or clean '
'the state of this algo and restart.'
).strip()
class NoCandlesReceivedFromExchange(ZiplineError):
msg = (
'Although {bar_count} candles up to {end_dt} were requested for '
'asset {asset}, an empty list of candles was received for {exchange}.'
).strip()<|fim▁end|> |
class PricingDataNotLoadedError(ZiplineError):
msg = ('Missing data for {exchange} {symbols} in date range '
'[{start_dt} - {end_dt}]' |
<|file_name|>jquery.bxslider.js<|end_file_name|><|fim▁begin|>/**
* BxSlider v4.1.2 - Fully loaded, responsive content slider
* http://bxslider.com
*
* Copyright 2014, Steven Wanderski - http://stevenwanderski.com - http://bxcreative.com
* Written while drinking Belgian ales and listening to jazz
*
* Released under the MIT license - http://opensource.org/licenses/MIT
*/
;(function($){
var plugin = {};
var defaults = {
// GENERAL
mode: 'horizontal',
slideSelector: '',
infiniteLoop: true,
hideControlOnEnd: false,
speed: 1000,
easing: 'linear',
slideMargin: 0,
startSlide: 0,
randomStart: false,
captions: false,
ticker: false,
tickerHover: false,
adaptiveHeight: false,
adaptiveHeightSpeed: 500,
video: false,
useCSS: false,
preloadImages: 'visible',
responsive: true,
slideZIndex: 50,
wrapperClass: 'bx-wrapper',
// TOUCH
touchEnabled: true,
swipeThreshold: 50,
oneToOneTouch: true,
preventDefaultSwipeX: true,
preventDefaultSwipeY: false,
// PAGER
pager: false,
pagerType: 'full',
pagerShortSeparator: ' / ',
pagerSelector: null,
buildPager: null,
pagerCustom: null,
// CONTROLS
controls: true,
nextText: 'Next',
prevText: 'Prev',
nextSelector: null,
prevSelector: null,
autoControls: false,
startText: 'Start',
stopText: 'Stop',
autoControlsCombine: false,
autoControlsSelector: null,
// AUTO
auto: true,
pause: 4000,
autoStart: true,
autoDirection: 'next',
autoHover: true,
autoDelay: 0,
autoSlideForOnePage: false,
// CAROUSEL
minSlides: 1,
maxSlides: 1,
moveSlides: 0,
slideWidth: 0,
widthType: 'px',
// CALLBACKS
onSliderLoad: function() {},
onSlideBefore: function() {},
onSlideAfter: function() {},
onSlideNext: function() {},
onSlidePrev: function() {},
onSliderResize: function() {}
}
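// Usage sketch (hypothetical markup): keys omitted from `options` fall back to the
// defaults above via $.extend() inside init().
//   $('.supsystic-slider').bxSlider({ mode: 'fade', speed: 600, captions: true });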
$.fn.bxSlider = function(options){
if(this.length == 0) return this;
// support multiple elements
if(this.length > 1){
this.each(function(){$(this).bxSlider(options)});
return this;
}
// create a namespace to be used throughout the plugin
var slider = {};
// set a reference to our slider element
var el = this;
plugin.el = this;
/**
* Makes slideshow responsive
*/
// first get the original window dimensions (thanks a lot, IE)
var windowWidth = $(window).width();
var windowHeight = $(window).height();
/**
* ===================================================================================
* = PRIVATE FUNCTIONS
* ===================================================================================
*/
/**
* Initializes namespace settings to be used throughout plugin
*/
var init = function(){
// merge user-supplied options with the defaults
slider.settings = $.extend({}, defaults, options);
// parse slideWidth setting
slider.settings.slideWidth = parseInt(slider.settings.slideWidth);
slider.settings.pagerType = parseInt(slider.settings.navigation);
slider.settings.slideMargin = parseInt(slider.settings.slideMargin);
slider.settings.oneToOneTouch = slider.settings.oneToOne;
// store the original children
slider.children = el.children(slider.settings.slideSelector);
// check if actual number of slides is less than minSlides / maxSlides
if(slider.children.length < slider.settings.minSlides) slider.settings.minSlides = slider.children.length;
if(slider.children.length < slider.settings.maxSlides) slider.settings.maxSlides = slider.children.length;
// if random start, set the startSlide setting to random number
if(slider.settings.randomStart) slider.settings.startSlide = Math.floor(Math.random() * slider.children.length);
// store active slide information
slider.active = { index: slider.settings.startSlide }
// store if the slider is in carousel mode (displaying / moving multiple slides)
slider.carousel = slider.settings.minSlides > 1 || slider.settings.maxSlides > 1;
// if carousel, force preloadImages = 'all'
if(slider.carousel) slider.settings.preloadImages = 'all';
// calculate the min / max width thresholds based on min / max number of slides
// used to setup and update carousel slides dimensions
slider.minThreshold = (slider.settings.minSlides * slider.settings.slideWidth) + ((slider.settings.minSlides - 1) * slider.settings.slideMargin);
slider.maxThreshold = (slider.settings.maxSlides * slider.settings.slideWidth) + ((slider.settings.maxSlides - 1) * slider.settings.slideMargin);
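// Worked example (assumed settings): minSlides=2, maxSlides=4, slideWidth=150,
// slideMargin=10 -> minThreshold = 2*150 + 1*10 = 310 and
// maxThreshold = 4*150 + 3*10 = 630 (viewport widths in px).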
// store the current state of the slider (if currently animating, working is true)
slider.working = false;
// initialize the controls object
slider.controls = {};
// initialize an auto interval
slider.interval = null;
// determine which property to use for transitions
slider.animProp = slider.settings.mode == 'vertical' ? 'top' : 'left';
if(slider.settings.auto == 'enable') {
slider.settings.auto = true;
} else {
slider.settings.auto = false;
}
if(slider.settings.slideshowControls == 'enable' && slider.settings.auto) {
slider.settings.autoControls = true;
} else {
slider.settings.autoControls = false;
}
slider.settings.pager = slider.settings.pagerEnabled;
// determine if hardware acceleration can be used
slider.usingCSS = slider.settings.useCSS && slider.settings.mode != 'fade' && (function(){
// create our test div element
var div = document.createElement('div');
// css transition properties
var props = ['WebkitPerspective', 'MozPerspective', 'OPerspective', 'msPerspective'];
// test for each property
for(var i in props){
if(div.style[props[i]] !== undefined){
slider.cssPrefix = props[i].replace('Perspective', '').toLowerCase();
slider.animProp = '-' + slider.cssPrefix + '-transform';
return true;
}
}
return false;
}());
// if vertical mode always make maxSlides and minSlides equal
if(slider.settings.mode == 'vertical') slider.settings.maxSlides = slider.settings.minSlides;
// save original style data
el.data("origStyle", el.attr("style"));
el.children(slider.settings.slideSelector).each(function() {
$(this).data("origStyle", $(this).attr("style"));
});
// perform all DOM / CSS modifications
setup();
}
/**
* Performs all DOM and CSS modifications
*/
var setup = function(){
// wrap el in a wrapper
el.wrap('<div class="' + slider.settings.wrapperClass + '"><div class="bx-viewport"></div></div>');
// store a namespace reference to .bx-viewport
slider.viewport = el.parent();
// add a loading div to display while images are loading
slider.loader = $('<div class="bx-loading" />');
slider.viewport.prepend(slider.loader);
// set el to a massive width, to hold any needed slides
// also strip any margin and padding from el
el.css({
width: slider.settings.mode == 'horizontal' ? (slider.children.length * 100 + 215) + '%' : 'auto',
position: 'relative'
});
// if using CSS, add the easing property
if(slider.usingCSS && slider.settings.easing){
el.css('-' + slider.cssPrefix + '-transition-timing-function', slider.settings.easing);
// if not using CSS and no easing value was supplied, use the default JS animation easing (swing)
}else if(!slider.settings.easing){
slider.settings.easing = 'swing';
}
var slidesShowing = getNumberSlidesShowing();
// make modifications to the viewport (.bx-viewport)
slider.viewport.css({
width: '100%',
overflow: 'hidden',
position: 'relative'
});
slider.viewport.parent().css({
maxWidth: getViewportMaxWidth()
});
// make modification to the wrapper (.bx-wrapper)
if(!slider.settings.pager) {
slider.viewport.parent().css({
margin: '0 auto 0px'
});
}
// apply css to all slider children
slider.children.css({
'float': slider.settings.mode == 'horizontal' ? 'left' : 'none',
listStyle: 'none',
position: 'relative'
});
// apply the calculated width after the float is applied to prevent scrollbar interference
slider.children.css('width', getSlideWidth());
// if slideMargin is supplied, add the css
if(slider.settings.mode == 'horizontal' && slider.settings.slideMargin > 0) slider.children.css('marginRight', slider.settings.slideMargin);
if(slider.settings.mode == 'vertical' && slider.settings.slideMargin > 0) slider.children.css('marginBottom', slider.settings.slideMargin);
// if "fade" mode, add positioning and z-index CSS
if(slider.settings.mode == 'fade'){
slider.children.css({
position: 'absolute',
zIndex: 0,
display: 'none'
});
// prepare the z-index on the showing element
slider.children.eq(slider.settings.startSlide).css({zIndex: slider.settings.slideZIndex, display: 'block'});
}
//set thumbnails function
/*if(slider.settings.pagerType) {
var resources = $('.thumbnails').clone();
slider.settings.buildPager = function(index) {
var img = resources.find('li').get(index);
var video = $('.supsystic-slider li').get(index);
if($(video).children().hasClass('fluid-width-video-wrapper')) {
return '<img src="http://placehold.it/' + Math.floor(this.slideWidth/4.0) + 'x' + Math.floor(this.height/3.0) + '&text=Video">';
}
return '<img src="' + $(img).find('img').attr('src') + '">';
}
}
$('.thumbnails').remove();*/
// create an element to contain all slider controls (pager, start / stop, etc)
slider.controls.el = $('<div class="bx-controls" />');
// if captions are requested, add them
if(slider.settings.captions) appendCaptions();
// check if startSlide is last slide
slider.active.last = slider.settings.startSlide == getPagerQty() - 1;
// if video is true, set up the fitVids plugin
if(slider.settings.video) el.fitVids();
// set the default preload selector (visible)
var preloadSelector = slider.children.eq(slider.settings.startSlide);
if (slider.settings.preloadImages == "all") preloadSelector = slider.children;
// only check for control addition if not in "ticker" mode
if(!slider.settings.ticker){
// if pager is requested, add it
if(slider.settings.pager) appendPager();
// if controls are requested, add them
if(slider.settings.controls) appendControls();
// if auto is true, and auto controls are requested, add them
if(slider.settings.auto && slider.settings.autoControls) appendControlsAuto();
// if any control option is requested, add the controls wrapper
if(slider.settings.controls || slider.settings.autoControls || slider.settings.pager) slider.viewport.after(slider.controls.el);
// if ticker mode, do not allow a pager
}else{
slider.settings.pager = false;
}
// preload all images, then perform final DOM / CSS modifications that depend on images being loaded
loadElements(preloadSelector, start);
}
var loadElements = function(selector, callback){
var total = selector.find('img, iframe').length;
if (total == 0){
callback();
return;
}
var count = 0;
selector.find('img, iframe').each(function(){
$(this).one('load', function() {
if(++count == total) callback();
}).each(function() {
if(this.complete) $(this).load();
});
});
}
/**
* Start the slider
*/
var start = function(){
// if infinite loop, prepare additional slides
if(slider.settings.infiniteLoop && slider.settings.mode != 'fade' && !slider.settings.ticker){
var slice = slider.settings.mode == 'vertical' ? slider.settings.minSlides : slider.settings.maxSlides;
var sliceAppend = slider.children.slice(0, slice).clone().addClass('bx-clone');
var slicePrepend = slider.children.slice(-slice).clone().addClass('bx-clone');
el.append(sliceAppend).prepend(slicePrepend);
}
// remove the loading DOM element
slider.loader.remove();
// set the left / top position of "el"
setSlidePosition();
// if "vertical" mode, always use adaptiveHeight to prevent odd behavior
if (slider.settings.mode == 'vertical') slider.settings.adaptiveHeight = true;
// set the viewport height
slider.viewport.height(getViewportHeight());
// make sure everything is positioned just right (same as a window resize)
el.redrawSlider();
// onSliderLoad callback
slider.settings.onSliderLoad(slider.active.index);
// slider has been fully initialized
slider.initialized = true;
// bind the resize call to the window
if (slider.settings.responsive) $(window).bind('resize', resizeWindow);
// if auto is true and has more than 1 page, start the show
if (slider.settings.auto && slider.settings.autoStart && (getPagerQty() > 1 || slider.settings.autoSlideForOnePage)) initAuto();
// if ticker is true, start the ticker
if (slider.settings.ticker) initTicker();
// if pager is requested, make the appropriate pager link active
if (slider.settings.pager) updatePagerActive(slider.settings.startSlide);
// check for any updates to the controls (like hideControlOnEnd updates)
if (slider.settings.controls) updateDirectionControls();
// if touchEnabled is true, setup the touch events
if (slider.settings.touchEnabled && !slider.settings.ticker) initTouch();
}
/**
* Returns the calculated height of the viewport, used to determine either adaptiveHeight or the maxHeight value
*/
var getViewportHeight = function(){
var height = 0;
// first determine which children (slides) should be used in our height calculation
var children = $();
// if mode is not "vertical" and adaptiveHeight is false, include all children
if(slider.settings.mode != 'vertical' && !slider.settings.adaptiveHeight){
children = slider.children;
}else{
// if not carousel, return the single active child
if(!slider.carousel){
children = slider.children.eq(slider.active.index);
// if carousel, return a slice of children
}else{
// get the individual slide index
var currentIndex = slider.settings.moveSlides == 1 ? slider.active.index : slider.active.index * getMoveBy();
// add the current slide to the children
children = slider.children.eq(currentIndex);
// cycle through the remaining "showing" slides
for (i = 1; i <= slider.settings.maxSlides - 1; i++){
// if looped back to the start
if(currentIndex + i >= slider.children.length){
children = children.add(slider.children.eq(i - 1));
}else{
children = children.add(slider.children.eq(currentIndex + i));
}
}
}
}
// if "vertical" mode, calculate the sum of the heights of the children
if(slider.settings.mode == 'vertical'){
children.each(function(index) {
height += $(this).outerHeight();
});
// add user-supplied margins
if(slider.settings.slideMargin > 0){
height += slider.settings.slideMargin * (slider.settings.minSlides - 1);
}
// if not "vertical" mode, calculate the max height of the children
}else{
height = Math.max.apply(Math, children.map(function(){
return $(this).outerHeight(false);
}).get());
}
if(slider.viewport.css('box-sizing') == 'border-box'){
height += parseFloat(slider.viewport.css('padding-top')) + parseFloat(slider.viewport.css('padding-bottom')) +
parseFloat(slider.viewport.css('border-top-width')) + parseFloat(slider.viewport.css('border-bottom-width'));
}else if(slider.viewport.css('box-sizing') == 'padding-box'){
height += parseFloat(slider.viewport.css('padding-top')) + parseFloat(slider.viewport.css('padding-bottom'));
}
return height;
}
/**
* Returns the calculated width to be used for the outer wrapper / viewport
*/
var getViewportMaxWidth = function(){
var width = '100%';
if(slider.settings.slideWidth > 0){
if(slider.settings.mode == 'horizontal'){
width = (slider.settings.maxSlides * slider.settings.slideWidth) + ((slider.settings.maxSlides - 1) * slider.settings.slideMargin);
}else{
width = slider.settings.slideWidth;
}
}
return width;
};
/**
* Returns the calculated width to be applied to each slide
*/
var getSlideWidth = function(){
// start with any user-supplied slide width
var newElWidth = slider.settings.slideWidth;
// get the current viewport width
var wrapWidth = slider.viewport.width();
// if slide width was not supplied, or is larger than the viewport use the viewport width
if(slider.settings.slideWidth == 0 ||
(slider.settings.slideWidth > wrapWidth && !slider.carousel) ||
slider.settings.mode == 'vertical'){
newElWidth = wrapWidth;
// if carousel, use the thresholds to determine the width
}else if(slider.settings.maxSlides > 1 && slider.settings.mode == 'horizontal'){
if(wrapWidth > slider.maxThreshold){
// newElWidth = (wrapWidth - (slider.settings.slideMargin * (slider.settings.maxSlides - 1))) / slider.settings.maxSlides;
}else if(wrapWidth < slider.minThreshold){
newElWidth = (wrapWidth - (slider.settings.slideMargin * (slider.settings.minSlides - 1))) / slider.settings.minSlides;
}
}
return newElWidth;
}
/**
* Returns the number of slides currently visible in the viewport (includes partially visible slides)
*/
var getNumberSlidesShowing = function(){
var slidesShowing = 1;
if(slider.settings.mode == 'horizontal' && slider.settings.slideWidth > 0){
// if viewport is smaller than minThreshold, return minSlides
if(slider.viewport.width() < slider.minThreshold){
slidesShowing = slider.settings.minSlides;
// if viewport is larger than minThreshold, return maxSlides
}else if(slider.viewport.width() > slider.maxThreshold){
slidesShowing = slider.settings.maxSlides;
// if viewport is between min / max thresholds, divide viewport width by first child width
}else{
var childWidth = slider.children.first().width() + slider.settings.slideMargin;
slidesShowing = Math.floor((slider.viewport.width() +
slider.settings.slideMargin) / childWidth);
}
// if "vertical" mode, slides showing will always be minSlides
}else if(slider.settings.mode == 'vertical'){
slidesShowing = slider.settings.minSlides;
}
return slidesShowing;
}
/**
* Returns the number of pages (one full viewport of slides is one "page")
*/
var getPagerQty = function(){
var pagerQty = 0;
// if moveSlides is specified by the user
if(slider.settings.moveSlides > 0){
if(slider.settings.infiniteLoop){
pagerQty = Math.ceil(slider.children.length / getMoveBy());
}else{
// use a while loop to determine pages
var breakPoint = 0;
var counter = 0;
// when breakpoint goes above children length, counter is the number of pages
while (breakPoint < slider.children.length){
++pagerQty;
breakPoint = counter + getNumberSlidesShowing();
counter += slider.settings.moveSlides <= getNumberSlidesShowing() ? slider.settings.moveSlides : getNumberSlidesShowing();
}
}
// if moveSlides is 0 (auto) divide children length by sides showing, then round up
}else{
pagerQty = Math.ceil(slider.children.length / getNumberSlidesShowing());
}
return pagerQty;
}
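// Worked example (assumed values): 10 slides, moveSlides=3, 4 slides showing, no
// infinite loop -> breakPoint takes 4, 7, 10 across iterations, so pagerQty = 3
// (pages start at slides 0, 3 and 6; the last page shows slides 6-9).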
/**
* Returns the number of individual slides by which to shift the slider
*/
var getMoveBy = function(){
// if moveSlides was set by the user and moveSlides is less than number of slides showing
if(slider.settings.moveSlides > 0 && slider.settings.moveSlides <= getNumberSlidesShowing()){
return slider.settings.moveSlides;
}
// if moveSlides is 0 (auto)
return getNumberSlidesShowing();
}
/**
* Sets the slider's (el) left or top position
*/
var setSlidePosition = function(){
// if last slide, not infinite loop, and number of children is larger than specified maxSlides
if(slider.children.length > slider.settings.maxSlides && slider.active.last && !slider.settings.infiniteLoop){
if (slider.settings.mode == 'horizontal'){
// get the last child's position
var lastChild = slider.children.last();
var position = lastChild.position();
// set the left position
setPositionProperty(-(position.left - (slider.viewport.width() - lastChild.outerWidth())), 'reset', 0);
}else if(slider.settings.mode == 'vertical'){
// get the last showing index's position
var lastShowingIndex = slider.children.length - slider.settings.minSlides;
var position = slider.children.eq(lastShowingIndex).position();
// set the top position
setPositionProperty(-position.top, 'reset', 0);
}
// if not last slide
}else{
// get the position of the first showing slide
var position = slider.children.eq(slider.active.index * getMoveBy()).position();
// check for last slide
if (slider.active.index == getPagerQty() - 1) slider.active.last = true;
// set the respective position
if (position != undefined){
if (slider.settings.mode == 'horizontal') setPositionProperty(-position.left, 'reset', 0);
else if (slider.settings.mode == 'vertical') setPositionProperty(-position.top, 'reset', 0);
}
}
}
/**
* Sets the el's animating property position (which in turn will sometimes animate el).
* If using CSS, sets the transform property. If not using CSS, sets the top / left property.
*
* @param value (int)
* - the animating property's value
*
* @param type (string) 'slide', 'reset', 'ticker'
* - the type of instance for which the function is being called
*
* @param duration (int)
* - the amount of time (in ms) the transition should occupy
*
* @param params (array) optional
* - an optional parameter containing any variables that need to be passed in
*/
var setPositionProperty = function(value, type, duration, params){
// use CSS transform
if(slider.usingCSS){
// determine the translate3d value
var propValue = slider.settings.mode == 'vertical' ? 'translate3d(0, ' + value + 'px, 0)' : 'translate3d(' + value + 'px, 0, 0)';
// add the CSS transition-duration
el.css('-' + slider.cssPrefix + '-transition-duration', duration / 1000 + 's');
if(type == 'slide'){
// set the property value
el.css(slider.animProp, propValue);
// bind a callback method - executes when CSS transition completes
el.bind('transitionend webkitTransitionEnd oTransitionEnd MSTransitionEnd', function(){
// unbind the callback
el.unbind('transitionend webkitTransitionEnd oTransitionEnd MSTransitionEnd');
updateAfterSlideTransition();
});
}else if(type == 'reset'){
el.css(slider.animProp, propValue);
}else if(type == 'ticker'){
// make the transition use 'linear'
el.css('-' + slider.cssPrefix + '-transition-timing-function', 'linear');
el.css(slider.animProp, propValue);
// bind a callback method - executes when CSS transition completes
el.bind('transitionend webkitTransitionEnd oTransitionEnd MSTransitionEnd', function(){
// unbind the callback
el.unbind('transitionend webkitTransitionEnd oTransitionEnd MSTransitionEnd');
// reset the position
setPositionProperty(params['resetValue'], 'reset', 0);
// start the loop again
tickerLoop();
});
}
// use JS animate
}else{
var animateObj = {};
animateObj[slider.animProp] = value;
if(type == 'slide'){
el.animate(animateObj, duration, slider.settings.easing, function(){
updateAfterSlideTransition();
});
}else if(type == 'reset'){
el.css(slider.animProp, value)
}else if(type == 'ticker'){
el.animate(animateObj, speed, 'linear', function(){
setPositionProperty(params['resetValue'], 'reset', 0);
// run the recursive loop after animation
tickerLoop();
});
}
}
}
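// Usage sketch: shift the slide strip 300px to the left over 500ms and run the
// normal post-slide bookkeeping once the transition completes.
//   setPositionProperty(-300, 'slide', 500);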
/**
* Populates the pager with proper amount of pages
*/
var populatePager = function(){
var pagerHtml = '';
var pagerQty = getPagerQty();
// loop through each pager item
for(var i=0; i < pagerQty; i++){
var linkContent = '';
// if a buildPager function is supplied, use it to get pager link value, else use index + 1
if(slider.settings.buildPager && $.isFunction(slider.settings.buildPager)){
linkContent = slider.settings.buildPager(i);
slider.pagerEl.addClass('bx-custom-pager');
}else{
linkContent = i + 1;
slider.pagerEl.addClass('bx-default-pager');
}
// var linkContent = slider.settings.buildPager && $.isFunction(slider.settings.buildPager) ? slider.settings.buildPager(i) : i + 1;
// add the markup to the string
pagerHtml += '<li class="bx-pager-item"><a href="" data-slide-index="' + i + '" class="bx-pager-link">' + linkContent + '</a></li>';
};
// populate the pager element with pager links
slider.pagerEl.html(pagerHtml);
}
/**
* Appends the pager to the controls element
*/
var appendPager = function(){
if(!slider.settings.pagerCustom){
// create the pager DOM element
slider.pagerEl = $('<ul class="bx-pager" data-center="1" data-transform="0"/>');
// if a pager selector was supplied, populate it with the pager
if(slider.settings.pagerSelector){
$(slider.settings.pagerSelector).html(slider.pagerEl);
// if no pager selector was supplied, add it after the wrapper
}else{
slider.controls.el.addClass('bx-has-pager').append(slider.pagerEl);
if(slider.settings.pagerType) {
slider.controls.el.addClass('thumbnails');
}
}
// populate the pager
populatePager();
}else{
slider.pagerEl = $(slider.settings.pagerCustom);
}
// assign the pager click binding
slider.pagerEl.on('click', 'a', clickPagerBind);
}
/**
* Appends prev / next controls to the controls element
*/
var appendControls = function(){
slider.controls.next = $('<a class="bx-next" href="">' + slider.settings.nextText + '</a>');
slider.controls.prev = $('<a class="bx-prev" href="">' + slider.settings.prevText + '</a>');
// bind click actions to the controls
slider.controls.next.bind('click', clickNextBind);<|fim▁hole|> $(slider.settings.nextSelector).append(slider.controls.next);
}
// if prevSlector was supplied, populate it
if(slider.settings.prevSelector){
$(slider.settings.prevSelector).append(slider.controls.prev);
}
// if no custom selectors were supplied
if(!slider.settings.nextSelector && !slider.settings.prevSelector){
// add the controls to the DOM
slider.controls.directionEl = $('<div class="bx-controls-direction" />');
// add the control elements to the directionEl
slider.controls.directionEl.append(slider.controls.prev).append(slider.controls.next);
// slider.viewport.append(slider.controls.directionEl);
slider.controls.el.addClass('bx-has-controls-direction').append(slider.controls.directionEl);
}
}
/**
* Appends start / stop auto controls to the controls element
*/
var appendControlsAuto = function(){
slider.controls.start = $('<div class="bx-controls-auto-item"><a class="bx-start" href="">' + slider.settings.startText + '</a></div>');
slider.controls.stop = $('<div class="bx-controls-auto-item"><a class="bx-stop" href="">' + slider.settings.stopText + '</a></div>');
// add the controls to the DOM
slider.controls.autoEl = $('<div class="bx-controls-auto" />');
// bind click actions to the controls
slider.controls.autoEl.on('click', '.bx-start', clickStartBind);
slider.controls.autoEl.on('click', '.bx-stop', clickStopBind);
// if autoControlsCombine, insert only the "start" control
if(slider.settings.autoControlsCombine){
slider.controls.autoEl.append(slider.controls.start);
// if autoControlsCombine is false, insert both controls
}else{
slider.controls.autoEl.append(slider.controls.start).append(slider.controls.stop);
}
// if auto controls selector was supplied, populate it with the controls
if(slider.settings.autoControlsSelector){
$(slider.settings.autoControlsSelector).html(slider.controls.autoEl);
// if auto controls selector was not supplied, add it after the wrapper
}else{
slider.controls.el.addClass('bx-has-controls-auto').append(slider.controls.autoEl);
}
// update the auto controls
updateAutoControls(slider.settings.autoStart ? 'stop' : 'start');
}
/**
* Appends image captions to the DOM
*/
var appendCaptions = function(){
var htmlDecode = function(input){
var e = document.createElement('div');
e.innerHTML = input;
return e.childNodes.length === 0 ? "" : e.childNodes[0].nodeValue;
};
// cycle through each child
slider.children.each(function(index){
// get the image title attribute
var title = $(this).find('img:first').attr('title');
// append the caption
if (title != undefined && ('' + title).length) {
$(this).append('<div class="bx-caption"><span class="caption"></span></div>');
if(/<[a-z][\s\S]*>/i.test(htmlDecode(title))) {
title = $(htmlDecode(title));
}
$(this).find('.caption').append(title);
//var padding = parseInt($(this).find('.bx-caption').css('padding'), 10);
//$(this).find('.bx-caption').css('width', (getSlideWidth() - 2*padding) + 'px');
}
});
}
/**
* Click next binding
*
* @param e (event)
* - DOM event object
*/
var clickNextBind = function(e){
// if auto show is running, stop it
if (slider.settings.auto) el.stopAuto();
el.goToNextSlide();
/*if(slider.settings.pagerType) {
pagerMoveLeft();
}*/
e.preventDefault();
};
/**
* Click prev binding
*
* @param e (event)
* - DOM event object
*/
var clickPrevBind = function(e){
// if auto show is running, stop it
if (slider.settings.auto) el.stopAuto();
el.goToPrevSlide();
/*if(slider.settings.pagerType) {
pagerMoveRight();
}*/
e.preventDefault();
};
/**
* Click start binding
*
* @param e (event)
* - DOM event object
*/
var clickStartBind = function(e){
el.startAuto();
e.preventDefault();
}
/**
* Click stop binding
*
* @param e (event)
* - DOM event object
*/
var clickStopBind = function(e){
el.stopAuto();
e.preventDefault();
}
/**
* Click pager binding
*
* @param e (event)
* - DOM event object
*/
var clickPagerBind = function(e){
// if auto show is running, stop it
if (slider.settings.auto) el.stopAuto();
var pagerLink = $(e.currentTarget);
if(pagerLink.attr('data-slide-index') !== undefined){
var pagerIndex = parseInt(pagerLink.attr('data-slide-index'));
// if clicked pager link is not active, continue with the goToSlide call
if(pagerIndex != slider.active.index) el.goToSlide(pagerIndex);
e.preventDefault();
}
/*if(slider.settings.pagerType) {
pagerTranslate(this);
}*/
};
/*var pagerTranslate = function(self) {
var $pager = $('.bx-pager');
if($(self).parent('li').index() > parseInt($pager.data('center'), 10)) {
$pager.data('center', parseInt($pager.data('center'), 10) + $pager.find('li').length - $(self).parent('li').index());
}else {
$pager.data('center', parseInt($pager.data('center'), 10) - $pager.find('li').length - $(self).parent('li').index());
}
};
var thumbnailTranslate = function(element, translate, $pager) {
element.css({
'transform': 'translateX(' + translate + 'px)',
'-webkit-transform': 'translateX(' + translate + 'px)',
'-moz-transform': 'translateX(' + translate + 'px)',
'-o-transform': 'translateX(' + translate + 'px)',
'transition': '0.4s',
'-webkit-transition': '0.4s',
'-moz-transition': '0.4s',
'-o-transition': '0.4s'
});
$pager.data('transform', translate);
};
var pagerMoveLeft = function() {
var $pager = $('.bx-pager'),
translate = $pager.data('transform');
if(parseInt($pager.data('center')) < $pager.find('li').length - 3) {
translate -= slider.settings.slideWidth/4;
$pager.find('li').each(function() {
//translate = new WebKitCSSMatrix(translate).m41;
thumbnailTranslate($(this), translate, $pager);
});
}
if(parseInt($pager.data('center')) == $pager.find('li').length) {
$pager.data('center', parseInt($pager.data('center'), 10) - $pager.find('li').length);
$pager.find('li').each(function() {
//translate = new WebKitCSSMatrix(translate).m41;
thumbnailTranslate($(this), Math.floor(slider.settings.slideWidth/4)*($pager.find('li').length - 5), $pager);
});
}
$pager.data('center', parseInt($pager.data('center'), 10) + 1);
};
var pagerMoveRight = function() {
var $pager = $('.bx-pager'),
translate = $pager.data('transform');
if(parseInt($pager.data('center')) > 4 ) {
translate += slider.settings.slideWidth/4;
$pager.find('li').each(function() {
//translate = new WebKitCSSMatrix(translate).m41;
thumbnailTranslate($(this), translate, $pager);
});
}
if(parseInt($pager.data('center')) == 1) {
$pager.data('center', parseInt($pager.data('center'), 10) + $pager.find('li').length);
$pager.find('li').each(function() {
//translate = new WebKitCSSMatrix(translate).m41;
thumbnailTranslate($(this), -Math.floor(slider.settings.slideWidth/4)*($pager.find('li').length - 4), $pager);
});
}
$pager.data('center', parseInt($pager.data('center'), 10) - 1);
};*/
/**
* Updates the pager links with an active class
*
* @param slideIndex (int)
* - index of slide to make active
*/
var updatePagerActive = function(slideIndex){
// if "short" pager type
var len = slider.children.length; // nb of children
if(slider.settings.pagerType == 'short'){
if(slider.settings.maxSlides > 1) {
len = Math.ceil(slider.children.length/slider.settings.maxSlides);
}
slider.pagerEl.html( (slideIndex + 1) + slider.settings.pagerShortSeparator + len);
return;
}
// remove all pager active classes
slider.pagerEl.find('a').removeClass('active');
// apply the active class for all pagers
slider.pagerEl.each(function(i, el) { $(el).find('a').eq(slideIndex).addClass('active'); });
}
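// Worked example: with pagerType 'short', 10 children and maxSlides=2,
// len = ceil(10/2) = 5, so the pager renders "3 / 5" for slideIndex 2
// (using the default ' / ' pagerShortSeparator).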
/**
* Performs needed actions after a slide transition
*/
var updateAfterSlideTransition = function(){
// if infinite loop is true
if(slider.settings.infiniteLoop){
var position = '';
// first slide
if(slider.active.index == 0){
// set the new position
position = slider.children.eq(0).position();
// carousel, last slide
}else if(slider.active.index == getPagerQty() - 1 && slider.carousel){
position = slider.children.eq((getPagerQty() - 1) * getMoveBy()).position();
// last slide
}else if(slider.active.index == slider.children.length - 1){
position = slider.children.eq(slider.children.length - 1).position();
}
if(position){
if (slider.settings.mode == 'horizontal') { setPositionProperty(-position.left, 'reset', 0); }
else if (slider.settings.mode == 'vertical') { setPositionProperty(-position.top, 'reset', 0); }
}
}
// declare that the transition is complete
slider.working = false;
// onSlideAfter callback
slider.settings.onSlideAfter(slider.children.eq(slider.active.index), slider.oldIndex, slider.active.index);
}
/**
* Updates the auto controls state (either active, or combined switch)
*
* @param state (string) "start", "stop"
* - the new state of the auto show
*/
var updateAutoControls = function(state){
// if autoControlsCombine is true, replace the current control with the new state
if(slider.settings.autoControlsCombine){
slider.controls.autoEl.html(slider.controls[state]);
// if autoControlsCombine is false, apply the "active" class to the appropriate control
}else{
slider.controls.autoEl.find('a').removeClass('active');
slider.controls.autoEl.find('a:not(.bx-' + state + ')').addClass('active');
}
}
/**
* Updates the direction controls (checks if either should be hidden)
*/
var updateDirectionControls = function(){
if(getPagerQty() == 1){
slider.controls.prev.addClass('disabled');
slider.controls.next.addClass('disabled');
}else if(!slider.settings.infiniteLoop && slider.settings.hideControlOnEnd){
// if first slide
if (slider.active.index == 0){
slider.controls.prev.addClass('disabled');
slider.controls.next.removeClass('disabled');
// if last slide
}else if(slider.active.index == getPagerQty() - 1){
slider.controls.next.addClass('disabled');
slider.controls.prev.removeClass('disabled');
// if any slide in the middle
}else{
slider.controls.prev.removeClass('disabled');
slider.controls.next.removeClass('disabled');
}
}
}
/**
* Initializes the auto process
*/
var initAuto = function(){
// if autoDelay was supplied, launch the auto show using a setTimeout() call
if(slider.settings.autoDelay > 0){
var timeout = setTimeout(el.startAuto, slider.settings.autoDelay);
// if autoDelay was not supplied, start the auto show normally
}else{
el.startAuto();
}
// if autoHover is requested
if(slider.settings.autoHover){
// on el hover
el.hover(function(){
// if the auto show is currently playing (has an active interval)
if(slider.interval){
// stop the auto show and pass a true argument, which prevents the control update
el.stopAuto(true);
// create a new autoPaused value which will be used by the corresponding "mouseout" event
slider.autoPaused = true;
}
}, function(){
// if the autoPaused value was created by the prior "mouseover" event
if(slider.autoPaused){
// start the auto show and pass a true argument, which prevents the control update
el.startAuto(true);
// reset the autoPaused value
slider.autoPaused = null;
}
});
}
}
/**
* Initializes the ticker process
*/
var initTicker = function(){
var startPosition = 0;
// if autoDirection is "next", append a clone of the entire slider
if(slider.settings.autoDirection == 'next'){
el.append(slider.children.clone().addClass('bx-clone'));
// if autoDirection is "prev", prepend a clone of the entire slider, and set the left position
}else{
el.prepend(slider.children.clone().addClass('bx-clone'));
var position = slider.children.first().position();
startPosition = slider.settings.mode == 'horizontal' ? -position.left : -position.top;
}
setPositionProperty(startPosition, 'reset', 0);
// do not allow controls in ticker mode
slider.settings.pager = false;
slider.settings.controls = false;
slider.settings.autoControls = false;
// if autoHover is requested
if(slider.settings.tickerHover && !slider.usingCSS){
// on el hover
slider.viewport.hover(function(){
el.stop();
}, function(){
// calculate the total width of children (used to calculate the speed ratio)
var totalDimens = 0;
slider.children.each(function(index){
totalDimens += slider.settings.mode == 'horizontal' ? $(this).outerWidth(true) : $(this).outerHeight(true);
});
// calculate the speed ratio (used to determine the new speed to finish the paused animation)
var ratio = slider.settings.speed / totalDimens;
// determine which property to use
var property = slider.settings.mode == 'horizontal' ? 'left' : 'top';
// calculate the new speed
var newSpeed = ratio * (totalDimens - (Math.abs(parseInt(el.css(property)))));
tickerLoop(newSpeed);
});
}
// start the ticker loop
tickerLoop();
}
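// Worked resume-speed example (assumed numbers): settings.speed=1000ms, total child
// width 2000px, ticker paused at left:-500px -> ratio = 1000/2000 = 0.5 ms/px and
// newSpeed = 0.5 * (2000 - 500) = 750ms for the remaining distance.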
/**
* Runs a continuous loop, news ticker-style
*/
var tickerLoop = function(resumeSpeed){
speed = resumeSpeed ? resumeSpeed : slider.settings.speed;
var position = {left: 0, top: 0};
var reset = {left: 0, top: 0};
// if "next" animate left position to last child, then reset left to 0
if(slider.settings.autoDirection == 'next'){
position = el.find('.bx-clone').first().position();
// if "prev" animate left position to 0, then reset left to first non-clone child
}else{
reset = slider.children.first().position();
}
var animateProperty = slider.settings.mode == 'horizontal' ? -position.left : -position.top;
var resetValue = slider.settings.mode == 'horizontal' ? -reset.left : -reset.top;
var params = {resetValue: resetValue};
setPositionProperty(animateProperty, 'ticker', speed, params);
}
/**
* Initializes touch events
*/
var initTouch = function(){
// initialize object to contain all touch values
slider.touch = {
start: {x: 0, y: 0},
end: {x: 0, y: 0}
}
slider.viewport.bind('touchstart', onTouchStart);
}
/**
* Event handler for "touchstart"
*
* @param e (event)
* - DOM event object
*/
var onTouchStart = function(e){
if(slider.working){
e.preventDefault();
}else{
// record the original position when touch starts
slider.touch.originalPos = el.position();
var orig = e.originalEvent;
// record the starting touch x, y coordinates
slider.touch.start.x = orig.changedTouches[0].pageX;
slider.touch.start.y = orig.changedTouches[0].pageY;
// bind a "touchmove" event to the viewport
slider.viewport.bind('touchmove', onTouchMove);
// bind a "touchend" event to the viewport
slider.viewport.bind('touchend', onTouchEnd);
}
}
/**
* Event handler for "touchmove"
*
* @param e (event)
* - DOM event object
*/
var onTouchMove = function(e){
var orig = e.originalEvent;
// measure movement on each axis to decide whether to prevent the default scroll behavior
var xMovement = Math.abs(orig.changedTouches[0].pageX - slider.touch.start.x);
var yMovement = Math.abs(orig.changedTouches[0].pageY - slider.touch.start.y);
// x axis swipe
if((xMovement * 3) > yMovement && slider.settings.preventDefaultSwipeX){
e.preventDefault();
// y axis swipe
}else if((yMovement * 3) > xMovement && slider.settings.preventDefaultSwipeY){
e.preventDefault();
}
if(slider.settings.mode != 'fade' && slider.settings.oneToOneTouch){
var value = 0;
// if horizontal, drag along x axis
if(slider.settings.mode == 'horizontal'){
var change = orig.changedTouches[0].pageX - slider.touch.start.x;
value = slider.touch.originalPos.left + change;
// if vertical, drag along y axis
}else{
var change = orig.changedTouches[0].pageY - slider.touch.start.y;
value = slider.touch.originalPos.top + change;
}
setPositionProperty(value, 'reset', 0);
}
}
/**
* Event handler for "touchend"
*
* @param e (event)
* - DOM event object
*/
var onTouchEnd = function(e){
slider.viewport.unbind('touchmove', onTouchMove);
var orig = e.originalEvent;
var value = 0;
// record end x, y positions
slider.touch.end.x = orig.changedTouches[0].pageX;
slider.touch.end.y = orig.changedTouches[0].pageY;
// if fade mode, check if absolute x distance clears the threshold
if(slider.settings.mode == 'fade'){
var distance = Math.abs(slider.touch.start.x - slider.touch.end.x);
if(distance >= slider.settings.swipeThreshold){
slider.touch.start.x > slider.touch.end.x ? el.goToNextSlide() : el.goToPrevSlide();
el.stopAuto();
}
// not fade mode
}else{
var distance = 0;
// calculate distance and el's animate property
if(slider.settings.mode == 'horizontal'){
distance = slider.touch.end.x - slider.touch.start.x;
value = slider.touch.originalPos.left;
}else{
distance = slider.touch.end.y - slider.touch.start.y;
value = slider.touch.originalPos.top;
}
// if not infinite loop and first / last slide, do not attempt a slide transition
if(!slider.settings.infiniteLoop && ((slider.active.index == 0 && distance > 0) || (slider.active.last && distance < 0))){
setPositionProperty(value, 'reset', 200);
}else{
// check if distance clears threshold
if(Math.abs(distance) >= slider.settings.swipeThreshold){
distance < 0 ? el.goToNextSlide() : el.goToPrevSlide();
el.stopAuto();
}else{
// el.animate(property, 200);
setPositionProperty(value, 'reset', 200);
}
}
}
slider.viewport.unbind('touchend', onTouchEnd);
}
/**
* Window resize event callback
*/
var resizeWindow = function(e){
// don't do anything if slider isn't initialized.
if(!slider.initialized) return;
// get the new window dimens (again, thank you IE)
var windowWidthNew = $(window).width();
var windowHeightNew = $(window).height();
// make sure that it is a true window resize
// *we must check this because our dinosaur friend IE fires a window resize event when certain DOM elements
// are resized. Can you just die already?*
if(windowWidth != windowWidthNew || windowHeight != windowHeightNew){
// set the new window dimens
windowWidth = windowWidthNew;
windowHeight = windowHeightNew;
// update all dynamic elements
el.redrawSlider();
// Call user resize handler
slider.settings.onSliderResize.call(el, slider.active.index);
}
}
/**
* ===================================================================================
* = PUBLIC FUNCTIONS
* ===================================================================================
*/
/**
* Performs slide transition to the specified slide
*
* @param slideIndex (int)
* - the destination slide's index (zero-based)
*
* @param direction (string)
* - INTERNAL USE ONLY - the direction of travel ("prev" / "next")
*/
el.goToSlide = function(slideIndex, direction){
// if plugin is currently in motion, ignore request
if(slider.working || slider.active.index == slideIndex) return;
// declare that plugin is in motion
slider.working = true;
// store the old index
slider.oldIndex = slider.active.index;
// if slideIndex is less than zero, set active index to the last page (this happens during infinite loop)
if(slideIndex < 0){
slider.active.index = getPagerQty() - 1;
// if slideIndex is greater than or equal to the page count, set active index to 0 (this happens during infinite loop)
}else if(slideIndex >= getPagerQty()){
slider.active.index = 0;
// set active index to requested slide
}else{
slider.active.index = slideIndex;
}
// onSlideBefore, onSlideNext, onSlidePrev callbacks
slider.settings.onSlideBefore(slider.children.eq(slider.active.index), slider.oldIndex, slider.active.index);
if(direction == 'next'){
slider.settings.onSlideNext(slider.children.eq(slider.active.index), slider.oldIndex, slider.active.index);
}else if(direction == 'prev'){
slider.settings.onSlidePrev(slider.children.eq(slider.active.index), slider.oldIndex, slider.active.index);
}
// check if last slide
slider.active.last = slider.active.index >= getPagerQty() - 1;
// update the pager with active class
if(slider.settings.pager) updatePagerActive(slider.active.index);
// check for direction control update
if(slider.settings.controls) updateDirectionControls();
// if slider is set to mode: "fade"
if(slider.settings.mode == 'fade'){
// if adaptiveHeight is true and next height is different from current height, animate to the new height
if(slider.settings.adaptiveHeight && slider.viewport.height() != getViewportHeight()){
slider.viewport.animate({height: getViewportHeight()}, slider.settings.adaptiveHeightSpeed);
}
// fade out the visible child and reset its z-index value
slider.children.filter(':visible').fadeOut(slider.settings.speed).css({zIndex: 0});
// fade in the newly requested slide
slider.children.eq(slider.active.index).css('zIndex', slider.settings.slideZIndex+1).fadeIn(slider.settings.speed, function(){
$(this).css('zIndex', slider.settings.slideZIndex);
updateAfterSlideTransition();
});
// slider mode is not "fade"
}else{
// if adaptiveHeight is true and next height is different from current height, animate to the new height
if(slider.settings.adaptiveHeight && slider.viewport.height() != getViewportHeight()){
slider.viewport.animate({height: getViewportHeight()}, slider.settings.adaptiveHeightSpeed);
}
var moveBy = 0;
var position = {left: 0, top: 0};
// if carousel and not infinite loop
if(!slider.settings.infiniteLoop && slider.carousel && slider.active.last){
if(slider.settings.mode == 'horizontal'){
// get the last child position
var lastChild = slider.children.eq(slider.children.length - 1);
position = lastChild.position();
// offset so the last slide lines up flush with the right edge of the viewport
moveBy = slider.viewport.width() - lastChild.outerWidth();
}else{
// get last showing index position
var lastShowingIndex = slider.children.length - slider.settings.minSlides;
position = slider.children.eq(lastShowingIndex).position();
}
// carousel in infiniteLoop mode, going "prev" while on the last page
}else if(slider.carousel && slider.active.last && direction == 'prev'){
// locate the clone the viewport should land on when wrapping backwards
var eq = slider.settings.moveSlides == 1 ? slider.settings.maxSlides - getMoveBy() : ((getPagerQty() - 1) * getMoveBy()) - (slider.children.length - slider.settings.maxSlides);
var lastChild = el.children('.bx-clone').eq(eq);
position = lastChild.position();
// if infinite loop and "Next" is clicked on the last slide
}else if(direction == 'next' && slider.active.index == 0){
// get the last clone position
position = el.find('> .bx-clone').eq(slider.settings.maxSlides).position();
slider.active.last = false;
// normal non-zero requests
}else if(slideIndex >= 0){
var requestEl = slideIndex * getMoveBy();
position = slider.children.eq(requestEl).position();
}
/* If the position doesn't exist
 * (e.g. if the slider was destroyed on a "next" click),
 * skip the animation rather than throwing an error.
 */
if ("undefined" !== typeof(position)) {
var value = slider.settings.mode == 'horizontal' ? -(position.left - moveBy) : -position.top;
// plugin values to be animated
setPositionProperty(value, 'slide', slider.settings.speed);
}
}
}
/**
* Transitions to the next slide in the show
*/
el.goToNextSlide = function(){
// if infiniteLoop is false and last page is showing, disregard call
if (!slider.settings.infiniteLoop && slider.active.last) return;
var pagerIndex = parseInt(slider.active.index) + 1;
el.goToSlide(pagerIndex, 'next');
}
/**
* Transitions to the prev slide in the show
*/
el.goToPrevSlide = function(){
// if infiniteLoop is false and the first page is showing, disregard call
if (!slider.settings.infiniteLoop && slider.active.index == 0) return;
var pagerIndex = parseInt(slider.active.index) - 1;
el.goToSlide(pagerIndex, 'prev');
}
/**
* Starts the auto show
*
* @param preventControlUpdate (boolean)
* - if true, auto controls state will not be updated
*/
el.startAuto = function(preventControlUpdate){
// if an interval already exists, disregard call
if(slider.interval) return;
// create an interval
slider.interval = setInterval(function(){
slider.settings.autoDirection == 'next' ? el.goToNextSlide() : el.goToPrevSlide();
}, slider.settings.pause);
// if auto controls are displayed and preventControlUpdate is not true
if (slider.settings.autoControls && preventControlUpdate != true) updateAutoControls('stop');
}
/**
* Stops the auto show
*
* @param preventControlUpdate (boolean)
* - if true, auto controls state will not be updated
*/
el.stopAuto = function(preventControlUpdate){
// if no interval exists, disregard call
if(!slider.interval) return;
// clear the interval
clearInterval(slider.interval);
slider.interval = null;
// if auto controls are displayed and preventControlUpdate is not true
if (slider.settings.autoControls && preventControlUpdate != true) updateAutoControls('start');
}
/**
* Returns current slide index (zero-based)
*/
el.getCurrentSlide = function(){
return slider.active.index;
}
/**
* Returns current slide element
*/
el.getCurrentSlideElement = function(){
return slider.children.eq(slider.active.index);
}
/**
* Returns number of slides in show
*/
el.getSlideCount = function(){
return slider.children.length;
}
/**
* Update all dynamic slider elements
*/
el.redrawSlider = function(){
// resize all children in ratio to new screen size
slider.children.add(el.find('.bx-clone')).width(getSlideWidth());
// adjust the height
slider.viewport.css('height', getViewportHeight());
// update the slide position
if(!slider.settings.ticker) setSlidePosition();
// if active.last was true before the screen resize, we want
// to keep it last no matter what screen size we end on
if (slider.active.last) slider.active.index = getPagerQty() - 1;
// if the active index (page) no longer exists due to the resize, flag the slider as being on the last page
if (slider.active.index >= getPagerQty()) slider.active.last = true;
// if a pager is being displayed and a custom pager is not being used, update it
if(slider.settings.pager && !slider.settings.pagerCustom){
//populatePager();
updatePagerActive(slider.active.index);
}
}
/**
* Destroy the current instance of the slider (revert everything back to original state)
*/
el.destroySlider = function(){
// don't do anything if slider has already been destroyed
if(!slider.initialized) return;
slider.initialized = false;
$('.bx-clone', this).remove();
slider.children.each(function() {
$(this).data("origStyle") != undefined ? $(this).attr("style", $(this).data("origStyle")) : $(this).removeAttr('style');
});
$(this).data("origStyle") != undefined ? this.attr("style", $(this).data("origStyle")) : $(this).removeAttr('style');
$(this).unwrap().unwrap();
if(slider.controls.el) slider.controls.el.remove();
if(slider.controls.next) slider.controls.next.remove();
if(slider.controls.prev) slider.controls.prev.remove();
if(slider.pagerEl && slider.settings.controls) slider.pagerEl.remove();
$('.bx-caption', this).remove();
if(slider.controls.autoEl) slider.controls.autoEl.remove();
clearInterval(slider.interval);
if(slider.settings.responsive) $(window).unbind('resize', resizeWindow);
}
/**
* Reload the slider (revert all DOM changes, and re-initialize)
*/
el.reloadSlider = function(settings){
if (settings != undefined) options = settings;
el.destroySlider();
init();
}
init();
// returns the current jQuery object
return this;
}
})(jQuery);<|fim▁end|> | slider.controls.prev.bind('click', clickPrevBind);
// if nextSelector was supplied, populate it
if(slider.settings.nextSelector){ |
<|file_name|>tasks.py<|end_file_name|><|fim▁begin|># Copyright 2020 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --- This file has been autogenerated --- #
# --- from docs/Readout-Data-Collection.ipynb --- #
# --- Do not edit this file directly --- #
import os
import numpy as np
import sympy
import cirq
import recirq
@recirq.json_serializable_dataclass(namespace='recirq.readout_scan',
registry=recirq.Registry,
frozen=True)
class ReadoutScanTask:
"""Scan over Ry(theta) angles from -pi/2 to 3pi/2 tracing out a sinusoid
which is primarily affected by readout error.
See Also:
:py:func:`run_readout_scan`
Attributes:
dataset_id: A unique identifier for this dataset.
device_name: The device to run on, by name.
n_shots: The number of repetitions for each theta value.
qubit: The qubit to benchmark.
resolution_factor: We select the number of points in the linspace
so that the special points: (-1/2, 0, 1/2, 1, 3/2) * pi are
always included. The total number of theta evaluations
is resolution_factor * 4 + 1.
"""
dataset_id: str
device_name: str
n_shots: int
qubit: cirq.GridQubit
resolution_factor: int
@property
def fn(self):
n_shots = _abbrev_n_shots(n_shots=self.n_shots)
qubit = _abbrev_grid_qubit(self.qubit)
return (f'{self.dataset_id}/'
f'{self.device_name}/'
f'q-{qubit}/'
f'ry_scan_{self.resolution_factor}_{n_shots}')
# Define the following helper functions to make nicer `fn` keys
# for the tasks:
def _abbrev_n_shots(n_shots: int) -> str:
"""Shorter n_shots component of a filename"""
if n_shots % 1000 == 0:<|fim▁hole|> """Formatted grid_qubit component of a filename"""
return f'{qubit.row}_{qubit.col}'
EXPERIMENT_NAME = 'readout-scan'
DEFAULT_BASE_DIR = os.path.expanduser(f'~/cirq-results/{EXPERIMENT_NAME}')
def run_readout_scan(task: ReadoutScanTask,
base_dir=None):
"""Execute a :py:class:`ReadoutScanTask` task."""
if base_dir is None:
base_dir = DEFAULT_BASE_DIR
if recirq.exists(task, base_dir=base_dir):
print(f"{task} already exists. Skipping.")
return
# Create a simple circuit
theta = sympy.Symbol('theta')
circuit = cirq.Circuit([
cirq.ry(theta).on(task.qubit),
cirq.measure(task.qubit, key='z')
])
# Use utilities to map sampler names to Sampler objects
sampler = recirq.get_sampler_by_name(device_name=task.device_name)
# Use a sweep over theta values.
# Set up limits so we include (-1/2, 0, 1/2, 1, 3/2) * pi
# The total number of points is resolution_factor * 4 + 1
n_special_points: int = 5
resolution_factor = task.resolution_factor
theta_sweep = cirq.Linspace(theta, -np.pi / 2, 3 * np.pi / 2,
resolution_factor * (n_special_points - 1) + 1)
thetas = np.asarray([v for ((k, v),) in theta_sweep.param_tuples()])
flat_circuit, flat_sweep = cirq.flatten_with_sweep(circuit, theta_sweep)
# Run the jobs
print(f"Collecting data for {task.qubit}", flush=True)
results = sampler.run_sweep(program=flat_circuit, params=flat_sweep,
repetitions=task.n_shots)
# Save the results
recirq.save(task=task, data={
'thetas': thetas,
'all_bitstrings': [
recirq.BitArray(np.asarray(r.measurements['z']))
for r in results]
}, base_dir=base_dir)<|fim▁end|> | return f'{n_shots // 1000}k'
return str(n_shots)
def _abbrev_grid_qubit(qubit: cirq.GridQubit) -> str: |
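A minimal construction sketch (illustrative values; assumes the recirq/cirq imports above resolve) showing how the fn property composes the on-disk key from the task's fields:

task = ReadoutScanTask(
    dataset_id='2020-02-tutorial',   # illustrative
    device_name='Syc23-simulator',   # illustrative
    n_shots=40_000,
    qubit=cirq.GridQubit(2, 3),
    resolution_factor=6,
)
assert task.fn == '2020-02-tutorial/Syc23-simulator/q-2_3/ry_scan_6_40k'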
<|file_name|>ComplianceStatus.cpp<|end_file_name|><|fim▁begin|>/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <aws/securityhub/model/ComplianceStatus.h>
#include <aws/core/utils/HashingUtils.h>
#include <aws/core/Globals.h>
#include <aws/core/utils/EnumParseOverflowContainer.h>
using namespace Aws::Utils;
namespace Aws
{
namespace SecurityHub
{
namespace Model
{
namespace ComplianceStatusMapper
{
static const int PASSED_HASH = HashingUtils::HashString("PASSED");
static const int WARNING_HASH = HashingUtils::HashString("WARNING");
static const int FAILED_HASH = HashingUtils::HashString("FAILED");
static const int NOT_AVAILABLE_HASH = HashingUtils::HashString("NOT_AVAILABLE");
ComplianceStatus GetComplianceStatusForName(const Aws::String& name)
{
int hashCode = HashingUtils::HashString(name.c_str());
if (hashCode == PASSED_HASH)
{
return ComplianceStatus::PASSED;
}
else if (hashCode == WARNING_HASH)
{
return ComplianceStatus::WARNING;
}
else if (hashCode == FAILED_HASH)
{
return ComplianceStatus::FAILED;
}
else if (hashCode == NOT_AVAILABLE_HASH)
{
return ComplianceStatus::NOT_AVAILABLE;
}
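// Names that match no known enumerator are cached in the overflow container
// so that GetNameForComplianceStatus can later recover the original string.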
EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
if(overflowContainer)
{
overflowContainer->StoreOverflow(hashCode, name);
return static_cast<ComplianceStatus>(hashCode);
}
return ComplianceStatus::NOT_SET;
}
Aws::String GetNameForComplianceStatus(ComplianceStatus enumValue)
{
switch(enumValue)
{
case ComplianceStatus::PASSED:
return "PASSED";
case ComplianceStatus::WARNING:
return "WARNING";
case ComplianceStatus::FAILED:
return "FAILED";
case ComplianceStatus::NOT_AVAILABLE:
return "NOT_AVAILABLE";
default:
EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
if(overflowContainer)
{
return overflowContainer->RetrieveOverflow(static_cast<int>(enumValue));
}
return {};
}
}
} // namespace ComplianceStatusMapper<|fim▁hole|><|fim▁end|> | } // namespace Model
} // namespace SecurityHub
} // namespace Aws |
<|file_name|>read_test.py<|end_file_name|><|fim▁begin|>from tests.api import auth_for
from tests.data import add_fixtures, clubs, users
def test_lva(db_session, client):
lva = clubs.lva(owner=users.john())
add_fixtures(db_session, lva)
res = client.get("/clubs/{id}".format(id=lva.id))
assert res.status_code == 200
assert res.json == {
"id": lva.id,
"name": "LV Aachen",
"timeCreated": "2015-12-24T12:34:56+00:00",
"website": "http://www.lv-aachen.de",
"isWritable": False,
"owner": {"id": lva.owner.id, "name": lva.owner.name},
}
def test_sfn(db_session, client):
sfn = clubs.sfn()
add_fixtures(db_session, sfn)
<|fim▁hole|> assert res.status_code == 200
assert res.json == {
u"id": sfn.id,
u"name": u"Sportflug Niederberg",
u"timeCreated": "2017-01-01T12:34:56+00:00",
u"website": None,
u"isWritable": False,
u"owner": None,
}
def test_writable(db_session, client):
lva = clubs.lva()
john = users.john(club=lva)
add_fixtures(db_session, lva, john)
res = client.get("/clubs/{id}".format(id=lva.id), headers=auth_for(john))
assert res.status_code == 200
assert res.json == {
"id": lva.id,
"name": "LV Aachen",
"timeCreated": "2015-12-24T12:34:56+00:00",
"website": "http://www.lv-aachen.de",
"isWritable": True,
"owner": None,
}
def test_missing(client):
res = client.get("/clubs/10000000")
assert res.status_code == 404
def test_invalid_id(client):
res = client.get("/clubs/abc")
assert res.status_code == 404<|fim▁end|> | res = client.get("/clubs/{id}".format(id=sfn.id)) |
<|file_name|>0048_ramp.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-17 19:24
from __future__ import unicode_literals
import c3nav.mapdata.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mapdata', '0047_remove_mapupdate_changed_geometries'),
]
operations = [
migrations.CreateModel(
name='Ramp',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('minx', models.DecimalField(db_index=True, decimal_places=2, max_digits=6, verbose_name='min x coordinate')),
('miny', models.DecimalField(db_index=True, decimal_places=2, max_digits=6, verbose_name='min y coordinate')),
('maxx', models.DecimalField(db_index=True, decimal_places=2, max_digits=6, verbose_name='max x coordinate')),
('maxy', models.DecimalField(db_index=True, decimal_places=2, max_digits=6, verbose_name='max y coordinate')),
('geometry', c3nav.mapdata.fields.GeometryField(default=None, geomtype='polygon')),
('space', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ramps', to='mapdata.Space', verbose_name='space')),
],
options={<|fim▁hole|> 'verbose_name': 'Ramp',
'verbose_name_plural': 'Ramps',
'default_related_name': 'ramps',
},
),
]<|fim▁end|> | |
<|file_name|>Stack.cpp<|end_file_name|><|fim▁begin|>#include "logger/Stack.hpp"
using namespace logger;
///////////////////////////////////////
<|fim▁hole|>{
if (stack.empty())
return ioOut;
std::deque<std::string>::const_iterator dequeIt;
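// join the entries front-to-back with '_' separators (no leading or trailing '_')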
for(dequeIt = stack.begin();
dequeIt != stack.end();
++dequeIt)
{
if (dequeIt != stack.begin())
ioOut << "_";
ioOut << *dequeIt;
}
return ioOut;
}<|fim▁end|> | std::ostream& operator<<(std::ostream& ioOut, const Stack& stack) |
<|file_name|>minters.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#<|fim▁hole|># MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Persistent identifier minters."""
from __future__ import absolute_import
from invenio_pidstore.models import PersistentIdentifier, PIDStatus, \
RecordIdentifier
def zenodo_concept_recid_minter(record_uuid=None, data=None):
"""Mint the Concept RECID.
Reserves the Concept RECID for the record.
"""
parent_id = RecordIdentifier.next()
conceptrecid = PersistentIdentifier.create(
pid_type='recid',
pid_value=str(parent_id),
status=PIDStatus.RESERVED,
)
data['conceptrecid'] = conceptrecid.pid_value
return conceptrecid
def zenodo_deposit_minter(record_uuid, data):
"""Mint the DEPID, and reserve the Concept RECID and RECID PIDs."""
if 'conceptrecid' not in data:
zenodo_concept_recid_minter(data=data)
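        # (only reached for the first version of a record; a record that
        # already carries a conceptrecid keeps reusing it)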
recid = zenodo_reserved_record_minter(data=data)
# Create depid with same pid_value of the recid
depid = PersistentIdentifier.create(
'depid',
str(recid.pid_value),
object_type='rec',
object_uuid=record_uuid,
status=PIDStatus.REGISTERED,
)
data.update({
'_deposit': {
'id': depid.pid_value,
'status': 'draft',
},
})
return depid
def zenodo_reserved_record_minter(record_uuid=None, data=None):
"""Reserve a recid."""
id_ = RecordIdentifier.next()
recid = PersistentIdentifier.create(
'recid', id_, status=PIDStatus.RESERVED
)
data['recid'] = recid.pid_value
return recid<|fim▁end|> | # You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, |
<|file_name|>cotizacion_item_recursos_humanos.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright (c) 2015, Daniel and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe<|fim▁hole|>class CotizacionItemRecursosHumanos(Document):
pass<|fim▁end|> | from frappe.model.document import Document
|
<|file_name|>match-disc-bot.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.<|fim▁hole|>fn g() -> isize { match f() { true => { 1 } false => { 0 } } }
fn main() { g(); }<|fim▁end|> |
// error-pattern:quux
fn f() -> ! { panic!("quux") } |
<|file_name|>pinned_events.rs<|end_file_name|><|fim▁begin|>//! Types for the [`m.room.pinned_events`] event.
//!
//! [`m.room.pinned_events`]: https://spec.matrix.org/v1.2/client-server-api/#mroompinned_events
use ruma_macros::EventContent;
use serde::{Deserialize, Serialize};
use crate::EventId;
/// The content of an `m.room.pinned_events` event.
///
/// Used to "pin" particular events in a room for other participants to review later.
#[derive(Clone, Debug, Deserialize, Serialize, EventContent)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
#[ruma_event(type = "m.room.pinned_events", kind = State)]
pub struct RoomPinnedEventsEventContent {
/// An ordered list of event IDs to pin.
pub pinned: Vec<Box<EventId>>,
}
impl RoomPinnedEventsEventContent {
/// Creates a new `RoomPinnedEventsEventContent` with the given events.
pub fn new(pinned: Vec<Box<EventId>>) -> Self {
Self { pinned }
}
}
#[cfg(all(test, feature = "rand"))]
mod tests {
use std::convert::TryInto;
use crate::{server_name, EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId};
use super::RoomPinnedEventsEventContent;
use crate::events::{StateEvent, Unsigned};
#[test]
fn serialization_deserialization() {
let mut content: RoomPinnedEventsEventContent =
RoomPinnedEventsEventContent { pinned: Vec::new() };
let server_name = server_name!("example.com");
content.pinned.push(EventId::new(server_name));
content.pinned.push(EventId::new(server_name));
let event = StateEvent {
content: content.clone(),
event_id: EventId::new(server_name),
origin_server_ts: MilliSecondsSinceUnixEpoch(1_432_804_485_886_u64.try_into().unwrap()),
prev_content: None,
room_id: RoomId::new(server_name),
sender: UserId::new(server_name),
state_key: "".into(),
unsigned: Unsigned::default(),
};
let serialized_event = serde_json::to_string(&event).unwrap();
let parsed_event: StateEvent<RoomPinnedEventsEventContent> =
serde_json::from_str(&serialized_event).unwrap();
assert_eq!(parsed_event.event_id, event.event_id);
assert_eq!(parsed_event.room_id, event.room_id);
assert_eq!(parsed_event.sender, event.sender);
assert_eq!(parsed_event.state_key, event.state_key);
assert_eq!(parsed_event.origin_server_ts, event.origin_server_ts);
assert_eq!(parsed_event.content.pinned, event.content.pinned);
assert_eq!(parsed_event.content.pinned[0], content.pinned[0]);
assert_eq!(parsed_event.content.pinned[1], content.pinned[1]);<|fim▁hole|><|fim▁end|> | }
} |
<|file_name|>main.go<|end_file_name|><|fim▁begin|>/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// vtcombo: a single binary that contains:
// - a ZK topology server based on an in-memory map.
// - one vtgate instance.
// - many vttablet instances.
// - a vtctld instance so it's easy to see the topology.
package main
import (
"flag"
"strings"
"time"
log "github.com/golang/glog"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"vitess.io/vitess/go/exit"
"vitess.io/vitess/go/vt/dbconfigs"
"vitess.io/vitess/go/vt/discovery"
"vitess.io/vitess/go/vt/mysqlctl"
"vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/srvtopo"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/memorytopo"
"vitess.io/vitess/go/vt/vtctld"
"vitess.io/vitess/go/vt/vtgate"
"vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
vttestpb "vitess.io/vitess/go/vt/proto/vttest"
)
var (
protoTopo = flag.String("proto_topo", "", "vttest proto definition of the topology, encoded in compact text format. See vttest.proto for more information.")
schemaDir = flag.String("schema_dir", "", "Schema base directory. Should contain one directory per keyspace, with a vschema.json file if necessary.")
ts *topo.Server
)
func init() {
servenv.RegisterDefaultFlags()
}
func main() {
defer exit.Recover()
// flag parsing
dbconfigFlags := dbconfigs.AppConfig | dbconfigs.AllPrivsConfig | dbconfigs.DbaConfig |
dbconfigs.FilteredConfig | dbconfigs.ReplConfig
dbconfigs.RegisterFlags(dbconfigFlags)
mysqlctl.RegisterFlags()
servenv.ParseFlags("vtcombo")
// parse the input topology
tpb := &vttestpb.VTTestTopology{}
if err := proto.UnmarshalText(*protoTopo, tpb); err != nil {
log.Errorf("cannot parse topology: %v", err)
exit.Return(1)
}
// default cell to "test" if unspecified
if len(tpb.Cells) == 0 {
tpb.Cells = append(tpb.Cells, "test")
}
// set discoverygateway flag to default value
flag.Set("cells_to_watch", strings.Join(tpb.Cells, ","))
// vtctld UI requires the cell flag
flag.Set("cell", tpb.Cells[0])
flag.Set("enable_realtime_stats", "true")
if flag.Lookup("log_dir") == nil {
flag.Set("log_dir", "$VTDATAROOT/tmp")
}
// Create topo server. We use a 'memorytopo' implementation.
ts = memorytopo.NewServer(tpb.Cells...)
servenv.Init()
tabletenv.Init()
// database configs
mycnf, err := mysqlctl.NewMycnfFromFlags(0)
if err != nil {
log.Errorf("mycnf read failed: %v", err)
exit.Return(1)
}
dbcfgs, err := dbconfigs.Init(mycnf.SocketFile, dbconfigFlags)
if err != nil {
log.Warning(err)
}
mysqld := mysqlctl.NewMysqld(mycnf, dbcfgs, dbconfigFlags)
servenv.OnClose(mysqld.Close)
// tablets configuration and init
if err := initTabletMap(ts, tpb, mysqld, *dbcfgs, *schemaDir, mycnf); err != nil {
log.Errorf("initTabletMapProto failed: %v", err)<|fim▁hole|> // vtgate configuration and init
resilientServer := srvtopo.NewResilientServer(ts, "ResilientSrvTopoServer")
healthCheck := discovery.NewHealthCheck(1*time.Millisecond /*retryDelay*/, 1*time.Hour /*healthCheckTimeout*/)
tabletTypesToWait := []topodatapb.TabletType{
topodatapb.TabletType_MASTER,
topodatapb.TabletType_REPLICA,
topodatapb.TabletType_RDONLY,
}
vtgate.QueryLogHandler = "/debug/vtgate/querylog"
vtgate.QueryLogzHandler = "/debug/vtgate/querylogz"
vtgate.QueryzHandler = "/debug/vtgate/queryz"
vtgate.Init(context.Background(), healthCheck, resilientServer, tpb.Cells[0], 2 /*retryCount*/, tabletTypesToWait)
// vtctld configuration and init
vtctld.InitVtctld(ts)
servenv.OnTerm(func() {
// FIXME(alainjobart): stop vtgate
})
servenv.OnClose(func() {
// We will still use the topo server during lameduck period
// to update our state, so closing it in OnClose()
ts.Close()
})
servenv.RunDefault()
}<|fim▁end|> | exit.Return(1)
}
|
<|file_name|>CStaticGeometry.cpp<|end_file_name|><|fim▁begin|>/*****************************************************************************
*
* PROJECT: Open Faction
* LICENSE: See LICENSE in the top level directory
* FILE: shared/CStaticGeometry.cpp
* PURPOSE: Loading of static geometry in levels
* DEVELOPERS: Rafal Harabien
*
*****************************************************************************/
#include "CStaticGeometry.h"
#include "CLevel.h"
#include "CLevelProperties.h"
#include "CConsole.h"
#include "formats/rfl_format.h"
#include "CGame.h"
#include "CEventsHandler.h"
#ifdef OF_CLIENT
# include "CTextureMgr.h"
# include "CLightmaps.h"
# include "irr/CReadFile.h"
# include "irr/CMeshSkyboxSceneNode.h"
# include "irr/CVbmAnimator.h"
#endif // OF_CLIENT
#include <util/utils.h>
#include <cmath>
#include <cassert>
using namespace std;
#ifdef OF_CLIENT
using namespace irr;
#endif // OF_CLIENT
struct SIndexDesc
{
unsigned idx;
float u, v;
float lm_u, lm_v;
};
struct SFace
{
btVector3 vNormal;
unsigned iTexture;
unsigned iLightmapUnk;
unsigned uFlags;
unsigned iRoom;
unsigned uPortalUnk;
vector<SIndexDesc> Indices;
};
#ifdef OF_CLIENT
struct SIrrRoom
{
SIrrRoom(): pIrrMesh(NULL) {}
scene::SMesh *pIrrMesh;
vector<scene::ISceneNodeAnimator*> Animators;
};
#endif
CRoom::CRoom(CLevel *pLevel, unsigned nId, float fLife, bool bDetail):
CKillableObject(OFET_LEVEL, pLevel, nId),
m_pMesh(NULL), m_pShape(NULL),
m_bDetail(bDetail)
{
m_iColGroup = COL_LEVEL;
m_iColMask = COL_ENTITY;
SetLife(fLife);
}
CRoom::~CRoom()
{
if(m_pShape)
{
assert(m_pColObj);
m_pColObj->setCollisionShape(NULL);
delete m_pShape;
m_pShape = NULL;
}
if(m_pMesh)
delete m_pMesh;
m_pMesh = NULL;
}
void CRoom::Kill(SDamageInfo &DmgInfo)
{
if(!IsAlive())
return; // already dead
m_fLife = 0.0f;
m_pLevel->GetGame()->GetEventsHandler()->OnGlassKill(this, DmgInfo);
RemoveFromWorld();
}
void CStaticGeometry::Load(CLevel *pLevel, CInputBinaryStream &Stream, unsigned nVersion)
{
/* Unload old geometry first */
Unload();
Stream.ignore(nVersion == 0xB4 ? 6 : 10); // unknown
unsigned cTextures = Stream.ReadUInt32();
assert(m_Materials.empty());
std::vector<CString> MaterialNames;
for(unsigned i = 0; i < cTextures; ++i)
{<|fim▁hole|> MaterialNames.push_back(strFilename);
#ifdef OF_CLIENT
CMultiTexture *pMaterial = pLevel->GetGame()->GetTextureMgr()->Load(strFilename);
m_Materials.push_back(pMaterial);
#endif // OF_CLIENT
}
unsigned cScrollAnim = Stream.ReadUInt32();
Stream.ignore(cScrollAnim * 12);
unsigned cRooms = Stream.ReadUInt32();
m_Rooms.reserve(cRooms);
#ifdef OF_CLIENT
std::vector<SIrrRoom> IrrRooms(cRooms);
#endif // OF_CLIENT
unsigned iGlass = 0;
for(unsigned i = 0; i < cRooms; ++i)
{
unsigned nId = Stream.ReadUInt32();
btVector3 vAabb1 = Stream.ReadVector();
btVector3 vAabb2 = Stream.ReadVector();
assert(vAabb1.x() < vAabb2.x() && vAabb1.y() < vAabb2.y() && vAabb1.z() < vAabb2.z());
bool bSkyRoom = Stream.ReadUInt8() ? true : false;
Stream.ignore(3); // is_cold, is_outside, is_airlock
bool bLiquidRoom = Stream.ReadUInt8() ? true : false;
bool bAmbientLight = Stream.ReadUInt8() ? true : false;
bool bDetail = Stream.ReadUInt8() ? true : false;
unsigned Unknown = Stream.ReadUInt8();
assert(Unknown <= 1);
float fLife = Stream.ReadFloat();
assert(fLife >= -1.0f && fLife <= 10000.f);
Stream.ReadString2(); // eax_effect
if(bLiquidRoom)
{
Stream.ignore(8); // liquid_depth, liquid_color
Stream.ReadString2(); // liquid_surface_texture
Stream.ignore(12 + 13 + 12); // liquid_visibility, liquid_type, liquid_alpha, liquid_unknown,
// liquid_waveform, liquid_surface_texture_scroll_u, liquid_surface_texture_scroll_b
}
if(bAmbientLight)
Stream.ignore(4); // ambient_color
CRoom *pRoom = new CRoom(pLevel, nId, fLife, bDetail);
pRoom->m_GlassIndex = pRoom->IsGlass() ? (iGlass++) : 0;
pRoom->m_bSkyRoom = bSkyRoom;
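// cache the AABB midpoint; vertex positions and the Bullet body transform are
// expressed relative to it (see the setOrigin call when the body is created)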
pRoom->m_vCenter = btVector3((vAabb1.x() + vAabb2.x())/2.0f, (vAabb1.y() + vAabb2.y())/2.0f, (vAabb1.z() + vAabb2.z())/2.0f);
m_Rooms.push_back(pRoom);
}
unsigned cUnknown = Stream.ReadUInt32();
if(cRooms != cUnknown)
pLevel->GetGame()->GetConsole()->DbgPrint("Warning! cRooms(%u) != cUnknown(%u)\n", cRooms, cUnknown);
for(unsigned i = 0; i < cUnknown; ++i)
{
Stream.ignore(4); // index
unsigned cUnknown2 = Stream.ReadUInt32(); // links_count
Stream.ignore(cUnknown2 * 4); // links
}
unsigned cUnknown2 = Stream.ReadUInt32();
Stream.ignore(cUnknown2 * 32); // unknown3
unsigned cVertices = Stream.ReadUInt32();
std::vector<btVector3> Vertices(cVertices);
for(unsigned i = 0; i < cVertices; ++i)
Vertices[i] = Stream.ReadVector();
unsigned cFaces = Stream.ReadUInt32();
std::vector<SFace> Faces(cFaces);
//CGame::GetInst().GetSceneMgr()->getParameters()->setAttribute(scene::ALLOW_ZWRITE_ON_TRANSPARENT, true);
for(unsigned i = 0; i < cFaces; ++i)
{
unsigned nPos = (unsigned)Stream.tellg();
SFace &Face = Faces[i];
// Use normal from unknown plane
Face.vNormal = Stream.ReadVector();
float fLen2 = Face.vNormal.length2();
assert(fLen2 > 0.99f && fLen2 < 1.01f);
float fDist = Stream.ReadFloat();
//assert(fDist >= 0.0f);
Face.iTexture = Stream.ReadUInt32();
if(Face.iTexture != 0xFFFFFFFF)
assert(Face.iTexture < cTextures);
Face.iLightmapUnk = Stream.ReadUInt32(); // it's used later (lightmap?)
assert(Face.iLightmapUnk == 0xFFFFFFFF || Face.iLightmapUnk < cFaces);
unsigned Unknown3 = Stream.ReadUInt32();
//assert(Unknown3 == i);
unsigned Unknown4 = Stream.ReadUInt32();
assert(Unknown4 == 0xFFFFFFFF);
unsigned Unknown4_2 = Stream.ReadUInt32();
assert(Unknown4_2 == 0xFFFFFFFF);
Face.uPortalUnk = Stream.ReadUInt32(); // it's used later (it's not 0 for portals)
Face.uFlags = Stream.ReadUInt8();
if((Face.uFlags & ~(RFL_FF_MASK)))
pLevel->GetGame()->GetConsole()->DbgPrint("Unknown face flags 0x%x\n", Face.uFlags & ~(RFL_FF_MASK));
uint8_t uLightmapRes = Stream.ReadUInt8();
if(uLightmapRes >= 0x22)
pLevel->GetGame()->GetConsole()->DbgPrint("Unknown lightmap resolution 0x%x\n", uLightmapRes);
uint16_t Unk6 = Stream.ReadUInt16();
uint32_t Unk6_2 = Stream.ReadUInt32();
assert(Unk6 == 0);
Face.iRoom = Stream.ReadUInt32();
assert(Face.iRoom < cRooms);
unsigned cFaceVertices = Stream.ReadUInt32();
assert(cFaceVertices >= 3);
//pLevel->GetGame()->GetConsole()->DbgPrint("Face %u vertices: %u, texture: %x (pos: 0x%x)\n", i, cFaceVertices, iTexture, nPos);
if(cFaceVertices > 100)
pLevel->GetGame()->GetConsole()->DbgPrint("Warning! Face %u has %u vertices (level may be corrupted) (pos: 0x%x)\n", i, cFaceVertices, nPos);
Face.Indices.reserve(cFaceVertices);
for(unsigned j = 0; j < cFaceVertices; ++j)
{
SIndexDesc Idx;
Idx.idx = Stream.ReadUInt32();
assert(Idx.idx < cVertices);
Idx.u = Stream.ReadFloat();
Idx.v = Stream.ReadFloat();
if(Face.iLightmapUnk != 0xFFFFFFFF)
{
Idx.lm_u = Stream.ReadFloat();
Idx.lm_v = Stream.ReadFloat();
assert(Idx.lm_u >= 0.0f && Idx.lm_u <= 1.0f);
assert(Idx.lm_v >= 0.0f && Idx.lm_v <= 1.0f);
}
Face.Indices.push_back(Idx);
}
}
unsigned cLightmapVertices = Stream.ReadUInt32();
#ifdef OF_CLIENT
std::vector<unsigned> LightmapVertices(cLightmapVertices);
#endif // OF_CLIENT
for(unsigned i = 0; i < cLightmapVertices; ++i)
{
unsigned iLightmap = Stream.ReadUInt32();
#ifdef OF_CLIENT
assert(iLightmap < pLevel->GetLightmaps()->GetCount());
LightmapVertices[i] = iLightmap;
#endif // OF_CLIENT
Stream.ignore(92); // unknown4
}
if(nVersion == 0xB4)
Stream.ignore(4); // unknown5
for(unsigned i = 0; i < cFaces; ++i)
{
SFace &Face = Faces[i];
CRoom *pRoom = m_Rooms[Face.iRoom];
CMultiTexture *pMaterial = (Face.iTexture != 0xFFFFFFFF) ? m_Materials[Face.iTexture] : NULL;
const CString &MaterialName = (Face.iTexture != 0xFFFFFFFF) ? MaterialNames[Face.iTexture] : "";
bool bIsTransparent = (Face.uPortalUnk != 0) || (Face.uFlags & RFL_FF_SHOW_SKY) || IsInvisibleLevelTexture(MaterialName);
#ifdef OF_CLIENT
video::ITexture *pTexture = NULL, *pLightmapTexture = NULL;
scene::SMeshBufferLightMap *pIrrBuf;
unsigned iBuf, nFirstSubmeshIndex;
if(!bIsTransparent)
{
// Get texture pointer
if(pMaterial)
pTexture = pMaterial->GetFrame(0);
if(Face.iLightmapUnk != 0xFFFFFFFF)
{
unsigned iLightmap = LightmapVertices[Face.iLightmapUnk];
pLightmapTexture = pLevel->GetLightmaps()->Get(iLightmap);
}
if(!IrrRooms[Face.iRoom].pIrrMesh)
{
IrrRooms[Face.iRoom].pIrrMesh = new scene::SMesh;
assert(IrrRooms[Face.iRoom].pIrrMesh->getMeshBufferCount() == 0);
}
// Find buffer for specified texture
for(iBuf = 0; iBuf < IrrRooms[Face.iRoom].pIrrMesh->getMeshBufferCount(); ++iBuf)
{
pIrrBuf = (scene::SMeshBufferLightMap*)IrrRooms[Face.iRoom].pIrrMesh->getMeshBuffer(iBuf);
if(pIrrBuf->Material.getTexture(0) == pTexture &&
pIrrBuf->Material.getTexture(1) == pLightmapTexture)
{
break;
}
}
if(iBuf == IrrRooms[Face.iRoom].pIrrMesh->getMeshBufferCount())
{
// Create new buffer and prepare it
pIrrBuf = new scene::SMeshBufferLightMap;
pIrrBuf->setHardwareMappingHint(scene::EHM_STATIC);
// Setup material flags
pIrrBuf->Material.setFlag(video::EMF_FOG_ENABLE, pLevel->GetProperties()->IsFogEnabled());
pIrrBuf->Material.setFlag(video::EMF_LIGHTING, false);
// Setup textures
pIrrBuf->Material.setTexture(0, pTexture);
pIrrBuf->Material.setTexture(1, pLightmapTexture);
if(pRoom->IsDetail() && pMaterial && pMaterial->HasAlpha())
pIrrBuf->Material.MaterialType = video::EMT_TRANSPARENT_ALPHA_CHANNEL;
else if(pLightmapTexture && !pRoom->m_bSkyRoom)
pIrrBuf->Material.MaterialType = video::EMT_LIGHTMAP_M2;
if(pMaterial && pMaterial->IsAnimated())
{
scene::ISceneNodeAnimator *pAnim = pMaterial->CreateAnimator(iBuf);
if(pAnim)
IrrRooms[Face.iRoom].Animators.push_back(pAnim);
}
// Add buffer to the mesh
IrrRooms[Face.iRoom].pIrrMesh->addMeshBuffer(pIrrBuf);
pIrrBuf->drop();
}
// Reallocate buffers to speed up things
pIrrBuf->Vertices.reallocate(pIrrBuf->Vertices.size() + Face.Indices.size());
pIrrBuf->Indices.reallocate(pIrrBuf->Indices.size() + (Face.Indices.size() - 3 + 1) * 3);
// First index points to first vertex
nFirstSubmeshIndex = pIrrBuf->Vertices.size();
}
#endif // OF_CLIENT
unsigned nFirstIndex, nPrevIndex;
for(unsigned j = 0; j < Face.Indices.size(); ++j)
{
SIndexDesc &Idx = Face.Indices[j];
if(!bIsTransparent)
{
if(!pRoom->m_bSkyRoom)
{
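// triangulate the (possibly >3-vertex) face as a fan anchored at its first
// vertex: one triangle (v0, v[j-1], v[j]) for every j >= 2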
if(j == 0)
nFirstIndex = Idx.idx;
else if(j >= 2)
{
if(!pRoom->m_pMesh)
{
pRoom->m_pMesh = new btTriangleMesh();
pRoom->m_pMesh->preallocateVertices(Face.Indices.size());
pRoom->m_pMesh->preallocateIndices((Face.Indices.size() - 3 + 1) * 3);
}
pRoom->m_pMesh->addTriangle(Vertices[nFirstIndex] - pRoom->m_vCenter, Vertices[nPrevIndex] - pRoom->m_vCenter, Vertices[Idx.idx] - pRoom->m_vCenter);
}
}
#ifdef OF_CLIENT
btVector3 vPos = Vertices[Idx.idx] - pRoom->m_vCenter;
video::S3DVertex2TCoords Vertex;
Vertex.Pos.X = vPos.x();
Vertex.Pos.Y = vPos.y();
Vertex.Pos.Z = vPos.z();
Vertex.Normal.X = Face.vNormal.x();
Vertex.Normal.Y = Face.vNormal.y();
Vertex.Normal.Z = Face.vNormal.z();
Vertex.TCoords.X = Idx.u;
Vertex.TCoords.Y = Idx.v;
Vertex.TCoords2.X = Idx.lm_u;
Vertex.TCoords2.Y = Idx.lm_v;
Vertex.Color = video::SColor(255, 255, 255, 255);
pIrrBuf->Vertices.push_back(Vertex);
if(j >= 2)
{
pIrrBuf->Indices.push_back(nFirstSubmeshIndex);
pIrrBuf->Indices.push_back(pIrrBuf->Vertices.size() - 2);
pIrrBuf->Indices.push_back(pIrrBuf->Vertices.size() - 1);
}
#endif // OF_CLIENT
}
nPrevIndex = Idx.idx;
}
}
for(unsigned i = 0; i < m_Rooms.size(); ++i)
{
CRoom *pRoom = m_Rooms[i];
if(pRoom->m_pMesh)
{
pRoom->m_pShape = new btBvhTriangleMeshShape(pRoom->m_pMesh, true);
btRigidBody::btRigidBodyConstructionInfo ConstructionInfo(0.0f, &pRoom->m_MotionState, pRoom->m_pShape);
ConstructionInfo.m_friction = 10.0f;
ConstructionInfo.m_restitution = 1.0f;
pRoom->m_pColObj = new btRigidBody(ConstructionInfo);
pRoom->m_pColObj->setUserPointer(pRoom);
pRoom->m_pColObj->setCollisionFlags(btCollisionObject::CF_STATIC_OBJECT);
pRoom->m_pColObj->getWorldTransform().setOrigin(pRoom->m_vCenter);
pRoom->AddToWorld();
}
#ifdef OF_CLIENT
if(IrrRooms[i].pIrrMesh)
{
// Update bounding boxes
for(unsigned iBuf = 0; iBuf < IrrRooms[i].pIrrMesh->getMeshBufferCount(); ++iBuf)
IrrRooms[i].pIrrMesh->getMeshBuffer(iBuf)->recalculateBoundingBox();
IrrRooms[i].pIrrMesh->recalculateBoundingBox();
// Create scene node
if(pRoom->m_bSkyRoom)
{
scene::ISceneManager *pSM = pLevel->GetGame()->GetSceneMgr();
pRoom->m_pSceneNode = new scene::CMeshSkyboxSceneNode(IrrRooms[i].pIrrMesh, pSM->getRootSceneNode(), pSM, -1);
pRoom->m_pSceneNode->drop(); // drop it because smgr owns it now
}
else
{
pRoom->m_pSceneNode = pLevel->GetGame()->GetSceneMgr()->addOctreeSceneNode(IrrRooms[i].pIrrMesh);
pRoom->m_pSceneNode->setPosition(core::vector3df(pRoom->m_vCenter.x(), pRoom->m_vCenter.y(), pRoom->m_vCenter.z()));
}
// Setup animators
for(unsigned iAnim = 0; iAnim < IrrRooms[i].Animators.size(); ++iAnim)
{
pRoom->m_pSceneNode->addAnimator(IrrRooms[i].Animators[iAnim]);
IrrRooms[i].Animators[iAnim]->drop(); // Scene node owns animators
}
IrrRooms[i].Animators.clear();
} else
assert(IrrRooms[i].Animators.empty());
#endif // OF_CLIENT
}
pLevel->GetGame()->GetConsole()->DbgPrint("Loaded geometry: %u textures, %u rooms, %u vertices, %u faces\n", cTextures, cRooms, cVertices, cFaces);
}
void CStaticGeometry::Unload()
{
// Cleanup rooms - they are deleted by CLevel
m_Rooms.clear();
#ifdef OF_CLIENT
// Cleanup materials
for(unsigned i = 0; i < m_Materials.size(); ++i)
m_Materials[i]->Release();
m_Materials.clear();
#endif // OF_CLIENT
}<|fim▁end|> | CString strFilename = Stream.ReadString2(); |
<|file_name|>test_repeatable.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import pytest
from parglare import Parser, Grammar
from parglare.exceptions import GrammarError, ParseError, RRConflicts
def test_repeatable_zero_or_more():
"""
Tests zero or more repeatable operator.
"""
grammar = """
S: "2" b* "3";
terminals
b: "1";
"""
g = Grammar.from_string(grammar)
assert g.get_nonterminal('b_0')
assert g.get_nonterminal('b_1')
p = Parser(g)
input_str = '2 1 1 1 3'
result = p.parse(input_str)
assert result == ["2", ["1", "1", "1"], "3"]
input_str = '2 3'
result = p.parse(input_str)
assert result == ["2", [], "3"]
def test_repeatable_zero_or_more_with_separator():
"""
Tests zero or more repeatable operator with separator.
"""
grammar = """
S: "2" b*[comma] "3";
terminals
b: "1";
comma: ",";
"""
g = Grammar.from_string(grammar)
assert g.get_nonterminal('b_0_comma')
p = Parser(g)
input_str = '2 1, 1 , 1 3'
result = p.parse(input_str)
assert result == ["2", ["1", "1", "1"], "3"]
input_str = '2 3'
result = p.parse(input_str)
assert result == ["2", [], "3"]
def test_repeatable_one_or_more():
"""
Tests one or more repeatable operator.
"""
grammar = """
S: "2" b+ "3";
terminals
b: "1";
"""
g = Grammar.from_string(grammar)
assert g.get_nonterminal('b_1')
p = Parser(g)
input_str = '2 1 1 1 3'
result = p.parse(input_str)
assert result == ["2", ["1", "1", "1"], "3"]
input_str = '2 3'
with pytest.raises(ParseError) as e:
result = p.parse(input_str)
assert 'Expected: b' in str(e.value)
def test_repeatable_one_or_more_with_separator():
"""
Tests one or more repeatable operator with separator.
"""
grammar = """
S: "2" b+[comma] "3";
terminals
b: "1";
comma: ",";
"""
g = Grammar.from_string(grammar)
assert g.get_nonterminal('b_1_comma')
p = Parser(g)
input_str = '2 1, 1 , 1 3'
result = p.parse(input_str)
assert result == ["2", ["1", "1", "1"], "3"]
input_str = '2 3'
with pytest.raises(ParseError) as e:
p.parse(input_str)<|fim▁hole|>
def test_optional():
"""
Tests optional operator.
"""
grammar = """
S: "2" b? "3"?;
terminals
b: "1";
"""
g = Grammar.from_string(grammar)
assert g.get_nonterminal('b_opt')
p = Parser(g)
input_str = '2 1 3'
result = p.parse(input_str)
assert result == ["2", "1", "3"]
input_str = '2 3'
result = p.parse(input_str)
assert result == ["2", None, "3"]
input_str = '2 1'
result = p.parse(input_str)
assert result == ["2", "1", None]
input_str = ' 1 3'
with pytest.raises(ParseError) as e:
p.parse(input_str)
assert 'Expected: 2' in str(e.value)
def test_optional_no_modifiers():
"""
Tests that optional operator doesn't allow modifiers.
"""
grammar = """
S: "2" b?[comma] "3"?;
terminals
b: "1";
comma: ",";
"""
with pytest.raises(GrammarError) as e:
Grammar.from_string(grammar)
assert "Repetition modifier not allowed" in str(e.value)
def test_multiple_repetition_operators():
"""
Test using of multiple repetition operators.
"""
grammar = """
S: "2" b*[comma] c+ "3"?;
terminals
b: "b";
c: "c";
comma: ",";
"""
g = Grammar.from_string(grammar)
assert g.get_nonterminal('b_0_comma')
assert g.get_nonterminal('c_1')
p = Parser(g)
input_str = '2 b, b c 3'
result = p.parse(input_str)
assert result == ["2", ["b", "b"], ["c"], "3"]
def test_repetition_operator_many_times_same():
"""
Test using the same repetition operator multiple times.
"""
grammar = """
S: "2" b*[comma] "3"? b*[comma];
terminals
b: "b";
comma: ",";
"""
g = Grammar.from_string(grammar)
assert g.get_nonterminal('b_0_comma')
p = Parser(g)
input_str = '2 b 3 b, b'
result = p.parse(input_str)
assert result == ["2", ["b"], "3", ["b", "b"]]
def test_repeatable_one_zero_rr_conflicts():
"""
Check that translations of B+ and B* don't produce R/R conflict.
"""
grammar = """
S: A B+ C;
S: A B* D;
terminals
A:; B:; C:; D:;
"""
g = Grammar.from_string(grammar, _no_check_recognizers=True)
# Check if parser construction raises exception
try:
Parser(g)
except RRConflicts:
pytest.fail("R/R conflicts not expected here.")<|fim▁end|> | assert 'Expected: b' in str(e.value)
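A standalone sketch tying the tests above together (assuming parglare is installed); the one-or-more-with-separator group comes back as a plain Python list:

from parglare import Grammar, Parser

g = Grammar.from_string(r"""
S: "(" item+[comma] ")";

terminals
item: /\d+/;
comma: ",";
""")

print(Parser(g).parse("( 1, 2, 3 )"))  # expected: ['(', ['1', '2', '3'], ')']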
|
<|file_name|>batch.rs<|end_file_name|><|fim▁begin|>//! Module providing write batches
use leveldb_sys::*;
use libc::{c_char, size_t, c_void};
use std::slice;
use options::{WriteOptions, c_writeoptions};
use super::error::Error;
use std::ptr;
use super::Database;
#[allow(missing_docs)]
struct RawWritebatch {
ptr: *mut leveldb_writebatch_t,
}
impl Drop for RawWritebatch {
fn drop(&mut self) {<|fim▁hole|> leveldb_writebatch_destroy(self.ptr);
}
}
}
#[allow(missing_docs)]
pub struct Writebatch {
#[allow(dead_code)]
writebatch: RawWritebatch,
}
/// Batch access to the database
pub trait Batch {
/// Write a batch to the database, ensuring success for all items or an error
fn write(&self, options: WriteOptions, batch: &Writebatch) -> Result<(), Error>;
}
impl Batch for Database {
fn write(&self, options: WriteOptions, batch: &Writebatch) -> Result<(), Error> {
unsafe {
let mut error = ptr::null_mut();
let c_writeoptions = c_writeoptions(options);
leveldb_write(self.database.ptr,
c_writeoptions,
batch.writebatch.ptr,
&mut error);
leveldb_writeoptions_destroy(c_writeoptions);
if error == ptr::null_mut() {
Ok(())
} else {
Err(Error::new_from_i8(error))
}
}
}
}
impl Writebatch {
/// Create a new writebatch
pub fn new() -> Writebatch {
let ptr = unsafe { leveldb_writebatch_create() };
let raw = RawWritebatch { ptr: ptr };
Writebatch { writebatch: raw }
}
/// Clear the writebatch
pub fn clear(&mut self) {
unsafe { leveldb_writebatch_clear(self.writebatch.ptr) };
}
/// Batch a put operation
pub fn put<K: AsRef<[u8]>>(&mut self, key: K, value: &[u8]) {
unsafe {
let k = key.as_ref();
leveldb_writebatch_put(self.writebatch.ptr,
k.as_ptr() as *mut c_char,
k.len() as size_t,
value.as_ptr() as *mut c_char,
value.len() as size_t);
}
}
/// Batch a delete operation
pub fn delete<K: AsRef<[u8]>>(&mut self, key: K) {
unsafe {
let k = key.as_ref();
leveldb_writebatch_delete(self.writebatch.ptr,
k.as_ptr() as *mut c_char,
k.len() as size_t);
}
}
/// Iterate over the writebatch, returning the resulting iterator
pub fn iterate<T: WritebatchIterator>(&mut self, iterator: Box<T>) -> Box<T> {
use std::mem;
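// The boxed iterator crosses the C API as an opaque pointer; the two
// extern "C" callbacks below cast it back to &mut T for each batched op.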
unsafe {
let mem = mem::transmute(iterator);
leveldb_writebatch_iterate(self.writebatch.ptr,
mem,
put_callback::<T>,
deleted_callback::<T>);
mem::transmute(mem)
}
}
}
/// A trait for iterators to iterate over written batches and check their validity.
pub trait WritebatchIterator {
/// Callback for put items
fn put(&mut self, key: &[u8], value: &[u8]);
/// Callback for deleted items
fn deleted(&mut self, key: &[u8]);
}
extern "C" fn put_callback<T: WritebatchIterator>(state: *mut c_void,
key: *const i8,
keylen: size_t,
val: *const i8,
vallen: size_t) {
unsafe {
let iter: &mut T = &mut *(state as *mut T);
let key_slice = slice::from_raw_parts::<u8>(key as *const u8, keylen as usize);
let val_slice = slice::from_raw_parts::<u8>(val as *const u8, vallen as usize);
let k = key_slice;
iter.put(k, val_slice);
}
}
extern "C" fn deleted_callback<T: WritebatchIterator>(state: *mut c_void,
key: *const i8,
keylen: size_t) {
unsafe {
let iter: &mut T = &mut *(state as *mut T);
let key_slice = slice::from_raw_parts::<u8>(key as *const u8, keylen as usize);
let k = key_slice;
iter.deleted(k);
}
}<|fim▁end|> | unsafe { |
<|file_name|>manager.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keystoneclient.v2_0 import client
from keystoneclient.v3 import client as keystoneclient_v3
from keystoneclient.auth import token_endpoint
from keystoneclient import session, exceptions
from charmhelpers.core.decorators import retry_on_exception
# Early versions of keystoneclient lib do not have an explicit
# ConnectionRefused
if hasattr(exceptions, 'ConnectionRefused'):
econnrefused = exceptions.ConnectionRefused
else:
econnrefused = exceptions.ConnectionError
def _get_keystone_manager_class(endpoint, token, api_version):
"""Return KeystoneManager class for the given API version
@param endpoint: the keystone endpoint to point client at
@param token: the keystone admin_token
@param api_version: version of the keystone api the client should use
@returns keystonemanager class used for interrogating keystone
"""
if api_version == 2:
return KeystoneManager2(endpoint, token)
if api_version == 3:
return KeystoneManager3(endpoint, token)
raise ValueError('No manager found for api version {}'.format(api_version))
@retry_on_exception(5, base_delay=3, exc_type=econnrefused)
def get_keystone_manager(endpoint, token, api_version=None):
"""Return a keystonemanager for the correct API version
If api_version has not been set, create a manager based on the endpoint.
Use this manager to query the catalogue and determine which api version
should actually be used, then return the correct client based on that.
Function is wrapped in a retry_on_exception to catch the case where the
keystone service is still initialising and not responding to requests yet.
XXX I think the keystone client should be able to do version
detection automatically so the code below could be greatly
simplified
@param endpoint: the keystone endpoint to point client at
@param token: the keystone admin_token
@param api_version: version of the keystone api the client should use
@returns keystonemanager class used for interrogating keystone
"""
if api_version:
return _get_keystone_manager_class(endpoint, token, api_version)
else:
if 'v2.0' in endpoint.split('/'):
manager = _get_keystone_manager_class(endpoint, token, 2)
else:
manager = _get_keystone_manager_class(endpoint, token, 3)
if endpoint.endswith('/'):
base_ep = endpoint.rsplit('/', 2)[0]
else:
base_ep = endpoint.rsplit('/', 1)[0]
svc_id = None
for svc in manager.api.services.list():
if svc.type == 'identity':
svc_id = svc.id
version = None
for ep in manager.api.endpoints.list():
if ep.service_id == svc_id and hasattr(ep, 'adminurl'):
version = ep.adminurl.split('/')[-1]
if version and version == 'v2.0':
new_ep = base_ep + "/" + 'v2.0'
return _get_keystone_manager_class(new_ep, token, 2)
elif version and version == 'v3':
new_ep = base_ep + "/" + 'v3'
return _get_keystone_manager_class(new_ep, token, 3)
else:
return manager
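# A hedged usage sketch (not part of the original charm); the endpoint and
# token below are hypothetical placeholders for a real admin endpoint.
def _example_get_manager():
    manager = get_keystone_manager('http://127.0.0.1:35357/v3',
                                   'hypothetical-admin-token')
    # api_version will be 2 or 3, depending on what the catalogue reported
    return manager.api_version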
class KeystoneManager(object):
def resolve_domain_id(self, name):
pass
def resolve_role_id(self, name):
"""Find the role_id of a given role"""
roles = [r._info for r in self.api.roles.list()]
for r in roles:
if name.lower() == r['name'].lower():
return r['id']
def resolve_service_id(self, name, service_type=None):
"""Find the service_id of a given service"""
services = [s._info for s in self.api.services.list()]
for s in services:
if service_type:
if (name.lower() == s['name'].lower() and
service_type == s['type']):
return s['id']
else:
if name.lower() == s['name'].lower():
return s['id']
def resolve_service_id_by_type(self, type):
"""Find the service_id of a given service"""
services = [s._info for s in self.api.services.list()]
for s in services:
if type == s['type']:
return s['id']
class KeystoneManager2(KeystoneManager):
def __init__(self, endpoint, token):
self.api_version = 2
self.api = client.Client(endpoint=endpoint, token=token)
def resolve_user_id(self, name, user_domain=None):
"""Find the user_id of a given user"""
users = [u._info for u in self.api.users.list()]
for u in users:
if name.lower() == u['name'].lower():
return u['id']
def create_endpoints(self, region, service_id, publicurl, adminurl,
internalurl):
self.api.endpoints.create(region=region, service_id=service_id,
publicurl=publicurl, adminurl=adminurl,
internalurl=internalurl)
def tenants_list(self):
return self.api.tenants.list()
def resolve_tenant_id(self, name, domain=None):
"""Find the tenant_id of a given tenant"""
tenants = [t._info for t in self.api.tenants.list()]
for t in tenants:
if name.lower() == t['name'].lower():
return t['id']
def create_tenant(self, tenant_name, description, domain='default'):
self.api.tenants.create(tenant_name=tenant_name,
description=description)
def delete_tenant(self, tenant_id):
self.api.tenants.delete(tenant_id)
def create_user(self, name, password, email, tenant_id=None,
domain_id=None):
self.api.users.create(name=name,
password=password,
email=email,
tenant_id=tenant_id)
def update_password(self, user, password):
self.api.users.update_password(user=user, password=password)
def roles_for_user(self, user_id, tenant_id=None, domain_id=None):
return self.api.roles.roles_for_user(user_id, tenant_id)
def add_user_role(self, user, role, tenant, domain):
self.api.roles.add_user_role(user=user, role=role, tenant=tenant)
class KeystoneManager3(KeystoneManager):
def __init__(self, endpoint, token):
self.api_version = 3
keystone_auth_v3 = token_endpoint.Token(endpoint=endpoint, token=token)
keystone_session_v3 = session.Session(auth=keystone_auth_v3)
self.api = keystoneclient_v3.Client(session=keystone_session_v3)
def resolve_tenant_id(self, name, domain=None):
"""Find the tenant_id of a given tenant"""
if domain:
domain_id = self.resolve_domain_id(domain)
tenants = [t._info for t in self.api.projects.list()]
for t in tenants:
if name.lower() == t['name'].lower() and \
(domain is None or t['domain_id'] == domain_id):
return t['id']
def resolve_domain_id(self, name):
"""Find the domain_id of a given domain"""
domains = [d._info for d in self.api.domains.list()]
for d in domains:
if name.lower() == d['name'].lower():
return d['id']
def resolve_user_id(self, name, user_domain=None):
"""Find the user_id of a given user"""
domain_id = None
if user_domain:
domain_id = self.resolve_domain_id(user_domain)
for user in self.api.users.list(domain=domain_id):
if name.lower() == user.name.lower():
if user_domain:
if domain_id == user.domain_id:
return user.id
else:
return user.id
def create_endpoints(self, region, service_id, publicurl, adminurl,
internalurl):
self.api.endpoints.create(service_id, publicurl, interface='public',
region=region)
self.api.endpoints.create(service_id, adminurl, interface='admin',
region=region)
self.api.endpoints.create(service_id, internalurl,
interface='internal', region=region)
def tenants_list(self):
return self.api.projects.list()
def create_domain(self, domain_name, description):
self.api.domains.create(domain_name, description=description)
def create_tenant(self, tenant_name, description, domain='default'):
domain_id = self.resolve_domain_id(domain)
self.api.projects.create(tenant_name, domain_id,
description=description)
def delete_tenant(self, tenant_id):
self.api.projects.delete(tenant_id)
def create_user(self, name, password, email, tenant_id=None,
domain_id=None):
if not domain_id:
domain_id = self.resolve_domain_id('default')
if tenant_id:
self.api.users.create(name,
domain=domain_id,
password=password,
email=email,
project=tenant_id)
else:
self.api.users.create(name,
domain=domain_id,
password=password,
email=email)
def update_password(self, user, password):
self.api.users.update(user, password=password)
def roles_for_user(self, user_id, tenant_id=None, domain_id=None):
# Specify either a domain or project, not both
if domain_id:
return self.api.roles.list(user_id, domain=domain_id)
else:
return self.api.roles.list(user_id, project=tenant_id)
def add_user_role(self, user, role, tenant, domain):
# Specify either a domain or project, not both
if domain:
self.api.roles.grant(role, user=user, domain=domain)
if tenant:
self.api.roles.grant(role, user=user, project=tenant)
def find_endpoint_v3(self, interface, service_id, region):
found_eps = []
for ep in self.api.endpoints.list():
if ep.service_id == service_id and ep.region == region and \
ep.interface == interface:
found_eps.append(ep)
return found_eps
def delete_old_endpoint_v3(self, interface, service_id, region, url):
eps = self.find_endpoint_v3(interface, service_id, region)
for ep in eps:
if getattr(ep, 'url') != url:
self.api.endpoints.delete(ep.id)
return True<|fim▁hole|><|fim▁end|> | return False |
<|file_name|>issue-84268.rs<|end_file_name|><|fim▁begin|>// compile-flags: -O --crate-type=rlib
#![feature(platform_intrinsics, repr_simd)]
extern "platform-intrinsic" {
fn simd_fabs<T>(x: T) -> T;
fn simd_eq<T, U>(x: T, y: T) -> U;
}
#[repr(simd)]
pub struct V([f32; 4]);
<|fim▁hole|>pub struct M([i32; 4]);
#[no_mangle]
// CHECK-LABEL: @is_infinite
pub fn is_infinite(v: V) -> M {
// CHECK: fabs
// CHECK: cmp oeq
unsafe {
simd_eq(simd_fabs(v), V([f32::INFINITY; 4]))
}
}<|fim▁end|> | #[repr(simd)] |
<|file_name|>public_api.ts<|end_file_name|><|fim▁begin|>export {FormlyJigsawHeaderModule} from './header.module';<|fim▁hole|><|fim▁end|> | export {FormlyFieldHeader} from './header.type'; |
<|file_name|>user.py<|end_file_name|><|fim▁begin|># Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from taskflow.patterns import linear_flow
from pumphouse import exceptions
from pumphouse import events
from pumphouse import task
LOG = logging.getLogger(__name__)
class RetrieveUser(task.BaseCloudTask):
def execute(self, user_id):
user = self.cloud.keystone.users.get(user_id)
self.cloud.identity.fetch(user.id)
return user.to_dict()
class EnsureUser(task.BaseCloudTask):
def execute(self, user_info, tenant_info):
try:
user = self.cloud.keystone.users.find(name=user_info["name"])
# TODO(akscram): Current password should be replaced by temporary.
except exceptions.keystone_excs.NotFound:
user = self.cloud.keystone.users.create(
name=user_info["name"],
# TODO(akscram): Here we should generate a temporary
# password for the user and use them
# along the migration process.
# The RepairUserPasswords should repair
# original after all operations.
password="default",
email=user_info["email"],
tenant_id=tenant_info["id"] if tenant_info else None,
enabled=user_info["enabled"],
)
self.created_event(user)
return user.to_dict()
def created_event(self, user):
LOG.info("Created user: %s", user)
events.emit("create", {
"id": user.id,
"type": "user",
"cloud": self.cloud.name,
"data": user.to_dict(),
}, namespace="/events")
class EnsureOrphanUser(EnsureUser):
def execute(self, user_info):
super(EnsureOrphanUser, self).execute(user_info, None)
class EnsureUserRole(task.BaseCloudTask):
def execute(self, user_info, role_info, tenant_info):
try:
self.cloud.keystone.tenants.add_user(tenant_info["id"],
user_info["id"],
role_info["id"])
except exceptions.keystone_excs.Conflict:
pass
else:
self.role_assigned_event(role_info, user_info, tenant_info)
return user_info
def role_assigned_event(self, role_info, user_info, tenant_info):
LOG.info("Created role %s assignment for user %s in tenant %s",
role_info["id"], user_info["id"], tenant_info["id"])
def migrate_membership(context, user_id, role_id, tenant_id):
user_ensure = "user-{}-ensure".format(user_id)
role_ensure = "role-{}-ensure".format(role_id)
tenant_ensure = "tenant-{}-ensure".format(tenant_id)
user_role_ensure = "user-role-{}-{}-{}-ensure".format(user_id, role_id,
tenant_id)
task = EnsureUserRole(context.dst_cloud,
name=user_role_ensure,
provides=user_role_ensure,
rebind=[user_ensure, role_ensure,
tenant_ensure])
context.store[user_role_ensure] = user_role_ensure
return task<|fim▁hole|>
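# A minimal composition sketch (the context object and IDs are
# illustrative); the migrate_user() flow below must run first so that
# the "user-<id>-ensure" result exists for EnsureUserRole's rebind:
#
#     flow = linear_flow.Flow("migrate-identity")
#     flow.add(migrate_user(context, user_id, tenant_id=tenant_id))
#     flow.add(migrate_membership(context, user_id, role_id, tenant_id))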
def migrate_user(context, user_id, tenant_id=None):
user_binding = "user-{}".format(user_id)
user_retrieve = "{}-retrieve".format(user_binding)
user_ensure = "{}-ensure".format(user_binding)
flow = linear_flow.Flow("migrate-user-{}".format(user_id))
flow.add(RetrieveUser(context.src_cloud,
name=user_binding,
provides=user_binding,
rebind=[user_retrieve]))
if tenant_id is not None:
tenant_ensure = "tenant-{}-ensure".format(tenant_id)
flow.add(EnsureUser(context.dst_cloud,
name=user_ensure,
provides=user_ensure,
rebind=[user_binding, tenant_ensure]))
else:
flow.add(EnsureUser(context.dst_cloud,
name=user_ensure,
provides=user_ensure,
rebind=[user_binding],
inject={"tenant_info": None}))
context.store[user_retrieve] = user_id
return flow<|fim▁end|> | |
<|file_name|>brush.rs<|end_file_name|><|fim▁begin|>use crate::prelude::*;
/// A `Brush` defines the fill pattern of shapes.
/// The syntax allows you to express fill patterns in several ways:
///
/// * solid colors
/// * colors with alpha channel
/// * gradients of colors
/// * gradients with directions
/// * gradients with angles
///
/// The string declaration of a `Brush` is composed by combining the following
/// syntax elements:
///
/// 1. The `color name`
/// 2. The `gradient` string
/// * the gradient type (linear, repeating-linear)
/// * gradient attributes (direction-identifier, angles, color names )
///
/// ## Examples
/// Here are some example declarations using colors, degrees, orientations and directions.
///
/// ```text
/// .foreground("white")
/// .background("black")
/// .background("linear-gradient(0deg, #4b6cb7, #182848)")
/// .background("repeating-linear-gradient(0.25turn, rgba(255, 255, 0, 0.6), dodgerblue, deepskyblue)")
/// .background("linear-gradient(-90deg, hsv(201, 94%, 80.5%), steelblue)")
/// .background("linear-gradient(to top right, white, skyblue 60%, lightskyblue 80%, yellow 83%, yellow)")
/// ```
/// Read on to see how the syntax is composed.
///
/// ## Definition of a color name
/// With the given implementation you can choose between three methods
/// to define a color.
///
/// A. `color codes`
///
/// You can define the value of a color with a "#" symbol followed
/// by letters or numbers, interpreted as hexadecimal digits. The
/// short variant uses 3 digits, the long variant uses 6 digits.
/// For example `#f00` will give you red. If you write `#0000ff`, you will
/// get blue.<|fim▁hole|>/// To include an alpha channel, the short variant takes 4 numbers.
/// If you need a yellow with 50.2% opacity, you use `#ff08`.
/// In the long form you need 8 numbers. `#0000ff80` represents 50.2% opaque
/// (non-premultiplied) blue.
///
/// B. `color function`
///
/// Currently the available functions that interpret a color are
/// distinguished by the keywords `rgb`, `hsv`, `hsb` and `hsl`. There are
/// `alpha variants` as well. `hsb` is an alias for `hsv`.
/// Alpha variants are coded with the keywords `rgba`, `abgr` or `argb`.
/// Here is an example to define a color via the function method:
/// `hsl(197, 71%, 73%)` will provide you a pretty skyblue color.
/// For `rgb` and `rgba` the range of the values are 0-255.
/// Any other keyword will use floating point integers to define the color
/// value. `hsva(0.0-360.0, 0.0-1.0, 0.0-1.0, 0.0-1.0)` is such an example.
/// In addition you can choose to use percent values (`%` sign) for the given
/// parameters.
/// When appending the `%` sign to the range parameters of the `rgb` function
/// call, the values are mapped to 0.0-100.0 (percent) or 0.0-1.0 (min/max).
/// For all other keywords (`hsv`, `hsb`, `hsl`) you are not allowed to append
/// the percent sign to the first parameter. If you append `%` to the following
/// parameters, OrbTk will interpret the values in a range between `0.0-100.0`.
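///
/// For instance (values are illustrative), `rgba(30, 144, 255, 128)` and
/// `hsva(209.6, 88.2%, 100.0%, 50.2%)` both describe a half-transparent
/// dodgerblue.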
///
/// C. `color name`
///
/// **WIP: The given implementation is using (utils/colors.txt). This has to be adopted!!!**
///
/// OrbTk maintains a list of color names as constants. You may
/// directly choose their string value inside the code.
///
/// Example color names are:
///
/// * COLOR_WHITE
/// * COLOR_RED
/// * COLOR_OLIVE
/// * COLOR_LINK_WATER
/// * COLOR_SLATE_GRAY
///
/// ## Definition of a gradient
/// The syntax of a gradient definition is structured as follows:
///
/// * Optional parameters are inside brackets (`[]`).
/// * Within braces (`{}`) you define the appropriate parameter value.
/// * The pipe (`|`) offers mutually exclusive variants,
///   e.g. degrees (deg), radians (rad) or turns (turn).
/// * Three points (`...`) refer to multiple stops.
/// They are respected when a gradient is rendered.
///
/// To understand gradient directions, imagine a line or vector that
/// starts at a given point inside the entity and points to an
/// imaginary target point within the same entity. Gradients will be
/// rendered along the chosen direction to reach their target
/// point. Supported directions are:
///
/// * "to bottom"
/// * "to bottom left"
/// * "to bottom right"
/// * "to left"
/// * "to right"
/// * "to top"
/// * "to top left"
/// * "to top right"
///
/// Displacement points tell the gradient algorithm to add
/// (`positive`) or subtract (`negative`) the given pixel numbers
/// from the original starting point.
///
/// Let's look at some examples. The first one shows the
/// structure of an angled gradient:
///
/// ```text
/// [repeating-]linear-gradient({Gradient-angle}{deg|rad|turn}, ...) [{X Displacement}px {Y Displacement}px], {Color} [{Stop position}{%|px}]
/// ```
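///
/// A concrete instance of that template (values are illustrative)
/// could be:
///
/// ```text
/// repeating-linear-gradient(45deg, yellow 10%, dodgerblue 30%)
/// ```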
///
/// The next example shows the structure of a gradient that will be
/// rendered in a given direction:
///
/// ```text
/// [repeating-]linear-gradient({direction-identifier}, {initial color-name}, {terminating color-name})
/// ```
///
//#[cfg(feature = "nightly")]
//#[doc(include = "../colors.md")]
#[derive(Clone, PartialEq, Debug)]
pub enum Brush {
/// Paints an area with a solid color.
SolidColor(Color),
/// Paints an area with a gradient.
Gradient(Gradient),
}
impl Brush {
pub fn is_transparent(&self) -> bool {
match self {
Brush::SolidColor(color) => color.a() == 0,
_ => false,
}
}
}
impl From<Brush> for Color {
fn from(b: Brush) -> Color {
match b {
Brush::SolidColor(color) => color,
_ => Color::rgb(0, 0, 0),
}
}
}
impl From<Brush> for Gradient {
fn from(b: Brush) -> Gradient {
match b {
Brush::Gradient(g) => g,
_ => Gradient::default(),
}
}
}
impl Default for Brush {
fn default() -> Self {
Brush::SolidColor(Color::rgba(0, 0, 0, 0))
}
}
impl From<Color> for Brush {
fn from(c: Color) -> Brush {
Brush::SolidColor(c)
}
}
impl From<Gradient> for Brush {
fn from(g: Gradient) -> Brush {
Brush::Gradient(g)
}
}
impl From<&str> for Brush {
fn from(s: &str) -> Brush {
Expression::from(s).brush().unwrap_or_default()
}
}
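// A minimal usage sketch (color strings are illustrative). Parsing is
// delegated to `Expression`; invalid input falls back to the default,
// fully transparent brush:
//
//     let solid = Brush::from("#0000ff80");
//     let gradient = Brush::from("linear-gradient(0deg, #4b6cb7, #182848)");
//     assert!(!solid.is_transparent());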
impl From<String> for Brush {
fn from(s: String) -> Brush {
Self::from(&s[..])
}
}
impl From<Value> for Brush {
fn from(v: Value) -> Self {
let value = v.get::<String>();
Brush::from(value)
}
}
#[cfg(test)]
mod tests {
// use crate::prelude::*;
// todo: tbd after brush struct is finished
}<|fim▁end|> | |
<|file_name|>kendo.culture.kok.js<|end_file_name|><|fim▁begin|>/*
* Kendo UI v2015.1.408 (http://www.telerik.com/kendo-ui)<|fim▁hole|>* Copyright 2015 Telerik AD. All rights reserved.
*
* Kendo UI commercial licenses may be obtained at
* http://www.telerik.com/purchase/license-agreement/kendo-ui-complete
* If you do not own a commercial license, this file shall be governed by the trial license terms.
*/
(function(f, define){
define([], f);
})(function(){
(function( window, undefined ) {
var kendo = window.kendo || (window.kendo = { cultures: {} });
kendo.cultures["kok"] = {
name: "kok",
numberFormat: {
pattern: ["-n"],
decimals: 2,
",": ",",
".": ".",
groupSize: [3,2],
percent: {
pattern: ["-n %","n %"],
decimals: 2,
",": ",",
".": ".",
groupSize: [3,2],
symbol: "%"
},
currency: {
pattern: ["$ -n","$ n"],
decimals: 2,
",": ",",
".": ".",
groupSize: [3,2],
symbol: "₹"
}
},
calendars: {
standard: {
days: {
names: ["आयतार","सोमार","मंगळार","बुधवार","बिरेस्तार","सुक्रार","शेनवार"],
namesAbbr: ["आय.","सोम.","मंगळ.","बुध.","बिरे.","सुक्र.","शेन."],
namesShort: ["आ","स","म","ब","ब","स","श"]
},
months: {
names: ["जानेवारी","फेब्रुवारी","मार्च","एप्रिल","मे","जून","जुलै","ऑगस्ट","सप्टेंबर","ऑक्टोबर","नोवेम्बर","डिसेंबर"],
namesAbbr: ["जाने","फेब्रु","मार्च","एप्रिल","मे","जून","जुलै","ऑग.","सप्टें.","ऑक्टो.","नोवे.","डिसें"]
},
AM: ["म.पू.","म.पू.","म.पू."],
PM: ["म.नं.","म.नं.","म.नं."],
patterns: {
d: "dd-MM-yyyy",
D: "dd MMMM yyyy",
F: "dd MMMM yyyy HH:mm:ss",
g: "dd-MM-yyyy HH:mm",
G: "dd-MM-yyyy HH:mm:ss",
m: "dd MMMM",
M: "dd MMMM",
s: "yyyy'-'MM'-'dd'T'HH':'mm':'ss",
t: "HH:mm",
T: "HH:mm:ss",
u: "yyyy'-'MM'-'dd HH':'mm':'ss'Z'",
y: "MMMM, yyyy",
Y: "MMMM, yyyy"
},
"/": "-",
":": ":",
firstDay: 1
}
}
}
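// A minimal usage sketch (assumes the kendo core script is loaded):
//
//     kendo.culture("kok");
//     kendo.toString(1234.5, "c"); // currency with the "₹" symbol and [3,2] grouping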
})(this);
return window.kendo;
}, typeof define == 'function' && define.amd ? define : function(_, f){ f(); });<|fim▁end|> | |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import<|fim▁hole|><|fim▁end|> | from .validates import * |