repo_name
stringlengths 5
100
| path
stringlengths 4
299
| copies
stringclasses 990
values | size
stringlengths 4
7
| content
stringlengths 666
1.03M
| license
stringclasses 15
values | hash
int64 -9,223,351,895,964,839,000
9,223,297,778B
| line_mean
float64 3.17
100
| line_max
int64 7
1k
| alpha_frac
float64 0.25
0.98
| autogenerated
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|
saurabh6790/omni-apps | utilities/doctype/sms_control/sms_control.py | 9 | 3941 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import load_json, nowdate, cstr
from webnotes.model.code import get_obj
from webnotes.model.doc import Document
from webnotes import msgprint
from webnotes.model.bean import getlist, copy_doclist
class DocType:
def __init__(self, doc, doclist=[]):
self.doc = doc
self.doclist = doclist
def validate_receiver_nos(self,receiver_list):
validated_receiver_list = []
for d in receiver_list:
# remove invalid character
invalid_char_list = [' ', '+', '-', '(', ')']
for x in invalid_char_list:
d = d.replace(x, '')
validated_receiver_list.append(d)
if not validated_receiver_list:
msgprint("Please enter valid mobile nos", raise_exception=1)
return validated_receiver_list
def get_sender_name(self):
"returns name as SMS sender"
sender_name = webnotes.conn.get_value('Global Defaults', None, 'sms_sender_name') or \
'ERPNXT'
if len(sender_name) > 6 and \
webnotes.conn.get_value("Control Panel", None, "country") == "India":
msgprint("""
As per TRAI rule, sender name must be exactly 6 characters.
Kindly change sender name in Setup --> Global Defaults.
Note: Hyphen, space, numeric digit, special characters are not allowed.
""", raise_exception=1)
return sender_name
def get_contact_number(self, arg):
"returns mobile number of the contact"
args = load_json(arg)
number = webnotes.conn.sql("""select mobile_no, phone from tabContact where name=%s and %s=%s""" %
('%s', args['key'], '%s'), (args['contact_name'], args['value']))
return number and (number[0][0] or number[0][1]) or ''
def send_form_sms(self, arg):
"called from client side"
args = load_json(arg)
self.send_sms([cstr(args['number'])], cstr(args['message']))
def send_sms(self, receiver_list, msg, sender_name = ''):
receiver_list = self.validate_receiver_nos(receiver_list)
arg = {
'receiver_list' : receiver_list,
'message' : msg,
'sender_name' : sender_name or self.get_sender_name()
}
if webnotes.conn.get_value('SMS Settings', None, 'sms_gateway_url'):
ret = self.send_via_gateway(arg)
msgprint(ret)
def send_via_gateway(self, arg):
ss = get_obj('SMS Settings', 'SMS Settings', with_children=1)
args = {ss.doc.message_parameter : arg.get('message')}
for d in getlist(ss.doclist, 'static_parameter_details'):
args[d.parameter] = d.value
resp = []
for d in arg.get('receiver_list'):
args[ss.doc.receiver_parameter] = d
resp.append(self.send_request(ss.doc.sms_gateway_url, args))
return resp
# Send Request
# =========================================================
def send_request(self, gateway_url, args):
import httplib, urllib
server, api_url = self.scrub_gateway_url(gateway_url)
conn = httplib.HTTPConnection(server) # open connection
headers = {}
headers['Accept'] = "text/plain, text/html, */*"
conn.request('GET', api_url + urllib.urlencode(args), headers = headers) # send request
resp = conn.getresponse() # get response
resp = resp.read()
return resp
# Split gateway url to server and api url
# =========================================================
def scrub_gateway_url(self, url):
url = url.replace('http://', '').strip().split('/')
server = url.pop(0)
api_url = '/' + '/'.join(url)
if not api_url.endswith('?'):
api_url += '?'
return server, api_url
# Create SMS Log
# =========================================================
def create_sms_log(self, arg, sent_sms):
sl = Document('SMS Log')
sl.sender_name = arg['sender_name']
sl.sent_on = nowdate()
sl.receiver_list = cstr(arg['receiver_list'])
sl.message = arg['message']
sl.no_of_requested_sms = len(arg['receiver_list'])
sl.no_of_sent_sms = sent_sms
sl.save(new=1)
| agpl-3.0 | -6,519,140,999,349,434,000 | 31.841667 | 101 | 0.640954 | false |
shawnadelic/shuup | shuup/xtheme/plugins/category_links.py | 2 | 2994 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django import forms
from django.utils.translation import ugettext_lazy as _
from shuup.core.models import Category
from shuup.xtheme import TemplatedPlugin
from shuup.xtheme.plugins.forms import GenericPluginForm, TranslatableField
class CategoryLinksConfigForm(GenericPluginForm):
"""
A configuration form for the CategoryLinksPlugin
"""
def populate(self):
"""
A custom populate method to display category choices
"""
for field in self.plugin.fields:
if isinstance(field, tuple):
name, value = field
value.initial = self.plugin.config.get(name, value.initial)
self.fields[name] = value
self.fields["categories"] = forms.ModelMultipleChoiceField(
queryset=Category.objects.all_visible(customer=None),
required=False,
initial=self.plugin.config.get("categories", None),
)
def clean(self):
"""
A custom clean method to save category configuration information in a serializable form
"""
cleaned_data = super(CategoryLinksConfigForm, self).clean()
categories = cleaned_data.get("categories", [])
cleaned_data["categories"] = [category.pk for category in categories if hasattr(category, "pk")]
return cleaned_data
class CategoryLinksPlugin(TemplatedPlugin):
"""
A plugin for displaying links to visible categories on the shop front
"""
identifier = "category_links"
name = _("Category Links")
template_name = "shuup/xtheme/plugins/category_links.jinja"
editor_form_class = CategoryLinksConfigForm
fields = [
("title", TranslatableField(label=_("Title"), required=False, initial="")),
("show_all_categories", forms.BooleanField(
label=_("Show all categories"),
required=False,
initial=True,
help_text=_("All categories are shown, even if not selected"),
)),
"categories",
]
def get_context_data(self, context):
"""
A custom get_context_data method to return only visible categories
for request customer.
"""
selected_categories = self.config.get("categories", [])
show_all_categories = self.config.get("show_all_categories", True)
request = context.get("request")
categories = Category.objects.all_visible(
customer=getattr(request, "customer"),
shop=getattr(request, "shop")
)
if not show_all_categories:
categories = categories.filter(id__in=selected_categories)
return {
"title": self.get_translated_value("title"),
"categories": categories,
}
| agpl-3.0 | -7,679,898,462,294,426,000 | 35.962963 | 104 | 0.637609 | false |
dchad/malware-detection | vs/feature_extraction_java.py | 1 | 5427 | # feature_extraction_java.py
#
# Read a list of Java Bytecode files and extract
# feature sets from them.
#
# Input:
#
# Output:
#
# Author: Derek Chadwick
# Date : 05/09/2016
#
# TODO: all of the things
from multiprocessing import Pool
import os
from csv import writer
import numpy as np
import pandas as pd
import math
import scipy.misc
import array
import time as tm
import re
import subprocess as sub
java_opcodes = ['aaload','aastore','aconst_null','aload','aload_0','aload_1','aload_2','aload_3',
'anewarray','areturn','arraylength','astore','astore_0','astore_1','astore_2','astore_3',
'athrow','baload','bastore','bipush','breakpoint','caload','castore','checkcast',
'd2f','d2i','d2l','dadd','daload','dastore','dcmpg','dcmpl','dconst_0','dconst_1',
'ddiv','dload','dload_0','dload_1','dload_2','dload_3','dmul','dneg','drem','dreturn',
'dstore','dstore_0','dstore_1','dstore_2','dstore_3','dsub','dup','dup_x1','dup_x2',
'dup2','dup2_x1','dup2_x2','f2d','f2i','f2l','fadd','faload','fastore','fcmpg',
'fcmpl','fconst_0','fconst_1','fconst_2','fdiv','fload','fload_0','fload_1',
'fload_2','fload_3','fmul','fneg','frem','freturn','fstore','fstore_0',
'fstore_1','fstore_2','fstore_3','fsub','getfield','getstatic','goto','goto_w',
'i2b','i2c','i2d','i2f','i2l','i2s','iadd','iaload','iand','iastore','iconst_m1',
'iconst_0','iconst_1','iconst_2','iconst_3','iconst_4','iconst_5','idiv',
'if_acmpeq','if_acmpne','if_icmpeq','if_icmpge','if_icmpgt','if_icmple',
'if_icmplt','if_icmpne','ifeq','ifge','ifgt','ifle','iflt','ifne','ifnonnull',
'ifnull','iinc','iload','iload_0','iload_1','iload_2','iload_3','impdep1',
'impdep2','imul','ineg','instanceof','invokedynamic','invokeinterface',
'invokespecial','invokestatic','invokevirtual','ior','irem','ireturn','ishl',
'ishr','istore','istore_0','istore_1','istore_2','istore_3','isub','iushr',
'ixor','jsr','jsr_w','l2d','l2f','l2i','ladd','laload','land','lastore','lcmp',
'lconst_0','lconst_1','ldc','ldc_w','ldc2_w','ldiv','lload','lload_0','lload_1',
'lload_2','lload_3','lmul','lneg','lookupswitch','lor','lrem','lreturn','lshl',
'lshr','lstore','lstore_0','lstore_1','lstore_2','lstore_3','lsub','lushr',
'lxor','monitorenter','monitorexit','multianewarray','new','newarray',
'nop','pop','pop2','putfield','putstatic','ret','return','saload','sastore',
'sipush','swap','tableswitch','wide']
def count_asm_registers(asm_code):
registers_values = [0]*len(registers)
for row in asm_code:
parts = row.replace(',',' ').replace('+',' ').replace('*',' ').replace('[',' ').replace(']',' ') \
.replace('-',' ').split()
for register in registers:
registers_values[registers.index(register)] += parts.count(register)
return registers_values
def count_asm_opcodes(asm_code):
opcodes_values = [0]*len(opcodes)
for row in asm_code:
parts = row.split()
for opcode in opcodes:
if opcode in parts:
opcodes_values[opcodes.index(opcode)] += 1
break
return opcodes_values
def extract_asm_features(tfiles, feature_file, api_file):
pid = os.getpid()
print('Process id:', pid)
feature_file = 'data/' + str(pid) + feature_file # libc API, symbols, registers, opcodes, etc...
print('feature file:', feature_file)
fapi = open("data/elf-libc-api.txt")
defined_apis = fapi.readlines()
for idx, fname in defined_apis:
defined_apis[idx] = fname.rstrip() # Remove newlines, they are annoying.
asm_files = [i for i in tfiles if '.asm' in i]
ftot = len(asm_files)
feature_counts = []
with open(feature_file, 'w') as f:
# write the csv header
fw = writer(f)
colnames = ['file_name'] + registers + opcodes + defined_apis + keywords
fw.writerow(colnames)
for idx, fname in enumerate(asm_files):
fasm = open(ext_drive + fname, 'r')
content = fasm.readlines()
reg_vals = count_asm_registers(content)
opc_vals = count_asm_opcodes(content)
api_vals = count_asm_APIs(content, defined_apis)
sec_vals = count_asm_sections(content)
mis_vals = count_asm_misc(content)
count_vals = reg_vals + opc_vals + api_vals + mis_vals + sec_vals
feature_counts.append([fname[:fname.find('.asm')]] + count_vals)
# Writing rows after every 10 files processed
if (idx+1) % 10 == 0:
print("{:d} Processed {:d} of {:d} total files.".format(pid, idx + 1, ftot))
fw.writerows(feature_counts)
feature_counts = []
# Writing remaining files
if len(feature_counts) > 0:
fw.writerows(feature_counts)
feature_counts = []
return
# Start of Script
ext_drive = '/opt/vs/train1/'
tfiles = os.listdir(ext_drive)
print("Total Files: {:d}".format(len(tfiles)))
extract_asm_features(tfiles)
# End of Script | gpl-3.0 | 3,928,310,142,784,207,400 | 39.507463 | 106 | 0.559978 | false |
JohnnyKing94/pootle | pootle/apps/pootle_store/store/serialize.py | 5 | 2351 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from django.utils.functional import cached_property
from pootle.core.delegate import config, serializers
class StoreSerialization(object):
"""Calls configured deserializers for Store"""
def __init__(self, store):
self.store = store
@cached_property
def project_serializers(self):
project = self.store.translation_project.project
return (
config.get(
project.__class__,
instance=project,
key="pootle.core.serializers")
or [])
@property
def pootle_path(self):
return self.store.pootle_path
@cached_property
def max_unit_revision(self):
return self.store.data.max_unit_revision
@cached_property
def serializers(self):
available_serializers = serializers.gather(
self.store.translation_project.project.__class__)
if not available_serializers.keys():
return []
found_serializers = []
for serializer in self.project_serializers:
found_serializers.append(available_serializers[serializer])
return found_serializers
def tostring(self, include_obsolete=False, raw=False):
store = self.store.syncer.convert(
include_obsolete=include_obsolete, raw=raw)
if hasattr(store, "updateheader"):
# FIXME We need those headers on import
# However some formats just don't support setting metadata
max_unit_revision = self.max_unit_revision or 0
store.updateheader(add=True, X_Pootle_Path=self.pootle_path)
store.updateheader(add=True, X_Pootle_Revision=max_unit_revision)
return str(store)
def pipeline(self, data):
if not self.serializers:
return data
for serializer in self.serializers:
data = serializer(self.store, data).output
return data
def serialize(self, include_obsolete=False, raw=False):
return self.pipeline(
self.tostring(include_obsolete=include_obsolete, raw=raw))
| gpl-3.0 | -1,505,137,486,076,295,200 | 33.072464 | 77 | 0.647384 | false |
hgl888/chromium-crosswalk | tools/perf/page_sets/startup_pages_record.py | 32 | 1248 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry import story
class StartupPagesRecordPage(page_module.Page):
def __init__(self, url, page_set):
super(StartupPagesRecordPage, self).__init__(url=url, page_set=page_set)
self.archive_data_file = 'data/startup_pages.json'
class StartupPagesRecordPageSet(story.StorySet):
""" Pages to record data for testing starting Chrome with a URL.
We can't use startup_pages.json with record_wpr, since record_wpr
requires a default navigate step, which we don't want for startup
testing; but we do want to record the pages it uses. Also, record_wpr
fails on about:blank, which we want to include in startup testing.
"""
def __init__(self):
super(StartupPagesRecordPageSet, self).__init__(
archive_data_file='data/startup_pages.json')
urls_list = [
# Why: typical page
'http://bbc.co.uk',
# Why: Horribly complex page - stress test!
'http://kapook.com',
]
for url in urls_list:
self.AddStory(StartupPagesRecordPage(url, self))
| bsd-3-clause | -1,371,564,430,892,264,200 | 33.666667 | 76 | 0.69391 | false |
hitdong/pyvision | src/pyvision/other/optics.py | 4 | 2308 | '''
Created on Mar 22, 2013
@author: David S. Bolme
Oak Ridge National Laboratory
'''
def diffractionLimit(distance,wavelength,aperture):
'''
This function computes the Diffraction limit of an optical system. It
returns the smallest resolvable pattern at a given wavelength and
aperture.
@param distance: distance to the target in meters.
@param wavelength: the wavelength of the light in nanometers
@param aperture: the size of the aperture in meters.
@returns: the resolution limit in meters
'''
# Convert the wavelength of the
wavelength = 1.0e-9*wavelength
# Compute the resolution
resolution = distance * 1.220*(wavelength/aperture)
return resolution
def apertureComputation(distance,wavelength,resolution):
'''
This function computes the Diffraction limit of an optical system. It
returns the smallest resolvable pattern at a given wavelength and
aperture.
@param distance: distance to the target in meters.
@param wavelength: the wavelength of the light in nanometers
@param resolution: the resolution on target in metes.
@returns: the aperture size in meters.
'''
# Convert the wavelength of the
wavelength = 1.0e-9*wavelength
# Compute the resolution
aperture = (distance * 1.220* wavelength) / resolution
return aperture
def fNumber(focal_length,aperture):
N=focal_length/aperture
return N
def depthOfField(hyperfocal,distance):
'''
'''
H = hyperfocal
s = distance
Dn = (H*s)/(H+s)
Df = (H*s)/(H-s)
return Dn,Df,Df-Dn
def hyperFocalDistance(focal_length,fnumber,circle_of_confusion,definition=2):
'''
http://en.wikipedia.org/wiki/Hyperfocal_distance
Definition 1: The hyperfocal distance is the closest distance at which a
lens can be focused while keeping objects at infinity acceptably sharp.
When the lens is focused at this distance, all objects at distances from
half of the hyperfocal distance out to infinity will be acceptably sharp.
Definition 2: The hyperfocal distance is the distance beyond which all
objects are acceptably sharp, for a lens focused at infinity.
'''
return (focal_length**2)/(fnumber*circle_of_confusion)
| bsd-3-clause | -2,947,494,543,172,497,000 | 31.055556 | 78 | 0.696707 | false |
alessandrocamilli/l10n-italy | l10n_it_fiscalcode/__openerp__.py | 2 | 1880 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Associazione Odoo Italia
# (<http://www.odoo-italia.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Italian Localisation - Fiscal Code',
'version': '8.0.0.1.0',
'category': 'Localisation/Italy',
'description': """
This module customizes Odoo in order to fit italian laws and mores
Functionalities:
- Fiscal code computation for partner
External depends:
* Python codicefiscale https://pypi.python.org/pypi/codicefiscale
""",
'author': "Odoo Italian Community,Odoo Community Association (OCA)",
'website': 'http://www.odoo-italia.org',
'license': 'AGPL-3',
'depends': ['base_vat'],
'external_dependencies': {
'python': ['codicefiscale'],
},
'data': [
'view/fiscalcode_view.xml',
'wizard/compute_fc_view.xml',
'data/res.city.it.code.csv',
"security/ir.model.access.csv"
],
'qweb': [],
'demo': [],
'test': [
'test/fiscalcode.yml',
],
'active': False,
'installable': False
}
| agpl-3.0 | 1,540,303,066,270,929,400 | 31.982456 | 78 | 0.593085 | false |
ewongbb/stem | test/unit/doctest.py | 2 | 3714 | """
Tests examples from our documentation.
"""
from __future__ import absolute_import
import doctest
import os
import unittest
import stem.descriptor.router_status_entry
import stem.util.connection
import stem.util.str_tools
import stem.util.system
import stem.version
import test
from stem.response import ControlMessage
try:
# added in python 3.3
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
EXPECTED_CIRCUIT_STATUS = """\
20 EXTENDED $718BCEA286B531757ACAFF93AE04910EA73DE617=KsmoinOK,$649F2D0ACF418F7CFC6539AB2257EB2D5297BAFA=Eskimo BUILD_FLAGS=NEED_CAPACITY PURPOSE=GENERAL TIME_CREATED=2012-12-06T13:51:11.433755
19 BUILT $718BCEA286B531757ACAFF93AE04910EA73DE617=KsmoinOK,$30BAB8EE7606CBD12F3CC269AE976E0153E7A58D=Pascal1,$2765D8A8C4BBA3F89585A9FFE0E8575615880BEB=Anthracite PURPOSE=GENERAL TIME_CREATED=2012-12-06T13:50:56.969938\
"""
ADD_ONION_RESPONSE = """\
250-ServiceID=oekn5sqrvcu4wote
250-ClientAuth=bob:nKwfvVPmTNr2k2pG0pzV4g
250 OK
"""
class TestDocumentation(unittest.TestCase):
def test_examples(self):
stem_dir = os.path.join(test.STEM_BASE, 'stem')
is_failed = False
for path in stem.util.system.files_with_suffix(stem_dir, '.py'):
args = {'module_relative': False}
test_run = None
if path.endswith('/stem/util/conf.py'):
with patch('stem.util.conf.get_config') as get_config_mock:
config = Mock()
config.load.return_value = None
get_config_mock.return_value = config
test_run = doctest.testfile(path, **args)
elif path.endswith('/stem/descriptor/router_status_entry.py'):
args['globs'] = {
'_base64_to_hex': stem.descriptor.router_status_entry._base64_to_hex,
}
test_run = doctest.testfile(path, **args)
elif path.endswith('/stem/util/connection.py'):
args['globs'] = {
'expand_ipv6_address': stem.util.connection.expand_ipv6_address,
}
test_run = doctest.testfile(path, **args)
elif path.endswith('/stem/util/str_tools.py'):
args['globs'] = {
'_to_camel_case': stem.util.str_tools._to_camel_case,
'_split_by_length': stem.util.str_tools._split_by_length,
'crop': stem.util.str_tools.crop,
'size_label': stem.util.str_tools.size_label,
'time_label': stem.util.str_tools.time_label,
'time_labels': stem.util.str_tools.time_labels,
'short_time_label': stem.util.str_tools.short_time_label,
'parse_short_time_label': stem.util.str_tools.parse_short_time_label,
}
test_run = doctest.testfile(path, **args)
elif path.endswith('/stem/response/__init__.py'):
pass # the escaped slashes seem to be confusing doctest
elif path.endswith('/stem/control.py'):
controller = Mock()
controller.extend_circuit.side_effect = [19, 20]
controller.get_info.side_effect = lambda arg: {
'circuit-status': EXPECTED_CIRCUIT_STATUS,
}[arg]
response = ControlMessage.from_str(ADD_ONION_RESPONSE, 'ADD_ONION', normalize = True)
controller.create_ephemeral_hidden_service.return_value = response
args['globs'] = {'controller': controller}
test_run = doctest.testfile(path, **args)
elif path.endswith('/stem/version.py'):
with patch('stem.version.get_system_tor_version', Mock(return_value = stem.version.Version('0.2.1.30'))):
test_run = doctest.testfile(path, **args)
else:
test_run = doctest.testfile(path, **args)
if test_run and test_run.failed > 0:
is_failed = True
if is_failed:
self.fail('doctests encountered errors')
| lgpl-3.0 | 881,542,893,550,349,000 | 35.058252 | 219 | 0.671782 | false |
balloob/home-assistant | tests/components/nest/test_config_flow_sdm.py | 4 | 2347 | """Test the Google Nest Device Access config flow."""
from homeassistant import config_entries, setup
from homeassistant.components.nest.const import DOMAIN, OAUTH2_AUTHORIZE, OAUTH2_TOKEN
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.helpers import config_entry_oauth2_flow
from tests.async_mock import patch
CLIENT_ID = "1234"
CLIENT_SECRET = "5678"
PROJECT_ID = "project-id-4321"
SUBSCRIBER_ID = "subscriber-id-9876"
async def test_full_flow(hass, aiohttp_client, aioclient_mock, current_request):
"""Check full flow."""
assert await setup.async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
"project_id": PROJECT_ID,
"subscriber_id": SUBSCRIBER_ID,
CONF_CLIENT_ID: CLIENT_ID,
CONF_CLIENT_SECRET: CLIENT_SECRET,
},
"http": {"base_url": "https://example.com"},
},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
state = config_entry_oauth2_flow._encode_jwt(hass, {"flow_id": result["flow_id"]})
oauth_authorize = OAUTH2_AUTHORIZE.format(project_id=PROJECT_ID)
assert result["url"] == (
f"{oauth_authorize}?response_type=code&client_id={CLIENT_ID}"
"&redirect_uri=https://example.com/auth/external/callback"
f"&state={state}&scope=https://www.googleapis.com/auth/sdm.service"
"+https://www.googleapis.com/auth/pubsub"
"&access_type=offline&prompt=consent"
)
client = await aiohttp_client(hass.http.app)
resp = await client.get(f"/auth/external/callback?code=abcd&state={state}")
assert resp.status == 200
assert resp.headers["content-type"] == "text/html; charset=utf-8"
aioclient_mock.post(
OAUTH2_TOKEN,
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
with patch(
"homeassistant.components.nest.async_setup_entry", return_value=True
) as mock_setup:
await hass.config_entries.flow.async_configure(result["flow_id"])
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert len(mock_setup.mock_calls) == 1
| apache-2.0 | 2,635,588,060,780,906,000 | 34.560606 | 86 | 0.635705 | false |
OSSESAC/odoopubarquiluz | addons/procurement/schedulers.py | 25 | 13521 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp import netsvc
from openerp import pooler
from openerp.osv import osv
from openerp.osv import fields
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp import tools
class procurement_order(osv.osv):
_inherit = 'procurement.order'
def run_scheduler(self, cr, uid, automatic=False, use_new_cursor=False, context=None):
''' Runs through scheduler.
@param use_new_cursor: False or the dbname
'''
if use_new_cursor:
use_new_cursor = cr.dbname
self._procure_confirm(cr, uid, use_new_cursor=use_new_cursor, context=context)
self._procure_orderpoint_confirm(cr, uid, automatic=automatic,\
use_new_cursor=use_new_cursor, context=context)
def _procure_confirm(self, cr, uid, ids=None, use_new_cursor=False, context=None):
'''
Call the scheduler to check the procurement order
@param self: The object pointer
@param cr: The current row, from the database cursor,
@param uid: The current user ID for security checks
@param ids: List of selected IDs
@param use_new_cursor: False or the dbname
@param context: A standard dictionary for contextual values
@return: Dictionary of values
'''
if context is None:
context = {}
try:
if use_new_cursor:
cr = pooler.get_db(use_new_cursor).cursor()
wf_service = netsvc.LocalService("workflow")
procurement_obj = self.pool.get('procurement.order')
if not ids:
ids = procurement_obj.search(cr, uid, [('state', '=', 'exception')], order="date_planned")
for id in ids:
wf_service.trg_validate(uid, 'procurement.order', id, 'button_restart', cr)
if use_new_cursor:
cr.commit()
company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
maxdate = (datetime.today() + relativedelta(days=company.schedule_range)).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
start_date = fields.datetime.now()
offset = 0
report = []
report_total = 0
report_except = 0
report_later = 0
while True:
ids = procurement_obj.search(cr, uid, [('state', '=', 'confirmed'), ('procure_method', '=', 'make_to_order')], offset=offset, limit=500, order='priority, date_planned', context=context)
for proc in procurement_obj.browse(cr, uid, ids, context=context):
if maxdate >= proc.date_planned:
wf_service.trg_validate(uid, 'procurement.order', proc.id, 'button_check', cr)
else:
offset += 1
report_later += 1
if proc.state == 'exception':
report.append(_('PROC %d: on order - %3.2f %-5s - %s') % \
(proc.id, proc.product_qty, proc.product_uom.name,
proc.product_id.name))
report_except += 1
report_total += 1
if use_new_cursor:
cr.commit()
if not ids:
break
offset = 0
ids = []
while True:
report_ids = []
ids = procurement_obj.search(cr, uid, [('state', '=', 'confirmed'), ('procure_method', '=', 'make_to_stock')], offset=offset)
for proc in procurement_obj.browse(cr, uid, ids):
if maxdate >= proc.date_planned:
wf_service.trg_validate(uid, 'procurement.order', proc.id, 'button_check', cr)
report_ids.append(proc.id)
else:
report_later += 1
report_total += 1
if proc.state == 'exception':
report.append(_('PROC %d: from stock - %3.2f %-5s - %s') % \
(proc.id, proc.product_qty, proc.product_uom.name,
proc.product_id.name,))
report_except += 1
if use_new_cursor:
cr.commit()
offset += len(ids)
if not ids: break
end_date = fields.datetime.now()
if use_new_cursor:
cr.commit()
finally:
if use_new_cursor:
try:
cr.close()
except Exception:
pass
return {}
def _prepare_automatic_op_procurement(self, cr, uid, product, warehouse, location_id, context=None):
return {'name': _('Automatic OP: %s') % (product.name,),
'origin': _('SCHEDULER'),
'date_planned': datetime.today().strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'product_id': product.id,
'product_qty': -product.virtual_available,
'product_uom': product.uom_id.id,
'location_id': location_id,
'company_id': warehouse.company_id.id,
'procure_method': 'make_to_order',}
def create_automatic_op(self, cr, uid, context=None):
"""
Create procurement of virtual stock < 0
@param self: The object pointer
@param cr: The current row, from the database cursor,
@param uid: The current user ID for security checks
@param context: A standard dictionary for contextual values
@return: Dictionary of values
"""
if context is None:
context = {}
product_obj = self.pool.get('product.product')
proc_obj = self.pool.get('procurement.order')
warehouse_obj = self.pool.get('stock.warehouse')
wf_service = netsvc.LocalService("workflow")
warehouse_ids = warehouse_obj.search(cr, uid, [], context=context)
products_ids = product_obj.search(cr, uid, [], order='id', context=context)
for warehouse in warehouse_obj.browse(cr, uid, warehouse_ids, context=context):
context['warehouse'] = warehouse
# Here we check products availability.
# We use the method 'read' for performance reasons, because using the method 'browse' may crash the server.
for product_read in product_obj.read(cr, uid, products_ids, ['virtual_available'], context=context):
if product_read['virtual_available'] >= 0.0:
continue
product = product_obj.browse(cr, uid, [product_read['id']], context=context)[0]
if product.supply_method == 'buy':
location_id = warehouse.lot_input_id.id
elif product.supply_method == 'produce':
location_id = warehouse.lot_stock_id.id
else:
continue
proc_id = proc_obj.create(cr, uid,
self._prepare_automatic_op_procurement(cr, uid, product, warehouse, location_id, context=context),
context=context)
wf_service.trg_validate(uid, 'procurement.order', proc_id, 'button_confirm', cr)
wf_service.trg_validate(uid, 'procurement.order', proc_id, 'button_check', cr)
return True
def _get_orderpoint_date_planned(self, cr, uid, orderpoint, start_date, context=None):
date_planned = start_date + \
relativedelta(days=orderpoint.product_id.seller_delay or 0.0)
return date_planned.strftime(DEFAULT_SERVER_DATE_FORMAT)
def _prepare_orderpoint_procurement(self, cr, uid, orderpoint, product_qty, context=None):
return {'name': orderpoint.name,
'date_planned': self._get_orderpoint_date_planned(cr, uid, orderpoint, datetime.today(), context=context),
'product_id': orderpoint.product_id.id,
'product_qty': product_qty,
'company_id': orderpoint.company_id.id,
'product_uom': orderpoint.product_uom.id,
'location_id': orderpoint.location_id.id,
'procure_method': 'make_to_order',
'origin': orderpoint.name}
def _product_virtual_get(self, cr, uid, order_point):
location_obj = self.pool.get('stock.location')
return location_obj._product_virtual_get(cr, uid,
order_point.location_id.id, [order_point.product_id.id],
{'uom': order_point.product_uom.id})[order_point.product_id.id]
def _procure_orderpoint_confirm(self, cr, uid, automatic=False,\
use_new_cursor=False, context=None, user_id=False):
'''
Create procurement based on Orderpoint
use_new_cursor: False or the dbname
@param self: The object pointer
@param cr: The current row, from the database cursor,
@param user_id: The current user ID for security checks
@param context: A standard dictionary for contextual values
@param param: False or the dbname
@return: Dictionary of values
"""
'''
if context is None:
context = {}
if use_new_cursor:
cr = pooler.get_db(use_new_cursor).cursor()
orderpoint_obj = self.pool.get('stock.warehouse.orderpoint')
procurement_obj = self.pool.get('procurement.order')
wf_service = netsvc.LocalService("workflow")
offset = 0
ids = [1]
if automatic:
self.create_automatic_op(cr, uid, context=context)
while ids:
ids = orderpoint_obj.search(cr, uid, [], offset=offset, limit=100)
for op in orderpoint_obj.browse(cr, uid, ids, context=context):
prods = self._product_virtual_get(cr, uid, op)
if prods is None:
continue
if prods < op.product_min_qty:
qty = max(op.product_min_qty, op.product_max_qty)-prods
reste = qty % op.qty_multiple
if reste > 0:
qty += op.qty_multiple - reste
if qty <= 0:
continue
if op.product_id.type not in ('consu'):
if op.procurement_draft_ids:
# Check draft procurement related to this order point
pro_ids = [x.id for x in op.procurement_draft_ids]
procure_datas = procurement_obj.read(
cr, uid, pro_ids, ['id', 'product_qty'], context=context)
to_generate = qty
for proc_data in procure_datas:
if to_generate >= proc_data['product_qty']:
wf_service.trg_validate(uid, 'procurement.order', proc_data['id'], 'button_confirm', cr)
procurement_obj.write(cr, uid, [proc_data['id']], {'origin': op.name}, context=context)
to_generate -= proc_data['product_qty']
if not to_generate:
break
qty = to_generate
if qty:
proc_id = procurement_obj.create(cr, uid,
self._prepare_orderpoint_procurement(cr, uid, op, qty, context=context),
context=context)
wf_service.trg_validate(uid, 'procurement.order', proc_id,
'button_confirm', cr)
wf_service.trg_validate(uid, 'procurement.order', proc_id,
'button_check', cr)
orderpoint_obj.write(cr, uid, [op.id],
{'procurement_id': proc_id}, context=context)
offset += len(ids)
if use_new_cursor:
cr.commit()
if use_new_cursor:
cr.commit()
cr.close()
return {}
procurement_order()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 7,730,736,791,106,820,000 | 46.276224 | 201 | 0.533762 | false |
ojii/django-shop | shop/cart/modifiers/tax_modifiers.py | 2 | 1703 | # -*- coding: utf-8 -*-
from decimal import Decimal
from shop.cart.cart_modifiers_base import BaseCartModifier
class TenPercentGlobalTaxModifier(BaseCartModifier):
"""
A basic Tax calculator: it simply adds a taxes field to the *order*,
and makes it a fixed percentage of the subtotal (10%)
Obviously, this is only provided as an example, and anything serious should
use a more dynamic configuration system, such as settings or models to
hold the tax values...
"""
TAX_PERCENTAGE = Decimal('10')
def add_extra_cart_price_field(self, cart):
"""
Add a field on cart.extra_price_fields:
"""
taxes = (self.TAX_PERCENTAGE/100) * cart.subtotal_price
to_append = ('Taxes total', taxes)
cart.extra_price_fields.append(to_append)
return cart
class TenPercentPerItemTaxModifier(BaseCartModifier):
"""
This adds a 10% tax cart modifier, calculated on the item's base price, plus
any modifier applied to the cart item *so far* (order matters!).
Make sure the moment you apply taxes comply with your local regulations!
Some countries insist that taxes are calculated after/before discounts, and
so forth
"""
TAX_PERCENTAGE = Decimal("10")
def add_extra_cart_item_price_field(self, cart_item):
total_before_taxes = cart_item.line_subtotal
for label, value in cart_item.extra_price_fields:
total_before_taxes = total_before_taxes + value
tax_amount =(self.TAX_PERCENTAGE/100) * total_before_taxes
to_append = ('Taxes (10%)', tax_amount)
cart_item.extra_price_fields.append(to_append)
| bsd-3-clause | -288,910,436,574,436,740 | 36.844444 | 80 | 0.661186 | false |
hthompson6/a10-neutron-lbaas | a10_neutron_lbaas/tests/test_case.py | 2 | 1281 | # Copyright (C) 2015, A10 Networks Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
def assertIn(expected, actual):
if expected not in actual:
raise Exception("Expected to find {0} in {1}".format(expected, actual))
def assertIsNot(a, b):
if a is b:
raise Exception("Expected {0} to not be {1}".format(a, b))
class TestCase(unittest.TestCase):
"""unittest.TestCase with portable or custom assertions"""
def __init__(self, *args):
super(TestCase, self).__init__(*args)
self._patch("assertIn", assertIn)
self._patch("assertIsNot", assertIsNot)
def _patch(self, key, value):
if not hasattr(self, key):
setattr(self, key, value)
| apache-2.0 | -2,322,329,403,750,829,600 | 31.846154 | 79 | 0.673692 | false |
endlessm/chromium-browser | build/config/ios/compile_xcassets.py | 5 | 9042 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import re
import subprocess
import sys
import tempfile
"""Wrapper around actool to compile assets catalog.
The script compile_xcassets.py is a wrapper around actool to compile
assets catalog to Assets.car that turns warning into errors. It also
fixes some quirks of actool to make it work from ninja (mostly that
actool seems to require absolute path but gn generates command-line
with relative paths).
The wrapper filter out any message that is not a section header and
not a warning or error message, and fails if filtered output is not
empty. This should to treat all warnings as error until actool has
an option to fail with non-zero error code when there are warnings.
"""
# Pattern matching a section header in the output of actool.
SECTION_HEADER = re.compile('^/\\* ([^ ]*) \\*/$')
# Name of the section containing informational messages that can be ignored.
NOTICE_SECTION = 'com.apple.actool.compilation-results'
# Regular expressions matching spurious messages from actool that should be
# ignored (as they are bogus). Generally a bug should be filed with Apple
# when adding a pattern here.
SPURIOUS_PATTERNS = map(re.compile, [
# crbug.com/770634, likely a bug in Xcode 9.1 beta, remove once build
# requires a version of Xcode with a fix.
r'\[\]\[ipad\]\[76x76\]\[\]\[\]\[1x\]\[\]\[\]: notice: \(null\)',
# crbug.com/770634, likely a bug in Xcode 9.2 beta, remove once build
# requires a version of Xcode with a fix.
r'\[\]\[ipad\]\[76x76\]\[\]\[\]\[1x\]\[\]\[\]: notice: 76x76@1x app icons'
' only apply to iPad apps targeting releases of iOS prior to 10.0.',
])
# Map special type of asset catalog to the corresponding command-line
# parameter that need to be passed to actool.
ACTOOL_FLAG_FOR_ASSET_TYPE = {
'.appiconset': '--app-icon',
'.launchimage': '--launch-image',
}
def IsSpuriousMessage(line):
"""Returns whether line contains a spurious message that should be ignored."""
for pattern in SPURIOUS_PATTERNS:
match = pattern.search(line)
if match is not None:
return True
return False
def FilterCompilerOutput(compiler_output, relative_paths):
"""Filers actool compilation output.
The compiler output is composed of multiple sections for each different
level of output (error, warning, notices, ...). Each section starts with
the section name on a single line, followed by all the messages from the
section.
The function filter any lines that are not in com.apple.actool.errors or
com.apple.actool.document.warnings sections (as spurious messages comes
before any section of the output).
See crbug.com/730054, crbug.com/739163 and crbug.com/770634 for some example
messages that pollute the output of actool and cause flaky builds.
Args:
compiler_output: string containing the output generated by the
compiler (contains both stdout and stderr)
relative_paths: mapping from absolute to relative paths used to
convert paths in the warning and error messages (unknown paths
will be left unaltered)
Returns:
The filtered output of the compiler. If the compilation was a
success, then the output will be empty, otherwise it will use
relative path and omit any irrelevant output.
"""
filtered_output = []
current_section = None
data_in_section = False
for line in compiler_output.splitlines():
match = SECTION_HEADER.search(line)
if match is not None:
data_in_section = False
current_section = match.group(1)
continue
if current_section and current_section != NOTICE_SECTION:
if IsSpuriousMessage(line):
continue
absolute_path = line.split(':')[0]
relative_path = relative_paths.get(absolute_path, absolute_path)
if absolute_path != relative_path:
line = relative_path + line[len(absolute_path):]
if not data_in_section:
data_in_section = True
filtered_output.append('/* %s */\n' % current_section)
filtered_output.append(line + '\n')
return ''.join(filtered_output)
def CompileAssetCatalog(output, platform, product_type, min_deployment_target,
inputs, compress_pngs, partial_info_plist):
"""Compile the .xcassets bundles to an asset catalog using actool.
Args:
output: absolute path to the containing bundle
platform: the targeted platform
product_type: the bundle type
min_deployment_target: minimum deployment target
inputs: list of absolute paths to .xcassets bundles
compress_pngs: whether to enable compression of pngs
partial_info_plist: path to partial Info.plist to generate
"""
command = [
'xcrun', 'actool', '--output-format=human-readable-text',
'--notices', '--warnings', '--errors', '--platform', platform,
'--minimum-deployment-target', min_deployment_target,
]
if compress_pngs:
command.extend(['--compress-pngs'])
if product_type != '':
command.extend(['--product-type', product_type])
if platform == 'macosx':
command.extend(['--target-device', 'mac'])
else:
command.extend(['--target-device', 'iphone', '--target-device', 'ipad'])
# Scan the input directories for the presence of asset catalog types that
# require special treatment, and if so, add them to the actool command-line.
for relative_path in inputs:
if not os.path.isdir(relative_path):
continue
for file_or_dir_name in os.listdir(relative_path):
if not os.path.isdir(os.path.join(relative_path, file_or_dir_name)):
continue
asset_name, asset_type = os.path.splitext(file_or_dir_name)
if asset_type not in ACTOOL_FLAG_FOR_ASSET_TYPE:
continue
command.extend([ACTOOL_FLAG_FOR_ASSET_TYPE[asset_type], asset_name])
# Always ask actool to generate a partial Info.plist file. If not path
# has been given by the caller, use a temporary file name.
temporary_file = None
if not partial_info_plist:
temporary_file = tempfile.NamedTemporaryFile(suffix='.plist')
partial_info_plist = temporary_file.name
command.extend(['--output-partial-info-plist', partial_info_plist])
# Dictionary used to convert absolute paths back to their relative form
# in the output of actool.
relative_paths = {}
# actool crashes if paths are relative, so convert input and output paths
# to absolute paths, and record the relative paths to fix them back when
# filtering the output.
absolute_output = os.path.abspath(output)
relative_paths[output] = absolute_output
relative_paths[os.path.dirname(output)] = os.path.dirname(absolute_output)
command.extend(['--compile', os.path.dirname(os.path.abspath(output))])
for relative_path in inputs:
absolute_path = os.path.abspath(relative_path)
relative_paths[absolute_path] = relative_path
command.append(absolute_path)
try:
# Run actool and redirect stdout and stderr to the same pipe (as actool
# is confused about what should go to stderr/stdout).
process = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, _ = process.communicate()
# Filter the output to remove all garbarge and to fix the paths.
stdout = FilterCompilerOutput(stdout, relative_paths)
if process.returncode or stdout:
sys.stderr.write(stdout)
sys.exit(1)
finally:
if temporary_file:
temporary_file.close()
def Main():
parser = argparse.ArgumentParser(
description='compile assets catalog for a bundle')
parser.add_argument(
'--platform', '-p', required=True,
choices=('macosx', 'iphoneos', 'iphonesimulator'),
help='target platform for the compiled assets catalog')
parser.add_argument(
'--minimum-deployment-target', '-t', required=True,
help='minimum deployment target for the compiled assets catalog')
parser.add_argument(
'--output', '-o', required=True,
help='path to the compiled assets catalog')
parser.add_argument(
'--compress-pngs', '-c', action='store_true', default=False,
help='recompress PNGs while compiling assets catalog')
parser.add_argument(
'--product-type', '-T',
help='type of the containing bundle')
parser.add_argument(
'--partial-info-plist', '-P',
help='path to partial info plist to create')
parser.add_argument(
'inputs', nargs='+',
help='path to input assets catalog sources')
args = parser.parse_args()
if os.path.basename(args.output) != 'Assets.car':
sys.stderr.write(
'output should be path to compiled asset catalog, not '
'to the containing bundle: %s\n' % (args.output,))
sys.exit(1)
CompileAssetCatalog(
args.output,
args.platform,
args.product_type,
args.minimum_deployment_target,
args.inputs,
args.compress_pngs,
args.partial_info_plist)
if __name__ == '__main__':
sys.exit(Main())
| bsd-3-clause | 2,769,308,951,750,360,000 | 35.023904 | 80 | 0.699513 | false |
jumpstarter-io/nova | nova/tests/virt/libvirt/test_utils.py | 2 | 12038 | # Copyright 2012 NTT Data. All Rights Reserved.
# Copyright 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import os
import mock
from oslo.config import cfg
from nova.openstack.common import processutils
from nova import test
from nova import utils
from nova.virt import images
from nova.virt.libvirt import utils as libvirt_utils
CONF = cfg.CONF
class LibvirtUtilsTestCase(test.NoDBTestCase):
def test_get_disk_type(self):
path = "disk.config"
example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
cluster_size: 65536
disk size: 96K
blah BLAH: bb
"""
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
os.path.exists(path).AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((example_output, ''))
self.mox.ReplayAll()
disk_type = libvirt_utils.get_disk_type(path)
self.assertEqual(disk_type, 'raw')
@mock.patch('nova.utils.execute')
def test_copy_image_local_cp(self, mock_execute):
libvirt_utils.copy_image('src', 'dest')
mock_execute.assert_called_once_with('cp', 'src', 'dest')
_rsync_call = functools.partial(mock.call,
'rsync', '--sparse', '--compress')
@mock.patch('nova.utils.execute')
def test_copy_image_rsync(self, mock_execute):
libvirt_utils.copy_image('src', 'dest', host='host')
mock_execute.assert_has_calls([
self._rsync_call('--dry-run', 'src', 'host:dest'),
self._rsync_call('src', 'host:dest'),
])
self.assertEqual(2, mock_execute.call_count)
@mock.patch('nova.utils.execute')
def test_copy_image_scp(self, mock_execute):
mock_execute.side_effect = [
processutils.ProcessExecutionError,
mock.DEFAULT,
]
libvirt_utils.copy_image('src', 'dest', host='host')
mock_execute.assert_has_calls([
self._rsync_call('--dry-run', 'src', 'host:dest'),
mock.call('scp', 'src', 'host:dest'),
])
self.assertEqual(2, mock_execute.call_count)
class ImageUtilsTestCase(test.NoDBTestCase):
def test_disk_type(self):
# Seems like lvm detection
# if its in /dev ??
for p in ['/dev/b', '/dev/blah/blah']:
d_type = libvirt_utils.get_disk_type(p)
self.assertEqual('lvm', d_type)
# Try rbd detection
d_type = libvirt_utils.get_disk_type('rbd:pool/instance')
self.assertEqual('rbd', d_type)
# Try the other types
template_output = """image: %(path)s
file format: %(format)s
virtual size: 64M (67108864 bytes)
cluster_size: 65536
disk size: 96K
"""
path = '/myhome/disk.config'
for f in ['raw', 'qcow2']:
output = template_output % ({
'format': f,
'path': path,
})
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
os.path.exists(path).AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((output, ''))
self.mox.ReplayAll()
d_type = libvirt_utils.get_disk_type(path)
self.assertEqual(f, d_type)
self.mox.UnsetStubs()
def test_disk_backing(self):
path = '/myhome/disk.config'
template_output = """image: %(path)s
file format: raw
virtual size: 2K (2048 bytes)
cluster_size: 65536
disk size: 96K
"""
output = template_output % ({
'path': path,
})
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
os.path.exists(path).AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((output, ''))
self.mox.ReplayAll()
d_backing = libvirt_utils.get_disk_backing_file(path)
self.assertIsNone(d_backing)
def test_disk_size(self):
path = '/myhome/disk.config'
template_output = """image: %(path)s
file format: raw
virtual size: %(v_size)s (%(vsize_b)s bytes)
cluster_size: 65536
disk size: 96K
"""
for i in range(0, 128):
bytes = i * 65336
kbytes = bytes / 1024
mbytes = kbytes / 1024
output = template_output % ({
'v_size': "%sM" % (mbytes),
'vsize_b': i,
'path': path,
})
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
os.path.exists(path).AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((output, ''))
self.mox.ReplayAll()
d_size = libvirt_utils.get_disk_size(path)
self.assertEqual(i, d_size)
self.mox.UnsetStubs()
output = template_output % ({
'v_size': "%sK" % (kbytes),
'vsize_b': i,
'path': path,
})
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
os.path.exists(path).AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((output, ''))
self.mox.ReplayAll()
d_size = libvirt_utils.get_disk_size(path)
self.assertEqual(i, d_size)
self.mox.UnsetStubs()
def test_qemu_info_canon(self):
path = "disk.config"
example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
cluster_size: 65536
disk size: 96K
blah BLAH: bb
"""
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
os.path.exists(path).AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((example_output, ''))
self.mox.ReplayAll()
image_info = images.qemu_img_info(path)
self.assertEqual('disk.config', image_info.image)
self.assertEqual('raw', image_info.file_format)
self.assertEqual(67108864, image_info.virtual_size)
self.assertEqual(98304, image_info.disk_size)
self.assertEqual(65536, image_info.cluster_size)
def test_qemu_info_canon2(self):
path = "disk.config"
example_output = """image: disk.config
file format: QCOW2
virtual size: 67108844
cluster_size: 65536
disk size: 963434
backing file: /var/lib/nova/a328c7998805951a_2
"""
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
os.path.exists(path).AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((example_output, ''))
self.mox.ReplayAll()
image_info = images.qemu_img_info(path)
self.assertEqual('disk.config', image_info.image)
self.assertEqual('qcow2', image_info.file_format)
self.assertEqual(67108844, image_info.virtual_size)
self.assertEqual(963434, image_info.disk_size)
self.assertEqual(65536, image_info.cluster_size)
self.assertEqual('/var/lib/nova/a328c7998805951a_2',
image_info.backing_file)
def test_qemu_backing_file_actual(self):
path = "disk.config"
example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
cluster_size: 65536
disk size: 96K
Snapshot list:
ID TAG VM SIZE DATE VM CLOCK
1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
backing file: /var/lib/nova/a328c7998805951a_2 (actual path: /b/3a988059e51a_2)
"""
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
os.path.exists(path).AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((example_output, ''))
self.mox.ReplayAll()
image_info = images.qemu_img_info(path)
self.assertEqual('disk.config', image_info.image)
self.assertEqual('raw', image_info.file_format)
self.assertEqual(67108864, image_info.virtual_size)
self.assertEqual(98304, image_info.disk_size)
self.assertEqual(1, len(image_info.snapshots))
self.assertEqual('/b/3a988059e51a_2',
image_info.backing_file)
def test_qemu_info_convert(self):
path = "disk.config"
example_output = """image: disk.config
file format: raw
virtual size: 64M
disk size: 96K
Snapshot list:
ID TAG VM SIZE DATE VM CLOCK
1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
3 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
junk stuff: bbb
"""
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
os.path.exists(path).AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((example_output, ''))
self.mox.ReplayAll()
image_info = images.qemu_img_info(path)
self.assertEqual('disk.config', image_info.image)
self.assertEqual('raw', image_info.file_format)
self.assertEqual(67108864, image_info.virtual_size)
self.assertEqual(98304, image_info.disk_size)
def test_qemu_info_snaps(self):
path = "disk.config"
example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
disk size: 96K
Snapshot list:
ID TAG VM SIZE DATE VM CLOCK
1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
3 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
"""
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
os.path.exists(path).AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path).AndReturn((example_output, ''))
self.mox.ReplayAll()
image_info = images.qemu_img_info(path)
self.assertEqual('disk.config', image_info.image)
self.assertEqual('raw', image_info.file_format)
self.assertEqual(67108864, image_info.virtual_size)
self.assertEqual(98304, image_info.disk_size)
self.assertEqual(3, len(image_info.snapshots))
def test_valid_hostname_normal(self):
self.assertTrue(libvirt_utils.is_valid_hostname("hello.world.com"))
def test_valid_hostname_ipv4addr(self):
self.assertTrue(libvirt_utils.is_valid_hostname("10.0.2.1"))
def test_valid_hostname_ipv6addr(self):
self.assertTrue(libvirt_utils.is_valid_hostname("240:2ac3::2"))
def test_valid_hostname_bad(self):
self.assertFalse(libvirt_utils.is_valid_hostname("foo/?com=/bin/sh"))
| apache-2.0 | -7,144,820,326,888,661,000 | 37.460064 | 79 | 0.603007 | false |
luiscberrocal/homeworkpal | homeworkpal_project/project_admin/forms.py | 1 | 2808 | from datetimewidget.widgets import DateTimeWidget, DateWidget
from django.forms import forms, inlineformset_factory, BaseFormSet, ModelForm, BaseInlineFormSet
from .models import Project, Risk, ProjectMember, Deliverable
import logging
logger = logging.getLogger(__name__)
__author__ = 'luiscberrocal'
class RequiredFirstInlineFormSet(BaseInlineFormSet):
"""
Used to make empty formset forms required
See http://stackoverflow.com/questions/2406537/django-formsets-\
make-first-required/4951032#4951032
"""
def __init__(self, *args, **kwargs):
super(RequiredFirstInlineFormSet, self).__init__(*args, **kwargs)
if len(self.forms) > 0:
first_form = self.forms[0]
first_form.empty_permitted = True
logger.debug('Setting first required for %s prefix %s empty permitted %s' % (
type(first_form).__name__, first_form.prefix, first_form.empty_permitted))
class ProjectForm(ModelForm):
class Meta:
model = Project
fields = ['short_name', 'description', 'planned_start_date',
'planned_end_date', 'actual_start_date', 'actual_end_date',
'planned_man_hours', 'type', 'group', 'priority']
widgets = {
# Use localization and bootstrap 3
'planned_start_date': DateWidget(attrs={'id': "planned-start-date"}, usel10n=True, bootstrap_version=3),
'planned_end_date': DateWidget(attrs={'id': "planned-end-date"}, usel10n=True, bootstrap_version=3),
'actual_start_date': DateWidget(attrs={'id': "actual-start-date"}, usel10n=True, bootstrap_version=3),
'actual_end_date': DateWidget(attrs={'id': "actual-end-date"}, usel10n=True, bootstrap_version=3)
}
class ProjectMemberLineForm(ModelForm):
class Meta:
model = ProjectMember
fields = ['role', 'employee', 'start_date', 'end_date']
class RiskLineForm(ModelForm):
class Meta:
model = Risk
fields = ['risk_type', 'priority', 'description']
class DeliverableLineForm(ModelForm):
class Meta:
model = Deliverable
fields = ['name', 'description']
RiskLineFormSet = inlineformset_factory(Project, Risk, form=RiskLineForm, formset=RequiredFirstInlineFormSet,
extra=1)
ProjectMemberLineFormSet = inlineformset_factory(Project, ProjectMember,
form=ProjectMemberLineForm,
formset=RequiredFirstInlineFormSet, extra=1)
DeliverableLineFormset = inlineformset_factory(Project, Deliverable,
form=DeliverableLineForm,
formset=RequiredFirstInlineFormSet, extra=1)
| mit | -4,671,874,913,405,049,000 | 40.294118 | 116 | 0.624288 | false |
forging2012/taiga-back | taiga/projects/notifications/services.py | 7 | 16875 | # Copyright (C) 2014 Andrey Antukh <[email protected]>
# Copyright (C) 2014 Jesús Espino <[email protected]>
# Copyright (C) 2014 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
from functools import partial
from django.apps import apps
from django.db.transaction import atomic
from django.db import IntegrityError, transaction
from django.db.models import Q
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import get_user_model
from django.utils import timezone
from django.conf import settings
from django.utils.translation import ugettext as _
from djmail import template_mail
from taiga.base import exceptions as exc
from taiga.projects.notifications.choices import NotifyLevel
from taiga.projects.history.choices import HistoryType
from taiga.projects.history.services import (make_key_from_model_object,
get_last_snapshot_for_key,
get_model_from_key)
from taiga.permissions.service import user_has_perm
from taiga.users.models import User
from .models import HistoryChangeNotification, Watched
def notify_policy_exists(project, user) -> bool:
"""
Check if policy exists for specified project
and user.
"""
model_cls = apps.get_model("notifications", "NotifyPolicy")
qs = model_cls.objects.filter(project=project,
user=user)
return qs.exists()
def create_notify_policy(project, user, level=NotifyLevel.notwatch):
"""
Given a project and user, create notification policy for it.
"""
model_cls = apps.get_model("notifications", "NotifyPolicy")
try:
return model_cls.objects.create(project=project,
user=user,
notify_level=level)
except IntegrityError as e:
raise exc.IntegrityError(_("Notify exists for specified user and project")) from e
def create_notify_policy_if_not_exists(project, user, level=NotifyLevel.notwatch):
"""
Given a project and user, create notification policy for it.
"""
model_cls = apps.get_model("notifications", "NotifyPolicy")
try:
result = model_cls.objects.get_or_create(project=project,
user=user,
defaults={"notify_level": level})
return result[0]
except IntegrityError as e:
raise exc.IntegrityError(_("Notify exists for specified user and project")) from e
def get_notify_policy(project, user):
"""
    Get the notification policy for the specified project and user.
"""
model_cls = apps.get_model("notifications", "NotifyPolicy")
instance, _ = model_cls.objects.get_or_create(project=project, user=user,
defaults={"notify_level": NotifyLevel.notwatch})
return instance
def analize_object_for_watchers(obj:object, comment:str, user:object):
"""
    Generic implementation that analyzes a model object,
    extracts mentions from it and adds them as watchers.
"""
if not hasattr(obj, "get_project"):
return
if not hasattr(obj, "add_watcher"):
return
from taiga import mdrender as mdr
texts = (getattr(obj, "description", ""),
getattr(obj, "content", ""),
comment,)
_, data = mdr.render_and_extract(obj.get_project(), "\n".join(texts))
if data["mentions"]:
        # use a distinct name so the `user` argument (the editor) is not shadowed
        for mentioned_user in data["mentions"]:
            obj.add_watcher(mentioned_user)
# Adding the person who edited the object to the watchers
if comment and not user.is_system:
obj.add_watcher(user)
def _filter_by_permissions(obj, user):
UserStory = apps.get_model("userstories", "UserStory")
Issue = apps.get_model("issues", "Issue")
Task = apps.get_model("tasks", "Task")
WikiPage = apps.get_model("wiki", "WikiPage")
if isinstance(obj, UserStory):
return user_has_perm(user, "view_us", obj)
elif isinstance(obj, Issue):
return user_has_perm(user, "view_issues", obj)
elif isinstance(obj, Task):
return user_has_perm(user, "view_tasks", obj)
elif isinstance(obj, WikiPage):
return user_has_perm(user, "view_wiki_pages", obj)
return False
def _filter_notificable(user):
return user.is_active and not user.is_system
def get_users_to_notify(obj, *, discard_users=None) -> list:
"""
Get filtered set of users to notify for specified
model instance and changer.
    NOTE: changer at this moment is not used.
    NOTE: analogous to obj.get_watchers_to_notify(changer)
"""
project = obj.get_project()
def _check_level(project:object, user:object, levels:tuple) -> bool:
policy = get_notify_policy(project, user)
return policy.notify_level in [int(x) for x in levels]
_can_notify_hard = partial(_check_level, project,
levels=[NotifyLevel.watch])
_can_notify_light = partial(_check_level, project,
levels=[NotifyLevel.watch, NotifyLevel.notwatch])
candidates = set()
candidates.update(filter(_can_notify_hard, project.members.all()))
candidates.update(filter(_can_notify_light, obj.get_watchers()))
candidates.update(filter(_can_notify_light, obj.project.get_watchers()))
candidates.update(filter(_can_notify_light, obj.get_participants()))
# Remove the changer from candidates
if discard_users:
candidates = candidates - set(discard_users)
candidates = set(filter(partial(_filter_by_permissions, obj), candidates))
# Filter disabled and system users
candidates = set(filter(partial(_filter_notificable), candidates))
return frozenset(candidates)
def _resolve_template_name(model:object, *, change_type:int) -> str:
"""
    Given a changed model instance and change type,
    return the preformatted template name for it.
"""
ct = ContentType.objects.get_for_model(model)
# Resolve integer enum value from "change_type"
# parameter to human readable string
if change_type == HistoryType.create:
change_type = "create"
elif change_type == HistoryType.change:
change_type = "change"
else:
change_type = "delete"
tmpl = "{app_label}/{model}-{change}"
return tmpl.format(app_label=ct.app_label,
model=ct.model,
change=change_type)
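# For illustration (an inference from the content-type lookup above, not stated in the
# original source): a HistoryType.change on an Issue instance resolves to "issues/issue-change".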
def _make_template_mail(name:str):
"""
    Helper that creates an ad-hoc djmail template email
    class for the specified name and returns an instance
    of it.
"""
cls = type("InlineCSSTemplateMail",
(template_mail.InlineCSSTemplateMail,),
{"name": name})
return cls()
@transaction.atomic
def send_notifications(obj, *, history):
if history.is_hidden:
return None
key = make_key_from_model_object(obj)
owner = User.objects.get(pk=history.user["pk"])
notification, created = (HistoryChangeNotification.objects.select_for_update()
.get_or_create(key=key,
owner=owner,
project=obj.project,
history_type = history.type))
notification.updated_datetime = timezone.now()
notification.save()
notification.history_entries.add(history)
# Get a complete list of notifiable users for current
# object and send the change notification to them.
notify_users = get_users_to_notify(obj, discard_users=[notification.owner])
for notify_user in notify_users:
notification.notify_users.add(notify_user)
    # If the min interval is 0, this just works in a synchronous (and spamming) way
if settings.CHANGE_NOTIFICATIONS_MIN_INTERVAL == 0:
send_sync_notifications(notification.id)
@transaction.atomic
def send_sync_notifications(notification_id):
"""
    Given a changed instance, calculate the history entries and
    the complete list of users to notify, and send
    an email to each of them.
"""
notification = HistoryChangeNotification.objects.select_for_update().get(pk=notification_id)
# If the last modification is too recent we ignore it
now = timezone.now()
time_diff = now - notification.updated_datetime
if time_diff.seconds < settings.CHANGE_NOTIFICATIONS_MIN_INTERVAL:
return
history_entries = tuple(notification.history_entries.all().order_by("created_at"))
obj, _ = get_last_snapshot_for_key(notification.key)
obj_class = get_model_from_key(obj.key)
context = {"obj_class": obj_class,
"snapshot": obj.snapshot,
"project": notification.project,
"changer": notification.owner,
"history_entries": history_entries}
model = get_model_from_key(notification.key)
template_name = _resolve_template_name(model, change_type=notification.history_type)
email = _make_template_mail(template_name)
domain = settings.SITES["api"]["domain"].split(":")[0] or settings.SITES["api"]["domain"]
if "ref" in obj.snapshot:
msg_id = obj.snapshot["ref"]
elif "slug" in obj.snapshot:
msg_id = obj.snapshot["slug"]
else:
msg_id = 'taiga-system'
now = datetime.datetime.now()
format_args = {"project_slug": notification.project.slug,
"project_name": notification.project.name,
"msg_id": msg_id,
"time": int(now.timestamp()),
"domain": domain}
headers = {"Message-ID": "<{project_slug}/{msg_id}/{time}@{domain}>".format(**format_args),
"In-Reply-To": "<{project_slug}/{msg_id}@{domain}>".format(**format_args),
"References": "<{project_slug}/{msg_id}@{domain}>".format(**format_args),
"List-ID": 'Taiga/{project_name} <taiga.{project_slug}@{domain}>'.format(**format_args),
"Thread-Index": make_ms_thread_index("<{project_slug}/{msg_id}@{domain}>".format(**format_args), now)}
for user in notification.notify_users.distinct():
context["user"] = user
context["lang"] = user.lang or settings.LANGUAGE_CODE
email.send(user.email, context, headers=headers)
notification.delete()
def process_sync_notifications():
for notification in HistoryChangeNotification.objects.all():
send_sync_notifications(notification.pk)
def _get_q_watchers(obj):
obj_type = apps.get_model("contenttypes", "ContentType").objects.get_for_model(obj)
return Q(watched__content_type=obj_type, watched__object_id=obj.id)
def get_watchers(obj):
"""Get the watchers of an object.
:param obj: Any Django model instance.
:return: User queryset object representing the users that watch the object.
"""
return get_user_model().objects.filter(_get_q_watchers(obj))
def get_related_people(obj):
"""Get the related people of an object for notifications.
:param obj: Any Django model instance.
:return: User queryset object representing the users related to the object.
"""
related_people_q = Q()
## - Owner
if hasattr(obj, "owner_id") and obj.owner_id:
related_people_q.add(Q(id=obj.owner_id), Q.OR)
## - Assigned to
if hasattr(obj, "assigned_to_id") and obj.assigned_to_id:
related_people_q.add(Q(id=obj.assigned_to_id), Q.OR)
## - Watchers
related_people_q.add(_get_q_watchers(obj), Q.OR)
## - Apply filters
related_people = get_user_model().objects.filter(related_people_q)
## - Exclude inactive and system users and remove duplicate
related_people = related_people.exclude(is_active=False)
related_people = related_people.exclude(is_system=True)
related_people = related_people.distinct()
return related_people
def get_watched(user_or_id, model):
"""Get the objects watched by an user.
:param user_or_id: :class:`~taiga.users.models.User` instance or id.
:param model: Show only objects of this kind. Can be any Django model class.
:return: Queryset of objects representing the votes of the user.
"""
obj_type = apps.get_model("contenttypes", "ContentType").objects.get_for_model(model)
conditions = ('notifications_watched.content_type_id = %s',
'%s.id = notifications_watched.object_id' % model._meta.db_table,
'notifications_watched.user_id = %s')
if isinstance(user_or_id, get_user_model()):
user_id = user_or_id.id
else:
user_id = user_or_id
return model.objects.extra(where=conditions, tables=('notifications_watched',),
params=(obj_type.id, user_id))
def get_projects_watched(user_or_id):
"""Get the objects watched by an user.
:param user_or_id: :class:`~taiga.users.models.User` instance or id.
:param model: Show only objects of this kind. Can be any Django model class.
:return: Queryset of objects representing the votes of the user.
"""
if isinstance(user_or_id, get_user_model()):
user_id = user_or_id.id
else:
user_id = user_or_id
project_class = apps.get_model("projects", "Project")
return project_class.objects.filter(notify_policies__user__id=user_id).exclude(notify_policies__notify_level=NotifyLevel.ignore)
def add_watcher(obj, user):
"""Add a watcher to an object.
    If the user is already watching the object nothing happens (except if there is a level update),
so this function can be considered idempotent.
:param obj: Any Django model instance.
:param user: User adding the watch. :class:`~taiga.users.models.User` instance.
"""
obj_type = apps.get_model("contenttypes", "ContentType").objects.get_for_model(obj)
watched, created = Watched.objects.get_or_create(content_type=obj_type,
object_id=obj.id, user=user, project=obj.project)
notify_policy, _ = apps.get_model("notifications", "NotifyPolicy").objects.get_or_create(
project=obj.project, user=user, defaults={"notify_level": NotifyLevel.watch})
return watched
def remove_watcher(obj, user):
"""Remove an watching user from an object.
If the user has not watched the object nothing happens so this function can be considered
idempotent.
:param obj: Any Django model instance.
:param user: User removing the watch. :class:`~taiga.users.models.User` instance.
"""
obj_type = apps.get_model("contenttypes", "ContentType").objects.get_for_model(obj)
qs = Watched.objects.filter(content_type=obj_type, object_id=obj.id, user=user)
if not qs.exists():
return
qs.delete()
def set_notify_policy_level(notify_policy, notify_level):
"""
Set notification level for specified policy.
"""
if not notify_level in [e.value for e in NotifyLevel]:
raise exc.IntegrityError(_("Invalid value for notify level"))
notify_policy.notify_level = notify_level
notify_policy.save()
def set_notify_policy_level_to_ignore(notify_policy):
"""
    Set the notification level of the specified policy to "ignore".
"""
set_notify_policy_level(notify_policy, NotifyLevel.ignore)
def make_ms_thread_index(msg_id, dt):
"""
Create the 22-byte base of the thread-index string in the format:
6 bytes = First 6 significant bytes of the FILETIME stamp
16 bytes = GUID (we're using a md5 hash of the message id)
See http://www.meridiandiscovery.com/how-to/e-mail-conversation-index-metadata-computer-forensics/
"""
import base64
import hashlib
import struct
    # Convert to the FILETIME epoch (100-nanosecond intervals since 1601)
delta = datetime.date(1970, 1, 1) - datetime.date(1601, 1, 1)
filetime = int(dt.timestamp() + delta.total_seconds()) * 10000000
# only want the first 6 bytes
thread_bin = struct.pack(">Q", filetime)[:6]
# Make a guid. This is usually generated by Outlook.
# The format is usually >IHHQ, but we don't care since it's just a hash of the id
md5 = hashlib.md5(msg_id.encode('utf-8'))
thread_bin += md5.digest()
# base64 encode
return base64.b64encode(thread_bin)
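# Usage sketch (illustrative only, not part of the original module):
#   make_ms_thread_index("<my-project/42@example.com>", datetime.datetime.utcnow())
# returns the 22-byte index (6 FILETIME bytes + 16 MD5 bytes) base64-encoded.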
| agpl-3.0 | 8,612,434,298,936,628,000 | 35.053419 | 132 | 0.656552 | false |
rserban/chrono | src/demos/python/irrlicht/demo_IRR_collisionNSC.py | 3 | 9432 | #------------------------------------------------------------------------------
# Name: pychrono example
# Purpose:
#
# Author: Lijing Yang
#
# Created: 6/10/2020
# Copyright: (c) ProjectChrono 2019
#------------------------------------------------------------------------------
import pychrono.core as chrono
import pychrono.irrlicht as chronoirr
print ("Example: demonstration of collisions and contacts")
# The path to the Chrono data directory containing various assets (meshes, textures, data files)
# is automatically set, relative to the default location of this demo.
# If running from a different directory, you must change the path to the data directory with:
#chrono.SetChronoDataPath('relative/path/to/data/directory/')
def AddFallingItems(sys):
# Shared contact materials for falling objects
sph_mat = chrono.ChMaterialSurfaceNSC()
sph_mat.SetFriction(0.2)
box_mat = chrono.ChMaterialSurfaceNSC()
cyl_mat = chrono.ChMaterialSurfaceNSC()
# Create falling rigid bodies (spheres and boxes etc.)
for bi in range(29):
msphereBody = chrono.ChBodyEasySphere(1.1, # radius size
1000, # density
True, # visualization?
True, # collision?
sph_mat) # contact material
msphereBody.SetPos(chrono.ChVectorD(-5 + chrono.ChRandom() * 10, 4 + bi * 0.05, -5 + chrono.ChRandom() * 10))
sys.Add(msphereBody)
mtexture = chrono.ChTexture()
mtexture.SetTextureFilename(chrono.GetChronoDataFile("textures/bluewhite.png"))
msphereBody.AddAsset(mtexture)
mboxBody = chrono.ChBodyEasyBox(1.5, 1.5, 1.5, # x,y,z size
100, # density
True, # visualization?
True, # collision?
box_mat) # contact material
mboxBody.SetPos(chrono.ChVectorD(-5 + chrono.ChRandom() * 10, 4 + bi * 0.05, -5 + chrono.ChRandom() * 10))
sys.Add(mboxBody)
mtexturebox = chrono.ChTexture()
mtexturebox.SetTextureFilename(chrono.GetChronoDataFile("textures/cubetexture_bluewhite.png"))
mboxBody.AddAsset(mtexturebox)
mcylBody = chrono.ChBodyEasyCylinder(0.75, 0.5, # radius, height
100, # density
True, # visualization?
True, # collision?
cyl_mat) # contact material
mcylBody.SetPos(chrono.ChVectorD(-5 + chrono.ChRandom() * 10, 4 + bi * 0.05, -5 + chrono.ChRandom() * 10))
sys.Add(mcylBody)
# optional, attach a texture for better visualization
mtexturecyl = chrono.ChTexture()
mtexturecyl.SetTextureFilename(chrono.GetChronoDataFile("textures/pinkwhite.png"))
mcylBody.AddAsset(mtexturecyl)
def AddContainer(sys):
# Contact material for container
ground_mat = chrono.ChMaterialSurfaceNSC()
# Create the five walls of the rectangular container, using fixed rigid bodies of 'box' type
floorBody = chrono.ChBodyEasyBox(20, 1, 20, 1000, True, True, ground_mat)
floorBody.SetPos(chrono.ChVectorD(0, -5, 0))
floorBody.SetBodyFixed(True)
sys.Add(floorBody)
wallBody1 = chrono.ChBodyEasyBox(1, 10, 20.99, 1000, True, True, ground_mat)
wallBody1.SetPos(chrono.ChVectorD(-10, 0, 0))
wallBody1.SetBodyFixed(True)
sys.Add(wallBody1)
wallBody2 = chrono.ChBodyEasyBox(1, 10, 20.99, 1000, True, True, ground_mat)
wallBody2.SetPos(chrono.ChVectorD(10, 0, 0))
wallBody2.SetBodyFixed(True)
sys.Add(wallBody2)
wallBody3 = chrono.ChBodyEasyBox(20.99, 10, 1, 1000, False, True, ground_mat)
wallBody3.SetPos(chrono.ChVectorD(0, 0, -10))
wallBody3.SetBodyFixed(True)
sys.Add(wallBody3)
wallBody4 = chrono.ChBodyEasyBox(20.99, 10, 1, 1000, True, True, ground_mat)
wallBody4.SetPos(chrono.ChVectorD(0, 0, 10))
wallBody4.SetBodyFixed(True)
sys.Add(wallBody4)
# optional, attach textures for better visualization
mtexturewall = chrono.ChTexture()
mtexturewall.SetTextureFilename(chrono.GetChronoDataFile("textures/concrete.jpg"))
wallBody1.AddAsset(mtexturewall) # note: most assets can be shared
wallBody2.AddAsset(mtexturewall)
wallBody3.AddAsset(mtexturewall)
wallBody4.AddAsset(mtexturewall)
floorBody.AddAsset(mtexturewall)
# Add the rotating mixer
mixer_mat = chrono.ChMaterialSurfaceNSC()
mixer_mat.SetFriction(0.4)
rotatingBody = chrono.ChBodyEasyBox(10, 5, 1, # x,y,z size
4000, # density
True, # visualization?
True, # collision?
mixer_mat) # contact material
rotatingBody.SetPos(chrono.ChVectorD(0, -1.6, 0))
sys.Add(rotatingBody)
# .. a motor between mixer and truss
my_motor = chrono.ChLinkMotorRotationSpeed()
my_motor.Initialize(rotatingBody,
floorBody,
chrono.ChFrameD(chrono.ChVectorD(0, 0, 0),
chrono.Q_from_AngAxis(chrono.CH_C_PI_2, chrono.VECT_X)))
mfun = chrono.ChFunction_Const(chrono.CH_C_PI / 4.0) # speed 45 deg/s
my_motor.SetSpeedFunction(mfun)
sys.AddLink(my_motor)
# NOTE: Instead of creating five separate 'box' bodies to make
# the walls of the container, you could have used a single body
    # made of five box shapes, which builds a single collision description,
# as in the alternative approach:
"""
# create a plain ChBody (no colliding shape nor visualization mesh is used yet)
mrigidBody = chrono.ChBody()
# set as fixed body, and turn collision ON, otherwise no collide by default
mrigidBody.SetBodyFixed(True)
mrigidBody.SetCollide(True)
# Clear model. The colliding shape description MUST be between ClearModel() .. BuildModel() pair.
mrigidBody.GetCollisionModel().ClearModel()
# Describe the (invisible) colliding shape by adding five boxes (the walls and floor)
mrigidBody.GetCollisionModel().AddBox(ground_mat, 20, 1, 20, chrono.ChVectorD(0, -10, 0))
mrigidBody.GetCollisionModel().AddBox(ground_mat, 1, 40, 20, chrono.ChVectorD(-11, 0, 0))
mrigidBody.GetCollisionModel().AddBox(ground_mat, 1, 40, 20, chrono.ChVectorD(11, 0, 0))
mrigidBody.GetCollisionModel().AddBox(ground_mat, 20, 40, 1, chrono.ChVectorD(0, 0, -11))
mrigidBody.GetCollisionModel().AddBox(ground_mat, 20, 40, 1, chrono.ChVectorD(0, 0, 11))
# Complete the description of collision shape.
mrigidBody.GetCollisionModel().BuildModel()
# Attach some visualization shapes if needed:
vshape = chrono.ChBoxShape()
vshape.GetBoxGeometry().SetLengths(chrono.ChVectorD(20, 1, 20))
vshape.GetBoxGeometry().Pos = chrono.ChVectorD(0, -5, 0)
this.AddAsset(vshape)
# etc. for other 4 box shapes..
"""
return rotatingBody
# ---------------------------------------------------------------------
#
# Create the simulation system and add items
#
mysystem = chrono.ChSystemNSC()
# ---------------------------------------------------------------------
#
# Create an Irrlicht application to visualize the system
#
myapplication = chronoirr.ChIrrApp(mysystem, 'PyChrono example: Collisions between objects', chronoirr.dimension2du(1024,768))
myapplication.AddTypicalSky()
myapplication.AddTypicalLogo(chrono.GetChronoDataFile('logo_pychrono_alpha.png'))
myapplication.AddTypicalCamera(chronoirr.vector3df(0, 14 , -20))
myapplication.AddTypicalLights()
mixer = AddContainer(mysystem)
AddFallingItems(mysystem)
# ==IMPORTANT!== Use this function for adding a ChIrrNodeAsset to all items
# in the system. These ChIrrNodeAsset assets are 'proxies' to the Irrlicht meshes.
# If you need a finer control on which item really needs a visualization proxy in
# Irrlicht, just use application.AssetBind(myitem) on a per-item basis.
myapplication.AssetBindAll()
# ==IMPORTANT!== Use this function for 'converting' the assets that you added to the
# bodies into Irrlicht 3D meshes, so that they can be visualized by Irrlicht!
myapplication.AssetUpdateAll()
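# Per-item alternative (a sketch based on the note above: AssetBind is named there,
# and AssetUpdate is assumed to be the single-item counterpart of AssetUpdateAll):
#   myapplication.AssetBind(mixer)
#   myapplication.AssetUpdate(mixer)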
# Modify some settings of the physical system for the simulation, if you want
mysystem.SetSolverType(chrono.ChSolver.Type_PSOR)
mysystem.SetSolverMaxIterations(20)
# ---------------------------------------------------------------------
#
# Run the simulation
#
myapplication.SetTimestep(0.02)
myapplication.SetTryRealtime(True)
while(myapplication.GetDevice().run()):
myapplication.BeginScene()
myapplication.DrawAll()
myapplication.DoStep()
myapplication.EndScene()
# print out contact force and torque
# frc = mixer.GetAppliedForce()
# trq = mixer.GetAppliedTorque()
# print(mysystem.GetChTime())
# print("force: ", frc)
# print("torque: ", trq)
# c_frc = mixer.GetContactForce()
# c_trq = mixer.GetContactTorque()
# print(mysystem.GetChTime())
# print("contact force: ", c_frc)
# print("contact torque: ", c_trq)
| bsd-3-clause | 2,346,722,833,753,652,000 | 39.831169 | 126 | 0.62341 | false |
tyndare/osmose-backend | plugins/TagFix_MultipleTag_fr.py | 1 | 5571 | #-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Frédéric Rodrigo 2011 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
from plugins.Plugin import Plugin
class TagFix_MultipleTag_fr(Plugin):
only_for = ["fr"]
def init(self, logger):
Plugin.init(self, logger)
self.errors[3032] = { "item": 3032, "level": 1, "tag": ["tag", "fix:chair"], "desc": T_(u"Watch multiple tags") }
import re
self.Eglise = re.compile(u"(.glise|chapelle|basilique|cath.drale) de .*", re.IGNORECASE)
self.EgliseNot1 = re.compile(u"(.glise|chapelle|basilique|cath.drale) de la .*", re.IGNORECASE)
self.EgliseNot2 = re.compile(u"(.glise|chapelle|basilique|cath.drale) de l'.*", re.IGNORECASE)
self.MonumentAuxMorts = re.compile(u"monument aux morts.*", re.IGNORECASE)
self.SalleDesFetes = re.compile(u".*salle des f.tes.*", re.IGNORECASE)
self.MaisonDeQuartier = re.compile(u".*maison de quartier.*", re.IGNORECASE)
self.Marche = re.compile(u"marché( .+)?", re.IGNORECASE)
def node(self, data, tags):
err = []
if not "name" in tags:
return err
if "amenity" in tags:
if tags["amenity"] == "place_of_worship":
if self.Eglise.match(tags["name"]) and not self.EgliseNot1.match(tags["name"]) and not self.EgliseNot2.match(tags["name"]):
err.append({"class": 3032, "subclass": 1, "text": T_(u"\"name=%s\" is the localisation but not the name", tags["name"])})
else:
if "shop" not in tags and self.Marche.match(tags["name"]):
err.append({"class": 3032, "subclass": 5, "fix": {"amenity": "marketplace"}})
if "historic" in tags:
if tags["historic"] == "monument":
if self.MonumentAuxMorts.match(tags["name"]):
err.append({"class": 3032, "subclass": 2, "text": T_(u"A war memorial is not a historic=monument"),
"fix": {"historic": "memorial"} })
if (not "highway" in tags) and (self.SalleDesFetes.match(tags["name"]) or self.MaisonDeQuartier.match(tags["name"])) and not ("amenity" in tags and tags["amenity"] == "community_centre"):
err.append({"class": 3032, "subclass": 3, "text": T_(u"Put a tag for a village hall or a community center"),
"fix": {"+": {"amenity": "community_centre"}} })
return err
def way(self, data, tags, nds):
return self.node(data, tags)
def relation(self, data, tags, members):
return self.node(data, tags)
###########################################################################
from plugins.Plugin import TestPluginCommon
class Test(TestPluginCommon):
def test(self):
a = TagFix_MultipleTag_fr(None)
class _config:
options = {"language": "fr"}
class father:
config = _config()
a.father = father()
a.init(None)
for t in [{"amenity": "place_of_worship", "name": u"Église de Paris"},
{"amenity": "place_of_worship", "name": u"Cathédrale de Notre-Dame"},
{"name": u"Marché des Capucines"},
{"historic": "monument", "name": u"Monument aux morts du quartier"},
{"name": u"Salle des fêtes"},
{"name": u"Maison de quartier"},
]:
self.check_err(a.node(None, t), t)
self.check_err(a.way(None, t, None), t)
self.check_err(a.relation(None, t, None), t)
for t in [{"amenity": "place_of_worship", "name": u"Église de l'endroit"},
{"shop": "yes", "name": u"Marché des Capucines"},
{"amenity":"place_of_worship"},
{"historic": "yes", "name": u"Monument aux morts du quartier"},
{"historic": "monument", "name": u"Monument typique du quartier"},
{"highway": "primary", "name": u"Salle des fêtes"},
{"highway": "residential", "name": u"Maison de quartier"},
{"amenity": "community_centre", "name": u"Salle des fêtes"},
{"amenity": "community_centre", "name": u"Maison de quartier"},
]:
assert not a.way(None, t, None), t
| gpl-3.0 | -2,981,317,910,978,626,000 | 50.962617 | 195 | 0.498921 | false |
Grirrane/odoo | addons/account/report/report_vat.py | 7 | 11102 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from functools import partial
from openerp.osv import osv
from openerp.report import report_sxw
from common_report_header import common_report_header
class tax_report(report_sxw.rml_parse, common_report_header):
def set_context(self, objects, data, ids, report_type=None):
new_ids = ids
res = {}
if not data:
company_id = self.pool['res.users'].browse(self.cr, self.uid, self.uid).company_id.id
data = {
'form': {
'based_on': 'invoices',
'company_id': company_id,
'display_detail': False,
}
}
self.period_ids = []
period_obj = self.pool.get('account.period')
self.display_detail = data['form']['display_detail']
res['periods'] = ''
res['fiscalyear'] = data['form'].get('fiscalyear_id', False)
if data['form'].get('period_from', False) and data['form'].get('period_to', False):
self.period_ids = period_obj.build_ctx_periods(self.cr, self.uid, data['form']['period_from'], data['form']['period_to'])
periods_l = period_obj.read(self.cr, self.uid, self.period_ids, ['name'])
for period in periods_l:
if res['periods'] == '':
res['periods'] = period['name']
else:
res['periods'] += ", "+ period['name']
return super(tax_report, self).set_context(objects, data, new_ids, report_type=report_type)
def __init__(self, cr, uid, name, context=None):
super(tax_report, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'get_codes': self._get_codes,
'get_general': self._get_general,
'get_currency': self._get_currency,
'get_lines': partial(self._get_lines, context=context),
'get_fiscalyear': self._get_fiscalyear,
'get_account': self._get_account,
'get_start_period': self.get_start_period,
'get_end_period': self.get_end_period,
'get_basedon': self._get_basedon,
})
def _get_basedon(self, form):
return form['form']['based_on']
def _get_lines(self, based_on, company_id=False, parent=False, level=0, context=None):
period_list = self.period_ids
res = self._get_codes(based_on, company_id, parent, level, period_list, context=context)
if period_list:
res = self._add_codes(based_on, res, period_list, context=context)
else:
self.cr.execute ("select id from account_fiscalyear")
fy = self.cr.fetchall()
self.cr.execute ("select id from account_period where fiscalyear_id = %s",(fy[0][0],))
periods = self.cr.fetchall()
for p in periods:
period_list.append(p[0])
res = self._add_codes(based_on, res, period_list, context=context)
i = 0
top_result = []
while i < len(res):
res_dict = { 'code': res[i][1]['code'],
'name': res[i][1]['name'],
'debit': 0,
'credit': 0,
'tax_amount': res[i][1]['sum_period'],
'type': 1,
'level': res[i][0],
'pos': 0
}
top_result.append(res_dict)
res_general = self._get_general(res[i][1]['id'], period_list, company_id, based_on, context=context)
ind_general = 0
while ind_general < len(res_general):
res_general[ind_general]['type'] = 2
res_general[ind_general]['pos'] = 0
res_general[ind_general]['level'] = res_dict['level']
top_result.append(res_general[ind_general])
ind_general+=1
i+=1
return top_result
def _get_general(self, tax_code_id, period_list, company_id, based_on, context=None):
if not self.display_detail:
return []
res = []
obj_account = self.pool.get('account.account')
periods_ids = tuple(period_list)
if based_on == 'payments':
self.cr.execute('SELECT SUM(line.tax_amount) AS tax_amount, \
SUM(line.debit) AS debit, \
SUM(line.credit) AS credit, \
COUNT(*) AS count, \
account.id AS account_id, \
account.name AS name, \
account.code AS code \
FROM account_move_line AS line, \
account_account AS account, \
account_move AS move \
LEFT JOIN account_invoice invoice ON \
(invoice.move_id = move.id) \
WHERE line.state<>%s \
AND line.tax_code_id = %s \
AND line.account_id = account.id \
AND account.company_id = %s \
AND move.id = line.move_id \
AND line.period_id IN %s \
AND ((invoice.state = %s) \
OR (invoice.id IS NULL)) \
GROUP BY account.id,account.name,account.code', ('draft', tax_code_id,
company_id, periods_ids, 'paid',))
else:
self.cr.execute('SELECT SUM(line.tax_amount) AS tax_amount, \
SUM(line.debit) AS debit, \
SUM(line.credit) AS credit, \
COUNT(*) AS count, \
account.id AS account_id, \
account.name AS name, \
account.code AS code \
FROM account_move_line AS line, \
account_account AS account \
WHERE line.state <> %s \
AND line.tax_code_id = %s \
AND line.account_id = account.id \
AND account.company_id = %s \
AND line.period_id IN %s\
AND account.active \
GROUP BY account.id,account.name,account.code', ('draft', tax_code_id,
company_id, periods_ids,))
res = self.cr.dictfetchall()
i = 0
while i<len(res):
res[i]['account'] = obj_account.browse(self.cr, self.uid, res[i]['account_id'], context=context)
i+=1
return res
def _get_codes(self, based_on, company_id, parent=False, level=0, period_list=None, context=None):
obj_tc = self.pool.get('account.tax.code')
ids = obj_tc.search(self.cr, self.uid, [('parent_id','=',parent),('company_id','=',company_id)], context=context)
res = []
for code in obj_tc.browse(self.cr, self.uid, ids, {'based_on': based_on}):
res.append(('.'*2*level, code))
res += self._get_codes(based_on, company_id, code.id, level+1, context=context)
return res
def _add_codes(self, based_on, account_list=None, period_list=None, context=None):
if context is None:
context = {}
if account_list is None:
account_list = []
if period_list is None:
period_list = []
res = []
obj_tc = self.pool.get('account.tax.code')
for account in account_list:
ids = obj_tc.search(self.cr, self.uid, [('id','=', account[1].id)], context=context)
code = {}
for period_ind in period_list:
context2 = dict(context, period_id=period_ind, based_on=based_on)
record = obj_tc.browse(self.cr, self.uid, ids, context=context2)[0]
if not code:
code = {
'id': record.id,
'name': record.name,
'code': record.code,
'sequence': record.sequence,
'sum_period': 0,
}
code['sum_period'] += record.sum_period
res.append((account[0], code))
return res
def _get_currency(self, form, context=None):
return self.pool.get('res.company').browse(self.cr, self.uid, form['company_id'], context=context).currency_id.name
def sort_result(self, accounts, context=None):
        # Loop over our report
result_accounts = []
ind=0
old_level=0
while ind<len(accounts):
#
account_elem = accounts[ind]
#
#
            # we will now check if the level is lower than the previous level, in which case we add a subtotal
if (account_elem['level'] < old_level):
bcl_current_level = old_level
bcl_rup_ind = ind - 1
while (bcl_current_level >= int(accounts[bcl_rup_ind]['level']) and bcl_rup_ind >= 0 ):
res_tot = { 'code': accounts[bcl_rup_ind]['code'],
'name': '',
'debit': 0,
'credit': 0,
'tax_amount': accounts[bcl_rup_ind]['tax_amount'],
'type': accounts[bcl_rup_ind]['type'],
'level': 0,
'pos': 0
}
if res_tot['type'] == 1:
                        # change the type so that the total is displayed
res_tot['type'] = 2
result_accounts.append(res_tot)
bcl_current_level = accounts[bcl_rup_ind]['level']
bcl_rup_ind -= 1
old_level = account_elem['level']
result_accounts.append(account_elem)
ind+=1
return result_accounts
class report_vat(osv.AbstractModel):
_name = 'report.account.report_vat'
_inherit = 'report.abstract_report'
_template = 'account.report_vat'
_wrapped_report_class = tax_report
| agpl-3.0 | 1,573,555,595,836,627,000 | 41.7 | 133 | 0.50036 | false |
rachmadaniHaryono/RedditImageGrab | setup.py | 1 | 3634 | #!/usr/bin/env python
# coding: utf8
"""setup.py.
The `SQLAlchemy` package has to be restricted to a certain version because of the following bug:
https://github.com/flask-admin/flask-admin/issues/1583
"""
import os
from setuptools import setup
from setuptools.command.test import test
# taken example from
# https://coderwall.com/p/qawuyq/use-markdown-readme-s-in-python-modules
HERE = os.path.dirname(__file__)
README_PATH = os.path.join(HERE, 'readme.md')
SHORT_DESCRIPTION = 'Downloads images from sub-reddits of reddit.com'
if os.path.isfile(README_PATH):
with open(README_PATH) as _fo:
LONG_DESCRIPTION = _fo.read()
else:
LONG_DESCRIPTION = SHORT_DESCRIPTION
TESTS_REQUIRE = [
'mock',
'pytest',
'tox'
]
class Tox(test):
"""Extend setuptools/distribute test command.
It is used to trigger a test run when python setup.py test is issued.
Taken from:
https://tox.readthedocs.io/en/latest/example/basic.html
"""
user_options = [('tox-args=', 'a', "Arguments to pass to tox")]
def initialize_options(self):
"""initialize_options."""
test.initialize_options(self)
self.tox_args = None
def finalize_options(self):
"""finalize options."""
test.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
"""run tests."""
# import here, cause outside the eggs aren't loaded
import tox
import shlex
args = self.tox_args
if args:
args = shlex.split(self.tox_args)
tox.cmdline(args=args)
setup_kwargs = dict(
name='redditdownload',
version='2.1.0',
description=SHORT_DESCRIPTION,
long_description=LONG_DESCRIPTION,
# classifiers=[],
# keywords='...,...',
author='HoverHell',
author_email='[email protected]',
url='https://github.com/HoverHell/RedditImageGrab',
license='GNU General Public License v3 (GPLv3)',
keywords="reddit image downloader",
packages=['redditdownload'],
entry_points={
'console_scripts': [
'redditdl.py = redditdownload.redditdownload:main',
'redditdl-server = redditdownload.server:cli',
],
},
install_requires=[
# Most of the dependencies are kept as minimum as possible.
'beautifulsoup4>=4.5.1',
'gallery-dl>=1.1.2',
'lxml>=3.6.4',
'requests>=2.11.1',
'structlog>=17.2.0',
# required by server but used also on testing.
'Yapsy>=1.11.223',
],
tests_require=TESTS_REQUIRE,
cmdclass={'test': Tox},
extras_require={
'recommended': [
'requests',
'html5lib',
'Pillow', 'python-magic',
'pyaux', 'yaml', 'ipython', 'atomicfile',
],
'server': [
'Flask-Admin>=1.5.0',
'Flask-DebugToolbar>=0.10.1',
'flask-paginate==0.5.1',
'Flask-SQLAlchemy>=2.3.1',
'Flask-WTF>=0.14.2',
'Flask>=0.12.2',
'humanize>=0.5.1',
'SQLAlchemy-Utils>=0.32.18',
# limited package, see module description.
'SQLAlchemy==1.2.0b3',
],
},
classifiers=[
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Topic :: Terminals',
],
)
if __name__ == '__main__':
setup(**setup_kwargs)
| gpl-3.0 | 8,568,630,644,292,358,000 | 26.323308 | 84 | 0.583104 | false |
xfouloux/Flexget | flexget/plugins/input/myepisodes_list.py | 5 | 3236 | from __future__ import unicode_literals, division, absolute_import
import logging
import re
from itertools import chain
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.cached_input import cached
from flexget.utils.soup import get_soup
log = logging.getLogger('myepisodes')
URL = 'http://www.myepisodes.com/'
class MyEpisodesList(object):
"""Creates an entry for each item in your myepisodes.com show list.
Syntax:
myepisodes_list:
username: <value>
password: <value>
strip_dates: <yes|no>
include_ignored: <yes|no>
Options username and password are required.
"""
schema = {
'type': 'object',
'properties': {
'username': {'type': 'string'},
'password': {'type': 'string'},
'strip_dates': {'type': 'boolean', 'default': False},
'include_ignored': {'type': 'boolean', 'default': False},
},
'required': ['username', 'password'],
'additionalProperties': False,
}
@cached('myepisodes_list')
@plugin.internet(log)
def on_task_input(self, task, config):
if not task.requests.cookies:
username = config['username']
password = config['password']
log.debug("Logging in to %s ..." % URL)
params={
'username': username,
'password': password,
'action': 'Login'
}
loginsrc = task.requests.post(URL + 'login.php', data=params).content
if str(username) not in loginsrc:
raise plugin.PluginWarning(('Login to myepisodes.com failed, please check '
'your account data or see if the site is down.'), log)
page = task.requests.get(URL + "shows.php?type=manage").content
try:
soup = get_soup(page)
except Exception as e:
raise plugin.PluginError("Unable to parse myepisodes.com page: %s" % (e,))
entries = []
def show_list(select_id):
return soup.find('select', {'id': select_id}).findAll('option')
options = show_list('shows')
if config['include_ignored']:
options = chain(options, show_list('ignored_shows'))
for option in options:
name = option.text
if config.get('strip_dates'):
# Remove year from end of name if present
name = re.sub(r'\s+\(\d{4}\)$', '', name)
showid = option.get('value')
url = '%sviews.php?type=epsbyshow&showid=%s' % (URL, showid)
entry = Entry()
entry['title'] = name
entry['url'] = url
entry['series_name'] = name
entry['myepisodes_id'] = showid
if entry.isvalid():
entries.append(entry)
else:
log.debug('Invalid entry created? %s' % entry)
if not entries:
log.warn("No shows found on myepisodes.com list. Maybe you need to add some first?")
return entries
@event('plugin.register')
def register_plugin():
plugin.register(MyEpisodesList, 'myepisodes_list', api_ver=2)
| mit | 8,033,414,661,985,200,000 | 31.039604 | 96 | 0.564277 | false |
hjoo5/gap_eul | font.py | 1 | 1210 | def choose_font():
global m, text # I hate to use global, but for simplicity
t = tkinter.Toplevel(root)
font_name = tkinter.Label(t, text='Font Name: ')
font_name.grid(row=0, column=0, sticky='nsew')
enter_font = tkinter.Entry(t)
enter_font.grid(row=0, column=1, sticky='nsew')
font_size = tkinter.Label(t, text='Font Size: ')
font_size.grid(row=1, column=0, sticky='nsew')
enter_size = tkinter.Entry(t)
enter_size.grid(row=1, column=1, sticky='nsew')
# associating a lambda with the call to text.config()
# to change the font of text (a Text widget reference)
ok_btn = tkinter.Button(t, text='Apply Changes',
command=lambda: text.config(font=(enter_font.get(),
enter_size.get())))
ok_btn.grid(row=2, column=1, sticky='nsew')
    # just to make stretchable widgets
# you don't strictly need this
for i in range(2):
t.grid_rowconfigure(i, weight=1)
t.grid_columnconfigure(i, weight=1)
t.grid_rowconfigure(2, weight=1)
# assumed module-level setup, missing from this snippet: the tkinter import and root window
import tkinter
root = tkinter.Tk()
text = tkinter.Text(root)
text.pack(expand=1, fill='both')
chooser = tkinter.Button(root, text='Choose Font', command=choose_font)
chooser.pack(side='bottom') | gpl-3.0 | 5,209,644,874,406,466,000 | 36.84375 | 75 | 0.641322 | false |
zhimin711/nova | nova/tests/unit/pci/test_utils.py | 3 | 9362 | # Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import os
import fixtures
import mock
from six.moves import builtins
from nova import exception
from nova.pci import utils
from nova import test
class PciDeviceMatchTestCase(test.NoDBTestCase):
def setUp(self):
super(PciDeviceMatchTestCase, self).setUp()
self.fake_pci_1 = {'vendor_id': 'v1',
'device_id': 'd1'}
def test_single_spec_match(self):
self.assertTrue(utils.pci_device_prop_match(
self.fake_pci_1, [{'vendor_id': 'v1', 'device_id': 'd1'}]))
def test_multiple_spec_match(self):
self.assertTrue(utils.pci_device_prop_match(
self.fake_pci_1,
[{'vendor_id': 'v1', 'device_id': 'd1'},
{'vendor_id': 'v3', 'device_id': 'd3'}]))
def test_spec_dismatch(self):
self.assertFalse(utils.pci_device_prop_match(
self.fake_pci_1,
[{'vendor_id': 'v4', 'device_id': 'd4'},
{'vendor_id': 'v3', 'device_id': 'd3'}]))
def test_spec_extra_key(self):
self.assertFalse(utils.pci_device_prop_match(
self.fake_pci_1,
[{'vendor_id': 'v1', 'device_id': 'd1', 'wrong_key': 'k1'}]))
class PciDeviceAddressParserTestCase(test.NoDBTestCase):
def test_parse_address(self):
self.parse_result = utils.parse_address("0000:04:12.6")
self.assertEqual(self.parse_result, ('0000', '04', '12', '6'))
def test_parse_address_wrong(self):
self.assertRaises(exception.PciDeviceWrongAddressFormat,
utils.parse_address, "0000:04.12:6")
def test_parse_address_invalid_character(self):
self.assertRaises(exception.PciDeviceWrongAddressFormat,
utils.parse_address, "0000:h4.12:6")
class GetFunctionByIfnameTestCase(test.NoDBTestCase):
@mock.patch('os.path.isdir', return_value=True)
@mock.patch.object(os, 'readlink')
def test_virtual_function(self, mock_readlink, *args):
mock_readlink.return_value = '../../../0000.00.00.1'
with mock.patch.object(
builtins, 'open', side_effect=IOError()):
address, physical_function = utils.get_function_by_ifname('eth0')
self.assertEqual(address, '0000.00.00.1')
self.assertFalse(physical_function)
@mock.patch('os.path.isdir', return_value=True)
@mock.patch.object(os, 'readlink')
def test_physical_function(self, mock_readlink, *args):
mock_readlink.return_value = '../../../0000:00:00.1'
with mock.patch.object(
builtins, 'open', mock.mock_open(read_data='4')):
address, physical_function = utils.get_function_by_ifname('eth0')
self.assertEqual(address, '0000:00:00.1')
self.assertTrue(physical_function)
@mock.patch('os.path.isdir', return_value=False)
def test_exception(self, *args):
address, physical_function = utils.get_function_by_ifname('lo')
self.assertIsNone(address)
self.assertFalse(physical_function)
class IsPhysicalFunctionTestCase(test.NoDBTestCase):
def setUp(self):
super(IsPhysicalFunctionTestCase, self).setUp()
self.pci_args = utils.get_pci_address_fields('0000:00:00.1')
@mock.patch('os.path.isdir', return_value=True)
def test_virtual_function(self, *args):
with mock.patch.object(
builtins, 'open', side_effect=IOError()):
self.assertFalse(utils.is_physical_function(*self.pci_args))
@mock.patch('os.path.isdir', return_value=True)
def test_physical_function(self, *args):
with mock.patch.object(
builtins, 'open', mock.mock_open(read_data='4')):
self.assertTrue(utils.is_physical_function(*self.pci_args))
@mock.patch('os.path.isdir', return_value=False)
def test_exception(self, *args):
self.assertFalse(utils.is_physical_function(*self.pci_args))
class GetIfnameByPciAddressTestCase(test.NoDBTestCase):
def setUp(self):
super(GetIfnameByPciAddressTestCase, self).setUp()
self.pci_address = '0000:00:00.1'
@mock.patch.object(os, 'listdir')
    def test_physical_function_interface_name(self, mock_listdir):
mock_listdir.return_value = ['foo', 'bar']
ifname = utils.get_ifname_by_pci_address(
self.pci_address, pf_interface=True)
self.assertEqual(ifname, 'bar')
@mock.patch.object(os, 'listdir')
    def test_virtual_function_interface_name(self, mock_listdir):
mock_listdir.return_value = ['foo', 'bar']
ifname = utils.get_ifname_by_pci_address(
self.pci_address, pf_interface=False)
self.assertEqual(ifname, 'bar')
@mock.patch.object(os, 'listdir')
def test_exception(self, mock_listdir):
mock_listdir.side_effect = OSError('No such file or directory')
self.assertRaises(
exception.PciDeviceNotFoundById,
utils.get_ifname_by_pci_address,
self.pci_address
)
class GetMacByPciAddressTestCase(test.NoDBTestCase):
def setUp(self):
super(GetMacByPciAddressTestCase, self).setUp()
self.pci_address = '0000:07:00.1'
self.if_name = 'enp7s0f1'
self.tmpdir = self.useFixture(fixtures.TempDir())
self.fake_file = os.path.join(self.tmpdir.path, "address")
with open(self.fake_file, "w") as f:
f.write("a0:36:9f:72:00:00\n")
@mock.patch.object(os, 'listdir')
@mock.patch.object(os.path, 'join')
def test_get_mac(self, mock_join, mock_listdir):
mock_listdir.return_value = [self.if_name]
mock_join.return_value = self.fake_file
mac = utils.get_mac_by_pci_address(self.pci_address)
mock_join.assert_called_once_with(
"/sys/bus/pci/devices/%s/net" % self.pci_address, self.if_name,
"address")
self.assertEqual("a0:36:9f:72:00:00", mac)
@mock.patch.object(os, 'listdir')
@mock.patch.object(os.path, 'join')
def test_get_mac_fails(self, mock_join, mock_listdir):
os.unlink(self.fake_file)
mock_listdir.return_value = [self.if_name]
mock_join.return_value = self.fake_file
self.assertRaises(
exception.PciDeviceNotFoundById,
utils.get_mac_by_pci_address, self.pci_address)
@mock.patch.object(os, 'listdir')
@mock.patch.object(os.path, 'join')
def test_get_mac_fails_empty(self, mock_join, mock_listdir):
with open(self.fake_file, "w") as f:
f.truncate(0)
mock_listdir.return_value = [self.if_name]
mock_join.return_value = self.fake_file
self.assertRaises(
exception.PciDeviceNotFoundById,
utils.get_mac_by_pci_address, self.pci_address)
@mock.patch.object(os, 'listdir')
@mock.patch.object(os.path, 'join')
def test_get_physical_function_mac(self, mock_join, mock_listdir):
mock_listdir.return_value = [self.if_name]
mock_join.return_value = self.fake_file
mac = utils.get_mac_by_pci_address(self.pci_address, pf_interface=True)
mock_join.assert_called_once_with(
"/sys/bus/pci/devices/%s/physfn/net" % self.pci_address,
self.if_name, "address")
self.assertEqual("a0:36:9f:72:00:00", mac)
class GetVfNumByPciAddressTestCase(test.NoDBTestCase):
def setUp(self):
super(GetVfNumByPciAddressTestCase, self).setUp()
self.pci_address = '0000:00:00.1'
self.paths = [
'/sys/bus/pci/devices/0000:00:00.1/physfn/virtfn3',
]
@mock.patch.object(os, 'readlink')
@mock.patch.object(glob, 'iglob')
def test_vf_number_found(self, mock_iglob, mock_readlink):
mock_iglob.return_value = self.paths
mock_readlink.return_value = '../../0000:00:00.1'
vf_num = utils.get_vf_num_by_pci_address(self.pci_address)
self.assertEqual(vf_num, '3')
@mock.patch.object(os, 'readlink')
@mock.patch.object(glob, 'iglob')
def test_vf_number_not_found(self, mock_iglob, mock_readlink):
mock_iglob.return_value = self.paths
mock_readlink.return_value = '../../0000:00:00.2'
self.assertRaises(
exception.PciDeviceNotFoundById,
utils.get_vf_num_by_pci_address,
self.pci_address
)
@mock.patch.object(os, 'readlink')
@mock.patch.object(glob, 'iglob')
def test_exception(self, mock_iglob, mock_readlink):
mock_iglob.return_value = self.paths
mock_readlink.side_effect = OSError('No such file or directory')
self.assertRaises(
exception.PciDeviceNotFoundById,
utils.get_vf_num_by_pci_address,
self.pci_address
)
| apache-2.0 | 417,865,615,174,227,140 | 37.368852 | 79 | 0.6348 | false |
damien-dg/horizon | openstack_dashboard/dashboards/project/vpn/tables.py | 15 | 12491 | # Copyright 2013, Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import tables
forbid_updates = set(["PENDING_CREATE", "PENDING_UPDATE", "PENDING_DELETE"])
class AddIKEPolicyLink(tables.LinkAction):
name = "addikepolicy"
verbose_name = _("Add IKE Policy")
url = "horizon:project:vpn:addikepolicy"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_ikepolicy"),)
class AddIPSecPolicyLink(tables.LinkAction):
name = "addipsecpolicy"
verbose_name = _("Add IPSec Policy")
url = "horizon:project:vpn:addipsecpolicy"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_ipsecpolicy"),)
class AddVPNServiceLink(tables.LinkAction):
name = "addvpnservice"
verbose_name = _("Add VPN Service")
url = "horizon:project:vpn:addvpnservice"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_vpnservice"),)
class AddIPSecSiteConnectionLink(tables.LinkAction):
name = "addipsecsiteconnection"
verbose_name = _("Add IPSec Site Connection")
url = "horizon:project:vpn:addipsecsiteconnection"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_ipsec_site_connection"),)
class DeleteVPNServiceLink(tables.DeleteAction):
name = "deletevpnservice"
policy_rules = (("network", "delete_vpnservice"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete VPN Service",
u"Delete VPN Services",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of VPN Service",
u"Scheduled deletion of VPN Services",
count
)
def allowed(self, request, datum=None):
if datum and datum.ipsecsiteconns:
return False
return True
class DeleteIKEPolicyLink(tables.DeleteAction):
name = "deleteikepolicy"
policy_rules = (("network", "delete_ikepolicy"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete IKE Policy",
u"Delete IKE Policies",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of IKE Policy",
u"Scheduled deletion of IKE Policies",
count
)
def allowed(self, request, datum=None):
if datum and datum.ipsecsiteconns:
return False
return True
class DeleteIPSecPolicyLink(tables.DeleteAction):
name = "deleteipsecpolicy"
policy_rules = (("network", "delete_ipsecpolicy"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete IPSec Policy",
u"Delete IPSec Policies",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of IPSec Policy",
u"Scheduled deletion of IPSec Policies",
count
)
def allowed(self, request, datum=None):
if datum and datum.ipsecsiteconns:
return False
return True
class DeleteIPSecSiteConnectionLink(tables.DeleteAction):
name = "deleteipsecsiteconnection"
policy_rules = (("network", "delete_ipsec_site_connection"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete IPSec Site Connection",
u"Delete IPSec Site Connections",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of IPSec Site Connection",
u"Scheduled deletion of IPSec Site Connections",
count
)
class UpdateVPNServiceLink(tables.LinkAction):
name = "update_vpnservice"
verbose_name = _("Edit VPN Service")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_vpnservice"),)
def get_link_url(self, vpnservice):
return reverse("horizon:project:vpn:update_vpnservice",
kwargs={'vpnservice_id': vpnservice.id})
def allowed(self, request, datum=None):
if datum and datum.status not in forbid_updates:
return True
return False
class UpdateIKEPolicyLink(tables.LinkAction):
name = "updateikepolicy"
verbose_name = _("Edit IKE Policy")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_ikepolicy"),)
def get_link_url(self, ikepolicy):
return reverse("horizon:project:vpn:update_ikepolicy",
kwargs={'ikepolicy_id': ikepolicy.id})
def allowed(self, request, datum=None):
return not datum['ipsecsiteconns']
class UpdateIPSecPolicyLink(tables.LinkAction):
name = "updateipsecpolicy"
verbose_name = _("Edit IPSec Policy")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_ipsecpolicy"),)
def get_link_url(self, ipsecpolicy):
return reverse("horizon:project:vpn:update_ipsecpolicy",
kwargs={'ipsecpolicy_id': ipsecpolicy.id})
def allowed(self, request, datum=None):
return not datum['ipsecsiteconns']
class UpdateIPSecSiteConnectionLink(tables.LinkAction):
name = "updateipsecsiteconnection"
verbose_name = _("Edit Connection")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_ipsec_site_connection"),)
def get_link_url(self, ipsecsiteconnection):
return reverse("horizon:project:vpn:update_ipsecsiteconnection",
kwargs={'ipsecsiteconnection_id':
ipsecsiteconnection.id})
def allowed(self, request, datum=None):
if datum and datum.status not in forbid_updates:
return True
return False
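# A brief, hypothetical illustration of the pattern shared by the link
# actions above (the Datum stub is made up; Horizon passes the real API
# object in as ``datum``): get_link_url() builds the edit URL from the
# object's id, while allowed() hides the action for objects that are still
# transitioning or are still referenced by IPSec site connections.
#
#     class Datum(object):
#         id = "3f0ee629"
#         status = "PENDING_UPDATE"
#         ipsecsiteconns = []
#
#     # UpdateVPNServiceLink.allowed() -> False while status is listed in
#     # forbid_updates; UpdateIKEPolicyLink.allowed() -> True only when the
#     # datum's 'ipsecsiteconns' entry is empty.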
class IPSecSiteConnectionsTable(tables.DataTable):
STATUS_CHOICES = (
("Active", True),
("Down", True),
("Error", False),
)
STATUS_DISPLAY_CHOICES = (
("Active", pgettext_lazy("Current status of an IPSec Site Connection",
u"Active")),
("Down", pgettext_lazy("Current status of an IPSec Site Connection",
u"Down")),
("Error", pgettext_lazy("Current status of an IPSec Site Connection",
u"Error")),
)
id = tables.Column('id', hidden=True)
name = tables.Column('name_or_id', verbose_name=_('Name'),
link="horizon:project:vpn:ipsecsiteconnectiondetails")
description = tables.Column('description', verbose_name=_('Description'))
vpnservice_name = tables.Column('vpnservice_name',
verbose_name=_('VPN Service'))
ikepolicy_name = tables.Column('ikepolicy_name',
verbose_name=_('IKE Policy'))
ipsecpolicy_name = tables.Column('ipsecpolicy_name',
verbose_name=_('IPSec Policy'))
status = tables.Column("status",
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
class Meta(object):
name = "ipsecsiteconnectionstable"
verbose_name = _("IPSec Site Connections")
table_actions = (AddIPSecSiteConnectionLink,
DeleteIPSecSiteConnectionLink,
tables.NameFilterAction)
row_actions = (UpdateIPSecSiteConnectionLink,
DeleteIPSecSiteConnectionLink)
class VPNServicesTable(tables.DataTable):
STATUS_CHOICES = (
("Active", True),
("Down", True),
("Error", False),
)
STATUS_DISPLAY_CHOICES = (
("Active", pgettext_lazy("Current status of a VPN Service",
u"Active")),
("Down", pgettext_lazy("Current status of a VPN Service",
u"Down")),
("Error", pgettext_lazy("Current status of a VPN Service",
u"Error")),
("Created", pgettext_lazy("Current status of a VPN Service",
u"Created")),
("Pending_Create", pgettext_lazy("Current status of a VPN Service",
u"Pending Create")),
("Pending_Update", pgettext_lazy("Current status of a VPN Service",
u"Pending Update")),
("Pending_Delete", pgettext_lazy("Current status of a VPN Service",
u"Pending Delete")),
("Inactive", pgettext_lazy("Current status of a VPN Service",
u"Inactive")),
)
id = tables.Column('id', hidden=True)
name = tables.Column("name_or_id", verbose_name=_('Name'),
link="horizon:project:vpn:vpnservicedetails")
description = tables.Column('description', verbose_name=_('Description'))
subnet_name = tables.Column('subnet_name', verbose_name=_('Subnet'))
router_name = tables.Column('router_name', verbose_name=_('Router'))
status = tables.Column("status",
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
class Meta(object):
name = "vpnservicestable"
verbose_name = _("VPN Services")
table_actions = (AddVPNServiceLink,
DeleteVPNServiceLink,
tables.NameFilterAction)
row_actions = (UpdateVPNServiceLink, DeleteVPNServiceLink)
class IKEPoliciesTable(tables.DataTable):
id = tables.Column('id', hidden=True)
name = tables.Column("name_or_id", verbose_name=_('Name'),
link="horizon:project:vpn:ikepolicydetails")
description = tables.Column('description', verbose_name=_('Description'))
auth_algorithm = tables.Column('auth_algorithm',
verbose_name=_('Authorization algorithm'))
encryption_algorithm = tables.Column(
'encryption_algorithm',
verbose_name=_('Encryption algorithm'))
pfs = tables.Column("pfs", verbose_name=_('PFS'))
class Meta(object):
name = "ikepoliciestable"
verbose_name = _("IKE Policies")
table_actions = (AddIKEPolicyLink,
DeleteIKEPolicyLink,
tables.NameFilterAction)
row_actions = (UpdateIKEPolicyLink, DeleteIKEPolicyLink)
class IPSecPoliciesTable(tables.DataTable):
id = tables.Column('id', hidden=True)
name = tables.Column("name_or_id", verbose_name=_('Name'),
link="horizon:project:vpn:ipsecpolicydetails")
description = tables.Column('description', verbose_name=_('Description'))
auth_algorithm = tables.Column('auth_algorithm',
verbose_name=_('Authorization algorithm'))
encryption_algorithm = tables.Column(
'encryption_algorithm',
verbose_name=_('Encryption algorithm'))
pfs = tables.Column("pfs", verbose_name=_('PFS'))
class Meta(object):
name = "ipsecpoliciestable"
verbose_name = _("IPSec Policies")
table_actions = (AddIPSecPolicyLink,
DeleteIPSecPolicyLink,
tables.NameFilterAction)
row_actions = (UpdateIPSecPolicyLink, DeleteIPSecPolicyLink)
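# A hedged reading of the status columns above, shown as comments only:
# STATUS_CHOICES maps a raw status to the boolean Horizon uses for row
# polling (True = settled, False = failed), while STATUS_DISPLAY_CHOICES
# maps the same raw value to the translated label rendered in the table.
#
#     dict(VPNServicesTable.STATUS_DISPLAY_CHOICES)["Pending_Create"]
#     # -> the lazily translated "Pending Create" label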
| apache-2.0 | -4,770,567,308,548,457,000 | 35.101156 | 79 | 0.598751 | false |
daniel-leschkowski/generateDSv2 | tests/ipo1_sup.py | 2 | 59442 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated by generateDS.py.
#
import sys
import getopt
import re as re_
import base64
from datetime import datetime, tzinfo, timedelta
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError(
"Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser()
doc = etree_.parse(*args, **kwargs)
return doc
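# Illustrative use of parsexml_ (comments only; "ipo.xml" is a hypothetical
# input document). It accepts the same arguments as etree.parse(), so either
# a file name or a file-like object works:
#
#     doc = parsexml_('ipo.xml')
#     root = doc.getroot()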
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError, exp:
class GeneratedsSuper(object):
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(tzinfo):
def __init__(self, offset, name):
self.__offset = timedelta(minutes = offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return '%f' % input_data
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return input_data
def gds_validate_datetime(self, input_data, node, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = input_data.strftime('%Y-%m-%dT%H:%M:%S')
else:
_svalue = input_data.strftime('%Y-%m-%dT%H:%M:%S.%f')
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_parse_datetime(self, input_data, node, input_name=''):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
return dt.replace(tzinfo = tz)
def gds_validate_date(self, input_data, node, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = input_data.strftime('%Y-%m-%d')
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_parse_date(self, input_data, node, input_name=''):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
return datetime.strptime(input_data,
'%Y-%m-%d').replace(tzinfo = tz)
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'ascii'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
if pretty_print:
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
if not inStr:
return ''
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            s1 = '"%s"' % s1.replace('"', "&quot;")
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
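# Illustrative behaviour of the two escaping helpers above (results shown as
# comments, not executed here):
#
#     quote_xml('a < b & c')     # -> a &lt; b &amp; c
#     quote_attrib('say "hi"')   # -> 'say "hi"'           (wrapped in single quotes)
#     quote_attrib('it\'s "x"')  # -> "it's &quot;x&quot;" (double quotes escaped)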
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
if XMLParser_import_library == XMLParser_import_lxml:
msg = '%s (element %s/line %d)' % (
msg, node.tag, node.sourceline, )
else:
msg = '%s (element %s)' % (msg, node.tag, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace, pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(outfile, level, namespace, name, pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
outfile.write('<%s>%s</%s>' %
(self.name, base64.b64encode(self.value), self.name))
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n'
% (self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n'
% (self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s",\n' % \
(self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class PurchaseOrderType(GeneratedsSuper):
member_data_items_ = [
MemberSpec_('orderDate', 'date', 0),
MemberSpec_('shipTo', 'Address', 0),
MemberSpec_('billTo', 'Address', 0),
MemberSpec_('comment', 'string', 0),
MemberSpec_('items', 'Items', 0),
]
subclass = None
superclass = None
def __init__(self, orderDate=None, shipTo=None, billTo=None, comment=None, items=None):
self.orderDate = _cast(None, orderDate)
self.shipTo = shipTo
self.billTo = billTo
self.comment = comment
self.items = items
def factory(*args_, **kwargs_):
if PurchaseOrderType.subclass:
return PurchaseOrderType.subclass(*args_, **kwargs_)
else:
return PurchaseOrderType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_shipTo(self): return self.shipTo
def set_shipTo(self, shipTo): self.shipTo = shipTo
def get_billTo(self): return self.billTo
def set_billTo(self, billTo): self.billTo = billTo
def get_comment(self): return self.comment
def set_comment(self, comment): self.comment = comment
def get_items(self): return self.items
def set_items(self, items): self.items = items
def get_orderDate(self): return self.orderDate
def set_orderDate(self, orderDate): self.orderDate = orderDate
def export(self, outfile, level, namespace_='ipo:', name_='PurchaseOrderType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='PurchaseOrderType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='ipo:', name_='PurchaseOrderType'):
if self.orderDate is not None and 'orderDate' not in already_processed:
already_processed.append('orderDate')
outfile.write(' orderDate="%s"' % self.gds_format_date(self.orderDate, input_name='orderDate'))
def exportChildren(self, outfile, level, namespace_='ipo:', name_='PurchaseOrderType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.shipTo is not None:
self.shipTo.export(outfile, level, namespace_, name_='shipTo', pretty_print=pretty_print)
if self.billTo is not None:
self.billTo.export(outfile, level, namespace_, name_='billTo', pretty_print=pretty_print)
if self.comment is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scomment>%s</%scomment>%s' % (namespace_, self.gds_format_string(quote_xml(self.comment).encode(ExternalEncoding), input_name='comment'), namespace_, eol_))
if self.items is not None:
self.items.export(outfile, level, namespace_, name_='items', pretty_print=pretty_print)
def hasContent_(self):
if (
self.shipTo is not None or
self.billTo is not None or
self.comment is not None or
self.items is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='PurchaseOrderType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.orderDate is not None and 'orderDate' not in already_processed:
already_processed.append('orderDate')
showIndent(outfile, level)
outfile.write('orderDate = "%s",\n' % (self.orderDate,))
def exportLiteralChildren(self, outfile, level, name_):
if self.shipTo is not None:
showIndent(outfile, level)
outfile.write('shipTo=model_.Address(\n')
self.shipTo.exportLiteral(outfile, level, name_='shipTo')
showIndent(outfile, level)
outfile.write('),\n')
if self.billTo is not None:
showIndent(outfile, level)
outfile.write('billTo=model_.Address(\n')
self.billTo.exportLiteral(outfile, level, name_='billTo')
showIndent(outfile, level)
outfile.write('),\n')
if self.comment is not None:
showIndent(outfile, level)
outfile.write('comment=%s,\n' % quote_python(self.comment).encode(ExternalEncoding))
if self.items is not None:
showIndent(outfile, level)
outfile.write('items=model_.Items(\n')
self.items.exportLiteral(outfile, level, name_='items')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('orderDate', node)
if value is not None and 'orderDate' not in already_processed:
already_processed.append('orderDate')
try:
self.orderDate = self.gds_parse_date(value, node, 'orderDate')
except ValueError, exp:
raise ValueError('Bad date attribute (orderDate): %s' % exp)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'shipTo':
class_obj_ = self.get_class_obj_(child_, Address)
obj_ = class_obj_.factory()
obj_.build(child_)
self.set_shipTo(obj_)
elif nodeName_ == 'billTo':
class_obj_ = self.get_class_obj_(child_, Address)
obj_ = class_obj_.factory()
obj_.build(child_)
self.set_billTo(obj_)
elif nodeName_ == 'comment':
comment_ = child_.text
comment_ = self.gds_validate_string(comment_, node, 'comment')
self.comment = comment_
elif nodeName_ == 'items':
obj_ = Items.factory()
obj_.build(child_)
self.set_items(obj_)
# end class PurchaseOrderType
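# A minimal, illustrative way to build and serialise a purchase order with
# the classes in this module (all values are made up; the referenced classes
# are defined further down in this file):
#
#     import sys
#     order = PurchaseOrderType(
#         shipTo=USAddress(name='Alice', street='1 Main St',
#                          city='Springfield', state='IL', zip=62701),
#         items=Items(item=[itemType(partNum='872-AA',
#                                    productName='Lawnmower',
#                                    quantity=1, USPrice=148.95)]),
#     )
#     order.export(sys.stdout, 0, name_='purchaseOrder')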
class Items(GeneratedsSuper):
member_data_items_ = [
MemberSpec_('item', 'itemType', 1),
]
subclass = None
superclass = None
def __init__(self, item=None):
if item is None:
self.item = []
else:
self.item = item
def factory(*args_, **kwargs_):
if Items.subclass:
return Items.subclass(*args_, **kwargs_)
else:
return Items(*args_, **kwargs_)
factory = staticmethod(factory)
def get_item(self): return self.item
def set_item(self, item): self.item = item
def add_item(self, value): self.item.append(value)
def insert_item(self, index, value): self.item[index] = value
def export(self, outfile, level, namespace_='ipo:', name_='Items', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Items')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='ipo:', name_='Items'):
pass
def exportChildren(self, outfile, level, namespace_='ipo:', name_='Items', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for item_ in self.item:
item_.export(outfile, level, namespace_, name_='item', pretty_print=pretty_print)
def hasContent_(self):
if (
self.item
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='Items'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('item=[\n')
level += 1
for item_ in self.item:
showIndent(outfile, level)
outfile.write('model_.itemType(\n')
item_.exportLiteral(outfile, level, name_='itemType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'item':
obj_ = itemType.factory()
obj_.build(child_)
self.item.append(obj_)
# end class Items
class Address(GeneratedsSuper):
member_data_items_ = [
MemberSpec_('name', 'string', 0),
MemberSpec_('street', 'string', 0),
MemberSpec_('city', 'string', 0),
]
subclass = None
superclass = None
def __init__(self, name=None, street=None, city=None, extensiontype_=None):
self.name = name
self.street = street
self.city = city
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if Address.subclass:
return Address.subclass(*args_, **kwargs_)
else:
return Address(*args_, **kwargs_)
factory = staticmethod(factory)
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_street(self): return self.street
def set_street(self, street): self.street = street
def get_city(self): return self.city
def set_city(self, city): self.city = city
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def export(self, outfile, level, namespace_='ipo:', name_='Address', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='Address')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='ipo:', name_='Address'):
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.append('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
pass
def exportChildren(self, outfile, level, namespace_='ipo:', name_='Address', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.name is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sname>%s</%sname>%s' % (namespace_, self.gds_format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_, eol_))
if self.street is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sstreet>%s</%sstreet>%s' % (namespace_, self.gds_format_string(quote_xml(self.street).encode(ExternalEncoding), input_name='street'), namespace_, eol_))
if self.city is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scity>%s</%scity>%s' % (namespace_, self.gds_format_string(quote_xml(self.city).encode(ExternalEncoding), input_name='city'), namespace_, eol_))
def hasContent_(self):
if (
self.name is not None or
self.street is not None or
self.city is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='Address'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.name is not None:
showIndent(outfile, level)
outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
if self.street is not None:
showIndent(outfile, level)
outfile.write('street=%s,\n' % quote_python(self.street).encode(ExternalEncoding))
if self.city is not None:
showIndent(outfile, level)
outfile.write('city=%s,\n' % quote_python(self.city).encode(ExternalEncoding))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.append('xsi:type')
self.extensiontype_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'name':
name_ = child_.text
name_ = self.gds_validate_string(name_, node, 'name')
self.name = name_
elif nodeName_ == 'street':
street_ = child_.text
street_ = self.gds_validate_string(street_, node, 'street')
self.street = street_
elif nodeName_ == 'city':
city_ = child_.text
city_ = self.gds_validate_string(city_, node, 'city')
self.city = city_
# end class Address
class USAddress(Address):
member_data_items_ = [
MemberSpec_('state', ['USState', 'string'], 0),
MemberSpec_('zip', 'positiveInteger', 0),
]
subclass = None
superclass = Address
def __init__(self, name=None, street=None, city=None, state=None, zip=None):
super(USAddress, self).__init__(name, street, city, )
self.state = state
self.zip = zip
def factory(*args_, **kwargs_):
if USAddress.subclass:
return USAddress.subclass(*args_, **kwargs_)
else:
return USAddress(*args_, **kwargs_)
factory = staticmethod(factory)
def get_state(self): return self.state
def set_state(self, state): self.state = state
def validate_USState(self, value):
# Validate type USState, a restriction on string.
pass
def get_zip(self): return self.zip
def set_zip(self, zip): self.zip = zip
def export(self, outfile, level, namespace_='ipo:', name_='USAddress', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='USAddress')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='ipo:', name_='USAddress'):
super(USAddress, self).exportAttributes(outfile, level, already_processed, namespace_, name_='USAddress')
def exportChildren(self, outfile, level, namespace_='ipo:', name_='USAddress', fromsubclass_=False, pretty_print=True):
super(USAddress, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.state is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sstate>%s</%sstate>%s' % (namespace_, self.gds_format_string(quote_xml(self.state).encode(ExternalEncoding), input_name='state'), namespace_, eol_))
if self.zip is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%szip>%s</%szip>%s' % (namespace_, self.gds_format_integer(self.zip, input_name='zip'), namespace_, eol_))
def hasContent_(self):
if (
self.state is not None or
self.zip is not None or
super(USAddress, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='USAddress'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(USAddress, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(USAddress, self).exportLiteralChildren(outfile, level, name_)
if self.state is not None:
showIndent(outfile, level)
outfile.write('state=%s,\n' % quote_python(self.state).encode(ExternalEncoding))
if self.zip is not None:
showIndent(outfile, level)
outfile.write('zip=%d,\n' % self.zip)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(USAddress, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'state':
state_ = child_.text
state_ = self.gds_validate_string(state_, node, 'state')
self.state = state_
self.validate_USState(self.state) # validate type USState
elif nodeName_ == 'zip':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ <= 0:
raise_parse_error(child_, 'requires positiveInteger')
ival_ = self.gds_validate_integer(ival_, node, 'zip')
self.zip = ival_
super(USAddress, self).buildChildren(child_, node, nodeName_, True)
# end class USAddress
class UKAddress(Address):
member_data_items_ = [
MemberSpec_('category_attr', 'xs:string', 0),
MemberSpec_('exportCode', 'positiveInteger', 0),
MemberSpec_('postcode', ['UKPostcode', 'string'], 0),
MemberSpec_('category', 'string', 0),
]
subclass = None
superclass = Address
def __init__(self, name=None, street=None, city=None, category_attr=None, exportCode=None, postcode=None, category=None):
super(UKAddress, self).__init__(name, street, city, )
self.category_attr = _cast(None, category_attr)
self.exportCode = _cast(int, exportCode)
self.postcode = postcode
self.category = category
def factory(*args_, **kwargs_):
if UKAddress.subclass:
return UKAddress.subclass(*args_, **kwargs_)
else:
return UKAddress(*args_, **kwargs_)
factory = staticmethod(factory)
def get_postcode(self): return self.postcode
def set_postcode(self, postcode): self.postcode = postcode
def validate_UKPostcode(self, value):
# Validate type UKPostcode, a restriction on string.
pass
def get_category(self): return self.category
def set_category(self, category): self.category = category
def get_category_attr(self): return self.category_attr
def set_category_attr(self, category_attr): self.category_attr = category_attr
def get_exportCode(self): return self.exportCode
def set_exportCode(self, exportCode): self.exportCode = exportCode
def export(self, outfile, level, namespace_='ipo:', name_='UKAddress', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='UKAddress')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='ipo:', name_='UKAddress'):
super(UKAddress, self).exportAttributes(outfile, level, already_processed, namespace_, name_='UKAddress')
if self.category_attr is not None and 'category_attr' not in already_processed:
already_processed.append('category_attr')
outfile.write(' category=%s' % (quote_attrib(self.category_attr), ))
if self.exportCode is not None and 'exportCode' not in already_processed:
already_processed.append('exportCode')
outfile.write(' exportCode="%s"' % self.gds_format_integer(self.exportCode, input_name='exportCode'))
def exportChildren(self, outfile, level, namespace_='ipo:', name_='UKAddress', fromsubclass_=False, pretty_print=True):
super(UKAddress, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.postcode is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%spostcode>%s</%spostcode>%s' % (namespace_, self.gds_format_string(quote_xml(self.postcode).encode(ExternalEncoding), input_name='postcode'), namespace_, eol_))
if self.category is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scategory>%s</%scategory>%s' % (namespace_, self.gds_format_string(quote_xml(self.category).encode(ExternalEncoding), input_name='category'), namespace_, eol_))
def hasContent_(self):
if (
self.postcode is not None or
self.category is not None or
super(UKAddress, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='UKAddress'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.category_attr is not None and 'category_attr' not in already_processed:
already_processed.append('category_attr')
showIndent(outfile, level)
outfile.write('category_attr = %s,\n' % (self.category_attr,))
if self.exportCode is not None and 'exportCode' not in already_processed:
already_processed.append('exportCode')
showIndent(outfile, level)
outfile.write('exportCode = %d,\n' % (self.exportCode,))
super(UKAddress, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(UKAddress, self).exportLiteralChildren(outfile, level, name_)
if self.postcode is not None:
showIndent(outfile, level)
outfile.write('postcode=%s,\n' % quote_python(self.postcode).encode(ExternalEncoding))
if self.category is not None:
showIndent(outfile, level)
outfile.write('category=%s,\n' % quote_python(self.category).encode(ExternalEncoding))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('category', node)
if value is not None and 'category_attr' not in already_processed:
already_processed.append('category_attr')
self.category_attr = value
value = find_attr_value_('exportCode', node)
if value is not None and 'exportCode' not in already_processed:
already_processed.append('exportCode')
try:
self.exportCode = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.exportCode <= 0:
raise_parse_error(node, 'Invalid PositiveInteger')
super(UKAddress, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'postcode':
postcode_ = child_.text
postcode_ = self.gds_validate_string(postcode_, node, 'postcode')
self.postcode = postcode_
self.validate_UKPostcode(self.postcode) # validate type UKPostcode
elif nodeName_ == 'category':
category_ = child_.text
category_ = self.gds_validate_string(category_, node, 'category')
self.category = category_
super(UKAddress, self).buildChildren(child_, node, nodeName_, True)
# end class UKAddress
class itemType(GeneratedsSuper):
member_data_items_ = [
MemberSpec_('partNum', 'ipo:SKU', 0),
MemberSpec_('productName', 'string', 0),
MemberSpec_('quantity', ['quantity', 'positiveInteger'], 0),
MemberSpec_('USPrice', 'decimal', 0),
MemberSpec_('comment', 'string', 0),
MemberSpec_('shipDate', 'date', 0),
]
subclass = None
superclass = None
def __init__(self, partNum=None, productName=None, quantity=None, USPrice=None, comment=None, shipDate=None):
self.partNum = _cast(None, partNum)
self.productName = productName
self.quantity = quantity
self.USPrice = USPrice
self.comment = comment
self.shipDate = shipDate
def factory(*args_, **kwargs_):
if itemType.subclass:
return itemType.subclass(*args_, **kwargs_)
else:
return itemType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_productName(self): return self.productName
def set_productName(self, productName): self.productName = productName
def get_quantity(self): return self.quantity
def set_quantity(self, quantity): self.quantity = quantity
def get_USPrice(self): return self.USPrice
def set_USPrice(self, USPrice): self.USPrice = USPrice
def get_comment(self): return self.comment
def set_comment(self, comment): self.comment = comment
def get_shipDate(self): return self.shipDate
def set_shipDate(self, shipDate): self.shipDate = shipDate
def get_partNum(self): return self.partNum
def set_partNum(self, partNum): self.partNum = partNum
def export(self, outfile, level, namespace_='ipo:', name_='itemType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='itemType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='ipo:', name_='itemType'):
if self.partNum is not None and 'partNum' not in already_processed:
already_processed.append('partNum')
outfile.write(' partNum=%s' % (quote_attrib(self.partNum), ))
def exportChildren(self, outfile, level, namespace_='ipo:', name_='itemType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.productName is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sproductName>%s</%sproductName>%s' % (namespace_, self.gds_format_string(quote_xml(self.productName).encode(ExternalEncoding), input_name='productName'), namespace_, eol_))
if self.quantity is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%squantity>%s</%squantity>%s' % (namespace_, self.gds_format_integer(self.quantity, input_name='quantity'), namespace_, eol_))
if self.USPrice is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sUSPrice>%s</%sUSPrice>%s' % (namespace_, self.gds_format_float(self.USPrice, input_name='USPrice'), namespace_, eol_))
if self.comment is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scomment>%s</%scomment>%s' % (namespace_, self.gds_format_string(quote_xml(self.comment).encode(ExternalEncoding), input_name='comment'), namespace_, eol_))
if self.shipDate is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sshipDate>%s</%sshipDate>%s' % (namespace_, self.gds_format_date(self.shipDate, input_name='shipDate'), namespace_, eol_))
def hasContent_(self):
if (
self.productName is not None or
self.quantity is not None or
self.USPrice is not None or
self.comment is not None or
self.shipDate is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='itemType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.partNum is not None and 'partNum' not in already_processed:
already_processed.append('partNum')
showIndent(outfile, level)
outfile.write('partNum = %s,\n' % (self.partNum,))
def exportLiteralChildren(self, outfile, level, name_):
if self.productName is not None:
showIndent(outfile, level)
outfile.write('productName=%s,\n' % quote_python(self.productName).encode(ExternalEncoding))
if self.quantity is not None:
showIndent(outfile, level)
outfile.write('quantity=%d,\n' % self.quantity)
if self.USPrice is not None:
showIndent(outfile, level)
outfile.write('USPrice=%f,\n' % self.USPrice)
if self.comment is not None:
showIndent(outfile, level)
outfile.write('comment=%s,\n' % quote_python(self.comment).encode(ExternalEncoding))
if self.shipDate is not None:
showIndent(outfile, level)
outfile.write('shipDate=datetime_.strptime("%s", "%%Y-%%m-%%d"),\n' % self.gds_format_date(self.shipDate, input_name='shipDate'))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('partNum', node)
if value is not None and 'partNum' not in already_processed:
already_processed.append('partNum')
self.partNum = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'productName':
productName_ = child_.text
productName_ = self.gds_validate_string(productName_, node, 'productName')
self.productName = productName_
elif nodeName_ == 'quantity':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ <= 0:
raise_parse_error(child_, 'requires positiveInteger')
ival_ = self.gds_validate_integer(ival_, node, 'quantity')
self.quantity = ival_
elif nodeName_ == 'USPrice':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'USPrice')
self.USPrice = fval_
elif nodeName_ == 'comment':
comment_ = child_.text
comment_ = self.gds_validate_string(comment_, node, 'comment')
self.comment = comment_
elif nodeName_ == 'shipDate':
sval_ = child_.text
dval_ = self.gds_parse_date(sval_, node, 'shipDate')
self.shipDate = dval_
# end class itemType
class quantity(GeneratedsSuper):
member_data_items_ = [
]
subclass = None
superclass = None
def __init__(self, valueOf_=None):
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if quantity.subclass:
return quantity.subclass(*args_, **kwargs_)
else:
return quantity(*args_, **kwargs_)
factory = staticmethod(factory)
def export(self, outfile, level, namespace_='ipo:', name_='quantity', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='quantity')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='ipo:', name_='quantity'):
pass
def exportChildren(self, outfile, level, namespace_='ipo:', name_='quantity', fromsubclass_=False, pretty_print=True):
pass
def hasContent_(self):
if (
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='quantity'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class quantity
GDSClassesMapping = {
'items': Items,
'purchaseOrder': PurchaseOrderType,
'shipTo': Address,
'billTo': Address,
'item': itemType,
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'purchaseOrder'
rootClass = PurchaseOrderType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_=rootTag,
namespacedef_='',
pretty_print=True)
return rootObj
def parseString(inString):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'purchaseOrder'
rootClass = PurchaseOrderType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_="purchaseOrder",
namespacedef_='')
return rootObj
def parseLiteral(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'purchaseOrder'
rootClass = PurchaseOrderType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('#from ipo2_sup import *\n\n')
sys.stdout.write('from datetime import datetime as datetime_\n\n')
sys.stdout.write('import ipo2_sup as model_\n\n')
sys.stdout.write('rootObj = model_.rootTag(\n')
rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"Address",
"Items",
"PurchaseOrderType",
"UKAddress",
"USAddress",
"itemType",
"quantity"
]
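# Typical entry points for this module, shown as comments ("order.xml" and
# the inline XML string are hypothetical inputs):
#
#     rootObj = parse('order.xml')          # parse a file, echo it as XML
#     rootObj = parseString('<purchaseOrder>...</purchaseOrder>')
#     rootObj = parseLiteral('order.xml')   # emit the tree as Python literals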
| mit | -2,623,688,434,600,015,000 | 41.887446 | 201 | 0.581912 | false |
Tesora-Release/tesora-trove-dashboard | trove_dashboard/test/helpers.py | 2 | 1738 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from troveclient import client as trove_client
from openstack_dashboard.test import helpers
from trove_dashboard import api
from trove_dashboard.test.test_data import utils
def create_stubs(stubs_to_create={}):
return helpers.create_stubs(stubs_to_create)
class TroveTestsMixin(object):
def _setup_test_data(self):
super(TroveTestsMixin, self)._setup_test_data()
utils.load_test_data(self)
class TestCase(TroveTestsMixin, helpers.TestCase):
pass
class BaseAdminViewTests(TroveTestsMixin, helpers.TestCase):
pass
class TroveAPITestCase(helpers.APITestCase):
def setUp(self):
super(TroveAPITestCase, self).setUp()
self._original_troveclient = api.trove.client
api.trove.client = lambda request: self.stub_troveclient()
def tearDown(self):
super(TroveAPITestCase, self).tearDown()
api.trove.client = self._original_troveclient
def stub_troveclient(self):
if not hasattr(self, "troveclient"):
self.mox.StubOutWithMock(trove_client, 'Client')
self.troveclient = self.mox.CreateMock(trove_client.Client)
return self.troveclient
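# Hypothetical sketch of how these helpers are intended to be used in a test
# module (the class and method names below are made up for illustration):
#
#     class InstancesAPITests(TroveAPITestCase):
#         def test_client_is_stubbed(self):
#             client = self.stub_troveclient()
#             # record expectations on the mox mock, then:
#             self.mox.ReplayAll()
#             # exercise code that calls api.trove.client(request)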
| apache-2.0 | -4,966,032,793,658,775,000 | 29.491228 | 78 | 0.714614 | false |
vjmac15/Lyilis | lib/pymongo/read_preferences.py | 2 | 16515 | # Copyright 2012-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for choosing which member of a replica set to read from."""
from collections import Mapping
from bson.py3compat import integer_types
from pymongo import max_staleness_selectors
from pymongo.errors import ConfigurationError
from pymongo.server_selectors import (member_with_tags_server_selector,
secondary_with_tags_server_selector)
_PRIMARY = 0
_PRIMARY_PREFERRED = 1
_SECONDARY = 2
_SECONDARY_PREFERRED = 3
_NEAREST = 4
_MONGOS_MODES = (
'primary',
'primaryPreferred',
'secondary',
'secondaryPreferred',
'nearest',
)
def _validate_tag_sets(tag_sets):
"""Validate tag sets for a MongoReplicaSetClient.
"""
if tag_sets is None:
return tag_sets
if not isinstance(tag_sets, list):
raise TypeError((
"Tag sets %r invalid, must be a list") % (tag_sets,))
if len(tag_sets) == 0:
raise ValueError((
"Tag sets %r invalid, must be None or contain at least one set of"
" tags") % (tag_sets,))
for tags in tag_sets:
if not isinstance(tags, Mapping):
raise TypeError(
"Tag set %r invalid, must be an instance of dict, "
"bson.son.SON or other type that inherits from "
"collection.Mapping" % (tags,))
return tag_sets
def _invalid_max_staleness_msg(max_staleness):
return ("maxStalenessSeconds must be a positive integer, not %s" %
max_staleness)
# Some duplication with common.py to avoid import cycle.
def _validate_max_staleness(max_staleness):
"""Validate max_staleness."""
if max_staleness == -1:
return -1
if not isinstance(max_staleness, integer_types):
raise TypeError(_invalid_max_staleness_msg(max_staleness))
if max_staleness <= 0:
raise ValueError(_invalid_max_staleness_msg(max_staleness))
return max_staleness
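# Illustrative behaviour of the two validators above (comments only):
#
#     _validate_tag_sets([{'dc': 'ny'}, {}])   # -> returned unchanged
#     _validate_tag_sets([])                   # -> ValueError: needs >= 1 set
#     _validate_max_staleness(-1)              # -> -1 (means "no maximum")
#     _validate_max_staleness(0)               # -> ValueError: must be positive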
class _ServerMode(object):
"""Base class for all read preferences.
"""
__slots__ = ("__mongos_mode", "__mode", "__tag_sets", "__max_staleness")
def __init__(self, mode, tag_sets=None, max_staleness=-1):
self.__mongos_mode = _MONGOS_MODES[mode]
self.__mode = mode
self.__tag_sets = _validate_tag_sets(tag_sets)
self.__max_staleness = _validate_max_staleness(max_staleness)
@property
def name(self):
"""The name of this read preference.
"""
return self.__class__.__name__
@property
def mongos_mode(self):
"""The mongos mode of this read preference.
"""
return self.__mongos_mode
@property
def document(self):
"""Read preference as a document.
"""
doc = {'mode': self.__mongos_mode}
if self.__tag_sets not in (None, [{}]):
doc['tags'] = self.__tag_sets
if self.__max_staleness != -1:
doc['maxStalenessSeconds'] = self.__max_staleness
return doc
@property
def mode(self):
"""The mode of this read preference instance.
"""
return self.__mode
@property
def tag_sets(self):
"""Set ``tag_sets`` to a list of dictionaries like [{'dc': 'ny'}] to
read only from members whose ``dc`` tag has the value ``"ny"``.
To specify a priority-order for tag sets, provide a list of
tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. A final, empty tag
set, ``{}``, means "read from any member that matches the mode,
ignoring tags." MongoReplicaSetClient tries each set of tags in turn
until it finds a set of tags with at least one matching member.
.. seealso:: `Data-Center Awareness
<http://www.mongodb.org/display/DOCS/Data+Center+Awareness>`_
"""
return list(self.__tag_sets) if self.__tag_sets else [{}]
@property
def max_staleness(self):
"""The maximum estimated length of time (in seconds) a replica set
secondary can fall behind the primary in replication before it will
no longer be selected for operations, or -1 for no maximum."""
return self.__max_staleness
@property
def min_wire_version(self):
"""The wire protocol version the server must support.
Some read preferences impose version requirements on all servers (e.g.
maxStalenessSeconds requires MongoDB 3.4 / maxWireVersion 5).
All servers' maxWireVersion must be at least this read preference's
`min_wire_version`, or the driver raises
:exc:`~pymongo.errors.ConfigurationError`.
"""
return 0 if self.__max_staleness == -1 else 5
def __repr__(self):
return "%s(tag_sets=%r, max_staleness=%r)" % (
self.name, self.__tag_sets, self.__max_staleness)
def __eq__(self, other):
if isinstance(other, _ServerMode):
return (self.mode == other.mode and
self.tag_sets == other.tag_sets and
self.max_staleness == other.max_staleness)
return NotImplemented
def __ne__(self, other):
return not self == other
def __getstate__(self):
"""Return value of object for pickling.
Needed explicitly because __slots__() defined.
"""
return {'mode': self.__mode,
'tag_sets': self.__tag_sets,
'max_staleness': self.__max_staleness}
def __setstate__(self, value):
"""Restore from pickling."""
self.__mode = value['mode']
self.__mongos_mode = _MONGOS_MODES[self.__mode]
self.__tag_sets = _validate_tag_sets(value['tag_sets'])
self.__max_staleness = _validate_max_staleness(value['max_staleness'])
class Primary(_ServerMode):
"""Primary read preference.
* When directly connected to one mongod queries are allowed if the server
is standalone or a replica set primary.
* When connected to a mongos queries are sent to the primary of a shard.
* When connected to a replica set queries are sent to the primary of
the replica set.
"""
__slots__ = ()
def __init__(self):
super(Primary, self).__init__(_PRIMARY)
def __call__(self, selection):
"""Apply this read preference to a Selection."""
return selection.primary_selection
def __repr__(self):
return "Primary()"
def __eq__(self, other):
if isinstance(other, _ServerMode):
return other.mode == _PRIMARY
return NotImplemented
class PrimaryPreferred(_ServerMode):
"""PrimaryPreferred read preference.
* When directly connected to one mongod queries are allowed to standalone
servers, to a replica set primary, or to replica set secondaries.
* When connected to a mongos queries are sent to the primary of a shard if
available, otherwise a shard secondary.
* When connected to a replica set queries are sent to the primary if
available, otherwise a secondary.
:Parameters:
- `tag_sets`: The :attr:`~tag_sets` to use if the primary is not
available.
- `max_staleness`: (integer, in seconds) The maximum estimated
length of time a replica set secondary can fall behind the primary in
replication before it will no longer be selected for operations.
Default -1, meaning no maximum. If it is set, it must be at least
90 seconds.
"""
__slots__ = ()
def __init__(self, tag_sets=None, max_staleness=-1):
super(PrimaryPreferred, self).__init__(_PRIMARY_PREFERRED,
tag_sets,
max_staleness)
def __call__(self, selection):
"""Apply this read preference to Selection."""
if selection.primary:
return selection.primary_selection
else:
return secondary_with_tags_server_selector(
self.tag_sets,
max_staleness_selectors.select(
self.max_staleness, selection))
class Secondary(_ServerMode):
"""Secondary read preference.
* When directly connected to one mongod queries are allowed to standalone
servers, to a replica set primary, or to replica set secondaries.
* When connected to a mongos queries are distributed among shard
secondaries. An error is raised if no secondaries are available.
* When connected to a replica set queries are distributed among
secondaries. An error is raised if no secondaries are available.
:Parameters:
- `tag_sets`: The :attr:`~tag_sets` for this read preference.
- `max_staleness`: (integer, in seconds) The maximum estimated
length of time a replica set secondary can fall behind the primary in
replication before it will no longer be selected for operations.
Default -1, meaning no maximum. If it is set, it must be at least
90 seconds.
"""
__slots__ = ()
def __init__(self, tag_sets=None, max_staleness=-1):
super(Secondary, self).__init__(_SECONDARY, tag_sets, max_staleness)
def __call__(self, selection):
"""Apply this read preference to Selection."""
return secondary_with_tags_server_selector(
self.tag_sets,
max_staleness_selectors.select(
self.max_staleness, selection))
class SecondaryPreferred(_ServerMode):
"""SecondaryPreferred read preference.
* When directly connected to one mongod queries are allowed to standalone
servers, to a replica set primary, or to replica set secondaries.
* When connected to a mongos queries are distributed among shard
secondaries, or the shard primary if no secondary is available.
* When connected to a replica set queries are distributed among
secondaries, or the primary if no secondary is available.
:Parameters:
- `tag_sets`: The :attr:`~tag_sets` for this read preference.
- `max_staleness`: (integer, in seconds) The maximum estimated
length of time a replica set secondary can fall behind the primary in
replication before it will no longer be selected for operations.
Default -1, meaning no maximum. If it is set, it must be at least
90 seconds.
"""
__slots__ = ()
def __init__(self, tag_sets=None, max_staleness=-1):
super(SecondaryPreferred, self).__init__(_SECONDARY_PREFERRED,
tag_sets,
max_staleness)
def __call__(self, selection):
"""Apply this read preference to Selection."""
secondaries = secondary_with_tags_server_selector(
self.tag_sets,
max_staleness_selectors.select(
self.max_staleness, selection))
if secondaries:
return secondaries
else:
return selection.primary_selection
class Nearest(_ServerMode):
"""Nearest read preference.
* When directly connected to one mongod queries are allowed to standalone
servers, to a replica set primary, or to replica set secondaries.
* When connected to a mongos queries are distributed among all members of
a shard.
* When connected to a replica set queries are distributed among all
members.
:Parameters:
- `tag_sets`: The :attr:`~tag_sets` for this read preference.
- `max_staleness`: (integer, in seconds) The maximum estimated
length of time a replica set secondary can fall behind the primary in
replication before it will no longer be selected for operations.
Default -1, meaning no maximum. If it is set, it must be at least
90 seconds.
"""
__slots__ = ()
def __init__(self, tag_sets=None, max_staleness=-1):
super(Nearest, self).__init__(_NEAREST, tag_sets, max_staleness)
def __call__(self, selection):
"""Apply this read preference to Selection."""
return member_with_tags_server_selector(
self.tag_sets,
max_staleness_selectors.select(
self.max_staleness, selection))
_ALL_READ_PREFERENCES = (Primary, PrimaryPreferred,
Secondary, SecondaryPreferred, Nearest)
def make_read_preference(mode, tag_sets, max_staleness=-1):
if mode == _PRIMARY:
if tag_sets not in (None, [{}]):
raise ConfigurationError("Read preference primary "
"cannot be combined with tags")
if max_staleness != -1:
raise ConfigurationError("Read preference primary cannot be "
"combined with maxStalenessSeconds")
return Primary()
return _ALL_READ_PREFERENCES[mode](tag_sets, max_staleness)
_MODES = (
'PRIMARY',
'PRIMARY_PREFERRED',
'SECONDARY',
'SECONDARY_PREFERRED',
'NEAREST',
)
class ReadPreference(object):
"""An enum that defines the read preference modes supported by PyMongo.
See :doc:`/examples/high_availability` for code examples.
A read preference is used in three cases:
:class:`~pymongo.mongo_client.MongoClient` connected to a single mongod:
- ``PRIMARY``: Queries are allowed if the server is standalone or a replica
set primary.
- All other modes allow queries to standalone servers, to a replica set
primary, or to replica set secondaries.
:class:`~pymongo.mongo_client.MongoClient` initialized with the
``replicaSet`` option:
- ``PRIMARY``: Read from the primary. This is the default, and provides the
strongest consistency. If no primary is available, raise
:class:`~pymongo.errors.AutoReconnect`.
- ``PRIMARY_PREFERRED``: Read from the primary if available, or if there is
none, read from a secondary.
- ``SECONDARY``: Read from a secondary. If no secondary is available,
raise :class:`~pymongo.errors.AutoReconnect`.
- ``SECONDARY_PREFERRED``: Read from a secondary if available, otherwise
from the primary.
- ``NEAREST``: Read from any member.
:class:`~pymongo.mongo_client.MongoClient` connected to a mongos, with a
sharded cluster of replica sets:
- ``PRIMARY``: Read from the primary of the shard, or raise
:class:`~pymongo.errors.OperationFailure` if there is none.
This is the default.
- ``PRIMARY_PREFERRED``: Read from the primary of the shard, or if there is
none, read from a secondary of the shard.
- ``SECONDARY``: Read from a secondary of the shard, or raise
:class:`~pymongo.errors.OperationFailure` if there is none.
- ``SECONDARY_PREFERRED``: Read from a secondary of the shard if available,
otherwise from the shard primary.
- ``NEAREST``: Read from any shard member.
"""
PRIMARY = Primary()
PRIMARY_PREFERRED = PrimaryPreferred()
SECONDARY = Secondary()
SECONDARY_PREFERRED = SecondaryPreferred()
NEAREST = Nearest()
def read_pref_mode_from_name(name):
"""Get the read preference mode from mongos/uri name.
"""
return _MONGOS_MODES.index(name)
class MovingAverage(object):
"""Tracks an exponentially-weighted moving average."""
def __init__(self):
self.average = None
def add_sample(self, sample):
if sample < 0:
# Likely system time change while waiting for ismaster response
# and not using time.monotonic. Ignore it, the next one will
# probably be valid.
return
if self.average is None:
self.average = sample
else:
# The Server Selection Spec requires an exponentially weighted
# average with alpha = 0.2.
self.average = 0.8 * self.average + 0.2 * sample
def get(self):
"""Get the calculated average, or None if no samples yet."""
return self.average
def reset(self):
self.average = None
| gpl-3.0 | -1,705,393,581,778,410,000 | 33.915433 | 79 | 0.628641 | false |
g-fleischer/wtfy | trackingserver/thirdparty/tornado/tornado/__init__.py | 8 | 1098 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Tornado web server and tools."""
from __future__ import absolute_import, division, with_statement
# version is a human-readable version number.
# version_info is a four-tuple for programmatic comparison. The first
# three numbers are the components of the version number. The fourth
# is zero for an official release, positive for a development branch,
# or negative for a release candidate (after the base version number
# has been incremented)
version = "2.3"
version_info = (2, 3, 0, 0)
| gpl-3.0 | 4,952,082,865,183,179,000 | 36.862069 | 75 | 0.755009 | false |
jmorse/numbness | util.py | 1 | 1040 | import config
def print_integer(val, width):
return "(_ bv{0} {1})".format(val, width)
def sparticus(r, match, slot):
return "(sparticus {0} {1} {2})".format(print_integer(r, config.ROUNDBITS),
print_integer(match, config.MATCHBITS),
print_integer(slot, config.SLOTBITS))
def read_smt_bvnum(num):
lparen, underscore, bvnum, width, rparen = num
bvnum, = bvnum
bvnum = bvnum[2:]
return int(bvnum)
def read_func_app(expr):
# Function application looks like this:
# (sparticus (_ bv0 4) (_ bv0 4) (_ bv0 2))
# And we can be confident that it has the righ tnumber of vars.
lparen, funcname, arg1, arg2, arg3, rparen = expr
return read_smt_bvnum(arg1), read_smt_bvnum(arg2), read_smt_bvnum(arg3)
def read_assign(string):
if len(string) == 1:
string, = string
hash, bee, string2 = string
if hash == '#' and bee == 'b':
return int(string2, 2)
else:
raise Exception("I don't recognize SMT string {0}".format(string))
else:
return read_smt_bvnum(string)
| bsd-2-clause | -2,551,587,749,110,294,500 | 30.515152 | 83 | 0.641346 | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/mcl_platform/data/env/__init__.py | 1 | 1153 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: __init__.py
def CheckValue(name, globalValue=False):
import dsz
if globalValue:
return dsz.env.Check(name)
else:
return dsz.env.Check(name, int(dsz.script.Env['script_command_id']))
def DeleteValue(name, globalValue=False):
import dsz
if globalValue:
rtn = dsz.env.Delete(name)
else:
rtn = dsz.env.Delete(name, int(dsz.script.Env['script_command_id']))
if not rtn:
raise RuntimeError('Delete of %s env value failed' % name)
def GetValue(name, globalValue=False):
import dsz
if globalValue:
return dsz.env.Get(name)
else:
return dsz.env.Get(name, int(dsz.script.Env['script_command_id']))
def SetValue(name, value, globalValue=False):
import dsz
if globalValue:
rtn = dsz.env.Set(name, value)
else:
rtn = dsz.env.Set(name, value, int(dsz.script.Env['script_command_id']))
if not rtn:
raise RuntimeError('Set of %s env value failed' % name) | unlicense | -1,954,939,991,147,152,400 | 27.146341 | 80 | 0.645273 | false |
sdoran35/hate-to-hugs | venv/lib/python3.6/site-packages/nltk/classify/tadm.py | 5 | 3515 | # Natural Language Toolkit: Interface to TADM Classifier
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Joseph Frazee <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, unicode_literals
import sys
import subprocess
from nltk import compat
from nltk.internals import find_binary
try:
import numpy
except ImportError:
pass
_tadm_bin = None
def config_tadm(bin=None):
global _tadm_bin
_tadm_bin = find_binary(
'tadm', bin,
env_vars=['TADM'],
binary_names=['tadm'],
url='http://tadm.sf.net')
def write_tadm_file(train_toks, encoding, stream):
"""
Generate an input file for ``tadm`` based on the given corpus of
classified tokens.
:type train_toks: list(tuple(dict, str))
:param train_toks: Training data, represented as a list of
pairs, the first member of which is a feature dictionary,
and the second of which is a classification label.
:type encoding: TadmEventMaxentFeatureEncoding
:param encoding: A feature encoding, used to convert featuresets
into feature vectors.
:type stream: stream
:param stream: The stream to which the ``tadm`` input file should be
written.
"""
# See the following for a file format description:
#
# http://sf.net/forum/forum.php?thread_id=1391502&forum_id=473054
# http://sf.net/forum/forum.php?thread_id=1675097&forum_id=473054
labels = encoding.labels()
for featureset, label in train_toks:
length_line = '%d\n' % len(labels)
stream.write(length_line)
for known_label in labels:
v = encoding.encode(featureset, known_label)
line = '%d %d %s\n' % (
int(label == known_label),
len(v),
' '.join('%d %d' % u for u in v)
)
stream.write(line)
def parse_tadm_weights(paramfile):
"""
Given the stdout output generated by ``tadm`` when training a
model, return a ``numpy`` array containing the corresponding weight
vector.
"""
weights = []
for line in paramfile:
weights.append(float(line.strip()))
return numpy.array(weights, 'd')
def call_tadm(args):
"""
Call the ``tadm`` binary with the given arguments.
"""
if isinstance(args, compat.string_types):
raise TypeError('args should be a list of strings')
if _tadm_bin is None:
config_tadm()
# Call tadm via a subprocess
cmd = [_tadm_bin] + args
p = subprocess.Popen(cmd, stdout=sys.stdout)
(stdout, stderr) = p.communicate()
# Check the return code.
if p.returncode != 0:
print()
print(stderr)
raise OSError('tadm command failed!')
def names_demo():
from nltk.classify.util import names_demo
from nltk.classify.maxent import TadmMaxentClassifier
classifier = names_demo(TadmMaxentClassifier.train)
def encoding_demo():
import sys
from nltk.classify.maxent import TadmEventMaxentFeatureEncoding
tokens = [({'f0':1, 'f1':1, 'f3':1}, 'A'),
({'f0':1, 'f2':1, 'f4':1}, 'B'),
({'f0':2, 'f2':1, 'f3':1, 'f4':1}, 'A')]
encoding = TadmEventMaxentFeatureEncoding.train(tokens)
write_tadm_file(tokens, encoding, sys.stdout)
print()
for i in range(encoding.length()):
print('%s --> %d' % (encoding.describe(i), i))
print()
if __name__ == '__main__':
encoding_demo()
names_demo()
| mit | -6,331,268,552,049,120,000 | 30.383929 | 72 | 0.62532 | false |
google/deepvariant | third_party/nucleus/io/python/fastq_writer_wrap_test.py | 1 | 5906 | # Copyright 2018 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for FastqWriter CLIF python wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
if 'google' in sys.modules and 'google.protobuf' not in sys.modules:
del sys.modules['google']
from absl.testing import absltest
from absl.testing import parameterized
from tensorflow.python.platform import gfile
from third_party.nucleus.io import fastq
from third_party.nucleus.io import tfrecord
from third_party.nucleus.io.python import fastq_writer
from third_party.nucleus.protos import fastq_pb2
from third_party.nucleus.testing import test_utils
_DOUBLE_CLOSE_ERROR = 'Cannot close an already closed FastqWriter'
_WRITE_TO_CLOSED_ERROR = 'Cannot write to closed FASTQ stream'
class WrapFastqWriterTest(parameterized.TestCase):
def setUp(self):
writer_options = fastq_pb2.FastqWriterOptions()
out_fname = test_utils.test_tmpfile('output.fastq')
self.writer = fastq_writer.FastqWriter.to_file(out_fname, writer_options)
self.expected_fastq_content = [
'@NODESC:header\n',
'GATTACA\n',
'+\n',
'BB>B@FA\n',
'@M01321:49:000000000-A6HWP:1:1101:17009:2216 1:N:0:1\n',
'CGTTAGCGCAGGGGGCATCTTCACACTGGTGACAGGTAACCGCCGTAGTAAAGGTTCCGCCTTTCACT\n',
'+\n',
'AAAAABF@BBBDGGGG?FFGFGHBFBFBFABBBHGGGFHHCEFGGGGG?FGFFHEDG3EFGGGHEGHG\n',
'@FASTQ contains multiple spaces in description\n',
'CGGCTGGTCAGGCTGACATCGCCGCCGGCCTGCAGCGAGCCGCTGC\n',
'+\n',
'FAFAF;F/9;.:/;999B/9A.DFFF;-->.AAB/FC;9-@-=;=.\n',
'@FASTQ_with_trailing_space\n',
'CGG\n',
'+\n',
'FAD\n',
]
self.record = fastq_pb2.FastqRecord(
id='ID', description='desc', sequence='ACGTAC', quality='ABCDEF')
def test_writing_canned_records(self):
"""Tests writing all the variants that are 'canned' in our tfrecord file."""
# This file is in TFRecord format.
tfrecord_file = test_utils.genomics_core_testdata(
'test_reads.fastq.tfrecord')
writer_options = fastq_pb2.FastqWriterOptions()
fastq_records = list(
tfrecord.read_tfrecords(tfrecord_file, proto=fastq_pb2.FastqRecord))
out_fname = test_utils.test_tmpfile('output.fastq')
with fastq_writer.FastqWriter.to_file(out_fname, writer_options) as writer:
for record in fastq_records:
writer.write(record)
with gfile.Open(out_fname, 'r') as f:
self.assertEqual(f.readlines(), self.expected_fastq_content)
def test_context_manager(self):
with self.writer:
# Writing within the context manager succeeds.
self.assertIsNone(self.writer.write(self.record))
# self.writer should be closed, so writing again will fail.
with self.assertRaisesRegexp(ValueError, _WRITE_TO_CLOSED_ERROR):
self.writer.write(self.record)
def test_double_context_manager(self):
with self.writer:
# Writing within the context manager succeeds.
self.assertIsNone(self.writer.write(self.record))
with self.assertRaisesRegexp(ValueError, _DOUBLE_CLOSE_ERROR):
# Entering the closed writer should be fine.
with self.writer:
pass # We want to raise an error on exit, so nothing to do in context.
class WrapFastqWriterRoundTripTests(parameterized.TestCase):
@parameterized.parameters('test_reads.fastq', 'test_reads.fastq.gz')
def test_round_trip_fastq(self, test_datum_name):
# Round-trip FASTQ records through writing and reading:
# 1. Read records v1 from FastqReader;
# 2. Write v1 to fastq using our FastqWriter;
# 3. Read back in using FastqReader -- v2;
# 4. compare v1 and v2.
in_file = test_utils.genomics_core_testdata(test_datum_name)
out_file = test_utils.test_tmpfile('output_' + test_datum_name)
v1_reader = fastq.FastqReader(in_file)
v1_records = list(v1_reader.iterate())
self.assertTrue(v1_records, 'Reader failed to find records')
writer_options = fastq_pb2.FastqWriterOptions()
with fastq_writer.FastqWriter.to_file(out_file, writer_options) as writer:
for record in v1_records:
writer.write(record)
v2_reader = fastq.FastqReader(out_file)
v2_records = list(v2_reader.iterate())
self.assertEqual(v1_records, v2_records,
'Round-tripped FASTQ files not as expected')
if __name__ == '__main__':
absltest.main()
| bsd-3-clause | -3,413,702,303,076,360,700 | 39.452055 | 81 | 0.718591 | false |
pronto/dotfiles | .vim/pylibs/logilab/common/urllib2ext.py | 24 | 3374 | import logging
import urllib2
import kerberos as krb
class GssapiAuthError(Exception):
"""raised on error during authentication process"""
import re
RGX = re.compile('(?:.*,)*\s*Negotiate\s*([^,]*),?', re.I)
def get_negociate_value(headers):
for authreq in headers.getheaders('www-authenticate'):
match = RGX.search(authreq)
if match:
return match.group(1)
class HTTPGssapiAuthHandler(urllib2.BaseHandler):
"""Negotiate HTTP authentication using context from GSSAPI"""
handler_order = 400 # before Digest Auth
def __init__(self):
self._reset()
def _reset(self):
self._retried = 0
self._context = None
def clean_context(self):
if self._context is not None:
krb.authGSSClientClean(self._context)
def http_error_401(self, req, fp, code, msg, headers):
try:
if self._retried > 5:
raise urllib2.HTTPError(req.get_full_url(), 401,
"negotiate auth failed", headers, None)
self._retried += 1
logging.debug('gssapi handler, try %s' % self._retried)
negotiate = get_negociate_value(headers)
if negotiate is None:
logging.debug('no negociate found in a www-authenticate header')
return None
logging.debug('HTTPGssapiAuthHandler: negotiate 1 is %r' % negotiate)
result, self._context = krb.authGSSClientInit("HTTP@%s" % req.get_host())
if result < 1:
raise GssapiAuthError("HTTPGssapiAuthHandler: init failed with %d" % result)
result = krb.authGSSClientStep(self._context, negotiate)
if result < 0:
raise GssapiAuthError("HTTPGssapiAuthHandler: step 1 failed with %d" % result)
client_response = krb.authGSSClientResponse(self._context)
logging.debug('HTTPGssapiAuthHandler: client response is %s...' % client_response[:10])
req.add_unredirected_header('Authorization', "Negotiate %s" % client_response)
server_response = self.parent.open(req)
negotiate = get_negociate_value(server_response.info())
if negotiate is None:
logging.warning('HTTPGssapiAuthHandler: failed to authenticate server')
else:
logging.debug('HTTPGssapiAuthHandler negotiate 2: %s' % negotiate)
result = krb.authGSSClientStep(self._context, negotiate)
if result < 1:
raise GssapiAuthError("HTTPGssapiAuthHandler: step 2 failed with %d" % result)
return server_response
except GssapiAuthError, exc:
logging.error(repr(exc))
finally:
self.clean_context()
self._reset()
if __name__ == '__main__':
import sys
# debug
import httplib
httplib.HTTPConnection.debuglevel = 1
httplib.HTTPSConnection.debuglevel = 1
# debug
import logging
logging.basicConfig(level=logging.DEBUG)
# handle cookies
import cookielib
cj = cookielib.CookieJar()
ch = urllib2.HTTPCookieProcessor(cj)
# test with url sys.argv[1]
h = HTTPGssapiAuthHandler()
response = urllib2.build_opener(h, ch).open(sys.argv[1])
print '\nresponse: %s\n--------------\n' % response.code, response.info()
| bsd-2-clause | 2,195,780,718,057,201,000 | 37.781609 | 99 | 0.609662 | false |
BrandonY/gsutil | gslib/addlhelp/dev.py | 10 | 6525 | # -*- coding: utf-8 -*-
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional help about contributing code to gsutil."""
from __future__ import absolute_import
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW</B>
We're open to incorporating gsutil code changes authored by users. Here
are some guidelines:
1. Before we can accept code submissions, we have to jump a couple of legal
hurdles. Please fill out either the individual or corporate Contributor
License Agreement:
- If you are an individual writing original source code and you're
sure you own the intellectual property,
then you'll need to sign an individual CLA
(https://cla.developers.google.com/about/google-individual).
- If you work for a company that wants to allow you to contribute your
work to gsutil, then you'll need to sign a corporate CLA
(https://cla.developers.google.com/about/google-corporate)
Follow either of the two links above to access the appropriate CLA and
instructions for how to sign and return it. Once we receive it, we'll
add you to the official list of contributors and be able to accept
your patches.
2. If you found a bug or have an idea for a feature enhancement, we suggest
you check https://github.com/GoogleCloudPlatform/gsutil/issues to see if it
has already been reported by another user. From there you can also
subscribe to updates to the issue.
3. If a GitHub issue doesn't already exist, create one about your idea before
sending actual code. Often we can discuss the idea and help propose things
that could save you later revision work.
4. We tend to avoid adding command line options that are of use to only
a very small fraction of users, especially if there's some other way
to accommodate such needs. Adding such options complicates the code and
also adds overhead to users having to read through an "alphabet soup"
list of option documentation.
5. While gsutil has a number of features specific to Google Cloud Storage,
it can also be used with other cloud storage providers. We're open to
including changes for making gsutil support features specific to other
providers, as long as those changes don't make gsutil work worse for Google
Cloud Storage. If you do make such changes we recommend including someone
with knowledge of the specific provider as a code reviewer (see below).
6. You can check out the gsutil code from the GitHub repository:
https://github.com/GoogleCloudPlatform/gsutil
To clone a read-only copy of the repository:
git clone git://github.com/GoogleCloudPlatform/gsutil.git
To push your own changes to GitHub, click the Fork button on the
repository page and clone the repository from your own fork.
7. The gsutil git repository uses git submodules to pull in external modules.
After checking out the repository, make sure to also pull the submodules
by entering into the gsutil top-level directory and run:
git submodule update --init --recursive
8. Please make sure to run all tests against your modified code. To
do this, change directories into the gsutil top-level directory and run:
./gsutil test
The above tests take a long time to run because they send many requests to
the production service. The gsutil test command has a -u argument that will
only run unit tests. These run quickly, as they are executed with an
in-memory mock storage service implementation. To run only the unit tests,
run:
./gsutil test -u
If you made changes to boto, please run the boto tests. For these tests you
need to use HMAC credentials (from gsutil config -a), because the current
boto test suite doesn't import the OAuth2 handler. You'll also need to
install some python modules. Change directories into the boto root
directory at third_party/boto and run:
pip install -r requirements.txt
(You probably need to run this command using sudo.)
Make sure each of the individual installations succeeded. If they don't
you may need to run the install command again.
Then ensure your .boto file has HMAC credentials defined (the boto tests
don't load the OAUTH2 plugin), and then change directories into boto's
tests directory and run:
python test.py unit
python test.py -t s3 -t gs -t ssl
9. Please consider contributing test code for your change, especially if the
change impacts any of the core gsutil code (like the gsutil cp command).
10. When it's time to send us code, please use the Rietveld code review tool
rather than simply sending us a code patch. Do this as follows:
- Check out the gsutil code from your fork of the gsutil repository and
apply your changes.
- Download the "upload.py" script from
https://github.com/rietveld-codereview/rietveld
- Run upload.py from your git directory with the changes.
- Click the codereview.appspot.com link it generates, click "Edit Issue",
and add [email protected] and [email protected] as reviewers, and
Cc [email protected].
- Click Publish+Mail Comments.
- Once your changes are accepted, submit a pull request on GitHub and we
will merge your commits.
""")
class CommandOptions(HelpProvider):
"""Additional help about contributing code to gsutil."""
# TODO: gsutil-beta: Add lint .rc file and linting instructions.
# Help specification. See help_provider.py for documentation.
help_spec = HelpProvider.HelpSpec(
help_name='dev',
help_name_aliases=[
'development', 'developer', 'code', 'mods', 'software'],
help_type='additional_help',
help_one_line_summary='Contributing Code to gsutil',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
| apache-2.0 | -6,072,491,776,404,896,000 | 43.387755 | 80 | 0.724751 | false |
SpectreJan/gnuradio | grc/gui/MainWindow.py | 5 | 16918 | """
Copyright 2008, 2009, 2011 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import os
import gtk
from . import Bars, Actions, Preferences, Utils
from .BlockTreeWindow import BlockTreeWindow
from .VariableEditor import VariableEditor
from .Constants import \
NEW_FLOGRAPH_TITLE, DEFAULT_CONSOLE_WINDOW_WIDTH
from .Dialogs import TextDisplay, MessageDialogHelper
from .NotebookPage import NotebookPage
from ..core import Messages
MAIN_WINDOW_TITLE_TMPL = """\
#if not $saved
*#slurp
#end if
#if $basename
$basename#slurp
#else
$new_flowgraph_title#slurp
#end if
#if $read_only
(read only)#slurp
#end if
#if $dirname
- $dirname#slurp
#end if
- $platform_name#slurp
"""
PAGE_TITLE_MARKUP_TMPL = """\
#set $foreground = $saved and 'black' or 'red'
<span foreground="$foreground">$encode($title or $new_flowgraph_title)</span>#slurp
#if $read_only
(ro)#slurp
#end if
"""
############################################################
# Main window
############################################################
class MainWindow(gtk.Window):
"""The topmost window with menus, the tool bar, and other major windows."""
# Constants the action handler can use to indicate which panel visibility to change.
BLOCKS = 0
CONSOLE = 1
VARIABLES = 2
def __init__(self, platform, action_handler_callback):
"""
MainWindow constructor
Setup the menu, toolbar, flow graph editor notebook, block selection window...
"""
self._platform = platform
gen_opts = platform.blocks['options'].get_param('generate_options')
generate_mode_default = gen_opts.get_value()
generate_modes = [
(o.get_key(), o.get_name(), o.get_key() == generate_mode_default)
for o in gen_opts.get_options()]
# Load preferences
Preferences.load(platform)
# Setup window
gtk.Window.__init__(self, gtk.WINDOW_TOPLEVEL)
vbox = gtk.VBox()
self.add(vbox)
icon_theme = gtk.icon_theme_get_default()
icon = icon_theme.lookup_icon("gnuradio-grc", 48, 0)
if not icon:
# Set window icon
self.set_icon_from_file(os.path.dirname(os.path.abspath(__file__)) + "/icon.png")
# Create the menu bar and toolbar
self.add_accel_group(Actions.get_accel_group())
self.menu_bar = Bars.MenuBar(generate_modes, action_handler_callback)
vbox.pack_start(self.menu_bar, False)
self.tool_bar = Bars.Toolbar(generate_modes, action_handler_callback)
vbox.pack_start(self.tool_bar, False)
# Main parent container for the different panels
self.container = gtk.HPaned()
vbox.pack_start(self.container)
# Create the notebook
self.notebook = gtk.Notebook()
self.page_to_be_closed = None
self.current_page = None
self.notebook.set_show_border(False)
self.notebook.set_scrollable(True) # scroll arrows for page tabs
self.notebook.connect('switch-page', self._handle_page_change)
# Create the console window
self.text_display = TextDisplay()
self.console_window = gtk.ScrolledWindow()
self.console_window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.console_window.add(self.text_display)
self.console_window.set_size_request(-1, DEFAULT_CONSOLE_WINDOW_WIDTH)
# Create the block tree and variable panels
self.btwin = BlockTreeWindow(platform, self.get_flow_graph)
self.vars = VariableEditor(platform, self.get_flow_graph)
# Figure out which place to put the variable editor
self.left = gtk.VPaned()
self.right = gtk.VPaned()
self.left_subpanel = gtk.HPaned()
self.variable_panel_sidebar = Preferences.variable_editor_sidebar()
if self.variable_panel_sidebar:
self.left.pack1(self.notebook)
self.left.pack2(self.console_window, False)
self.right.pack1(self.btwin)
self.right.pack2(self.vars, False)
else:
# Put the variable editor in a panel with the console
self.left.pack1(self.notebook)
self.left_subpanel.pack1(self.console_window, shrink=False)
self.left_subpanel.pack2(self.vars, resize=False, shrink=True)
self.left.pack2(self.left_subpanel, False)
# Create the right panel
self.right.pack1(self.btwin)
self.container.pack1(self.left)
self.container.pack2(self.right, False)
# load preferences and show the main window
self.resize(*Preferences.main_window_size())
self.container.set_position(Preferences.blocks_window_position())
self.left.set_position(Preferences.console_window_position())
if self.variable_panel_sidebar:
self.right.set_position(Preferences.variable_editor_position(sidebar=True))
else:
self.left_subpanel.set_position(Preferences.variable_editor_position())
self.show_all()
self.console_window.hide()
self.vars.hide()
self.btwin.hide()
############################################################
# Event Handlers
############################################################
def _quit(self, window, event):
"""
Handle the delete event from the main window.
Generated by pressing X to close, alt+f4, or right click+close.
This method in turns calls the state handler to quit.
Returns:
true
"""
Actions.APPLICATION_QUIT()
return True
def _handle_page_change(self, notebook, page, page_num):
"""
Handle a page change. When the user clicks on a new tab,
reload the flow graph to update the vars window and
call handle states (select nothing) to update the buttons.
Args:
notebook: the notebook
page: new page
page_num: new page number
"""
self.current_page = self.notebook.get_nth_page(page_num)
Actions.PAGE_CHANGE()
def update_panel_visibility(self, panel, visibility=True):
"""
Handles changing visibility of panels.
"""
# Set the visibility for the requested panel, then update the containers if they need
# to be hidden as well.
if panel == self.BLOCKS:
if visibility:
self.btwin.show()
else:
self.btwin.hide()
elif panel == self.CONSOLE:
if visibility:
self.console_window.show()
else:
self.console_window.hide()
elif panel == self.VARIABLES:
if visibility:
self.vars.show()
else:
self.vars.hide()
else:
return
if self.variable_panel_sidebar:
# If both the variable editor and block panels are hidden, hide the right container
if not (self.btwin.get_property('visible')) and not (self.vars.get_property('visible')):
self.right.hide()
else:
self.right.show()
else:
if not (self.btwin.get_property('visible')):
self.right.hide()
else:
self.right.show()
if not (self.vars.get_property('visible')) and not (self.console_window.get_property('visible')):
self.left_subpanel.hide()
else:
self.left_subpanel.show()
############################################################
# Console Window
############################################################
def add_console_line(self, line):
"""
Place line at the end of the text buffer, then scroll its window all the way down.
Args:
line: the new text
"""
self.text_display.insert(line)
############################################################
# Pages: create and close
############################################################
def new_page(self, file_path='', show=False):
"""
Create a new notebook page.
Set the tab to be selected.
Args:
file_path: optional file to load into the flow graph
show: true if the page should be shown after loading
"""
#if the file is already open, show the open page and return
if file_path and file_path in self._get_files(): #already open
page = self.notebook.get_nth_page(self._get_files().index(file_path))
self._set_page(page)
return
try: #try to load from file
if file_path: Messages.send_start_load(file_path)
flow_graph = self._platform.get_new_flow_graph()
flow_graph.grc_file_path = file_path
#print flow_graph
page = NotebookPage(
self,
flow_graph=flow_graph,
file_path=file_path,
)
if file_path: Messages.send_end_load()
except Exception, e: #return on failure
Messages.send_fail_load(e)
if isinstance(e, KeyError) and str(e) == "'options'":
# This error is unrecoverable, so crash gracefully
exit(-1)
return
#add this page to the notebook
self.notebook.append_page(page, page.get_tab())
try: self.notebook.set_tab_reorderable(page, True)
except: pass #gtk too old
self.notebook.set_tab_label_packing(page, False, False, gtk.PACK_START)
#only show if blank or manual
if not file_path or show: self._set_page(page)
def close_pages(self):
"""
Close all the pages in this notebook.
Returns:
true if all closed
"""
open_files = filter(lambda file: file, self._get_files()) #filter blank files
open_file = self.get_page().get_file_path()
#close each page
for page in sorted(self.get_pages(), key=lambda p: p.get_saved()):
self.page_to_be_closed = page
closed = self.close_page(False)
if not closed:
break
if self.notebook.get_n_pages(): return False
#save state before closing
Preferences.set_open_files(open_files)
Preferences.file_open(open_file)
Preferences.main_window_size(self.get_size())
Preferences.console_window_position(self.left.get_position())
Preferences.blocks_window_position(self.container.get_position())
if self.variable_panel_sidebar:
Preferences.variable_editor_position(self.right.get_position(), sidebar=True)
else:
Preferences.variable_editor_position(self.left_subpanel.get_position())
Preferences.save()
return True
def close_page(self, ensure=True):
"""
Close the current page.
If the notebook becomes empty, and ensure is true,
call new page upon exit to ensure that at least one page exists.
Args:
ensure: boolean
"""
if not self.page_to_be_closed: self.page_to_be_closed = self.get_page()
#show the page if it has an executing flow graph or is unsaved
if self.page_to_be_closed.get_proc() or not self.page_to_be_closed.get_saved():
self._set_page(self.page_to_be_closed)
#unsaved? ask the user
if not self.page_to_be_closed.get_saved():
response = self._save_changes() # return value is either OK, CLOSE, or CANCEL
if response == gtk.RESPONSE_OK:
Actions.FLOW_GRAPH_SAVE() #try to save
if not self.page_to_be_closed.get_saved(): #still unsaved?
self.page_to_be_closed = None #set the page to be closed back to None
return False
elif response == gtk.RESPONSE_CANCEL:
self.page_to_be_closed = None
return False
#stop the flow graph if executing
if self.page_to_be_closed.get_proc(): Actions.FLOW_GRAPH_KILL()
#remove the page
self.notebook.remove_page(self.notebook.page_num(self.page_to_be_closed))
if ensure and self.notebook.get_n_pages() == 0: self.new_page() #no pages, make a new one
self.page_to_be_closed = None #set the page to be closed back to None
return True
############################################################
# Misc
############################################################
def update(self):
"""
Set the title of the main window.
Set the titles on the page tabs.
Show/hide the console window.
Args:
title: the window title
"""
gtk.Window.set_title(self, Utils.parse_template(MAIN_WINDOW_TITLE_TMPL,
basename=os.path.basename(self.get_page().get_file_path()),
dirname=os.path.dirname(self.get_page().get_file_path()),
new_flowgraph_title=NEW_FLOGRAPH_TITLE,
read_only=self.get_page().get_read_only(),
saved=self.get_page().get_saved(),
platform_name=self._platform.config.name,
)
)
#set tab titles
for page in self.get_pages(): page.set_markup(
Utils.parse_template(PAGE_TITLE_MARKUP_TMPL,
#get filename and strip out file extension
title=os.path.splitext(os.path.basename(page.get_file_path()))[0],
read_only=page.get_read_only(), saved=page.get_saved(),
new_flowgraph_title=NEW_FLOGRAPH_TITLE,
)
)
#show/hide notebook tabs
self.notebook.set_show_tabs(len(self.get_pages()) > 1)
# Need to update the variable window when changing
self.vars.update_gui()
def update_pages(self):
"""
Forces a reload of all the pages in this notebook.
"""
for page in self.get_pages():
success = page.get_flow_graph().reload()
if success: # Only set saved if errors occurred during import
page.set_saved(False)
def get_page(self):
"""
Get the selected page.
Returns:
the selected page
"""
return self.current_page
def get_flow_graph(self):
"""
Get the selected flow graph.
Returns:
the selected flow graph
"""
return self.get_page().get_flow_graph()
def get_focus_flag(self):
"""
Get the focus flag from the current page.
Returns:
the focus flag
"""
return self.get_page().get_drawing_area().get_focus_flag()
############################################################
# Helpers
############################################################
def _set_page(self, page):
"""
Set the current page.
Args:
page: the page widget
"""
self.current_page = page
self.notebook.set_current_page(self.notebook.page_num(self.current_page))
def _save_changes(self):
"""
Save changes to flow graph?
Returns:
the response_id (see buttons variable below)
"""
buttons = (
'Close without saving', gtk.RESPONSE_CLOSE,
gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK
)
return MessageDialogHelper(
gtk.MESSAGE_QUESTION, gtk.BUTTONS_NONE, 'Unsaved Changes!',
'Would you like to save changes before closing?', gtk.RESPONSE_OK, buttons
)
def _get_files(self):
"""
Get the file names for all the pages, in order.
Returns:
list of file paths
"""
return map(lambda page: page.get_file_path(), self.get_pages())
def get_pages(self):
"""
Get a list of all pages in the notebook.
Returns:
list of pages
"""
return [self.notebook.get_nth_page(page_num) for page_num in range(self.notebook.get_n_pages())]
| gpl-3.0 | -7,599,733,800,357,388,000 | 34.542017 | 109 | 0.574181 | false |
ronnienv/antTrails | main.py | 1 | 12930 | """`main` is the top level module for your Bottle application.
Loads the Bottle framework and adds a custom error
handler.
"""
# import the Bottle framework
from bottle import Bottle,route, run, template, static_file, get, post, request, redirect, response
from antTrailsDatabase import Occupant, Spot
from google.appengine.ext import ndb
import datetime, pyimgur, time
# Run the Bottle wsgi application. We don't need to call run() since our
# application is embedded within an App Engine WSGI application server.
bottle = Bottle()
@bottle.get('/')
def home():
query = Occupant.query()
tmp_vendors = convertVendors(query)
vendors = {'vendors':tmp_vendors}
#query = Spot.query()
#query_spots = convertSpots(query)
tmp_spots = []
for v in tmp_vendors:
spot = Spot.get_by_id(v['spot_id'])
if spot != None:
tmp = convertOneSpot(spot,v)
tmp_spots.append(tmp)
spots = {'spots':tmp_spots}
header = template('header', home="active", vendor="", edit="", about="")
content = template('buyer', vendors, spots)
footer = template('footer',"")
deleted = """
<script>
$(window).load(function() {
alert("The spot entered and it's information has been deleted. \\n\\nThank you!");
});
</script>"""
confirmation = """
<script>
$(window).load(function() {
alert("Your reservation is complete! Please note that official reservations must be made through the Student Center and not through antTrails. Also, please note that the spots reset at 12am everyday. \\n\\nThank you!");
});
</script>
"""
if request.get_cookie("submittedForm") == "yes":
response.set_cookie("submittedForm", "no")
content = template('buyer', vendors, spots)
return header + content + footer + confirmation
elif request.get_cookie("deleted") == "yes":
response.set_cookie("deleted", "no")
content = template('buyer', vendors, spots)
return header + content + footer + deleted
else:
return header + content + footer
@bottle.get('/vendor')
def home():
header = template('header', home="", vendor="active", edit="", about="")
content = template('vendor', message = "", sn = "", hl = "" , org = "", desc = "", pw = "")
footer = template('footer',"")
return header + content + footer
@bottle.post('/vendor')
def home():
sn = request.forms.get('spotNumber')
hl = request.forms.get('headline')
org = request.forms.get('organization')
desc = request.forms.get('description')
pw = request.forms.get('password')
img_url = request.forms.get('image_url')
#img = uploadImage(img_url)
if isValidSpot(sn):
snInt = int(sn)
snInt = str(snInt)
snDatabase = Occupant.get_by_id(snInt)
if snDatabase == None:
occupant = Occupant(id = snInt, headline = hl, description = desc, date_time = datetime.datetime.now(), spot_id = snInt, organization = org, spot_image = img_url, password = pw, report = 0)
occupant.put()
time.sleep(2)
response.set_cookie("submittedForm", "yes")
redirect('/')
else:
header = template('header', home="", vendor="active", edit="", about="")
content = template('vendor', message = "*Sorry, the Spot Number entered has already been taken.*", sn = "", hl = hl, org = org, desc = desc, pw = pw)
footer = template('footer',"")
return header + content + footer
else:
header = template('header', home="", vendor="active", edit="", about="")
content = template('vendor', message = "*Sorry, the spot number must be a valid spot number.*", sn = "", hl = hl, org = org, desc = desc, pw = pw)
footer = template('footer',"")
return header + content + footer
@bottle.get('/edit')
def home():
header = template('header', home="", vendor="", edit="active", about="")
content = template('edit', message="")
footer = template('footer',"")
response.set_cookie("edittingForm", "no")
return header + content + footer
@bottle.post('/edit')
def home():
#if the user has not submitted the new editted form, it will enter this branch
if request.get_cookie("edittingForm") == "no":
sn = request.forms.get('spotNumber')
pw = request.forms.get('password')
if isValidSpot(sn):
#gets rid of leading 0s
snInt = int(sn)
snInt = str(snInt)
snDatabase = Occupant.get_by_id(snInt)
if snDatabase == None:
header = template('header', home="", vendor="", edit="active", about="")
content = template('edit', message="*The entered Spot Number has not been reserved yet.")
footer = template('footer',"")
return header + content + footer
else:
if snDatabase.password == pw:
#if the user chose to delete a spot, here they go!
if request.forms.get("delete") == "Delete Spot":
spot = Occupant.get_by_id(snInt)
spot.key.delete()
response.set_cookie("deleted", "yes")
time.sleep(2)
redirect('/')
else:
response.set_cookie("edittingForm", "yes")
snInt = int(sn)
snInt = str(snInt)
response.set_cookie("originalSN", snInt)
hl = snDatabase.headline
org = snDatabase.organization
desc = snDatabase.description
header = template('header', home="", vendor="", edit="active", about="")
content = template('vendor', message = "Spot Information has been loaded!", sn = sn, hl = hl, org = org, desc = desc, pw = pw)
footer = template('footer',"")
return header + content + footer
else:
header = template('header', home="", vendor="", edit="active", about="")
content = template('edit', message="*Invalid Password")
footer = template('footer',"")
return header + content + footer
else:
header = template('header', home="", vendor="", edit="active", about="")
content = template('edit', message = "*Sorry, the spot number must be a valid spot number.*")
footer = template('footer',"")
return header + content + footer
#if the user has submitted a new editted spot form it will enter this branch
#this branch is basically the check in vnedor that validates input
else:
sn = request.forms.get('spotNumber')
hl = request.forms.get('headline')
org = request.forms.get('organization')
desc = request.forms.get('description')
pw = request.forms.get('password')
img_url = request.forms.get('image_url')
if isValidSpot(sn):
snInt = int(sn)
snInt = str(snInt)
snDatabase = Occupant.get_by_id(snInt)
if snDatabase == None or snInt == str(request.get_cookie("originalSN")):
occupant = Occupant(id = snInt, headline = hl, description = desc, date_time = datetime.datetime.now(), spot_id = snInt, organization = org, spot_image = img_url, password = pw, report = 0)
occupant.put()
#if the new spot number does not exist that means that changed the spot number
#so the old can be deleted because it is being replaced
if snDatabase == None:
oldSpot = Occupant.get_by_id(str(request.get_cookie("originalSN")))
oldSpot.key.delete()
response.set_cookie("edittingForm", "no")
response.set_cookie("originalSN", "no")
time.sleep(2)
response.set_cookie("submittedForm", "yes")
redirect('/')
else:
header = template('header', home="", vendor="", edit="active", about="")
content = template('vendor', message = "*Sorry, the Spot Number entered has already been taken.*", sn = "", hl = hl, org = org, desc = desc, pw = pw)
footer = template('footer',"")
return header + content + footer
else:
header = template('header', home="", vendor="", edit="active", about="")
content = template('vendor', message = "*Sorry, the spot number must be a valid spot number.*", sn = "", hl = hl, org = org, desc = desc, pw = pw)
footer = template('footer',"")
return header + content + footer
@bottle.get('/about')
def home():
header = template('header', home="", vendor="", edit="", about="active")
content = template('about',"")
footer = template('footer',"")
return header + content + footer
@bottle.error(404)
def error_404(error):
"""Return a custom 404 error."""
return 'Sorry, Nothing at this URL.'
@bottle.get('/addSpotsToDatabase')
def home():
vendor_to_longlat('LongituteLatitutde.txt')
@bottle.get('/addOccupantData')
def home():
o1 = Occupant(id = "1", headline = "Selling chicken nuggets!", description = "Chicken nuggets! 5 for $1", date_time = datetime.datetime.now(), spot_id = "1", organization = "Circle K", spot_image = "", password = "", report = 0)
o2 = Occupant(id = "2", headline = "Free drinks!", description = "Answer a simple survey to get a free drink!", date_time = datetime.datetime.now(), spot_id = "2", organization = "Mahjong Club", spot_image = "", password = "", report = 0)
o3 = Occupant(id = "3", headline = "Resume Critque", description = "Stop by to get your resume up in shape!", date_time = datetime.datetime.now(), spot_id = "3", organization = "Career Center", spot_image = "", password = "", report = 0)
o4 = Occupant(id = "76", headline = "Study at our Booth!", description = "Come study with us at our booth and learn about stuff!", date_time = datetime.datetime.now(), spot_id = "76", organization = "Study Club", spot_image = "", password = "", report = 0)
o5 = Occupant(id = "233", headline = "Boats", description = "Boats, Boats BOATS!!!", date_time = datetime.datetime.now(), spot_id = "233", organization = "Touch the Boat", spot_image = "", password = "", report = 0)
o6 = Occupant(id = "314", headline = "Don't Smile at Strangers", description = "Have you ever wanted to see what goes on behind the Don't Smile at Strangers group meetings? Come by to find out! We're always happy to show you!", date_time = datetime.datetime.now(), spot_id = "314", organization = "INFX 151", spot_image = "", password = "", report = 0)
o7 = Occupant(id = "106", headline = "ASUCI Elections", description = "Vote today!", date_time = datetime.datetime.now(), spot_id = "106", organization = "ASUCI", spot_image = "", password = "", report = 0)
o1.put()
o2.put()
o3.put()
o4.put()
o5.put()
o6.put()
o7.put()
return "Occupant Data successfully added!"
@bottle.get('/seasonalfruits')
def home():
ndb.delete_multi(Occupant.query().fetch(keys_only=True))
#ndb.delete_multi(Spot.query().fetch(keys_only=True))
@bottle.get('/imgur')
def home():
CLIENT_ID = "023b858ecdb2d0c"
CLIENT_SECRET = "83234b0ff6b2fce855205f69594811b671448848"
im = pyimgur.Imgur(CLIENT_ID, CLIENT_SECRET)
image = im.get_image('S1jmapR')
link = "<img src="+image.link_medium_thumbnail+">"
header = template('header', home="active", vendor="", edit="", about="")
footer = template('footer',"")
return header + link + footer
def isValidSpot(s):
spot = Spot.get_by_id(s)
return spot != None
def convertSpots(spots):
returner = []
for s in spots:
returner.append({
'longitude' : s.longitude,
'latitude' : s.latitude,
'spot_id' : s.spot_id,
'location_image' : s.location_image,
'general_area' : s.general_area
})
return returner
def convertOneSpot(spot, vendor):
returner=[{
'longitude' : spot.longitude,
'latitude' : spot.latitude,
'spot_id' : spot.spot_id,
'location_image' : spot.location_image,
'general_area' : spot.general_area,
'organization' : vendor['organization'],
'description' : vendor['description'],
'headline' : vendor['headline'],
}]
return returner
def convertVendors(vendors):
returner = []
i = 0
for v in vendors:
returner.append({
'headline' : v.headline,
'description' : v.description,
'date_time' : v.date_time,
'spot_id' : v.spot_id,
'organization' : v.organization,
'spot_image' : v.spot_image,
'password' : v.password,
'report' : v.report,
'pos' : i
})
i += 1
return returner
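# Note: the 'pos' value attached above is simply the occupant's index in this
# result list; presumably the front end uses it to refer back to a specific
# occupant (for example when reporting one) without another datastore query.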
def uploadImage(img_url):
CLIENT_ID = "023b858ecdb2d0c"
CLIENT_SECRET = "83234b0ff6b2fce855205f69594811b671448848"
im = pyimgur.Imgur(CLIENT_ID, CLIENT_SECRET)
image = im.upload_image(path = img_url, title= "Test upload")
return image.link
def vendor_to_longlat(spreadsheet):
    '''Load spot coordinates from Tech Becka's spreadsheet.
    Each CSV row is expected to look like "id,latitude,longitude"; every row
    that parses is stored directly in the datastore as a Spot entity.
    '''
    sheet = open(spreadsheet, 'r')
    sheet = sheet.readlines()
    for item in range(len(sheet)):
        sheet[item] = sheet[item].split(',')
    for item in sheet:
        try:
            item[2] = item[2].strip('\n')
            s = Spot(id = item[0], latitude = item[1], longitude = item[2], location_image = "", spot_id =item[0], general_area ="")
            s.put()
        except:
            # Skip header lines or malformed rows that do not have three fields.
            pass
vendor_to_longlat('LongituteLatitutde.txt')
| apache-2.0 | 724,284,863,946,687,100 | 36.807018 | 354 | 0.626991 | false |
flag0010/pop_gen_cnn | data_prep_tricks/test.effects.of.data.prep.tricks.on.tajD.py | 1 | 7751 | import numpy as np
import keras
#from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Merge
from keras.layers import Conv1D, MaxPooling1D, AveragePooling1D
from generic_tajD import calc_S_and_k_from_seqs, count_all, tajD
from random import choice
from matplotlib import pyplot as plt
#from keras_diagram import ascii
from sklearn.neighbors import NearestNeighbors
from pprint import pprint
nreps, nepoch = 10, 10
def sort_min_diff(amat):
    '''Reorder the rows of a binary matrix: find the row with the smallest
    total Manhattan distance to all other rows, then sort every row by its
    distance to that row, so similar haplotypes end up adjacent.'''
    mb = NearestNeighbors(len(amat), metric='manhattan').fit(amat)
    v = mb.kneighbors(amat)
    smallest = np.argmin(v[0].sum(axis=1))
    return amat[v[1][smallest]]
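# Illustrative sketch (not part of the original script): sort_min_diff is meant
# to give the CNN a more consistent, image-like input by clustering similar
# rows together. The helper below only demonstrates assumed usage on random
# data and is never called.
def _sort_min_diff_example():
    demo = np.random.randint(2, size=(10, 20))  # 10 haplotypes, 20 sites
    resorted = sort_min_diff(demo)
    return demo.shape == resorted.shape  # rows are permuted, shape is kept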
def transpose_shape(x):
n = []
for i in x: n.append(i.T)
return np.array(n)
x = []
for i in xrange(5000):
#print i
q = []
v = np.random.randint(2, size=60)#*-2+1
#print v
for j in range(35):
if choice([0,1]): q.append(v)
else: q.append(np.random.randint(2, size=60))
x.append(np.array(q))
x = np.array(x)
#print x[1]
#print x.shape
y = []
for i in x:
S,k = calc_S_and_k_from_seqs(i)
#print S,k
td = tajD(35, S, k)
#print td
y.append(td)
ytest, ytrain = y[:1000], y[1000:]
xtest, xtrain = x[:1000], x[1000:]
#print xtest.shape
#print xtrain.shape
np.savez_compressed('tajd.npz', ytest=ytest, xtest=xtest, xtrain=xtrain, ytrain=ytrain)
all_out = {'not_transposed':[], 'binary':[], 'neg1_1': [], 'resort':[], 'resort_and_neg1_1':[]}
for i in range(nreps):
model = Sequential()
model.add(Conv1D(128, kernel_size=2,
activation='relu',
input_shape=(35, 60)))
#model.add(Dropout(0.25))#not helpful
model.add(Conv1D(128, kernel_size=2, activation='relu'))
model.add(AveragePooling1D(pool_size=2))
model.add(Dropout(0.1))
model.add(Flatten())
model.add(Dense(128, activation='relu', kernel_initializer='normal'))
model.add(Dropout(0.25))
model.add(Dense(1, kernel_initializer='normal'))
model.compile(loss='mean_squared_error', optimizer='adam')
pred = model.predict(xtest)
rmse = np.mean([(iii-jjj)**2 for iii,jjj in zip(ytest, pred)])**0.5
f = [rmse]
for j in range(nepoch):
model.fit(xtrain, ytrain, batch_size=64,
epochs=1, verbose=0, validation_data=(xtest, ytest))
pred = model.predict(xtest)
rmse = np.mean([(iii-jjj)**2 for iii,jjj in zip(ytest, pred)])**0.5
print( i,j, rmse)
f.append(rmse)
all_out['not_transposed'].append(f)
pprint( all_out )
xtrain, xtest = transpose_shape(xtrain), transpose_shape(xtest)
for i in range(nreps):
model = Sequential()
model.add(Conv1D(128, kernel_size=2,
activation='relu',
input_shape=(60, 35)))
#model.add(Dropout(0.25))#not helpful
model.add(Conv1D(128, kernel_size=2, activation='relu'))
model.add(AveragePooling1D(pool_size=2))
model.add(Dropout(0.1))
model.add(Flatten())
model.add(Dense(128, activation='relu', kernel_initializer='normal'))
model.add(Dropout(0.25))
model.add(Dense(1, kernel_initializer='normal'))
model.compile(loss='mean_squared_error', optimizer='adam')
pred = model.predict(xtest)
rmse = np.mean([(iii-jjj)**2 for iii,jjj in zip(ytest, pred)])**0.5
f = [rmse]
for j in range(nepoch):
model.fit(xtrain, ytrain, batch_size=64,
epochs=1, verbose=0, validation_data=(xtest, ytest))
pred = model.predict(xtest)
rmse = np.mean([(iii-jjj)**2 for iii,jjj in zip(ytest, pred)])**0.5
print( i,j, rmse)
f.append(rmse)
all_out['binary'].append(f)
pprint( all_out )
mtrain, mtest = (xtrain*-2+1)*-1, (xtest*-2+1)*-1
for i in range(nreps):
model = Sequential()
model.add(Conv1D(128, kernel_size=2,
activation='relu',
input_shape=(60, 35)))
#model.add(Dropout(0.25))#not helpful
model.add(Conv1D(128, kernel_size=2, activation='relu'))
model.add(AveragePooling1D(pool_size=2))
model.add(Dropout(0.1))
model.add(Flatten())
model.add(Dense(128, activation='relu', kernel_initializer='normal'))
model.add(Dropout(0.25))
model.add(Dense(1, kernel_initializer='normal'))
model.compile(loss='mean_squared_error', optimizer='adam')
pred = model.predict(mtest)
rmse = np.mean([(iii-jjj)**2 for iii,jjj in zip(ytest, pred)])**0.5
f = [rmse]
for j in range(nepoch):
model.fit(mtrain, ytrain, batch_size=64,
epochs=1, verbose=0, validation_data=(mtest, ytest))
pred = model.predict(mtest)
rmse = np.mean([(iii-jjj)**2 for iii,jjj in zip(ytest, pred)])**0.5
print( i,j, rmse)
f.append(rmse)
all_out['neg1_1'].append(f)
pprint( all_out )
rtrain = np.array([sort_min_diff(i.T).T for i in xtrain])
rtest = np.array([sort_min_diff(i.T).T for i in xtest])
for i in range(nreps):
model = Sequential()
model.add(Conv1D(128, kernel_size=2,
activation='relu',
input_shape=(60, 35)))
#model.add(Dropout(0.25))#not helpful
model.add(Conv1D(128, kernel_size=2, activation='relu'))
model.add(AveragePooling1D(pool_size=2))
model.add(Dropout(0.1))
model.add(Flatten())
model.add(Dense(128, activation='relu', kernel_initializer='normal'))
model.add(Dropout(0.25))
model.add(Dense(1, kernel_initializer='normal'))
model.compile(loss='mean_squared_error', optimizer='adam')
pred = model.predict(rtest)
rmse = np.mean([(iii-jjj)**2 for iii,jjj in zip(ytest, pred)])**0.5
f = [rmse]
for j in range(nepoch):
model.fit(rtrain, ytrain, batch_size=64,
epochs=1, verbose=0, validation_data=(rtest, ytest))
pred = model.predict(rtest)
rmse = np.mean([(iii-jjj)**2 for iii,jjj in zip(ytest, pred)])**0.5
print( i,j, rmse)
f.append(rmse)
all_out['resort'].append(f)
pprint( all_out )
rtrain = (rtrain*-2+1)*-1
rtest = (rtest*-2+1)*-1
for i in range(nreps):
model = Sequential()
model.add(Conv1D(128, kernel_size=2,
activation='relu',
input_shape=(60, 35)))
#model.add(Dropout(0.25))#not helpful
model.add(Conv1D(128, kernel_size=2, activation='relu'))
model.add(AveragePooling1D(pool_size=2))
model.add(Dropout(0.1))
model.add(Flatten())
model.add(Dense(128, activation='relu', kernel_initializer='normal'))
model.add(Dropout(0.25))
model.add(Dense(1, kernel_initializer='normal'))
model.compile(loss='mean_squared_error', optimizer='adam')
pred = model.predict(rtest)
rmse = np.mean([(iii-jjj)**2 for iii,jjj in zip(ytest, pred)])**0.5
f = [rmse]
for j in range(nepoch):
model.fit(rtrain, ytrain, batch_size=64,
epochs=1, verbose=0, validation_data=(rtest, ytest))
pred = model.predict(rtest)
rmse = np.mean([(iii-jjj)**2 for iii,jjj in zip(ytest, pred)])**0.5
print( i,j, rmse)
f.append(rmse)
all_out['resort_and_neg1_1'].append(f)
pprint( all_out )
def rmean(x):
k = [sum(i)*len(i)**-1 for i in zip(*x)]
return k
for i,color in zip(['not_transposed', 'binary', 'neg1_1', 'resort', 'resort_and_neg1_1'], ['r', 'b', 'g', 'k', 'magenta']):
#for j in all_out[i]:
#plt.plot(range(11), j, color=color, alpha=.05)
#plt.scatter(range(11), j, color=color, alpha=.3)
plt.plot(range(11), rmean(all_out[i]), color=color)
from pickle import dump
dump(all_out, open('all.fitted.nets.pickle', 'w'))
plt.xlabel('Training Epoch')
plt.ylabel("Hold-out Data RMSE")
plt.semilogy()
plt.show()
| gpl-3.0 | -4,948,531,630,374,158,000 | 34.231818 | 123 | 0.612566 | false |
jdanbrown/pydatalab | google/datalab/ml/__init__.py | 2 | 1045 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# flake8: noqa
"""CloudML Helper Library."""
from __future__ import absolute_import
from ._job import Jobs, Job
from ._summary import Summary
from ._tensorboard import TensorBoard
from ._dataset import CsvDataSet, BigQueryDataSet
from ._cloud_models import Models, ModelVersions
from ._confusion_matrix import ConfusionMatrix
from ._feature_slice_view import FeatureSliceView
from ._cloud_training_config import CloudTrainingConfig
from ._util import *
| apache-2.0 | -8,488,155,526,236,006,000 | 37.703704 | 99 | 0.781818 | false |
cjaymes/pyscap | src/scap/model/oval_5/sc/linux/SeLinuxBooleanItemElement.py | 1 | 1241 | # Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.Model import Model
from scap.model.oval_5.sc.ItemType import ItemType
logger = logging.getLogger(__name__)
class SeLinuxBooleanItemElement(ItemType):
MODEL_MAP = {
'elements': [
{'tag_name': 'name', 'class': 'scap.model.oval_5.sc.EntityItemType', 'min': 0, 'max': 1},
{'tag_name': 'current_status', 'class': 'scap.model.oval_5.sc.EntityItemType', 'min': 0, 'max': 1},
{'tag_name': 'pending_status', 'class': 'scap.model.oval_5.sc.EntityItemType', 'min': 0, 'max': 1},
],
}
| gpl-3.0 | 480,960,904,359,227,650 | 39.032258 | 111 | 0.689766 | false |
hendradarwin/VTK | IO/Geometry/Testing/Python/TestChacoReader.py | 17 | 1189 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# read in a Chaco file
chReader = vtk.vtkChacoReader()
chReader.SetBaseName(VTK_DATA_ROOT + "/Data/vwgt")
chReader.SetGenerateGlobalElementIdArray(1)
chReader.SetGenerateGlobalNodeIdArray(1)
chReader.SetGenerateEdgeWeightArrays(1)
chReader.SetGenerateVertexWeightArrays(1)
geom = vtk.vtkGeometryFilter()
geom.SetInputConnection(chReader.GetOutputPort())
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(geom.GetOutputPort())
mapper.SetColorModeToMapScalars()
mapper.SetScalarModeToUsePointFieldData()
mapper.SelectColorArray("VertexWeight1")
mapper.SetScalarRange(1, 5)
actor0 = vtk.vtkActor()
actor0.SetMapper(mapper)
# Create the RenderWindow, Renderer and interactor
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actor to the renderer, set the background and size
#
ren1.AddActor(actor0)
ren1.SetBackground(0, 0, 0)
renWin.SetSize(300, 300)
renWin.SetMultiSamples(0)
iren.Initialize()
renWin.Render()
#iren.Start()
| bsd-3-clause | 6,055,860,417,004,810,000 | 23.770833 | 60 | 0.805719 | false |
CarlosRDomin/crazyflie-firmware | tools/usdlog/CF_functions.py | 3 | 4153 | # -*- coding: utf-8 -*-
"""
decode: decodes binary logged sensor data from crazyflie2 with uSD-Card-Deck
createConfig: create config file which has to placed on µSD-Card
@author: jsschell
"""
from zlib import crc32
import struct
import numpy as np
import os
# lookup dictionary to determine size of data types
fmtChars = {'c': 1, 'b': 1, 'B': 1, '?': 1, 'h': 2, 'H': 2,
'i': 4, 'I': 4, 'l': 4, 'L': 4, 'q': 8, 'Q': 8, 'f': 4, 'd': 8}
def decode(filName):
# read file as binary
filObj = open(filName, 'rb')
filCon = filObj.read()
filObj.close()
# get file size to forecast output array
statinfo = os.stat(filName)
# process file header
setWidth = struct.unpack('B', filCon[:1])
setNames = []
for ii in range(0, setWidth[0]):
setNames.append(filCon[ii*5+1:ii*5+6])
print("[CRC] of file header:", end="")
crcVal = crc32(filCon[0:setWidth[0]*5+1+4]) & 0xffffffff
crcErrors = 0
if ( crcVal == 0xffffffff):
print("\tOK\t["+hex(crcVal)+"]")
else:
print("\tERROR\t["+hex(crcVal)+"]")
crcErrors += 1
offset = setWidth[0]*5+5
# process data sets
setCon = np.zeros(statinfo.st_size // 4)
idx = 0
fmtStr = ""
setBytes = 0
for setName in setNames:
fmtStr += chr(setName[0])
setBytes += fmtChars[chr(setName[0])]
while(offset < len(filCon)):
setNumber = struct.unpack('B', filCon[offset:offset+1])
offset += 1
for ii in range(setNumber[0]):
setCon[idx:idx+setWidth[0]] = np.array(struct.unpack(fmtStr, filCon[offset:setBytes+offset]))
offset += setBytes
idx += setWidth[0]
crcVal = crc32(filCon[offset-setBytes*setNumber[0]-1:offset+4]) & 0xffffffff
print("[CRC] of data set:", end="")
if ( crcVal == 0xffffffff):
print("\tOK\t["+hex(crcVal)+"]")
else:
print("\tERROR\t["+hex(crcVal)+"]")
crcErrors += 1
offset += 4
if (not crcErrors):
print("[CRC] no errors occurred:\tOK")
else:
print("[CRC] {0} errors occurred:\tERROR".format(crcErrors))
# remove not required elements and reshape as matrix
setCon = np.reshape(setCon[0:idx], (setWidth[0], idx//setWidth[0]), 'f')
# create output dictionary
output = {}
for ii in range(setWidth[0]):
output[setNames[ii][1:].decode("utf-8").strip()] = setCon[ii]
return output
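# Hedged usage sketch: the keys of the returned dictionary depend entirely on
# the config that was written to the uSD card (e.g. accelerometer fields only
# exist if they were logged), and 'log00' is just an assumed file name.
def _decode_example(fname='log00'):
    logData = decode(fname)
    return sorted(logData.keys())  # lists whatever channels were logged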
def createConfig():
import re
temp = 0
print("Which data should be logged?")
inStr = input(" * Acceleration ([Y]es / [n]o): ")
if ((re.search('^[Yy]', inStr)) or (inStr == '')):
temp += 1
inStr = input(" * Gyroscope ([Y]es / [n]o): ")
if ((re.search('^[Yy]', inStr)) or (inStr == '')):
temp += 2
inStr = input(" * Barometer ([Y]es / [n]o): ")
if ((re.search('^[Yy]', inStr)) or (inStr == '')):
temp += 4
inStr = input(" * Magnetometer ([Y]es / [n]o): ")
if ((re.search('^[Yy]', inStr)) or (inStr == '')):
temp += 8
inStr = input(" * Stabilizer ([Y]es / [n]o): ")
if ((re.search('^[Yy]', inStr)) or (inStr == '')):
temp += 16
inStr = input(" * Control ([Y]es / [n]o): ")
if ((re.search('^[Yy]', inStr)) or (inStr == '')):
temp += 32
inStr = input(" * Z-Range ([Y]es / [n]o): ")
if ((re.search('^[Yy]', inStr)) or (inStr == '')):
temp += 64
config = temp.to_bytes(1, byteorder='big', signed=False)
config += int(input("\nEnter the log frequency [1-1000]: ")).to_bytes(2, byteorder='big', signed=False)
config += int(input("Enter buffer size [0-255]: ")).to_bytes(1, byteorder='big', signed=False)
config += bytes(input("Filename [max. 10 letters]: ").encode('ascii'))
# write config to file
filObj = open("config", 'wb')
filObj.write(config)
filObj.close()
if __name__=='__main__':
createConfig() | gpl-3.0 | 579,685,083,883,614,600 | 31.232 | 107 | 0.520231 | false |
whitepages/nova | nova/tests/unit/api/openstack/compute/microversions.py | 39 | 5031 | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Microversions Test Extension"""
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova.tests.unit.api.openstack.compute import dummy_schema
ALIAS = 'test-microversions'
class MicroversionsController(wsgi.Controller):
@wsgi.Controller.api_version("2.1")
def index(self, req):
data = {'param': 'val'}
return data
@wsgi.Controller.api_version("2.2") # noqa
def index(self, req):
data = {'param': 'val2'}
return data
@wsgi.Controller.api_version("3.0") # noqa
def index(self, req):
raise webob.exc.HTTPBadRequest()
# We have a second example controller here to help check
# for accidental dependencies between API controllers
# due to base class changes
class MicroversionsController2(wsgi.Controller):
@wsgi.Controller.api_version("2.2", "2.5")
def index(self, req):
data = {'param': 'controller2_val1'}
return data
@wsgi.Controller.api_version("2.5", "3.1") # noqa
@wsgi.response(202)
def index(self, req):
data = {'param': 'controller2_val2'}
return data
class MicroversionsController3(wsgi.Controller):
@wsgi.Controller.api_version("2.1")
@validation.schema(dummy_schema.dummy)
def create(self, req, body):
data = {'param': 'create_val1'}
return data
@wsgi.Controller.api_version("2.1")
@validation.schema(dummy_schema.dummy, "2.3", "2.8")
@validation.schema(dummy_schema.dummy2, "2.9")
def update(self, req, id, body):
data = {'param': 'update_val1'}
return data
@wsgi.Controller.api_version("2.1", "2.2")
@wsgi.response(202)
@wsgi.action('foo')
def _foo(self, req, id, body):
data = {'foo': 'bar'}
return data
class MicroversionsController4(wsgi.Controller):
@wsgi.Controller.api_version("2.1")
def _create(self, req):
data = {'param': 'controller4_val1'}
return data
@wsgi.Controller.api_version("2.2") # noqa
def _create(self, req):
data = {'param': 'controller4_val2'}
return data
def create(self, req, body):
return self._create(req)
class MicroversionsExtendsBaseController(wsgi.Controller):
@wsgi.Controller.api_version("2.1")
def show(self, req, id):
return {'base_param': 'base_val'}
class MicroversionsExtendsController1(wsgi.Controller):
@wsgi.Controller.api_version("2.3")
@wsgi.extends
def show(self, req, resp_obj, id):
resp_obj.obj['extend_ctrlr1'] = 'val_1'
class MicroversionsExtendsController2(wsgi.Controller):
@wsgi.Controller.api_version("2.4")
@wsgi.extends
def show(self, req, resp_obj, id):
resp_obj.obj['extend_ctrlr2'] = 'val_2'
class MicroversionsExtendsController3(wsgi.Controller):
@wsgi.Controller.api_version("2.2", "2.3")
@wsgi.extends
def show(self, req, resp_obj, id):
resp_obj.obj['extend_ctrlr3'] = 'val_3'
class Microversions(extensions.V21APIExtensionBase):
"""Basic Microversions Extension."""
name = "Microversions"
alias = ALIAS
version = 1
def get_resources(self):
res1 = extensions.ResourceExtension('microversions',
MicroversionsController())
res2 = extensions.ResourceExtension('microversions2',
MicroversionsController2())
res3 = extensions.ResourceExtension('microversions3',
MicroversionsController3(),
member_actions={"action": "POST"})
res4 = extensions.ResourceExtension('microversions4',
MicroversionsController4())
res5 = extensions.ResourceExtension(
'microversions5', MicroversionsExtendsBaseController())
return [res1, res2, res3, res4, res5]
def get_controller_extensions(self):
extension1 = extensions.ControllerExtension(
self, 'microversions5', MicroversionsExtendsController1())
extension2 = extensions.ControllerExtension(
self, 'microversions5', MicroversionsExtendsController2())
extension3 = extensions.ControllerExtension(
self, 'microversions5', MicroversionsExtendsController3())
return [extension1, extension2, extension3]
| apache-2.0 | 7,368,281,486,471,552,000 | 31.458065 | 78 | 0.642417 | false |
gfarmerfr/nicotine-plus | plugins/spamfilter/__init__.py | 2 | 2071 | # -*- coding: utf-8 -*-
from pynicotine.pluginsystem import BasePlugin, returncode
def enable(frame):
global PLUGIN
PLUGIN = Plugin(frame)
def disable(frame):
global PLUGIN
PLUGIN = None
class Plugin(BasePlugin):
__name__ = "Spamfilter"
settings = {'minlength':200,
'maxlength':400,
'maxdiffcharacters':10,
'badprivatephrases':['buy viagra now','mybrute.com','mybrute.es','0daymusic.biz']
}
metasettings = {'minlength': {"description": 'The minimum length of a line before it\'s considered as ASCII spam', 'type':'integer'} ,
'maxdiffcharacters': {"description": 'The maximum number of different characters that is still considered ASCII spam', 'type':'integer'},
'maxlength': {"description": 'The maximum length of a line before it\'s considered as spam.', 'type':'integer'},
'badprivatephrases': {"description": 'Things people send you in private that is spam.', 'type':'list string'},
}
def LoadNotification(self):
self.log('A line should be at least %s long with a maximum of %s different characters before it\'s considered ASCII spam.' % (self.settings['minlength'], self.settings['maxdiffcharacters']))
def IncomingPublicChatEvent(self, room, user, line):
if len(line) >= self.settings['minlength'] and len(set(line)) < self.settings['maxdiffcharacters']:
self.log('Filtered ASCII spam from "%s" in room "%s"' % (user, room))
return returncode['zap']
if len(line) > self.settings['maxlength']:
self.log('Filtered really long line (%s characters) from "%s" in room "%s"' % (len(line), user, room))
return returncode['zap']
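    # Example with the default settings above: a 250-character line made up of
    # only '$' and '#' has just 2 distinct characters and is zapped as
    # ASCII-art spam, and any line longer than 400 characters is zapped too.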
def IncomingPrivateChatEvent(self, user, line):
for phrase in self.settings['badprivatephrases']:
if line.lower().find(phrase) > -1:
self.log("Blocked spam from %s: %s" % (user, line))
return returncode['zap']
| gpl-3.0 | 3,998,236,504,790,852,000 | 52.102564 | 199 | 0.608885 | false |
Spiderlover/Toontown | toontown/uberdog/ScavengerHuntDataStore.py | 6 | 1751 | from direct.directnotify import DirectNotifyGlobal
from toontown.uberdog.DataStore import *
class ScavengerHuntDataStore(DataStore):
QueryTypes = DataStore.addQueryTypes(['GetGoals', 'AddGoal'])
notify = DirectNotifyGlobal.directNotify.newCategory('ScavengerHuntDataStore')
def __init__(self, filepath):
DataStore.__init__(self, filepath)
def handleQuery(self, query):
qId, qData = query
if qId == self.QueryTypes['GetGoals']:
avId, goal = qData
goals = self.__getGoalsForAvatarId(avId)
return (qId, (avId, goal, goals))
elif qId == self.QueryTypes['AddGoal']:
avId, goal = qData
self.__addGoalToAvatarId(avId, goal)
return (qId, (avId,))
return None
def __addGoalToAvatarId(self, avId, goal):
if self.wantAnyDbm:
pAvId = cPickle.dumps(avId)
pGoal = cPickle.dumps(goal)
pData = self.data.get(pAvId, None)
if pData is not None:
data = cPickle.loads(pData)
else:
data = set()
data.add(goal)
pData = cPickle.dumps(data)
self.data[pAvId] = pData
else:
self.data.setdefault(avId, set())
self.data[avId].add(goal)
self.incrementWriteCount()
return
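    # Note on the wantAnyDbm branch above: dbm-style backends only store string
    # keys and values, so the avatar id and the goal set are round-tripped
    # through cPickle before being written to self.data.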
def __getGoalsForAvatarId(self, avId):
if self.wantAnyDbm:
pAvId = cPickle.dumps(avId)
pData = self.data.get(pAvId, None)
if pData is not None:
data = list(cPickle.loads(pData))
else:
data = []
return data
else:
return list(self.data.get(avId, []))
return
| mit | 4,570,379,914,637,634,000 | 32.673077 | 82 | 0.556825 | false |
anntzer/scikit-learn | sklearn/multioutput.py | 7 | 30307 | """
This module implements multioutput regression and classification.
The estimators provided in this module are meta-estimators: they require
a base estimator to be provided in their constructor. The meta-estimator
extends single output estimators to multioutput estimators.
"""
# Author: Tim Head <[email protected]>
# Author: Hugo Bowne-Anderson <[email protected]>
# Author: Chris Rivera <[email protected]>
# Author: Michael Williamson
# Author: James Ashton Nichols <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from joblib import Parallel
from abc import ABCMeta, abstractmethod
from .base import BaseEstimator, clone, MetaEstimatorMixin
from .base import RegressorMixin, ClassifierMixin, is_classifier
from .model_selection import cross_val_predict
from .utils import check_array, check_X_y, check_random_state
from .utils.metaestimators import if_delegate_has_method
from .utils.validation import (check_is_fitted, has_fit_parameter,
_check_fit_params, _deprecate_positional_args)
from .utils.multiclass import check_classification_targets
from .utils.fixes import delayed
__all__ = ["MultiOutputRegressor", "MultiOutputClassifier",
"ClassifierChain", "RegressorChain"]
def _fit_estimator(estimator, X, y, sample_weight=None, **fit_params):
estimator = clone(estimator)
if sample_weight is not None:
estimator.fit(X, y, sample_weight=sample_weight, **fit_params)
else:
estimator.fit(X, y, **fit_params)
return estimator
def _partial_fit_estimator(estimator, X, y, classes=None, sample_weight=None,
first_time=True):
if first_time:
estimator = clone(estimator)
if sample_weight is not None:
if classes is not None:
estimator.partial_fit(X, y, classes=classes,
sample_weight=sample_weight)
else:
estimator.partial_fit(X, y, sample_weight=sample_weight)
else:
if classes is not None:
estimator.partial_fit(X, y, classes=classes)
else:
estimator.partial_fit(X, y)
return estimator
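# Note: the estimator is cloned only on the first partial_fit call so the
# user-supplied template estimator is never fitted in place; later calls keep
# updating the per-output clones created on that first pass.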
class _MultiOutputEstimator(MetaEstimatorMixin,
BaseEstimator,
metaclass=ABCMeta):
@abstractmethod
@_deprecate_positional_args
def __init__(self, estimator, *, n_jobs=None):
self.estimator = estimator
self.n_jobs = n_jobs
@if_delegate_has_method('estimator')
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incrementally fit the model to data.
Fit a separate model for each output variable.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data.
y : {array-like, sparse matrix} of shape (n_samples, n_outputs)
Multi-output targets.
classes : list of ndarray of shape (n_outputs,)
Each array is unique classes for one output in str/int
Can be obtained by via
``[np.unique(y[:, i]) for i in range(y.shape[1])]``, where y is the
target matrix of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Only supported if the underlying regressor supports sample
weights.
Returns
-------
self : object
"""
X, y = check_X_y(X, y,
force_all_finite=False,
multi_output=True,
accept_sparse=True)
if y.ndim == 1:
raise ValueError("y must have at least two dimensions for "
"multi-output regression but has only one.")
if (sample_weight is not None and
not has_fit_parameter(self.estimator, 'sample_weight')):
raise ValueError("Underlying estimator does not support"
" sample weights.")
first_time = not hasattr(self, 'estimators_')
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_partial_fit_estimator)(
self.estimators_[i] if not first_time else self.estimator,
X, y[:, i],
classes[i] if classes is not None else None,
sample_weight, first_time) for i in range(y.shape[1]))
return self
def fit(self, X, y, sample_weight=None, **fit_params):
""" Fit the model to data.
Fit a separate model for each output variable.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data.
y : {array-like, sparse matrix} of shape (n_samples, n_outputs)
Multi-output targets. An indicator matrix turns on multilabel
estimation.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Only supported if the underlying regressor supports sample
weights.
**fit_params : dict of string -> object
Parameters passed to the ``estimator.fit`` method of each step.
.. versionadded:: 0.23
Returns
-------
self : object
"""
if not hasattr(self.estimator, "fit"):
raise ValueError("The base estimator should implement"
" a fit method")
X, y = self._validate_data(X, y,
force_all_finite=False,
multi_output=True, accept_sparse=True)
if is_classifier(self):
check_classification_targets(y)
if y.ndim == 1:
raise ValueError("y must have at least two dimensions for "
"multi-output regression but has only one.")
if (sample_weight is not None and
not has_fit_parameter(self.estimator, 'sample_weight')):
raise ValueError("Underlying estimator does not support"
" sample weights.")
fit_params_validated = _check_fit_params(X, fit_params)
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_estimator)(
self.estimator, X, y[:, i], sample_weight,
**fit_params_validated)
for i in range(y.shape[1]))
return self
def predict(self, X):
"""Predict multi-output variable using a model
trained for each target variable.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data.
Returns
-------
y : {array-like, sparse matrix} of shape (n_samples, n_outputs)
Multi-output targets predicted across multiple predictors.
Note: Separate models are generated for each predictor.
"""
check_is_fitted(self)
if not hasattr(self.estimator, "predict"):
raise ValueError("The base estimator should implement"
" a predict method")
X = check_array(X, force_all_finite=False, accept_sparse=True)
y = Parallel(n_jobs=self.n_jobs)(
delayed(e.predict)(X)
for e in self.estimators_)
return np.asarray(y).T
def _more_tags(self):
return {'multioutput_only': True}
class MultiOutputRegressor(RegressorMixin, _MultiOutputEstimator):
"""Multi target regression
This strategy consists of fitting one regressor per target. This is a
simple strategy for extending regressors that do not natively support
multi-target regression.
.. versionadded:: 0.18
Parameters
----------
estimator : estimator object
An estimator object implementing :term:`fit` and :term:`predict`.
n_jobs : int or None, optional (default=None)
The number of jobs to run in parallel.
:meth:`fit`, :meth:`predict` and :meth:`partial_fit` (if supported
by the passed estimator) will be parallelized for each target.
When individual estimators are fast to train or predict,
using ``n_jobs > 1`` can result in slower performance due
to the parallelism overhead.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all available processes / threads.
See :term:`Glossary <n_jobs>` for more details.
.. versionchanged:: 0.20
`n_jobs` default changed from 1 to None
Attributes
----------
estimators_ : list of ``n_output`` estimators
Estimators used for predictions.
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import load_linnerud
>>> from sklearn.multioutput import MultiOutputRegressor
>>> from sklearn.linear_model import Ridge
>>> X, y = load_linnerud(return_X_y=True)
>>> clf = MultiOutputRegressor(Ridge(random_state=123)).fit(X, y)
>>> clf.predict(X[[0]])
array([[176..., 35..., 57...]])
"""
@_deprecate_positional_args
def __init__(self, estimator, *, n_jobs=None):
super().__init__(estimator, n_jobs=n_jobs)
@if_delegate_has_method('estimator')
def partial_fit(self, X, y, sample_weight=None):
"""Incrementally fit the model to data.
Fit a separate model for each output variable.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data.
y : {array-like, sparse matrix} of shape (n_samples, n_outputs)
Multi-output targets.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Only supported if the underlying regressor supports sample
weights.
Returns
-------
self : object
"""
super().partial_fit(
X, y, sample_weight=sample_weight)
class MultiOutputClassifier(ClassifierMixin, _MultiOutputEstimator):
"""Multi target classification
This strategy consists of fitting one classifier per target. This is a
simple strategy for extending classifiers that do not natively support
multi-target classification
Parameters
----------
estimator : estimator object
An estimator object implementing :term:`fit`, :term:`score` and
:term:`predict_proba`.
n_jobs : int or None, optional (default=None)
The number of jobs to run in parallel.
:meth:`fit`, :meth:`predict` and :meth:`partial_fit` (if supported
by the passed estimator) will be parallelized for each target.
When individual estimators are fast to train or predict,
using ``n_jobs > 1`` can result in slower performance due
to the parallelism overhead.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all available processes / threads.
See :term:`Glossary <n_jobs>` for more details.
.. versionchanged:: 0.20
`n_jobs` default changed from 1 to None
Attributes
----------
classes_ : ndarray of shape (n_classes,)
Class labels.
estimators_ : list of ``n_output`` estimators
Estimators used for predictions.
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_multilabel_classification
>>> from sklearn.multioutput import MultiOutputClassifier
>>> from sklearn.neighbors import KNeighborsClassifier
>>> X, y = make_multilabel_classification(n_classes=3, random_state=0)
>>> clf = MultiOutputClassifier(KNeighborsClassifier()).fit(X, y)
>>> clf.predict(X[-2:])
array([[1, 1, 0], [1, 1, 1]])
"""
@_deprecate_positional_args
def __init__(self, estimator, *, n_jobs=None):
super().__init__(estimator, n_jobs=n_jobs)
def fit(self, X, Y, sample_weight=None, **fit_params):
"""Fit the model to data matrix X and targets Y.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Y : array-like of shape (n_samples, n_classes)
The target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Only supported if the underlying classifier supports sample
weights.
**fit_params : dict of string -> object
Parameters passed to the ``estimator.fit`` method of each step.
.. versionadded:: 0.23
Returns
-------
self : object
"""
super().fit(X, Y, sample_weight, **fit_params)
self.classes_ = [estimator.classes_ for estimator in self.estimators_]
return self
@property
def predict_proba(self):
"""Probability estimates.
Returns prediction probabilities for each class of each output.
This method will raise a ``ValueError`` if any of the
estimators do not have ``predict_proba``.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data
Returns
-------
p : array of shape (n_samples, n_classes), or a list of n_outputs \
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
.. versionchanged:: 0.19
This function now returns a list of arrays where the length of
the list is ``n_outputs``, and each array is (``n_samples``,
``n_classes``) for that particular output.
"""
check_is_fitted(self)
if not all([hasattr(estimator, "predict_proba")
for estimator in self.estimators_]):
raise AttributeError("The base estimator should "
"implement predict_proba method")
return self._predict_proba
def _predict_proba(self, X):
results = [estimator.predict_proba(X) for estimator in
self.estimators_]
return results
def score(self, X, y):
"""Returns the mean accuracy on the given test data and labels.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples
y : array-like of shape (n_samples, n_outputs)
True values for X
Returns
-------
scores : float
accuracy_score of self.predict(X) versus y
"""
check_is_fitted(self)
n_outputs_ = len(self.estimators_)
if y.ndim == 1:
raise ValueError("y must have at least two dimensions for "
"multi target classification but has only one")
if y.shape[1] != n_outputs_:
raise ValueError("The number of outputs of Y for fit {0} and"
" score {1} should be same".
format(n_outputs_, y.shape[1]))
y_pred = self.predict(X)
return np.mean(np.all(y == y_pred, axis=1))
def _more_tags(self):
# FIXME
return {'_skip_test': True}
class _BaseChain(BaseEstimator, metaclass=ABCMeta):
@_deprecate_positional_args
def __init__(self, base_estimator, *, order=None, cv=None,
random_state=None):
self.base_estimator = base_estimator
self.order = order
self.cv = cv
self.random_state = random_state
@abstractmethod
def fit(self, X, Y, **fit_params):
"""Fit the model to data matrix X and targets Y.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Y : array-like of shape (n_samples, n_classes)
The target values.
**fit_params : dict of string -> object
Parameters passed to the `fit` method of each step.
.. versionadded:: 0.23
Returns
-------
self : object
"""
X, Y = self._validate_data(X, Y, multi_output=True, accept_sparse=True)
random_state = check_random_state(self.random_state)
check_array(X, accept_sparse=True)
self.order_ = self.order
if isinstance(self.order_, tuple):
self.order_ = np.array(self.order_)
if self.order_ is None:
self.order_ = np.array(range(Y.shape[1]))
elif isinstance(self.order_, str):
if self.order_ == 'random':
self.order_ = random_state.permutation(Y.shape[1])
elif sorted(self.order_) != list(range(Y.shape[1])):
raise ValueError("invalid order")
self.estimators_ = [clone(self.base_estimator)
for _ in range(Y.shape[1])]
if self.cv is None:
Y_pred_chain = Y[:, self.order_]
if sp.issparse(X):
X_aug = sp.hstack((X, Y_pred_chain), format='lil')
X_aug = X_aug.tocsr()
else:
X_aug = np.hstack((X, Y_pred_chain))
elif sp.issparse(X):
Y_pred_chain = sp.lil_matrix((X.shape[0], Y.shape[1]))
X_aug = sp.hstack((X, Y_pred_chain), format='lil')
else:
Y_pred_chain = np.zeros((X.shape[0], Y.shape[1]))
X_aug = np.hstack((X, Y_pred_chain))
del Y_pred_chain
for chain_idx, estimator in enumerate(self.estimators_):
y = Y[:, self.order_[chain_idx]]
estimator.fit(X_aug[:, :(X.shape[1] + chain_idx)], y,
**fit_params)
if self.cv is not None and chain_idx < len(self.estimators_) - 1:
col_idx = X.shape[1] + chain_idx
cv_result = cross_val_predict(
self.base_estimator, X_aug[:, :col_idx],
y=y, cv=self.cv)
if sp.issparse(X_aug):
X_aug[:, col_idx] = np.expand_dims(cv_result, 1)
else:
X_aug[:, col_idx] = cv_result
return self
def predict(self, X):
"""Predict on the data matrix X using the ClassifierChain model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Returns
-------
Y_pred : array-like of shape (n_samples, n_classes)
The predicted values.
"""
check_is_fitted(self)
X = check_array(X, accept_sparse=True)
Y_pred_chain = np.zeros((X.shape[0], len(self.estimators_)))
for chain_idx, estimator in enumerate(self.estimators_):
previous_predictions = Y_pred_chain[:, :chain_idx]
if sp.issparse(X):
if chain_idx == 0:
X_aug = X
else:
X_aug = sp.hstack((X, previous_predictions))
else:
X_aug = np.hstack((X, previous_predictions))
Y_pred_chain[:, chain_idx] = estimator.predict(X_aug)
inv_order = np.empty_like(self.order_)
inv_order[self.order_] = np.arange(len(self.order_))
Y_pred = Y_pred_chain[:, inv_order]
return Y_pred
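# Worked example of the inverse-permutation step above (illustrative only):
# with order_ = [1, 3, 2, 0], chain column 0 holds predictions for output 1,
# column 1 for output 3, and so on.  inv_order is built so that
# inv_order[order_] == [0, 1, 2, 3], giving inv_order = [3, 0, 2, 1], and
# Y_pred_chain[:, inv_order] restores the original column order of Y.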
class ClassifierChain(MetaEstimatorMixin, ClassifierMixin, _BaseChain):
"""A multi-label model that arranges binary classifiers into a chain.
Each model makes a prediction in the order specified by the chain using
all of the available features provided to the model plus the predictions
of models that are earlier in the chain.
Read more in the :ref:`User Guide <classifierchain>`.
.. versionadded:: 0.19
Parameters
----------
base_estimator : estimator
The base estimator from which the classifier chain is built.
order : array-like of shape (n_outputs,) or 'random', default=None
If None, the order will be determined by the order of columns in
the label matrix Y.::
order = [0, 1, 2, ..., Y.shape[1] - 1]
The order of the chain can be explicitly set by providing a list of
integers. For example, for a chain of length 5.::
order = [1, 3, 2, 4, 0]
means that the first model in the chain will make predictions for
column 1 in the Y matrix, the second model will make predictions
for column 3, etc.
If order is 'random' a random ordering will be used.
cv : int, cross-validation generator or an iterable, default=None
Determines whether to use cross validated predictions or true
labels for the results of previous estimators in the chain.
Possible inputs for cv are:
- None, to use true labels when fitting,
- integer, to specify the number of folds in a (Stratified)KFold,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
random_state : int, RandomState instance or None, optional (default=None)
If ``order='random'``, determines random number generation for the
chain order.
In addition, it controls the random seed given at each `base_estimator`
at each chaining iteration. Thus, it is only used when `base_estimator`
exposes a `random_state`.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
classes_ : list
A list of arrays of length ``len(estimators_)`` containing the
class labels for each estimator in the chain.
estimators_ : list
A list of clones of base_estimator.
order_ : list
The order of labels in the classifier chain.
Examples
--------
>>> from sklearn.datasets import make_multilabel_classification
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.multioutput import ClassifierChain
>>> X, Y = make_multilabel_classification(
... n_samples=12, n_classes=3, random_state=0
... )
>>> X_train, X_test, Y_train, Y_test = train_test_split(
... X, Y, random_state=0
... )
>>> base_lr = LogisticRegression(solver='lbfgs', random_state=0)
>>> chain = ClassifierChain(base_lr, order='random', random_state=0)
>>> chain.fit(X_train, Y_train).predict(X_test)
array([[1., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
>>> chain.predict_proba(X_test)
array([[0.8387..., 0.9431..., 0.4576...],
[0.8878..., 0.3684..., 0.2640...],
[0.0321..., 0.9935..., 0.0625...]])
See Also
--------
RegressorChain : Equivalent for regression.
    MultiOutputClassifier : Classifies each output independently rather than
chaining.
References
----------
Jesse Read, Bernhard Pfahringer, Geoff Holmes, Eibe Frank, "Classifier
Chains for Multi-label Classification", 2009.
"""
def fit(self, X, Y):
"""Fit the model to data matrix X and targets Y.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Y : array-like of shape (n_samples, n_classes)
The target values.
Returns
-------
self : object
"""
super().fit(X, Y)
self.classes_ = [estimator.classes_
for chain_idx, estimator
in enumerate(self.estimators_)]
return self
@if_delegate_has_method('base_estimator')
def predict_proba(self, X):
"""Predict probability estimates.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Returns
-------
Y_prob : array-like of shape (n_samples, n_classes)
"""
X = check_array(X, accept_sparse=True)
Y_prob_chain = np.zeros((X.shape[0], len(self.estimators_)))
Y_pred_chain = np.zeros((X.shape[0], len(self.estimators_)))
for chain_idx, estimator in enumerate(self.estimators_):
previous_predictions = Y_pred_chain[:, :chain_idx]
if sp.issparse(X):
X_aug = sp.hstack((X, previous_predictions))
else:
X_aug = np.hstack((X, previous_predictions))
Y_prob_chain[:, chain_idx] = estimator.predict_proba(X_aug)[:, 1]
Y_pred_chain[:, chain_idx] = estimator.predict(X_aug)
inv_order = np.empty_like(self.order_)
inv_order[self.order_] = np.arange(len(self.order_))
Y_prob = Y_prob_chain[:, inv_order]
return Y_prob
@if_delegate_has_method('base_estimator')
def decision_function(self, X):
"""Evaluate the decision_function of the models in the chain.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
Y_decision : array-like of shape (n_samples, n_classes)
Returns the decision function of the sample for each model
in the chain.
"""
Y_decision_chain = np.zeros((X.shape[0], len(self.estimators_)))
Y_pred_chain = np.zeros((X.shape[0], len(self.estimators_)))
for chain_idx, estimator in enumerate(self.estimators_):
previous_predictions = Y_pred_chain[:, :chain_idx]
if sp.issparse(X):
X_aug = sp.hstack((X, previous_predictions))
else:
X_aug = np.hstack((X, previous_predictions))
Y_decision_chain[:, chain_idx] = estimator.decision_function(X_aug)
Y_pred_chain[:, chain_idx] = estimator.predict(X_aug)
inv_order = np.empty_like(self.order_)
inv_order[self.order_] = np.arange(len(self.order_))
Y_decision = Y_decision_chain[:, inv_order]
return Y_decision
def _more_tags(self):
return {'_skip_test': True,
'multioutput_only': True}
class RegressorChain(MetaEstimatorMixin, RegressorMixin, _BaseChain):
"""A multi-label model that arranges regressions into a chain.
Each model makes a prediction in the order specified by the chain using
all of the available features provided to the model plus the predictions
of models that are earlier in the chain.
Read more in the :ref:`User Guide <regressorchain>`.
.. versionadded:: 0.20
Parameters
----------
base_estimator : estimator
The base estimator from which the classifier chain is built.
order : array-like of shape (n_outputs,) or 'random', default=None
If None, the order will be determined by the order of columns in
the label matrix Y.::
order = [0, 1, 2, ..., Y.shape[1] - 1]
The order of the chain can be explicitly set by providing a list of
integers. For example, for a chain of length 5.::
order = [1, 3, 2, 4, 0]
means that the first model in the chain will make predictions for
column 1 in the Y matrix, the second model will make predictions
for column 3, etc.
If order is 'random' a random ordering will be used.
cv : int, cross-validation generator or an iterable, default=None
Determines whether to use cross validated predictions or true
labels for the results of previous estimators in the chain.
Possible inputs for cv are:
- None, to use true labels when fitting,
- integer, to specify the number of folds in a (Stratified)KFold,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
random_state : int, RandomState instance or None, optional (default=None)
If ``order='random'``, determines random number generation for the
chain order.
In addition, it controls the random seed given at each `base_estimator`
at each chaining iteration. Thus, it is only used when `base_estimator`
exposes a `random_state`.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
estimators_ : list
A list of clones of base_estimator.
order_ : list
The order of labels in the classifier chain.
Examples
--------
>>> from sklearn.multioutput import RegressorChain
>>> from sklearn.linear_model import LogisticRegression
>>> logreg = LogisticRegression(solver='lbfgs',multi_class='multinomial')
>>> X, Y = [[1, 0], [0, 1], [1, 1]], [[0, 2], [1, 1], [2, 0]]
>>> chain = RegressorChain(base_estimator=logreg, order=[0, 1]).fit(X, Y)
>>> chain.predict(X)
array([[0., 2.],
[1., 1.],
[2., 0.]])
See Also
--------
ClassifierChain : Equivalent for classification.
    MultiOutputRegressor : Learns each output independently rather than
chaining.
"""
def fit(self, X, Y, **fit_params):
"""Fit the model to data matrix X and targets Y.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Y : array-like of shape (n_samples, n_classes)
The target values.
**fit_params : dict of string -> object
Parameters passed to the `fit` method at each step
of the regressor chain.
.. versionadded:: 0.23
Returns
-------
self : object
"""
super().fit(X, Y, **fit_params)
return self
def _more_tags(self):
return {'multioutput_only': True}
| bsd-3-clause | 5,233,138,792,602,842,000 | 34.823877 | 79 | 0.586201 | false |
UMD-DRASTIC/drastic | tests/test_metadata_validation.py | 1 | 2399 | import unittest
import StringIO
from drastic.metadata import (MetadataValidator,
get_resource_validator,
get_collection_validator)
from nose.tools import raises
TEST_RULES = [
{
"name": "required-field",
"required": True,
},
{
"name": "required-choice",
"required": True,
"choices": ["a", "b"] # Non-optional constrained choices
},
{
"name": "non-required-choice",
"required": False,
"choices": ["a", "b"] # Optional constrained choices
},
]
def rules_as_fileobj():
import json
return StringIO.StringIO(json.dumps({"resources":TEST_RULES, "collections":TEST_RULES}))
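# For reference, judging from the tests below: an empty metadata dict passes,
# a "required-field" that is present but empty fails, and any value outside
# ["a", "b"] fails for both choice fields (the non-required one may simply be
# left empty or omitted).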
class MetadataValidationTest(unittest.TestCase):
_multiprocess_can_split_ = True
def test_validation_empty(self):
m = MetadataValidator([])
ok, errs = m.validate({})
assert ok == True
assert errs == []
def test_validation_empty_rules(self):
m = MetadataValidator([])
ok, errs = m.validate({"field": "value"})
assert ok == True
assert errs == []
def test_validation_empty_input(self):
m = get_resource_validator(rules_as_fileobj())
ok, errs = m.validate({})
assert ok == True
assert errs == []
def test_failing_requires(self):
m = get_resource_validator(rules_as_fileobj())
ok, errs = m.validate({"required-field": ""})
assert ok == False
assert len(errs) == 1
def test_failing_choice(self):
m = get_resource_validator(rules_as_fileobj())
ok, errs = m.validate({"required-choice": "z"})
assert ok == False
assert len(errs) == 1
def test_failing_choice_empty(self):
m = get_resource_validator(rules_as_fileobj())
ok, errs = m.validate({"required-choice": ""})
assert ok == False
assert len(errs) == 1
def test_failing_non_required_choice(self):
m = get_resource_validator(rules_as_fileobj())
ok, errs = m.validate({"non-required-choice": "z"})
assert ok == False
assert len(errs) == 1
def test_failing_non_required_choice_empty(self):
m = get_resource_validator(rules_as_fileobj())
ok, errs = m.validate({"non-required-choice": ""})
assert ok == True
assert len(errs) == 0
| agpl-3.0 | -4,341,217,662,852,642,000 | 28.256098 | 92 | 0.571071 | false |
stefanv/scipy3 | scipy/misc/helpmod.py | 2 | 7208 | import inspect
import types
import sys
import pydoc
__all__ = ['info','source']
# NOTE: pydoc defines a help function which works simliarly to this
# except it uses a pager to take over the screen.
# combine name and arguments and split to multiple lines of
# width characters. End lines on a comma and begin argument list
# indented with the rest of the arguments.
def split_line(name, arguments, width):
firstwidth = len(name)
k = firstwidth
newstr = name
sepstr = ", "
arglist = arguments.split(sepstr)
for argument in arglist:
if k == firstwidth:
addstr = ""
else:
addstr = sepstr
k = k + len(argument) + len(addstr)
if k > width:
k = firstwidth + 1 + len(argument)
newstr = newstr + ",\n" + " "*(firstwidth+2) + argument
else:
newstr = newstr + addstr + argument
return newstr
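# Rough example of the wrapping behaviour: split_line("polyfit",
# "(x, y, N, rcond=2e-16, full=0)", 20) keeps "polyfit(x, y, N," on the first
# line and continues each remaining argument on a new line indented to just
# past the function name.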
_namedict = None
_dictlist = None
# Traverse all module directories underneath globals to see if something is defined
def makenamedict():
import scipy
thedict = {'scipy':scipy.__dict__}
dictlist = ['scipy']
totraverse = [scipy.__dict__]
while 1:
if len(totraverse) == 0:
break
thisdict = totraverse.pop(0)
for x in thisdict.keys():
if isinstance(thisdict[x],types.ModuleType):
modname = thisdict[x].__name__
if modname not in dictlist:
moddict = thisdict[x].__dict__
dictlist.append(modname)
totraverse.append(moddict)
thedict[modname] = moddict
return thedict, dictlist
def info(object=None,maxwidth=76,output=sys.stdout,):
"""Get help information for a function, class, or module.
Example:
>>> from scipy import *
>>> info(polyval)
polyval(p, x)
Evaluate the polymnomial p at x.
Description:
If p is of length N, this function returns the value:
p[0]*(x**N-1) + p[1]*(x**N-2) + ... + p[N-2]*x + p[N-1]
"""
global _namedict, _dictlist
if hasattr(object,'_ppimport_importer') or \
hasattr(object, '_ppimport_module'):
object = object._ppimport_module
elif hasattr(object, '_ppimport_attr'):
object = object._ppimport_attr
if object is None:
info(info)
elif isinstance(object, types.StringType):
if _namedict is None:
_namedict, _dictlist = makenamedict()
numfound = 0
objlist = []
for namestr in _dictlist:
try:
obj = _namedict[namestr][object]
if id(obj) in objlist:
print >> output, "\n *** Repeat reference found in %s *** " % namestr
else:
objlist.append(id(obj))
print >> output, " *** Found in %s ***" % namestr
info(obj)
print >> output, "-"*maxwidth
numfound += 1
except KeyError:
pass
if numfound == 0:
print >> output, "Help for %s not found." % object
else:
print >> output, "\n *** Total of %d references found. ***" % numfound
elif inspect.isfunction(object):
name = object.func_name
arguments = apply(inspect.formatargspec, inspect.getargspec(object))
if len(name+arguments) > maxwidth:
argstr = split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print >> output, " " + argstr + "\n"
print >> output, inspect.getdoc(object)
elif inspect.isclass(object):
name = object.__name__
if hasattr(object, '__init__'):
arguments = apply(inspect.formatargspec, inspect.getargspec(object.__init__.im_func))
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
else:
arguments = "()"
else:
arguments = "()"
if len(name+arguments) > maxwidth:
argstr = split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print >> output, " " + argstr + "\n"
doc1 = inspect.getdoc(object)
if doc1 is None:
if hasattr(object,'__init__'):
print >> output, inspect.getdoc(object.__init__)
else:
print >> output, inspect.getdoc(object)
methods = pydoc.allmethods(object)
if methods != []:
print >> output, "\n\nMethods:\n"
for meth in methods:
if meth[0] == '_':
continue
thisobj = getattr(object, meth, None)
if thisobj is not None:
methstr, other = pydoc.splitdoc(inspect.getdoc(thisobj) or "None")
print >> output, " %s -- %s" % (meth, methstr)
elif type(object) is types.InstanceType: ## check for __call__ method
print >> output, "Instance of class: ", object.__class__.__name__
print >> output
if hasattr(object, '__call__'):
arguments = apply(inspect.formatargspec, inspect.getargspec(object.__call__.im_func))
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
else:
arguments = "()"
if hasattr(object,'name'):
name = "%s" % object.name
else:
name = "<name>"
if len(name+arguments) > maxwidth:
argstr = split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print >> output, " " + argstr + "\n"
doc = inspect.getdoc(object.__call__)
if doc is not None:
print >> output, inspect.getdoc(object.__call__)
print >> output, inspect.getdoc(object)
else:
print >> output, inspect.getdoc(object)
elif inspect.ismethod(object):
name = object.__name__
arguments = apply(inspect.formatargspec, inspect.getargspec(object.im_func))
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
else:
arguments = "()"
if len(name+arguments) > maxwidth:
argstr = split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print >> output, " " + argstr + "\n"
print >> output, inspect.getdoc(object)
elif hasattr(object, '__doc__'):
print >> output, inspect.getdoc(object)
def source(object, output=sys.stdout):
"""Write source for this object to output.
"""
try:
print >> output, "In file: %s\n" % inspect.getsourcefile(object)
print >> output, inspect.getsource(object)
except:
print >> output, "Not available for this object."
| bsd-3-clause | 4,891,364,221,481,810,000 | 32.840376 | 97 | 0.525666 | false |
rhcarvalho/kombu | kombu/transport/mongodb.py | 3 | 9762 | """
kombu.transport.mongodb
=======================
MongoDB transport.
:copyright: (c) 2010 - 2013 by Flavio Percoco Premoli.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import pymongo
from pymongo import errors
from pymongo import MongoClient, uri_parser
from kombu.five import Empty
from kombu.syn import _detect_environment
from kombu.utils.encoding import bytes_to_str
from kombu.utils.json import loads, dumps
from . import virtual
DEFAULT_HOST = '127.0.0.1'
DEFAULT_PORT = 27017
DEFAULT_MESSAGES_COLLECTION = 'messages'
DEFAULT_ROUTING_COLLECTION = 'messages.routing'
DEFAULT_BROADCAST_COLLECTION = 'messages.broadcast'
class BroadcastCursor(object):
"""Cursor for broadcast queues."""
def __init__(self, cursor):
self._cursor = cursor
self.purge(rewind=False)
def get_size(self):
return self._cursor.count() - self._offset
def close(self):
self._cursor.close()
def purge(self, rewind=True):
if rewind:
self._cursor.rewind()
# Fast forward the cursor past old events
self._offset = self._cursor.count()
self._cursor = self._cursor.skip(self._offset)
def __iter__(self):
return self
def __next__(self):
while True:
try:
msg = next(self._cursor)
except pymongo.errors.OperationFailure as exc:
# In some cases tailed cursor can become invalid
                # and have to be reinitialized
if 'not valid at server' in exc.message:
self.purge()
continue
raise
else:
break
self._offset += 1
return msg
next = __next__
class Channel(virtual.Channel):
_client = None
supports_fanout = True
_fanout_queues = {}
def __init__(self, *vargs, **kwargs):
super(Channel, self).__init__(*vargs, **kwargs)
self._broadcast_cursors = {}
# Evaluate connection
self._create_client()
def _new_queue(self, queue, **kwargs):
pass
def _get(self, queue):
if queue in self._fanout_queues:
try:
msg = next(self.get_broadcast_cursor(queue))
except StopIteration:
msg = None
else:
msg = self.get_messages().find_and_modify(
query={'queue': queue},
sort=[('priority', pymongo.ASCENDING),
('_id', pymongo.ASCENDING)],
remove=True,
)
if msg is None:
raise Empty()
return loads(bytes_to_str(msg['payload']))
def _size(self, queue):
if queue in self._fanout_queues:
return self.get_broadcast_cursor(queue).get_size()
return self.get_messages().find({'queue': queue}).count()
def _put(self, queue, message, **kwargs):
self.get_messages().insert({
'payload': dumps(message),
'queue': queue,
'priority': self._get_message_priority(message, reverse=True),
})
def _purge(self, queue):
size = self._size(queue)
if queue in self._fanout_queues:
            self.get_broadcast_cursor(queue).purge()
else:
self.get_messages().remove({'queue': queue})
return size
def _parse_uri(self, scheme='mongodb://'):
# See mongodb uri documentation:
# http://docs.mongodb.org/manual/reference/connection-string/
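        # Illustrative example (added comment, values are made up): with hostname
        # "localhost", userid "user" and password "pw", the string handed to
        # uri_parser.parse_uri() below becomes "mongodb://user:pw@localhost" with
        # the default port 27017; the database name then falls back to the
        # client's virtual_host (or "kombu_default").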
client = self.connection.client
hostname = client.hostname
if not hostname.startswith(scheme):
hostname = scheme + hostname
if not hostname[len(scheme):]:
hostname += DEFAULT_HOST
if client.userid and '@' not in hostname:
head, tail = hostname.split('://')
credentials = client.userid
if client.password:
credentials += ':' + client.password
hostname = head + '://' + credentials + '@' + tail
port = client.port if client.port is not None else DEFAULT_PORT
parsed = uri_parser.parse_uri(hostname, port)
dbname = parsed['database'] or client.virtual_host
if dbname in ('/', None):
dbname = 'kombu_default'
options = {
'auto_start_request': True,
'ssl': client.ssl,
'connectTimeoutMS': (int(client.connect_timeout * 1000)
if client.connect_timeout else None),
}
options.update(client.transport_options)
options.update(parsed['options'])
return hostname, dbname, options
def _open(self, scheme='mongodb://'):
hostname, dbname, options = self._parse_uri(scheme=scheme)
mongoconn = MongoClient(
host=hostname, ssl=options['ssl'],
auto_start_request=options['auto_start_request'],
connectTimeoutMS=options['connectTimeoutMS'],
use_greenlets=_detect_environment() != 'default',
)
database = mongoconn[dbname]
version = mongoconn.server_info()['version']
if tuple(map(int, version.split('.')[:2])) < (1, 3):
raise NotImplementedError(
'Kombu requires MongoDB version 1.3+ (server is {0})'.format(
version))
self._create_broadcast(database, options)
self._client = database
def _create_broadcast(self, database, options):
'''Create capped collection for broadcast messages.'''
if DEFAULT_BROADCAST_COLLECTION in database.collection_names():
return
capsize = options.get('capped_queue_size') or 100000
database.create_collection(DEFAULT_BROADCAST_COLLECTION,
size=capsize, capped=True)
def _ensure_indexes(self):
'''Ensure indexes on collections.'''
self.get_messages().ensure_index(
[('queue', 1), ('priority', 1), ('_id', 1)], background=True,
)
self.get_broadcast().ensure_index([('queue', 1)])
self.get_routing().ensure_index([('queue', 1), ('exchange', 1)])
def get_table(self, exchange):
"""Get table of bindings for ``exchange``."""
# TODO Store a more complete exchange metatable in the
# routing collection
localRoutes = frozenset(self.state.exchanges[exchange]['table'])
brokerRoutes = self.get_messages().routing.find(
{'exchange': exchange}
)
return localRoutes | frozenset((r['routing_key'],
r['pattern'],
r['queue']) for r in brokerRoutes)
def _put_fanout(self, exchange, message, routing_key, **kwargs):
"""Deliver fanout message."""
self.get_broadcast().insert({'payload': dumps(message),
'queue': exchange})
def _queue_bind(self, exchange, routing_key, pattern, queue):
if self.typeof(exchange).type == 'fanout':
self.create_broadcast_cursor(exchange, routing_key, pattern, queue)
self._fanout_queues[queue] = exchange
meta = {'exchange': exchange,
'queue': queue,
'routing_key': routing_key,
'pattern': pattern}
self.get_routing().update(meta, meta, upsert=True)
def queue_delete(self, queue, **kwargs):
self.get_routing().remove({'queue': queue})
super(Channel, self).queue_delete(queue, **kwargs)
if queue in self._fanout_queues:
try:
cursor = self._broadcast_cursors.pop(queue)
except KeyError:
pass
else:
cursor.close()
self._fanout_queues.pop(queue)
def _create_client(self):
self._open()
self._ensure_indexes()
@property
def client(self):
if self._client is None:
self._create_client()
return self._client
def get_messages(self):
return self.client[DEFAULT_MESSAGES_COLLECTION]
def get_routing(self):
return self.client[DEFAULT_ROUTING_COLLECTION]
def get_broadcast(self):
return self.client[DEFAULT_BROADCAST_COLLECTION]
def get_broadcast_cursor(self, queue):
try:
return self._broadcast_cursors[queue]
except KeyError:
# Cursor may be absent when Channel created more than once.
# _fanout_queues is a class-level mutable attribute so it's
# shared over all Channel instances.
return self.create_broadcast_cursor(
self._fanout_queues[queue], None, None, queue,
)
def create_broadcast_cursor(self, exchange, routing_key, pattern, queue):
cursor = self.get_broadcast().find(
query={'queue': exchange},
sort=[('$natural', 1)],
tailable=True,
)
ret = self._broadcast_cursors[queue] = BroadcastCursor(cursor)
return ret
class Transport(virtual.Transport):
Channel = Channel
can_parse_url = True
polling_interval = 1
default_port = DEFAULT_PORT
connection_errors = (
virtual.Transport.connection_errors + (errors.ConnectionFailure, )
)
channel_errors = (
virtual.Transport.channel_errors + (
errors.ConnectionFailure,
errors.OperationFailure)
)
driver_type = 'mongodb'
driver_name = 'pymongo'
implements = virtual.Transport.implements.extend(
exchange_types=frozenset(['direct', 'topic', 'fanout']),
)
def driver_version(self):
return pymongo.version
| bsd-3-clause | -7,842,946,632,299,698,000 | 29.22291 | 79 | 0.57017 | false |
nikitakurylev/TuxemonX | tuxemon/core/components/event/__init__.py | 4 | 5934 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Tuxemon
# Copyright (C) 2014, William Edwards <[email protected]>,
# Benjamin Bean <[email protected]>
#
# This file is part of Tuxemon.
#
# Tuxemon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tuxemon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Tuxemon. If not, see <http://www.gnu.org/licenses/>.
#
# Contributor(s):
#
# William Edwards <[email protected]>
#
#
# core.components.event Game events module.
#
#
import logging
import traceback
import pygame
from collections import namedtuple
from core import prepare
from core.components import plugin
# Create a logger for optional handling of debug messages.
logger = logging.getLogger(__name__)
logger.debug("%s successfully imported" % __name__)
# Set up action and condition objects
condition_fields = [
"type",
"parameters",
"x",
"y",
"width",
"height",
"operator"]
action_fields = [
"type",
"parameters"]
Condition = namedtuple("condition", condition_fields)
Action = namedtuple("action", action_fields)
class EventEngine(object):
"""A class for the event engine. The event engine checks to see if a group of conditions have
been met and then executes a set of actions.
"""
def __init__(self):
# Load all the available conditions and actions as plugins.
condition_plugins = plugin.load_directory(prepare.BASEDIR + "core/components/event/conditions")
self.conditions = plugin.get_available_methods(condition_plugins)
action_plugins = plugin.load_directory(prepare.BASEDIR + "core/components/event/actions")
self.actions = plugin.get_available_methods(action_plugins)
self.name = "Event"
self.current_map = None
self.state = "running"
self.timer = 0.0
self.wait = 0.0
self.button = None
def check_conditions(self, game, dt):
"""Checks a list of conditions to see if any of them have been met.
:param game: The main game object that contains all the game's variables.
:param game.event_conditions: The multi-dimensional list of conditions to check for. See
:py:func:`core.components.map.Map.loadevents` to see the format of the list.
:param dt: Amount of time passed in seconds since last frame.
:type game: core.control.Control
:type game.event_conditions: List
:type dt: Float
:rtype: None
:returns: None
"""
if self.state == "running":
for e in game.events:
should_run = True
# If any conditions fail, the event should not be run
for cond in e['conds']:
# Conditions have so-called "operators". If a condition's operator == "is" then
# the condition should be processed as usual.
                    # However, if the condition's operator != "is", the result should be inverted.
# The following line implements this.
# I am not satisfied with the clarity of this line, so if anyone can express this better,
# please change it.
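                    # Illustrative truth table for the should_run assignment below
                    # (added comment):
                    #   check True,  operator "is"                 -> should_run True
                    #   check True,  anything else (e.g. "is_not") -> should_run False
                    #   check False, anything else (e.g. "is_not") -> should_run True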
if not self.state == "running":
return
check_condition = self.conditions[cond.type]['method']
should_run = (check_condition(game, cond) == (cond.operator == 'is'))
if not should_run:
break
if should_run:
self.execute_action(e['acts'], game)
elif self.state == "waiting":
if self.timer >= self.wait:
self.state = "running"
self.timer = 0.0
else:
self.timer += dt
logger.debug("Waiting %s seconds to resume event engine..." % str(self.wait - self.timer))
elif self.state == "waiting for input":
if not self.button:
self.state = "running"
return
def process_event(self, event):
# NOTE: getattr on pygame is a little dangerous. We should sanitize input.
if self.button and event.type == pygame.KEYUP and event.key == getattr(pygame, self.button):
self.state = "running"
self.button = None
return None
return event
def execute_action(self, action_list, game):
"""Executes a particular action in a list of actions.
:param action_list: A list of actions fetched from the database.
:param game: The main game object that contains all the game's variables.
:type action_list: List
:type game: core.control.Control
Here is an example of what an action list might look like:
>>> action_list
[<class 'core.components.map.action'>, <class 'core.components.map.action'>]
:rtype: None
:returns: None
"""
logger.debug("Executing Action")
# Loop through the list of actions and execute them
for action in action_list:
# Call the method listed and return the modified event data
try:
self.actions[action.type]["method"](game, action)
except Exception as message:
error = 'Error: Action method "%s" not implemented' % str(action.type)
logger.error(error)
logger.error(message)
traceback.print_exc()
| gpl-3.0 | 3,626,261,277,444,036,000 | 33.103448 | 109 | 0.610212 | false |
StackStorm/st2 | st2common/st2common/models/db/runner.py | 3 | 2967 | # Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import mongoengine as me
from st2common import log as logging
from st2common.models.db import MongoDBAccess
from st2common.models.db import stormbase
from st2common.constants.types import ResourceType
__all__ = [
"RunnerTypeDB",
]
LOG = logging.getLogger(__name__)
PACK_SEPARATOR = "."
class RunnerTypeDB(stormbase.StormBaseDB, stormbase.UIDFieldMixin):
"""
    The representation of a RunnerType in the system. A RunnerType
has a one-to-one mapping to a particular ActionRunner implementation.
Attributes:
id: See StormBaseAPI
name: See StormBaseAPI
description: See StormBaseAPI
enabled: A flag indicating whether the runner for this type is enabled.
runner_module: The python module that implements the action runner for this type.
runner_parameters: The specification for parameters for the action runner.
"""
RESOURCE_TYPE = ResourceType.RUNNER_TYPE
UID_FIELDS = ["name"]
enabled = me.BooleanField(
required=True,
default=True,
help_text="A flag indicating whether the runner for this type is enabled.",
)
runner_package = me.StringField(
required=False,
help_text=(
"The python package that implements the action runner for this type. If"
"not provided it assumes package name equals module name."
),
)
runner_module = me.StringField(
required=True,
help_text="The python module that implements the action runner for this type.",
)
runner_parameters = me.DictField(
help_text="The specification for parameters for the action runner."
)
output_key = me.StringField(
help_text="Default key to expect results to be published to."
)
output_schema = me.DictField(help_text="The schema for runner output.")
query_module = me.StringField(
required=False,
help_text="The python module that implements the query module for this runner.",
)
meta = {"indexes": stormbase.UIDFieldMixin.get_indexes()}
def __init__(self, *args, **values):
super(RunnerTypeDB, self).__init__(*args, **values)
self.uid = self.get_uid()
# specialized access objects
runnertype_access = MongoDBAccess(RunnerTypeDB)
MODELS = [RunnerTypeDB]
| apache-2.0 | -7,336,859,662,917,291,000 | 32.337079 | 89 | 0.699023 | false |
digineo/gluon-autoflasher | lib/gluon/models.py | 1 | 1389 | # coding: utf-8
import os
import re
import requests
from . import *
models = loadYAML("lib/gluon/models.yml")
imageDir = os.path.join(baseDir, "images")
# Returns the local path to the image
def get(model):
fwConfig = config["firmware"]
filename = "%s-%s.bin" % (fwConfig["prefix"], model)
url = "%s%s" % (fwConfig["url"], filename)
path = os.path.join(imageDir, filename)
if not os.path.exists(path):
write("Downloading %s ..." % url)
tmpfile = "%s.tmp" % path
# Do the request
response = requests.get(url, stream=True)
if response.status_code != requests.codes.ok:
response.raise_for_status()
# Save the response
with open(tmpfile, 'wb') as f:
for chunk in response.iter_content(chunk_size=1024*256):
f.write(chunk)
write(".")
# Rename tempfile to target
os.rename(tmpfile, path)
print " ✓"
return path
# Extracts the model from the HTML code
def extract(html):
match = re.compile("(WD?R\d+[A-Z]*|Archer C\d) v\d+").search(html)
if match == None:
#with open('unknown-model.html', 'w') as f:
# f.write(html)
raise UnknownModel("Unable to extract model information")
model = match.group(0)
name = models.get(model, None)
if name != None:
return name
else:
raise UnsupportedModel("Unsupported model: %s\nPlease add it to lib/gluon/models.yml" % model)
| agpl-3.0 | -9,028,988,125,066,911,000 | 25.673077 | 98 | 0.63951 | false |
G33kDude/ev3dev-web-remote | keyboard.py | 1 | 3641 | import evdev
e = evdev.ecodes
js_map = {
0x08: e.KEY_BACKSPACE, # BACKSPACE
0x09: e.KEY_TAB, # TAB
0x0D: e.KEY_ENTER, # ENTER
0x10: e.KEY_LEFTSHIFT, # SHIFT
0x11: e.KEY_LEFTCTRL, # CTRL
0x12: e.KEY_LEFTALT, # ALT
0x13: e.KEY_PAUSE, # PAUSE
0x14: e.KEY_CAPSLOCK, # CAPS_LOCK
0x1B: e.KEY_ESC, # ESC
0x20: e.KEY_SPACE, # SPACE
0x21: e.KEY_PAGEUP, # PAGE_UP # also NUM_NORTH_EAST
    0x22: e.KEY_PAGEDOWN, # PAGE_DOWN # also NUM_SOUTH_EAST
0x23: e.KEY_END, # END # also NUM_SOUTH_WEST
0x24: e.KEY_HOME, # HOME # also NUM_NORTH_WEST
0x25: e.KEY_LEFT, # LEFT # also NUM_WEST
0x26: e.KEY_UP, # UP # also NUM_NORTH
0x27: e.KEY_RIGHT, # RIGHT # also NUM_EAST
0x28: e.KEY_DOWN, # DOWN # also NUM_SOUTH
0x2D: e.KEY_INSERT, # INSERT # also NUM_INSERT
0x2E: e.KEY_DELETE, # DELETE # also NUM_DELETE
0x30: e.KEY_0, # ZERO
0x31: e.KEY_1, # ONE
0x32: e.KEY_2, # TWO
0x33: e.KEY_3, # THREE
0x34: e.KEY_4, # FOUR
0x35: e.KEY_5, # FIVE
0x36: e.KEY_6, # SIX
0x37: e.KEY_7, # SEVEN
0x38: e.KEY_8, # EIGHT
0x39: e.KEY_9, # NINE
0x41: e.KEY_A, # A
0x42: e.KEY_B, # B
0x43: e.KEY_C, # C
0x44: e.KEY_D, # D
0x45: e.KEY_E, # E
0x46: e.KEY_F, # F
0x47: e.KEY_G, # G
0x48: e.KEY_H, # H
0x49: e.KEY_I, # I
0x4A: e.KEY_J, # J
0x4B: e.KEY_K, # K
0x4C: e.KEY_L, # L
0x4D: e.KEY_M, # M
0x4E: e.KEY_N, # N
0x4F: e.KEY_O, # O
0x50: e.KEY_P, # P
0x51: e.KEY_Q, # Q
0x52: e.KEY_R, # R
0x53: e.KEY_S, # S
0x54: e.KEY_T, # T
0x55: e.KEY_U, # U
0x56: e.KEY_V, # V
0x57: e.KEY_W, # W
0x58: e.KEY_X, # X
0x59: e.KEY_Y, # Y
0x5A: e.KEY_Z, # Z
0x5B: e.KEY_LEFTMETA, # META # WIN_KEY_LEFT
0x5C: e.KEY_RIGHTMETA, # WIN_KEY_RIGHT
0x60: e.KEY_KP0, # NUM_ZERO
0x61: e.KEY_KP1, # NUM_ONE
0x62: e.KEY_KP2, # NUM_TWO
0x63: e.KEY_KP3, # NUM_THREE
0x64: e.KEY_KP4, # NUM_FOUR
0x65: e.KEY_KP5, # NUM_FIVE
0x66: e.KEY_KP6, # NUM_SIX
0x67: e.KEY_KP7, # NUM_SEVEN
0x68: e.KEY_KP8, # NUM_EIGHT
0x69: e.KEY_KP9, # NUM_NINE
0x6A: e.KEY_KPASTERISK, # NUM_MULTIPLY
0x6B: e.KEY_KPPLUS, # NUM_PLUS
0x6D: e.KEY_KPMINUS, # NUM_MINUS
0x6E: e.KEY_KPDOT, # NUM_PERIOD
0x6F: e.KEY_KPSLASH, # NUM_DIVISION
0x70: e.KEY_F1, # F1
0x71: e.KEY_F2, # F2
0x72: e.KEY_F3, # F3
0x73: e.KEY_F4, # F4
0x74: e.KEY_F5, # F5
0x75: e.KEY_F6, # F6
0x76: e.KEY_F7, # F7
0x77: e.KEY_F8, # F8
0x78: e.KEY_F9, # F9
0x79: e.KEY_F10, # F10
0x7A: e.KEY_F11, # F11
0x7B: e.KEY_F12, # F12
0x90: e.KEY_NUMLOCK, # NUMLOCK
0x91: e.KEY_SCROLLLOCK, # SCROLL_LOCK
0xBA: e.KEY_SEMICOLON, # SEMICOLON
0xBC: e.KEY_COMMA, # COMMA
0xBE: e.KEY_DOT, # PERIOD
0xBF: e.KEY_SLASH, # SLASH
0xC0: e.KEY_GRAVE, # APOSTROPHE
0xDE: e.KEY_APOSTROPHE, # SINGLE_QUOTE
0xDB: e.KEY_LEFTBRACE, # OPEN_SQUARE_BRACKET
0xDC: e.KEY_BACKSLASH, # BACKSLASH
0xDD: e.KEY_RIGHTBRACE, # CLOSE_SQUARE_BRACKET
}
class Keyboard:
def __init__(self):
self.uinput = evdev.UInput()
def close(self):
self.uinput.close()
def send_key(self, js_keycode, state):
self.uinput.write(evdev.ecodes.EV_KEY, js_map[js_keycode], 1 if state else 0)
self.uinput.syn()
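# Illustrative usage sketch (added example, not part of the original module):
# forwards one browser keycode to the virtual uinput device. Running it needs
# permission to open /dev/uinput; the keycode 0x41 ("A") is only an example.
if __name__ == '__main__':
    kb = Keyboard()
    try:
        kb.send_key(0x41, True)   # key down
        kb.send_key(0x41, False)  # key up
    finally:
        kb.close()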
| mit | 1,377,887,666,387,933,700 | 32.1 | 79 | 0.514968 | false |
iohannez/gnuradio | gr-qtgui/python/qtgui/qa_qtgui.py | 7 | 2791 | #!/usr/bin/env python
#
# Copyright 2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, qtgui
class test_qtgui(gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
# Tests to make sure we can instantiate the sink.
# We use '5' in place of filter.firdes.WIN_BLACKMAN_hARRIS so we
# don't have to worry about importing filter just for this one
# constant.
def test01(self):
self.qtsnk = qtgui.sink_c(1024, 5,
0, 1, "Test",
True, True, True, True)
def test02(self):
self.qtsnk = qtgui.sink_f(1024, 5,
0, 1, "Test",
True, True, True, True)
def test03(self):
self.qtsnk = qtgui.time_sink_c(1024, 1, "Test", 1)
def test04(self):
self.qtsnk = qtgui.time_sink_f(1024, 1, "Test", 1)
def test05(self):
self.qtsnk = qtgui.freq_sink_c(1024, 5,
0, 1, "Test", 1)
def test06(self):
self.qtsnk = qtgui.freq_sink_f(1024, 5,
0, 1, "Test", 1)
def test07(self):
self.qtsnk = qtgui.waterfall_sink_c(1024, 5,
0, 1, "Test")
def test08(self):
self.qtsnk = qtgui.waterfall_sink_f(1024, 5,
0, 1, "Test")
def test09(self):
self.qtsnk = qtgui.const_sink_c(1024, "Test", 1)
def test10(self):
self.qtsnk = qtgui.time_raster_sink_b(1024, 100, 100.5,
[], [], "Test", 1)
def test11(self):
self.qtsnk = qtgui.time_raster_sink_f(1024, 100, 100.5,
[], [], "Test", 1)
def test12(self):
self.qtsnk = qtgui.histogram_sink_f(1024, 100, -1, 1, "Test", 1)
if __name__ == '__main__':
gr_unittest.run(test_qtgui, "test_qtgui.xml")
| gpl-3.0 | -654,642,042,566,068,500 | 31.835294 | 72 | 0.551774 | false |
tickbox-smc-ltd/xfero | src/xfero/db/create_XFERO_DB.py | 1 | 10418 | #!/usr/bin/env python
'''
XFERO Database creation
'''
import sqlite3 as lite
import sys
from xfero import get_conf as get_conf
def create_db():
'''
**Purpose:**
This script has been developed for System Administrators to create the
    Database and associated tables for XFERO.
**Usage Notes:**
This script will DROP any pre-existing tables prior to creation. So, beware
when using.
    This script will report "XFERO Database created!" when the
    database has been successfully created.
Any errors that occur during the creation of the database will be reported
to the command line.
*Example usage:*
```create_db()``
:param none: This script takes no parameters
:returns: This script does not return any values
**Unit Test Module:** test_create_db.py
*External dependencies*
    xfero
    get_conf (xfero.db.create_XFERO_DB)
+------------+-------------+-----------------------------------------------+
| Date | Author | Change Details |
+============+=============+===============================================+
| 02/07/2013 | Chris Falck | Created |
+------------+-------------+-----------------------------------------------+
| 08/01/2014 | Chris Falck | Added error trapping |
+------------+-------------+-----------------------------------------------+
| 04/05/2014 | Chris Falck | Added new column 'priority_worker_threads' to |
| | | create Priority table. |
| | | Added new column 'workflow_item_class' to hold|
| | | class details - OO conversion |
+------------+-------------+-----------------------------------------------+
| 12/05/2014 | Chris Falck | Added new column 'xfer_delsrc' to |
| | | create_XFERO_Xfer table. |
+------------+-------------+-----------------------------------------------+
| 16/10/2014 | Chris Falck | Modified the creation of Priority and Control |
| | | table |
+------------+-------------+-----------------------------------------------+
| 28/04/2015 | Chris Falck | Added support for eNDI and UTM to the Partner |
| | | table |
+------------+-------------+-----------------------------------------------+
'''
try:
        (xfero_logger, xfero_database, outbound_directory, transient_directory,
         error_directory, xfero_pid) = get_conf.get_xfero_config()
except Exception as err:
print('Cannot get XFERO Config: %s', err)
sys.exit(err)
dbase = xfero_database
try:
con = lite.connect(dbase)
with con:
cur = con.cursor()
cur.execute("DROP TABLE IF EXISTS XFERO_Function")
cur.execute("DROP TABLE IF EXISTS XFERO_COTS_Pattern")
cur.execute("DROP TABLE IF EXISTS XFERO_AV_Pattern")
cur.execute("DROP TABLE IF EXISTS XFERO_Priority")
cur.execute("DROP TABLE IF EXISTS XFERO_Route")
cur.execute("DROP TABLE IF EXISTS XFERO_Xfer")
cur.execute("DROP TABLE IF EXISTS XFERO_Workflow_Item")
cur.execute("DROP TABLE IF EXISTS XFERO_Scheduled_Task")
cur.execute("DROP TABLE IF EXISTS XFERO_Control")
cur.execute("DROP TABLE IF EXISTS XFERO_Partner")
cur.execute("DROP INDEX IF EXISTS routeindex")
cur.execute("DROP INDEX IF EXISTS xferindex")
cur.execute("DROP INDEX IF EXISTS workflowindex")
cur.execute("DROP INDEX IF EXISTS xfercotspatternindex")
cur.execute(
"CREATE TABLE XFERO_Function \
(function_id INTEGER NOT NULL PRIMARY KEY, \
function_name TEXT NOT NULL, \
function_class TEXT NULL, \
function_description TEXT NULL, \
function_prototype TEXT NOT NULL);")
cur.execute(
"CREATE TABLE XFERO_Partner \
(partner_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, \
partner_service_name TEXT NOT NULL, \
partner_service_description TEXT NULL, \
partner_COTS_type TEXT NOT NULL, \
partner_remote_system_id TEXT NOT NULL, \
partner_code TEXT NULL, \
partner_mode TEXT NULL, \
partner_local_username TEXT NULL, \
partner_local_password TEXT NULL, \
partner_remote_user TEXT NULL, \
partner_remote_password TEXT NULL, \
partner_CA_certificate TEXT NULL, \
partner_cert_bundle TEXT NULL, \
partner_control_port INTEGER NULL, \
partner_IDF TEXT NULL, \
partner_parm TEXT NULL, \
partner_pgp_pub_key TEXT NULL, \
partner_lqm TEXT NULL, \
partner_dqm TEXT NULL, \
partner_oqm TEXT NULL, \
partner_cq TEXT NULL, \
partner_exit TEXT NULL, \
partner_exitdll TEXT NULL, \
partner_exitentry TEXT NULL, \
partner_exitdata TEXT NULL, \
partner_ofile TEXT NULL, \
partner_receiving_app TEXT NULL, \
partner_target_app TEXT NULL, \
partner_action TEXT NULL, \
partner_post_xfer_hook TEXT NULL, \
partner_post_xfer_comp_hook TEXT NULL, \
partner_retain_file TEXT NULL, \
partner_priority TEXT NULL);")
cur.execute(
"CREATE TABLE XFERO_COTS_Pattern \
(cotspattern_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, \
cotspattern_product TEXT NOT NULL, \
cotspattern_pattern_name TEXT NOT NULL, \
cotspattern_prototype TEXT NOT NULL);")
cur.execute(
"CREATE TABLE XFERO_AV_Pattern \
(avpattern_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, \
avpattern_product TEXT NOT NULL, \
avpattern_pattern_name TEXT NOT NULL, \
avpattern_params TEXT NULL);")
cur.execute(
"CREATE TABLE XFERO_Priority \
(priority_level INTEGER NOT NULL PRIMARY KEY, \
priority_detail TEXT NOT NULL);")
cur.execute(
"CREATE TABLE XFERO_Scheduled_Task \
(scheduled_task_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, \
scheduled_task_name TEXT NOT NULL, \
scheduled_task_function TEXT NOT NULL, \
scheduled_task_year TEXT NULL, \
scheduled_task_month TEXT NULL, \
scheduled_task_day TEXT NULL, \
scheduled_task_week TEXT NULL, \
scheduled_task_day_of_week TEXT NULL, \
scheduled_task_hour TEXT NULL, \
scheduled_task_minute TEXT NULL, \
scheduled_task_second TEXT NULL, \
scheduled_task_args TEXT NOT NULL, \
scheduled_task_active TEXT NOT NULL);")
cur.execute(
"CREATE TABLE XFERO_Control \
(control_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, \
control_status TEXT NOT NULL, \
control_pgp_priv_key TEXT NULL, \
control_pgp_passphrase TEXT NULL, \
control_num_threads INTEGER NOT NULL);")
cur.execute(
"CREATE TABLE XFERO_Route \
(route_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, \
route_monitoreddir TEXT NOT NULL, \
route_filenamepattern TEXT NOT NULL, \
route_active INTEGER NOT NULL, \
route_priority INTEGER NOT NULL REFERENCES \
XFERO_Priority(priority_level) ON \
DELETE RESTRICT ON UPDATE CASCADE);")
cur.execute("CREATE INDEX routeindex ON XFERO_Route(route_priority);")
cur.execute(
"CREATE TABLE XFERO_Xfer \
(xfer_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, \
xfer_route INTEGER NOT NULL, \
xfer_cotspattern INTEGER NOT NULL, \
xfer_partner INTEGER NOT NULL, \
xfer_cmd TEXT NOT NULL, \
xfer_params TEXT NOT NULL, \
xfer_delsrc TEXT NOT NULL, \
FOREIGN KEY(xfer_route) REFERENCES XFERO_Route(route_id) ON \
DELETE CASCADE ON UPDATE CASCADE, \
FOREIGN KEY(xfer_cotspattern) REFERENCES \
XFERO_COTS_Pattern(cotspattern_id) ON DELETE CASCADE ON UPDATE \
CASCADE, \
FOREIGN KEY(xfer_partner) REFERENCES XFERO_Partner(partner_id) ON \
DELETE CASCADE ON UPDATE CASCADE);")
cur.execute("CREATE INDEX xferindex ON XFERO_Xfer(xfer_route);")
cur.execute(
"CREATE INDEX xfercotspatternindex ON \
XFERO_Xfer(xfer_cotspattern);")
cur.execute("CREATE INDEX xferpartner ON XFERO_Xfer(xfer_partner);")
cur.execute(
"CREATE TABLE XFERO_Workflow_Item \
(workflow_item_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, \
workflow_item_route INTEGER NOT NULL REFERENCES \
XFERO_Route(route_id) ON DELETE CASCADE ON UPDATE CASCADE, \
workflow_item_class TEXT NOT NULL, \
workflow_item_function_call TEXT NOT NULL, \
workflow_item_args TEXT NULL, \
workflow_item_running_order INTEGER NOT NULL);")
cur.execute(
"CREATE INDEX workflowindex ON \
XFERO_Workflow_Item(workflow_item_route);")
except lite.Error as err:
print("Error %s:" % err.args[0])
sys.exit(err)
# Commit
con.commit()
con.close()
print("/Xfero/ Database created!")
if __name__ == "__main__":
create_db()
| agpl-3.0 | -6,934,363,958,459,111,000 | 43.712446 | 83 | 0.508927 | false |
claudyus/pylibftdi | tests/test_bus.py | 1 | 1510 | """
pylibftdi - python wrapper for libftdi
Copyright (c) 2010-2014 Ben Bass <[email protected]>
See LICENSE file for details and (absence of) warranty
pylibftdi: http://bitbucket.org/codedstructure/pylibftdi
This module contains some basic tests for the higher-level
functionality without requiring an actual hardware device
to be attached.
"""
from tests.test_common import unittest
from pylibftdi.util import Bus
class TestBus(unittest.TestCase):
class MockDevice(object):
port = 0
class Bus1(object):
a = Bus(0, 2)
b = Bus(2, 1)
c = Bus(3, 5)
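        # Added note (inferred from how these fields are used below): Bus(offset,
        # width) maps a bit-field of device.port, so a covers bits 0-1, b bit 2
        # and c bits 3-7.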
def __init__(self):
self.device = TestBus.MockDevice()
def test_bus_write(self):
test_bus = TestBus.Bus1()
# test writing to the bus
self.assertEqual(test_bus.device.port, 0)
test_bus.a = 3
test_bus.b = 1
test_bus.c = 31
self.assertEqual(test_bus.device.port, 255)
test_bus.b = 0
self.assertEqual(test_bus.device.port, 251)
test_bus.c = 16
self.assertEqual(test_bus.device.port, 131)
def test_bus_read(self):
test_bus = TestBus.Bus1()
# test reading from the bus
test_bus.device.port = 0x55
assert test_bus.a == 1
assert test_bus.b == 1
assert test_bus.c == 10
test_bus.device.port = 0xAA
assert test_bus.a == 2
assert test_bus.b == 0
assert test_bus.c == 21
if __name__ == "__main__":
unittest.main()
| mit | 3,878,225,642,109,555,700 | 25.034483 | 61 | 0.611258 | false |
ekampf/webapp2_requestparser | docs/conf.py | 2 | 8507 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# webapp2_restful documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import webapp2_restful
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'WebApp2 RequestParser'
copyright = u'2015, Eran Kampf'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = webapp2_restful.__version__
# The full version, including alpha/beta/rc tags.
release = webapp2_restful.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'webapp2_restfuldoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'webapp2_restful.tex',
u'WebApp2 RequestParser Documentation',
u'Eran Kampf', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'webapp2_restful',
u'WebApp2 RequestParser Documentation',
[u'Eran Kampf'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'webapp2_restful',
u'WebApp2 RequestParser Documentation',
u'Eran Kampf',
'webapp2_restful',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bsd-3-clause | -5,062,733,025,095,551,000 | 29.934545 | 76 | 0.706712 | false |
dennybritz/deepdive | examples/ocr/data/raw/gen_feature_table.py | 3 | 1046 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os,sys # needed by most
import random # random
import yaml # yaml parsing
import pprint # pretty print
dirbase = 'boolean-f52-c3-m620/'
# strip the '.features.txt' suffix (str.rstrip would drop arbitrary trailing characters)
ids = [f[:-len('.features.txt')] for f in os.listdir(dirbase) if f.endswith('.features.txt')]
print 'Process files:', ids
wid = 1
fout = open('feature_table.csv', 'w')
for fid in ids:
lines = open(dirbase + fid+'.features.txt').readlines()
for l in lines:
vals = [b for b in l.strip().split('\t')]
# print vals
for sub in range(0, len(vals)):
print >>fout, str(wid) + ',' + str(sub)+','+ str(vals[sub])
wid += 1
totid = wid
wid = 1
fl1 = open('label1_table.csv', 'w')
fl2 = open('label2_table.csv', 'w')
for fid in ids:
labels = [int(s) for s in open(dirbase + fid+'.labels.txt').readlines()]
for l in labels:
l1 = False
l2 = False
if l == 1: l1 = True
if l == 2: l2 = True
print >>fl1, str(wid) + ',' + str(l1)
print >>fl2, str(wid) + ',' + str(l2)
wid += 1
fl1.close()
fl2.close()
| apache-2.0 | 5,247,162,795,309,445,000 | 23.904762 | 93 | 0.58413 | false |
dvor85/kmotion | core/actions/rtsp2mp4.py | 1 | 4430 | '''
@author: demon
'''
import os
import subprocess
import shlex
import time
import datetime
import signal
import sample
log = None
class rtsp2mp4(sample.sample):
def __init__(self, kmotion_dir, feed):
sample.sample.__init__(self, kmotion_dir, feed)
from core import logger
global log
log = logger.Logger('kmotion', logger.DEBUG)
self.key = 'rtsp2mp4'
try:
from core.mutex_parsers import mutex_kmotion_parser_rd, mutex_www_parser_rd
parser = mutex_kmotion_parser_rd(kmotion_dir)
self.ramdisk_dir = parser.get('dirs', 'ramdisk_dir')
self.images_dbase_dir = parser.get('dirs', 'images_dbase_dir')
except Exception:
log.exception('init error while parsing kmotion_rc file')
self.event_file = os.path.join(self.ramdisk_dir, 'events', str(self.feed))
try:
www_parser = mutex_www_parser_rd(self.kmotion_dir)
self.sound = www_parser.getboolean('motion_feed%02i' % self.feed, '%s_sound' % self.key)
self.recode = www_parser.getboolean('motion_feed%02i' % self.feed, '%s_recode' % self.key)
self.feed_kbs = www_parser.get('motion_feed%02i' % self.feed, 'feed_kbs')
self.feed_username = www_parser.get('motion_feed%02i' % self.feed, 'feed_lgn_name')
self.feed_password = www_parser.get('motion_feed%02i' % self.feed, 'feed_lgn_pw')
from core.utils import add_userinfo
self.feed_grab_url = add_userinfo(
www_parser.get('motion_feed%02i' % self.feed, '%s_grab_url' % self.key), self.feed_username, self.feed_password)
except Exception:
log.exception('init error')
def get_grabber_pids(self):
try:
p_obj = subprocess.Popen(
'pgrep -f "^avconv.+{src}.*"'.format(src=self.feed_grab_url), stdout=subprocess.PIPE, shell=True)
stdout = p_obj.communicate()[0]
return stdout.splitlines()
except Exception:
return []
def get_cmdline(self, pid):
cmdline_file = os.path.join('/proc', str(pid), 'cmdline')
if os.path.isfile(cmdline_file):
with open(cmdline_file, 'r') as f_obj:
cmdline = f_obj.read()
return cmdline.replace('\x00', ' ')
else:
return ''
def start_grab(self, src, dst):
if self.sound:
audio = "-c:a libfaac -ac 1 -ar 22050 -b:a 64k"
else:
audio = "-an"
if self.recode:
vcodec = "-c:v libx264 -preset ultrafast -profile:v baseline -b:v %sk -qp 30" % self.feed_kbs
else:
vcodec = '-c:v copy'
grab = 'avconv -threads auto -rtsp_transport tcp -n -i {src} {vcodec} {audio} {dst}'.format(
src=src, dst=dst, vcodec=vcodec, audio=audio)
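        # Illustrative expansion of the command built above (URL and output path
        # are made up for this comment; sound and recode disabled):
        #   avconv -threads auto -rtsp_transport tcp -n -i rtsp://user:pw@cam/stream -c:v copy -an /path/movie/120000.mp4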
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, 'wb')
log.debug('try start grabbing {src} to {dst}'.format(src=src, dst=dst))
ps = subprocess.Popen(shlex.split(grab), stderr=DEVNULL, stdout=DEVNULL, close_fds=True)
return ps.pid
def start(self):
sample.sample.start(self)
try:
dt = datetime.datetime.fromtimestamp(time.time())
event_date = dt.strftime("%Y%m%d")
event_time = dt.strftime("%H%M%S")
movie_dir = os.path.join(self.images_dbase_dir, event_date, '%0.2i' % self.feed, 'movie')
if len(self.get_grabber_pids()) == 0:
if not os.path.isdir(movie_dir):
os.makedirs(movie_dir)
dst = os.path.join(movie_dir, '%s.mp4' % event_time)
self.start_grab(self.feed_grab_url, dst)
if len(self.get_grabber_pids()) == 0 and os.path.isfile(dst):
os.unlink(dst)
except Exception:
log.exception('start error')
def end(self):
sample.sample.end(self)
for pid in self.get_grabber_pids():
try:
dst = shlex.split(self.get_cmdline(pid))[-1]
os.kill(int(pid), signal.SIGTERM)
if os.path.isfile(dst) and not os.path.getsize(dst) > 0:
os.unlink(dst)
time.sleep(1)
except Exception:
log.exception('end error')
| gpl-3.0 | 8,434,391,264,230,338,000 | 34.15873 | 128 | 0.559368 | false |
jcjohnson/densecap | preprocess.py | 4 | 16197 | # coding=utf8
import argparse, os, json, string
from collections import Counter
from Queue import Queue
from threading import Thread, Lock
from math import floor
import h5py
import numpy as np
from scipy.misc import imread, imresize
"""
This file expects a JSON file containing ground-truth regions and captions
in the same format as the region descriptions file from the Visual Genome
website. Concretely, this is a single large JSON file containing a list;
each element of the list describes a single image and has the following
format:
{
"id": [int], Unique identifier for this image,
"regions": [
{
"id": [int] Unique identifier for this region,
"image": [int] ID of the image to which this region belongs,
"height": [int] Height of the region in pixels,
"width": [int] Width of the region in pixels,
"phrase": [string] Caption for this region,
"x": [int] x-coordinate of the upper-left corner of the region,
"y": [int] y-coordinate of the upper-left corner of the region,
},
...
]
}
We assume that all images are on disk in a single folder, and that
the filename for each image is the same as its id with a .jpg extension.
This file will be preprocessed into an HDF5 file and a JSON file with
some auxiliary information. The captions will be tokenized with some
basic preprocessing (split by words, remove special characters).
Note, in general any indices anywhere in input/output of this file are 1-indexed.
The output JSON file is an object with the following elements:
- token_to_idx: Dictionary mapping strings to integers for encoding tokens,
in 1-indexed format.
- filename_to_idx: Dictionary mapping string filenames to indices.
- idx_to_token: Inverse of the above.
- idx_to_filename: Inverse of the above.
The output HDF5 file has the following format to describe N images with
M total regions:
- images: uint8 array of shape (N, 3, image_size, image_size) of pixel data,
in BDHW format. Images will be resized so their longest edge is image_size
pixels long, aligned to the upper left corner, and padded with zeros.
The actual size of each image is stored in the image_heights and image_widths
fields.
- image_heights: int32 array of shape (N,) giving the height of each image.
- image_widths: int32 array of shape (N,) giving the width of each image.
- original_heights: int32 array of shape (N,) giving the original height of
each image.
- original_widths: int32 array of shape (N,) giving the original width of
each image.
- boxes: int32 array of shape (M, 4) giving the coordinates of each bounding box.
Each row is (xc, yc, w, h) where yc and xc are center coordinates of the box,
and are one-indexed.
- lengths: int32 array of shape (M,) giving lengths of label sequence for each box
- captions: int32 array of shape (M, L) giving the captions for each region.
Captions in the input with more than L = --max_token_length tokens are
discarded. To recover a token from an integer in this matrix,
use idx_to_token from the JSON output file. Padded with zeros.
- img_to_first_box: int32 array of shape (N,). If img_to_first_box[i] = j then
captions[j] and boxes[j] give the first annotation for image i
(using one-indexing).
- img_to_last_box: int32 array of shape (N,). If img_to_last_box[i] = j then
captions[j] and boxes[j] give the last annotation for image i
(using one-indexing).
- box_to_img: int32 array of shape (M,). If box_to_img[i] = j then then
regions[i] and captions[i] refer to images[j] (using one-indexing).
"""
def build_vocab(data, min_token_instances, verbose=True):
""" Builds a set that contains the vocab. Filters infrequent tokens. """
token_counter = Counter()
for img in data:
for region in img['regions']:
if region['tokens'] is not None:
token_counter.update(region['tokens'])
vocab = set()
for token, count in token_counter.iteritems():
if count >= min_token_instances:
vocab.add(token)
if verbose:
print ('Keeping %d / %d tokens with enough instances'
% (len(vocab), len(token_counter)))
if len(vocab) < len(token_counter):
vocab.add('<UNK>')
if verbose:
print('adding special <UNK> token.')
else:
if verbose:
print('no <UNK> token needed.')
return vocab
def build_vocab_dict(vocab):
token_to_idx, idx_to_token = {}, {}
next_idx = 1
for token in vocab:
token_to_idx[token] = next_idx
idx_to_token[next_idx] = token
next_idx = next_idx + 1
return token_to_idx, idx_to_token
def encode_caption(tokens, token_to_idx, max_token_length):
encoded = np.zeros(max_token_length, dtype=np.int32)
for i, token in enumerate(tokens):
if token in token_to_idx:
encoded[i] = token_to_idx[token]
else:
encoded[i] = token_to_idx['<UNK>']
return encoded
def encode_captions(data, token_to_idx, max_token_length):
encoded_list = []
lengths = []
for img in data:
for region in img['regions']:
tokens = region['tokens']
if tokens is None: continue
tokens_encoded = encode_caption(tokens, token_to_idx, max_token_length)
encoded_list.append(tokens_encoded)
lengths.append(len(tokens))
return np.vstack(encoded_list), np.asarray(lengths, dtype=np.int32)
def encode_boxes(data, original_heights, original_widths, image_size):
all_boxes = []
xwasbad = 0
ywasbad = 0
wwasbad = 0
hwasbad = 0
for i, img in enumerate(data):
H, W = original_heights[i], original_widths[i]
scale = float(image_size) / max(H, W)
for region in img['regions']:
if region['tokens'] is None: continue
# recall: x,y are 1-indexed
x, y = round(scale*(region['x']-1)+1), round(scale*(region['y']-1)+1)
w, h = round(scale*region['width']), round(scale*region['height'])
# clamp to image
if x < 1: x = 1
if y < 1: y = 1
if x > image_size - 1:
x = image_size - 1
xwasbad += 1
if y > image_size - 1:
y = image_size - 1
ywasbad += 1
if x + w > image_size:
w = image_size - x
wwasbad += 1
if y + h > image_size:
h = image_size - y
hwasbad += 1
box = np.asarray([x+floor(w/2), y+floor(h/2), w, h], dtype=np.int32) # also convert to center-coord oriented
assert box[2]>=0 # width height should be positive numbers
assert box[3]>=0
all_boxes.append(box)
print 'number of bad x,y,w,h: ', xwasbad, ywasbad, wwasbad, hwasbad
return np.vstack(all_boxes)
def build_img_idx_to_box_idxs(data):
img_idx = 1
box_idx = 1
num_images = len(data)
img_to_first_box = np.zeros(num_images, dtype=np.int32)
img_to_last_box = np.zeros(num_images, dtype=np.int32)
for img in data:
img_to_first_box[img_idx - 1] = box_idx
for region in img['regions']:
if region['tokens'] is None: continue
box_idx += 1
img_to_last_box[img_idx - 1] = box_idx - 1 # -1 to make these inclusive limits
img_idx += 1
return img_to_first_box, img_to_last_box
def build_filename_dict(data):
  # First make sure all filenames are unique
filenames_list = ['%d.jpg' % img['id'] for img in data]
assert len(filenames_list) == len(set(filenames_list))
next_idx = 1
filename_to_idx, idx_to_filename = {}, {}
for img in data:
filename = '%d.jpg' % img['id']
filename_to_idx[filename] = next_idx
idx_to_filename[next_idx] = filename
next_idx += 1
return filename_to_idx, idx_to_filename
def encode_filenames(data, filename_to_idx):
filename_idxs = []
for img in data:
filename = '%d.jpg' % img['id']
idx = filename_to_idx[filename]
for region in img['regions']:
if region['tokens'] is None: continue
filename_idxs.append(idx)
return np.asarray(filename_idxs, dtype=np.int32)
def add_images(data, h5_file, args):
num_images = len(data)
shape = (num_images, 3, args.image_size, args.image_size)
image_dset = h5_file.create_dataset('images', shape, dtype=np.uint8)
original_heights = np.zeros(num_images, dtype=np.int32)
original_widths = np.zeros(num_images, dtype=np.int32)
image_heights = np.zeros(num_images, dtype=np.int32)
image_widths = np.zeros(num_images, dtype=np.int32)
lock = Lock()
q = Queue()
for i, img in enumerate(data):
filename = os.path.join(args.image_dir, '%s.jpg' % img['id'])
q.put((i, filename))
def worker():
while True:
i, filename = q.get()
img = imread(filename)
# handle grayscale
if img.ndim == 2:
img = img[:, :, None][:, :, [0, 0, 0]]
H0, W0 = img.shape[0], img.shape[1]
img = imresize(img, float(args.image_size) / max(H0, W0))
H, W = img.shape[0], img.shape[1]
# swap rgb to bgr. Is this the best way?
r = img[:,:,0].copy()
img[:,:,0] = img[:,:,2]
img[:,:,2] = r
lock.acquire()
if i % 1000 == 0:
print 'Writing image %d / %d' % (i, len(data))
original_heights[i] = H0
original_widths[i] = W0
image_heights[i] = H
image_widths[i] = W
image_dset[i, :, :H, :W] = img.transpose(2, 0, 1)
lock.release()
q.task_done()
print('adding images to hdf5.... (this might take a while)')
for i in xrange(args.num_workers):
t = Thread(target=worker)
t.daemon = True
t.start()
q.join()
h5_file.create_dataset('image_heights', data=image_heights)
h5_file.create_dataset('image_widths', data=image_widths)
h5_file.create_dataset('original_heights', data=original_heights)
h5_file.create_dataset('original_widths', data=original_widths)
def words_preprocess(phrase):
""" preprocess a sentence: lowercase, clean up weird chars, remove punctuation """
replacements = {
u'½': u'half',
u'—' : u'-',
u'™': u'',
u'¢': u'cent',
u'ç': u'c',
u'û': u'u',
u'é': u'e',
u'°': u' degree',
u'è': u'e',
u'…': u'',
}
for k, v in replacements.iteritems():
phrase = phrase.replace(k, v)
return str(phrase).lower().translate(None, string.punctuation).split()
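# Added example (illustrative): words_preprocess(u'Two ½ cakes!') == ['two', 'half', 'cakes']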
def split_filter_captions(data, max_token_length, tokens_type, verbose=True):
"""
Modifies data in-place by adding a 'tokens' field to each region.
If the region's label is too long, 'tokens' will be None; otherwise
it will be a list of strings.
Splits by space when tokens_type = "words", or lists all chars when "chars"
"""
captions_kept = 0
captions_removed = 0
for i, img in enumerate(data):
if verbose and (i + 1) % 2000 == 0:
print 'Splitting tokens in image %d / %d' % (i + 1, len(data))
regions_per_image = 0
img_kept, img_removed = 0, 0
for region in img['regions']:
# create tokens array
if tokens_type == 'words':
tokens = words_preprocess(region['phrase'])
elif tokens_type == 'chars':
tokens = list(region['label'])
else:
assert False, 'tokens_type must be "words" or "chars"'
# filter by length
if max_token_length > 0 and len(tokens) <= max_token_length:
region['tokens'] = tokens
captions_kept += 1
img_kept += 1
regions_per_image = regions_per_image + 1
else:
region['tokens'] = None
captions_removed += 1
img_removed += 1
if regions_per_image == 0:
print 'kept %d, removed %d' % (img_kept, img_removed)
assert False, 'DANGER, some image has no valid regions. Not super sure this doesnt cause bugs. Think about more if it comes up'
if verbose:
print 'Keeping %d captions' % captions_kept
print 'Skipped %d captions for being too long' % captions_removed
def encode_splits(data, split_data):
""" Encode splits as intetgers and return the array. """
lookup = {'train': 0, 'val': 1, 'test': 2}
id_to_split = {}
split_array = np.zeros(len(data))
for split, idxs in split_data.iteritems():
for idx in idxs:
id_to_split[idx] = split
for i, img in enumerate(data):
split_array[i] = lookup[id_to_split[img['id']]]
return split_array
def filter_images(data, split_data):
""" Keep only images that are in some split and have some captions """
all_split_ids = set()
for split_name, ids in split_data.iteritems():
all_split_ids.update(ids)
new_data = []
for img in data:
keep = img['id'] in all_split_ids and len(img['regions']) > 0
if keep:
new_data.append(img)
return new_data
def main(args):
# read in the data
with open(args.region_data, 'r') as f:
data = json.load(f)
with open(args.split_json, 'r') as f:
split_data = json.load(f)
# Only keep images that are in a split
print 'There are %d images total' % len(data)
data = filter_images(data, split_data)
print 'After filtering for splits there are %d images' % len(data)
if args.max_images > 0:
data = data[:args.max_images]
# create the output hdf5 file handle
f = h5py.File(args.h5_output, 'w')
# add several fields to the file: images, and the original/resized widths/heights
add_images(data, f, args)
# add split information
split = encode_splits(data, split_data)
f.create_dataset('split', data=split)
# process "label" field in each region to a "tokens" field, and cap at some max length
split_filter_captions(data, args.max_token_length, args.tokens_type)
# build vocabulary
vocab = build_vocab(data, args.min_token_instances) # vocab is a set()
token_to_idx, idx_to_token = build_vocab_dict(vocab) # both mappings are dicts
# encode labels
captions_matrix, lengths_vector = encode_captions(data, token_to_idx, args.max_token_length)
f.create_dataset('labels', data=captions_matrix)
f.create_dataset('lengths', data=lengths_vector)
# encode boxes
original_heights = np.asarray(f['original_heights'])
original_widths = np.asarray(f['original_widths'])
boxes_matrix = encode_boxes(data, original_heights, original_widths, args.image_size)
f.create_dataset('boxes', data=boxes_matrix)
# integer mapping between image ids and box ids
img_to_first_box, img_to_last_box = build_img_idx_to_box_idxs(data)
f.create_dataset('img_to_first_box', data=img_to_first_box)
f.create_dataset('img_to_last_box', data=img_to_last_box)
filename_to_idx, idx_to_filename = build_filename_dict(data)
box_to_img = encode_filenames(data, filename_to_idx)
f.create_dataset('box_to_img', data=box_to_img)
f.close()
# and write the additional json file
json_struct = {
'token_to_idx': token_to_idx,
'idx_to_token': idx_to_token,
'filename_to_idx': filename_to_idx,
'idx_to_filename': idx_to_filename,
}
with open(args.json_output, 'w') as f:
json.dump(json_struct, f)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# INPUT settings
parser.add_argument('--region_data',
default='data/visual-genome/region_descriptions.json',
help='Input JSON file with regions and captions')
parser.add_argument('--image_dir',
default='data/visual-genome/images',
help='Directory containing all images')
parser.add_argument('--split_json',
default='info/densecap_splits.json',
help='JSON file of splits')
# OUTPUT settings
parser.add_argument('--json_output',
default='data/VG-regions-dicts.json',
help='Path to output JSON file')
parser.add_argument('--h5_output',
default='data/VG-regions.h5',
help='Path to output HDF5 file')
# OPTIONS
parser.add_argument('--image_size',
default=720, type=int,
help='Size of longest edge of preprocessed images')
parser.add_argument('--max_token_length',
default=15, type=int,
help="Set to 0 to disable filtering")
parser.add_argument('--min_token_instances',
default=15, type=int,
help="When token appears less than this times it will be mapped to <UNK>")
parser.add_argument('--tokens_type', default='words',
help="Words|chars for word or char split in captions")
parser.add_argument('--num_workers', default=5, type=int)
parser.add_argument('--max_images', default=-1, type=int,
help="Set to a positive number to limit the number of images we process")
args = parser.parse_args()
main(args)
| mit | -3,519,569,516,292,796,000 | 33.360934 | 133 | 0.653609 | false |
biocore/qiime | scripts/parallel_blast.py | 15 | 4965 | #!/usr/bin/env python
# File created on 09 Feb 2010
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Greg Caporaso", "Jai Ram Rideout", "Jose Antonio Navas Molina"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "[email protected]"
from glob import glob
from os import makedirs, system
from os.path import exists, split, splitext, isfile
from subprocess import check_call, CalledProcessError
from bfillings.formatdb import build_blast_db_from_fasta_path
from qiime.util import parse_command_line_parameters
from qiime.util import make_option
from qiime.util import load_qiime_config, get_options_lookup
from qiime.parallel.blast import ParallelBlaster
qiime_config = load_qiime_config()
options_lookup = get_options_lookup()
script_info = {}
script_info['brief_description'] = """Parallel BLAST"""
script_info['script_description'] = """This script performs blast while\
making use of multicore/multiprocessor environments to perform analyses in\
parallel."""
script_info['script_usage'] = []
script_info['script_usage'].append(
("""Example""",
"""BLAST $PWD/inseqs.fasta (-i) against a blast database created from\
$PWD/refseqs.fasta (-r). Store the results in $PWD/blast_out/ (-o). ALWAYS\
SPECIFY ABSOLUTE FILE PATHS (absolute path represented here as $PWD, but will\
generally look something like /home/ubuntu/my_analysis/).""",
"""%prog -i $PWD/inseqs.fasta -r $PWD/refseqs.fasta -o $PWD/blast_out/\
-e 0.001"""))
script_info['output_description'] = """ """
script_info['required_options'] = [
make_option('-i', '--infile_path', action='store',
type='existing_filepath', dest='infile_path',
help='Path of sequences to use as queries [REQUIRED]'),
make_option('-o', '--output_dir', type='new_dirpath',
help='name of output directory for blast jobs [REQUIRED]')
]
script_info['optional_options'] = [
make_option('-c', '--disable_low_complexity_filter',
default=False, action='store_true',
help='disable filtering of low-complexity sequences '
'(i.e., -F F is passed to blast) [default: %default]'),
make_option('-e', '--e_value', action='store',
type='float', default=1e-30, dest='e_value',
help='E-value threshold for blasts [default: %default]'),
make_option('-n', '--num_hits', action='store',
type='int', default=1, dest='num_hits',
help='number of hits per query for blast results [default: %default]'),
make_option('-w', '--word_size', action='store',
type='int', default=30, dest='word_size',
help='word size for blast searches [default: %default]'),
make_option('-a', '--blastmat_dir', action='store',
type='string', help='full path to directory containing ' +
'blastmat file [default: %default]',
default=qiime_config['blastmat_dir']),
make_option(
'-r', '--refseqs_path', action='store', type='existing_filepath',
help='Path to fasta sequences to search against. Required if ' +
'-b is not provided.'),
make_option('-b', '--blast_db', type='blast_db',
help='Name of pre-formatted BLAST database. Required if ' +
'-r is not provided.'),
options_lookup['jobs_to_start'],
options_lookup['retain_temp_files'],
options_lookup['suppress_submit_jobs'],
options_lookup['poll_directly'],
options_lookup['cluster_jobs_fp'],
options_lookup['suppress_polling'],
options_lookup['job_prefix'],
options_lookup['seconds_to_sleep']
]
script_info['version'] = __version__
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
if not (opts.refseqs_path or opts.blast_db):
option_parser.error('Either a blast db (via -b) or a collection of '
'reference sequences (via -r) must be passed')
if opts.refseqs_path and opts.blast_db:
option_parser.error('You should provide only a blast db (via -b) '
'or a collection of reference sequences (via -r), but not both')
# create dict of command-line options
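    # (this relies on the optparse Values object printing as a dict literal,
    # so the eval() below turns the parsed options into a plain dict)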
params = eval(str(opts))
parallel_runner = ParallelBlaster(
cluster_jobs_fp=opts.cluster_jobs_fp,
jobs_to_start=opts.jobs_to_start,
retain_temp_files=opts.retain_temp_files,
suppress_polling=opts.suppress_polling,
seconds_to_sleep=opts.seconds_to_sleep)
parallel_runner(opts.infile_path,
opts.output_dir,
params,
job_prefix=opts.job_prefix,
poll_directly=opts.poll_directly,
suppress_submit_jobs=opts.suppress_submit_jobs)
if __name__ == "__main__":
main()
| gpl-2.0 | -5,827,415,480,407,671,000 | 40.722689 | 92 | 0.628197 | false |
JohnGriffiths/dipy | doc/examples/probabilistic_fiber_tracking.py | 9 | 4979 | """
=====================================================
An introduction to the Probabilistic Direction Getter
=====================================================
Probabilistic fiber tracking is a way of reconstructing white matter
connections using diffusion MR imaging. Like deterministic fiber tracking, the
probabilistic approach follows the trajectory of a possible pathway step by
step starting at a seed, however, unlike deterministic tracking, the tracking
direction at each point along the path is chosen at random from a distribution.
The distribution at each point is different and depends on the observed
diffusion data at that point. The distribution of tracking directions at each
point can be represented as a probability mass function (PMF) if the possible
tracking directions are restricted to discrete numbers of well distributed
points on a sphere.
This example is an extension of the "introduction to basic tracking" example.
We'll begin by repeating a few steps from that example, loading the data and
fitting a constrained spherical deconvolution (CSD) model.
"""
from dipy.data import read_stanford_labels
from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel
from dipy.tracking import utils
from dipy.tracking.local import (ThresholdTissueClassifier, LocalTracking)
hardi_img, gtab, labels_img = read_stanford_labels()
data = hardi_img.get_data()
labels = labels_img.get_data()
affine = hardi_img.get_affine()
seed_mask = labels == 2
white_matter = (labels == 1) | (labels == 2)
seeds = utils.seeds_from_mask(seed_mask, density=1, affine=affine)
csd_model = ConstrainedSphericalDeconvModel(gtab, None, sh_order=6)
csd_fit = csd_model.fit(data, mask=white_matter)
"""
We use the GFA of the CSA model to build a tissue classifier.
"""
from dipy.reconst.shm import CsaOdfModel
csa_model = CsaOdfModel(gtab, sh_order=6)
gfa = csa_model.fit(data, mask=white_matter).gfa
classifier = ThresholdTissueClassifier(gfa, .25)
"""
The fiber orientation distribution (FOD) of the CSD model estimates the
distribution of small fiber bundles within each voxel. We can use this
distribution for probabilistic fiber tracking. One way to do this is to
represent the FOD using a discrete sphere. This discrete FOD can be used by the
Probabilistic Direction Getter as a PMF for sampling tracking directions. We
need to clip the FOD to use it as a PMF because the latter cannot have negative
values. (Ideally the FOD should be strictly positive, but because of noise
and/or model failures sometimes it can have negative values).
"""
from dipy.direction import ProbabilisticDirectionGetter
from dipy.data import small_sphere
from dipy.io.trackvis import save_trk
fod = csd_fit.odf(small_sphere)
pmf = fod.clip(min=0)
prob_dg = ProbabilisticDirectionGetter.from_pmf(pmf, max_angle=30.,
sphere=small_sphere)
streamlines = LocalTracking(prob_dg, classifier, seeds, affine, step_size=.5)
save_trk("probabilistic_small_sphere.trk", streamlines, affine, labels.shape)
"""
One disadvantage of using a discrete PMF to represent possible tracking
directions is that it tends to take up a lot of memory (RAM). The size of the
PMF, the FOD in this case, must be equal to the number of possible tracking
directions on the hemisphere, and every voxel has a unique PMF. In this case
the data is ``(81, 106, 76)`` and ``small_sphere`` has 181 directions so the
FOD is ``(81, 106, 76, 181)``. One way to avoid sampling the PMF and holding it
in memory is to build the direction getter directly from the spherical harmonic
representation of the FOD. By using this approach, we can also use a larger
sphere, like ``default_sphere`` which has 362 directions on the hemisphere,
without having to worry about memory limitations.
"""
from dipy.data import default_sphere
prob_dg = ProbabilisticDirectionGetter.from_shcoeff(csd_fit.shm_coeff,
max_angle=30.,
sphere=default_sphere)
streamlines = LocalTracking(prob_dg, classifier, seeds, affine, step_size=.5)
save_trk("probabilistic_shm_coeff.trk", streamlines, affine, labels.shape)
"""
Not all model fits have the ``shm_coeff`` attribute because not all models use
this basis to represent the data internally. However we can fit the ODF of any
model to the spherical harmonic basis using the ``peaks_from_model`` function.
"""
from dipy.direction import peaks_from_model
peaks = peaks_from_model(csd_model, data, default_sphere, .5, 25,
mask=white_matter, return_sh=True, parallel=True)
fod_coeff = peaks.shm_coeff
prob_dg = ProbabilisticDirectionGetter.from_shcoeff(fod_coeff, max_angle=30.,
sphere=default_sphere)
streamlines = LocalTracking(prob_dg, classifier, seeds, affine, step_size=.5)
save_trk("probabilistic_peaks_from_model.trk", streamlines, affine,
labels.shape)
| bsd-3-clause | -2,648,044,132,355,064,300 | 44.678899 | 79 | 0.730468 | false |
bgris/ODL_bgris | lib/python3.5/site-packages/numpy/polynomial/polynomial.py | 22 | 52694 | """
Objects for dealing with polynomials.
This module provides a number of objects (mostly functions) useful for
dealing with polynomials, including a `Polynomial` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with polynomial objects is in
the docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `polydomain` -- Polynomial default domain, [-1,1].
- `polyzero` -- (Coefficients of the) "zero polynomial."
- `polyone` -- (Coefficients of the) constant polynomial 1.
- `polyx` -- (Coefficients of the) identity map polynomial, ``f(x) = x``.
Arithmetic
----------
- `polyadd` -- add two polynomials.
- `polysub` -- subtract one polynomial from another.
- `polymul` -- multiply two polynomials.
- `polydiv` -- divide one polynomial by another.
- `polypow` -- raise a polynomial to a positive integer power
- `polyval` -- evaluate a polynomial at given points.
- `polyval2d` -- evaluate a 2D polynomial at given points.
- `polyval3d` -- evaluate a 3D polynomial at given points.
- `polygrid2d` -- evaluate a 2D polynomial on a Cartesian product.
- `polygrid3d` -- evaluate a 3D polynomial on a Cartesian product.
Calculus
--------
- `polyder` -- differentiate a polynomial.
- `polyint` -- integrate a polynomial.
Misc Functions
--------------
- `polyfromroots` -- create a polynomial with specified roots.
- `polyroots` -- find the roots of a polynomial.
- `polyvalfromroots` -- evaluate a polynomial at given points from roots.
- `polyvander` -- Vandermonde-like matrix for powers.
- `polyvander2d` -- Vandermonde-like matrix for 2D power series.
- `polyvander3d` -- Vandermonde-like matrix for 3D power series.
- `polycompanion` -- companion matrix in power series form.
- `polyfit` -- least-squares fit returning a polynomial.
- `polytrim` -- trim leading coefficients from a polynomial.
- `polyline` -- polynomial representing given straight line.
Classes
-------
- `Polynomial` -- polynomial class.
See Also
--------
`numpy.polynomial`
"""
from __future__ import division, absolute_import, print_function
__all__ = [
'polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', 'polyadd',
'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', 'polyval',
'polyvalfromroots', 'polyder', 'polyint', 'polyfromroots', 'polyvander',
'polyfit', 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d',
'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d']
import warnings
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
from ._polybase import ABCPolyBase
polytrim = pu.trimcoef
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Polynomial default domain.
polydomain = np.array([-1, 1])
# Polynomial coefficients representing zero.
polyzero = np.array([0])
# Polynomial coefficients representing one.
polyone = np.array([1])
# Polynomial coefficients representing the identity x.
polyx = np.array([0, 1])
#
# Polynomial series functions
#
def polyline(off, scl):
"""
Returns an array representing a linear polynomial.
Parameters
----------
off, scl : scalars
The "y-intercept" and "slope" of the line, respectively.
Returns
-------
y : ndarray
This module's representation of the linear polynomial ``off +
scl*x``.
See Also
--------
chebline
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> P.polyline(1,-1)
array([ 1, -1])
>>> P.polyval(1, P.polyline(1,-1)) # should be 0
0.0
"""
if scl != 0:
return np.array([off, scl])
else:
return np.array([off])
def polyfromroots(roots):
"""
Generate a monic polynomial with given roots.
Return the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
where the `r_n` are the roots specified in `roots`. If a zero has
multiplicity n, then it must appear in `roots` n times. For instance,
if 2 is a root of multiplicity three and 3 is a root of multiplicity 2,
then `roots` looks something like [2, 2, 2, 3, 3]. The roots can appear
in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * x + ... + x^n
The coefficient of the last term is 1 for monic polynomials in this
form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
        1-D array of the polynomial's coefficients. If all the roots are
real, then `out` is also real, otherwise it is complex. (see
Examples below).
See Also
--------
    chebfromroots, legfromroots, lagfromroots, hermfromroots,
hermefromroots
Notes
-----
The coefficients are determined by multiplying together linear factors
of the form `(x - r_i)`, i.e.
.. math:: p(x) = (x - r_0) (x - r_1) ... (x - r_n)
where ``n == len(roots) - 1``; note that this implies that `1` is always
returned for :math:`a_n`.
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> P.polyfromroots((-1,0,1)) # x(x - 1)(x + 1) = x^3 - x
array([ 0., -1., 0., 1.])
>>> j = complex(0,1)
>>> P.polyfromroots((-j,j)) # complex returned, though values are real
array([ 1.+0.j, 0.+0.j, 1.+0.j])
"""
if len(roots) == 0:
return np.ones(1)
else:
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [polyline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [polymul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = polymul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def polyadd(c1, c2):
"""
Add one polynomial to another.
Returns the sum of two polynomials `c1` + `c2`. The arguments are
sequences of coefficients from lowest order term to highest, i.e.,
[1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of polynomial coefficients ordered from low to high.
Returns
-------
out : ndarray
The coefficient array representing their sum.
See Also
--------
polysub, polymul, polydiv, polypow
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> sum = P.polyadd(c1,c2); sum
array([ 4., 4., 4.])
>>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2)
28.0
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def polysub(c1, c2):
"""
Subtract one polynomial from another.
Returns the difference of two polynomials `c1` - `c2`. The arguments
are sequences of coefficients from lowest order term to highest, i.e.,
[1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of polynomial coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of coefficients representing their difference.
See Also
--------
polyadd, polymul, polydiv, polypow
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> P.polysub(c1,c2)
array([-2., 0., 2.])
>>> P.polysub(c2,c1) # -P.polysub(c1,c2)
array([ 2., 0., -2.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def polymulx(c):
"""Multiply a polynomial by x.
Multiply the polynomial `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of polynomial coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
.. versionadded:: 1.5.0
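
    Examples
    --------
    For instance, multiplying ``3 + 2*x + x**2`` by ``x`` shifts every
    coefficient up one slot and puts a zero in the constant term:

    >>> from numpy.polynomial import polynomial as P
    >>> P.polymulx((3, 2, 1))
    array([ 0.,  3.,  2.,  1.])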
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1:] = c
return prd
def polymul(c1, c2):
"""
Multiply one polynomial by another.
Returns the product of two polynomials `c1` * `c2`. The arguments are
sequences of coefficients, from lowest order term to highest, e.g.,
[1,2,3] represents the polynomial ``1 + 2*x + 3*x**2.``
Parameters
----------
c1, c2 : array_like
1-D arrays of coefficients representing a polynomial, relative to the
"standard" basis, and ordered from lowest order term to highest.
Returns
-------
out : ndarray
Of the coefficients of their product.
See Also
--------
polyadd, polysub, polydiv, polypow
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> P.polymul(c1,c2)
array([ 3., 8., 14., 8., 3.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
ret = np.convolve(c1, c2)
return pu.trimseq(ret)
def polydiv(c1, c2):
"""
Divide one polynomial by another.
Returns the quotient-with-remainder of two polynomials `c1` / `c2`.
The arguments are sequences of coefficients, from lowest order term
to highest, e.g., [1,2,3] represents ``1 + 2*x + 3*x**2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of polynomial coefficients ordered from low to high.
Returns
-------
[quo, rem] : ndarrays
Of coefficient series representing the quotient and remainder.
See Also
--------
polyadd, polysub, polymul, polypow
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> P.polydiv(c1,c2)
(array([ 3.]), array([-8., -4.]))
>>> P.polydiv(c2,c1)
(array([ 0.33333333]), array([ 2.66666667, 1.33333333]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
len1 = len(c1)
len2 = len(c2)
if len2 == 1:
return c1/c2[-1], c1[:1]*0
elif len1 < len2:
return c1[:1]*0, c1
else:
dlen = len1 - len2
scl = c2[-1]
c2 = c2[:-1]/scl
i = dlen
j = len1 - 1
while i >= 0:
c1[i:j] -= c2*c1[j]
i -= 1
j -= 1
return c1[j+1:]/scl, pu.trimseq(c1[:j+1])
def polypow(c, pow, maxpower=None):
"""Raise a polynomial to a power.
Returns the polynomial `c` raised to the power `pow`. The argument
`c` is a sequence of coefficients ordered from low to high. i.e.,
[1,2,3] is the series ``1 + 2*x + 3*x**2.``
Parameters
----------
c : array_like
1-D array of array of series coefficients ordered from low to
high degree.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
        to unmanageable size. Default is None, i.e. unlimited.
Returns
-------
coef : ndarray
Power series of power.
See Also
--------
polyadd, polysub, polymul, polydiv
Examples
--------
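    Squaring ``1 + 2*x + 3*x**2`` is the same as convolving its coefficients
    with themselves:

    >>> from numpy.polynomial import polynomial as P
    >>> P.polypow((1, 2, 3), 2)
    array([  1.,   4.,  10.,  12.,   9.])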
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1):
prd = np.convolve(prd, c)
return prd
def polyder(c, m=1, scl=1, axis=0):
"""
Differentiate a polynomial.
Returns the polynomial coefficients `c` differentiated `m` times along
`axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The
argument `c` is an array of coefficients from low to high degree along
each axis, e.g., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``
while [[1,2],[1,2]] represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is
``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of polynomial coefficients. If c is multidimensional the
different axis correspond to different variables with the degree
in each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change
of variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Polynomial coefficients of the derivative.
See Also
--------
polyint
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c = (1,2,3,4) # 1 + 2x + 3x**2 + 4x**3
>>> P.polyder(c) # (d/dx)(c) = 2 + 6x + 12x**2
array([ 2., 6., 12.])
>>> P.polyder(c,3) # (d**3/dx**3)(c) = 24
array([ 24.])
>>> P.polyder(c,scl=-1) # (d/d(-x))(c) = -2 - 6x - 12x**2
array([ -2., -6., -12.])
>>> P.polyder(c,2,-1) # (d**2/d(-x)**2)(c) = 6 + 24x
array([ 6., 24.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
# astype fails with NA
c = c + 0.0
cdt = c.dtype
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=cdt)
for j in range(n, 0, -1):
der[j - 1] = j*c[j]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a polynomial.
Returns the polynomial coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients, from low to high degree along each axis, e.g., [1,2,3]
represents the polynomial ``1 + 2*x + 3*x**2`` while [[1,2],[1,2]]
represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
1-D array of polynomial coefficients, ordered from low to high.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at zero
is the first value in the list, the value of the second integral
at zero is the second value, etc. If ``k == []`` (the default),
all constants are set to zero. If ``m == 1``, a single scalar can
be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Coefficient array of the integral.
Raises
------
ValueError
        If ``m < 1`` or ``len(k) > m``.
See Also
--------
polyder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`. Why
is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
    :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c = (1,2,3)
>>> P.polyint(c) # should return array([0, 1, 1, 1])
array([ 0., 1., 1., 1.])
>>> P.polyint(c,3) # should return array([0, 0, 0, 1/6, 1/12, 1/20])
array([ 0. , 0. , 0. , 0.16666667, 0.08333333,
0.05 ])
>>> P.polyint(c,k=3) # should return array([3, 1, 1, 1])
array([ 3., 1., 1., 1.])
>>> P.polyint(c,lbnd=-2) # should return array([6, 1, 1, 1])
array([ 6., 1., 1., 1.])
>>> P.polyint(c,scl=-2) # should return array([0, -2, -2, -2])
array([ 0., -2., -2., -2.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
# astype doesn't preserve mask attribute.
c = c + 0.0
cdt = c.dtype
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
k = list(k) + [0]*(cnt - len(k))
c = np.rollaxis(c, iaxis)
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=cdt)
tmp[0] = c[0]*0
tmp[1] = c[0]
for j in range(1, n):
tmp[j + 1] = c[j]/(j + 1)
tmp[0] += k[i] - polyval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
def polyval(x, c, tensor=True):
"""
Evaluate a polynomial at points x.
If `c` is of length `n + 1`, this function returns the value
.. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
scalars have shape (,).
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
or its elements must support addition and multiplication with
        themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, compatible object
The shape of the returned array is described above.
See Also
--------
polyval2d, polygrid2d, polyval3d, polygrid3d
Notes
-----
The evaluation uses Horner's method.
Examples
--------
>>> from numpy.polynomial.polynomial import polyval
>>> polyval(1, [1,2,3])
6.0
>>> a = np.arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> polyval(a, [1,2,3])
array([[ 1., 6.],
[ 17., 34.]])
>>> coef = np.arange(4).reshape(2,2) # multidimensional coefficients
>>> coef
array([[0, 1],
[2, 3]])
>>> polyval([1,2], coef, tensor=True)
array([[ 2., 4.],
[ 4., 7.]])
>>> polyval([1,2], coef, tensor=False)
array([ 2., 7.])
"""
c = np.array(c, ndmin=1, copy=0)
if c.dtype.char in '?bBhHiIlLqQpP':
# astype fails with NA
c = c + 0.0
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
c0 = c[-1] + x*0
for i in range(2, len(c) + 1):
c0 = c[-i] + c0*x
return c0
def polyvalfromroots(x, r, tensor=True):
"""
Evaluate a polynomial specified by its roots at points x.
If `r` is of length `N`, this function returns the value
.. math:: p(x) = \\prod_{n=1}^{N} (x - r_n)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `r`.
If `r` is a 1-D array, then `p(x)` will have the same shape as `x`. If `r`
is multidimensional, then the shape of the result depends on the value of
    `tensor`. If `tensor` is ``True`` the shape will be r.shape[1:] + x.shape;
that is, each polynomial is evaluated at every value of `x`. If `tensor` is
``False``, the shape will be r.shape[1:]; that is, each polynomial is
evaluated only for the corresponding broadcast value of `x`. Note that
scalars have shape (,).
.. versionadded:: 1.12
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
or its elements must support addition and multiplication with
        themselves and with the elements of `r`.
r : array_like
Array of roots. If `r` is multidimensional the first index is the
root index, while the remaining indices enumerate multiple
polynomials. For instance, in the two dimensional case the roots
of each polynomial may be thought of as stored in the columns of `r`.
tensor : boolean, optional
If True, the shape of the roots array is extended with ones on the
right, one for each dimension of `x`. Scalars have dimension 0 for this
action. The result is that every column of coefficients in `r` is
evaluated for every element of `x`. If False, `x` is broadcast over the
columns of `r` for the evaluation. This keyword is useful when `r` is
multidimensional. The default value is True.
Returns
-------
values : ndarray, compatible object
The shape of the returned array is described above.
See Also
--------
polyroots, polyfromroots, polyval
Examples
--------
>>> from numpy.polynomial.polynomial import polyvalfromroots
>>> polyvalfromroots(1, [1,2,3])
0.0
>>> a = np.arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> polyvalfromroots(a, [-1, 0, 1])
array([[ -0., 0.],
[ 6., 24.]])
>>> r = np.arange(-2, 2).reshape(2,2) # multidimensional coefficients
>>> r # each column of r defines one polynomial
array([[-2, -1],
[ 0, 1]])
>>> b = [-2, 1]
>>> polyvalfromroots(b, r, tensor=True)
array([[-0., 3.],
[ 3., 0.]])
>>> polyvalfromroots(b, r, tensor=False)
array([-0., 0.])
"""
r = np.array(r, ndmin=1, copy=0)
if r.dtype.char in '?bBhHiIlLqQpP':
r = r.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray):
if tensor:
r = r.reshape(r.shape + (1,)*x.ndim)
elif x.ndim >= r.ndim:
raise ValueError("x.ndim must be < r.ndim when tensor == False")
return np.prod(x - r, axis=0)
def polyval2d(x, y, c):
"""
Evaluate a 2-D polynomial at points (x, y).
This function returns the value
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * x^i * y^j
The parameters `x` and `y` are converted to arrays only if they are
tuples or a lists, otherwise they are treated as a scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in `c[i,j]`. If `c` has
dimension greater than two the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points formed with
pairs of corresponding values from `x` and `y`.
See Also
--------
polyval, polygrid2d, polyval3d, polygrid3d
Notes
-----
.. versionadded:: 1.7.0
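
    Examples
    --------
    A sketch with a small coefficient array, ``c[i, j]`` multiplying
    ``x**i * y**j``:

    >>> from numpy.polynomial import polynomial as P
    >>> c = ((1, 2), (3, 4))   # 1 + 2*y + 3*x + 4*x*y
    >>> P.polyval2d(1, 2, c)
    16.0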
"""
try:
x, y = np.array((x, y), copy=0)
except:
raise ValueError('x, y are incompatible')
c = polyval(x, c)
c = polyval(y, c, tensor=False)
return c
def polygrid2d(x, y, c):
"""
Evaluate a 2-D polynomial on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \\sum_{i,j} c_{i,j} * a^i * b^j
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
tuples or a lists, otherwise they are treated as a scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j are contained in ``c[i,j]``. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points in the Cartesian
product of `x` and `y`.
See Also
--------
polyval, polyval2d, polyval3d, polygrid3d
Notes
-----
.. versionadded:: 1.7.0
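
    Examples
    --------
    Evaluating the same small series as in `polyval2d`, but on the full grid
    formed by ``x = [0, 1]`` and ``y = [0, 1]``:

    >>> from numpy.polynomial import polynomial as P
    >>> c = ((1, 2), (3, 4))   # 1 + 2*y + 3*x + 4*x*y
    >>> P.polygrid2d([0, 1], [0, 1], c)
    array([[  1.,   3.],
           [  4.,  10.]])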
"""
c = polyval(x, c)
c = polyval(y, c)
return c
def polyval3d(x, y, z, c):
"""
Evaluate a 3-D polynomial at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * x^i * y^j * z^k
The parameters `x`, `y`, and `z` are converted to arrays only if
they are tuples or a lists, otherwise they are treated as a scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
polyval, polyval2d, polygrid2d, polygrid3d
Notes
-----
.. versionadded:: 1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
except:
raise ValueError('x, y, z are incompatible')
c = polyval(x, c)
c = polyval(y, c, tensor=False)
c = polyval(z, c, tensor=False)
return c
def polygrid3d(x, y, z, c):
"""
Evaluate a 3-D polynomial on the Cartesian product of x, y and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * a^i * b^j * c^k
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
are tuples or a lists, otherwise they are treated as a scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j are contained in ``c[i,j]``. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points in the Cartesian
product of `x` and `y`.
See Also
--------
polyval, polyval2d, polygrid2d, polyval3d
Notes
-----
.. versionadded:: 1.7.0
"""
c = polyval(x, c)
c = polyval(y, c)
c = polyval(z, c)
return c
def polyvander(x, deg):
"""Vandermonde matrix of given degree.
Returns the Vandermonde matrix of degree `deg` and sample points
`x`. The Vandermonde matrix is defined by
.. math:: V[..., i] = x^i,
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the power of `x`.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
matrix ``V = polyvander(x, n)``, then ``np.dot(V, c)`` and
``polyval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of polynomials of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray.
The Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where the last index is the power of `x`.
The dtype will be the same as the converted `x`.
See Also
--------
polyvander2d, polyvander3d
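
    Examples
    --------
    >>> from numpy.polynomial import polynomial as P
    >>> P.polyvander([1, 2, 3], 2)   # columns are x**0, x**1, x**2
    array([[ 1.,  1.,  1.],
           [ 1.,  2.,  4.],
           [ 1.,  3.,  9.]])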
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
v[0] = x*0 + 1
if ideg > 0:
v[1] = x
for i in range(2, ideg + 1):
v[i] = v[i-1]*x
return np.rollaxis(v, 0, v.ndim)
def polyvander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., deg[1]*i + j] = x^i * y^j,
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the powers of
`x` and `y`.
If ``V = polyvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``polyval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D polynomials
of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
    polyvander, polyvander3d, polyval2d, polyval3d
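
    Examples
    --------
    For a single point ``(x, y) = (1, 2)`` and degrees ``[1, 1]``, the columns
    are ``1, y, x, x*y``:

    >>> from numpy.polynomial import polynomial as P
    >>> P.polyvander2d([1], [2], [1, 1])
    array([[ 1.,  2.,  1.,  2.]])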
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = polyvander(x, degx)
vy = polyvander(y, degy)
v = vx[..., None]*vy[..., None,:]
# einsum bug
#v = np.einsum("...i,...j->...ij", vx, vy)
return v.reshape(v.shape[:-2] + (-1,))
def polyvander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
then The pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = x^i * y^j * z^k,
where `0 <= i <= l`, `0 <= j <= m`, and `0 <= j <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the powers of `x`, `y`, and `z`.
If ``V = polyvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``polyval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D polynomials
of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
    polyvander, polyvander3d, polyval2d, polyval3d
Notes
-----
.. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = polyvander(x, degx)
vy = polyvander(y, degy)
vz = polyvander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
# einsum bug
#v = np.einsum("...i, ...j, ...k->...ijk", vx, vy, vz)
return v.reshape(v.shape[:-3] + (-1,))
def polyfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least-squares fit of a polynomial to data.
Return the coefficients of a polynomial of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n,
where `n` is `deg`.
Parameters
----------
x : array_like, shape (`M`,)
x-coordinates of the `M` sample (data) points ``(x[i], y[i])``.
y : array_like, shape (`M`,) or (`M`, `K`)
y-coordinates of the sample points. Several sets of sample points
sharing the same x-coordinates can be (independently) fit with one
call to `polyfit` by passing in for `y` a 2-D array that contains
one data set per column.
deg : int or 1-D array_like
Degree(s) of the fitting polynomials. If `deg` is a single integer
all terms up to and including the `deg`'th term are included in the
fit. For NumPy versions >= 1.11.0 a list of integers specifying the
degrees of the terms to include may be used instead.
rcond : float, optional
Relative condition number of the fit. Singular values smaller
than `rcond`, relative to the largest singular value, will be
ignored. The default value is ``len(x)*eps``, where `eps` is the
relative precision of the platform's float type, about 2e-16 in
most cases.
full : bool, optional
Switch determining the nature of the return value. When ``False``
(the default) just the coefficients are returned; when ``True``,
diagnostic information from the singular value decomposition (used
to solve the fit's matrix equation) is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
.. versionadded:: 1.5.0
Returns
-------
coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`)
Polynomial coefficients ordered from low to high. If `y` was 2-D,
the coefficients in column `k` of `coef` represent the polynomial
fit to the data in `y`'s `k`-th column.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Raises
------
RankWarning
Raised if the matrix in the least-squares fit is rank deficient.
The warning is only raised if `full` == False. The warnings can
be turned off by:
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
chebfit, legfit, lagfit, hermfit, hermefit
polyval : Evaluates a polynomial.
polyvander : Vandermonde matrix for powers.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the polynomial `p` that minimizes
the sum of the weighted squared errors
.. math :: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where the :math:`w_j` are the weights. This problem is solved by
setting up the (typically) over-determined matrix equation:
.. math :: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, and `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected (and `full` == ``False``), a `RankWarning` will be raised.
This means that the coefficient values may be poorly determined.
Fitting to a lower order polynomial will usually get rid of the warning
(but may not be what you want, of course; if you have independent
reason(s) for choosing the degree which isn't working, you may have to:
a) reconsider those reasons, and/or b) reconsider the quality of your
data). The `rcond` parameter can also be set to a value smaller than
its default, but the resulting fit may be spurious and have large
contributions from roundoff error.
Polynomial fits using double precision tend to "fail" at about
(polynomial) degree 20. Fits using Chebyshev or Legendre series are
generally better conditioned, but much can still depend on the
distribution of the sample points and the smoothness of the data. If
the quality of the fit is inadequate, splines may be a good
alternative.
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1]
>>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + N(0,1) "noise"
>>> c, stats = P.polyfit(x,y,3,full=True)
>>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 1
array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286])
>>> stats # note the large SSR, explaining the rather poor results
[array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316,
0.28853036]), 1.1324274851176597e-014]
Same thing without the added noise
>>> y = x**3 - x
>>> c, stats = P.polyfit(x,y,3,full=True)
>>> c # c[0], c[2] should be "very close to 0", c[1] ~= -1, c[3] ~= 1
array([ -1.73362882e-17, -1.00000000e+00, -2.67471909e-16,
1.00000000e+00])
>>> stats # note the minuscule SSR
[array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158,
0.50443316, 0.28853036]), 1.1324274851176597e-014]
"""
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
deg = np.asarray(deg)
# check arguments.
if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
raise TypeError("deg must be an int or non-empty 1-D array of int")
if deg.min() < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
if deg.ndim == 0:
lmax = deg
order = lmax + 1
van = polyvander(x, lmax)
else:
deg = np.sort(deg)
lmax = deg[-1]
order = len(deg)
van = polyvander(x, lmax)[:, deg]
# set up the least squares matrices in transposed form
lhs = van.T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# Expand c to include non-fitted coefficients which are set to zero
if deg.ndim == 1:
if c.ndim == 2:
cc = np.zeros((lmax + 1, c.shape[1]), dtype=c.dtype)
else:
cc = np.zeros(lmax + 1, dtype=c.dtype)
cc[deg] = c
c = cc
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning, stacklevel=2)
if full:
return c, [resids, rank, s, rcond]
else:
return c
def polycompanion(c):
"""
Return the companion matrix of c.
The companion matrix for power series cannot be made symmetric by
scaling the basis, so this function differs from those for the
orthogonal polynomials.
Parameters
----------
c : array_like
1-D array of polynomial coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded:: 1.7.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
bot = mat.reshape(-1)[n::n+1]
bot[...] = 1
mat[:, -1] -= c[:-1]/c[-1]
return mat
def polyroots(c):
"""
Compute the roots of a polynomial.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * x^i.
Parameters
----------
c : 1-D array_like
1-D array of polynomial coefficients.
Returns
-------
out : ndarray
Array of the roots of the polynomial. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
chebroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the power series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
Examples
--------
>>> import numpy.polynomial.polynomial as poly
>>> poly.polyroots(poly.polyfromroots((-1,0,1)))
array([-1., 0., 1.])
>>> poly.polyroots(poly.polyfromroots((-1,0,1))).dtype
dtype('float64')
>>> j = complex(0,1)
>>> poly.polyroots(poly.polyfromroots((-j,0,j)))
array([ 0.00000000e+00+0.j, 0.00000000e+00+1.j, 2.77555756e-17-1.j])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-c[0]/c[1]])
m = polycompanion(c)
r = la.eigvals(m)
r.sort()
return r
#
# polynomial class
#
class Polynomial(ABCPolyBase):
"""A power series class.
The Polynomial class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
attributes and methods listed in the `ABCPolyBase` documentation.
Parameters
----------
coef : array_like
Polynomial coefficients in order of increasing degree, i.e.,
``(1, 2, 3)`` give ``1 + 2*x + 3*x**2``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [-1, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
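
    Examples
    --------
    >>> from numpy.polynomial import Polynomial
    >>> p = Polynomial([1, 2, 3])   # represents 1 + 2*x + 3*x**2
    >>> p(2)
    17.0
    >>> (p + p).coef
    array([ 2.,  4.,  6.])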
"""
# Virtual Functions
_add = staticmethod(polyadd)
_sub = staticmethod(polysub)
_mul = staticmethod(polymul)
_div = staticmethod(polydiv)
_pow = staticmethod(polypow)
_val = staticmethod(polyval)
_int = staticmethod(polyint)
_der = staticmethod(polyder)
_fit = staticmethod(polyfit)
_line = staticmethod(polyline)
_roots = staticmethod(polyroots)
_fromroots = staticmethod(polyfromroots)
# Virtual properties
nickname = 'poly'
domain = np.array(polydomain)
window = np.array(polydomain)
| gpl-3.0 | 8,955,684,427,356,807,000 | 31.032827 | 79 | 0.592819 | false |
qpython-android/QPypi-numpy | numpy/f2py/cfuncs.py | 3 | 40766 | #!/usr/bin/env python
"""
C declarations, CPP macros, and C functions for f2py2e.
Only required declarations/macros/functions will be used.
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/05/06 11:42:34 $
Pearu Peterson
"""
__version__ = "$Revision: 1.75 $"[10:-1]
import __version__
f2py_version = __version__.version
import types
import sys
import copy
errmess=sys.stderr.write
##################### Definitions ##################
outneeds={'includes0':[],'includes':[],'typedefs':[],'typedefs_generated':[],
'userincludes':[],
'cppmacros':[],'cfuncs':[],'callbacks':[],'f90modhooks':[],
'commonhooks':[]}
needs={}
includes0={'includes0':'/*need_includes0*/'}
includes={'includes':'/*need_includes*/'}
userincludes={'userincludes':'/*need_userincludes*/'}
typedefs={'typedefs':'/*need_typedefs*/'}
typedefs_generated={'typedefs_generated':'/*need_typedefs_generated*/'}
cppmacros={'cppmacros':'/*need_cppmacros*/'}
cfuncs={'cfuncs':'/*need_cfuncs*/'}
callbacks={'callbacks':'/*need_callbacks*/'}
f90modhooks={'f90modhooks':'/*need_f90modhooks*/',
'initf90modhooksstatic':'/*initf90modhooksstatic*/',
'initf90modhooksdynamic':'/*initf90modhooksdynamic*/',
}
commonhooks={'commonhooks':'/*need_commonhooks*/',
'initcommonhooks':'/*need_initcommonhooks*/',
}
############ Includes ###################
includes0['math.h']='#include <math.h>'
includes0['string.h']='#include <string.h>'
includes0['setjmp.h']='#include <setjmp.h>'
includes['Python.h']='#include "Python.h"'
needs['arrayobject.h']=['Python.h']
includes['arrayobject.h']='''#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API
#include "arrayobject.h"'''
includes['arrayobject.h']='#include "fortranobject.h"'
############# Type definitions ###############
typedefs['unsigned_char']='typedef unsigned char unsigned_char;'
typedefs['unsigned_short']='typedef unsigned short unsigned_short;'
typedefs['unsigned_long']='typedef unsigned long unsigned_long;'
typedefs['signed_char']='typedef signed char signed_char;'
typedefs['long_long']="""\
#ifdef _WIN32
typedef __int64 long_long;
#else
typedef long long long_long;
typedef unsigned long long unsigned_long_long;
#endif
"""
typedefs['unsigned_long_long']="""\
#ifdef _WIN32
typedef __uint64 unsigned_long_long;
#else
typedef unsigned long long unsigned_long_long;
#endif
"""
typedefs['long_double']="""\
#ifndef _LONG_DOUBLE
typedef long double long_double;
#endif
"""
typedefs['complex_long_double']='typedef struct {long double r,i;} complex_long_double;'
typedefs['complex_float']='typedef struct {float r,i;} complex_float;'
typedefs['complex_double']='typedef struct {double r,i;} complex_double;'
typedefs['string']="""typedef char * string;"""
############### CPP macros ####################
cppmacros['CFUNCSMESS']="""\
#ifdef DEBUGCFUNCS
#define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess);
#define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\
\tPyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\
\tfprintf(stderr,\"\\n\");
#else
#define CFUNCSMESS(mess)
#define CFUNCSMESSPY(mess,obj)
#endif
"""
cppmacros['F_FUNC']="""\
#if defined(PREPEND_FORTRAN)
#if defined(NO_APPEND_FORTRAN)
#if defined(UPPERCASE_FORTRAN)
#define F_FUNC(f,F) _##F
#else
#define F_FUNC(f,F) _##f
#endif
#else
#if defined(UPPERCASE_FORTRAN)
#define F_FUNC(f,F) _##F##_
#else
#define F_FUNC(f,F) _##f##_
#endif
#endif
#else
#if defined(NO_APPEND_FORTRAN)
#if defined(UPPERCASE_FORTRAN)
#define F_FUNC(f,F) F
#else
#define F_FUNC(f,F) f
#endif
#else
#if defined(UPPERCASE_FORTRAN)
#define F_FUNC(f,F) F##_
#else
#define F_FUNC(f,F) f##_
#endif
#endif
#endif
#if defined(UNDERSCORE_G77)
#define F_FUNC_US(f,F) F_FUNC(f##_,F##_)
#else
#define F_FUNC_US(f,F) F_FUNC(f,F)
#endif
"""
cppmacros['F_WRAPPEDFUNC']="""\
#if defined(PREPEND_FORTRAN)
#if defined(NO_APPEND_FORTRAN)
#if defined(UPPERCASE_FORTRAN)
#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F
#else
#define F_WRAPPEDFUNC(f,F) _f2pywrap##f
#endif
#else
#if defined(UPPERCASE_FORTRAN)
#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F##_
#else
#define F_WRAPPEDFUNC(f,F) _f2pywrap##f##_
#endif
#endif
#else
#if defined(NO_APPEND_FORTRAN)
#if defined(UPPERCASE_FORTRAN)
#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F
#else
#define F_WRAPPEDFUNC(f,F) f2pywrap##f
#endif
#else
#if defined(UPPERCASE_FORTRAN)
#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F##_
#else
#define F_WRAPPEDFUNC(f,F) f2pywrap##f##_
#endif
#endif
#endif
#if defined(UNDERSCORE_G77)
#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f##_,F##_)
#else
#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f,F)
#endif
"""
cppmacros['F_MODFUNC']="""\
#if defined(F90MOD2CCONV1) /*E.g. Compaq Fortran */
#if defined(NO_APPEND_FORTRAN)
#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f
#else
#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f ## _
#endif
#endif
#if defined(F90MOD2CCONV2) /*E.g. IBM XL Fortran, not tested though */
#if defined(NO_APPEND_FORTRAN)
#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f
#else
#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f ## _
#endif
#endif
#if defined(F90MOD2CCONV3) /*E.g. MIPSPro Compilers */
#if defined(NO_APPEND_FORTRAN)
#define F_MODFUNCNAME(m,f) f ## .in. ## m
#else
#define F_MODFUNCNAME(m,f) f ## .in. ## m ## _
#endif
#endif
/*
#if defined(UPPERCASE_FORTRAN)
#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(M,F)
#else
#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(m,f)
#endif
*/
#define F_MODFUNC(m,f) (*(f2pymodstruct##m##.##f))
"""
cppmacros['SWAPUNSAFE']="""\
#define SWAP(a,b) (size_t)(a) = ((size_t)(a) ^ (size_t)(b));\\
(size_t)(b) = ((size_t)(a) ^ (size_t)(b));\\
(size_t)(a) = ((size_t)(a) ^ (size_t)(b))
"""
cppmacros['SWAP']="""\
#define SWAP(a,b,t) {\\
\tt *c;\\
\tc = a;\\
\ta = b;\\
\tb = c;}
"""
#cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) ((m)->flags & NPY_CONTIGUOUS)'
cppmacros['PRINTPYOBJERR']="""\
#define PRINTPYOBJERR(obj)\\
\tfprintf(stderr,\"#modulename#.error is related to \");\\
\tPyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\
\tfprintf(stderr,\"\\n\");
"""
cppmacros['MINMAX']="""\
#define max(a,b) ((a > b) ? (a) : (b))
#define min(a,b) ((a < b) ? (a) : (b))
#ifndef MAX
#define MAX(a,b) ((a > b) ? (a) : (b))
#endif
#ifndef MIN
#define MIN(a,b) ((a < b) ? (a) : (b))
#endif
"""
cppmacros['len..']="""\
#define rank(var) var ## _Rank
#define shape(var,dim) var ## _Dims[dim]
#define old_rank(var) (((PyArrayObject *)(capi_ ## var ## _tmp))->nd)
#define old_shape(var,dim) (((PyArrayObject *)(capi_ ## var ## _tmp))->dimensions[dim])
#define fshape(var,dim) shape(var,rank(var)-dim-1)
#define len(var) shape(var,0)
#define flen(var) fshape(var,0)
#define size(var) PyArray_SIZE((PyArrayObject *)(capi_ ## var ## _tmp))
/* #define index(i) capi_i ## i */
#define slen(var) capi_ ## var ## _len
"""
cppmacros['pyobj_from_char1']='#define pyobj_from_char1(v) (PyInt_FromLong(v))'
cppmacros['pyobj_from_short1']='#define pyobj_from_short1(v) (PyInt_FromLong(v))'
needs['pyobj_from_int1']=['signed_char']
cppmacros['pyobj_from_int1']='#define pyobj_from_int1(v) (PyInt_FromLong(v))'
cppmacros['pyobj_from_long1']='#define pyobj_from_long1(v) (PyLong_FromLong(v))'
needs['pyobj_from_long_long1']=['long_long']
cppmacros['pyobj_from_long_long1']="""\
#ifdef HAVE_LONG_LONG
#define pyobj_from_long_long1(v) (PyLong_FromLongLong(v))
#else
#warning HAVE_LONG_LONG is not available. Redefining pyobj_from_long_long.
#define pyobj_from_long_long1(v) (PyLong_FromLong(v))
#endif
"""
needs['pyobj_from_long_double1']=['long_double']
cppmacros['pyobj_from_long_double1']='#define pyobj_from_long_double1(v) (PyFloat_FromDouble(v))'
cppmacros['pyobj_from_double1']='#define pyobj_from_double1(v) (PyFloat_FromDouble(v))'
cppmacros['pyobj_from_float1']='#define pyobj_from_float1(v) (PyFloat_FromDouble(v))'
needs['pyobj_from_complex_long_double1']=['complex_long_double']
cppmacros['pyobj_from_complex_long_double1']='#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles(v.r,v.i))'
needs['pyobj_from_complex_double1']=['complex_double']
cppmacros['pyobj_from_complex_double1']='#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(v.r,v.i))'
needs['pyobj_from_complex_float1']=['complex_float']
cppmacros['pyobj_from_complex_float1']='#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))'
needs['pyobj_from_string1']=['string']
cppmacros['pyobj_from_string1']='#define pyobj_from_string1(v) (PyString_FromString((char *)v))'
needs['TRYPYARRAYTEMPLATE']=['PRINTPYOBJERR']
cppmacros['TRYPYARRAYTEMPLATE']="""\
/* New SciPy */
#define TRYPYARRAYTEMPLATECHAR case PyArray_STRING: *(char *)(arr->data)=*v; break;
#define TRYPYARRAYTEMPLATELONG case PyArray_LONG: *(long *)(arr->data)=*v; break;
#define TRYPYARRAYTEMPLATEOBJECT case PyArray_OBJECT: (arr->descr->f->setitem)(pyobj_from_ ## ctype ## 1(*v),arr->data); break;
#define TRYPYARRAYTEMPLATE(ctype,typecode) \\
PyArrayObject *arr = NULL;\\
if (!obj) return -2;\\
if (!PyArray_Check(obj)) return -1;\\
if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\
if (arr->descr->type==typecode) {*(ctype *)(arr->data)=*v; return 1;}\\
switch (arr->descr->type_num) {\\
case PyArray_DOUBLE: *(double *)(arr->data)=*v; break;\\
case PyArray_INT: *(int *)(arr->data)=*v; break;\\
case PyArray_LONG: *(long *)(arr->data)=*v; break;\\
case PyArray_FLOAT: *(float *)(arr->data)=*v; break;\\
case PyArray_CDOUBLE: *(double *)(arr->data)=*v; break;\\
case PyArray_CFLOAT: *(float *)(arr->data)=*v; break;\\
case PyArray_BOOL: *(npy_bool *)(arr->data)=(*v!=0); break;\\
case PyArray_UBYTE: *(unsigned char *)(arr->data)=*v; break;\\
case PyArray_BYTE: *(signed char *)(arr->data)=*v; break;\\
case PyArray_SHORT: *(short *)(arr->data)=*v; break;\\
case PyArray_USHORT: *(npy_ushort *)(arr->data)=*v; break;\\
case PyArray_UINT: *(npy_uint *)(arr->data)=*v; break;\\
case PyArray_ULONG: *(npy_ulong *)(arr->data)=*v; break;\\
case PyArray_LONGLONG: *(npy_longlong *)(arr->data)=*v; break;\\
case PyArray_ULONGLONG: *(npy_ulonglong *)(arr->data)=*v; break;\\
case PyArray_LONGDOUBLE: *(npy_longdouble *)(arr->data)=*v; break;\\
case PyArray_CLONGDOUBLE: *(npy_longdouble *)(arr->data)=*v; break;\\
case PyArray_OBJECT: (arr->descr->f->setitem)(pyobj_from_ ## ctype ## 1(*v),arr->data, arr); break;\\
default: return -2;\\
};\\
return 1
"""
needs['TRYCOMPLEXPYARRAYTEMPLATE']=['PRINTPYOBJERR']
cppmacros['TRYCOMPLEXPYARRAYTEMPLATE']="""\
#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case PyArray_OBJECT: (arr->descr->f->setitem)(pyobj_from_complex_ ## ctype ## 1((*v)),arr->data, arr); break;
#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\
PyArrayObject *arr = NULL;\\
if (!obj) return -2;\\
if (!PyArray_Check(obj)) return -1;\\
if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYCOMPLEXPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\
if (arr->descr->type==typecode) {\\
*(ctype *)(arr->data)=(*v).r;\\
*(ctype *)(arr->data+sizeof(ctype))=(*v).i;\\
return 1;\\
}\\
switch (arr->descr->type_num) {\\
case PyArray_CDOUBLE: *(double *)(arr->data)=(*v).r;*(double *)(arr->data+sizeof(double))=(*v).i;break;\\
case PyArray_CFLOAT: *(float *)(arr->data)=(*v).r;*(float *)(arr->data+sizeof(float))=(*v).i;break;\\
case PyArray_DOUBLE: *(double *)(arr->data)=(*v).r; break;\\
case PyArray_LONG: *(long *)(arr->data)=(*v).r; break;\\
case PyArray_FLOAT: *(float *)(arr->data)=(*v).r; break;\\
case PyArray_INT: *(int *)(arr->data)=(*v).r; break;\\
case PyArray_SHORT: *(short *)(arr->data)=(*v).r; break;\\
case PyArray_UBYTE: *(unsigned char *)(arr->data)=(*v).r; break;\\
case PyArray_BYTE: *(signed char *)(arr->data)=(*v).r; break;\\
        case PyArray_BOOL: *(npy_bool *)(arr->data)=((*v).r!=0 && (*v).i!=0); break;\\
case PyArray_USHORT: *(npy_ushort *)(arr->data)=(*v).r; break;\\
case PyArray_UINT: *(npy_uint *)(arr->data)=(*v).r; break;\\
case PyArray_ULONG: *(npy_ulong *)(arr->data)=(*v).r; break;\\
case PyArray_LONGLONG: *(npy_longlong *)(arr->data)=(*v).r; break;\\
case PyArray_ULONGLONG: *(npy_ulonglong *)(arr->data)=(*v).r; break;\\
case PyArray_LONGDOUBLE: *(npy_longdouble *)(arr->data)=(*v).r; break;\\
case PyArray_CLONGDOUBLE: *(npy_longdouble *)(arr->data)=(*v).r;*(npy_longdouble *)(arr->data+sizeof(npy_longdouble))=(*v).i;break;\\
case PyArray_OBJECT: (arr->descr->f->setitem)(pyobj_from_complex_ ## ctype ## 1((*v)),arr->data, arr); break;\\
default: return -2;\\
};\\
return -1;
"""
## cppmacros['NUMFROMARROBJ']="""\
## #define NUMFROMARROBJ(typenum,ctype) \\
## \tif (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\
## \telse arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\
## \tif (arr) {\\
## \t\tif (arr->descr->type_num==PyArray_OBJECT) {\\
## \t\t\tif (!ctype ## _from_pyobj(v,(arr->descr->getitem)(arr->data),\"\"))\\
## \t\t\tgoto capi_fail;\\
## \t\t} else {\\
## \t\t\t(arr->descr->cast[typenum])(arr->data,1,(char*)v,1,1);\\
## \t\t}\\
## \t\tif ((PyObject *)arr != obj) { Py_DECREF(arr); }\\
## \t\treturn 1;\\
## \t}
## """
## #XXX: Note that CNUMFROMARROBJ is identical with NUMFROMARROBJ
## cppmacros['CNUMFROMARROBJ']="""\
## #define CNUMFROMARROBJ(typenum,ctype) \\
## \tif (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\
## \telse arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\
## \tif (arr) {\\
## \t\tif (arr->descr->type_num==PyArray_OBJECT) {\\
## \t\t\tif (!ctype ## _from_pyobj(v,(arr->descr->getitem)(arr->data),\"\"))\\
## \t\t\tgoto capi_fail;\\
## \t\t} else {\\
## \t\t\t(arr->descr->cast[typenum])((void *)(arr->data),1,(void *)(v),1,1);\\
## \t\t}\\
## \t\tif ((PyObject *)arr != obj) { Py_DECREF(arr); }\\
## \t\treturn 1;\\
## \t}
## """
needs['GETSTRFROMPYTUPLE']=['STRINGCOPYN','PRINTPYOBJERR']
cppmacros['GETSTRFROMPYTUPLE']="""\
#define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\
\t\tPyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\
\t\tif (rv_cb_str == NULL)\\
\t\t\tgoto capi_fail;\\
\t\tif (PyString_Check(rv_cb_str)) {\\
\t\t\tstr[len-1]='\\0';\\
\t\t\tSTRINGCOPYN((str),PyString_AS_STRING((PyStringObject*)rv_cb_str),(len));\\
\t\t} else {\\
\t\t\tPRINTPYOBJERR(rv_cb_str);\\
\t\t\tPyErr_SetString(#modulename#_error,\"string object expected\");\\
\t\t\tgoto capi_fail;\\
\t\t}\\
\t}
"""
cppmacros['GETSCALARFROMPYTUPLE']="""\
#define GETSCALARFROMPYTUPLE(tuple,index,var,ctype,mess) {\\
\t\tif ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\
\t\tif (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\
\t\t\tgoto capi_fail;\\
\t}
"""
cppmacros['FAILNULL']="""\\
#define FAILNULL(p) do { \\
if ((p) == NULL) { \\
PyErr_SetString(PyExc_MemoryError, "NULL pointer found"); \\
goto capi_fail; \\
} \\
} while (0)
"""
needs['MEMCOPY']=['string.h', 'FAILNULL']
cppmacros['MEMCOPY']="""\
#define MEMCOPY(to,from,n)\\
do { FAILNULL(to); FAILNULL(from); (void)memcpy(to,from,n); } while (0)
"""
cppmacros['STRINGMALLOC']="""\
#define STRINGMALLOC(str,len)\\
\tif ((str = (string)malloc(sizeof(char)*(len+1))) == NULL) {\\
\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");\\
\t\tgoto capi_fail;\\
\t} else {\\
\t\t(str)[len] = '\\0';\\
\t}
"""
cppmacros['STRINGFREE']="""\
#define STRINGFREE(str) do {if (!(str == NULL)) free(str);} while (0)
"""
needs['STRINGCOPYN']=['string.h', 'FAILNULL']
cppmacros['STRINGCOPYN']="""\
#define STRINGCOPYN(to,from,buf_size) \\
do { \\
int _m = (buf_size); \\
char *_to = (to); \\
char *_from = (from); \\
FAILNULL(_to); FAILNULL(_from); \\
(void)strncpy(_to, _from, sizeof(char)*_m); \\
_to[_m-1] = '\\0'; \\
/* Padding with spaces instead of nulls */ \\
for (_m -= 2; _m >= 0 && _to[_m] == '\\0'; _m--) { \\
_to[_m] = ' '; \\
} \\
} while (0)
"""
needs['STRINGCOPY']=['string.h', 'FAILNULL']
cppmacros['STRINGCOPY']="""\
#define STRINGCOPY(to,from)\\
do { FAILNULL(to); FAILNULL(from); (void)strcpy(to,from); } while (0)
"""
cppmacros['CHECKGENERIC']="""\
#define CHECKGENERIC(check,tcheck,name) \\
\tif (!(check)) {\\
\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
\t\t/*goto capi_fail;*/\\
\t} else """
cppmacros['CHECKARRAY']="""\
#define CHECKARRAY(check,tcheck,name) \\
\tif (!(check)) {\\
\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
\t\t/*goto capi_fail;*/\\
\t} else """
cppmacros['CHECKSTRING']="""\
#define CHECKSTRING(check,tcheck,name,show,var)\\
\tif (!(check)) {\\
\t\tchar errstring[256];\\
\t\tsprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\
\t\tPyErr_SetString(#modulename#_error, errstring);\\
\t\t/*goto capi_fail;*/\\
\t} else """
cppmacros['CHECKSCALAR']="""\
#define CHECKSCALAR(check,tcheck,name,show,var)\\
\tif (!(check)) {\\
\t\tchar errstring[256];\\
\t\tsprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\
\t\tPyErr_SetString(#modulename#_error,errstring);\\
\t\t/*goto capi_fail;*/\\
\t} else """
## cppmacros['CHECKDIMS']="""\
## #define CHECKDIMS(dims,rank) \\
## \tfor (int i=0;i<(rank);i++)\\
## \t\tif (dims[i]<0) {\\
## \t\t\tfprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\
## \t\t\tgoto capi_fail;\\
## \t\t}
## """
cppmacros['ARRSIZE']='#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))'
cppmacros['OLDPYNUM']="""\
#ifdef OLDPYNUM
#error You need to install Numeric Python version 13 or higher. Get it from http://sourceforge.net/project/?group_id=1369
#endif
"""
################# C functions ###############
cfuncs['calcarrindex']="""\
static int calcarrindex(int *i,PyArrayObject *arr) {
\tint k,ii = i[0];
\tfor (k=1; k < arr->nd; k++)
\t\tii += (ii*(arr->dimensions[k] - 1)+i[k]); /* assuming contiguous arr */
\treturn ii;
}"""
cfuncs['calcarrindextr']="""\
static int calcarrindextr(int *i,PyArrayObject *arr) {
\tint k,ii = i[arr->nd-1];
\tfor (k=1; k < arr->nd; k++)
\t\tii += (ii*(arr->dimensions[arr->nd-k-1] - 1)+i[arr->nd-k-1]); /* assuming contiguous arr */
\treturn ii;
}"""
cfuncs['forcomb']="""\
static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache;
static int initforcomb(npy_intp *dims,int nd,int tr) {
int k;
if (dims==NULL) return 0;
if (nd<0) return 0;
forcombcache.nd = nd;
forcombcache.d = dims;
forcombcache.tr = tr;
if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0;
if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0;
for (k=1;k<nd;k++) {
forcombcache.i[k] = forcombcache.i_tr[nd-k-1] = 0;
}
forcombcache.i[0] = forcombcache.i_tr[nd-1] = -1;
return 1;
}
static int *nextforcomb(void) {
int j,*i,*i_tr,k;
int nd=forcombcache.nd;
if ((i=forcombcache.i) == NULL) return NULL;
if ((i_tr=forcombcache.i_tr) == NULL) return NULL;
if (forcombcache.d == NULL) return NULL;
i[0]++;
if (i[0]==forcombcache.d[0]) {
j=1;
while ((j<nd) && (i[j]==forcombcache.d[j]-1)) j++;
if (j==nd) {
free(i);
free(i_tr);
return NULL;
}
for (k=0;k<j;k++) i[k] = i_tr[nd-k-1] = 0;
i[j]++;
i_tr[nd-j-1]++;
} else
i_tr[nd-1]++;
if (forcombcache.tr) return i_tr;
return i;
}"""
needs['try_pyarr_from_string']=['STRINGCOPYN','PRINTPYOBJERR','string']
cfuncs['try_pyarr_from_string']="""\
static int try_pyarr_from_string(PyObject *obj,const string str) {
\tPyArrayObject *arr = NULL;
\tif (PyArray_Check(obj) && (!((arr = (PyArrayObject *)obj) == NULL)))
\t\t{ STRINGCOPYN(arr->data,str,PyArray_NBYTES(arr)); }
\treturn 1;
capi_fail:
\tPRINTPYOBJERR(obj);
\tPyErr_SetString(#modulename#_error,\"try_pyarr_from_string failed\");
\treturn 0;
}
"""
needs['string_from_pyobj']=['string','STRINGMALLOC','STRINGCOPYN']
cfuncs['string_from_pyobj']="""\
static int string_from_pyobj(string *str,int *len,const string inistr,PyObject *obj,const char *errmess) {
\tPyArrayObject *arr = NULL;
\tPyObject *tmp = NULL;
#ifdef DEBUGCFUNCS
fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\",(char*)str,*len,(char *)inistr,obj);
#endif
\tif (obj == Py_None) {
\t\tif (*len == -1)
\t\t\t*len = strlen(inistr); /* Will this cause problems? */
\t\tSTRINGMALLOC(*str,*len);
\t\tSTRINGCOPYN(*str,inistr,*len+1);
\t\treturn 1;
\t}
\tif (PyArray_Check(obj)) {
\t\tif ((arr = (PyArrayObject *)obj) == NULL)
\t\t\tgoto capi_fail;
\t\tif (!ISCONTIGUOUS(arr)) {
\t\t\tPyErr_SetString(PyExc_ValueError,\"array object is non-contiguous.\");
\t\t\tgoto capi_fail;
\t\t}
\t\tif (*len == -1)
\t\t\t*len = (arr->descr->elsize)*PyArray_SIZE(arr);
\t\tSTRINGMALLOC(*str,*len);
\t\tSTRINGCOPYN(*str,arr->data,*len+1);
\t\treturn 1;
\t}
\tif (PyString_Check(obj)) {
\t\ttmp = obj;
\t\tPy_INCREF(tmp);
\t}
\telse
\t\ttmp = PyObject_Str(obj);
\tif (tmp == NULL) goto capi_fail;
\tif (*len == -1)
\t\t*len = PyString_GET_SIZE(tmp);
\tSTRINGMALLOC(*str,*len);
\tSTRINGCOPYN(*str,PyString_AS_STRING(tmp),*len+1);
\tPy_DECREF(tmp);
\treturn 1;
capi_fail:
\tPy_XDECREF(tmp);
\t{
\t\tPyObject* err = PyErr_Occurred();
\t\tif (err==NULL) err = #modulename#_error;
\t\tPyErr_SetString(err,errmess);
\t}
\treturn 0;
}
"""
needs['char_from_pyobj']=['int_from_pyobj']
cfuncs['char_from_pyobj']="""\
static int char_from_pyobj(char* v,PyObject *obj,const char *errmess) {
\tint i=0;
\tif (int_from_pyobj(&i,obj,errmess)) {
\t\t*v = (char)i;
\t\treturn 1;
\t}
\treturn 0;
}
"""
needs['signed_char_from_pyobj']=['int_from_pyobj','signed_char']
cfuncs['signed_char_from_pyobj']="""\
static int signed_char_from_pyobj(signed_char* v,PyObject *obj,const char *errmess) {
\tint i=0;
\tif (int_from_pyobj(&i,obj,errmess)) {
\t\t*v = (signed_char)i;
\t\treturn 1;
\t}
\treturn 0;
}
"""
needs['short_from_pyobj']=['int_from_pyobj']
cfuncs['short_from_pyobj']="""\
static int short_from_pyobj(short* v,PyObject *obj,const char *errmess) {
\tint i=0;
\tif (int_from_pyobj(&i,obj,errmess)) {
\t\t*v = (short)i;
\t\treturn 1;
\t}
\treturn 0;
}
"""
cfuncs['int_from_pyobj']="""\
static int int_from_pyobj(int* v,PyObject *obj,const char *errmess) {
\tPyObject* tmp = NULL;
\tif (PyInt_Check(obj)) {
\t\t*v = (int)PyInt_AS_LONG(obj);
\t\treturn 1;
\t}
\ttmp = PyNumber_Int(obj);
\tif (tmp) {
\t\t*v = PyInt_AS_LONG(tmp);
\t\tPy_DECREF(tmp);
\t\treturn 1;
\t}
\tif (PyComplex_Check(obj))
\t\ttmp = PyObject_GetAttrString(obj,\"real\");
\telse if (PyString_Check(obj))
\t\t/*pass*/;
\telse if (PySequence_Check(obj))
\t\ttmp = PySequence_GetItem(obj,0);
\tif (tmp) {
\t\tPyErr_Clear();
\t\tif (int_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
\t\tPy_DECREF(tmp);
\t}
\t{
\t\tPyObject* err = PyErr_Occurred();
\t\tif (err==NULL) err = #modulename#_error;
\t\tPyErr_SetString(err,errmess);
\t}
\treturn 0;
}
"""
cfuncs['long_from_pyobj']="""\
static int long_from_pyobj(long* v,PyObject *obj,const char *errmess) {
\tPyObject* tmp = NULL;
\tif (PyInt_Check(obj)) {
\t\t*v = PyInt_AS_LONG(obj);
\t\treturn 1;
\t}
\ttmp = PyNumber_Int(obj);
\tif (tmp) {
\t\t*v = PyInt_AS_LONG(tmp);
\t\tPy_DECREF(tmp);
\t\treturn 1;
\t}
\tif (PyComplex_Check(obj))
\t\ttmp = PyObject_GetAttrString(obj,\"real\");
\telse if (PyString_Check(obj))
\t\t/*pass*/;
\telse if (PySequence_Check(obj))
\t\ttmp = PySequence_GetItem(obj,0);
\tif (tmp) {
\t\tPyErr_Clear();
\t\tif (long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
\t\tPy_DECREF(tmp);
\t}
\t{
\t\tPyObject* err = PyErr_Occurred();
\t\tif (err==NULL) err = #modulename#_error;
\t\tPyErr_SetString(err,errmess);
\t}
\treturn 0;
}
"""
needs['long_long_from_pyobj']=['long_long']
cfuncs['long_long_from_pyobj']="""\
static int long_long_from_pyobj(long_long* v,PyObject *obj,const char *errmess) {
\tPyObject* tmp = NULL;
\tif (PyLong_Check(obj)) {
\t\t*v = PyLong_AsLongLong(obj);
\t\treturn (!PyErr_Occurred());
\t}
\tif (PyInt_Check(obj)) {
\t\t*v = (long_long)PyInt_AS_LONG(obj);
\t\treturn 1;
\t}
\ttmp = PyNumber_Long(obj);
\tif (tmp) {
\t\t*v = PyLong_AsLongLong(tmp);
\t\tPy_DECREF(tmp);
\t\treturn (!PyErr_Occurred());
\t}
\tif (PyComplex_Check(obj))
\t\ttmp = PyObject_GetAttrString(obj,\"real\");
\telse if (PyString_Check(obj))
\t\t/*pass*/;
\telse if (PySequence_Check(obj))
\t\ttmp = PySequence_GetItem(obj,0);
\tif (tmp) {
\t\tPyErr_Clear();
\t\tif (long_long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
\t\tPy_DECREF(tmp);
\t}
\t{
\t\tPyObject* err = PyErr_Occurred();
\t\tif (err==NULL) err = #modulename#_error;
\t\tPyErr_SetString(err,errmess);
\t}
\treturn 0;
}
"""
needs['long_double_from_pyobj']=['double_from_pyobj','long_double']
cfuncs['long_double_from_pyobj']="""\
static int long_double_from_pyobj(long_double* v,PyObject *obj,const char *errmess) {
\tdouble d=0;
\tif (PyArray_CheckScalar(obj)){
\t\tif PyArray_IsScalar(obj, LongDouble) {
\t\t\tPyArray_ScalarAsCtype(obj, v);
\t\t\treturn 1;
\t\t}
\t\telse if (PyArray_Check(obj) && PyArray_TYPE(obj)==PyArray_LONGDOUBLE) {
\t\t\t(*v) = *((npy_longdouble *)PyArray_DATA(obj));
\t\t\treturn 1;
\t\t}
\t}
\tif (double_from_pyobj(&d,obj,errmess)) {
\t\t*v = (long_double)d;
\t\treturn 1;
\t}
\treturn 0;
}
"""
cfuncs['double_from_pyobj']="""\
static int double_from_pyobj(double* v,PyObject *obj,const char *errmess) {
\tPyObject* tmp = NULL;
\tif (PyFloat_Check(obj)) {
#ifdef __sgi
\t\t*v = PyFloat_AsDouble(obj);
#else
\t\t*v = PyFloat_AS_DOUBLE(obj);
#endif
\t\treturn 1;
\t}
\ttmp = PyNumber_Float(obj);
\tif (tmp) {
#ifdef __sgi
\t\t*v = PyFloat_AsDouble(tmp);
#else
\t\t*v = PyFloat_AS_DOUBLE(tmp);
#endif
\t\tPy_DECREF(tmp);
\t\treturn 1;
\t}
\tif (PyComplex_Check(obj))
\t\ttmp = PyObject_GetAttrString(obj,\"real\");
\telse if (PyString_Check(obj))
\t\t/*pass*/;
\telse if (PySequence_Check(obj))
\t\ttmp = PySequence_GetItem(obj,0);
\tif (tmp) {
\t\tPyErr_Clear();
\t\tif (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
\t\tPy_DECREF(tmp);
\t}
\t{
\t\tPyObject* err = PyErr_Occurred();
\t\tif (err==NULL) err = #modulename#_error;
\t\tPyErr_SetString(err,errmess);
\t}
\treturn 0;
}
"""
needs['float_from_pyobj']=['double_from_pyobj']
cfuncs['float_from_pyobj']="""\
static int float_from_pyobj(float* v,PyObject *obj,const char *errmess) {
\tdouble d=0.0;
\tif (double_from_pyobj(&d,obj,errmess)) {
\t\t*v = (float)d;
\t\treturn 1;
\t}
\treturn 0;
}
"""
needs['complex_long_double_from_pyobj']=['complex_long_double','long_double',
'complex_double_from_pyobj']
cfuncs['complex_long_double_from_pyobj']="""\
static int complex_long_double_from_pyobj(complex_long_double* v,PyObject *obj,const char *errmess) {
\tcomplex_double cd={0.0,0.0};
\tif (PyArray_CheckScalar(obj)){
\t\tif PyArray_IsScalar(obj, CLongDouble) {
\t\t\tPyArray_ScalarAsCtype(obj, v);
\t\t\treturn 1;
\t\t}
\t\telse if (PyArray_Check(obj) && PyArray_TYPE(obj)==PyArray_CLONGDOUBLE) {
\t\t\t(*v).r = ((npy_clongdouble *)PyArray_DATA(obj))->real;
\t\t\t(*v).i = ((npy_clongdouble *)PyArray_DATA(obj))->imag;
\t\t\treturn 1;
\t\t}
\t}
\tif (complex_double_from_pyobj(&cd,obj,errmess)) {
\t\t(*v).r = (long_double)cd.r;
\t\t(*v).i = (long_double)cd.i;
\t\treturn 1;
\t}
\treturn 0;
}
"""
needs['complex_double_from_pyobj']=['complex_double']
cfuncs['complex_double_from_pyobj']="""\
static int complex_double_from_pyobj(complex_double* v,PyObject *obj,const char *errmess) {
\tPy_complex c;
\tif (PyComplex_Check(obj)) {
\t\tc=PyComplex_AsCComplex(obj);
\t\t(*v).r=c.real, (*v).i=c.imag;
\t\treturn 1;
\t}
\tif (PyArray_IsScalar(obj, ComplexFloating)) {
\t\tif (PyArray_IsScalar(obj, CFloat)) {
\t\t\tnpy_cfloat new;
\t\t\tPyArray_ScalarAsCtype(obj, &new);
\t\t\t(*v).r = (double)new.real;
\t\t\t(*v).i = (double)new.imag;
\t\t}
\t\telse if (PyArray_IsScalar(obj, CLongDouble)) {
\t\t\tnpy_clongdouble new;
\t\t\tPyArray_ScalarAsCtype(obj, &new);
\t\t\t(*v).r = (double)new.real;
\t\t\t(*v).i = (double)new.imag;
\t\t}
\t\telse { /* if (PyArray_IsScalar(obj, CDouble)) */
\t\t\tPyArray_ScalarAsCtype(obj, v);
\t\t}
\t\treturn 1;
\t}
\tif (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */
\t\tPyObject *arr;
\t\tif (PyArray_Check(obj)) {
\t\t\tarr = PyArray_Cast((PyArrayObject *)obj, PyArray_CDOUBLE);
\t\t}
\t\telse {
\t\t\tarr = PyArray_FromScalar(obj, PyArray_DescrFromType(PyArray_CDOUBLE));
\t\t}
\t\tif (arr==NULL) return 0;
\t\t(*v).r = ((npy_cdouble *)PyArray_DATA(arr))->real;
\t\t(*v).i = ((npy_cdouble *)PyArray_DATA(arr))->imag;
\t\treturn 1;
\t}
\t/* Python does not provide PyNumber_Complex function :-( */
\t(*v).i=0.0;
\tif (PyFloat_Check(obj)) {
#ifdef __sgi
\t\t(*v).r = PyFloat_AsDouble(obj);
#else
\t\t(*v).r = PyFloat_AS_DOUBLE(obj);
#endif
\t\treturn 1;
\t}
\tif (PyInt_Check(obj)) {
\t\t(*v).r = (double)PyInt_AS_LONG(obj);
\t\treturn 1;
\t}
\tif (PyLong_Check(obj)) {
\t\t(*v).r = PyLong_AsDouble(obj);
\t\treturn (!PyErr_Occurred());
\t}
\tif (PySequence_Check(obj) && (!PyString_Check(obj))) {
\t\tPyObject *tmp = PySequence_GetItem(obj,0);
\t\tif (tmp) {
\t\t\tif (complex_double_from_pyobj(v,tmp,errmess)) {
\t\t\t\tPy_DECREF(tmp);
\t\t\t\treturn 1;
\t\t\t}
\t\t\tPy_DECREF(tmp);
\t\t}
\t}
\t{
\t\tPyObject* err = PyErr_Occurred();
\t\tif (err==NULL)
\t\t\terr = PyExc_TypeError;
\t\tPyErr_SetString(err,errmess);
\t}
\treturn 0;
}
"""
needs['complex_float_from_pyobj']=['complex_float','complex_double_from_pyobj']
cfuncs['complex_float_from_pyobj']="""\
static int complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) {
\tcomplex_double cd={0.0,0.0};
\tif (complex_double_from_pyobj(&cd,obj,errmess)) {
\t\t(*v).r = (float)cd.r;
\t\t(*v).i = (float)cd.i;
\t\treturn 1;
\t}
\treturn 0;
}
"""
needs['try_pyarr_from_char']=['pyobj_from_char1','TRYPYARRAYTEMPLATE']
cfuncs['try_pyarr_from_char']='static int try_pyarr_from_char(PyObject* obj,char* v) {\n\tTRYPYARRAYTEMPLATE(char,\'c\');\n}\n'
needs['try_pyarr_from_unsigned_char']=['TRYPYARRAYTEMPLATE','unsigned_char']
cfuncs['try_pyarr_from_unsigned_char']='static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n\tTRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n'
needs['try_pyarr_from_signed_char']=['TRYPYARRAYTEMPLATE','signed_char']
cfuncs['try_pyarr_from_signed_char']='static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n\tTRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n'
needs['try_pyarr_from_short']=['pyobj_from_short1','TRYPYARRAYTEMPLATE']
cfuncs['try_pyarr_from_short']='static int try_pyarr_from_short(PyObject* obj,short* v) {\n\tTRYPYARRAYTEMPLATE(short,\'s\');\n}\n'
needs['try_pyarr_from_int']=['pyobj_from_int1','TRYPYARRAYTEMPLATE']
cfuncs['try_pyarr_from_int']='static int try_pyarr_from_int(PyObject* obj,int* v) {\n\tTRYPYARRAYTEMPLATE(int,\'i\');\n}\n'
needs['try_pyarr_from_long']=['pyobj_from_long1','TRYPYARRAYTEMPLATE']
cfuncs['try_pyarr_from_long']='static int try_pyarr_from_long(PyObject* obj,long* v) {\n\tTRYPYARRAYTEMPLATE(long,\'l\');\n}\n'
needs['try_pyarr_from_long_long']=['pyobj_from_long_long1','TRYPYARRAYTEMPLATE','long_long']
cfuncs['try_pyarr_from_long_long']='static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n\tTRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n'
needs['try_pyarr_from_float']=['pyobj_from_float1','TRYPYARRAYTEMPLATE']
cfuncs['try_pyarr_from_float']='static int try_pyarr_from_float(PyObject* obj,float* v) {\n\tTRYPYARRAYTEMPLATE(float,\'f\');\n}\n'
needs['try_pyarr_from_double']=['pyobj_from_double1','TRYPYARRAYTEMPLATE']
cfuncs['try_pyarr_from_double']='static int try_pyarr_from_double(PyObject* obj,double* v) {\n\tTRYPYARRAYTEMPLATE(double,\'d\');\n}\n'
needs['try_pyarr_from_complex_float']=['pyobj_from_complex_float1','TRYCOMPLEXPYARRAYTEMPLATE','complex_float']
cfuncs['try_pyarr_from_complex_float']='static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n\tTRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n'
needs['try_pyarr_from_complex_double']=['pyobj_from_complex_double1','TRYCOMPLEXPYARRAYTEMPLATE','complex_double']
cfuncs['try_pyarr_from_complex_double']='static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n\tTRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n'
needs['create_cb_arglist']=['CFUNCSMESS','PRINTPYOBJERR','MINMAX']
cfuncs['create_cb_arglist']="""\
static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofargs,const int nofoptargs,int *nofargs,PyTupleObject **args,const char *errmess) {
\tPyObject *tmp = NULL;
\tPyObject *tmp_fun = NULL;
\tint tot,opt,ext,siz,i,di=0;
\tCFUNCSMESS(\"create_cb_arglist\\n\");
\ttot=opt=ext=siz=0;
\t/* Get the total number of arguments */
\tif (PyFunction_Check(fun))
\t\ttmp_fun = fun;
\telse {
\t\tdi = 1;
\t\tif (PyObject_HasAttrString(fun,\"im_func\")) {
\t\t\ttmp_fun = PyObject_GetAttrString(fun,\"im_func\");
\t\t}
\t\telse if (PyObject_HasAttrString(fun,\"__call__\")) {
\t\t\ttmp = PyObject_GetAttrString(fun,\"__call__\");
\t\t\tif (PyObject_HasAttrString(tmp,\"im_func\"))
\t\t\t\ttmp_fun = PyObject_GetAttrString(tmp,\"im_func\");
\t\t\telse {
\t\t\t\ttmp_fun = fun; /* built-in function */
\t\t\t\ttot = maxnofargs;
\t\t\t\tif (xa != NULL)
\t\t\t\t\ttot += PyTuple_Size((PyObject *)xa);
\t\t\t}
\t\t\tPy_XDECREF(tmp);
\t\t}
\t\telse if (PyFortran_Check(fun) || PyFortran_Check1(fun)) {
\t\t\ttot = maxnofargs;
\t\t\tif (xa != NULL)
\t\t\t\ttot += PyTuple_Size((PyObject *)xa);
\t\t\ttmp_fun = fun;
\t\t}
\t\telse if (PyCObject_Check(fun)) {
\t\t\ttot = maxnofargs;
\t\t\tif (xa != NULL)
\t\t\t\text = PyTuple_Size((PyObject *)xa);
\t\t\tif(ext>0) {
\t\t\t\tfprintf(stderr,\"extra arguments tuple cannot be used with CObject call-back\\n\");
\t\t\t\tgoto capi_fail;
\t\t\t}
\t\t\ttmp_fun = fun;
\t\t}
\t}
if (tmp_fun==NULL) {
fprintf(stderr,\"Call-back argument must be function|instance|instance.__call__|f2py-function but got %s.\\n\",(fun==NULL?\"NULL\":fun->ob_type->tp_name));
goto capi_fail;
}
\tif (PyObject_HasAttrString(tmp_fun,\"func_code\")) {
\t\tif (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\"))
\t\t\ttot = PyInt_AsLong(PyObject_GetAttrString(tmp,\"co_argcount\")) - di;
\t\tPy_XDECREF(tmp);
\t}
\t/* Get the number of optional arguments */
\tif (PyObject_HasAttrString(tmp_fun,\"func_defaults\"))
\t\tif (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"func_defaults\")))
\t\t\topt = PyTuple_Size(tmp);
\t\tPy_XDECREF(tmp);
\t/* Get the number of extra arguments */
\tif (xa != NULL)
\t\text = PyTuple_Size((PyObject *)xa);
\t/* Calculate the size of call-backs argument list */
\tsiz = MIN(maxnofargs+ext,tot);
\t*nofargs = MAX(0,siz-ext);
#ifdef DEBUGCFUNCS
\tfprintf(stderr,\"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),tot,opt,ext,siz,nofargs=%d(-%d),%d,%d,%d,%d,%d\\n\",maxnofargs,nofoptargs,tot,opt,ext,siz,*nofargs);
#endif
\tif (siz<tot-opt) {
\t\tfprintf(stderr,\"create_cb_arglist: Failed to build argument list (siz) with enough arguments (tot-opt) required by user-supplied function (siz,tot,opt=%d,%d,%d).\\n\",siz,tot,opt);
\t\tgoto capi_fail;
\t}
\t/* Initialize argument list */
\t*args = (PyTupleObject *)PyTuple_New(siz);
\tfor (i=0;i<*nofargs;i++) {
\t\tPy_INCREF(Py_None);
\t\tPyTuple_SET_ITEM((PyObject *)(*args),i,Py_None);
\t}
\tif (xa != NULL)
\t\tfor (i=(*nofargs);i<siz;i++) {
\t\t\ttmp = PyTuple_GetItem((PyObject *)xa,i-(*nofargs));
\t\t\tPy_INCREF(tmp);
\t\t\tPyTuple_SET_ITEM(*args,i,tmp);
\t\t}
\tCFUNCSMESS(\"create_cb_arglist-end\\n\");
\treturn 1;
capi_fail:
\tif ((PyErr_Occurred())==NULL)
\t\tPyErr_SetString(#modulename#_error,errmess);
\treturn 0;
}
"""
def buildcfuncs():
from capi_maps import c2capi_map
for k in c2capi_map.keys():
m='pyarr_from_p_%s1'%k
cppmacros[m]='#define %s(v) (PyArray_SimpleNewFromData(0,NULL,%s,(char *)v))'%(m,c2capi_map[k])
k='string'
m='pyarr_from_p_%s1'%k
cppmacros[m]='#define %s(v,dims) (PyArray_SimpleNewFromData(1,dims,PyArray_CHAR,(char *)v))'%(m)
############ Auxiliary functions for sorting needs ###################
def append_needs(need,flag=1):
global outneeds,needs
if type(need)==types.ListType:
for n in need:
append_needs(n,flag)
elif type(need)==types.StringType:
if not need: return
if need in includes0:
n = 'includes0'
elif need in includes:
n = 'includes'
elif need in typedefs:
n = 'typedefs'
elif need in typedefs_generated:
n = 'typedefs_generated'
elif need in cppmacros:
n = 'cppmacros'
elif need in cfuncs:
n = 'cfuncs'
elif need in callbacks:
n = 'callbacks'
elif need in f90modhooks:
n = 'f90modhooks'
elif need in commonhooks:
n = 'commonhooks'
else:
errmess('append_needs: unknown need %s\n'%(`need`))
return
if need in outneeds[n]: return
if flag:
tmp={}
if need in needs:
for nn in needs[need]:
t=append_needs(nn,0)
if type(t)==types.DictType:
for nnn in t.keys():
if nnn in tmp:
tmp[nnn]=tmp[nnn]+t[nnn]
else:
tmp[nnn]=t[nnn]
for nn in tmp.keys():
for nnn in tmp[nn]:
if nnn not in outneeds[nn]:
outneeds[nn]=[nnn]+outneeds[nn]
outneeds[n].append(need)
else:
tmp={}
if need in needs:
for nn in needs[need]:
t=append_needs(nn,flag)
if type(t)==types.DictType:
for nnn in t.keys():
if nnn in tmp:
tmp[nnn]=t[nnn]+tmp[nnn]
else:
tmp[nnn]=t[nnn]
if n not in tmp:
tmp[n]=[]
tmp[n].append(need)
return tmp
else:
errmess('append_needs: expected list or string but got :%s\n'%(`need`))
def get_needs():
global outneeds,needs
res={}
for n in outneeds.keys():
out=[]
saveout=copy.copy(outneeds[n])
while len(outneeds[n])>0:
if outneeds[n][0] not in needs:
out.append(outneeds[n][0])
del outneeds[n][0]
else:
flag=0
for k in outneeds[n][1:]:
if k in needs[outneeds[n][0]]:
flag=1
break
if flag:
outneeds[n]=outneeds[n][1:]+[outneeds[n][0]]
else:
out.append(outneeds[n][0])
del outneeds[n][0]
if saveout and (0 not in map(lambda x,y:x==y,saveout,outneeds[n])):
print n,saveout
errmess('get_needs: no progress in sorting needs, probably circular dependence, skipping.\n')
out=out+saveout
break
saveout=copy.copy(outneeds[n])
if out==[]:
out=[n]
res[n]=out
return res
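if __name__ == "__main__":
    # Illustrative driver only (added for clarity): exercise the
    # need-resolution helpers defined above on one example entry.  The real
    # caller is f2py's rule machinery; nothing here is part of the public
    # interface and the chosen need is just an example.
    append_needs('string_from_pyobj')
    for section, names in get_needs().items():
        print section, names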
| bsd-3-clause | -3,851,656,783,475,992,600 | 34.082616 | 185 | 0.611294 | false |
dtiarks/ThesisPlot | Chap5/pi_phase_shift_estimate.py | 1 | 5855 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 13 11:25:42 2017
@author: daniel
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
import json
import io
from scipy.optimize import newton
from scipy import integrate
from scipy.optimize import curve_fit
import re
c = 299792458 # m/s, speed of light CODATA 2014
a0 = 0.52917721067e-10 # m, Bohr radius
C6 = 2.3e23 * 4.36e-18 * a0**6 # Jm^6, Van-der-Waals coefficient for the 67s - 69s
hbar = 6.626070040e-34/(2 * np.pi) # Js, reduced Planck constant hbar = h/(2*pi), h from CODATA 2014
rho_peak = 1.8e12/1e-6 # peak density: 1.8e12 cm^-3, converted to m^-3
d = 2.534e-29 # Cm, dipole matrix element (D. A. Steck)
Gamma_e = 2*np.pi * 6.065e6 # decay rate (D. A. Steck)
epsilon_0 = 8.854187817e-12 # dielectric constant, CODATA 2014
L = 61e-6 # medium length in m
omega_s = 2*np.pi * 384.23e12 # rad/s, transition frequency
gamma_21 = 2.2/(2*np.pi)
chi_0 = 2*rho_peak*d**2 / (epsilon_0*hbar*Gamma_e) # prefactor of the susceptibility for the cycling transition (|R> polarization)
#R_b=18e-6
def fitFunc(t, A, phi,C):
return A*np.sin(2*np.pi*20*t+phi)+C
def susceptibility(Delta_s, Delta_c, gamma_21, Omega_c, ladder=True):
delta = (Delta_s + (-1 + 2*int(ladder)) * Delta_c)
return 1j*(gamma_21 - 2j * delta)/(np.abs(Omega_c)**2 + (1 - 2j * Delta_s)*(gamma_21 - 2j * delta))
def susceptibility_off(Delta_s, Delta_c, gamma_21, Omega_c, ladder=True):
return -1/(2*Delta_s+1j)
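# Note added for clarity (not part of the original analysis): susceptibility()
# above is the steady-state ladder-EIT susceptibility with detunings and rates
# in units of Gamma_e.  On two-photon resonance and for gamma_21 = 0 it
# vanishes exactly, while the control-off response stays two-level like, e.g.
#   susceptibility(1.5, -1.5, 0.0, 1.7)      # -> 0j (transparency window)
#   susceptibility_off(1.5, -1.5, 0.0, 1.7)  # -> -1/(3 + 1j), bare absorption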
def vdW_pot(r, r0):
return -C6 * (r-r0)**-6
def cond_phase(d_c,om_c,d_o):
im_chi_vdw = lambda x,y: omega_s/(c)*np.imag(susceptibility(y, d_c - vdW_pot(x, 1e-10)/(hbar*Gamma_e), gamma_21, om_c))
od_vdw = lambda x: integrate.quad(im_chi_vdw, -L/2, L/2,args=(x,))[0]
intersection = newton(lambda x: L*omega_s/(c)*np.imag(susceptibility(x, d_c, gamma_21, om_c) - susceptibility(x, d_c, gamma_21, 0))-d_o, -d_c)
# intersection = newton(lambda x: L*omega_s/(c)*np.imag(susceptibility(x, d_c, gamma_21, om_c)) - od_vdw(x)-d_o, -d_c)
chi_nb = susceptibility(intersection, d_c, gamma_21, om_c)
phi_0=omega_s/(2*c) * L * chi_0 * np.real(chi_nb)
r_chi_vdw = lambda x: np.real(susceptibility(intersection, d_c - vdW_pot(x, 1e-10)/(hbar*Gamma_e), gamma_21, om_c))
phi_1=omega_s/(2*c) * chi_0 *integrate.quad(r_chi_vdw, -L/2, L/2)[0]
d_phi=phi_1-phi_0
return intersection,d_phi
def cond_trans(d_c,om_c,d_o):
intersection = newton(lambda x: L*omega_s/(c)*np.imag(susceptibility(x, d_c, gamma_21, om_c) - susceptibility(x, d_c, gamma_21, 0))-d_o, -d_c)
im_chi_vdw = lambda x: np.imag(susceptibility(intersection, d_c - vdW_pot(x, 1e-10)/(hbar*Gamma_e), gamma_21, om_c))
t_1=omega_s/c * chi_0 *integrate.quad(im_chi_vdw, -L/2, L/2)[0]
return intersection,np.exp(-t_1)
# the parameters, all in units of Gamma_3
Delta_c = 2*np.pi*9.2*10**6/Gamma_e
Delta_s = -2*np.pi*10.*10**6/Gamma_e
ds_off = 2*np.pi*0.0*10**6/Gamma_e
Omega_c = 2*np.pi*10.4*10**6/Gamma_e
#Ga=Omega_c**2/(4*np.abs(Delta_s))
Ga=np.true_divide(Omega_c**2*np.abs(Delta_s),1+np.sqrt((4*np.abs(Delta_s)**2+1)**2-4*np.abs(Delta_s)**2*1))
R_b=1.0*(C6/(hbar*Gamma_e)/Ga)**(1./6.)
print("Blockade Radius: %.2f um"%(R_b*1e6))
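# Sketch of the estimate above (assumes the standard blockade condition): R_b
# follows from equating the van der Waals shift with the effective EIT
# linewidth Ga (both in units of Gamma_e),
#     C6/(hbar*Gamma_e*R_b**6) = Ga   =>   R_b = (C6/(hbar*Gamma_e)/Ga)**(1./6.)
# which is exactly the expression evaluated for R_b above.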
od_new = lambda x: omega_s/c*chi_0*L*np.imag(susceptibility(x-ds_off, Delta_c , gamma_21, Omega_c))
ph_new = lambda x: 0.5*omega_s/c*chi_0*L*np.real(susceptibility(x-ds_off, Delta_c , gamma_21, Omega_c))-0.5*omega_s/c*chi_0*L/15.*np.real(susceptibility(x, Delta_c , gamma_21, Omega_c))
#od0_new = lambda x: 0.7*omega_s/(c)* L*chi_0*np.imag(susceptibility(x-ds_off, Delta_c , gamma_21, 0*Omega_c))
od0_new = lambda x: omega_s/c*chi_0*L*np.imag(susceptibility_off(x-0*ds_off, Delta_c , gamma_21, 0*Omega_c))
ph0_new = lambda x: 0.5*omega_s/c*chi_0*L*np.real(susceptibility_off(x-0*ds_off, Delta_c , gamma_21, 0*Omega_c))-0.5*omega_s/c*chi_0*L/15.*np.real(susceptibility_off(x, Delta_c , gamma_21, 0*Omega_c))
od_max=omega_s/c*chi_0*L
print("gamma_21: %.2f"%gamma_21)
print("est. od_max: %.2f"%od_max)
od_max=26.3
chi_nb = susceptibility(Delta_s, Delta_c, gamma_21, Omega_c)
phi_0=0.5*od_max*np.real(chi_nb)
r_chi_vdw = lambda x: np.real(susceptibility(Delta_s, Delta_c - vdW_pot(x, 1e-10)/(hbar*Gamma_e), gamma_21, Omega_c))
phi_1=0.5*od_max*integrate.quad(r_chi_vdw, -L/2, L/2)[0]/L
D_phi_blockaded=phi_1-phi_0
#D_phi=ph_new(Delta_s)-ph0_new(Delta_s)
#D_phi_blockaded = D_phi*2*R_b/L
print("Expected cond. phase shift: %.3f rad"%D_phi_blockaded)
#R_b=15e-6
print("Simple cond. phase shift: %.3f rad"%(6.6*2*R_b/L))
#fig=plt.figure(1, figsize=(8, 9), dpi=80,)
#fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.2)
#
#ax0 = fig.add_subplot(211)
#ax0.clear()
#ds=np.linspace(4,16,num=n)
#dsf=np.linspace(4,16,num=len(freqFitO))
##dsf2=np.linspace(4,16,num=len(freqFitO))
#ax0.plot(ds,2*np.log(aRef[:,0]/a[:,0]),'ro',label="$\Omega_c=$???")
#ax0.plot(ds,2*np.log(aRef[:,1]/a[:,1]),'bo',label="$\Omega_c=$0")
##ax0.plot(dsf,OfitOff,"b")
##ax0.plot(dsf,OfitOn,"r")
#
#ax0.plot(freqFitP,OfitOff,"b")
#ax0.plot(freqFitP,OfitOn,"r")
#ax0.plot(dsf,od_new(dsf*1e6*2*np.pi/Gamma_e),"g")
#ax0.plot(dsf,od0_new(dsf*1e6*2*np.pi/Gamma_e),"k")
#
#ax0.tick_params(axis="x",which="both",labelbottom="off")
#ax0.set_ylabel("OD")
#trans = ax0.get_xaxis_transform() # x in data untis, y in axes fraction
#ax0.annotate('(a)', xy=(4.3,0.9 ), xycoords=trans)
#
#
#ax1 = fig.add_subplot(212)
#ax1.clear()
#
#
#dsf=np.linspace(4,16,num=len(freqFitP))
#ax1.plot(ds,(p[:,0]-pRef[:,0]),'ro',label="control on")
#ax1.plot(ds,(p[:,1]-pRef[:,1]),'bo',label="control off")
##ax1.plot(dsf,pfitOff,"b")
##ax1.plot(dsf,pfitOn,"r")
#
#ax1.plot(freqFitP,pfitOff,"b")
#ax1.plot(freqFitP,pfitOn,"r")
#ax1.plot(dsf,ph_new(dsf*1e6*2*np.pi/Gamma_e)+0*phase_offset,"g")
#ax1.plot(dsf,ph0_new(dsf*1e6*2*np.pi/Gamma_e)+0*phase_offset,"k")
#
#plt.show()
| mit | -2,634,745,903,521,214,500 | 35.141975 | 200 | 0.64953 | false |
wsriley/itsimple | myAnalyzers/GENERATE-VARS/translate/instantiate.py | 6 | 3118 | #! /usr/bin/env python
# -*- coding: latin-1 -*-
from __future__ import with_statement
import build_model
import pddl_to_prolog
import pddl
import timers
def get_fluent_facts(task, model):
fluent_predicates = set()
for action in task.actions:
for effect in action.effects:
fluent_predicates.add(effect.literal.predicate)
for axiom in task.axioms:
fluent_predicates.add(axiom.name)
return set([fact for fact in model
if fact.predicate in fluent_predicates])
def get_objects_by_type(typed_objects):
result = {}
for obj in typed_objects:
result.setdefault(obj.type, []).append(obj.name)
return result
def instantiate(task, model):
relaxed_reachable = False
fluent_facts = get_fluent_facts(task, model)
init_facts = set(task.init)
type_to_objects = get_objects_by_type(task.objects)
instantiated_actions = []
instantiated_axioms = []
for atom in model:
if isinstance(atom.predicate, pddl.Action):
action = atom.predicate
parameters = action.parameters
if isinstance(action.precondition, pddl.ExistentialCondition):
parameters = list(parameters)
parameters += action.precondition.parameters
variable_mapping = dict([(par.name, arg)
for par, arg in zip(parameters, atom.args)])
inst_action = action.instantiate(variable_mapping, init_facts,
fluent_facts, type_to_objects)
if inst_action:
instantiated_actions.append(inst_action)
elif isinstance(atom.predicate, pddl.Axiom):
axiom = atom.predicate
parameters = axiom.parameters
if isinstance(axiom.condition, pddl.ExistentialCondition):
parameters = list(parameters)
parameters += axiom.condition.parameters
variable_mapping = dict([(par.name, arg)
for par, arg in zip(parameters, atom.args)])
inst_axiom = axiom.instantiate(variable_mapping, init_facts, fluent_facts)
if inst_axiom:
instantiated_axioms.append(inst_axiom)
elif atom.predicate == "@goal-reachable":
relaxed_reachable = True
return relaxed_reachable, fluent_facts, instantiated_actions, instantiated_axioms
def explore(task):
prog = pddl_to_prolog.translate(task)
model = build_model.compute_model(prog)
with timers.timing("Completing instantiation"):
return instantiate(task, model)
if __name__ == "__main__":
import pddl
task = pddl.open()
relaxed_reachable, atoms, actions, axioms = explore(task)
print "goal relaxed reachable: %s" % relaxed_reachable
print "%d atoms:" % len(atoms)
for atom in atoms:
print " ", atom
print
print "%d actions:" % len(actions)
for action in actions:
action.dump()
print
print
print "%d axioms:" % len(axioms)
for axiom in axioms:
axiom.dump()
print
| gpl-3.0 | 948,949,612,858,538,400 | 34.033708 | 86 | 0.613855 | false |
mobarski/smash | smash2/xorpad.py | 2 | 2461 | """
xor pad encoding / decoding
>>> decode(encode("secret"))
'secret'
"""
import os
import sys
import struct
import random
import base64
import array
# TODO turn into object
# TODO optimize
# TODO protect against the use of a known-password pad
__encode = base64.b32encode
__decode = base64.b32decode
def _get_pad_path():
d=os.environ.get('appdata','') or os.environ.get('HOME','') or '.'
return os.path.join(d,".otp-secret.bin")
def _get_pad():
"get pad as array of unsigned bytes"
pad_path = _get_pad_path()
try:
f = open(pad_path,'rb')
except:
_init()
f = open(pad_path,'rb')
raw = f.read()
if sys.version_info[0]==2:
a = array.array('B')
a.fromstring(raw)
return a
else:
return array.array('B',raw)
def _xor_with_pad(text,seed=None):
"xor text with pseudo-randomly shuffled pad"
if sys.version_info[0]==3 and type(text)==str:
text=text.encode()
pad=_get_pad()
if sys.version_info[0]==3:
random.seed(seed,version=1)
else:
random.seed(seed)
random.shuffle(pad,random=random.random) # shuffle that gives the same result in py2 and py3
t = array.array('B',text)
return [a^b for a,b in zip(t,pad)]
def _init(length=4096):
"initialize pad with random data"
p = os.urandom(length)
f = open(_get_pad_path(),'wb')
f.write(p)
# TODO make file private
#########################################################
def encode(s):
"encode string"
raw_seed=os.urandom(4)
seed=struct.unpack('I',raw_seed)
n=_xor_with_pad(s,seed)
b = raw_seed+struct.pack(len(n)*'B',*n)
text = __encode(b)
text = text.decode() if sys.version_info[0]==3 else text
return text.replace('=','x')
def decode(s):
"decode string"
b = __decode(s.replace('x','=').encode())
raw_seed,b = b[:4],b[4:]
seed=struct.unpack('I',raw_seed)
n = struct.unpack(len(b)*'B',b)
x = _xor_with_pad(n,seed)
text = struct.pack(len(x)*'B',*x)
return text.decode() if sys.version_info[0]==3 else text
#########################################################
if __name__=="__main__":
if 1:
if sys.version_info[0]==2:
print(encode('test'))
else:
print(decode('Y6EJGVMBF6YI4xxx'))
if 0:
if sys.version_info[0]==3:
print(encode('test'))
else:
print(decode('G4IUAKLCNI6Y2xxx'))
if 0:
print(_xor_with_pad('test'))
if 0:
for a in ('test','takt','zzz'):
b=encode(a)
c=decode(b)
print(a,b,c,a==c)
if 0:
a=b"test"
b=b"\xff\x00\xf0\x0f"
aa=array.array('B',a)
ba=array.array('B',b)
print([a^b for a,b in zip(aa,ba)])
| mit | 2,095,734,481,560,099,000 | 21.577982 | 93 | 0.612759 | false |
unseenlaser/machinekit | src/machinetalk/tutorial/json-ws/zmq-protobuf-micromot-client.py | 18 | 1071 | import zmq
import time
import pb2json
import json # for pretty printer
import sys,os
sys.path.append("../protobuf")
from demo_pb2 import *
# create a message object:
d = DemoContainer()
# fill in what's required
d.type = DT_POLYLINE
d.velocity = 50
d.acceleration = 500
s = d.segment.add()
s.type = ST_LINE
s.end.x = 100
s.end.y = 200
s = d.segment.add()
s.type = ST_ARC
s.end.x = 50
s.end.y = 80
s.end.z = 0
s.center.x = 120
s.center.y = 150
s.center.z = 0
s.normal.x = 0
s.normal.y = 0
s.normal.z = 1
s = d.segment.add()
s.type = ST_LINE
s.end.x = 0.0
s.end.z = 10.0
s.velocity = 30.0
s.acceleration = 200.0
request = d.SerializeToString()
if len(sys.argv) > 1:
jsonout = d.SerializeToJSON()
print json.dumps(json.loads(jsonout), indent=4)
reply = DemoContainer()
context = zmq.Context()
socket = context.socket(zmq.DEALER)
socket.identity = "client-pid%d" % os.getpid()
socket.connect("tcp://127.0.0.1:5700")
for i in range(3):
socket.send(request)
r = socket.recv()
reply.ParseFromString(r)
print str(reply)
time.sleep(1)
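# For reference only: a matching echo server for this tutorial client could
# look like the sketch below.  The ROUTER socket type and the endpoint are
# assumptions that mirror the DEALER connect() above.
#
#   ctx = zmq.Context()
#   router = ctx.socket(zmq.ROUTER)
#   router.bind("tcp://127.0.0.1:5700")
#   while True:
#       ident, msg = router.recv_multipart()   # DEALER requests arrive as [identity, payload]
#       router.send_multipart([ident, msg])    # echo the serialized DemoContainer back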
| lgpl-2.1 | 359,807,089,869,560,900 | 16 | 51 | 0.663866 | false |
andrewcmyers/tensorflow | tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py | 20 | 78773 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SequenceQueueingStateSaver and wrappers.
Please see the reading data how-to for context.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numbers
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
# pylint: disable=protected-access
_restore_sparse = sparse_ops._take_many_sparse_from_tensors_map
_store_sparse = sparse_ops._add_many_sparse_to_tensors_map
# pylint: enable=protected-access
class _SequenceInputWrapper(object):
"""A wrapper object for storing sequence-related input.
The SequenceInputWapper accepts four objects:
length: A scalar int containing the length of the input sequence.
key: A scalar string containing the unique key of the input sequence.
sequences: A dict mapping labels, like `input`, to tensors
whose initial index dimension is at least size `length`.
context: A dict mapping labels, like `global_target`, to tensors
that represent data across the entire example.
"""
def __init__(self, length, key, sequences, context):
length = ops.convert_to_tensor(length, name="length")
key = ops.convert_to_tensor(key, name="key")
if not isinstance(sequences, dict):
raise TypeError("sequences must be a dict")
if not isinstance(context, dict):
raise TypeError("context must be a dict")
if not sequences:
raise ValueError("must have at least one sequence tensor")
for k in sequences.keys():
if not isinstance(k, six.string_types):
raise TypeError("sequence key must be string: %s" % k)
if ":" in k:
raise ValueError("sequence key may not have a colon: '%s'" % k)
for k in context.keys():
if not isinstance(k, six.string_types):
raise TypeError("context key must be string: %s" % k)
if ":" in k:
raise ValueError("context key may not have a colon: '%s'" % k)
sequences = dict((k, ops.convert_to_tensor(
v, name="sequence_%s" % k)) for k, v in sequences.items())
context = dict((k, ops.convert_to_tensor(
v, name="context_%s" % k)) for k, v in context.items())
self._length = length
self._key = key
self._sequences = sequences
self._context = context
@property
def length(self):
return self._length
@property
def key(self):
return self._key
@property
def sequences(self):
return self._sequences
@property
def context(self):
return self._context
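# Illustrative construction of the wrapper above (added for clarity; the
# names, lengths and shapes are placeholders, not part of any public API):
#
#   wrapper = _SequenceInputWrapper(
#       length=5,
#       key="example_0001",
#       sequences={"tokens": array_ops.zeros([5, 8])},
#       context={"label": array_ops.zeros([])})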
def _check_multiple_of(value, multiple_of):
"""Checks that value `value` is a non-zero multiple of `multiple_of`.
Args:
value: an int32 scalar Tensor.
multiple_of: an int or int32 scalar Tensor.
Returns:
new_value: an int32 scalar Tensor matching `value`, but which includes an
assertion that `value` is a multiple of `multiple_of`.
"""
assert isinstance(value, ops.Tensor)
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.logical_and(
math_ops.equal(math_ops.mod(value, multiple_of), 0),
math_ops.not_equal(value, 0)), [
string_ops.string_join([
"Tensor %s should be a multiple of: " % value.name,
string_ops.as_string(multiple_of), ", but saw value: ",
string_ops.as_string(value),
". Consider setting pad=True."
])
])
]):
new_value = array_ops.identity(value, name="multiple_of_checked")
return new_value
def _check_rank(value, expected_rank):
"""Check the rank of Tensor `value`, via shape inference and assertions.
Args:
value: A Tensor, possibly with shape associated shape information.
expected_rank: int32 scalar (optionally a `Tensor`).
Returns:
new_value: A Tensor matching `value`. Accessing this tensor tests
assertions on its rank. If expected_rank is not a `Tensor`, then
new_value's shape's rank has been set.
Raises:
ValueError: if `expected_rank` is not a `Tensor` and the rank of `value`
is known and is not equal to `expected_rank`.
"""
assert isinstance(value, ops.Tensor)
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.equal(expected_rank, array_ops.rank(value)), [
string_ops.string_join([
"Rank of tensor %s should be: " % value.name,
string_ops.as_string(expected_rank), ", shape received:"
]), array_ops.shape(value)
])
]):
new_value = array_ops.identity(value, name="rank_checked")
if isinstance(expected_rank, ops.Tensor):
expected_rank_value = tensor_util.constant_value(expected_rank)
if expected_rank_value is not None:
expected_rank = int(expected_rank_value)
if not isinstance(expected_rank, ops.Tensor):
try:
new_value.set_shape(new_value.get_shape().with_rank(expected_rank))
except ValueError as e:
raise ValueError("Rank check failed for %s: %s" % (value.name, str(e)))
return new_value
def _check_shape(value, expected_shape):
"""Check the shape of Tensor `value`, via shape inference and assertions.
Args:
value: A Tensor, possibly with shape associated shape information.
expected_shape: a `TensorShape`, list of `int32`, or a vector `Tensor`.
Returns:
new_value: A Tensor matching `value`. Accessing this tensor tests
assertions on its shape. If expected_shape is not a `Tensor`, then
new_value's shape has been set.
Raises:
ValueError: if `expected_shape` is not a `Tensor` and the shape of `value`
is known and is not equal to `expected_shape`.
"""
assert isinstance(value, ops.Tensor)
if isinstance(expected_shape, tensor_shape.TensorShape):
expected_shape = expected_shape.as_list()
if isinstance(expected_shape, ops.Tensor):
expected_shape_value = tensor_util.constant_value(expected_shape)
if expected_shape_value is not None:
expected_shape = [int(d) for d in expected_shape_value]
if isinstance(expected_shape, ops.Tensor):
value = _check_rank(value, array_ops.size(expected_shape))
else:
value = _check_rank(value, len(expected_shape))
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.reduce_all(
math_ops.equal(expected_shape, array_ops.shape(value))), [
string_ops.string_join([
"Shape of tensor %s should be: " % value.name,
string_ops.as_string(expected_shape),
", shape received: ",
string_ops.as_string(array_ops.shape(value))
])
])
]):
new_value = array_ops.identity(value, name="shape_checked")
if not isinstance(expected_shape, ops.Tensor):
try:
new_value.set_shape(new_value.get_shape().merge_with(expected_shape))
except ValueError as e:
raise ValueError("Shape check failed for %s: %s" % (value.name, str(e)))
return new_value
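# Sketch (editor's illustration): a hypothetical state tensor checked against
# a fully known shape.
#
#   state = array_ops.zeros([4, 8])
#   state = _check_shape(state, [4, 8])
#   # A mismatch that is visible statically raises ValueError right here;
#   # a dynamic mismatch only fails when the assertion op runs.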
def _check_dimensions(value, dimensions, expected_sizes, debug_prefix):
"""Check the dimensions of Tensor `value`, via shape inference and assertions.
Args:
value: A Tensor, with optional / partial associated shape information.
dimensions: An int list, the dimensions to check.
expected_sizes: list of mixed ints and int32 scalar tensors.
Optionally also a vector `Tensor`.
debug_prefix: A string, used for naming ops and printing debugging messages.
Returns:
new_value: A Tensor matching `value`. Accessing this tensor tests
assertions on its shape. If expected_sizes is not a `Tensor`, then
new_value's shape has been set for all `dimensions[i]` where
`expected_sizes[i]` is not a `Tensor`.
Raises:
TypeError: if any of the input contains invalid types:
if `value` is not a `Tensor`.
if `dimensions` is not a `list` or `tuple`.
ValueError: if input has incorrect sizes or inferred shapes do not match:
if `dimensions` contains repeated dimensions.
if `expected_sizes` is not a `Tensor` and its length does not match that of
`dimensions`.
if `value`'s shape has a well-defined rank, and one of the values in
`dimensions` is equal to or above this rank.
if `value`'s shape is well defined for some `dimensions[i]`, and
`expected_sizes[i]` is not a `Tensor`, and these two values do
not match.
"""
if not isinstance(dimensions, (list, tuple)):
raise TypeError("dimensions must be a list or tuple")
if len(set(dimensions)) != len(dimensions):
raise ValueError("dimensions are not unique: %s" % dimensions)
if not isinstance(value, ops.Tensor):
raise TypeError("value is not a Tensor: %s" % value)
value_shape = value.get_shape()
if not isinstance(expected_sizes, ops.Tensor):
if len(dimensions) != len(expected_sizes):
raise ValueError("len(dimensions) != len(expected_sizes): %d vs. %d" %
(len(dimensions), len(expected_sizes)))
if value_shape.ndims is not None:
if value_shape.ndims <= max(dimensions):
raise ValueError(
"%s: rank of input is not greater than max(dimensions): "
"%d vs. %d" % (debug_prefix, value.get_shape().ndims,
max(dimensions)))
value_dims = value_shape.as_list()
for d, s in zip(dimensions, expected_sizes):
if not isinstance(s, ops.Tensor):
value_dims[d] = s
try:
value.set_shape(value.get_shape().merge_with(value_dims))
except ValueError as e:
raise ValueError("Dimensions check failed for %s: %s" %
(debug_prefix, str(e)))
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.equal(expected_size, array_ops.shape(value)[dimension]), [
string_ops.string_join([
"Dimension %d of tensor labeled %s should be: " %
(dimension, debug_prefix),
string_ops.as_string(expected_size), ", shape received: ",
string_ops.as_string(array_ops.shape(value))
])
]) for (dimension, expected_size) in zip(dimensions, expected_sizes)
]):
new_value = array_ops.identity(value, name="dims_checked_%s" % debug_prefix)
return new_value
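# Sketch (editor's illustration): checking dimensions 0 and 1 of a
# hypothetical [time, batch, depth] tensor, where `time` is only known at
# run time.
#
#   time = array_ops.placeholder(dtypes.int32, shape=[])
#   x = array_ops.placeholder(dtypes.float32, shape=[None, None, 16])
#   x = _check_dimensions(x, [0, 1], [time, 32], debug_prefix="x")
#   # Dimension 1 (a Python int) is merged into the static shape; dimension 0
#   # is only verified by the runtime assertion because `time` is a Tensor.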
def _prepare_sequence_inputs(inputs, states):
"""Convert input to tensors and validate shape information.
Args:
inputs: A `_SequenceInputWrapper` instance.
states: A dictionary mapping state names to input constants or tensors.
Returns:
The tuple (length, key, sorted_states, sorted_sequences, sorted_context),
where each value has been checked for valid shape, and the sorted_* dicts
are instances of OrderedDict, with key-value pairs sorted by key.
Raises:
ValueError: if the shapes of inputs.context.values(), states.values(),
or inputs.sequences.values() are not fully defined (with the exception
of the first dimension of any `Tensor` in inputs.sequences.values()).
TypeError: if the dtype of length is not int32.
"""
# Convert state initial values to tensors
states = dict((k, ops.convert_to_tensor(
v, name="state_%s" % k)) for k, v in states.items())
def _assert_fully_defined(label, dict_, ignore_first_dimension=False):
start_dimension = 1 if ignore_first_dimension else 0
for k, v in dict_.items():
if not v.get_shape()[start_dimension:].is_fully_defined():
raise ValueError("Shape for %s %s is not fully defined %s: %s" %
(label, k, "(ignoring first dimension)" if
ignore_first_dimension else "", v.get_shape()))
_assert_fully_defined("state", states)
_assert_fully_defined("context", inputs.context)
# Sequences' first dimension (time) may be variable
_assert_fully_defined(
"sequence", inputs.sequences, ignore_first_dimension=True)
# Get dictionaries' dtypes ordered by name - ordering is important
# when switching between dicts and tuples for passing to Barrier.
def _sort_by_name(d):
return collections.OrderedDict(sorted(d.items(), key=lambda k_v: k_v[0]))
sorted_sequences = _sort_by_name(inputs.sequences)
sorted_context = _sort_by_name(inputs.context)
sorted_states = _sort_by_name(states)
length = _check_rank(inputs.length, 0)
key = _check_rank(inputs.key, 0)
if length.dtype != dtypes.int32:
raise TypeError("length dtype must be int32, but received: %s" %
length.dtype)
if key.dtype != dtypes.string:
raise TypeError("key dtype must be string, but received: %s" % key.dtype)
return (length, key, sorted_states, sorted_sequences, sorted_context)
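# Sketch (editor's illustration) of what this helper returns for a tiny,
# hypothetical input:
#
#   inputs = _SequenceInputWrapper(
#       ops.convert_to_tensor(4),                # length (int32 scalar)
#       ops.convert_to_tensor("ex0"),            # key (string scalar)
#       {"tokens": array_ops.zeros([4, 2])},     # sequences
#       {"label": ops.convert_to_tensor(1)})     # context
#   length, key, states, seqs, ctx = _prepare_sequence_inputs(
#       inputs, {"lstm": array_ops.zeros([8])})
#   # `states`, `seqs`, and `ctx` are OrderedDicts sorted by key; `length`
#   # and `key` carry rank-0 assertions and dtype checks.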
# NextQueuedSequenceBatch works closely with
# SequenceQueueingStateSaver and requires access to its private properties
# pylint: disable=protected-access
class NextQueuedSequenceBatch(object):
"""NextQueuedSequenceBatch stores deferred SequenceQueueingStateSaver data.
This class is instantiated by `SequenceQueueingStateSaver` and is accessible
via its `next_batch` property.
"""
def __init__(self, state_saver):
self._state_saver = state_saver
@property
def total_length(self):
"""The lengths of the original (non-truncated) unrolled examples.
Returns:
An integer vector of length `batch_size`, the total lengths.
"""
return self._state_saver._received_total_length
@property
def length(self):
"""The lengths of the given truncated unrolled examples.
For initial iterations, for which `sequence * num_unroll < length`,
this number is `num_unroll`. For the remainder,
this number is between `0` and `num_unroll`.
Returns:
An integer vector of length `batch_size`, the lengths.
"""
return self._state_saver._received_length
@property
def batch_size(self):
"""The batch_size of the given batch.
Usually, this is the batch_size requested when initializing the SQSS, but
if allow_small_batch=True this will become smaller when inputs are
exhausted.
Returns:
A scalar integer tensor, the batch_size
"""
return self._state_saver._received_batch_size
@property
def insertion_index(self):
"""The insertion indices of the examples (when they were first added).
These indices start with the value -2**63 and increase with every
call to the prefetch op. Each whole example gets its own insertion
index, and this is used to prioritize the example so that its truncated
segments appear in adjacent iterations, even if new examples are inserted
by the prefetch op between iterations.
Returns:
An int64 vector of length `batch_size`, the insertion indices.
"""
return self._state_saver._received_indices
@property
def key(self):
"""The key names of the given truncated unrolled examples.
The format of the key is:
```python
"%05d_of_%05d:%s" % (sequence, sequence_count, original_key)
```
where `original_key` is the unique key read in by the prefetcher.
Returns:
A string vector of length `batch_size`, the keys.
"""
return self._state_saver._received_keys
@property
def next_key(self):
"""The key names of the next (in iteration) truncated unrolled examples.
The format of the key is:
```python
"%05d_of_%05d:%s" % (sequence + 1, sequence_count, original_key)
```
if `sequence + 1 < sequence_count`, otherwise:
```python
"STOP:%s" % original_key
```
where `original_key` is the unique key read in by the prefetcher.
Returns:
A string vector of length `batch_size`, the keys.
"""
return self._state_saver._received_next_key
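# For example (editor's note), for an example with original key "file_0:42"
# split into sequence_count == 3 segments, the second segment has key
# "00001_of_00003:file_0:42" and next_key "00002_of_00003:file_0:42"; the
# last segment's next_key is "STOP:file_0:42".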
@property
def sequence(self):
"""An int32 vector, length `batch_size`: the sequence index of each entry.
When an input is split up, the sequence values
```
0, 1, ..., sequence_count - 1
```
are assigned to each split.
Returns:
An int32 vector `Tensor`.
"""
return self._state_saver._received_sequence
@property
def sequence_count(self):
"""An int32 vector, length `batch_size`: the sequence count of each entry.
When an input is split up, the number of splits is equal to:
`padded_length / num_unroll`. This is the sequence_count.
Returns:
An int32 vector `Tensor`.
"""
return self._state_saver._received_sequence_count
@property
def context(self):
"""A dict mapping keys of `input_context` to batched context.
Returns:
A dict mapping keys of `input_context` to tensors.
If we had at input:
```python
context["name"].get_shape() == [d1, d2, ...]
```
then for this property:
```python
context["name"].get_shape() == [batch_size, d1, d2, ...]
```
"""
return self._state_saver._received_context
@property
def sequences(self):
"""A dict mapping keys of `input_sequences` to split and rebatched data.
Returns:
A dict mapping keys of `input_sequences` to tensors.
If we had at input:
```python
sequences["name"].get_shape() == [None, d1, d2, ...]
```
where `None` meant the sequence time was dynamic, then for this property:
```python
sequences["name"].get_shape() == [batch_size, num_unroll, d1, d2, ...].
```
"""
return self._state_saver._received_sequences
def state(self, state_name):
"""Returns batched state tensors.
Args:
state_name: string, matches a key provided in `initial_states`.
Returns:
A `Tensor`: a batched set of states, either initial states (if this is
the first run of the given example), or a value as stored during
a previous iteration via `save_state` control flow.
Its type is the same as `initial_states["state_name"].dtype`.
If we had at input:
```python
initial_states[state_name].get_shape() == [d1, d2, ...],
```
then
```python
state(state_name).get_shape() == [batch_size, d1, d2, ...]
```
Raises:
KeyError: if `state_name` does not match any of the initial states
declared in `initial_states`.
"""
return self._state_saver._received_states[state_name]
def save_state(self, state_name, value, name=None):
"""Returns an op to save the current batch of state `state_name`.
Args:
state_name: string, matches a key provided in `initial_states`.
value: A `Tensor`.
Its type must match that of `initial_states[state_name].dtype`.
If we had at input:
```python
initial_states[state_name].get_shape() == [d1, d2, ...]
```
then the shape of `value` must match:
```python
tf.shape(value) == [batch_size, d1, d2, ...]
```
name: string (optional). The name scope for newly created ops.
Returns:
A control flow op that stores the new state of each entry into
the state saver. This op must be run for every iteration that
accesses data from the state saver (otherwise the state saver
will never progress through its states and run out of capacity).
Raises:
KeyError: if `state_name` does not match any of the initial states
declared in `initial_states`.
"""
if state_name not in self._state_saver._received_states.keys():
raise KeyError("state was not declared: %s" % state_name)
default_name = "InputQueueingStateSaver_SaveState"
with ops.name_scope(name, default_name, values=[value]):
# Place all operations on the CPU. Barriers and queues are only
# implemented for CPU, but all the other book-keeping operations
# (reshape, shape, range, ...) would be placed on GPUs if available,
# unless we explicitly tie them to CPU.
with ops.colocate_with(self._state_saver._capacity_queue.queue_ref):
indices_where_not_done = array_ops.reshape(
array_ops.where(
math_ops.logical_not(self._state_saver._sequence_is_done)),
[-1])
keeping_next_key = array_ops.gather(
self._state_saver._received_next_key, indices_where_not_done)
value = _check_shape(
array_ops.identity(
value, name="convert_%s" % state_name),
array_ops.shape(self._state_saver._received_states[state_name]))
keeping_state = array_ops.gather(value, indices_where_not_done)
return self._state_saver._barrier.insert_many(
self._state_saver._get_barrier_index("state", state_name),
keeping_next_key,
keeping_state,
name="BarrierInsertState_%s" % state_name)
# pylint: enable=protected-access
class SequenceQueueingStateSaver(object):
"""SequenceQueueingStateSaver provides access to stateful values from input.
This class is meant to be used instead of, e.g., a `Queue`, for splitting
variable-length sequence inputs into segments of sequences with fixed length
and batching them into mini-batches. It maintains contexts and state for a
sequence across the segments. It can be used in conjunction with a
`QueueRunner` (see the example below).
The `SequenceQueueingStateSaver` (SQSS) accepts one example at a time via the
inputs `input_length`, `input_key`, `input_sequences` (a dict),
`input_context` (a dict), and `initial_states` (a dict).
The sequences, values in `input_sequences`, may have variable first dimension
(the `padded_length`), though this dimension must always be a multiple of
`num_unroll`. All other dimensions must be fixed and accessible via
`get_shape` calls. The length prior to padding can be recorded in
`input_length`. The context values in `input_context` must all have fixed and
well defined dimensions. The initial state values must all have fixed and
well defined dimensions.
The SQSS splits the sequences of an input example into segments of length
`num_unroll`. Across examples minibatches of size `batch_size` are formed.
These minibatches contain a segment of the sequences, copy the context values,
and maintain state, length, and key information of the original input
examples. In the first segment of an example the state is still the initial
state. It can then be updated; and updated state values are accessible in
subsequent segments of the same example. After each segment
`batch.save_state()` must be called, which is done by the state_saving_rnn.
Without this call, the dequeue op associated with the SQSS will not run.
Internally, SQSS has a queue for the input examples. Its `capacity` is
configurable. If set smaller than `batch_size` then the dequeue op will block
indefinitely. A small multiple of `batch_size` is a good rule of thumb to
prevent that queue from becoming a bottleneck and slowing down training.
If set too large (and note that it defaults to unbounded) memory consumption
goes up. Moreover, when iterating over the same input examples multiple times
reusing the same `key` the `capacity` must be smaller than the number of
examples.
The prefetcher, which reads one unrolled, variable-length input sequence at
a time, is accessible via `prefetch_op`. The underlying `Barrier` object
is accessible via `barrier`. Processed minibatches, as well as
state read and write capabilities are accessible via `next_batch`.
Specifically, `next_batch` provides access to all of the minibatched
data, including the following, see `NextQueuedSequenceBatch` for details:
* `total_length`, `length`, `insertion_index`, `key`, `next_key`,
* `sequence` (the index of each minibatch entry's time segment),
* `sequence_count` (the total time segment count for each minibatch entry),
* `context` (a dict of the copied minibatched context values),
* `sequences` (a dict of the split minibatched variable-length sequences),
* `state` (to access the states of the current segments of these entries)
* `save_state` (to save the states for the next segments of these entries)
Example usage:
```python
batch_size = 32
num_unroll = 20
lstm_size = 8
cell = tf.contrib.rnn.BasicLSTMCell(num_units=lstm_size)
initial_state_values = tf.zeros(cell.state_size, dtype=tf.float32)
raw_data = get_single_input_from_input_reader()
length, key, sequences, context = my_parser(raw_data)
assert "input" in sequences.keys()
assert "label" in context.keys()
initial_states = {"lstm_state": initial_state_value}
stateful_reader = tf.SequenceQueueingStateSaver(
batch_size, num_unroll,
input_length=length, input_key=key, input_sequences=sequences,
input_context=context, initial_states=initial_states,
capacity=batch_size*100)
batch = stateful_reader.next_batch
inputs = batch.sequences["input"]
context_label = batch.context["label"]
inputs_by_time = tf.split(value=inputs, num_or_size_splits=num_unroll, axis=1)
assert len(inputs_by_time) == num_unroll
lstm_output, _ = tf.contrib.rnn.static_state_saving_rnn(
cell,
inputs_by_time,
state_saver=batch,
state_name="lstm_state")
# Start a prefetcher in the background
session = tf.Session()
num_threads = 3
queue_runner = tf.train.QueueRunner(
stateful_reader, [stateful_reader.prefetch_op] * num_threads)
tf.train.add_queue_runner(queue_runner)
tf.train.start_queue_runners(sess=session)
while True:
# Step through batches, perform training or inference...
session.run([lstm_output])
```
**Note**: Usually the barrier is given to a QueueRunner as in the
examples above. The QueueRunner will close the barrier if the prefetch_op
receives an OutOfRange Error from upstream input queues (i.e., reaches
the end of the input). If the barrier is closed no further new examples
are added to the SQSS. The underlying barrier might, however, still
contain further unroll-steps of examples that have not undergone all
iterations. To gracefully finish all examples, the flag
`allow_small_batch` must be set to true, which causes the SQSS to issue
progressively smaller mini-batches with the remaining examples.
"""
def __init__(self,
batch_size,
num_unroll,
input_length,
input_key,
input_sequences,
input_context,
initial_states,
capacity=None,
allow_small_batch=False,
name=None):
"""Creates the SequenceQueueingStateSaver.
Args:
batch_size: int or int32 scalar `Tensor`, how large minibatches should
be when accessing the `state()` method and `context`, `sequences`, etc,
properties.
num_unroll: Python integer, how many time steps to unroll at a time.
The input sequences of length `k` are then split into `k / num_unroll`
many segments.
input_length: An int32 scalar `Tensor`, the length of the sequence prior
to padding. This value may be at most `padded_length` for any given
input (see below for the definition of `padded_length`).
Batched and total lengths of the current iteration are made accessible
via the `length` and `total_length` properties. The shape of
input_length (scalar) must be fully specified.
input_key: A string scalar `Tensor`, the **unique** key for the given
input. This is used to keep track of the split minibatch elements
of this input. Batched keys of the current iteration are made
accessible via the `key` property. The shape of `input_key` (scalar)
must be fully specified.
input_sequences: A dict mapping string names to `Tensor` values. The
values must all have matching first dimension, called `padded_length`.
The `SequenceQueueingStateSaver` will split these tensors along
this first dimension into minibatch elements of dimension
`num_unroll`. Batched and segmented sequences of the current iteration
are made accessible via the `sequences` property.
**Note**: `padded_length` may be dynamic, and may vary from input
to input, but must always be a multiple of `num_unroll`. The remainder
of the shape (other than the first dimension) must be fully specified.
input_context: A dict mapping string names to `Tensor` values. The values
are treated as "global" across all time splits of the given input,
and will be copied across for all minibatch elements accordingly.
Batched and copied context of the current iteration are made
accessible via the `context` property.
**Note**: All input_context values must have fully defined shapes.
initial_states: A dict mapping string state names to multi-dimensional
values (e.g. constants or tensors). This input defines the set of
states that will be kept track of during computing iterations, and
which can be accessed via the `state` and `save_state` methods.
**Note**: All initial_state values must have fully defined shapes.
capacity: The max capacity of the SQSS in number of examples. Needs to be
at least `batch_size`. Defaults to unbounded.
allow_small_batch: If true, the SQSS will return smaller batches when
there aren't enough input examples to fill a whole batch and the end of
the input has been reached (i.e., the underlying barrier has been
closed).
name: An op name string (optional).
Raises:
TypeError: if any of the inputs is not an expected type.
ValueError: if any of the input values is inconsistent, e.g. if
not enough shape information is available from inputs to build
the state saver.
"""
if capacity is not None and isinstance(batch_size, ops.Tensor):
with ops.control_dependencies([check_ops.assert_greater_equal(
math_ops.cast(capacity, dtype=dtypes.int64),
math_ops.cast(batch_size, dtype=dtypes.int64),
message="capacity needs to be >= batch_size.")]):
input_key = array_ops.identity(input_key)
elif capacity is not None and capacity < batch_size:
raise ValueError("capacity %d needs to be >= batch_size %d" % (
capacity, batch_size))
# The barrier is ignorant of the number of actual examples, since a long
# example that requires many iterations produces more elements in the
# barrier than a short example. Furthermore, we don't have an upper bound
# on the length of examples, and hence have to keep the capacity of the
# barrier at infinite to avoid dead-lock. Instead we have to keep track of
# the number of active examples in this class, and block the prefetch_op
# when capacity is reached. To this end, we employ a FIFOQueue in which we
# store one token (its value doesn't matter) for each input example, and
# dequeue a token for each completed example. Since the capacity of this
# queue is limited the enqueue operation will block if capacity is reached.
self._capacity_queue = data_flow_ops.FIFOQueue(
capacity=capacity, dtypes=[dtypes.int32], shapes=[[]])
# Place all operations on the CPU. Barriers and queues are only implemented
# for CPU, but all the other book-keeping operations
# (reshape, shape, range, ...) would be placed on GPUs if available,
# unless we explicitly tie them to CPU.
with ops.colocate_with(self._capacity_queue.queue_ref):
if not isinstance(initial_states, dict):
raise TypeError("initial_states must be a dictionary")
if not initial_states:
raise ValueError(
"initial_states may not be empty: at least one state variable is "
"required to properly enqueue split sequences to run in separate "
"iterations")
for k in initial_states:
if not isinstance(k, six.string_types):
raise TypeError("state name must be a string: %s" % k)
if ":" in k:
raise ValueError("state name may not have a colon: '%s'" % k)
op_vars = ([input_length, input_key] + list(input_sequences.values()) +
list(input_context.values()))
with ops.name_scope(name, "InputQueueingStateSaver", op_vars) as scope:
inputs = _SequenceInputWrapper(input_length, input_key, input_sequences,
input_context)
self._batch_size = batch_size
self._num_unroll = num_unroll
self._name = scope
# This step makes sure all shapes are well defined. We can now
# use get_shape() on any tensor in the output of this function
# and get a fully-defined shape.
(self._length, self._key, self._sorted_states, self._sorted_sequences,
self._sorted_context) = _prepare_sequence_inputs(inputs,
initial_states)
self._padded_length = array_ops.identity(
array_ops.shape(six.next(six.itervalues(self._sorted_sequences)))[
0],
name="padded_length") # The name is useful for debugging
self._padded_length = _check_multiple_of(self._padded_length,
self._num_unroll)
# sequences should have length == all matching
self._sorted_sequences = collections.OrderedDict(
(k, _check_dimensions(
v, [0], [self._padded_length],
debug_prefix="sorted_sequences_%s" % k))
for k, v in self._sorted_sequences.items())
self._uninitialized_states = self._sorted_states
# Once this is set, self._get_barrier_*_index are available for use.
self._store_index_maps(self._sorted_sequences, self._sorted_context,
self._sorted_states)
# Make sure that the length is <= the padded_length
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.less_equal(self._length, self._padded_length), [
"Input length should be <= than length from sequences:",
self._length, " vs. ", self._padded_length
])
]):
self._length = array_ops.identity(self._length)
# Only create barrier; enqueue and dequeue operations happen when you
# access prefetch_op and next_batch.
self._create_barrier()
self._scope = scope
self._allow_small_batch = allow_small_batch
self._prefetch_op = None
self._next_batch = None
@property
def name(self):
return self._name
@property
def barrier(self):
return self._barrier
@property
def batch_size(self):
return self._batch_size
@property
def num_unroll(self):
return self._num_unroll
@property
def prefetch_op(self):
"""The op used to prefetch new data into the state saver.
Running it once enqueues one new input example into the state saver.
The first time this gets called, it additionally creates the prefetch_op.
Subsequent calls simply return the previously created `prefetch_op`.
It should be run in a separate thread via e.g. a `QueueRunner`.
Returns:
An `Operation` that performs prefetching.
"""
if not self._prefetch_op:
with ops.name_scope(None), ops.name_scope(
self._scope, values=[self._barrier.barrier_ref]):
self._create_prefetch_op()
return self._prefetch_op
@property
def next_batch(self):
"""The `NextQueuedSequenceBatch` providing access to batched output data.
Also provides access to the `state` and `save_state` methods.
The first time this gets called, it additionally prepares barrier reads
and creates `NextQueuedSequenceBatch` / next_batch objects. Subsequent
calls simply return the previously created `next_batch`.
In order to access data in `next_batch` without blocking, the `prefetch_op`
must have been run at least `batch_size` times (ideally in a separate
thread, or launched via a `QueueRunner`). After processing a segment in
`next_batch`, `batch.save_state()` must be called, which is done by the
state_saving_rnn. Without this call, the dequeue op associated with the SQSS
will not run.
Returns:
A cached `NextQueuedSequenceBatch` instance.
"""
# This is needed to prevent errors if next_batch is called before
# prefetch_op is created.
if not self._prefetch_op:
with ops.name_scope(None), ops.name_scope(
self._scope, values=[self._barrier.barrier_ref]):
self._create_prefetch_op()
if not self._next_batch:
with ops.name_scope(None), ops.name_scope(
self._scope, values=[self._barrier.barrier_ref]):
self._prepare_barrier_reads()
return self._next_batch
def close(self, cancel_pending_enqueues=False, name=None):
"""Closes the barrier and the FIFOQueue.
This operation signals that no more segments of new sequences will be
enqueued. New segments of already inserted sequences may still be enqueued
and dequeued if there is a sufficient number filling a batch or
allow_small_batch is true. Otherwise dequeue operations will fail
immediately.
Args:
cancel_pending_enqueues: (Optional.) A boolean, defaulting to
`False`. If `True`, all pending enqueues to the underlying queues will
be cancelled, and completing already started sequences is not possible.
name: Optional name for the op.
Returns:
The operation that closes the barrier and the FIFOQueue.
"""
with ops.name_scope(name, "SQSSClose", [self._prefetch_op]) as name:
barrier_close = self.barrier.close(cancel_pending_enqueues,
"BarrierClose")
fifo_queue_close = self._capacity_queue.close(cancel_pending_enqueues,
"FIFOClose")
return control_flow_ops.group(barrier_close, fifo_queue_close, name=name)
def _store_index_maps(self, sequences, context, states):
"""Prepares the internal dictionaries _name_to_index and _index_to_name.
These dictionaries are used to keep track of indices into the barrier.
Args:
sequences: `OrderedDict` of string, `Tensor` pairs.
context: `OrderedDict` of string, `Tensor` pairs.
states: `OrderedDict` of string, `Tensor` pairs.
"""
assert isinstance(sequences, dict)
assert isinstance(context, dict)
assert isinstance(states, dict)
self._name_to_index = dict(
(name, ix)
for (ix, name) in enumerate([
"__length", "__total_length", "__next_key", "__sequence",
"__sequence_count"
] + ["__sequence__%s" % k for k in sequences.keys()] + [
"__context__%s" % k for k in context.keys()
] + ["__state__%s" % k for k in states.keys()]))
self._index_to_name = [
name
for (name, _) in sorted(
self._name_to_index.items(), key=lambda n_ix: n_ix[1])
]
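# For example (editor's note), with sequences {"tokens"}, context {"label"}
# and states {"lstm"}, _name_to_index maps
#   "__length" -> 0, "__total_length" -> 1, "__next_key" -> 2,
#   "__sequence" -> 3, "__sequence_count" -> 4,
#   "__sequence__tokens" -> 5, "__context__label" -> 6, "__state__lstm" -> 7,
# and _index_to_name is the corresponding inverse list.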
def _get_barrier_length_index(self):
return self._name_to_index["__length"]
def _get_barrier_total_length_index(self):
return self._name_to_index["__total_length"]
def _get_barrier_next_key_index(self):
return self._name_to_index["__next_key"]
def _get_barrier_sequence_index(self):
return self._name_to_index["__sequence"]
def _get_barrier_sequence_count_index(self):
return self._name_to_index["__sequence_count"]
def _get_barrier_index(self, index_type, name):
assert index_type in ("sequence", "context", "state")
key = "__%s__%s" % (index_type, name)
assert key in self._name_to_index, (
"Requested a name not in the value type %s: %s" % (index_type, name))
return self._name_to_index[key]
def _create_barrier(self):
"""Create the barrier.
This method initializes the Barrier object with the right types and shapes.
"""
# Create the barrier
sequence_dtypes = [v.dtype for k, v in self._sorted_sequences.items()]
context_dtypes = [v.dtype for k, v in self._sorted_context.items()]
state_dtypes = [v.dtype for k, v in self._sorted_states.items()]
types = ([
dtypes.int32, # length
dtypes.int32, # total_length
dtypes.string, # next_keys
dtypes.int32, # sequence
dtypes.int32
] # expanded_sequence_count
+ sequence_dtypes + context_dtypes + state_dtypes)
sequence_shapes = [
[self._num_unroll] + self._sorted_sequences[k].get_shape().as_list()[1:]
for k in self._sorted_sequences.keys()
]
context_shapes = [
self._sorted_context[k].get_shape().as_list()
for k in self._sorted_context.keys()
]
state_shapes = [
self._sorted_states[k].get_shape().as_list()
for k in self._sorted_states.keys()
]
shapes = ([
(), # length
(), # total_length
(), # next_keys
(), # sequence
()
] # expanded_sequence_count
+ sequence_shapes + context_shapes + state_shapes)
self._barrier = data_flow_ops.Barrier(types=types, shapes=shapes)
def _create_prefetch_op(self):
"""Group insert_many ops and create prefetch_op.
This method implements the "meat" of the logic underlying the
`SequenceQueueingStateSaver`. It performs dynamic reshaping of
sequences, copying of context, and initial insertion of these values,
as well as the key, next_key, sequence, sequence_count, and initial
states into the barrier.
"""
# Step 1: identify how many barrier entries to split this input
# into, store the result as a scalar
sequence_count = math_ops.div(self._padded_length, self._num_unroll)
sequence_count_vec = array_ops.expand_dims(sequence_count, 0)
# The final unrolled sequence's length is num_unroll only in
# the case that num_unroll divides it evenly.
ones = array_ops.ones(sequence_count_vec, dtype=dtypes.int32)
sequence = math_ops.range(sequence_count)
expanded_length = math_ops.maximum(
0, self._length - self._num_unroll * sequence)
expanded_length = math_ops.minimum(self._num_unroll, expanded_length)
expanded_total_length = self._length * ones
expanded_sequence_count = sequence_count * ones
current_keys = string_ops.string_join(
[
string_ops.as_string(
sequence, width=5, fill="0"), "_of_", string_ops.as_string(
sequence_count, width=5, fill="0"), ":", self._key
],
name="StringJoinCurrentKeys")
next_keys = array_ops.concat(
[
array_ops.slice(current_keys, [1], [-1]), array_ops.expand_dims(
string_ops.string_join(
["STOP:", self._key], name="StringJoinStop"),
0)
],
0,
name="concat_next_keys")
reshaped_sequences = collections.OrderedDict((
k,
_check_dimensions(
# Reshape sequences to sequence_count rows
array_ops.reshape(
v,
array_ops.concat(
[
array_ops.expand_dims(sequence_count, 0),
array_ops.expand_dims(self._num_unroll, 0),
v.get_shape().as_list()[1:]
],
0,
name="concat_sequences_%s" % k),
name="reshape_sequences_%s" % k),
[0, 1] + list(range(2, v.get_shape().ndims + 1)),
[sequence_count, self._num_unroll] + v.get_shape().as_list()[1:],
debug_prefix="reshaped_sequences_%s" %
k)) for k, v in self._sorted_sequences.items())
expanded_context = collections.OrderedDict(
(
k,
_check_dimensions(
# Copy context to be sequence_count rows
array_ops.tile(
array_ops.expand_dims(v, 0),
array_ops.concat(
[
array_ops.expand_dims(sequence_count, 0),
[1] * v.get_shape().ndims
],
0,
name="concat_context_%s" % k),
name="tile_context_%s" % k),
[0] + list(range(1, v.get_shape().ndims + 1)),
[sequence_count] + v.get_shape().as_list(),
debug_prefix="expanded_context_%s" % k))
for k, v in self._sorted_context.items())
# Storing into the barrier, for each current_key:
# sequence_ix, sequence_count, next_key, length,
# context... (copied), sequences... (truncated)
# Also storing into the barrier for the first key
# states (using initial_states).
insert_sequence_op = self._barrier.insert_many(
self._get_barrier_sequence_index(),
current_keys,
sequence,
name="BarrierInsertSequence")
insert_sequence_count_op = self._barrier.insert_many(
self._get_barrier_sequence_count_index(),
current_keys,
expanded_sequence_count,
name="BarrierInsertSequenceCount")
insert_next_key_op = self._barrier.insert_many(
self._get_barrier_next_key_index(),
current_keys,
next_keys,
name="BarrierInsertNextKey")
insert_length_op = self._barrier.insert_many(
self._get_barrier_length_index(),
current_keys,
expanded_length,
name="BarrierInsertLength")
insert_total_length_op = self._barrier.insert_many(
self._get_barrier_total_length_index(),
current_keys,
expanded_total_length,
name="BarrierInsertTotalLength")
insert_context_ops = dict((name, self._barrier.insert_many(
self._get_barrier_index("context", name),
current_keys,
value,
name="BarrierInsertContext_%s" % name))
for (name, value) in expanded_context.items())
insert_sequences_ops = dict((name, self._barrier.insert_many(
self._get_barrier_index("sequence", name),
current_keys,
value,
name="BarrierInsertSequences_%s" % name))
for (name, value) in reshaped_sequences.items())
# An op that blocks if we reached capacity in number of active examples.
TOKEN_WITH_IGNORED_VALUE = 21051976 # pylint: disable=invalid-name
insert_capacity_token_op = self._capacity_queue.enqueue(
(TOKEN_WITH_IGNORED_VALUE,))
# Insert just the initial state. Specifically force this to run
# the insert sequence op *first* so that the Barrier receives
# an insert with *all* the segments and the segments all get the same index.
with ops.control_dependencies(
[insert_sequence_op, insert_capacity_token_op]):
insert_initial_state_ops = dict(
(name, self._barrier.insert_many(
self._get_barrier_index("state", name),
array_ops.stack([current_keys[0]]),
array_ops.stack([value]),
name="BarrierInitialInsertState_%s" % name))
for (name, value) in self._uninitialized_states.items())
all_inserts = ([
insert_capacity_token_op, insert_sequence_op, insert_sequence_count_op,
insert_next_key_op, insert_length_op, insert_total_length_op
] + list(insert_initial_state_ops.values()) +
list(insert_context_ops.values()) +
list(insert_sequences_ops.values()))
self._prefetch_op = control_flow_ops.group(
*all_inserts, name="StateSaverPrefetchGroup")
def _prepare_barrier_reads(self):
"""Creates ops for reading the barrier, as used by properties like `length`.
"""
# Ops for reading from the barrier. These ops must be run in a
# different thread than the prefetcher op to avoid blocking.
received = self._barrier.take_many(
self._batch_size, self._allow_small_batch, name="BarrierTakeMany")
self._received_indices = received[0]
self._received_keys = received[1]
received_values = received[2]
self._received_sequence = received_values[self._get_barrier_sequence_index(
)]
self._received_sequence_count = received_values[
self._get_barrier_sequence_count_index()]
self._received_next_key = received_values[self._get_barrier_next_key_index(
)]
self._received_length = received_values[self._get_barrier_length_index()]
self._received_total_length = received_values[
self._get_barrier_total_length_index()]
self._received_context = collections.OrderedDict(
(name, received_values[self._get_barrier_index("context", name)])
for name in self._sorted_context.keys())
self._received_sequences = collections.OrderedDict(
(name, received_values[self._get_barrier_index("sequence", name)])
for name in self._sorted_sequences.keys())
self._received_batch_size = array_ops.squeeze(
array_ops.shape(self._received_length))
# Which examples are we done with?
self._sequence_is_done = (
self._received_sequence + 1 >= self._received_sequence_count)
# Compute the number of finished sequences and dequeue as many tokens from
# the capacity queue.
finished_sequences = (math_ops.reduce_sum(
math_ops.cast(self._sequence_is_done, dtypes.int32)))
# TODO(ebrevdo): convert to dequeue_up_to when FIFOQueue supports it.
dequeue_op = self._capacity_queue.dequeue_many(finished_sequences)
# Tie the dequeue_op to the received_state, such that it is definitely
# carried out.
with ops.control_dependencies([dequeue_op]):
self._received_states = collections.OrderedDict(
(name, array_ops.identity(received_values[self._get_barrier_index(
"state", name)])) for name in self._sorted_states.keys())
self._next_batch = NextQueuedSequenceBatch(self)
def batch_sequences_with_states(input_key,
input_sequences,
input_context,
input_length,
initial_states,
num_unroll,
batch_size,
num_threads=3,
capacity=1000,
allow_small_batch=True,
pad=True,
make_keys_unique=False,
make_keys_unique_seed=None,
name=None):
"""Creates batches of segments of sequential input.
This method creates a `SequenceQueueingStateSaver` (SQSS) and adds it to
the queuerunners. It returns a `NextQueuedSequenceBatch`.
It accepts one example at a time identified by a unique `input_key`.
`input_sequences` is a dict with values that are tensors with time as the first
dimension. This time dimension must be the same across those tensors of an
example. It can vary across examples, although it always has to be a multiple
of `num_unroll`. Hence, padding may be necessary, and it is turned on by
default with `pad=True`.
`input_length` is a Tensor scalar or an int recording the time dimension prior
to padding. It should be between 0 and the time dimension. One reason we want
to keep track of it is so that we can take it into consideration when
computing the loss. If `pad=True` then `input_length` can be `None` and will
be inferred.
This method segments `input_sequences` into segments of length `num_unroll`.
It batches input sequences from `batch_size` many examples. These mini-batches
are available through the `sequence` property of the output. Moreover, for
each entry in the batch we can access its original `input_key` in `key` and
its input length in `total_length`. `length` records within this segment how
many non-padded time steps there are.
Static features of an example that do not vary across time can be part of the
`input_context`, a dict with Tensor values. This method copies the context for
each segment and makes it available in the `context` of the output.
This method can maintain and update a state for each example. It accepts some
`initial_states` as a dict with `Tensor` values. The first mini-batch in which an
example appears uses its `initial_states` as the entry of `state`. If
`save_state` is called, then the next segment of that example will see the
updated entry of `state`.
See `NextQueuedSequenceBatch` for a complete list of properties and methods.
Example usage:
```python
batch_size = 32
num_unroll = 20
num_enqueue_threads = 3
lstm_size = 8
cell = tf.contrib.rnn.BasicLSTMCell(num_units=lstm_size)
key, sequences, context = my_parser(raw_data)
initial_state_values = tf.zeros(cell.state_size, dtype=tf.float32)
initial_states = {"lstm_state": initial_state_values}
batch = tf.batch_sequences_with_states(
input_key=key,
input_sequences=sequences,
input_context=context,
input_length=tf.shape(sequences["input"])[0],
initial_states=initial_states,
num_unroll=num_unroll,
batch_size=batch_size,
num_threads=num_enqueue_threads,
capacity=batch_size * num_enqueue_threads * 2)
inputs = batch.sequences["input"]
context_label = batch.context["label"]
inputs_by_time = tf.split(value=inputs, num_or_size_splits=num_unroll, axis=1)
assert len(inputs_by_time) == num_unroll
lstm_output, _ = tf.contrib.rnn.static_state_saving_rnn(
cell,
inputs_by_time,
state_saver=batch,
state_name="lstm_state")
# Start a prefetcher in the background
session = tf.Session()
tf.train.start_queue_runners(sess=session)
while True:
# Step through batches, perform training or inference...
session.run([lstm_output])
```
Args:
input_key: A string scalar `Tensor`, the **unique** key for the given
input example. This is used to keep track of the split minibatch elements
of this input. Batched keys of the current iteration are made
accessible via the `key` property. The shape of `input_key` (scalar) must
be fully specified. Consider setting `make_keys_unique` to True when
iterating over the same input multiple times.
**Note**: if `make_keys_unique=False` then `input_key`s must be unique.
input_sequences: A dict mapping string names to `Tensor` values. The values
must all have matching first dimension, called `value_length`. This dimension
may vary from input to input. The remainder of the shape (other than the first
dimension) must be fully specified.
The `SequenceQueueingStateSaver` will split these tensors along
this first dimension into minibatch elements of dimension `num_unroll`.
Batched and segmented sequences of the current iteration are made
accessible via the `sequences` property.
**Note**: if `pad=False`, then `value_length` must always be a multiple
of `num_unroll`.
input_context: A dict mapping string names to `Tensor` values. The values
are treated as "global" across all time splits of the given input example,
and will be copied across for all minibatch elements accordingly.
Batched and copied context of the current iteration are made
accessible via the `context` property.
**Note**: All input_context values must have fully defined shapes.
input_length: None or an int32 scalar `Tensor`, the length of the sequence
prior to padding. If `input_length=None` and `pad=True` then the length
will be inferred and will be equal to `value_length`. If `pad=False` then
`input_length` cannot be `None`: `input_length` must be specified. The
shape of `input_length` (scalar) must be fully specified. Its value may be
at most `value_length` for any given input (see above for the definition
of `value_length`). Batched and total lengths of the current iteration are
made accessible via the `length` and `total_length` properties.
initial_states: A dict mapping string state names to multi-dimensional
values (e.g. constants or tensors). This input defines the set of
states that will be kept track of during computing iterations, and
which can be accessed via the `state` and `save_state` methods.
**Note**: All initial_state values must have fully defined shapes.
num_unroll: Python integer, how many time steps to unroll at a time.
The input sequences of length k are then split into k / num_unroll many
segments.
batch_size: int or int32 scalar `Tensor`, how large minibatches should
be when accessing the `state()` method and `context`, `sequences`, etc,
properties.
num_threads: The int number of threads enqueuing input examples into a
queue.
capacity: The max capacity of the queue in number of examples. Needs to be
at least `batch_size`. Defaults to 1000. When iterating over the same
input example multiple times reusing their keys the `capacity` must be
smaller than the number of examples.
allow_small_batch: If true, the queue will return smaller batches when
there aren't enough input examples to fill a whole batch and the end of
the input has been reached.
pad: If `True`, `input_sequences` will be padded to multiple of
`num_unroll`. In that case `input_length` may be `None` and is assumed to
be the length of first dimension of values in `input_sequences`
(i.e. `value_length`).
make_keys_unique: Whether to append a random integer to the `input_key` in
an effort to make it unique. The seed can be set via
`make_keys_unique_seed`.
make_keys_unique_seed: If `make_keys_unique=True` this fixes the seed with
which a random postfix is generated.
name: An op name string (optional).
Returns:
A NextQueuedSequenceBatch with segmented and batched inputs and their
states.
Raises:
TypeError: if any of the inputs is not an expected type.
ValueError: if any of the input values is inconsistent, e.g. if
not enough shape information is available from inputs to build
the state saver.
"""
tensor_list = (list(input_sequences.values()) + list(input_context.values()) +
list(initial_states.values()))
with ops.name_scope(name, "batch_sequences_with_states", tensor_list) as name:
if pad:
length, input_sequences = _padding(input_sequences, num_unroll)
input_length = input_length if input_length is not None else length
elif input_sequences:
# Assert that value_length is a multiple of num_unroll.
checked_input_sequences = {}
for key, value in input_sequences.items():
if (isinstance(value, sparse_tensor.SparseTensor) or
isinstance(value, sparse_tensor.SparseTensorValue)):
value_length = value.dense_shape[0]
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.logical_and(
math_ops.equal(value_length % num_unroll, 0),
math_ops.not_equal(value_length, 0)),
[
string_ops.string_join([
"SparseTensor %s first dimension should be a "
"multiple of: " % key,
string_ops.as_string(num_unroll),
", but saw value: ",
string_ops.as_string(value_length),
". Consider setting pad=True."])])]):
checked_input_sequences[key] = sparse_tensor.SparseTensor(
indices=array_ops.identity(
value.indices, name="multiple_of_checked"),
values=array_ops.identity(
value.values, name="multiple_of_checked"),
dense_shape=array_ops.identity(
value.dense_shape, name="multiple_of_checked"))
else:
if not isinstance(value, ops.Tensor):
try:
value = ops.convert_to_tensor(value)
except TypeError:
raise TypeError(
"Unsupported input_sequences expected Tensor or SparseTensor "
"values, got: %s for key %s" % (str(type(value)), key))
value_length = array_ops.shape(value)[0]
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.logical_and(
math_ops.equal(value_length % num_unroll, 0),
math_ops.not_equal(value_length, 0)),
[
string_ops.string_join([
"Tensor %s first dimension should be a multiple "
"of: " % key,
string_ops.as_string(num_unroll),
", but saw value: ",
string_ops.as_string(value_length),
". Consider setting pad=True."
])
])
]):
checked_input_sequences[key] = array_ops.identity(
value, name="multiple_of_checked")
input_sequences = checked_input_sequences
# Move SparseTensors in context into input_sequences.
_move_sparse_tensor_out_context(input_context, input_sequences, num_unroll)
# Deconstruct SparseTensors in sequences into dense Tensors of handles before
# inputting to SQSS.
(transformed_input_seq,
sparse_tensor_keys,
tensor_list) = _deconstruct_sparse_tensor_seq(input_sequences)
if make_keys_unique:
input_key = string_ops.string_join([
input_key,
string_ops.as_string(
random_ops.random_uniform(
(), minval=0, maxval=100000000, dtype=dtypes.int32,
seed=make_keys_unique_seed))])
# setup stateful queue reader
stateful_reader = SequenceQueueingStateSaver(
batch_size,
num_unroll,
input_length=input_length,
input_key=input_key,
input_sequences=transformed_input_seq,
input_context=input_context,
initial_states=initial_states,
capacity=capacity,
allow_small_batch=allow_small_batch)
barrier = stateful_reader.barrier
summary.scalar("queue/%s/ready_segment_batches_" % barrier.name,
math_ops.cast(barrier.ready_size(), dtypes.float32))
q_runner = queue_runner.QueueRunner(
stateful_reader, [stateful_reader.prefetch_op] * num_threads,
queue_closed_exception_types=(errors.OutOfRangeError,
errors.CancelledError))
queue_runner.add_queue_runner(q_runner)
batch = stateful_reader.next_batch
# Reconstruct SparseTensors in sequence.
_reconstruct_sparse_tensor_seq(
batch.sequences,
sparse_tensor_keys,
tensor_list,
batch_size,
num_unroll)
# Move select SparseTensors back to context.
_move_sparse_tensor_in_context(batch.context, batch.sequences)
return batch
def _padding(sequences, num_unroll):
"""For a dictionary of sequences, pads tensors to a multiple of `num_unroll`.
Args:
sequences: dictionary with `Tensor` values.
num_unroll: int specifying the multiple to pad sequences to.
Returns:
length: Scalar `Tensor`, the size of dimension 0 of the values in `sequences`.
padded_sequences: Dictionary of sequences that are padded to a multiple of
`num_unroll`.
Raises:
ValueError: If `num_unroll` is not an int.
TypeError: If `sequences` is not a dictionary from string to `Tensor` or
`SparseTensor`.
"""
if not isinstance(num_unroll, numbers.Integral):
raise ValueError("Unsupported num_unroll expected int, got: %s" %
str(num_unroll))
if not isinstance(sequences, dict):
raise TypeError("Unsupported sequences expected dict, got: %s" %
str(sequences))
for key, value in sequences.items():
if not isinstance(key, six.string_types):
raise TypeError("Unsupported sequences key expected string, got: %s" %
str(key))
if not sequences:
return 0, {}
sequences_dict = {}
for key, value in sequences.items():
if not (isinstance(value, sparse_tensor.SparseTensor) or
isinstance(value, sparse_tensor.SparseTensorValue)):
sequences_dict[key] = ops.convert_to_tensor(value)
else:
sequences_dict[key] = value
lengths = [array_ops.shape(value)[0] for value in sequences_dict.values()
if isinstance(value, ops.Tensor)]
if lengths:
length = lengths[0]
all_lengths_equal = [
control_flow_ops.Assert(
math_ops.equal(l, length), [string_ops.string_join(
["All sequence lengths must match, but received lengths: ",
string_ops.as_string(lengths)])])
for l in lengths]
length = control_flow_ops.with_dependencies(all_lengths_equal, length)
else: # Only have SparseTensors
sparse_lengths = [value.dense_shape[0] for value in sequences_dict.values()
if isinstance(value, sparse_tensor.SparseTensor)]
# `dense_shape` entries are int64; cast to int32 to match the dense branch.
length = math_ops.reduce_max(math_ops.to_int32(sparse_lengths))
unroll = array_ops.constant(num_unroll)
padded_length = length + ((unroll - (length % unroll)) % unroll)
padded_sequences = {}
for key, value in sequences_dict.items():
if isinstance(value, ops.Tensor):
# 1. create shape of paddings
# first dimension of value will be increased by num_paddings to
# padded_length
num_paddings = [padded_length - array_ops.shape(value)[0]]
# the shape of the paddings that we concat with the original value will be
# [num_paddings, tf.shape(value)[1], tf.shape(value)[2], ...,
# tf.shape(value)[tf.rank(value) - 1])]
padding_shape = array_ops.concat(
(num_paddings, array_ops.shape(value)[1:]), 0)
# 2. fill padding shape with dummies
dummy = array_ops.constant(
"" if value.dtype == dtypes.string else 0, dtype=value.dtype)
paddings = array_ops.fill(dims=padding_shape, value=dummy)
# 3. concat values with paddings
padded_sequences[key] = array_ops.concat([value, paddings], 0)
else:
padded_shape = array_ops.concat([[math_ops.to_int64(padded_length)],
value.dense_shape[1:]], 0)
padded_sequences[key] = sparse_tensor.SparseTensor(
indices=value.indices,
values=value.values,
dense_shape=padded_shape)
return length, padded_sequences
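# Sketch (editor's illustration): padding a hypothetical [7, 3] float sequence
# to a multiple of num_unroll = 4.
#
#   seqs = {"tokens": array_ops.ones([7, 3])}
#   length, padded = _padding(seqs, num_unroll=4)
#   # length evaluates to 7; padded["tokens"] has shape [8, 3], with one
#   # all-zero row appended (empty strings are used for string dtypes).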
_SPARSE_CONTEXT_PREFIX_KEY = "_context_in_seq_"
def _move_sparse_tensor_out_context(input_context, input_sequences, num_unroll):
"""Moves `SparseTensor`s from `input_context` into `input_sequences` as seq.
For `key, value` pairs in `input_context` with a `SparseTensor` `value`, this
removes them from `input_context`, transforms the `value` into a sequence, and
then adds `key` with the transformed `value` to `input_sequences`.
The transformation is done by adding a new first dimension of `value_length`
equal to that of the other values in `input_sequences` and tiling the `value`
every `num_unroll` steps.
Args:
input_context: dictionary with `Tensor` or `SparseTensor` values. To be
modified to take out `SparseTensor` values.
input_sequences: dictionary with `Tensor` or `SparseTensor` values. To be
modified to add transformed `SparseTensor` values from `input_context`.
num_unroll: int, the number of time steps per unrolled segment (used as the
tiling stride).
"""
value_length = array_ops.constant(1)
if input_sequences:
seq = list(input_sequences.values())[0]
if isinstance(seq, ops.Tensor):
value_length = array_ops.shape(seq)[0]
else:
value_length = seq.dense_shape[0]
value_length = math_ops.cast(value_length, dtype=dtypes.int64)
def _copy_sparse_tensor(sp_tensor):
"""Operation to tile a sparse tensor along a newly added 0 dimension.
Adding a new first dimension of `value_length` and tiling the `sp_tensor`
every `num_unroll` steps.
Args:
sp_tensor: `SparseTensor`.
Returns:
`SparseTensor` sequence with `sp_tensor` tiled.
"""
n = value_length // num_unroll
n = math_ops.cast(n, dtype=dtypes.int32)
values = array_ops.tile(sp_tensor.values, array_ops.expand_dims(n, 0))
shape = array_ops.concat(
[array_ops.expand_dims(value_length, 0), sp_tensor.dense_shape], 0)
# Construct new indices by multiplying old ones and prepending [0, n).
# First multiply indices n times along a newly created 0-dimension.
multiplied_indices = array_ops.tile(
array_ops.expand_dims(sp_tensor.indices, 0),
array_ops.stack([n, 1, 1]))
# Construct indicator for [0, n).
# [ [ [0] [0] ... [0] ]
# [ [num_unroll] [num_unroll] ... [num_unroll] ]
# ...
# [ [num_unroll*(n-1)] [num_unroll*(n-1)] ... [num_unroll*(n-1)] ] ]
# of shape [n, shape(sp_tensor.indices)[0], 1]
# Get current dimensions of indices.
dim0 = array_ops.shape(sp_tensor.indices)[0]
dim1 = array_ops.shape(sp_tensor.indices)[1]
ind = math_ops.range(start=0, limit=value_length, delta=num_unroll)
# ind.set_shape([n])
ind = array_ops.expand_dims(ind, 1)
ind = array_ops.expand_dims(ind, 2)
ind = array_ops.tile(ind, [1, dim0, 1])
# Concatenate both and reshape.
indices = array_ops.concat([ind, multiplied_indices], 2)
indices = array_ops.reshape(indices, [dim0 * n, dim1 + 1])
return sparse_tensor.SparseTensor(indices=indices,
values=values,
dense_shape=shape)
sparse_tensor_keys = [
k for k in sorted(input_context.keys())
if (isinstance(input_context[k], sparse_tensor.SparseTensor) or
isinstance(input_context[k], sparse_tensor.SparseTensorValue))]
for key in sparse_tensor_keys:
input_sequences[_SPARSE_CONTEXT_PREFIX_KEY + key] = _copy_sparse_tensor(
input_context[key])
del input_context[key]
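# Editor's note: after this call, a context SparseTensor originally stored
# under key "genre" (hypothetical) lives in input_sequences under
# "_context_in_seq_genre", with one copy placed at the first time step of
# each num_unroll-sized segment; _move_sparse_tensor_in_context reverses the
# move after batching.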
def _move_sparse_tensor_in_context(context, sequences):
sparse_tensor_keys = [
k for k in sorted(sequences) if k.startswith(_SPARSE_CONTEXT_PREFIX_KEY)]
for key in sparse_tensor_keys:
new_key = key[len(_SPARSE_CONTEXT_PREFIX_KEY):]
sp_tensor = sequences[key]
# Take out time dimension.
sp_tensor = sparse_tensor.SparseTensor(
sp_tensor.indices, # with only 0s at column 1 representing time.
sp_tensor.values,
array_ops.concat(
[[sp_tensor.dense_shape[0]], # batch
[1], # time
sp_tensor.dense_shape[2:]], # SparseTensor shape prior to batching
0))
new_shape = array_ops.concat(
[[sp_tensor.dense_shape[0]], sp_tensor.dense_shape[2:]], 0)
context[new_key] = sparse_ops.sparse_reshape(sp_tensor, new_shape)
del sequences[key]
def _deconstruct_sparse_tensor_seq(input_sequence, shared_name=None):
"""Converts `SparseTensor` values into `Tensors` of IDs and meta data.
Given a dict of keys -> `Tensor` or `SparseTensor` transforms the
`SparseTensor` values into `Tensor` values of IDs by calling `_store_sparse`.
The IDs are pointers into an underlying `SparseTensorsMap` that is being
constructed. Additional meta data is returned in order to be able to
reconstruct `SparseTensor` values after batching and segmenting the IDs
`Tensor`.
Args:
input_sequence: dictionary with `Tensor` or `SparseTensor` values.
shared_name: The shared name for the underlying `SparseTensorsMap`
(optional, defaults to the name of the newly created op).
Returns:
A tuple `(sequence, sparse_tensor_keys, tensor_list)` where `sequence` is
dictionary with the same keys as `input_sequence` but only `Tensor` values,
`sparse_tensor_keys` is a list of the keys of the `SparseTensor` values that
were converted, and `tensor_list` is a list of the same length containing the
`Operation`s that produced the handle `Tensor`s.
"""
sparse_tensor_keys = [
k for k in sorted(input_sequence.keys())
if (isinstance(input_sequence[k], sparse_tensor.SparseTensor) or
isinstance(input_sequence[k], sparse_tensor.SparseTensorValue))]
if not sparse_tensor_keys:
return input_sequence, None, sparse_tensor_keys
sparse_tensor_list = [input_sequence[k] for k in sparse_tensor_keys]
tensor_list = [_store_sparse(sp_tensor, shared_name=shared_name)
for sp_tensor in sparse_tensor_list]
transformed_input_seq = dict(input_sequence)
tensor_op_list = []
for i, k in enumerate(sparse_tensor_keys):
transformed_input_seq[k] = tensor_list[i]
tensor_op_list += [tensor_list[i].op]
return transformed_input_seq, sparse_tensor_keys, tensor_op_list
def _reconstruct_sparse_tensor_seq(sequence,
sparse_tensor_keys,
tensor_op_list,
batch_size,
num_unroll):
"""Inverse of _deconstruct_sparse_tensor_seq.
Given a dict of keys -> `Tensor` reconstructs `SparseTensor` values for keys
in `sparse_tensor_keys`. Their `Tensor` values are assumed to be IDs into the
underlying `SparseTensorsMap`. The `dense_shape` of the `SparseTensor`s is
`[batch_size, num_unroll, d_0, d_1, ..., d_n]` when the original
`SparseTensor` that got deconstructed with `_deconstruct_sparse_tensor_seq`
has a `dense_shape` of `[None, d_0, d_1, ..., d_n]`.
Args:
sequence: dictionary with only `Tensor` values that is being updated.
sparse_tensor_keys: list of the keys present in `sequence` identifying
`SparseTensor` values that should be reconstructed.
tensor_op_list: list of the same length as `sparse_tensor_keys` with
`Tensor` objects.
batch_size: int or int32 scalar `Tensor`, how large minibatches should
be.
num_unroll: Python integer, how many time steps were unrolled at a time.
"""
def _flatten_tensor(tensor):
"""Flattens `Tensor` of `shape [batch_size, num_unroll]` into 1D `Tensor`.
The main use of this function is to work around the limitation of
`_restore_sparse` to only accept 1D handles.
Args:
tensor: 2D `Tensor` of `shape [batch_size, num_unroll]`
Returns:
1D `Tensor`.
"""
return array_ops.reshape(tensor, [-1])
def _unflatten_sparse_tensor(sp_tensor):
"""Recreates `[batch_size, num_unroll]` dimensions in the `SparseTensor`.
Counter-part of `_flatten_tensor` which is called on the input of
`_restore_sparse` while this method is called on the output of it.
Together they work around the limitation of `_restore_sparse` to only
accept 1D handles.
The `indices` in `sp_tensor` is a 2D `Tensor` of `shape [N, ndims]`, where
    `N` is the number of `values` and `ndims` is the number of dimensions in its
dense counterpart. Among `ndims` the first entry corresponds to the batch
dimension `[0, num_unroll * batch_size)` from which we need to recreate the
2 dimensions `batch_size` and `num_unroll`.
The reason this reconstruction works is because the output of
`_restore_sparse` despite being a `SparseTensor` is actually dense w.r.t.
that first entry.
Args:
sp_tensor: A SparseTensor.
Returns:
A SparseTensor with a +1 higher rank than the input.
"""
idx_batch = math_ops.to_int64(
math_ops.floor(sp_tensor.indices[:, 0] / num_unroll))
idx_time = math_ops.mod(sp_tensor.indices[:, 0], num_unroll)
indices = array_ops.concat(
[
array_ops.expand_dims(idx_batch, 1),
array_ops.expand_dims(idx_time, 1), sp_tensor.indices[:, 1:]
],
axis=1)
dense_shape = array_ops.concat(
[[math_ops.cast(batch_size, dtype=dtypes.int64)],
[math_ops.cast(num_unroll, dtype=dtypes.int64)],
sp_tensor.dense_shape[1:]], axis=0)
return sparse_tensor.SparseTensor(
indices=indices,
values=sp_tensor.values,
dense_shape=dense_shape)
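  # Illustrative note (added, not in the original): with batch_size=2 and
  # num_unroll=3, a flattened first index of 4 maps back to
  #   idx_batch = floor(4 / 3) = 1,  idx_time = 4 mod 3 = 1,
  # i.e. the second batch entry at its second unrolled time step.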
if not sparse_tensor_keys:
return
tensor_list = [sequence[k] for k in sparse_tensor_keys]
sp_tensors = [
_restore_sparse(sparse_map_op=i,
# Flatten the 2D Tensor [batch_size, num_unroll] of
# handles to a 1D Tensor.
# Reconstruct the dimensions later.
# TODO(b/34247140): Remove this workaround.
sparse_handles=_flatten_tensor(s), rank=None)
for i, s in zip(tensor_op_list, tensor_list)]
num_unroll = ops.convert_to_tensor(num_unroll, dtype=dtypes.int64,
name="num_unroll_int64")
# Recreate the [batch_size, num_unroll] dimensions in the SparseTensors.
# The dense_shape will have a +1 higher rank.
# TODO(b/34247140): Remove this workaround.
sp_tensors_higher_dim = [_unflatten_sparse_tensor(s) for s in sp_tensors]
# Set values to SparseTensors for sparse_tensor_keys.
for i, key in enumerate(sparse_tensor_keys):
sequence[key] = sp_tensors_higher_dim[i]
return
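# Hedged usage sketch (appended for illustration; not part of the original
# module). The intended flow is roughly:
#   seq, sp_keys, ops = _deconstruct_sparse_tensor_seq(input_sequences)
#   ... `seq` is batched/unrolled by the state saver into `batched` ...
#   _reconstruct_sparse_tensor_seq(batched, sp_keys, ops, batch_size, num_unroll)
# The variable names `input_sequences` and `batched` are placeholders chosen
# for illustration only.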
| apache-2.0 | 3,781,946,012,043,635,700 | 41.012267 | 80 | 0.648826 | false |
thinkopensolutions/odoo-brazil-banking | l10n_br_account_banking_payment_bradesco_tributos/wizard/payment_order_create.py | 1 | 5580 | # -*- coding: utf-8 -*-
# ###########################################################################
#
# Author: Luis Felipe Mileo
# Luiz Felipe do Divino
# Copyright 2015 KMEE - www.kmee.com.br
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import fields, models, api
from datetime import datetime, timedelta
from openerp.addons import decimal_precision as dp
class PaymentOrderCreate(models.TransientModel):
_inherit = 'payment.order.create'
@api.multi
def extend_payment_order_domain(self, payment_order, domain):
super(PaymentOrderCreate, self).extend_payment_order_domain(
payment_order, domain)
if payment_order.mode.type.code == 'gnre':
if payment_order.mode.payment_order_type == 'tributos':
domain += [
('debit', '>', 0),
('account_id.type', '=', 'receivable'),
]
# for i in domain:
# del i
# tax_code_ids = self.env[
# 'account.tax.code'].search([('domain', '=', 'icms')])
# domain.append(('tax_code_id', 'in', tax_code_ids.ids))
return True
@api.multi
def _prepare_payment_line(self, payment, line):
res = super(PaymentOrderCreate, self)._prepare_payment_line(
payment, line
)
if payment.mode.type.code == 'gnre':
value = getattr(line.invoice, payment.mode.gnre_value_field.name)
res['amount_currency'] = value
res['date'] = line.invoice.date_invoice
res['ml_maturity_date'] = (
datetime.strptime(
line.invoice.date_invoice, "%Y-%m-%d") +
timedelta(days=line.invoice.gnre_due_days))
return res
# @api.multi
# def create_payment(self):
# super(PaymentOrderCreate, self).create_payment()
#
# parser_gnre = bradesco_tax.BradescoGnre()
#
# arq = open('/tmp/testeGNRE', 'w')
#
# texto = ''
#
# for line in self.entries:
# if line.partner_id.is_company:
# tipo_inscricao = '2'
# else:
# tipo_inscricao = '1'
#
# if str(line.credit)[-2] == '.':
# valor_tributo = str(line.credit).replace('.', '') + '0'
#
# endereco01 = str(line.partner_id.street)
# # endereco02 = str(line.partner_id.street2.replace('º', ''))
# endereco_cliente = endereco01
# vals = {
# 'identificador_tributo': 'G',
# 'nome_cliente': str(line.partner_id.name),
# 'endereco_cliente': endereco_cliente,
# 'cep_cliente': str(line.partner_id.zip.replace('-', '')),
# 'uf_cliente': str(line.partner_id.state_id.code),
# 'autoriza_pagamento': 'S',
# 'tipo_inscricao': tipo_inscricao,
# 'uf_favorecida': str(line.partner_id.state_id.code),
# 'telefone_cliente': str(line.partner_id.phone
# .replace('(', '').replace(')', '')
# .replace('-', '').replace(' ', '')),
# 'numero_inscricao': str(line.partner_id.cnpj_cpf
# .replace('.', '').replace('/', '')
# .replace('-', '')),
# 'valor_do_principal': valor_tributo,
# 'data_pagamento_tributo': line.date.replace('-', ''),
# 'data_vencimento_tributo': line.date.replace('-', ''),
# 'num_doc_origem': str(line.invoice.display_name),
# }
#
# linha_arquivo = parser_gnre.remessa(**vals)
# texto += linha_arquivo
# texto += '\n'
# print linha_arquivo
#
# arq.write(texto)
# arq.close()
#
# return True
#
#
# class AccountInvoiceSeparetedTaxes(models.Model):
# _inherit = 'account.invoice'
#
# icms_st_value_total = fields.Float(
# 'Total de Subsituição Tributária antecipada em nome do cliente',
# digits=dp.get_precision('Account'), default=0.00)
# gerar_gnre = fields.Boolean("Gerar GNRE", related='partner_id.gerar_gnre',
# store=True)
# @api.multi
# @api.depends('invoice_line', 'tax_line.amount')
# def _compute_amount(self):
# super(AccountInvoiceSeparetedTaxes, self)._compute_amount()
#
# self.amount_total = self.amount_untaxed + \
# self.amount_costs + self.amount_insurance + self.amount_freight
#
# self.write({'icms_st_value_total': self.icms_st_value})
#
# class AccountInvoiceProductIcmsst(models.Model):
# _inherit = 'account.invoice.line'
| agpl-3.0 | -3,927,212,068,343,900,700 | 38.546099 | 80 | 0.526901 | false |
girving/tensorflow | tensorflow/python/kernel_tests/scalar_test.py | 27 | 4822 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for scalar strictness and scalar leniency."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class ScalarTest(test.TestCase):
def check(self, op, args, error, correct=None):
# Within Google, the switch to scalar strict occurred at version 6.
lenient = []
strict = [5, 6]
# Use placeholders to bypass shape inference, since only the C++
# GraphDef level is ever scalar lenient.
def placeholders(args, feed):
if isinstance(args, tuple):
return [placeholders(x, feed) for x in args]
else:
x = ops.convert_to_tensor(args).eval()
fake = array_ops.placeholder(np.asarray(x).dtype)
feed[fake] = x
return fake
# Test various GraphDef versions
for version in strict + lenient:
with ops.Graph().as_default() as g:
test_util.set_producer_version(g, version)
with self.session(graph=g) as sess:
feed = {}
xs = placeholders(args, feed)
x = op(*xs)
if version in strict:
with self.assertRaisesOpError(error):
sess.run(x, feed_dict=feed)
else:
r = sess.run(x, feed_dict=feed)
if correct is not None:
self.assertAllEqual(r, correct)
def testConcat(self):
self.check(array_ops.concat, (([2], [3], [7]), [0]),
'axis tensor should be a scalar integer', [2, 3, 7])
for data in (2, 3, 7), (2, [3], 7), (2, 3, [7]):
self.check(array_ops.concat, (data, 0),
r'Expected \w+ dimensions in the range \[0, 0\)', [2, 3, 7])
for data in ([2], 3, 7), ([2], [3], 7):
self.check(array_ops.concat, (data, 0),
r'Ranks of all input tensors should match', [2, 3, 7])
def testFill(self):
self.check(array_ops.fill, (2, 3), 'dims must be a vector', [3, 3])
self.check(array_ops.fill, ([2], [3]), 'value must be a scalar', [3, 3])
def testPad(self):
self.check(array_ops.pad, (7, [[1, 2]]),
'The first dimension of paddings must be the rank of inputs',
[0, 7, 0, 0])
def testRandom(self):
self.check(random_ops.random_uniform, (3,), 'shape must be a vector')
def testReshape(self):
self.check(array_ops.reshape, (7, 1), 'sizes input must be 1-D', [7])
def testShardedFilename(self):
self.check(gen_io_ops.sharded_filename, ('foo', 4, [100]),
'must be a scalar', b'foo-00004-of-00100')
def testShardedFilespec(self):
self.check(gen_io_ops.sharded_filespec, ('foo', [100]), 'must be a scalar',
b'foo-?????-of-00100')
def testUnsortedSegmentSum(self):
self.check(math_ops.unsorted_segment_sum, (7, 1, [4]),
'num_segments should be a scalar', [0, 7, 0, 0])
def testRange(self):
self.check(math_ops.range, ([0], 3, 2), 'start must be a scalar', [0, 2])
self.check(math_ops.range, (0, [3], 2), 'limit must be a scalar', [0, 2])
self.check(math_ops.range, (0, 3, [2]), 'delta must be a scalar', [0, 2])
def testSlice(self):
data = np.arange(10)
error = 'Expected begin and size arguments to be 1-D tensors'
self.check(array_ops.slice, (data, 2, 3), error, [2, 3, 4])
self.check(array_ops.slice, (data, [2], 3), error, [2, 3, 4])
self.check(array_ops.slice, (data, 2, [3]), error, [2, 3, 4])
def testSparseToDense(self):
self.check(sparse_ops.sparse_to_dense, (1, 4, 7),
'output_shape should be a vector', [0, 7, 0, 0])
def testTile(self):
self.check(array_ops.tile, ([7], 2), 'Expected multiples to be 1-D', [7, 7])
if __name__ == '__main__':
test.main()
| apache-2.0 | -3,881,259,461,830,751,700 | 37.269841 | 80 | 0.619453 | false |
eharney/cinder | cinder/tests/unit/brick/test_brick_lvm.py | 1 | 19008 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from oslo_concurrency import processutils
from cinder.brick.local_dev import lvm as brick
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
@ddt.ddt
class BrickLvmTestCase(test.TestCase):
def setUp(self):
if not hasattr(self, 'configuration'):
self.configuration = mock.Mock(conf.Configuration)
self.configuration.lvm_suppress_fd_warnings = False
self.configuration.volume_group_name = 'fake-vg'
super(BrickLvmTestCase, self).setUp()
self.mock_object(processutils, 'execute', self.fake_execute)
self.vg = brick.LVM(
self.configuration.volume_group_name,
'sudo',
False, None,
'default',
self.fake_execute,
suppress_fd_warn=self.configuration.lvm_suppress_fd_warnings)
def failed_fake_execute(obj, *cmd, **kwargs):
return ("\n", "fake-error")
def fake_pretend_lvm_version(obj, *cmd, **kwargs):
return (" LVM version: 2.03.00 (2012-03-06)\n", "")
def fake_old_lvm_version(obj, *cmd, **kwargs):
# Does not support thin prov or snap activation
return (" LVM version: 2.02.65(2) (2012-03-06)\n", "")
def fake_customised_lvm_version(obj, *cmd, **kwargs):
return (" LVM version: 2.02.100(2)-RHEL6 (2013-09-12)\n", "")
def fake_execute(obj, *cmd, **kwargs): # noqa
if obj.configuration.lvm_suppress_fd_warnings:
_lvm_prefix = 'env, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=1, '
else:
_lvm_prefix = 'env, LC_ALL=C, '
cmd_string = ', '.join(cmd)
data = "\n"
if (_lvm_prefix + 'vgs, --noheadings, --unit=g, -o, name' ==
cmd_string):
data = " fake-vg\n"
data += " some-other-vg\n"
elif (_lvm_prefix + 'vgs, --noheadings, -o, name, fake-vg' ==
cmd_string):
data = " fake-vg\n"
elif _lvm_prefix + 'vgs, --version' in cmd_string:
data = " LVM version: 2.02.103(2) (2012-03-06)\n"
elif(_lvm_prefix + 'vgs, --noheadings, -o, uuid, fake-vg' in
cmd_string):
data = " kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n"
elif(_lvm_prefix + 'vgs, --noheadings, --unit=g, '
'-o, name,size,free,lv_count,uuid, '
'--separator, :, --nosuffix' in cmd_string):
data = (" test-prov-cap-vg-unit:10.00:10.00:0:"
"mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n")
if 'test-prov-cap-vg-unit' in cmd_string:
return (data, "")
data = (" test-prov-cap-vg-no-unit:10.00:10.00:0:"
"mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n")
if 'test-prov-cap-vg-no-unit' in cmd_string:
return (data, "")
data = " fake-vg:10.00:10.00:0:"\
"kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n"
if 'fake-vg' in cmd_string:
return (data, "")
data += " fake-vg-2:10.00:10.00:0:"\
"lWyauW-dKpG-Rz7E-xtKY-jeju-QsYU-SLG7Z2\n"
data += " fake-vg-3:10.00:10.00:0:"\
"mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z3\n"
elif (_lvm_prefix + 'lvs, --noheadings, '
'--unit=g, -o, vg_name,name,size, --nosuffix, '
'fake-vg/lv-nothere' in cmd_string):
raise processutils.ProcessExecutionError(
stderr="One or more specified logical volume(s) not found.")
elif (_lvm_prefix + 'lvs, --noheadings, '
'--unit=g, -o, vg_name,name,size, --nosuffix, '
'fake-vg/lv-newerror' in cmd_string):
raise processutils.ProcessExecutionError(
stderr="Failed to find logical volume \"fake-vg/lv-newerror\"")
elif (_lvm_prefix + 'lvs, --noheadings, '
'--unit=g, -o, vg_name,name,size' in cmd_string):
if 'fake-unknown' in cmd_string:
raise processutils.ProcessExecutionError(
stderr="One or more volume(s) not found."
)
if 'test-prov-cap-vg-unit' in cmd_string:
data = " fake-vg test-prov-cap-pool-unit 9.50g\n"
data += " fake-vg fake-volume-1 1.00g\n"
data += " fake-vg fake-volume-2 2.00g\n"
elif 'test-prov-cap-vg-no-unit' in cmd_string:
data = " fake-vg test-prov-cap-pool-no-unit 9.50\n"
data += " fake-vg fake-volume-1 1.00\n"
data += " fake-vg fake-volume-2 2.00\n"
elif 'test-found-lv-name' in cmd_string:
data = " fake-vg test-found-lv-name 9.50\n"
else:
data = " fake-vg fake-1 1.00g\n"
data += " fake-vg fake-2 1.00g\n"
elif (_lvm_prefix + 'lvdisplay, --noheading, -C, -o, Attr' in
cmd_string):
if 'test-volumes' in cmd_string:
data = ' wi-a-'
elif 'snapshot' in cmd_string:
data = ' swi-a-s--'
elif 'open' in cmd_string:
data = ' -wi-ao---'
else:
data = ' owi-a-'
elif (_lvm_prefix + 'lvdisplay, --noheading, -C, -o, Origin' in
cmd_string):
if 'snapshot' in cmd_string:
data = ' fake-volume-1'
else:
data = ' '
elif _lvm_prefix + 'pvs, --noheadings' in cmd_string:
data = " fake-vg|/dev/sda|10.00|1.00\n"
data += " fake-vg|/dev/sdb|10.00|1.00\n"
data += " fake-vg|/dev/sdc|10.00|8.99\n"
data += " fake-vg-2|/dev/sdd|10.00|9.99\n"
if '--ignoreskippedcluster' not in cmd_string:
raise processutils.ProcessExecutionError(
stderr="Skipping clustered volume group",
stdout=data,
exit_code=5
)
elif _lvm_prefix + 'lvs, --noheadings, --unit=g' \
', -o, size,data_percent, --separator, :' in cmd_string:
if 'test-prov-cap-pool' in cmd_string:
data = " 9.5:20\n"
else:
data = " 9:12\n"
elif 'lvcreate, -T, -L, ' in cmd_string:
pass
elif 'lvcreate, -T, -V, ' in cmd_string:
pass
elif 'lvcreate, -n, ' in cmd_string:
pass
elif 'lvcreate, --name, ' in cmd_string:
pass
elif 'lvextend, -L, ' in cmd_string:
pass
else:
raise AssertionError('unexpected command called: %s' % cmd_string)
return (data, "")
def test_create_lv_snapshot(self):
self.assertIsNone(self.vg.create_lv_snapshot('snapshot-1', 'fake-1'))
with mock.patch.object(self.vg, 'get_volume', return_value=None):
try:
self.vg.create_lv_snapshot('snapshot-1', 'fake-non-existent')
except exception.VolumeDeviceNotFound as e:
self.assertEqual('fake-non-existent', e.kwargs['device'])
else:
self.fail("Exception not raised")
def test_vg_exists(self):
self.assertTrue(self.vg._vg_exists())
def test_get_all_volumes(self):
out = self.vg.get_volumes()
self.assertEqual('fake-1', out[0]['name'])
self.assertEqual('1.00g', out[0]['size'])
self.assertEqual('fake-vg', out[0]['vg'])
def test_get_volume(self):
self.assertEqual('fake-1', self.vg.get_volume('fake-1')['name'])
def test_get_volume_none(self):
self.assertIsNone(self.vg.get_volume('fake-unknown'))
def test_get_lv_info_notfound(self):
# lv-nothere will raise lvm < 2.102.112 exception
self.assertEqual(
[],
self.vg.get_lv_info(
'sudo', vg_name='fake-vg', lv_name='lv-nothere')
)
# lv-newerror will raise lvm > 2.102.112 exception
self.assertEqual(
[],
self.vg.get_lv_info(
'sudo', vg_name='fake-vg', lv_name='lv-newerror')
)
def test_get_lv_info_found(self):
lv_info = [{'size': '9.50', 'name': 'test-found-lv-name',
'vg': 'fake-vg'}]
self.assertEqual(
lv_info,
self.vg.get_lv_info(
'sudo', vg_name='fake-vg',
lv_name='test-found-lv-name')
)
def test_get_lv_info_no_lv_name(self):
lv_info = [{'name': 'fake-1', 'size': '1.00g', 'vg': 'fake-vg'},
{'name': 'fake-2', 'size': '1.00g', 'vg': 'fake-vg'}]
self.assertEqual(
lv_info,
self.vg.get_lv_info(
'sudo', vg_name='fake-vg')
)
def test_get_all_physical_volumes(self):
# Filtered VG version
pvs = self.vg.get_all_physical_volumes('sudo', 'fake-vg')
self.assertEqual(3, len(pvs))
# Non-Filtered, all VG's
pvs = self.vg.get_all_physical_volumes('sudo')
self.assertEqual(4, len(pvs))
def test_get_volume_groups(self):
self.assertEqual(3, len(self.vg.get_all_volume_groups('sudo')))
self.assertEqual(1,
len(self.vg.get_all_volume_groups('sudo', 'fake-vg')))
def test_thin_support(self):
# lvm.supports_thin() is a static method and doesn't
# use the self._executor fake we pass in on init
# so we need to stub processutils.execute appropriately
self.assertTrue(self.vg.supports_thin_provisioning('sudo'))
with mock.patch.object(processutils, 'execute',
self.fake_pretend_lvm_version):
self.assertTrue(self.vg.supports_thin_provisioning('sudo'))
with mock.patch.object(processutils, 'execute',
self.fake_old_lvm_version):
self.assertFalse(self.vg.supports_thin_provisioning('sudo'))
with mock.patch.object(processutils, 'execute',
self.fake_customised_lvm_version):
self.assertTrue(self.vg.supports_thin_provisioning('sudo'))
def test_snapshot_lv_activate_support(self):
self.vg._supports_snapshot_lv_activation = None
self.assertTrue(self.vg.supports_snapshot_lv_activation)
self.vg._supports_snapshot_lv_activation = None
with mock.patch.object(processutils, 'execute',
self.fake_old_lvm_version):
self.assertFalse(self.vg.supports_snapshot_lv_activation)
self.vg._supports_snapshot_lv_activation = None
def test_lvchange_ignskipact_support_yes(self):
"""Tests if lvchange -K is available via a lvm2 version check."""
self.vg._supports_lvchange_ignoreskipactivation = None
with mock.patch.object(processutils, 'execute',
self.fake_pretend_lvm_version):
self.assertTrue(self.vg.supports_lvchange_ignoreskipactivation)
self.vg._supports_lvchange_ignoreskipactivation = None
with mock.patch.object(processutils, 'execute',
self.fake_old_lvm_version):
self.assertFalse(self.vg.supports_lvchange_ignoreskipactivation)
self.vg._supports_lvchange_ignoreskipactivation = None
def test_pvs_ignoreskippedcluster_support(self):
"""Tests if lvm support ignoreskippedcluster option."""
brick.LVM._supports_pvs_ignoreskippedcluster = None
with mock.patch.object(processutils, 'execute',
self.fake_pretend_lvm_version):
self.assertTrue(brick.LVM.supports_pvs_ignoreskippedcluster(
'sudo'))
brick.LVM._supports_pvs_ignoreskippedcluster = None
with mock.patch.object(processutils, 'execute',
self.fake_old_lvm_version):
self.assertFalse(brick.LVM.supports_pvs_ignoreskippedcluster(
'sudo'))
brick.LVM._supports_pvs_ignoreskippedcluster = None
def test_thin_pool_creation(self):
# The size of fake-vg volume group is 10g, so the calculated thin
# pool size should be 9.5g (95% of 10g).
self.assertEqual("9.5g", self.vg.create_thin_pool())
# Passing a size parameter should result in a thin pool of that exact
# size.
for size in ("1g", "1.2g", "1.75g"):
self.assertEqual(size, self.vg.create_thin_pool(size_str=size))
def test_thin_pool_provisioned_capacity(self):
self.vg.vg_thin_pool = "test-prov-cap-pool-unit"
self.vg.vg_name = 'test-prov-cap-vg-unit'
self.assertEqual(
"9.5g",
self.vg.create_thin_pool(name=self.vg.vg_thin_pool))
self.assertEqual("9.50", self.vg.vg_thin_pool_size)
self.assertEqual(7.6, self.vg.vg_thin_pool_free_space)
self.assertEqual(3.0, self.vg.vg_provisioned_capacity)
self.vg.vg_thin_pool = "test-prov-cap-pool-no-unit"
self.vg.vg_name = 'test-prov-cap-vg-no-unit'
self.assertEqual(
"9.5g",
self.vg.create_thin_pool(name=self.vg.vg_thin_pool))
self.assertEqual("9.50", self.vg.vg_thin_pool_size)
self.assertEqual(7.6, self.vg.vg_thin_pool_free_space)
self.assertEqual(3.0, self.vg.vg_provisioned_capacity)
def test_thin_pool_free_space(self):
# The size of fake-vg-pool is 9g and the allocated data sums up to
# 12% so the calculated free space should be 7.92
self.assertEqual(float("7.92"),
self.vg._get_thin_pool_free_space("fake-vg",
"fake-vg-pool"))
def test_volume_create_after_thin_creation(self):
"""Test self.vg.vg_thin_pool is set to pool_name
See bug #1220286 for more info.
"""
vg_name = "vg-name"
pool_name = vg_name + "-pool"
self.vg.create_thin_pool(pool_name, "1G")
with mock.patch.object(self.vg, '_execute'):
self.vg.create_volume("test", "1G", lv_type='thin')
if self.configuration.lvm_suppress_fd_warnings is False:
self.vg._execute.assert_called_once_with(
'env', 'LC_ALL=C', 'lvcreate', '-T', '-V',
'1G', '-n', 'test', 'fake-vg/vg-name-pool',
root_helper='sudo', run_as_root=True)
else:
self.vg._execute.assert_called_once_with(
'env', 'LC_ALL=C', 'LVM_SUPPRESS_FD_WARNINGS=1',
'lvcreate', '-T', '-V', '1G', '-n', 'test',
'fake-vg/vg-name-pool', root_helper='sudo',
run_as_root=True)
self.assertEqual(pool_name, self.vg.vg_thin_pool)
def test_volume_create_when_executor_failed(self):
def fail(*args, **kwargs):
raise processutils.ProcessExecutionError()
self.vg._execute = fail
with mock.patch.object(self.vg, 'get_all_volume_groups') as m_gavg:
self.assertRaises(
processutils.ProcessExecutionError,
self.vg.create_volume, "test", "1G"
)
m_gavg.assert_called()
def test_lv_has_snapshot(self):
self.assertTrue(self.vg.lv_has_snapshot('fake-vg'))
self.assertFalse(self.vg.lv_has_snapshot('test-volumes'))
def test_lv_is_snapshot(self):
self.assertTrue(self.vg.lv_is_snapshot('fake-snapshot'))
self.assertFalse(self.vg.lv_is_snapshot('test-volumes'))
def test_lv_is_open(self):
self.assertTrue(self.vg.lv_is_open('fake-open'))
self.assertFalse(self.vg.lv_is_open('fake-snapshot'))
def test_lv_get_origin(self):
self.assertEqual('fake-volume-1',
self.vg.lv_get_origin('fake-snapshot'))
self.assertFalse(None, self.vg.lv_get_origin('test-volumes'))
def test_activate_lv(self):
with mock.patch.object(self.vg, '_execute'):
self.vg._supports_lvchange_ignoreskipactivation = True
self.vg._execute('lvchange', '-a', 'y', '--yes', '-K',
'fake-vg/my-lv',
root_helper='sudo', run_as_root=True)
self.vg.activate_lv('my-lv')
def test_get_mirrored_available_capacity(self):
self.assertEqual(2.0, self.vg.vg_mirror_free_space(1))
@ddt.data(True, False)
def test_lv_extend(self, has_snapshot):
with mock.patch.object(self.vg, '_execute'):
with mock.patch.object(self.vg, 'lv_has_snapshot'):
self.vg.deactivate_lv = mock.MagicMock()
self.vg.activate_lv = mock.MagicMock()
self.vg.lv_has_snapshot.return_value = has_snapshot
self.vg.extend_volume("test", "2G")
self.vg.lv_has_snapshot.assert_called_once_with("test")
if has_snapshot:
self.vg.activate_lv.assert_called_once_with("test")
self.vg.deactivate_lv.assert_called_once_with("test")
else:
self.vg.activate_lv.assert_not_called()
self.vg.deactivate_lv.assert_not_called()
def test_lv_deactivate(self):
with mock.patch.object(self.vg, '_execute'):
is_active_mock = mock.Mock()
is_active_mock.return_value = False
self.vg._lv_is_active = is_active_mock
self.vg.create_volume('test', '1G')
self.vg.deactivate_lv('test')
@mock.patch('time.sleep')
def test_lv_deactivate_timeout(self, _mock_sleep):
with mock.patch.object(self.vg, '_execute'):
is_active_mock = mock.Mock()
is_active_mock.return_value = True
self.vg._lv_is_active = is_active_mock
self.vg.create_volume('test', '1G')
self.assertRaises(exception.VolumeNotDeactivated,
self.vg.deactivate_lv, 'test')
class BrickLvmTestCaseIgnoreFDWarnings(BrickLvmTestCase):
def setUp(self):
self.configuration = mock.Mock(conf.Configuration)
self.configuration.lvm_suppress_fd_warnings = True
super(BrickLvmTestCaseIgnoreFDWarnings, self).setUp()
| apache-2.0 | 6,732,046,989,841,363,000 | 41.053097 | 79 | 0.557607 | false |
pombredanne/cubes | cubes/workspace.py | 5 | 24385 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import sys
from .metadata import read_model_metadata
from .auth import NotAuthorized
from .common import read_json_file
from .errors import ConfigurationError, ArgumentError, CubesError
from .logging import get_logger
from .calendar import Calendar
from .namespace import Namespace
from .providers import find_dimension
from .localization import LocalizationContext
import os.path
from .compat import ConfigParser
from copy import copy
from collections import OrderedDict, defaultdict
from . import ext
from . import compat
__all__ = [
"Workspace",
]
SLICER_INFO_KEYS = (
"name",
"label",
"description", # Workspace model description
"copyright", # Copyright for the data
"license", # Data license
"maintainer", # Name (and maybe contact) of data maintainer
"contributors", # List of contributors
"visualizers", # List of dicts with url and label of server's visualizers
"keywords", # List of keywords describing server's cubes
"related" # List of dicts with related servers
)
def interpret_config_value(value):
if value is None:
return value
if isinstance(value, compat.string_type):
if value.lower() in ('yes', 'true', 'on'):
return True
elif value.lower() in ('no', 'false', 'off'):
return False
return value
def config_items_to_dict(items):
return dict([ (k, interpret_config_value(v)) for (k, v) in items ])
class Workspace(object):
def __init__(self, config=None, stores=None, load_base_model=True,
**_options):
"""Creates a workspace. `config` should be a `ConfigParser` or a
path to a config file. `stores` should be a dictionary of store
configurations, a `ConfigParser` or a path to a ``stores.ini`` file.
Properties:
* `stores` – dictionary of stores
* `store_infos` – dictionary of store configurations
* `namespace` – default namespace
        * `logger` – workspace logger
        * `root_dir` – root directory where all relative paths are looked for
* `models_dir` – directory with models (if relative, then relative to
the root directory)
* `info` – info dictionary from the info file or info section
* `calendar` – calendar object providing date and time functions
* `ns_languages` – dictionary where keys are namespaces and values
are language to translation path mappings.
"""
# FIXME: **_options is temporary solution/workaround before we get
# better configuration. Used internally. Don't use!
if isinstance(config, compat.string_type):
cp = ConfigParser()
try:
cp.read(config)
except Exception as e:
raise ConfigurationError("Unable to load config %s. "
"Reason: %s" % (config, str(e)))
config = cp
elif not config:
# Read ./slicer.ini
config = ConfigParser()
self.store_infos = {}
self.stores = {}
# Logging
# =======
#Log to file or console
if config.has_option("workspace", "log"):
self.logger = get_logger(path=config.get("workspace", "log"))
else:
self.logger = get_logger()
        # Change the log level if necessary
if config.has_option("workspace", "log_level"):
level = config.get("workspace", "log_level").upper()
self.logger.setLevel(level)
# Set the default models path
if config.has_option("workspace", "root_directory"):
self.root_dir = config.get("workspace", "root_directory")
elif "cubes_root" in _options:
# FIXME: this is quick workaround, see note at the beginning of
# this method
self.root_dir = _options["cubes_root"]
else:
self.root_dir = ""
if config.has_option("workspace", "models_directory"):
self.models_dir = config.get("workspace", "models_directory")
elif config.has_option("workspace", "models_path"):
self.models_dir = config.get("workspace", "models_path")
else:
self.models_dir = ""
if self.root_dir and not os.path.isabs(self.models_dir):
self.models_dir = os.path.join(self.root_dir, self.models_dir)
if self.models_dir:
self.logger.debug("Models root: %s" % self.models_dir)
else:
self.logger.debug("Models root set to current directory")
# Namespaces and Model Objects
# ============================
self.namespace = Namespace()
# Cache of created global objects
self._cubes = {}
# Note: providers are responsible for their own caching
# Info
# ====
self.info = OrderedDict()
if config.has_option("workspace", "info_file"):
path = config.get("workspace", "info_file")
if self.root_dir and not os.path.isabs(path):
path = os.path.join(self.root_dir, path)
info = read_json_file(path, "Slicer info")
for key in SLICER_INFO_KEYS:
self.info[key] = info.get(key)
elif config.has_section("info"):
info = dict(config.items("info"))
if "visualizer" in info:
info["visualizers"] = [ {"label": info.get("label",
info.get("name", "Default")),
"url": info["visualizer"]} ]
for key in SLICER_INFO_KEYS:
self.info[key] = info.get(key)
# Register stores from external stores.ini file or a dictionary
if not stores and config.has_option("workspace", "stores_file"):
stores = config.get("workspace", "stores_file")
# Prepend the root directory if stores is relative
if self.root_dir and not os.path.isabs(stores):
stores = os.path.join(self.root_dir, stores)
if isinstance(stores, compat.string_type):
store_config = ConfigParser()
try:
store_config.read(stores)
except Exception as e:
raise ConfigurationError("Unable to read stores from %s. "
"Reason: %s" % (stores, str(e) ))
for store in store_config.sections():
self._register_store_dict(store,
dict(store_config.items(store)))
elif isinstance(stores, dict):
for name, store in stores.items():
self._register_store_dict(name, store)
elif stores is not None:
raise ConfigurationError("Unknown stores description object: %s" %
(type(stores)))
# Calendar
# ========
if config.has_option("workspace", "timezone"):
timezone = config.get("workspace", "timezone")
else:
timezone = None
if config.has_option("workspace", "first_weekday"):
first_weekday = config.get("workspace", "first_weekday")
else:
first_weekday = 0
self.logger.debug("Workspace calendar timezone: %s first week day: %s"
% (timezone, first_weekday))
self.calendar = Calendar(timezone=timezone,
first_weekday=first_weekday)
# Register Stores
# ===============
#
# * Default store is [store] in main config file
# * Stores are also loaded from main config file from sections with
# name [store_*] (not documented feature)
default = None
if config.has_section("store"):
default = dict(config.items("store"))
if default:
self._register_store_dict("default", default)
# Register [store_*] from main config (not documented)
for section in config.sections():
if section != "store" and section.startswith("store"):
name = section[6:]
self._register_store_dict(name, dict(config.items(section)))
if config.has_section("browser"):
self.browser_options = dict(config.items("browser"))
else:
self.browser_options = {}
if config.has_section("main"):
self.options = dict(config.items("main"))
else:
self.options = {}
# Register Languages
# ==================
#
# Register [language *]
self.ns_languages = defaultdict(dict)
for section in config.sections():
if section.startswith("locale"):
lang = section[9:]
# namespace -> path
for nsname, path in config.items(section):
if nsname == "defalt":
ns = self.namespace
else:
(ns, _) = self.namespace.namespace(nsname)
ns.add_translation(lang, path)
# Authorizer
# ==========
if config.has_option("workspace", "authorization"):
auth_type = config.get("workspace", "authorization")
options = dict(config.items("authorization"))
options["cubes_root"] = self.root_dir
self.authorizer = ext.authorizer(auth_type, **options)
else:
self.authorizer = None
# Configure and load models
# =========================
# Models are searched in:
# [model]
# [models] * <- depreciated!
# TODO: add this for nicer zero-conf
# root/model.json
# root/main.cubesmodel
# models/*.cubesmodel
models = []
# Undepreciated
if config.has_section("model"):
if not config.has_option("model", "path"):
raise ConfigurationError("No model path specified")
path = config.get("model", "path")
models.append(("main", path))
# TODO: Depreciate this too
if config.has_section("models"):
models += config.items("models")
for model, path in models:
self.logger.debug("Loading model %s" % model)
self.import_model(path)
def flush_lookup_cache(self):
"""Flushes the cube lookup cache."""
self._cubes.clear()
# TODO: flush also dimensions
def _get_namespace(self, ref):
"""Returns namespace with ference `ref`"""
if not ref or ref == "default":
return self.namespace
return self.namespace(ref)[0]
def add_translation(self, locale, trans, ns="default"):
"""Add translation `trans` for `locale`. `ns` is a namespace. If no
namespace is specified, then default (global) is used."""
namespace = self._get_namespace(ns)
namespace.add_translation(locale, trans)
def _register_store_dict(self, name, info):
info = dict(info)
try:
type_ = info.pop("type")
except KeyError:
try:
type_ = info.pop("backend")
except KeyError:
raise ConfigurationError("Store '%s' has no type specified" % name)
else:
self.logger.warn("'backend' is depreciated, use 'type' for "
"store (in %s)." % str(name))
self.register_store(name, type_, **info)
def register_default_store(self, type_, **config):
"""Convenience function for registering the default store. For more
information see `register_store()`"""
self.register_store("default", type_, **config)
def register_store(self, name, type_, include_model=True, **config):
"""Adds a store configuration."""
config = dict(config)
if name in self.store_infos:
raise ConfigurationError("Store %s already registered" % name)
self.store_infos[name] = (type_, config)
# Model and provider
# ------------------
# If store brings a model, then include it...
if include_model and "model" in config:
model = config.pop("model")
else:
model = None
# Get related model provider or override it with configuration
store_factory = ext.store.factory(type_)
if hasattr(store_factory, "related_model_provider"):
provider = store_factory.related_model_provider
else:
provider = None
provider = config.pop("model_provider", provider)
nsname = config.pop("namespace", None)
if model:
self.import_model(model, store=name, namespace=nsname,
provider=provider)
elif provider:
# Import empty model and register the provider
self.import_model({}, store=name, namespace=nsname,
provider=provider)
self.logger.debug("Registered store '%s'" % name)
def _store_for_model(self, metadata):
"""Returns a store for model specified in `metadata`. """
store_name = metadata.get("store")
if not store_name and "info" in metadata:
store_name = metadata["info"].get("store")
store_name = store_name or "default"
return store_name
# TODO: this is very confusing process, needs simplification
# TODO: change this to: add_model_provider(provider, info, store, languages, ns)
def import_model(self, model=None, provider=None, store=None,
translations=None, namespace=None):
"""Registers the `model` in the workspace. `model` can be a
metadata dictionary, filename, path to a model bundle directory or a
URL.
If `namespace` is specified, then the model's objects are stored in
the namespace of that name.
`store` is an optional name of data store associated with the model.
If not specified, then the one from the metadata dictionary will be
used.
Model's provider is registered together with loaded metadata. By
default the objects are registered in default global namespace.
Note: No actual cubes or dimensions are created at the time of calling
this method. The creation is deferred until
:meth:`cubes.Workspace.cube` or :meth:`cubes.Workspace.dimension` is
called.
"""
# 1. Metadata
# -----------
# Make sure that the metadata is a dictionary
#
# TODO: Use "InlineModelProvider" and "FileBasedModelProvider"
if store and not isinstance(store, compat.string_type):
raise ArgumentError("Store should be provided by name "
"(as a string).")
# 1. Model Metadata
# -----------------
# Make sure that the metadata is a dictionary
#
# TODO: Use "InlineModelProvider" and "FileBasedModelProvider"
if isinstance(model, compat.string_type):
self.logger.debug("Importing model from %s. "
"Provider: %s Store: %s NS: %s"
% (model, provider, store, namespace))
path = model
if self.models_dir and not os.path.isabs(path):
path = os.path.join(self.models_dir, path)
model = read_model_metadata(path)
elif isinstance(model, dict):
self.logger.debug("Importing model from dictionary. "
"Provider: %s Store: %s NS: %s"
% (provider, store, namespace))
elif model is None:
model = {}
else:
raise ConfigurationError("Unknown model '%s' "
"(should be a filename or a dictionary)"
% model)
# 2. Model provider
# -----------------
# Create a model provider if name is given. Otherwise assume that the
# `provider` is a ModelProvider subclass instance
if isinstance(provider, compat.string_type):
provider = ext.model_provider(provider, model)
# TODO: remove this, if provider is external, it should be specified
if not provider:
provider_name = model.get("provider", "default")
provider = ext.model_provider(provider_name, model)
# 3. Store
# --------
# Link the model with store
store = store or model.get("store")
if store or (hasattr(provider, "requires_store") \
and provider.requires_store()):
provider.bind(self.get_store(store))
# 4. Namespace
# ------------
if namespace:
if namespace == "default":
ns = self.namespace
elif isinstance(namespace, compat.string_type):
(ns, _) = self.namespace.namespace(namespace, create=True)
else:
ns = namespace
elif store == "default":
ns = self.namespace
else:
# Namespace with the same name as the store.
(ns, _) = self.namespace.namespace(store, create=True)
ns.add_provider(provider)
def add_slicer(self, name, url, **options):
"""Register a slicer as a model and data provider."""
self.register_store(name, "slicer", url=url, **options)
self.import_model({}, provider="slicer", store=name)
def cube_names(self, identity=None):
"""Return names all available cubes."""
return [cube["name"] for cube in self.list_cubes()]
# TODO: this is not loclized!!!
def list_cubes(self, identity=None):
"""Get a list of metadata for cubes in the workspace. Result is a list
of dictionaries with keys: `name`, `label`, `category`, `info`.
The list is fetched from the model providers on the call of this
method.
If the workspace has an authorizer, then it is used to authorize the
cubes for `identity` and only authorized list of cubes is returned.
"""
all_cubes = self.namespace.list_cubes(recursive=True)
if self.authorizer:
by_name = dict((cube["name"], cube) for cube in all_cubes)
names = [cube["name"] for cube in all_cubes]
authorized = self.authorizer.authorize(identity, names)
all_cubes = [by_name[name] for name in authorized]
return all_cubes
def cube(self, ref, identity=None, locale=None):
"""Returns a cube with full cube namespace reference `ref` for user
`identity` and translated to `locale`."""
if not isinstance(ref, compat.string_type):
raise TypeError("Reference is not a string, is %s" % type(ref))
if self.authorizer:
authorized = self.authorizer.authorize(identity, [ref])
if not authorized:
raise NotAuthorized
# If we have a cached cube, return it
# See also: flush lookup
cube_key = (ref, identity, locale)
if cube_key in self._cubes:
return self._cubes[cube_key]
# Find the namespace containing the cube – we will need it for linking
# later
(namespace, provider, basename) = self.namespace.find_cube(ref)
cube = provider.cube(basename, locale=locale, namespace=namespace)
cube.namespace = namespace
cube.store = provider.store
# TODO: cube.ref -> should be ref and cube.name should be basename
cube.basename = basename
cube.name = ref
lookup = namespace.translation_lookup(locale)
if lookup:
            # TODO: pass lookup instead of just the first found translation
context = LocalizationContext(lookup[0])
trans = context.object_localization("cubes", cube.name)
cube = cube.localized(trans)
# Cache the cube
self._cubes[cube_key] = cube
return cube
def dimension(self, name, locale=None, namespace=None, provider=None):
"""Returns a dimension with `name`. Raises `NoSuchDimensionError` when
no model published the dimension. Raises `RequiresTemplate` error when
model provider requires a template to be able to provide the
dimension, but such template is not a public dimension.
The standard lookup when linking a cube is:
1. look in the cube's provider
2. look in the cube's namespace – all providers within that namespace
3. look in the default (global) namespace
"""
return find_dimension(name, locale,
namespace or self.namespace,
provider)
def _browser_options(self, cube):
"""Returns browser configuration options for `cube`. The options are
taken from the configuration file and then overriden by cube's
`browser_options` attribute."""
options = dict(self.browser_options)
if cube.browser_options:
options.update(cube.browser_options)
return options
def browser(self, cube, locale=None, identity=None):
"""Returns a browser for `cube`."""
# TODO: bring back the localization
# model = self.localized_model(locale)
if isinstance(cube, compat.string_type):
cube = self.cube(cube, identity=identity)
locale = locale or cube.locale
if isinstance(cube.store, compat.string_type):
store_name = cube.store or "default"
store = self.get_store(store_name)
store_type = self.store_infos[store_name][0]
store_info = self.store_infos[store_name][1]
elif cube.store:
store = cube.store
store_info = store.options or {}
else:
store = self.get_store("default")
store_info = store.options or {}
store_type = store.store_type
if not store_type:
raise CubesError("Store %s has no store_type set" % store)
cube_options = self._browser_options(cube)
# TODO: merge only keys that are relevant to the browser!
options = dict(store_info)
options.update(cube_options)
# TODO: Construct options for the browser from cube's options
# dictionary and workspece default configuration
browser_name = cube.browser
if not browser_name and hasattr(store, "default_browser_name"):
browser_name = store.default_browser_name
if not browser_name:
browser_name = store_type
if not browser_name:
raise ConfigurationError("No store specified for cube '%s'" % cube)
browser = ext.browser(browser_name, cube, store=store,
locale=locale, calendar=self.calendar,
**options)
# TODO: remove this once calendar is used in all backends
browser.calendar = self.calendar
return browser
def cube_features(self, cube, identity=None):
"""Returns browser features for `cube`"""
# TODO: this might be expensive, make it a bit cheaper
# recycle the feature-providing browser or something. Maybe use class
# method for that
return self.browser(cube, identity).features()
def get_store(self, name=None):
"""Opens a store `name`. If the store is already open, returns the
existing store."""
name = name or "default"
if name in self.stores:
return self.stores[name]
try:
type_, options = self.store_infos[name]
except KeyError:
raise ConfigurationError("Unknown store '{}'".format(name))
# TODO: temporary hack to pass store name and store type
store = ext.store(type_, store_type=type_, **options)
self.stores[name] = store
return store
def close(self):
"""Closes the workspace with all open stores and other associated
resources."""
for store in self.open_stores:
store.close()
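# Hedged usage sketch (appended for illustration; not part of the original
# module). Assumes a local "slicer.ini" with [workspace] and [store] sections
# and a cube named "sales" published by some model provider:
#
#   workspace = Workspace("slicer.ini")
#   browser = workspace.browser("sales")
#   result = browser.aggregate()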
| mit | 7,782,696,129,232,110,000 | 35.093333 | 84 | 0.572138 | false |
donspaulding/adspygoogle | examples/adspygoogle/adwords/v201306/optimization/get_keyword_bid_simulations.py | 3 | 3274 | #!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets a bid landscape for an ad group and a criterion. To get ad
groups, run get_ad_groups.py. To get criteria, run
get_keywords.py.
Tags: BidLandscapeService.getBidLandscape
Api: AdWordsOnly
"""
__author__ = '[email protected] (Kevin Winter)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
ad_group_id = 'INSERT_AD_GROUP_ID_HERE'
criterion_id = 'INSERT_CRITERION_ID_HERE'
def main(client, ad_group_id, criterion_id):
# Initialize appropriate service.
data_service = client.GetDataService(version='v201306')
# Construct bid landscape selector object and retrieve bid landscape.
selector = {
'fields': ['AdGroupId', 'CriterionId', 'StartDate', 'EndDate', 'Bid',
'LocalClicks', 'LocalCost', 'MarginalCpc', 'LocalImpressions'],
'predicates': [
{
'field': 'AdGroupId',
'operator': 'EQUALS',
'values': [ad_group_id]
},
{
'field': 'CriterionId',
'operator': 'EQUALS',
'values': [criterion_id]
}
]
}
bid_landscapes = data_service.GetCriterionBidLandscape(selector)[0]
# Display results.
if 'entries' in bid_landscapes:
for bid_landscape in bid_landscapes['entries']:
if bid_landscape['BidLandscape_Type'] == 'CriterionBidLandscape':
print ('Criterion bid landscape with ad group id \'%s\', criterion id '
'\'%s\', start date \'%s\', end date \'%s\', with landscape '
'points was found:'
% (bid_landscape['adGroupId'], bid_landscape['criterionId'],
bid_landscape['startDate'], bid_landscape['endDate']))
for bid_landscape_point in bid_landscape['landscapePoints']:
print (' bid: %s => clicks: %s, cost: %s, marginalCpc: %s, '
'impressions: %s'
% (bid_landscape_point['bid']['microAmount'],
bid_landscape_point['clicks'],
bid_landscape_point['cost']['microAmount'],
bid_landscape_point['marginalCpc']['microAmount'],
bid_landscape_point['impressions']))
else:
print 'No bid landscapes found.'
print
print ('Usage: %s units, %s operations' % (client.GetUnits(),
client.GetOperations()))
if __name__ == '__main__':
# Initialize client object.
client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
main(client, ad_group_id, criterion_id)
| apache-2.0 | 4,154,781,893,915,000,300 | 34.978022 | 80 | 0.613317 | false |
eproche/mrSleuthy | sleuth_crunch.py | 1 | 3424 | import sys
import logging
import click
import numpy as np
from numpy import *
from sklearn.metrics.pairwise import cosine_similarity
def vocab_build(corpus):
"""Builds set of unique words """
lexicon = set()
for doc in corpus:
doc = doc.split()
lexicon.update([word for word in doc])
return lexicon
def attach_tfidf_weights(storage, vocab, tf_arr):
"""Appends tf-idf weights to each word """
wordlist = vocab
storage_weighted = []
for i in range(len(storage)):
sys.stdout.write(str(i)+",")
sys.stdout.flush()
docweights = []
stor_list = storage[i].split()
for word in stor_list:
words = [word,0]
for j in range(len(wordlist)):
if (wordlist[j] == word):
words[1] = tf_arr[i][j]
docweights.append(words)
storage_weighted.append(docweights)
return storage_weighted
def featureVec(storage, model, num_features):
"""creates a vector representation for each image's descriptions (document)
"""
index2word_set = set(model.index2word)
realWords = [] #in model
notfound = []
feature_vecs = []
tot_wei = 0.0 #tf-idf weight total
for i in range(len(storage)):
realWords.append([])
for word in storage[i]:
#cap = word[0].capitalize() catch if capitalized proper noun in model
#word[0] = "/en/"+word[0] if using freebase_skipgram_1000.bin.gz
if (word[0] in index2word_set):
realWords[i].append(word)
tot_wei += word[1]
continue
print tot_wei
for i in range(len(realWords)):
feature_vec = np.zeros((num_features), dtype="float32")
num_words = 0
for realword in realWords[i]:
weighted_vec = model[realword[0]]*(realword[1] / tot_wei) #normalized tf-idf weight
feature_vec = np.add(feature_vec, weighted_vec)
num_words += 1
feature_vec = np.divide(feature_vec, num_words) #average of each word vector
feature_vecs.append(feature_vec)
return feature_vecs
def featureVec_unweighted(storage, model, num_features):
""" Same as featureVec, but no tf-idf weights"""
index2word_set = set(model.index2word)
realWords = []
feature_vecs = []
for i in range(len(storage)):
realWords.append([])
storage[i] = storage[i].split()
for word in storage[i]:
#word = "/en/"+word if using freebase_skipgram_1000.bin.gz
if word in index2word_set:
realWords[i].append(word)
else:
click.secho("notfound: ", fg='red')
click.echo(word)
for i in range(len(realWords)):
feature_vec = np.zeros((num_features), dtype="float32")
num_words = 0
for realword in realWords[i]:
weighted_vec = model[realword]
feature_vec = np.add(feature_vec, weighted_vec)
num_words += 1
feature_vec = np.divide(feature_vec, num_words)
feature_vecs.append(feature_vec)
return feature_vecs
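# Hedged usage sketch (added for illustration, not in the original file).
# Assumes `model` is a loaded gensim word2vec model with 300-dim vectors,
# `corpus` is a list of description strings, and `tf_arr` is a tf-idf matrix
# whose columns are aligned with `vocab_list` (produced by a separate
# vectorizer step not shown here):
#   vocab_list = sorted(vocab_build(corpus))
#   weighted = attach_tfidf_weights(corpus, vocab_list, tf_arr)
#   vecs = featureVec(weighted, model, 300)
#   sims = compare(corpus, vecs)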
def compare(storage, feature_vecs):
results = zeros((len(storage),len(storage)))
min_result = 1.0
max_result = 0.0
for i in range(len(storage)):
for j in range(len(storage)):
result = cosine_similarity(feature_vecs[i], feature_vecs[j])
results[i][j] = result
if result < min_result:
min_result = result
if result > max_result:
max_result = result
# sys.stdout.write('.') #progress
# sys.stdout.flush()
# sys.stdout.write('\n')
#used normalize similarity scores from 0 to 1
print 'max: ' + str(max_result)
print 'min: ' + str(min_result)
max_result -= min_result
for i in range(len(results)): #normalization
for j in range(len(results[i])):
results[i][j] = (results[i][j] - min_result) / max_result
return results | mit | -3,500,204,896,196,443,600 | 30.136364 | 86 | 0.680783 | false |
adityacs/ansible | lib/ansible/modules/web_infrastructure/ansible_tower/tower_job_template.py | 12 | 10067 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: tower_job_template
version_added: "2.3"
short_description: create, update, or destroy Ansible Tower job_template.
description:
- Create, update, or destroy Ansible Tower job templates. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- Name to use for the job_template.
required: True
description:
description:
- Description to use for the job_template.
required: False
default: null
job_type:
description:
- The job_type to use for the job_template.
required: True
choices: ["run", "check", "scan"]
inventory:
description:
- Inventory to use for the job_template.
required: False
default: null
project:
description:
- Project to use for the job_template.
required: True
playbook:
description:
- Playbook to use for the job_template.
required: True
machine_credential:
description:
- Machine_credential to use for the job_template.
required: False
default: null
cloud_credential:
description:
- Cloud_credential to use for the job_template.
required: False
default: null
network_credential:
description:
- The network_credential to use for the job_template.
required: False
default: null
forks:
description:
- The number of parallel or simultaneous processes to use while executing the playbook.
required: False
default: null
limit:
description:
        - A host pattern to further constrain the list of hosts managed or affected by the playbook.
required: False
default: null
verbosity:
description:
- Control the output level Ansible produces as the playbook runs.
required: False
choices: ["verbose", "debug"]
default: null
job_tags:
description:
- The job_tags to use for the job_template.
required: False
default: null
skip_tags:
description:
- The skip_tags to use for the job_template.
required: False
default: null
host_config_key:
description:
- Allow provisioning callbacks using this host config key.
required: False
default: null
extra_vars_path:
description:
- Path to the extra_vars yaml file.
required: False
default: null
ask_extra_vars:
description:
- Prompt user for extra_vars on launch.
required: False
default: False
ask_tags:
description:
- Prompt user for job tags on launch.
required: False
default: False
ask_job_type:
description:
- Prompt user for job type on launch.
required: False
default: False
ask_inventory:
description:
        - Prompt user for inventory on launch.
required: False
default: False
ask_credential:
description:
- Prompt user for credential on launch.
required: False
default: False
become_enabled:
description:
- Should become_enabled.
required: False
default: False
state:
description:
- Desired state of the resource.
required: False
default: "present"
choices: ["present", "absent"]
tower_host:
description:
- URL to your Tower instance.
required: False
default: null
tower_username:
description:
- Username for your Tower instance.
required: False
default: null
tower_password:
description:
- Password for your Tower instance.
required: False
default: null
tower_verify_ssl:
description:
- Dis/allow insecure connections to Tower. If C(no), SSL certificates will not be validated.
This should only be used on personally controlled sites using self-signed certificates.
required: False
default: True
tower_config_file:
description:
- Path to the Tower config file. See notes.
required: False
default: null
requirements:
- "python >= 2.6"
- "ansible-tower-cli >= 3.0.3"
notes:
  - If no I(config_file) is provided, we will attempt to use the tower-cli library
defaults to find your Tower host information.
- I(config_file) should contain Tower configuration in the following format
host=hostname
username=username
password=password
'''
EXAMPLES = '''
- name: Create tower Ping job template
tower_job_template:
name: Ping
job_type: run
inventory: Local
project: Demo
playbook: ping.yml
machine_credential: Local
state: present
tower_config_file: "~/tower_cli.cfg"
'''
try:
import tower_cli
import tower_cli.utils.exceptions as exc
from tower_cli.conf import settings
from ansible.module_utils.ansible_tower import tower_auth_config, tower_check_mode
HAS_TOWER_CLI = True
except ImportError:
HAS_TOWER_CLI = False
def update_fields(p):
'''This updates the module field names
to match the field names tower-cli expects to make
calling of the modify/delete methods easier.
'''
params = p.copy()
field_map = {
'ask_extra_vars': 'ask_variables_on_launch',
        'ask_limit': 'ask_limit_on_launch',
'ask_tags': 'ask_tags_on_launch',
'ask_job_type': 'ask_job_type_on_launch',
'machine_credential': 'credential',
}
params_update = {}
for old_k, new_k in field_map.items():
v = params.pop(old_k)
params_update[new_k] = v
extra_vars = params.get('extra_vars_path')
if extra_vars is not None:
params_update['extra_vars'] = '@' + extra_vars
params.update(params_update)
return params
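# Illustrative sketch (not executed by the module; the values below are
# hypothetical) of what update_fields() does to a parameter dict:
#
#   example = {'ask_extra_vars': True, 'ask_limit': False, 'ask_tags': False,
#              'ask_job_type': False, 'machine_credential': 'Local',
#              'extra_vars_path': '/tmp/vars.yml'}
#   update_fields(example)
#   # -> {'ask_variables_on_launch': True, 'ask_limit_on_launch': False,
#   #     'ask_tags_on_launch': False, 'ask_job_type_on_launch': False,
#   #     'credential': 'Local', 'extra_vars_path': '/tmp/vars.yml',
#   #     'extra_vars': '@/tmp/vars.yml'}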
def update_resources(module, p):
params = p.copy()
identity_map = {
'project': 'name',
'inventory': 'name',
'machine_credential': 'name',
'network_credential': 'name',
'cloud_credential': 'name',
}
for k,v in identity_map.items():
try:
if params[k]:
key = 'credential' if '_credential' in k else k
result = tower_cli.get_resource(key).get(**{v:params[k]})
params[k] = result['id']
except (exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update job template: {0}'.format(excinfo), changed=False)
return params
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
description = dict(),
job_type = dict(choices=['run', 'check', 'scan'], required=True),
inventory = dict(),
project = dict(required=True),
playbook = dict(required=True),
machine_credential = dict(),
cloud_credential = dict(),
network_credential = dict(),
forks = dict(type='int'),
limit = dict(),
verbosity = dict(choices=['verbose', 'debug']),
job_tags = dict(),
skip_tags = dict(),
host_config_key = dict(),
extra_vars_path = dict(type='path', required=False),
ask_extra_vars = dict(type='bool', default=False),
ask_limit = dict(type='bool', default=False),
ask_tags = dict(type='bool', default=False),
ask_job_type = dict(type='bool', default=False),
ask_inventory = dict(type='bool', default=False),
ask_credential = dict(type='bool', default=False),
become_enabled = dict(type='bool', default=False),
tower_host = dict(),
tower_username = dict(),
tower_password = dict(no_log=True),
tower_verify_ssl = dict(type='bool', default=True),
tower_config_file = dict(type='path'),
state = dict(choices=['present', 'absent'], default='present'),
),
supports_check_mode=True
)
if not HAS_TOWER_CLI:
module.fail_json(msg='ansible-tower-cli required for this module')
name = module.params.get('name')
state = module.params.get('state')
json_output = {'job_template': name, 'state': state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
jt = tower_cli.get_resource('job_template')
params = update_resources(module, module.params)
params = update_fields(params)
params['create_on_missing'] = True
try:
if state == 'present':
result = jt.modify(**params)
json_output['id'] = result['id']
elif state == 'absent':
result = jt.delete(**params)
except (exc.ConnectionError, exc.BadRequest, exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update job template: {0}'.format(excinfo), changed=False)
json_output['changed'] = result['changed']
module.exit_json(**json_output)
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
| gpl-3.0 | -1,241,861,879,647,672,800 | 29.785933 | 102 | 0.608026 | false |
jluissandovalm/lammps_smd | tools/moltemplate/src/remove_duplicate_atoms.py | 30 | 1277 | #!/usr/bin/env python
"""
Get rid of lines containing duplicate copies of the same atom in the "Atoms"
section of a LAMMPS data file. Duplicate lines which occur later are
preserved and the earlier lines are erased.
The text is read from a file named on the command line or, if none is given,
from sys.stdin. This program does not parse the entire data file; the text
from the "Atoms" section of the LAMMPS file must be extracted in advance
before it is sent to this program.
"""
import sys
in_stream = sys.stdin
f = None
fname = None
if len(sys.argv) == 2:
fname = sys.argv[1]
f = open(fname, 'r')
in_stream = f
atom_ids_in_use = set([])
lines = in_stream.readlines()
# Start at the end of the file and read backwards.
# If duplicate lines exist, eliminate the ones that occur earlier in the file.
i = len(lines)
while i > 0:
i -= 1
line_orig = lines[i]
line = line_orig.rstrip('\n')
if '#' in line_orig:
ic = line.find('#')
line = line_orig[:ic]
tokens = line.strip().split()
if len(tokens) > 0:
atom_id = tokens[0]
if atom_id in atom_ids_in_use:
del lines[i]
else:
atom_ids_in_use.add(atom_id)
else:
del lines[i]
for line in lines:
sys.stdout.write(line)
if f != None:
f.close()
| gpl-2.0 | -1,005,610,992,487,647,600 | 23.09434 | 79 | 0.620987 | false |
wangjeaf/CSSCheckStyle | ckstyle/entity/EntityUtil.py | 1 | 2039 | from ckstyle.browsers.BinaryRule import ALL
import re
replacer1 = re.compile('\s*{\s*')
replacer2 = re.compile('\s*:\s*')
replacer3 = re.compile('\s*;\s*}\s*')
replacer4 = re.compile('\s*;\s*')
replacer5 = re.compile('\s\s+')
replacer6 = re.compile('\(\s+')
replacer7 = re.compile('\s+\)')
replacer8 = re.compile('\s+,')
replacer9 = re.compile(',\s+')
class Cleaner():
@staticmethod
def clean(msg):
msg = msg.strip().replace('\r', '').replace('\n', '').replace(' ' * 4, ' ')
msg = replacer1.sub('{', msg)
msg = replacer2.sub(':', msg)
msg = replacer3.sub('}', msg)
msg = replacer4.sub(';', msg)
msg = replacer5.sub(' ', msg)
msg = replacer6.sub('(', msg)
msg = replacer7.sub(')', msg)
msg = replacer8.sub(',', msg)
msg = replacer9.sub(',', msg)
msg = msg.strip()
return msg
@staticmethod
def clearName(name):
name = name.strip()
        # strip CSS hack prefixes such as '_', '*', '+', '#' (e.g. '#padding: 10px')
if name.startswith('_') or name.startswith('*') or name.startswith('+') or name.startswith('#'):
name = name[1:]
if name.startswith('-'):
if name.startswith('-moz-') or name.startswith('-webkit-') or name.startswith('-ms-') or name.startswith('-o-') or name.startswith('-khtml-'):
name = '-'.join(name.split('-')[2:])
return name.lower()
@staticmethod
def clearValue(value):
value = value.strip()
if value.endswith(';'):
value = value[0: - 1]
return value
@staticmethod
def clearValues(values):
values = values.strip()
return values
@staticmethod
def clearSelector(selector):
return ' '.join(selector.split('\n')).strip()
@staticmethod
def clearComment(comment):
comment = comment.strip()
if len(comment) != 0 and comment.find('\n') == -1:
comment = comment.replace('/*', '').replace('*/', '').strip()
comment = '/* ' + comment + ' */'
return comment
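# Minimal usage sketch (illustrative only, not part of the library):
#
#   css = '.nav  {  color : red ;  }'
#   Cleaner.clean(css)                          # -> '.nav{color:red}'
#   Cleaner.clearName('-webkit-border-radius')  # -> 'border-radius'
#   Cleaner.clearValue('10px;')                 # -> '10px'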
| bsd-3-clause | 3,422,913,676,587,707,400 | 31.365079 | 154 | 0.538009 | false |
joachimmetz/l2tdevtools | tests/build_helpers/dpkg.py | 2 | 3373 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the helper for building projects from source."""
import os
import unittest
from l2tdevtools import projects
from l2tdevtools.build_helpers import dpkg
from tests import test_lib
class DPKGBuildHelperTest(test_lib.BaseTestCase):
"""Tests for the helper to build dpkg packages (.deb)."""
# pylint: disable=protected-access
# TODO: add tests for _BuildPrepare
# TODO: add tests for _BuildFinalize
# TODO: add tests for _CheckIsInstalled
# TODO: add tests for _CreateOriginalSourcePackage
# TODO: add tests for _CreateOriginalSourcePackageFromZip
# TODO: add tests for _CreatePackagingFiles
# TODO: add tests for _GetBuildHostDistribution
def testReadLSBReleaseConfigurationFile(self):
"""Tests the _ReadLSBReleaseConfigurationFile function."""
test_path = self._GetTestFilePath(['lsb-release'])
self._SkipIfPathNotExists(test_path)
project_definition = projects.ProjectDefinition('test')
l2tdevtools_path = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
test_build_helper = dpkg.DPKGBuildHelper(
project_definition, l2tdevtools_path, {})
lsb_release_values = test_build_helper._ReadLSBReleaseConfigurationFile(
test_path)
self.assertEqual(len(lsb_release_values), 4)
expected_keys = [
'distrib_codename', 'distrib_description', 'distrib_id',
'distrib_release']
self.assertEqual(sorted(lsb_release_values.keys()), expected_keys)
# TODO: add tests for _RemoveOlderDPKGPackages
# TODO: add tests for _RemoveOlderOriginalSourcePackage
# TODO: add tests for _RemoveOlderSourceDPKGPackages
def testRunLSBReleaseCommand(self):
"""Tests the _RunLSBReleaseCommand function."""
project_definition = projects.ProjectDefinition('test')
l2tdevtools_path = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
test_build_helper = dpkg.DPKGBuildHelper(
project_definition, l2tdevtools_path, {})
output = test_build_helper._RunLSBReleaseCommand()
if os.path.exists('/usr/bin/lsb_release'):
self.assertIsNotNone(output)
else:
self.assertIsNone(output)
# TODO: add tests for CheckBuildDependencies
class ConfigureMakeDPKGBuildHelperTest(test_lib.BaseTestCase):
"""Tests for the helper to build dpkg packages (.deb)."""
# TODO: add tests for Build
# TODO: add tests for CheckBuildRequired
# TODO: add tests for Clean
class ConfigureMakeSourceDPKGBuildHelperTest(test_lib.BaseTestCase):
"""Tests for the helper to build source dpkg packages (.deb)."""
# TODO: add tests for Build
# TODO: add tests for CheckBuildRequired
# TODO: add tests for Clean
class SetupPyDPKGBuildHelperTest(test_lib.BaseTestCase):
"""Tests for the helper to build dpkg packages (.deb)."""
# TODO: add tests for _GetFilenameSafeProjectInformation
# TODO: add tests for Build
# TODO: add tests for CheckBuildRequired
# TODO: add tests for Clean
class SetupPySourceDPKGBuildHelperTest(test_lib.BaseTestCase):
"""Tests for the helper to build source dpkg packages (.deb)."""
# TODO: add tests for _GetFilenameSafeProjectInformation
# TODO: add tests for Build
# TODO: add tests for CheckBuildRequired
# TODO: add tests for Clean
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -7,068,163,193,492,786,000 | 30.231481 | 76 | 0.725763 | false |
aristanetworks/arista-ovs-quantum | quantum/agent/common/config.py | 4 | 1122 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from quantum.common import config
from quantum.openstack.common import cfg
def setup_conf():
bind_opts = [
cfg.StrOpt('state_path',
default='/var/lib/quantum',
help='Top-level directory for maintaining dhcp state'),
]
conf = cfg.CommonConfigOpts()
conf.register_opts(bind_opts)
return conf
# add a logging setup method here for convenience
setup_logging = config.setup_logging
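# Minimal usage sketch (illustrative; assumes the openstack-common cfg API in
# which the config object is callable to parse arguments):
#
#   conf = setup_conf()
#   conf(args=[])                  # parse defaults and any config files
#   state_dir = conf.state_path    # '/var/lib/quantum' unless overridden
#   setup_logging(conf)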
| apache-2.0 | 5,218,720,528,014,542,000 | 31.057143 | 78 | 0.699643 | false |
SigPloiter/SigPloit | gtp/gtp_v2_core/utilities/configuration_parser.py | 1 | 15713 | # configuration_parser.py
#
# Copyright 2018 Rosalia d'Alessandro
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#!/usr/bin/env python
# -*- coding: utf-8 -*-a
import os
import sys
sys.path.insert(0, os.path.join(os.getcwd(), 'gtp/'))
from configobj import ConfigObj, ConfigObjError
from gtp_v2_core.tunnel_mgmt_messages.create_bearer import CreateBearerRequest, CreateBearerResponse
from gtp_v2_core.path_mgmt_messages.echo import EchoRequest, EchoResponse
from gtp_v2_core.tunnel_mgmt_messages.create_session import CreateSessionRequest, CreateSessionResponse
from gtp_v2_core.tunnel_mgmt_messages.delete_session import DeleteSessionRequest, DeleteSessionResponse
from gtp_v2_core.commons.gtp_v2_commons import GTPmessageTypeDigit
from gtp_v2_core.tunnel_mgmt_messages.modify_bearer import ModifyBearerRequest, ModifyBearerResponse
from gtp_v2_core.tunnel_mgmt_messages.delete_bearer import DeleteBearerRequest, DeleteBearerResponse
from gtp_v2_core.restoration_and_recovery.delete_pdn_connection_set import DeletePDNConnectionSetRequest
from gtp_v2_core.utilities.utilities import logNormal, logErr, logOk, logWarn
##
## @brief Class implementing a Configuration Parser
##
class parseConfigs(object):
'''
classdocs
'''
def __init__(self, config_file, verbose = True):
'''
Constructor
'''
self.__msgs = []
        if config_file is None or config_file == "":
raise Exception("No config file provided")
self.__cfg = config_file
self.__configs = {'interface': None,
'base_message_list': [],
'3gpp_messages_list': [],
'IES': []}
self.__gtp_port = 2123
self.__version = 0x02
self.__verbose = verbose
self.__parseConfigs()
def __parseConfigs(self):
confobj = ConfigObj(self.__cfg)
if 'GENERIC' not in confobj.sections:
raise ConfigObjError('Section GENERIC is required')
if 'port' in confobj['GENERIC']:
self.__gtp_port = int(confobj['GENERIC']['port'])
if 'version' in confobj['GENERIC']:
            self.__version = int(confobj['GENERIC']['version'])
        if 'num_msg' in confobj['GENERIC']:
            self.__num_msg = int(confobj['GENERIC']['num_msg'])
        else:
self.__num_msg = 1
self.__msgs = self.__create_messages(confobj)
def __format_base_messages(self, confobj):
if 'base_message_list' not in confobj['GENERIC']:
logWarn("Base message list empty",
verbose = self.__verbose,
TAG = "parseConfig")
return []
self.__configs['base_message_list'] = confobj['GENERIC']['base_message_list']
msgs = []
for msg_type in self.__configs['base_message_list']:
if int(msg_type) == GTPmessageTypeDigit["echo-request"] :
i = 0
while i < self.__num_msg :
msgs.append(EchoRequest())
i += 1
elif int(msg_type) == GTPmessageTypeDigit["echo-response"] :
i = 0
while i < self.__num_msg :
msgs.append(EchoResponse(1))
i += 1
else:
raise Exception("%s:%s - Invalid base msg type "
"%d"%(self.__class__.__name__,
"__format_base_messages",
int(msg_type)))
return msgs
def __format_interface_msg(self, confobj):
msgs = []
if confobj is None:
raise Exception("%s:%s - Configuration Object is None. "
%(self.__class__.__name__, "__format_interface_msg"))
if '3gpp_messages_list' not in confobj['GENERIC']:
logWarn("3gpp message list empty",
verbose = self.__verbose,
TAG = "parseConfig")
return []
self.__configs['3gpp_messages_list'] = confobj['GENERIC']['3gpp_messages_list']
if 'IES' not in confobj.sections:
raise ConfigObjError('Section IES is required')
if 'interface' not in confobj['GENERIC']:
raise ConfigObjError('Value "GENERIC.interface" is required')
self.__configs['interface'] = confobj['GENERIC']['interface']
recovery = True
if 'recovery' in confobj['IES']:
recovery = int(confobj['IES']['recovery'])
for msg_type in self.__configs['3gpp_messages_list']:
if int(msg_type) == GTPmessageTypeDigit["create-session-request"] :
i = 0
while i < self.__num_msg :
msgs.append(CreateSessionRequest(
source_ip = confobj['GENERIC']['source_ip'],
interface = int(self.__configs['interface']),
imsi = confobj['IES']['imsi'],
mcc = confobj['IES']['mcc'],
mnc = confobj['IES']['mnc'],
lac = int(confobj['IES']['lac']),
rac = int(confobj['IES']['rac']),
apn = confobj['IES']['apn'],
p_dns = confobj['IES']['primary_dns'],
s_dns = confobj['IES']['secondary_dns'],
gsn = confobj['IES']['gsn'],
phone= confobj['IES']['msisdn'],
imei = confobj['IES']['imei'],
rat_type = confobj['IES']['rat_type'],
ebi = int(confobj['IES']['ebi']),
recovery = recovery
)
)
i += 1
elif int(msg_type) == GTPmessageTypeDigit["create-session-response"] :
i = 0
while i < self.__num_msg :
msgs.append(CreateSessionResponse(
int(confobj['GENERIC']['teid'], 16),
int(confobj['GENERIC']['sqn'], 16),
confobj['GENERIC']['source_ip'],
int(self.__configs['interface']),
p_dns = confobj['IES']['primary_dns'],
s_dns = confobj['IES']['secondary_dns'],
)
)
i += 1
elif int(msg_type) == GTPmessageTypeDigit["delete-session-request"] :
mcc = confobj['IES']['mcc']
mnc = confobj['IES']['mnc']
lac = int(confobj['IES']['lac'])
rac = int(confobj['IES']['rac'])
ebi = int(confobj['IES']['ebi'])
interface = int(self.__configs['interface'])
for t in confobj['GENERIC']['teid']:
msgs.append(DeleteSessionRequest(int(t, 16),
source_ip = confobj['GENERIC']['source_ip'],
mcc = mcc,
mnc = mnc,
lac = lac,
rac = rac,
ebi = ebi,
interface = interface
)
)
elif int(msg_type) == GTPmessageTypeDigit["delete-session-response"] :
for t,s in zip(confobj['GENERIC']['teid'],
confobj['GENERIC']['sqn']):
msgs.append(DeleteSessionResponse(teid = int(t, 16),
sqn = int(s, 16)
)
)
elif int(msg_type) == GTPmessageTypeDigit["create-bearer-request"] :
source_ip = confobj['GENERIC']['source_ip']
interface = int(self.__configs['interface'])
ebi = int(confobj['IES']['ebi'])
for t,s in zip(confobj['GENERIC']['teid'],
confobj['GENERIC']['sqn']):
msgs.append(CreateBearerRequest(teid = int(t, 16),
source_ip = source_ip,
interface = interface,
ebi = ebi,
sqn = int(s, 16)
)
)
elif int(msg_type) == GTPmessageTypeDigit["modify-bearer-request"] :
source_ip = confobj['GENERIC']['source_ip']
interface = int(self.__configs['interface'])
ebi = int(confobj['IES']['ebi'])
nit = int(confobj['IES']['node_id_type'])
mcc = int(confobj['IES']['mcc'])
mnc = int(confobj['IES']['mnc'])
if 'fteid' in confobj['IES']:
fteid = int(confobj['IES']['fteid'])
else:
fteid = 0
for t,s in zip(confobj['GENERIC']['teid'],
confobj['GENERIC']['sqn']):
msgs.append(ModifyBearerRequest(teid = int(t, 16),
source_ip = source_ip,
interface = interface,
ebi = ebi,
sqn = int(s, 16),
nit = nit,
fteid = fteid,
mcc = mcc,
mnc = mnc
)
)
elif int(msg_type) == GTPmessageTypeDigit["modify-bearer-response"] :
source_ip = confobj['GENERIC']['source_ip']
interface = int(self.__configs['interface'])
ebi = int(confobj['IES']['ebi'])
for t,s in zip(confobj['GENERIC']['teid'],
confobj['GENERIC']['sqn']):
msgs.append(ModifyBearerResponse(teid = int(t, 16),
source_ip = source_ip,
interface = interface,
ebi = ebi,
sqn = int(s, 16)
)
)
elif int(msg_type) == GTPmessageTypeDigit["create-bearer-response"] :
source_ip = confobj['GENERIC']['source_ip']
interface = int(self.__configs['interface'])
ebi = int(confobj['IES']['ebi'])
for t,s in zip(confobj['GENERIC']['teid'],
confobj['GENERIC']['sqn']):
msgs.append(CreateBearerResponse(teid = int(t, 16),
source_ip = source_ip,
interface = interface,
ebi = ebi,
sqn = int(s, 16)
)
)
elif int(msg_type) == GTPmessageTypeDigit["delete-bearer-request"] :
source_ip = confobj['GENERIC']['source_ip']
interface = int(self.__configs['interface'])
ebi = int(confobj['IES']['ebi'])
nit = int(confobj['IES']['node_id_type'])
cause = int(confobj['IES']['cause'])
mcc = int(confobj['IES']['mcc'])
mnc = int(confobj['IES']['mnc'])
for t,s in zip(confobj['GENERIC']['teid'],
confobj['GENERIC']['sqn']):
msgs.append(DeleteBearerRequest(teid = int(t, 16),
source_ip = source_ip,
interface = interface,
ebi = ebi,
sqn = int(s, 16),
nit = nit,
cause = cause,
mcc = mcc,
mnc = mnc
)
)
elif int(msg_type) == GTPmessageTypeDigit["delete-bearer-response"] :
source_ip = confobj['GENERIC']['source_ip']
interface = int(self.__configs['interface'])
ebi = int(confobj['IES']['ebi'])
for t,s in zip(confobj['GENERIC']['teid'],
confobj['GENERIC']['sqn']):
msgs.append(DeleteBearerResponse(teid = int(t, 16),
source_ip = source_ip,
interface = interface,
ebi = ebi,
sqn = int(s, 16)
)
)
elif int(msg_type) == GTPmessageTypeDigit["delete-pdn-connection-set-request"] :
source_ip = confobj['GENERIC']['source_ip']
nit = int(confobj['IES']['node_id_type'])
mcc = int(confobj['IES']['mcc'])
mnc = int(confobj['IES']['mnc'])
for s in confobj['GENERIC']['sqn']:
msgs.append(DeletePDNConnectionSetRequest(
source_ip = source_ip,sqn = int(s, 16),
nit = nit, mcc = mcc, mnc = mnc)
)
return msgs
def __create_messages(self, confobj):
msgs = []
msgs.extend(self.__format_base_messages(confobj))
msgs.extend(self.__format_interface_msg(confobj))
return msgs
def get_unpacked_messages(self):
return self.__msgs
def get_gtp_port(self):
return self.__gtp_port
def get_version(self):
return self.__version
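# Minimal usage sketch (illustrative only; 'gtp.cnf' is a hypothetical config
# file containing the GENERIC and IES sections parsed above):
#
#   parser = parseConfigs('gtp.cnf', verbose=False)
#   messages = parser.get_unpacked_messages()
#   port = parser.get_gtp_port()   # defaults to 2123
#   # the caller is then expected to pack each message and send it over UDP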
| mit | -3,185,248,154,489,224,000 | 44.810496 | 152 | 0.46242 | false |
coreyabshire/color-names | colornames/diagnostics.py | 1 | 5521 | import numpy as np
import pandas as pd
from colormath.color_conversions import convert_color
from colormath.color_objects import LabColor, sRGBColor
from scipy.spatial import ConvexHull
from scipy.spatial import Delaunay
def point_lab(p):
return LabColor(p[0],p[1],p[2])
def point_rgb(p):
return convert_color(point_lab(p), sRGBColor)
def point_rgb255(p):
rgb = point_rgb(p)
return np.array([rgb.clamped_rgb_r * 255.0,
rgb.clamped_rgb_g * 255.0,
rgb.clamped_rgb_b * 255.0])
def random_cie_colors(n):
return pd.DataFrame({'cie_lstar': np.round(np.random.randn(n) * 10.0 + 60.0, 2),
'cie_astar': np.round(np.random.randn(n) * 30, 2),
'cie_bstar': np.round(np.random.randn(n) * 30, 2)},
columns=['cie_lstar','cie_astar','cie_bstar'])
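# NOTE (added for clarity): show_hull below relies on module-level variables
# 'coords', 'names' and a matplotlib 3D axis 'ax' being defined by the caller;
# see the commented-out plotting block near the end of this file.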
def show_hull(cname, ccol):
ccoords = coords[names==cname]
hull = ConvexHull(ccoords)
xs = ccoords.ix[:,0]
ys = ccoords.ix[:,1]
zs = ccoords.ix[:,2]
#ax.scatter(xs, ys, zs, c=ccol, marker='o')
for simplex in hull.simplices:
s = ccoords.irow(simplex)
#print s
sx = list(s.ix[:,0])
sy = list(s.ix[:,1])
sz = list(s.ix[:,2])
sx.append(sx[0])
sy.append(sy[0])
sz.append(sz[0])
ax.plot(sx, sy, sz, ccol, alpha=0.2)
hulld = Delaunay(ccoords.irow(hull.vertices))
hulld.find_simplex(coords)
hcol = ['grey' if x<0 else 'green' for x in hulld.find_simplex(coords)]
hxs = coords.ix[:,0]
hys = coords.ix[:,1]
hzs = coords.ix[:,2]
ax.scatter(hxs, hys, hzs, c=hcol, marker='o', alpha=0.2)
def write_diagnostic_html(filename, coords, names, y, ynames, dist, thresh, inhull, numhulls, selected=None):
with open(filename, 'w') as outfile:
outfile.write('<!doctype html>')
outfile.write('<html>')
outfile.write('<head>')
outfile.write('<link type="text/css" rel="stylesheet" href="color_page.css">')
outfile.write('</head>')
outfile.write('<body>')
outfile.write('<table>')
outfile.write('<tr>')
outfile.write('<th>patch</th>')
if names is not None:
outfile.write('<th>name</th>')
outfile.write('<th>linenum</th>')
outfile.write('<th>yname</th>')
outfile.write('<th>L*</th>')
outfile.write('<th>a*</th>')
outfile.write('<th>b*</th>')
outfile.write('<th>r</th>')
outfile.write('<th>g</th>')
outfile.write('<th>b</th>')
if selected is not None:
outfile.write('<th>selected</th>')
outfile.write('<th>inhull</th>')
outfile.write('<th>numhulls</th>')
for k in y:
outfile.write('<th>Y-%s</th>' % k[:2])
for k in dist:
outfile.write('<th>D-%s</th>' % k[:2])
for k in thresh:
outfile.write('<th>T-%s</th>' % k[:2])
outfile.write('</tr>')
for i in range(len(ynames)):
lab = LabColor(coords.iloc[i,0],coords.iloc[i,1],coords.iloc[i,2])
rgb = convert_color(lab, sRGBColor, target_illuminant='d50')
r = rgb.clamped_rgb_r
g = rgb.clamped_rgb_g
b = rgb.clamped_rgb_b
h = sRGBColor(r,g,b).get_rgb_hex()
outfile.write('<tr>')
outfile.write('<td style="background: %s"></td>' % h)
if names is not None:
outfile.write('<td>%s</td>' % names.iloc[i])
outfile.write('<td class="num">%s</td>' % (i + 1))
outfile.write('<td>%s</td>' % ynames.iloc[i])
outfile.write('<td class="num">%.2f</td>' % coords.iloc[i,0])
outfile.write('<td class="num">%.2f</td>' % coords.iloc[i,1])
outfile.write('<td class="num">%.2f</td>' % coords.iloc[i,2])
outfile.write('<td class="num">%.2f</td>' % r)
outfile.write('<td class="num">%.2f</td>' % g)
outfile.write('<td class="num">%.2f</td>' % b)
if selected is not None:
outfile.write('<td>%s</td>' % selected.iloc[i])
outfile.write('<td>%s</td>' % inhull.iloc[i])
outfile.write('<td>%s</td>' % numhulls.iloc[i])
for k in y:
outfile.write('<td class="num">%.2f</td>' % y.iloc[i][k])
for k in dist:
outfile.write('<td class="num">%.2f</td>' % dist.iloc[i][k])
for k in thresh:
t = thresh.iloc[i][k]
if t > 9999999.00:
outfile.write('<td class="num">%.2f</td>' % t)
else:
outfile.write('<td class="num">-</td>')
outfile.write('</tr>')
outfile.write('</table>')
outfile.write('</body>')
outfile.write('</html>')
#fig = plt.figure()
#ax = fig.add_subplot(111, projection='3d')
#show_hull('NEUTRAL', 'grey')
#show_hull('RED', 'red')
#show_hull('YELLOW', 'yellow')
#show_hull('ORANGE', 'orange')
#show_hull('BROWN', 'brown')
#show_hull('PURPLEVIOLET', 'purple')
#show_hull('MAGENTA', 'magenta')
#show_hull('GREEN', 'green')
#show_hull('BLUECYAN', 'blue')
def make_cie_gradient(n, a, b):
x = np.linspace(1.0, 0.0, n)
return pd.DataFrame({'cie_lstar': a[0] * x + b[0] * (1.0-x),
'cie_astar': a[1] * x + b[1] * (1.0-x),
'cie_bstar': a[2] * x + b[2] * (1.0-x)},
columns=['cie_lstar','cie_astar','cie_bstar'])
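# Illustrative call (the Lab endpoints are hypothetical): a 5-step gradient
# between two CIE L*a*b* colors, converted to 0-255 RGB via point_rgb255 above.
#
#   gradient = make_cie_gradient(5, (80.0, 0.0, 0.0), (50.0, 60.0, 40.0))
#   rgb_rows = [point_rgb255(p) for p in gradient.values]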
| mit | -2,020,991,193,454,116,400 | 36.304054 | 109 | 0.520014 | false |
Reflexe/doc_to_pdf | Windows/program/python-core-3.5.0/lib/traceback.py | 3 | 22175 | """Extract, format and print information about Python stack traces."""
import collections
import itertools
import linecache
import sys
__all__ = ['extract_stack', 'extract_tb', 'format_exception',
'format_exception_only', 'format_list', 'format_stack',
'format_tb', 'print_exc', 'format_exc', 'print_exception',
'print_last', 'print_stack', 'print_tb', 'clear_frames',
'FrameSummary', 'StackSummary', 'TracebackException',
'walk_stack', 'walk_tb']
#
# Formatting and printing lists of traceback lines.
#
def print_list(extracted_list, file=None):
"""Print the list of tuples as returned by extract_tb() or
extract_stack() as a formatted stack trace to the given file."""
if file is None:
file = sys.stderr
for item in StackSummary.from_list(extracted_list).format():
print(item, file=file, end="")
def format_list(extracted_list):
"""Format a list of traceback entry tuples for printing.
Given a list of tuples as returned by extract_tb() or
extract_stack(), return a list of strings ready for printing.
Each string in the resulting list corresponds to the item with the
same index in the argument list. Each string ends in a newline;
the strings may contain internal newlines as well, for those items
whose source text line is not None.
"""
return StackSummary.from_list(extracted_list).format()
#
# Printing and Extracting Tracebacks.
#
def print_tb(tb, limit=None, file=None):
"""Print up to 'limit' stack trace entries from the traceback 'tb'.
If 'limit' is omitted or None, all entries are printed. If 'file'
is omitted or None, the output goes to sys.stderr; otherwise
'file' should be an open file or file-like object with a write()
method.
"""
print_list(extract_tb(tb, limit=limit), file=file)
def format_tb(tb, limit=None):
"""A shorthand for 'format_list(extract_tb(tb, limit))'."""
return extract_tb(tb, limit=limit).format()
def extract_tb(tb, limit=None):
"""Return list of up to limit pre-processed entries from traceback.
This is useful for alternate formatting of stack traces. If
'limit' is omitted or None, all entries are extracted. A
pre-processed stack trace entry is a quadruple (filename, line
number, function name, text) representing the information that is
usually printed for a stack trace. The text is a string with
leading and trailing whitespace stripped; if the source is not
available it is None.
"""
return StackSummary.extract(walk_tb(tb), limit=limit)
#
# Exception formatting and output.
#
_cause_message = (
"\nThe above exception was the direct cause "
"of the following exception:\n\n")
_context_message = (
"\nDuring handling of the above exception, "
"another exception occurred:\n\n")
def print_exception(etype, value, tb, limit=None, file=None, chain=True):
"""Print exception up to 'limit' stack trace entries from 'tb' to 'file'.
This differs from print_tb() in the following ways: (1) if
traceback is not None, it prints a header "Traceback (most recent
call last):"; (2) it prints the exception type and value after the
stack trace; (3) if type is SyntaxError and value has the
appropriate format, it prints the line where the syntax error
occurred with a caret on the next line indicating the approximate
position of the error.
"""
# format_exception has ignored etype for some time, and code such as cgitb
# passes in bogus values as a result. For compatibility with such code we
# ignore it here (rather than in the new TracebackException API).
if file is None:
file = sys.stderr
for line in TracebackException(
type(value), value, tb, limit=limit).format(chain=chain):
print(line, file=file, end="")
def format_exception(etype, value, tb, limit=None, chain=True):
"""Format a stack trace and the exception information.
The arguments have the same meaning as the corresponding arguments
to print_exception(). The return value is a list of strings, each
ending in a newline and some containing internal newlines. When
these lines are concatenated and printed, exactly the same text is
printed as does print_exception().
"""
# format_exception has ignored etype for some time, and code such as cgitb
# passes in bogus values as a result. For compatibility with such code we
# ignore it here (rather than in the new TracebackException API).
return list(TracebackException(
type(value), value, tb, limit=limit).format(chain=chain))
def format_exception_only(etype, value):
"""Format the exception part of a traceback.
The arguments are the exception type and value such as given by
sys.last_type and sys.last_value. The return value is a list of
strings, each ending in a newline.
Normally, the list contains a single string; however, for
SyntaxError exceptions, it contains several lines that (when
printed) display detailed information about where the syntax
error occurred.
The message indicating which exception occurred is always the last
string in the list.
"""
return list(TracebackException(etype, value, None).format_exception_only())
# -- not official API but folk probably use these two functions.
def _format_final_exc_line(etype, value):
valuestr = _some_str(value)
if value is None or not valuestr:
line = "%s\n" % etype
else:
line = "%s: %s\n" % (etype, valuestr)
return line
def _some_str(value):
try:
return str(value)
except:
return '<unprintable %s object>' % type(value).__name__
# --
def print_exc(limit=None, file=None, chain=True):
"""Shorthand for 'print_exception(*sys.exc_info(), limit, file)'."""
print_exception(*sys.exc_info(), limit=limit, file=file, chain=chain)
def format_exc(limit=None, chain=True):
"""Like print_exc() but return a string."""
return "".join(format_exception(*sys.exc_info(), limit=limit, chain=chain))
def print_last(limit=None, file=None, chain=True):
"""This is a shorthand for 'print_exception(sys.last_type,
sys.last_value, sys.last_traceback, limit, file)'."""
if not hasattr(sys, "last_type"):
raise ValueError("no last exception")
print_exception(sys.last_type, sys.last_value, sys.last_traceback,
limit, file, chain)
#
# Printing and Extracting Stacks.
#
def print_stack(f=None, limit=None, file=None):
"""Print a stack trace from its invocation point.
The optional 'f' argument can be used to specify an alternate
stack frame at which to start. The optional 'limit' and 'file'
arguments have the same meaning as for print_exception().
"""
if f is None:
f = sys._getframe().f_back
print_list(extract_stack(f, limit=limit), file=file)
def format_stack(f=None, limit=None):
"""Shorthand for 'format_list(extract_stack(f, limit))'."""
if f is None:
f = sys._getframe().f_back
return format_list(extract_stack(f, limit=limit))
def extract_stack(f=None, limit=None):
"""Extract the raw traceback from the current stack frame.
The return value has the same format as for extract_tb(). The
optional 'f' and 'limit' arguments have the same meaning as for
print_stack(). Each item in the list is a quadruple (filename,
line number, function name, text), and the entries are in order
from oldest to newest stack frame.
"""
if f is None:
f = sys._getframe().f_back
stack = StackSummary.extract(walk_stack(f), limit=limit)
stack.reverse()
return stack
def clear_frames(tb):
"Clear all references to local variables in the frames of a traceback."
while tb is not None:
try:
tb.tb_frame.clear()
except RuntimeError:
# Ignore the exception raised if the frame is still executing.
pass
tb = tb.tb_next
class FrameSummary:
"""A single frame from a traceback.
- :attr:`filename` The filename for the frame.
- :attr:`lineno` The line within filename for the frame that was
active when the frame was captured.
- :attr:`name` The name of the function or method that was executing
when the frame was captured.
    - :attr:`line` The text from the linecache module for the line
      of code that was running when the frame was captured.
- :attr:`locals` Either None if locals were not supplied, or a dict
mapping the name to the repr() of the variable.
"""
__slots__ = ('filename', 'lineno', 'name', '_line', 'locals')
def __init__(self, filename, lineno, name, *, lookup_line=True,
locals=None, line=None):
"""Construct a FrameSummary.
:param lookup_line: If True, `linecache` is consulted for the source
code line. Otherwise, the line will be looked up when first needed.
:param locals: If supplied the frame locals, which will be captured as
object representations.
:param line: If provided, use this instead of looking up the line in
the linecache.
"""
self.filename = filename
self.lineno = lineno
self.name = name
self._line = line
if lookup_line:
self.line
self.locals = \
dict((k, repr(v)) for k, v in locals.items()) if locals else None
def __eq__(self, other):
if isinstance(other, FrameSummary):
return (self.filename == other.filename and
self.lineno == other.lineno and
self.name == other.name and
self.locals == other.locals)
if isinstance(other, tuple):
return (self.filename, self.lineno, self.name, self.line) == other
return NotImplemented
def __getitem__(self, pos):
return (self.filename, self.lineno, self.name, self.line)[pos]
def __iter__(self):
return iter([self.filename, self.lineno, self.name, self.line])
def __repr__(self):
return "<FrameSummary file {filename}, line {lineno} in {name}>".format(
filename=self.filename, lineno=self.lineno, name=self.name)
@property
def line(self):
if self._line is None:
self._line = linecache.getline(self.filename, self.lineno).strip()
return self._line
def walk_stack(f):
"""Walk a stack yielding the frame and line number for each frame.
This will follow f.f_back from the given frame. If no frame is given, the
current stack is used. Usually used with StackSummary.extract.
"""
if f is None:
f = sys._getframe().f_back.f_back
while f is not None:
yield f, f.f_lineno
f = f.f_back
def walk_tb(tb):
"""Walk a traceback yielding the frame and line number for each frame.
This will follow tb.tb_next (and thus is in the opposite order to
walk_stack). Usually used with StackSummary.extract.
"""
while tb is not None:
yield tb.tb_frame, tb.tb_lineno
tb = tb.tb_next
class StackSummary(list):
"""A stack of frames."""
@classmethod
def extract(klass, frame_gen, *, limit=None, lookup_lines=True,
capture_locals=False):
"""Create a StackSummary from a traceback or stack object.
:param frame_gen: A generator that yields (frame, lineno) tuples to
include in the stack.
:param limit: None to include all frames or the number of frames to
include.
:param lookup_lines: If True, lookup lines for each frame immediately,
otherwise lookup is deferred until the frame is rendered.
:param capture_locals: If True, the local variables from each frame will
be captured as object representations into the FrameSummary.
"""
if limit is None:
limit = getattr(sys, 'tracebacklimit', None)
if limit is not None and limit < 0:
limit = 0
if limit is not None:
if limit >= 0:
frame_gen = itertools.islice(frame_gen, limit)
else:
frame_gen = collections.deque(frame_gen, maxlen=-limit)
result = klass()
fnames = set()
for f, lineno in frame_gen:
co = f.f_code
filename = co.co_filename
name = co.co_name
fnames.add(filename)
linecache.lazycache(filename, f.f_globals)
# Must defer line lookups until we have called checkcache.
if capture_locals:
f_locals = f.f_locals
else:
f_locals = None
result.append(FrameSummary(
filename, lineno, name, lookup_line=False, locals=f_locals))
for filename in fnames:
linecache.checkcache(filename)
# If immediate lookup was desired, trigger lookups now.
if lookup_lines:
for f in result:
f.line
return result
@classmethod
def from_list(klass, a_list):
"""Create a StackSummary from a simple list of tuples.
This method supports the older Python API. Each tuple should be a
4-tuple with (filename, lineno, name, line) elements.
"""
# While doing a fast-path check for isinstance(a_list, StackSummary) is
# appealing, idlelib.run.cleanup_traceback and other similar code may
# break this by making arbitrary frames plain tuples, so we need to
# check on a frame by frame basis.
result = StackSummary()
for frame in a_list:
if isinstance(frame, FrameSummary):
result.append(frame)
else:
filename, lineno, name, line = frame
result.append(FrameSummary(filename, lineno, name, line=line))
return result
def format(self):
"""Format the stack ready for printing.
Returns a list of strings ready for printing. Each string in the
resulting list corresponds to a single frame from the stack.
Each string ends in a newline; the strings may contain internal
newlines as well, for those items with source text lines.
"""
result = []
for frame in self:
row = []
row.append(' File "{}", line {}, in {}\n'.format(
frame.filename, frame.lineno, frame.name))
if frame.line:
row.append(' {}\n'.format(frame.line.strip()))
if frame.locals:
for name, value in sorted(frame.locals.items()):
row.append(' {name} = {value}\n'.format(name=name, value=value))
result.append(''.join(row))
return result
class TracebackException:
"""An exception ready for rendering.
The traceback module captures enough attributes from the original exception
to this intermediary form to ensure that no references are held, while
still being able to fully print or format it.
Use `from_exception` to create TracebackException instances from exception
objects, or the constructor to create TracebackException instances from
individual components.
- :attr:`__cause__` A TracebackException of the original *__cause__*.
- :attr:`__context__` A TracebackException of the original *__context__*.
- :attr:`__suppress_context__` The *__suppress_context__* value from the
original exception.
- :attr:`stack` A `StackSummary` representing the traceback.
- :attr:`exc_type` The class of the original traceback.
- :attr:`filename` For syntax errors - the filename where the error
occurred.
- :attr:`lineno` For syntax errors - the linenumber where the error
occurred.
- :attr:`text` For syntax errors - the text where the error
occurred.
- :attr:`offset` For syntax errors - the offset into the text where the
error occurred.
- :attr:`msg` For syntax errors - the compiler error message.
"""
def __init__(self, exc_type, exc_value, exc_traceback, *, limit=None,
lookup_lines=True, capture_locals=False, _seen=None):
        # NB: we need to accept exc_type, exc_value, exc_traceback to
# permit backwards compat with the existing API, otherwise we
# need stub thunk objects just to glue it together.
# Handle loops in __cause__ or __context__.
if _seen is None:
_seen = set()
_seen.add(exc_value)
# Gracefully handle (the way Python 2.4 and earlier did) the case of
# being called with no type or value (None, None, None).
if (exc_value and exc_value.__cause__ is not None
and exc_value.__cause__ not in _seen):
cause = TracebackException(
type(exc_value.__cause__),
exc_value.__cause__,
exc_value.__cause__.__traceback__,
limit=limit,
lookup_lines=False,
capture_locals=capture_locals,
_seen=_seen)
else:
cause = None
if (exc_value and exc_value.__context__ is not None
and exc_value.__context__ not in _seen):
context = TracebackException(
type(exc_value.__context__),
exc_value.__context__,
exc_value.__context__.__traceback__,
limit=limit,
lookup_lines=False,
capture_locals=capture_locals,
_seen=_seen)
else:
context = None
self.exc_traceback = exc_traceback
self.__cause__ = cause
self.__context__ = context
self.__suppress_context__ = \
exc_value.__suppress_context__ if exc_value else False
# TODO: locals.
self.stack = StackSummary.extract(
walk_tb(exc_traceback), limit=limit, lookup_lines=lookup_lines,
capture_locals=capture_locals)
self.exc_type = exc_type
# Capture now to permit freeing resources: only complication is in the
# unofficial API _format_final_exc_line
self._str = _some_str(exc_value)
if exc_type and issubclass(exc_type, SyntaxError):
# Handle SyntaxError's specially
self.filename = exc_value.filename
self.lineno = str(exc_value.lineno)
self.text = exc_value.text
self.offset = exc_value.offset
self.msg = exc_value.msg
if lookup_lines:
self._load_lines()
@classmethod
def from_exception(self, exc, *args, **kwargs):
"""Create a TracebackException from an exception."""
return TracebackException(
type(exc), exc, exc.__traceback__, *args, **kwargs)
def _load_lines(self):
"""Private API. force all lines in the stack to be loaded."""
for frame in self.stack:
frame.line
if self.__context__:
self.__context__._load_lines()
if self.__cause__:
self.__cause__._load_lines()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __str__(self):
return self._str
def format_exception_only(self):
"""Format the exception part of the traceback.
The return value is a generator of strings, each ending in a newline.
Normally, the generator emits a single string; however, for
        SyntaxError exceptions, it emits several lines that (when
printed) display detailed information about where the syntax
error occurred.
The message indicating which exception occurred is always the last
string in the output.
"""
if self.exc_type is None:
yield _format_final_exc_line(None, self._str)
return
stype = self.exc_type.__qualname__
smod = self.exc_type.__module__
if smod not in ("__main__", "builtins"):
stype = smod + '.' + stype
if not issubclass(self.exc_type, SyntaxError):
yield _format_final_exc_line(stype, self._str)
return
# It was a syntax error; show exactly where the problem was found.
filename = self.filename or "<string>"
lineno = str(self.lineno) or '?'
yield ' File "{}", line {}\n'.format(filename, lineno)
badline = self.text
offset = self.offset
if badline is not None:
yield ' {}\n'.format(badline.strip())
if offset is not None:
caretspace = badline.rstrip('\n')
offset = min(len(caretspace), offset) - 1
caretspace = caretspace[:offset].lstrip()
# non-space whitespace (likes tabs) must be kept for alignment
caretspace = ((c.isspace() and c or ' ') for c in caretspace)
yield ' {}^\n'.format(''.join(caretspace))
msg = self.msg or "<no detail available>"
yield "{}: {}\n".format(stype, msg)
def format(self, *, chain=True):
"""Format the exception.
If chain is not *True*, *__cause__* and *__context__* will not be formatted.
The return value is a generator of strings, each ending in a newline and
some containing internal newlines. `print_exception` is a wrapper around
this method which just prints the lines to a file.
The message indicating which exception occurred is always the last
string in the output.
"""
if chain:
if self.__cause__ is not None:
yield from self.__cause__.format(chain=chain)
yield _cause_message
elif (self.__context__ is not None and
not self.__suppress_context__):
yield from self.__context__.format(chain=chain)
yield _context_message
if self.exc_traceback is not None:
yield 'Traceback (most recent call last):\n'
yield from self.stack.format()
yield from self.format_exception_only()
| mpl-2.0 | 276,555,414,976,538,800 | 37.365052 | 87 | 0.622728 | false |
jijoy/cuteparty-registrar | cuteparty-registrar.py | 1 | 1611 | import os
import json
from threading import Thread
import time
from time import sleep
from flask import Flask, json, render_template, request
import redis
from collections import OrderedDict
from Queue import Queue
app = Flask(__name__)
port = int(os.getenv("PORT"))
vcap = json.loads(os.environ['VCAP_SERVICES'])
svc = vcap['rediscloud'][0]['credentials']
db = redis.StrictRedis(host=svc["hostname"], port=svc["port"], password=svc["password"],db=0)
@app.route('/update',methods=['POST'])
def update():
"""
This is the entry point for updating the aggregator info
    Each of the individual apps will call this endpoint with their latest info
"""
appname = request.form['applicationname']
appdetails = request.form['appinfo']
obj = json.loads(appdetails)
if appname and obj:
db.hset('applications', appname, appdetails)
return json.dumps({'message':'success'})
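# Illustrative client call (hypothetical URL and payload; not part of this app):
#
#   import requests
#   requests.post('http://registrar.example.com/update',
#                 data={'applicationname': 'app-1',
#                       'appinfo': json.dumps({'instance-1': 'healthy'})})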
@app.route('/applicationsdetails')
def applicationsdetails():
"""
    This is the endpoint that provides all info about the applications.
    It is an internal method of the registrar through which index.html loads all info.
"""
appdicts = db.hgetall('applications')
finaldict = OrderedDict()
for appname in sorted(appdicts):
instances = json.loads(appdicts.get(appname))
        finaldict[appname] = instances
return render_template('robots.html', appdicts=finaldict)
@app.route('/')
def index():
"""
Main entry point
"""
return render_template('index.html')
if __name__ == "__main__":
app.run(host='0.0.0.0', port=port, debug=True)
| mit | -6,469,690,560,357,807,000 | 27.263158 | 93 | 0.689634 | false |
treycausey/scikit-learn | sklearn/cluster/dbscan_.py | 1 | 9439 | # -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..metrics import pairwise_distances
from ..utils import check_random_state
from ..neighbors import NearestNeighbors
def dbscan(X, eps=0.5, min_samples=5, metric='minkowski',
algorithm='auto', leaf_size=30, p=2, random_state=None):
"""Perform DBSCAN clustering from vector array or distance matrix.
Parameters
----------
X: array [n_samples, n_samples] or [n_samples, n_features]
Array of distances between samples, or a feature array.
The array is treated as a feature array unless the metric is given as
'precomputed'.
eps: float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples: int, optional
The number of samples in a neighborhood for a point to be considered
as a core point.
metric: string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.calculate_distance for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square.
algorithm: {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size: int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p: float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
random_state: numpy.RandomState, optional
The generator used to initialize the centers. Defaults to numpy.random.
Returns
-------
core_samples: array [n_core_samples]
Indices of core samples.
labels : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
if not eps > 0.0:
raise ValueError("eps must be positive.")
X = np.asarray(X)
n = X.shape[0]
# If index order not given, create random order.
random_state = check_random_state(random_state)
index_order = np.arange(n)
random_state.shuffle(index_order)
# check for known metric powers
distance_matrix = True
if metric == 'precomputed':
D = pairwise_distances(X, metric=metric)
else:
distance_matrix = False
neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p)
neighbors_model.fit(X)
# Calculate neighborhood for all samples. This leaves the original point
    # in, which needs to be considered later (i.e. point i is in the
    # neighborhood of point i; while true, this is useless information).
neighborhoods = []
if distance_matrix:
neighborhoods = [np.where(x <= eps)[0] for x in D]
# Initially, all samples are noise.
labels = -np.ones(n, dtype=np.int)
# A list of all core samples found.
core_samples = []
# label_num is the label given to the new cluster
label_num = 0
# Look at all samples and determine if they are core.
# If they are then build a new cluster from them.
for index in index_order:
# Already classified
if labels[index] != -1:
continue
# get neighbors from neighborhoods or ballTree
index_neighborhood = []
if distance_matrix:
index_neighborhood = neighborhoods[index]
else:
index_neighborhood = neighbors_model.radius_neighbors(
X[index], eps, return_distance=False)[0]
# Too few samples to be core
if len(index_neighborhood) < min_samples:
continue
core_samples.append(index)
labels[index] = label_num
# candidates for new core samples in the cluster.
candidates = [index]
while len(candidates) > 0:
new_candidates = []
# A candidate is a core point in the current cluster that has
# not yet been used to expand the current cluster.
for c in candidates:
c_neighborhood = []
if distance_matrix:
c_neighborhood = neighborhoods[c]
else:
c_neighborhood = neighbors_model.radius_neighbors(
X[c], eps, return_distance=False)[0]
noise = np.where(labels[c_neighborhood] == -1)[0]
noise = c_neighborhood[noise]
labels[noise] = label_num
for neighbor in noise:
n_neighborhood = []
if distance_matrix:
n_neighborhood = neighborhoods[neighbor]
else:
n_neighborhood = neighbors_model.radius_neighbors(
X[neighbor], eps, return_distance=False)[0]
# check if its a core point as well
if len(n_neighborhood) >= min_samples:
# is new core point
new_candidates.append(neighbor)
core_samples.append(neighbor)
# Update candidates for next round of cluster expansion.
candidates = new_candidates
# Current cluster finished.
# Next core point found will start a new cluster.
label_num += 1
return core_samples, labels
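# Minimal usage sketch (illustrative only; the data below is hypothetical):
#
#   import numpy as np
#   X = np.array([[1.0, 2.0], [1.1, 2.1], [8.0, 8.0]])
#   core_samples, labels = dbscan(X, eps=0.5, min_samples=2)
#   # labels == [0, 0, -1]: the first two points form a cluster, the third
#   # point is labelled -1 (noise).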
class DBSCAN(BaseEstimator, ClusterMixin):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Parameters
----------
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples in a neighborhood for a point to be considered
as a core point.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.calculate_distance for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square.
random_state : numpy.RandomState, optional
The generator used to initialize the centers. Defaults to numpy.random.
Attributes
----------
`core_sample_indices_` : array, shape = [n_core_samples]
Indices of core samples.
`components_` : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
`labels_` : array, shape = [n_samples]
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Notes
-----
See examples/plot_dbscan.py for an example.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
algorithm='auto', leaf_size=30, p=None, random_state=None):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.algorithm = algorithm
self.leaf_size = leaf_size
self.p = p
self.random_state = random_state
def fit(self, X):
"""Perform DBSCAN clustering from features or distance matrix.
Parameters
----------
X: array [n_samples, n_samples] or [n_samples, n_features]
Array of distances between samples, or a feature array.
The array is treated as a feature array unless the metric is
given as 'precomputed'.
"""
clust = dbscan(X, **self.get_params())
self.core_sample_indices_, self.labels_ = clust
self.components_ = X[self.core_sample_indices_].copy()
return self
| bsd-3-clause | 4,781,715,433,747,642,000 | 36.456349 | 79 | 0.62263 | false |
alshedivat/tensorflow | tensorflow/python/kernel_tests/bitcast_op_test.py | 3 | 2799 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.bitcast."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class BitcastTest(test.TestCase):
def _testBitcast(self, x, datatype, shape):
with self.session(use_gpu=True):
tf_ans = array_ops.bitcast(x, datatype)
out = tf_ans.eval()
buff_after = memoryview(out).tobytes()
buff_before = memoryview(x).tobytes()
self.assertEqual(buff_before, buff_after)
self.assertEqual(tf_ans.get_shape(), shape)
self.assertEqual(tf_ans.dtype, datatype)
def testSmaller(self):
x = np.random.rand(3, 2)
datatype = dtypes.int8
shape = [3, 2, 8]
self._testBitcast(x, datatype, shape)
def testLarger(self):
x = np.arange(16, dtype=np.int8).reshape([4, 4])
datatype = dtypes.int32
shape = [4]
self._testBitcast(x, datatype, shape)
def testSameDtype(self):
x = np.random.rand(3, 4)
shape = [3, 4]
self._testBitcast(x, x.dtype, shape)
def testSameSize(self):
x = np.random.rand(3, 4)
shape = [3, 4]
self._testBitcast(x, dtypes.int64, shape)
def testErrors(self):
x = np.zeros([1, 1], np.int8)
datatype = dtypes.int32
with self.assertRaisesRegexp(ValueError, "Cannot bitcast due to shape"):
array_ops.bitcast(x, datatype, None)
def testEmpty(self):
x = np.ones([], np.int32)
datatype = dtypes.int8
shape = [4]
self._testBitcast(x, datatype, shape)
def testUnknown(self):
x = array_ops.placeholder(dtypes.float32)
datatype = dtypes.int8
array_ops.bitcast(x, datatype, None)
def testQuantizedType(self):
shape = [3, 4]
x = np.zeros(shape, np.uint16)
datatype = dtypes.quint16
self._testBitcast(x, datatype, shape)
def testUnsignedType(self):
shape = [3, 4]
x = np.zeros(shape, np.int64)
datatype = dtypes.uint64
self._testBitcast(x, datatype, shape)
if __name__ == "__main__":
test.main()
| apache-2.0 | -3,329,594,955,507,657,700 | 29.096774 | 80 | 0.66095 | false |
michaelkirk/QGIS | python/plugins/processing/algs/lidar/fusion/Csv2Grid.py | 2 | 2476 | # -*- coding: utf-8 -*-
"""
***************************************************************************
Csv2Grid.py
---------------------
Date : June 2014
Copyright : (C) 2014 by Agresta S. Coop
Email : iescamochero at agresta dot org
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Agresta S. Coop - www.agresta.org'
__date__ = 'June 2014'
__copyright__ = '(C) 2014, Agresta S. Coop'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from processing.core.parameters import ParameterFile
from processing.core.parameters import ParameterString
from processing.core.outputs import OutputFile
from FusionAlgorithm import FusionAlgorithm
from FusionUtils import FusionUtils
class Csv2Grid(FusionAlgorithm):
INPUT = 'INPUT'
COLUMN = 'COLUMN'
OUTPUT = 'OUTPUT'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Csv2Grid')
self.group, self.i18n_group = self.trAlgorithm('Points')
self.addParameter(ParameterFile(self.INPUT, self.tr('CSV Files')))
self.addParameter(ParameterString(self.COLUMN, self.tr('Column')))
        self.addOutput(OutputFile(self.OUTPUT, self.tr('Raster Output file'), 'asc'))
def processAlgorithm(self, progress):
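        """Assemble the FUSION CSV2Grid command line from the dialog inputs and run it."""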
commands = [os.path.join(FusionUtils.FusionPath(), 'CSV2Grid.exe')]
commands.append('/verbose')
files = self.getParameterValue(self.INPUT).split(';')
if len(files) == 1:
commands.append(self.getParameterValue(self.INPUT))
else:
FusionUtils.createFileList(files)
commands.append(FusionUtils.tempFileListFilepath())
commands.append(self.getParameterValue(self.COLUMN))
commands.append(self.getOutputValue(self.OUTPUT))
FusionUtils.runFusion(commands, progress)
| gpl-2.0 | 2,524,326,199,955,766,000 | 41.689655 | 86 | 0.554927 | false |
trishnaguha/ansible | contrib/inventory/packet_net.py | 15 | 18382 | #!/usr/bin/env python
'''
Packet.net external inventory script
=================================
Generates inventory that Ansible can understand by making API request to
Packet.net using the Packet library.
NOTE: This script assumes Ansible is being executed where the environment
variable needed for Packet API Token already been set:
export PACKET_API_TOKEN=Bfse9F24SFtfs423Gsd3ifGsd43sSdfs
This script also assumes there is a packet_net.ini file alongside it. To specify a
different path to packet_net.ini, define the PACKET_NET_INI_PATH environment variable:
export PACKET_NET_INI_PATH=/path/to/my_packet_net.ini
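Typical invocation (shown for illustration; the flags are defined below in parse_cli_args):
    ./packet_net.py --list              # dump the full inventory as JSON
    ./packet_net.py --host <address>    # variables for a single device
    ./packet_net.py --refresh-cache --list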
'''
# (c) 2016, Peter Sankauskas
# (c) 2017, Tomas Karasek
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import sys
import os
import argparse
import re
from time import time
import six
from six.moves import configparser
try:
import packet
except ImportError as e:
sys.exit("failed=True msg='`packet-python` library required for this script'")
import traceback
import json
ini_section = 'packet'
class PacketInventory(object):
def _empty_inventory(self):
return {"_meta": {"hostvars": {}}}
def __init__(self):
''' Main execution path '''
# Inventory grouped by device IDs, tags, security groups, regions,
# and availability zones
self.inventory = self._empty_inventory()
# Index of hostname (address) to device ID
self.index = {}
# Read settings and parse CLI arguments
self.parse_cli_args()
self.read_settings()
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()
# Data to print
if self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of devices for inventory
if self.inventory == self._empty_inventory():
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print(data_to_print)
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
''' Reads the settings from the packet_net.ini file '''
if six.PY3:
config = configparser.ConfigParser()
else:
config = configparser.SafeConfigParser()
_ini_path_raw = os.environ.get('PACKET_NET_INI_PATH')
if _ini_path_raw:
packet_ini_path = os.path.expanduser(os.path.expandvars(_ini_path_raw))
else:
packet_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'packet_net.ini')
config.read(packet_ini_path)
# items per page
self.items_per_page = 999
if config.has_option(ini_section, 'items_per_page'):
            self.items_per_page = config.getint(ini_section, 'items_per_page')
# Instance states to be gathered in inventory. Default is all of them.
packet_valid_device_states = [
'active',
'inactive',
'queued',
'provisioning'
]
self.packet_device_states = []
if config.has_option(ini_section, 'device_states'):
for device_state in config.get(ini_section, 'device_states').split(','):
device_state = device_state.strip()
if device_state not in packet_valid_device_states:
continue
self.packet_device_states.append(device_state)
else:
self.packet_device_states = packet_valid_device_states
# Cache related
cache_dir = os.path.expanduser(config.get(ini_section, 'cache_path'))
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
self.cache_path_cache = cache_dir + "/ansible-packet.cache"
self.cache_path_index = cache_dir + "/ansible-packet.index"
self.cache_max_age = config.getint(ini_section, 'cache_max_age')
# Configure nested groups instead of flat namespace.
if config.has_option(ini_section, 'nested_groups'):
self.nested_groups = config.getboolean(ini_section, 'nested_groups')
else:
self.nested_groups = False
# Replace dash or not in group names
if config.has_option(ini_section, 'replace_dash_in_groups'):
self.replace_dash_in_groups = config.getboolean(ini_section, 'replace_dash_in_groups')
else:
self.replace_dash_in_groups = True
# Configure which groups should be created.
group_by_options = [
'group_by_device_id',
'group_by_hostname',
'group_by_facility',
'group_by_project',
'group_by_operating_system',
'group_by_plan_type',
'group_by_tags',
'group_by_tag_none',
]
for option in group_by_options:
if config.has_option(ini_section, option):
setattr(self, option, config.getboolean(ini_section, option))
else:
setattr(self, option, True)
# Do we need to just include hosts that match a pattern?
try:
pattern_include = config.get(ini_section, 'pattern_include')
if pattern_include and len(pattern_include) > 0:
self.pattern_include = re.compile(pattern_include)
else:
self.pattern_include = None
except configparser.NoOptionError:
self.pattern_include = None
# Do we need to exclude hosts that match a pattern?
try:
pattern_exclude = config.get(ini_section, 'pattern_exclude')
if pattern_exclude and len(pattern_exclude) > 0:
self.pattern_exclude = re.compile(pattern_exclude)
else:
self.pattern_exclude = None
except configparser.NoOptionError:
self.pattern_exclude = None
# Projects
self.projects = []
configProjects = config.get(ini_section, 'projects')
        configProjects_exclude = [p.strip() for p in config.get(ini_section, 'projects_exclude').split(',')]
if (configProjects == 'all'):
for projectInfo in self.get_projects():
if projectInfo.name not in configProjects_exclude:
self.projects.append(projectInfo.name)
else:
self.projects = configProjects.split(",")
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Packet')
parser.add_argument('--list', action='store_true', default=True,
help='List Devices (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific device')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to Packet (default: False - use cache files)')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
''' Do API calls to each region, and save data in cache files '''
for projectInfo in self.get_projects():
if projectInfo.name in self.projects:
self.get_devices_by_project(projectInfo)
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def connect(self):
''' create connection to api server'''
token = os.environ.get('PACKET_API_TOKEN')
if token is None:
raise Exception("Error reading token from environment (PACKET_API_TOKEN)!")
manager = packet.Manager(auth_token=token)
return manager
def get_projects(self):
'''Makes a Packet API call to get the list of projects'''
try:
manager = self.connect()
projects = manager.list_projects()
return projects
except Exception as e:
traceback.print_exc()
self.fail_with_error(e, 'getting Packet projects')
def get_devices_by_project(self, project):
''' Makes an Packet API call to the list of devices in a particular
project '''
params = {
'per_page': self.items_per_page
}
try:
manager = self.connect()
devices = manager.list_devices(project_id=project.id, params=params)
for device in devices:
self.add_device(device, project)
except Exception as e:
traceback.print_exc()
self.fail_with_error(e, 'getting Packet devices')
def fail_with_error(self, err_msg, err_operation=None):
'''log an error to std err for ansible-playbook to consume and exit'''
if err_operation:
err_msg = 'ERROR: "{err_msg}", while: {err_operation}\n'.format(
err_msg=err_msg, err_operation=err_operation)
sys.stderr.write(err_msg)
sys.exit(1)
def get_device(self, device_id):
manager = self.connect()
device = manager.get_device(device_id)
return device
def add_device(self, device, project):
''' Adds a device to the inventory and index, as long as it is
addressable '''
# Only return devices with desired device states
if device.state not in self.packet_device_states:
return
# Select the best destination address. Only include management
# addresses as non-management (elastic) addresses need manual
# host configuration to be routable.
# See https://help.packet.net/article/54-elastic-ips.
dest = None
for ip_address in device.ip_addresses:
if ip_address['public'] is True and \
ip_address['address_family'] == 4 and \
ip_address['management'] is True:
dest = ip_address['address']
if not dest:
# Skip devices we cannot address (e.g. private VPC subnet)
return
# if we only want to include hosts that match a pattern, skip those that don't
if self.pattern_include and not self.pattern_include.match(device.hostname):
return
# if we need to exclude hosts that match a pattern, skip those
if self.pattern_exclude and self.pattern_exclude.match(device.hostname):
return
# Add to index
self.index[dest] = [project.id, device.id]
# Inventory: Group by device ID (always a group of 1)
if self.group_by_device_id:
self.inventory[device.id] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'devices', device.id)
# Inventory: Group by device name (hopefully a group of 1)
if self.group_by_hostname:
self.push(self.inventory, device.hostname, dest)
if self.nested_groups:
                self.push_group(self.inventory, 'hostnames', device.hostname)
# Inventory: Group by project
if self.group_by_project:
self.push(self.inventory, project.name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'projects', project.name)
# Inventory: Group by facility
if self.group_by_facility:
self.push(self.inventory, device.facility['code'], dest)
if self.nested_groups:
                self.push_group(self.inventory, project.name, device.facility['code'])
# Inventory: Group by OS
if self.group_by_operating_system:
self.push(self.inventory, device.operating_system.slug, dest)
if self.nested_groups:
self.push_group(self.inventory, 'operating_systems', device.operating_system.slug)
# Inventory: Group by plan type
if self.group_by_plan_type:
self.push(self.inventory, device.plan['slug'], dest)
if self.nested_groups:
self.push_group(self.inventory, 'plans', device.plan['slug'])
# Inventory: Group by tag keys
if self.group_by_tags:
for k in device.tags:
key = self.to_safe("tag_" + k)
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
# Global Tag: devices without tags
if self.group_by_tag_none and len(device.tags) == 0:
self.push(self.inventory, 'tag_none', dest)
if self.nested_groups:
self.push_group(self.inventory, 'tags', 'tag_none')
# Global Tag: tag all Packet devices
self.push(self.inventory, 'packet', dest)
self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_device(device)
def get_host_info_dict_from_device(self, device):
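        # Flatten the packet.Device attributes into 'packet_*' host variables,
        # converting complex fields (facility, plan, OS, tags) to simple values.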
device_vars = {}
for key in vars(device):
value = getattr(device, key)
key = self.to_safe('packet_' + key)
# Handle complex types
if key == 'packet_state':
device_vars[key] = device.state or ''
elif key == 'packet_hostname':
device_vars[key] = value
elif isinstance(value, (int, bool)):
device_vars[key] = value
elif isinstance(value, six.string_types):
device_vars[key] = value.strip()
elif value is None:
device_vars[key] = ''
elif key == 'packet_facility':
device_vars[key] = value['code']
elif key == 'packet_operating_system':
device_vars[key] = value.slug
elif key == 'packet_plan':
device_vars[key] = value['slug']
elif key == 'packet_tags':
for k in value:
key = self.to_safe('packet_tag_' + k)
device_vars[key] = k
else:
pass
# print key
# print type(value)
# print value
return device_vars
def get_host_info(self):
''' Get variables about a specific host '''
if len(self.index) == 0:
# Need to load index from cache
self.load_index_from_cache()
if self.args.host not in self.index:
# try updating the cache
self.do_api_calls_update_cache()
if self.args.host not in self.index:
# host might not exist anymore
return self.json_format_dict({}, True)
(project_id, device_id) = self.index[self.args.host]
device = self.get_device(device_id)
return self.json_format_dict(self.get_host_info_dict_from_device(device), True)
def push(self, my_dict, key, element):
''' Push an element onto an array that may not have been defined in
the dict '''
group_info = my_dict.setdefault(key, [])
if isinstance(group_info, dict):
host_list = group_info.setdefault('hosts', [])
host_list.append(element)
else:
group_info.append(element)
def push_group(self, my_dict, key, element):
''' Push a group as a child of another group. '''
parent_group = my_dict.setdefault(key, {})
if not isinstance(parent_group, dict):
parent_group = my_dict[key] = {'hosts': parent_group}
child_groups = parent_group.setdefault('children', [])
if element not in child_groups:
child_groups.append(element)
def get_inventory_from_cache(self):
''' Reads the inventory from the cache file and returns it as a JSON
object '''
cache = open(self.cache_path_cache, 'r')
json_inventory = cache.read()
return json_inventory
def load_index_from_cache(self):
''' Reads the index from the cache file sets self.index '''
cache = open(self.cache_path_index, 'r')
json_index = cache.read()
self.index = json.loads(json_index)
def write_to_cache(self, data, filename):
''' Writes data in JSON format to a file '''
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def uncammelize(self, key):
temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
regex = r"[^A-Za-z0-9\_"
if not self.replace_dash_in_groups:
regex += r"\-"
return re.sub(regex + "]", "_", word)
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
# Run the script
PacketInventory()
| gpl-3.0 | 4,347,594,445,811,612,700 | 35.690619 | 126 | 0.587205 | false |
ml-lab/pylearn2 | pylearn2/scripts/icml_2013_wrepl/black_box/black_box_dataset.py | 5 | 5055 | """
A Pylearn2 Dataset class for accessing the data for the
facial expression recognition Kaggle contest for the ICML
2013 workshop on representation learning.
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2013, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
import csv
import numpy as np
import os
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.utils import serial
from pylearn2.utils.string_utils import preprocess
class BlackBoxDataset(DenseDesignMatrix):
"""
A Pylearn2 Dataset class for accessing the data for the
facial expression recognition Kaggle contest for the ICML
2013 workshop on representation learning.
"""
def __init__(self, which_set,
base_path = '${PYLEARN2_DATA_PATH}/icml_2013_black_box',
start = None,
stop = None,
preprocessor = None,
fit_preprocessor = False,
fit_test_preprocessor = False):
"""
which_set: A string specifying which portion of the dataset
            to load. Valid values are 'train', 'public_test' or 'extra'
base_path: The directory containing the .csv files from kaggle.com.
This directory should be writable; if the .csv files haven't
already been converted to npy, this class will convert them
to save memory the next time they are loaded.
fit_preprocessor: True if the preprocessor is allowed to fit the
data.
fit_test_preprocessor: If we construct a test set based on this
dataset, should it be allowed to fit the test set?
"""
self.test_args = locals()
self.test_args['which_set'] = 'public_test'
self.test_args['fit_preprocessor'] = fit_test_preprocessor
del self.test_args['start']
del self.test_args['stop']
del self.test_args['self']
files = {'train': 'train.csv', 'public_test' : 'test.csv'}
sizes = {'train': 1000, 'public_test' : 10000, 'extra': 135735 }
if which_set == 'extra':
path = base_path + '/' + 'extra_unsupervised_data.npy'
X = serial.load(path).T
y = None
else:
try:
filename = files[which_set]
except KeyError:
raise ValueError("Unrecognized dataset name: " + which_set)
path = base_path + '/' + filename
path = preprocess(path)
expect_labels = which_set == 'train'
X, y = self._load_data(path, expect_labels)
size = sizes[which_set]
if X.shape[0] != size:
raise ValueError("Expected "+str(size)+" examples, got "+str(X.shape[0]))
if start is not None:
assert which_set != 'test'
assert isinstance(start, int)
assert isinstance(stop, int)
assert start >= 0
assert start < stop
if not (stop <= X.shape[0]):
raise ValueError("stop must be less than the # of examples but " +
"stop is " + str(stop) + " and there are " + str(X.shape[0]) +
" examples.")
X = X[start:stop, :]
if y is not None:
y = y[start:stop, :]
super(BlackBoxDataset, self).__init__(X=X, y=y)
if preprocessor:
preprocessor.apply(self, can_fit=fit_preprocessor)
def adjust_for_viewer(self, X):
return (X - 127.5) / 127.5
def get_test_set(self):
return BlackBoxDataset(**self.test_args)
def _load_data(self, path, expect_labels):
assert path.endswith('.csv')
# If a previous call to this method has already converted
# the data to numpy format, load the numpy directly
X_path = path[:-4] + '.X.npy'
Y_path = path[:-4] + '.Y.npy'
if os.path.exists(X_path):
X = np.load(X_path)
if expect_labels:
y = np.load(Y_path)
else:
y = None
return X, y
# Convert the .csv file to numpy
csv_file = open(path, 'r')
reader = csv.reader(csv_file)
# Discard header
row = reader.next()
y_list = []
X_list = []
for row in reader:
if expect_labels:
y_str = row[0]
row = row[1:]
y = int(float(y_str))
y_list.append(y)
X_row = map(lambda x: float(x), row)
X_list.append(X_row)
X = np.asarray(X_list).astype('float32')
if expect_labels:
y = np.asarray(y_list)
one_hot = np.zeros((y.shape[0],9),dtype='float32')
for i in xrange(y.shape[0]):
one_hot[i,y[i] - 1] = 1.
y = one_hot
else:
y = None
np.save(X_path, X)
if y is not None:
np.save(Y_path, y)
return X, y
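# Illustrative usage (a sketch only; assumes the contest files are available
# under ${PYLEARN2_DATA_PATH}/icml_2013_black_box and that start/stop fall
# within the 1000 training examples):
#
#     train = BlackBoxDataset('train', start=0, stop=900)
#     valid = BlackBoxDataset('train', start=900, stop=1000)
#     test = train.get_test_set()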
| bsd-3-clause | 1,077,197,747,946,040,600 | 31.612903 | 86 | 0.54362 | false |
maryklayne/Funcao | sympy/functions/elementary/tests/test_trigonometric.py | 5 | 42117 | from sympy import (symbols, Symbol, nan, oo, zoo, I, sinh, sin, pi, atan,
acos, Rational, sqrt, asin, acot, coth, E, S, tan, tanh, cos,
cosh, atan2, exp, log, asinh, acoth, atanh, O, cancel, Matrix, re, im,
Float, Pow, gcd, sec, csc, cot, diff, simplify, Heaviside, arg,
conjugate, series, FiniteSet, asec, acsc)
from sympy.utilities.pytest import XFAIL, slow, raises
from sympy.core.compatibility import xrange
x, y, z = symbols('x y z')
r = Symbol('r', real=True)
k = Symbol('k', integer=True)
p = Symbol('p', positive=True)
n = Symbol('n', negative=True)
a = Symbol('a', algebraic=True)
na = Symbol('na', nonzero=True, algebraic=True)
def test_sin():
x, y = symbols('x y')
assert sin.nargs == FiniteSet(1)
assert sin(nan) == nan
assert sin(oo*I) == oo*I
assert sin(-oo*I) == -oo*I
assert sin(oo).args[0] == oo
assert sin(0) == 0
assert sin(asin(x)) == x
assert sin(atan(x)) == x / sqrt(1 + x**2)
assert sin(acos(x)) == sqrt(1 - x**2)
assert sin(acot(x)) == 1 / (sqrt(1 + 1 / x**2) * x)
assert sin(atan2(y, x)) == y / sqrt(x**2 + y**2)
assert sin(pi*I) == sinh(pi)*I
assert sin(-pi*I) == -sinh(pi)*I
assert sin(-2*I) == -sinh(2)*I
assert sin(pi) == 0
assert sin(-pi) == 0
assert sin(2*pi) == 0
assert sin(-2*pi) == 0
assert sin(-3*10**73*pi) == 0
assert sin(7*10**103*pi) == 0
assert sin(pi/2) == 1
assert sin(-pi/2) == -1
assert sin(5*pi/2) == 1
assert sin(7*pi/2) == -1
ne = symbols('ne', integer=True, even=False)
e = symbols('e', even=True)
assert sin(pi*ne/2) == (-1)**(ne/2 - S.Half)
assert sin(pi*k/2).func == sin
assert sin(pi*e/2) == 0
assert sin(pi*k) == 0
assert sin(pi*k).subs(k, 3) == sin(pi*k/2).subs(k, 6) # issue 8298
assert sin(pi/3) == S.Half*sqrt(3)
assert sin(-2*pi/3) == -S.Half*sqrt(3)
assert sin(pi/4) == S.Half*sqrt(2)
assert sin(-pi/4) == -S.Half*sqrt(2)
assert sin(17*pi/4) == S.Half*sqrt(2)
assert sin(-3*pi/4) == -S.Half*sqrt(2)
assert sin(pi/6) == S.Half
assert sin(-pi/6) == -S.Half
assert sin(7*pi/6) == -S.Half
assert sin(-5*pi/6) == -S.Half
assert sin(1*pi/5) == sqrt((5 - sqrt(5)) / 8)
assert sin(2*pi/5) == sqrt((5 + sqrt(5)) / 8)
assert sin(3*pi/5) == sin(2*pi/5)
assert sin(4*pi/5) == sin(1*pi/5)
assert sin(6*pi/5) == -sin(1*pi/5)
assert sin(8*pi/5) == -sin(2*pi/5)
assert sin(-1273*pi/5) == -sin(2*pi/5)
assert sin(pi/8) == sqrt((2 - sqrt(2))/4)
assert sin(104*pi/105) == sin(pi/105)
assert sin(106*pi/105) == -sin(pi/105)
assert sin(-104*pi/105) == -sin(pi/105)
assert sin(-106*pi/105) == sin(pi/105)
assert sin(x*I) == sinh(x)*I
assert sin(k*pi) == 0
assert sin(17*k*pi) == 0
assert sin(k*pi*I) == sinh(k*pi)*I
assert sin(r).is_real is True
assert sin(0, evaluate=False).is_algebraic
assert sin(a).is_algebraic is None
assert sin(na).is_algebraic is False
q = Symbol('q', rational=True)
assert sin(pi*q).is_algebraic
assert isinstance(sin( re(x) - im(y)), sin) is True
assert isinstance(sin(-re(x) + im(y)), sin) is False
for d in list(range(1, 22)) + [60, 85]:
for n in xrange(0, d*2 + 1):
x = n*pi/d
e = abs( float(sin(x)) - sin(float(x)) )
assert e < 1e-12
def test_sin_cos():
for d in [1, 2, 3, 4, 5, 6, 10, 12]: # list is not exhaustive...
for n in xrange(-2*d, d*2):
x = n*pi/d
assert sin(x + pi/2) == cos(x), "fails for %d*pi/%d" % (n, d)
assert sin(x - pi/2) == -cos(x), "fails for %d*pi/%d" % (n, d)
assert sin(x) == cos(x - pi/2), "fails for %d*pi/%d" % (n, d)
assert -sin(x) == cos(x + pi/2), "fails for %d*pi/%d" % (n, d)
def test_sin_series():
assert sin(x).series(x, 0, 9) == \
x - x**3/6 + x**5/120 - x**7/5040 + O(x**9)
def test_sin_rewrite():
assert sin(x).rewrite(exp) == -I*(exp(I*x) - exp(-I*x))/2
assert sin(x).rewrite(tan) == 2*tan(x/2)/(1 + tan(x/2)**2)
assert sin(x).rewrite(cot) == 2*cot(x/2)/(1 + cot(x/2)**2)
assert sin(sinh(x)).rewrite(
exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, sinh(3)).n()
assert sin(cosh(x)).rewrite(
exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, cosh(3)).n()
assert sin(tanh(x)).rewrite(
exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, tanh(3)).n()
assert sin(coth(x)).rewrite(
exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, coth(3)).n()
assert sin(sin(x)).rewrite(
exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, sin(3)).n()
assert sin(cos(x)).rewrite(
exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, cos(3)).n()
assert sin(tan(x)).rewrite(
exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, tan(3)).n()
assert sin(cot(x)).rewrite(
exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, cot(3)).n()
assert sin(log(x)).rewrite(Pow) == I*x**-I / 2 - I*x**I /2
assert sin(x).rewrite(csc) == 1/csc(x)
def test_sin_expansion():
# Note: these formulas are not unique. The ones here come from the
# Chebyshev formulas.
assert sin(x + y).expand(trig=True) == sin(x)*cos(y) + cos(x)*sin(y)
assert sin(x - y).expand(trig=True) == sin(x)*cos(y) - cos(x)*sin(y)
assert sin(y - x).expand(trig=True) == cos(x)*sin(y) - sin(x)*cos(y)
assert sin(2*x).expand(trig=True) == 2*sin(x)*cos(x)
assert sin(3*x).expand(trig=True) == -4*sin(x)**3 + 3*sin(x)
assert sin(4*x).expand(trig=True) == -8*sin(x)**3*cos(x) + 4*sin(x)*cos(x)
assert sin(2).expand(trig=True) == 2*sin(1)*cos(1)
assert sin(3).expand(trig=True) == -4*sin(1)**3 + 3*sin(1)
def test_trig_symmetry():
assert sin(-x) == -sin(x)
assert cos(-x) == cos(x)
assert tan(-x) == -tan(x)
assert cot(-x) == -cot(x)
assert sin(x + pi) == -sin(x)
assert sin(x + 2*pi) == sin(x)
assert sin(x + 3*pi) == -sin(x)
assert sin(x + 4*pi) == sin(x)
assert sin(x - 5*pi) == -sin(x)
assert cos(x + pi) == -cos(x)
assert cos(x + 2*pi) == cos(x)
assert cos(x + 3*pi) == -cos(x)
assert cos(x + 4*pi) == cos(x)
assert cos(x - 5*pi) == -cos(x)
assert tan(x + pi) == tan(x)
assert tan(x - 3*pi) == tan(x)
assert cot(x + pi) == cot(x)
assert cot(x - 3*pi) == cot(x)
assert sin(pi/2 - x) == cos(x)
assert sin(3*pi/2 - x) == -cos(x)
assert sin(5*pi/2 - x) == cos(x)
assert cos(pi/2 - x) == sin(x)
assert cos(3*pi/2 - x) == -sin(x)
assert cos(5*pi/2 - x) == sin(x)
assert tan(pi/2 - x) == cot(x)
assert tan(3*pi/2 - x) == cot(x)
assert tan(5*pi/2 - x) == cot(x)
assert cot(pi/2 - x) == tan(x)
assert cot(3*pi/2 - x) == tan(x)
assert cot(5*pi/2 - x) == tan(x)
assert sin(pi/2 + x) == cos(x)
assert cos(pi/2 + x) == -sin(x)
assert tan(pi/2 + x) == -cot(x)
assert cot(pi/2 + x) == -tan(x)
def test_cos():
x, y = symbols('x y')
assert cos.nargs == FiniteSet(1)
assert cos(nan) == nan
assert cos(oo*I) == oo
assert cos(-oo*I) == oo
assert cos(0) == 1
assert cos(acos(x)) == x
assert cos(atan(x)) == 1 / sqrt(1 + x**2)
assert cos(asin(x)) == sqrt(1 - x**2)
assert cos(acot(x)) == 1 / sqrt(1 + 1 / x**2)
assert cos(atan2(y, x)) == x / sqrt(x**2 + y**2)
assert cos(pi*I) == cosh(pi)
assert cos(-pi*I) == cosh(pi)
assert cos(-2*I) == cosh(2)
assert cos(pi/2) == 0
assert cos(-pi/2) == 0
assert cos(pi/2) == 0
assert cos(-pi/2) == 0
assert cos((-3*10**73 + 1)*pi/2) == 0
assert cos((7*10**103 + 1)*pi/2) == 0
n = symbols('n', integer=True, even=False)
e = symbols('e', even=True)
assert cos(pi*n/2) == 0
assert cos(pi*e/2) == (-1)**(e/2)
assert cos(pi) == -1
assert cos(-pi) == -1
assert cos(2*pi) == 1
assert cos(5*pi) == -1
assert cos(8*pi) == 1
assert cos(pi/3) == S.Half
assert cos(-2*pi/3) == -S.Half
assert cos(pi/4) == S.Half*sqrt(2)
assert cos(-pi/4) == S.Half*sqrt(2)
assert cos(11*pi/4) == -S.Half*sqrt(2)
assert cos(-3*pi/4) == -S.Half*sqrt(2)
assert cos(pi/6) == S.Half*sqrt(3)
assert cos(-pi/6) == S.Half*sqrt(3)
assert cos(7*pi/6) == -S.Half*sqrt(3)
assert cos(-5*pi/6) == -S.Half*sqrt(3)
assert cos(1*pi/5) == (sqrt(5) + 1)/4
assert cos(2*pi/5) == (sqrt(5) - 1)/4
assert cos(3*pi/5) == -cos(2*pi/5)
assert cos(4*pi/5) == -cos(1*pi/5)
assert cos(6*pi/5) == -cos(1*pi/5)
assert cos(8*pi/5) == cos(2*pi/5)
assert cos(-1273*pi/5) == -cos(2*pi/5)
assert cos(pi/8) == sqrt((2 + sqrt(2))/4)
assert cos(104*pi/105) == -cos(pi/105)
assert cos(106*pi/105) == -cos(pi/105)
assert cos(-104*pi/105) == -cos(pi/105)
assert cos(-106*pi/105) == -cos(pi/105)
assert cos(x*I) == cosh(x)
assert cos(k*pi*I) == cosh(k*pi)
assert cos(r).is_real is True
assert cos(0, evaluate=False).is_algebraic
assert cos(a).is_algebraic is None
assert cos(na).is_algebraic is False
q = Symbol('q', rational=True)
assert cos(pi*q).is_algebraic
assert cos(2*pi/7).is_algebraic
assert cos(k*pi) == (-1)**k
assert cos(2*k*pi) == 1
for d in list(range(1, 22)) + [60, 85]:
for n in xrange(0, 2*d + 1):
x = n*pi/d
e = abs( float(cos(x)) - cos(float(x)) )
assert e < 1e-12
def test_issue_6190():
c = Float('123456789012345678901234567890.25', '')
for cls in [sin, cos, tan, cot]:
assert cls(c*pi) == cls(pi/4)
assert cls(4.125*pi) == cls(pi/8)
assert cls(4.7*pi) == cls((4.7 % 2)*pi)
def test_cos_series():
assert cos(x).series(x, 0, 9) == \
1 - x**2/2 + x**4/24 - x**6/720 + x**8/40320 + O(x**9)
def test_cos_rewrite():
assert cos(x).rewrite(exp) == exp(I*x)/2 + exp(-I*x)/2
assert cos(x).rewrite(tan) == (1 - tan(x/2)**2)/(1 + tan(x/2)**2)
assert cos(x).rewrite(cot) == -(1 - cot(x/2)**2)/(1 + cot(x/2)**2)
assert cos(sinh(x)).rewrite(
exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, sinh(3)).n()
assert cos(cosh(x)).rewrite(
exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, cosh(3)).n()
assert cos(tanh(x)).rewrite(
exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, tanh(3)).n()
assert cos(coth(x)).rewrite(
exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, coth(3)).n()
assert cos(sin(x)).rewrite(
exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, sin(3)).n()
assert cos(cos(x)).rewrite(
exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, cos(3)).n()
assert cos(tan(x)).rewrite(
exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, tan(3)).n()
assert cos(cot(x)).rewrite(
exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, cot(3)).n()
assert cos(log(x)).rewrite(Pow) == x**I/2 + x**-I/2
assert cos(x).rewrite(sec) == 1/sec(x)
def test_cos_expansion():
assert cos(x + y).expand(trig=True) == cos(x)*cos(y) - sin(x)*sin(y)
assert cos(x - y).expand(trig=True) == cos(x)*cos(y) + sin(x)*sin(y)
assert cos(y - x).expand(trig=True) == cos(x)*cos(y) + sin(x)*sin(y)
assert cos(2*x).expand(trig=True) == 2*cos(x)**2 - 1
assert cos(3*x).expand(trig=True) == 4*cos(x)**3 - 3*cos(x)
assert cos(4*x).expand(trig=True) == 8*cos(x)**4 - 8*cos(x)**2 + 1
assert cos(2).expand(trig=True) == 2*cos(1)**2 - 1
assert cos(3).expand(trig=True) == 4*cos(1)**3 - 3*cos(1)
def test_tan():
assert tan(nan) == nan
assert tan.nargs == FiniteSet(1)
assert tan(oo*I) == I
assert tan(-oo*I) == -I
assert tan(0) == 0
assert tan(atan(x)) == x
assert tan(asin(x)) == x / sqrt(1 - x**2)
assert tan(acos(x)) == sqrt(1 - x**2) / x
assert tan(acot(x)) == 1 / x
assert tan(atan2(y, x)) == y/x
assert tan(pi*I) == tanh(pi)*I
assert tan(-pi*I) == -tanh(pi)*I
assert tan(-2*I) == -tanh(2)*I
assert tan(pi) == 0
assert tan(-pi) == 0
assert tan(2*pi) == 0
assert tan(-2*pi) == 0
assert tan(-3*10**73*pi) == 0
assert tan(pi/2) == zoo
assert tan(3*pi/2) == zoo
assert tan(pi/3) == sqrt(3)
assert tan(-2*pi/3) == sqrt(3)
assert tan(pi/4) == S.One
assert tan(-pi/4) == -S.One
assert tan(17*pi/4) == S.One
assert tan(-3*pi/4) == S.One
assert tan(pi/6) == 1/sqrt(3)
assert tan(-pi/6) == -1/sqrt(3)
assert tan(7*pi/6) == 1/sqrt(3)
assert tan(-5*pi/6) == 1/sqrt(3)
assert tan(x*I) == tanh(x)*I
assert tan(k*pi) == 0
assert tan(17*k*pi) == 0
assert tan(k*pi*I) == tanh(k*pi)*I
assert tan(r).is_real is True
assert tan(0, evaluate=False).is_algebraic
assert tan(a).is_algebraic is None
assert tan(na).is_algebraic is False
assert tan(10*pi/7) == tan(3*pi/7)
assert tan(11*pi/7) == -tan(3*pi/7)
assert tan(-11*pi/7) == tan(3*pi/7)
def test_tan_series():
assert tan(x).series(x, 0, 9) == \
x + x**3/3 + 2*x**5/15 + 17*x**7/315 + O(x**9)
def test_tan_rewrite():
neg_exp, pos_exp = exp(-x*I), exp(x*I)
assert tan(x).rewrite(exp) == I*(neg_exp - pos_exp)/(neg_exp + pos_exp)
assert tan(x).rewrite(sin) == 2*sin(x)**2/sin(2*x)
assert tan(x).rewrite(cos) == -cos(x + S.Pi/2)/cos(x)
assert tan(x).rewrite(cot) == 1/cot(x)
assert tan(sinh(x)).rewrite(
exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, sinh(3)).n()
assert tan(cosh(x)).rewrite(
exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, cosh(3)).n()
assert tan(tanh(x)).rewrite(
exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, tanh(3)).n()
assert tan(coth(x)).rewrite(
exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, coth(3)).n()
assert tan(sin(x)).rewrite(
exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, sin(3)).n()
assert tan(cos(x)).rewrite(
exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, cos(3)).n()
assert tan(tan(x)).rewrite(
exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, tan(3)).n()
assert tan(cot(x)).rewrite(
exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, cot(3)).n()
assert tan(log(x)).rewrite(Pow) == I*(x**-I - x**I)/(x**-I + x**I)
assert 0 == (cos(pi/15)*tan(pi/15) - sin(pi/15)).rewrite(pow)
assert tan(pi/19).rewrite(pow) == tan(pi/19)
assert tan(8*pi/19).rewrite(sqrt) == tan(8*pi/19)
def test_tan_subs():
assert tan(x).subs(tan(x), y) == y
assert tan(x).subs(x, y) == tan(y)
assert tan(x).subs(x, S.Pi/2) == zoo
assert tan(x).subs(x, 3*S.Pi/2) == zoo
def test_tan_expansion():
assert tan(x + y).expand(trig=True) == ((tan(x) + tan(y))/(1 - tan(x)*tan(y))).expand()
assert tan(x - y).expand(trig=True) == ((tan(x) - tan(y))/(1 + tan(x)*tan(y))).expand()
assert tan(x + y + z).expand(trig=True) == (
(tan(x) + tan(y) + tan(z) - tan(x)*tan(y)*tan(z))/
(1 - tan(x)*tan(y) - tan(x)*tan(z) - tan(y)*tan(z))).expand()
assert 0 == tan(2*x).expand(trig=True).rewrite(tan).subs([(tan(x), Rational(1, 7))])*24 - 7
assert 0 == tan(3*x).expand(trig=True).rewrite(tan).subs([(tan(x), Rational(1, 5))])*55 - 37
assert 0 == tan(4*x - pi/4).expand(trig=True).rewrite(tan).subs([(tan(x), Rational(1, 5))])*239 - 1
def test_cot():
assert cot(nan) == nan
assert cot.nargs == FiniteSet(1)
assert cot(oo*I) == -I
assert cot(-oo*I) == I
assert cot(0) == zoo
assert cot(2*pi) == zoo
assert cot(acot(x)) == x
assert cot(atan(x)) == 1 / x
assert cot(asin(x)) == sqrt(1 - x**2) / x
assert cot(acos(x)) == x / sqrt(1 - x**2)
assert cot(atan2(y, x)) == x/y
assert cot(pi*I) == -coth(pi)*I
assert cot(-pi*I) == coth(pi)*I
assert cot(-2*I) == coth(2)*I
assert cot(pi) == cot(2*pi) == cot(3*pi)
assert cot(-pi) == cot(-2*pi) == cot(-3*pi)
assert cot(pi/2) == 0
assert cot(-pi/2) == 0
assert cot(5*pi/2) == 0
assert cot(7*pi/2) == 0
assert cot(pi/3) == 1/sqrt(3)
assert cot(-2*pi/3) == 1/sqrt(3)
assert cot(pi/4) == S.One
assert cot(-pi/4) == -S.One
assert cot(17*pi/4) == S.One
assert cot(-3*pi/4) == S.One
assert cot(pi/6) == sqrt(3)
assert cot(-pi/6) == -sqrt(3)
assert cot(7*pi/6) == sqrt(3)
assert cot(-5*pi/6) == sqrt(3)
assert cot(x*I) == -coth(x)*I
assert cot(k*pi*I) == -coth(k*pi)*I
assert cot(r).is_real is True
assert cot(a).is_algebraic is None
assert cot(na).is_algebraic is False
assert cot(10*pi/7) == cot(3*pi/7)
assert cot(11*pi/7) == -cot(3*pi/7)
assert cot(-11*pi/7) == cot(3*pi/7)
assert cot(x).is_finite is None
assert cot(r).is_finite is None
i = Symbol('i', imaginary=True)
assert cot(i).is_finite is True
assert cot(x).subs(x, 3*pi) == zoo
def test_cot_series():
assert cot(x).series(x, 0, 9) == \
1/x - x/3 - x**3/45 - 2*x**5/945 - x**7/4725 + O(x**9)
# issue 6210
assert cot(x**4 + x**5).series(x, 0, 1) == \
x**(-4) - 1/x**3 + x**(-2) - 1/x + 1 + O(x)
def test_cot_rewrite():
neg_exp, pos_exp = exp(-x*I), exp(x*I)
assert cot(x).rewrite(exp) == I*(pos_exp + neg_exp)/(pos_exp - neg_exp)
assert cot(x).rewrite(sin) == 2*sin(2*x)/sin(x)**2
assert cot(x).rewrite(cos) == -cos(x)/cos(x + S.Pi/2)
assert cot(x).rewrite(tan) == 1/tan(x)
assert cot(sinh(x)).rewrite(
exp).subs(x, 3).n() == cot(x).rewrite(exp).subs(x, sinh(3)).n()
assert cot(cosh(x)).rewrite(
exp).subs(x, 3).n() == cot(x).rewrite(exp).subs(x, cosh(3)).n()
assert cot(tanh(x)).rewrite(
exp).subs(x, 3).n() == cot(x).rewrite(exp).subs(x, tanh(3)).n()
assert cot(coth(x)).rewrite(
exp).subs(x, 3).n() == cot(x).rewrite(exp).subs(x, coth(3)).n()
assert cot(sin(x)).rewrite(
exp).subs(x, 3).n() == cot(x).rewrite(exp).subs(x, sin(3)).n()
assert cot(tan(x)).rewrite(
exp).subs(x, 3).n() == cot(x).rewrite(exp).subs(x, tan(3)).n()
assert cot(log(x)).rewrite(Pow) == -I*(x**-I + x**I)/(x**-I - x**I)
assert cot(4*pi/15).rewrite(pow) == (cos(4*pi/15)/sin(4*pi/15)).rewrite(pow)
assert cot(pi/19).rewrite(pow) == cot(pi/19)
assert cot(pi/19).rewrite(sqrt) == cot(pi/19)
def test_cot_subs():
assert cot(x).subs(cot(x), y) == y
assert cot(x).subs(x, y) == cot(y)
assert cot(x).subs(x, 0) == zoo
assert cot(x).subs(x, S.Pi) == zoo
def test_cot_expansion():
assert cot(x + y).expand(trig=True) == ((cot(x)*cot(y) - 1)/(cot(x) + cot(y))).expand()
assert cot(x - y).expand(trig=True) == (-(cot(x)*cot(y) + 1)/(cot(x) - cot(y))).expand()
assert cot(x + y + z).expand(trig=True) == (
(cot(x)*cot(y)*cot(z) - cot(x) - cot(y) - cot(z))/
(-1 + cot(x)*cot(y) + cot(x)*cot(z) + cot(y)*cot(z))).expand()
assert cot(3*x).expand(trig=True) == ((cot(x)**3 - 3*cot(x))/(3*cot(x)**2 - 1)).expand()
assert 0 == cot(2*x).expand(trig=True).rewrite(cot).subs([(cot(x), Rational(1, 3))])*3 + 4
assert 0 == cot(3*x).expand(trig=True).rewrite(cot).subs([(cot(x), Rational(1, 5))])*55 - 37
assert 0 == cot(4*x - pi/4).expand(trig=True).rewrite(cot).subs([(cot(x), Rational(1, 7))])*863 + 191
def test_asin():
assert asin(nan) == nan
assert asin.nargs == FiniteSet(1)
assert asin(oo) == -I*oo
assert asin(-oo) == I*oo
# Note: asin(-x) = - asin(x)
assert asin(0) == 0
assert asin(1) == pi/2
assert asin(-1) == -pi/2
assert asin(sqrt(3)/2) == pi/3
assert asin(-sqrt(3)/2) == -pi/3
assert asin(sqrt(2)/2) == pi/4
assert asin(-sqrt(2)/2) == -pi/4
assert asin(sqrt((5 - sqrt(5))/8)) == pi/5
assert asin(-sqrt((5 - sqrt(5))/8)) == -pi/5
assert asin(Rational(1, 2)) == pi/6
assert asin(-Rational(1, 2)) == -pi/6
assert asin((sqrt(2 - sqrt(2)))/2) == pi/8
assert asin(-(sqrt(2 - sqrt(2)))/2) == -pi/8
assert asin((sqrt(5) - 1)/4) == pi/10
assert asin(-(sqrt(5) - 1)/4) == -pi/10
assert asin((sqrt(3) - 1)/sqrt(2**3)) == pi/12
assert asin(-(sqrt(3) - 1)/sqrt(2**3)) == -pi/12
assert asin(x).diff(x) == 1/sqrt(1 - x**2)
assert asin(0.2).is_real is True
assert asin(-2).is_real is False
assert asin(r).is_real is None
assert asin(-2*I) == -I*asinh(2)
assert asin(Rational(1, 7), evaluate=False).is_positive is True
assert asin(Rational(-1, 7), evaluate=False).is_positive is False
assert asin(p).is_positive is None
def test_asin_series():
assert asin(x).series(x, 0, 9) == \
x + x**3/6 + 3*x**5/40 + 5*x**7/112 + O(x**9)
t5 = asin(x).taylor_term(5, x)
assert t5 == 3*x**5/40
assert asin(x).taylor_term(7, x, t5, 0) == 5*x**7/112
def test_asin_rewrite():
assert asin(x).rewrite(log) == -I*log(I*x + sqrt(1 - x**2))
assert asin(x).rewrite(atan) == 2*atan(x/(1 + sqrt(1 - x**2)))
assert asin(x).rewrite(acos) == S.Pi/2 - acos(x)
assert asin(x).rewrite(acot) == 2*acot((sqrt(-x**2 + 1) + 1)/x)
assert asin(x).rewrite(asec) == -asec(1/x) + pi/2
assert asin(x).rewrite(acsc) == acsc(1/x)
def test_acos():
assert acos(nan) == nan
assert acos.nargs == FiniteSet(1)
assert acos(oo) == I*oo
assert acos(-oo) == -I*oo
# Note: acos(-x) = pi - acos(x)
assert acos(0) == pi/2
assert acos(Rational(1, 2)) == pi/3
assert acos(-Rational(1, 2)) == (2*pi)/3
assert acos(1) == 0
assert acos(-1) == pi
assert acos(sqrt(2)/2) == pi/4
assert acos(-sqrt(2)/2) == (3*pi)/4
assert acos(x).diff(x) == -1/sqrt(1 - x**2)
assert acos(0.2).is_real is True
assert acos(-2).is_real is False
assert acos(r).is_real is None
assert acos(Rational(1, 7), evaluate=False).is_positive is True
assert acos(Rational(-1, 7), evaluate=False).is_positive is True
assert acos(Rational(3, 2), evaluate=False).is_positive is False
assert acos(p).is_positive is None
assert acos(2 + p).conjugate() != acos(10 + p)
assert acos(-3 + n).conjugate() != acos(-3 + n)
assert acos(S.One/3).conjugate() == acos(S.One/3)
assert acos(-S.One/3).conjugate() == acos(-S.One/3)
assert acos(p + n*I).conjugate() == acos(p - n*I)
assert acos(z).conjugate() != acos(conjugate(z))
def test_acos_series():
assert acos(x).series(x, 0, 8) == \
pi/2 - x - x**3/6 - 3*x**5/40 - 5*x**7/112 + O(x**8)
assert acos(x).series(x, 0, 8) == pi/2 - asin(x).series(x, 0, 8)
t5 = acos(x).taylor_term(5, x)
assert t5 == -3*x**5/40
assert acos(x).taylor_term(7, x, t5, 0) == -5*x**7/112
def test_acos_rewrite():
assert acos(x).rewrite(log) == pi/2 + I*log(I*x + sqrt(1 - x**2))
assert acos(x).rewrite(atan) == \
atan(sqrt(1 - x**2)/x) + (pi/2)*(1 - x*sqrt(1/x**2))
assert acos(0).rewrite(atan) == S.Pi/2
assert acos(0.5).rewrite(atan) == acos(0.5).rewrite(log)
assert acos(x).rewrite(asin) == S.Pi/2 - asin(x)
assert acos(x).rewrite(acot) == -2*acot((sqrt(-x**2 + 1) + 1)/x) + pi/2
assert acos(x).rewrite(asec) == asec(1/x)
assert acos(x).rewrite(acsc) == -acsc(1/x) + pi/2
def test_atan():
assert atan(nan) == nan
assert atan.nargs == FiniteSet(1)
assert atan(oo) == pi/2
assert atan(-oo) == -pi/2
assert atan(0) == 0
assert atan(1) == pi/4
assert atan(sqrt(3)) == pi/3
assert atan(oo) == pi/2
assert atan(x).diff(x) == 1/(1 + x**2)
assert atan(r).is_real is True
assert atan(-2*I) == -I*atanh(2)
assert atan(p).is_positive is True
assert atan(n).is_positive is False
assert atan(x).is_positive is None
def test_atan_rewrite():
assert atan(x).rewrite(log) == I*log((1 - I*x)/(1 + I*x))/2
assert atan(x).rewrite(asin) == (-asin(1/sqrt(x**2 + 1)) + pi/2)*sqrt(x**2)/x
assert atan(x).rewrite(acos) == sqrt(x**2)*acos(1/sqrt(x**2 + 1))/x
assert atan(x).rewrite(acot) == acot(1/x)
assert atan(x).rewrite(asec) == sqrt(x**2)*asec(sqrt(x**2 + 1))/x
assert atan(x).rewrite(acsc) == (-acsc(sqrt(x**2 + 1)) + pi/2)*sqrt(x**2)/x
def test_atan2():
assert atan2.nargs == FiniteSet(2)
assert atan2(0, 0) == S.NaN
assert atan2(0, 1) == 0
assert atan2(1, 1) == pi/4
assert atan2(1, 0) == pi/2
assert atan2(1, -1) == 3*pi/4
assert atan2(0, -1) == pi
assert atan2(-1, -1) == -3*pi/4
assert atan2(-1, 0) == -pi/2
assert atan2(-1, 1) == -pi/4
i = symbols('i', imaginary=True)
r = symbols('r', real=True)
eq = atan2(r, i)
ans = -I*log((i + I*r)/sqrt(i**2 + r**2))
reps = ((r, 2), (i, I))
assert eq.subs(reps) == ans.subs(reps)
u = Symbol("u", positive=True)
assert atan2(0, u) == 0
u = Symbol("u", negative=True)
assert atan2(0, u) == pi
assert atan2(y, oo) == 0
assert atan2(y, -oo)== 2*pi*Heaviside(re(y)) - pi
assert atan2(y, x).rewrite(log) == -I*log((x + I*y)/sqrt(x**2 + y**2))
assert atan2(y, x).rewrite(atan) == 2*atan(y/(x + sqrt(x**2 + y**2)))
ex = atan2(y, x) - arg(x + I*y)
assert ex.subs({x:2, y:3}).rewrite(arg) == 0
assert ex.subs({x:2, y:3*I}).rewrite(arg) == -pi - I*log(sqrt(5)*I/5)
assert ex.subs({x:2*I, y:3}).rewrite(arg) == -pi/2 - I*log(sqrt(5)*I)
assert ex.subs({x:2*I, y:3*I}).rewrite(arg) == -pi + atan(2/S(3)) + atan(3/S(2))
i = symbols('i', imaginary=True)
r = symbols('r', real=True)
e = atan2(i, r)
rewrite = e.rewrite(arg)
reps = {i: I, r: -2}
assert rewrite == -I*log(abs(I*i + r)/sqrt(abs(i**2 + r**2))) + arg((I*i + r)/sqrt(i**2 + r**2))
assert (e - rewrite).subs(reps).equals(0)
assert conjugate(atan2(x, y)) == atan2(conjugate(x), conjugate(y))
assert diff(atan2(y, x), x) == -y/(x**2 + y**2)
assert diff(atan2(y, x), y) == x/(x**2 + y**2)
assert simplify(diff(atan2(y, x).rewrite(log), x)) == -y/(x**2 + y**2)
assert simplify(diff(atan2(y, x).rewrite(log), y)) == x/(x**2 + y**2)
def test_acot():
assert acot(nan) == nan
assert acot.nargs == FiniteSet(1)
assert acot(-oo) == 0
assert acot(oo) == 0
assert acot(1) == pi/4
assert acot(0) == pi/2
assert acot(sqrt(3)/3) == pi/3
assert acot(1/sqrt(3)) == pi/3
assert acot(-1/sqrt(3)) == -pi/3
assert acot(x).diff(x) == -1/(1 + x**2)
assert acot(r).is_real is True
assert acot(I*pi) == -I*acoth(pi)
assert acot(-2*I) == I*acoth(2)
assert acot(x).is_positive is None
assert acot(r).is_positive is True
assert acot(p).is_positive is True
assert acot(I).is_positive is False
def test_acot_rewrite():
assert acot(x).rewrite(log) == I*log((x - I)/(x + I))/2
assert acot(x).rewrite(asin) == x*(-asin(sqrt(-x**2)/sqrt(-x**2 - 1)) + pi/2)*sqrt(x**(-2))
assert acot(x).rewrite(acos) == x*sqrt(x**(-2))*acos(sqrt(-x**2)/sqrt(-x**2 - 1))
assert acot(x).rewrite(atan) == atan(1/x)
assert acot(x).rewrite(asec) == x*sqrt(x**(-2))*asec(sqrt((x**2 + 1)/x**2))
assert acot(x).rewrite(acsc) == x*(-acsc(sqrt((x**2 + 1)/x**2)) + pi/2)*sqrt(x**(-2))
def test_attributes():
assert sin(x).args == (x,)
def test_sincos_rewrite():
assert sin(pi/2 - x) == cos(x)
assert sin(pi - x) == sin(x)
assert cos(pi/2 - x) == sin(x)
assert cos(pi - x) == -cos(x)
def _check_even_rewrite(func, arg):
"""Checks that the expr has been rewritten using f(-x) -> f(x)
arg : -x
"""
return func(arg).args[0] == -arg
def _check_odd_rewrite(func, arg):
"""Checks that the expr has been rewritten using f(-x) -> -f(x)
arg : -x
"""
return func(arg).func.is_Mul
def _check_no_rewrite(func, arg):
"""Checks that the expr is not rewritten"""
return func(arg).args[0] == arg
def test_evenodd_rewrite():
a = cos(2) # negative
b = sin(1) # positive
even = [cos]
odd = [sin, tan, cot, asin, atan, acot]
with_minus = [-1, -2**1024 * E, -pi/105, -x*y, -x - y]
for func in even:
for expr in with_minus:
assert _check_even_rewrite(func, expr)
assert _check_no_rewrite(func, a*b)
assert func(
x - y) == func(y - x) # it doesn't matter which form is canonical
for func in odd:
for expr in with_minus:
assert _check_odd_rewrite(func, expr)
assert _check_no_rewrite(func, a*b)
assert func(
x - y) == -func(y - x) # it doesn't matter which form is canonical
def test_issue_4547():
assert sin(x).rewrite(cot) == 2*cot(x/2)/(1 + cot(x/2)**2)
assert cos(x).rewrite(cot) == -(1 - cot(x/2)**2)/(1 + cot(x/2)**2)
assert tan(x).rewrite(cot) == 1/cot(x)
assert cot(x).fdiff() == -1 - cot(x)**2
def test_as_leading_term_issue_5272():
assert sin(x).as_leading_term(x) == x
assert cos(x).as_leading_term(x) == 1
assert tan(x).as_leading_term(x) == x
assert cot(x).as_leading_term(x) == 1/x
assert asin(x).as_leading_term(x) == x
assert acos(x).as_leading_term(x) == x
assert atan(x).as_leading_term(x) == x
assert acot(x).as_leading_term(x) == x
def test_leading_terms():
for func in [sin, cos, tan, cot, asin, acos, atan, acot]:
for arg in (1/x, S.Half):
eq = func(arg)
assert eq.as_leading_term(x) == eq
def test_atan2_expansion():
assert cancel(atan2(x**2, x + 1).diff(x) - atan(x**2/(x + 1)).diff(x)) == 0
assert cancel(atan(y/x).series(y, 0, 5) - atan2(y, x).series(y, 0, 5)
+ atan2(0, x) - atan(0)) == O(y**5)
assert cancel(atan(y/x).series(x, 1, 4) - atan2(y, x).series(x, 1, 4)
+ atan2(y, 1) - atan(y)) == O((x - 1)**4, (x, 1))
assert cancel(atan((y + x)/x).series(x, 1, 3) - atan2(y + x, x).series(x, 1, 3)
+ atan2(1 + y, 1) - atan(1 + y)) == O((x - 1)**3, (x, 1))
assert Matrix([atan2(y, x)]).jacobian([y, x]) == \
Matrix([[x/(y**2 + x**2), -y/(y**2 + x**2)]])
def test_aseries():
def t(n, v, d, e):
assert abs(
n(1/v).evalf() - n(1/x).series(x, dir=d).removeO().subs(x, v)) < e
t(atan, 0.1, '+', 1e-5)
t(atan, -0.1, '-', 1e-5)
t(acot, 0.1, '+', 1e-5)
t(acot, -0.1, '-', 1e-5)
def test_issue_4420():
i = Symbol('i', integer=True)
e = Symbol('e', even=True)
o = Symbol('o', odd=True)
# unknown parity for variable
assert cos(4*i*pi) == 1
assert sin(4*i*pi) == 0
assert tan(4*i*pi) == 0
assert cot(4*i*pi) == zoo
assert cos(3*i*pi) == cos(pi*i) # +/-1
assert sin(3*i*pi) == 0
assert tan(3*i*pi) == 0
assert cot(3*i*pi) == zoo
assert cos(4.0*i*pi) == 1
assert sin(4.0*i*pi) == 0
assert tan(4.0*i*pi) == 0
assert cot(4.0*i*pi) == zoo
assert cos(3.0*i*pi) == cos(pi*i) # +/-1
assert sin(3.0*i*pi) == 0
assert tan(3.0*i*pi) == 0
assert cot(3.0*i*pi) == zoo
assert cos(4.5*i*pi) == cos(0.5*pi*i)
assert sin(4.5*i*pi) == sin(0.5*pi*i)
assert tan(4.5*i*pi) == tan(0.5*pi*i)
assert cot(4.5*i*pi) == cot(0.5*pi*i)
# parity of variable is known
assert cos(4*e*pi) == 1
assert sin(4*e*pi) == 0
assert tan(4*e*pi) == 0
assert cot(4*e*pi) == zoo
assert cos(3*e*pi) == 1
assert sin(3*e*pi) == 0
assert tan(3*e*pi) == 0
assert cot(3*e*pi) == zoo
assert cos(4.0*e*pi) == 1
assert sin(4.0*e*pi) == 0
assert tan(4.0*e*pi) == 0
assert cot(4.0*e*pi) == zoo
assert cos(3.0*e*pi) == 1
assert sin(3.0*e*pi) == 0
assert tan(3.0*e*pi) == 0
assert cot(3.0*e*pi) == zoo
assert cos(4.5*e*pi) == cos(0.5*pi*e)
assert sin(4.5*e*pi) == sin(0.5*pi*e)
assert tan(4.5*e*pi) == tan(0.5*pi*e)
assert cot(4.5*e*pi) == cot(0.5*pi*e)
assert cos(4*o*pi) == 1
assert sin(4*o*pi) == 0
assert tan(4*o*pi) == 0
assert cot(4*o*pi) == zoo
assert cos(3*o*pi) == -1
assert sin(3*o*pi) == 0
assert tan(3*o*pi) == 0
assert cot(3*o*pi) == zoo
assert cos(4.0*o*pi) == 1
assert sin(4.0*o*pi) == 0
assert tan(4.0*o*pi) == 0
assert cot(4.0*o*pi) == zoo
assert cos(3.0*o*pi) == -1
assert sin(3.0*o*pi) == 0
assert tan(3.0*o*pi) == 0
assert cot(3.0*o*pi) == zoo
assert cos(4.5*o*pi) == cos(0.5*pi*o)
assert sin(4.5*o*pi) == sin(0.5*pi*o)
assert tan(4.5*o*pi) == tan(0.5*pi*o)
assert cot(4.5*o*pi) == cot(0.5*pi*o)
# x could be imaginary
assert cos(4*x*pi) == cos(4*pi*x)
assert sin(4*x*pi) == sin(4*pi*x)
assert tan(4*x*pi) == tan(4*pi*x)
assert cot(4*x*pi) == cot(4*pi*x)
assert cos(3*x*pi) == cos(3*pi*x)
assert sin(3*x*pi) == sin(3*pi*x)
assert tan(3*x*pi) == tan(3*pi*x)
assert cot(3*x*pi) == cot(3*pi*x)
assert cos(4.0*x*pi) == cos(4.0*pi*x)
assert sin(4.0*x*pi) == sin(4.0*pi*x)
assert tan(4.0*x*pi) == tan(4.0*pi*x)
assert cot(4.0*x*pi) == cot(4.0*pi*x)
assert cos(3.0*x*pi) == cos(3.0*pi*x)
assert sin(3.0*x*pi) == sin(3.0*pi*x)
assert tan(3.0*x*pi) == tan(3.0*pi*x)
assert cot(3.0*x*pi) == cot(3.0*pi*x)
assert cos(4.5*x*pi) == cos(4.5*pi*x)
assert sin(4.5*x*pi) == sin(4.5*pi*x)
assert tan(4.5*x*pi) == tan(4.5*pi*x)
assert cot(4.5*x*pi) == cot(4.5*pi*x)
def test_inverses():
raises(AttributeError, lambda: sin(x).inverse())
raises(AttributeError, lambda: cos(x).inverse())
assert tan(x).inverse() == atan
assert cot(x).inverse() == acot
raises(AttributeError, lambda: csc(x).inverse())
raises(AttributeError, lambda: sec(x).inverse())
assert asin(x).inverse() == sin
assert acos(x).inverse() == cos
assert atan(x).inverse() == tan
assert acot(x).inverse() == cot
def test_real_imag():
a, b = symbols('a b', real=True)
z = a + b*I
for deep in [True, False]:
assert sin(
z).as_real_imag(deep=deep) == (sin(a)*cosh(b), cos(a)*sinh(b))
assert cos(
z).as_real_imag(deep=deep) == (cos(a)*cosh(b), -sin(a)*sinh(b))
assert tan(z).as_real_imag(deep=deep) == (sin(2*a)/(cos(2*a) +
cosh(2*b)), sinh(2*b)/(cos(2*a) + cosh(2*b)))
assert cot(z).as_real_imag(deep=deep) == (-sin(2*a)/(cos(2*a) -
cosh(2*b)), -sinh(2*b)/(cos(2*a) - cosh(2*b)))
assert sin(a).as_real_imag(deep=deep) == (sin(a), 0)
assert cos(a).as_real_imag(deep=deep) == (cos(a), 0)
assert tan(a).as_real_imag(deep=deep) == (tan(a), 0)
assert cot(a).as_real_imag(deep=deep) == (cot(a), 0)
@XFAIL
def test_sin_cos_with_infinity():
# Test for issue 5196
# https://github.com/sympy/sympy/issues/5196
assert sin(oo) == S.NaN
assert cos(oo) == S.NaN
@slow
def test_sincos_rewrite_sqrt():
# equivalent to testing rewrite(pow)
for p in [1, 3, 5, 17]:
for t in [1, 8]:
n = t*p
for i in xrange(1, (n + 1)//2 + 1):
if 1 == gcd(i, n):
x = i*pi/n
s1 = sin(x).rewrite(sqrt)
c1 = cos(x).rewrite(sqrt)
assert not s1.has(cos, sin), "fails for %d*pi/%d" % (i, n)
assert not c1.has(cos, sin), "fails for %d*pi/%d" % (i, n)
assert 1e-3 > abs(sin(x.evalf(5)) - s1.evalf(2)), "fails for %d*pi/%d" % (i, n)
assert 1e-3 > abs(cos(x.evalf(5)) - c1.evalf(2)), "fails for %d*pi/%d" % (i, n)
assert cos(pi/14).rewrite(sqrt) == sqrt(cos(pi/7)/2 + S.Half)
@slow
def test_tancot_rewrite_sqrt():
# equivalent to testing rewrite(pow)
for p in [1, 3, 5, 17]:
for t in [1, 8]:
n = t*p
for i in xrange(1, (n + 1)//2 + 1):
if 1 == gcd(i, n):
x = i*pi/n
if 2*i != n and 3*i != 2*n:
t1 = tan(x).rewrite(sqrt)
assert not t1.has(cot, tan), "fails for %d*pi/%d" % (i, n)
assert 1e-3 > abs( tan(x.evalf(7)) - t1.evalf(4) ), "fails for %d*pi/%d" % (i, n)
if i != 0 and i != n:
c1 = cot(x).rewrite(sqrt)
assert not c1.has(cot, tan), "fails for %d*pi/%d" % (i, n)
assert 1e-3 > abs( cot(x.evalf(7)) - c1.evalf(4) ), "fails for %d*pi/%d" % (i, n)
def test_sec():
x = symbols('x', real=True)
z = symbols('z')
assert sec.nargs == FiniteSet(1)
assert sec(0) == 1
assert sec(pi) == -1
assert sec(pi/2) == zoo
assert sec(-pi/2) == zoo
assert sec(pi/6) == 2*sqrt(3)/3
assert sec(pi/3) == 2
assert sec(5*pi/2) == zoo
assert sec(9*pi/7) == -sec(2*pi/7)
assert sec(I) == 1/cosh(1)
assert sec(x*I) == 1/cosh(x)
assert sec(-x) == sec(x)
assert sec(asec(x)) == x
assert sec(x).rewrite(exp) == 1/(exp(I*x)/2 + exp(-I*x)/2)
assert sec(x).rewrite(sin) == sec(x)
assert sec(x).rewrite(cos) == 1/cos(x)
assert sec(x).rewrite(tan) == (tan(x/2)**2 + 1)/(-tan(x/2)**2 + 1)
assert sec(x).rewrite(pow) == sec(x)
assert sec(x).rewrite(sqrt) == sec(x)
assert sec(z).rewrite(cot) == (cot(z/2)**2 + 1)/(cot(z/2)**2 - 1)
assert sec(z).conjugate() == sec(conjugate(z))
assert (sec(z).as_real_imag() ==
(cos(re(z))*cosh(im(z))/(sin(re(z))**2*sinh(im(z))**2 +
cos(re(z))**2*cosh(im(z))**2),
sin(re(z))*sinh(im(z))/(sin(re(z))**2*sinh(im(z))**2 +
cos(re(z))**2*cosh(im(z))**2)))
assert sec(x).expand(trig=True) == 1/cos(x)
assert sec(2*x).expand(trig=True) == 1/(2*cos(x)**2 - 1)
assert sec(x).is_real == True
assert sec(z).is_real == None
assert sec(a).is_algebraic is None
assert sec(na).is_algebraic is False
assert sec(x).as_leading_term() == sec(x)
assert sec(0).is_finite == True
assert sec(x).is_finite == None
assert sec(pi/2).is_finite == False
assert series(sec(x), x, x0=0, n=6) == 1 + x**2/2 + 5*x**4/24 + O(x**6)
# https://github.com/sympy/sympy/issues/7166
assert series(sqrt(sec(x))) == 1 + x**2/4 + 7*x**4/96 + O(x**6)
# https://github.com/sympy/sympy/issues/7167
assert (series(sqrt(sec(x)), x, x0=pi*3/2, n=4) ==
1/sqrt(x - 3*pi/2) + (x - 3*pi/2)**(S(3)/2)/12 +
(x - 3*pi/2)**(S(7)/2)/160 + O((x - 3*pi/2)**4, (x, 3*pi/2)))
assert sec(x).diff(x) == tan(x)*sec(x)
# Taylor Term checks
assert sec(z).taylor_term(4, z) == 5*z**4/24
assert sec(z).taylor_term(6, z) == 61*z**6/720
assert sec(z).taylor_term(5, z) == 0
def test_csc():
x = symbols('x', real=True)
z = symbols('z')
# https://github.com/sympy/sympy/issues/6707
cosecant = csc('x')
alternate = 1/sin('x')
assert cosecant.equals(alternate) == True
assert alternate.equals(cosecant) == True
assert csc.nargs == FiniteSet(1)
assert csc(0) == zoo
assert csc(pi) == zoo
assert csc(pi/2) == 1
assert csc(-pi/2) == -1
assert csc(pi/6) == 2
assert csc(pi/3) == 2*sqrt(3)/3
assert csc(5*pi/2) == 1
assert csc(9*pi/7) == -csc(2*pi/7)
assert csc(I) == -I/sinh(1)
assert csc(x*I) == -I/sinh(x)
assert csc(-x) == -csc(x)
assert csc(acsc(x)) == x
assert csc(x).rewrite(exp) == 2*I/(exp(I*x) - exp(-I*x))
assert csc(x).rewrite(sin) == 1/sin(x)
assert csc(x).rewrite(cos) == csc(x)
assert csc(x).rewrite(tan) == (tan(x/2)**2 + 1)/(2*tan(x/2))
assert csc(x).rewrite(cot) == (cot(x/2)**2 + 1)/(2*cot(x/2))
assert csc(z).conjugate() == csc(conjugate(z))
assert (csc(z).as_real_imag() ==
(sin(re(z))*cosh(im(z))/(sin(re(z))**2*cosh(im(z))**2 +
cos(re(z))**2*sinh(im(z))**2),
-cos(re(z))*sinh(im(z))/(sin(re(z))**2*cosh(im(z))**2 +
cos(re(z))**2*sinh(im(z))**2)))
assert csc(x).expand(trig=True) == 1/sin(x)
assert csc(2*x).expand(trig=True) == 1/(2*sin(x)*cos(x))
assert csc(x).is_real == True
assert csc(z).is_real == None
assert csc(a).is_algebraic is None
assert csc(na).is_algebraic is False
assert csc(x).as_leading_term() == csc(x)
assert csc(0).is_finite == False
assert csc(x).is_finite == None
assert csc(pi/2).is_finite == True
assert series(csc(x), x, x0=pi/2, n=6) == \
1 + (x - pi/2)**2/2 + 5*(x - pi/2)**4/24 + O((x - pi/2)**6, (x, pi/2))
assert series(csc(x), x, x0=0, n=6) == \
1/x + x/6 + 7*x**3/360 + 31*x**5/15120 + O(x**6)
assert csc(x).diff(x) == -cot(x)*csc(x)
assert csc(x).taylor_term(2, x) == 0
assert csc(x).taylor_term(3, x) == 7*x**3/360
assert csc(x).taylor_term(5, x) == 31*x**5/15120
def test_asec():
assert asec(nan) == nan
assert asec(1) == 0
assert asec(-1) == pi
assert asec(oo) == pi/2
assert asec(-oo) == pi/2
assert asec(zoo) == pi/2
assert asec(x).diff(x) == 1/(x**2*sqrt(1 - 1/x**2))
assert asec(x).as_leading_term(x) == log(x)
assert asec(x).rewrite(log) == I*log(sqrt(1 - 1/x**2) + I/x) + pi/2
assert asec(x).rewrite(asin) == -asin(1/x) + pi/2
assert asec(x).rewrite(acos) == acos(1/x)
assert asec(x).rewrite(atan) == (2*atan(x + sqrt(x**2 - 1)) - pi/2)*sqrt(x**2)/x
assert asec(x).rewrite(acot) == (2*acot(x - sqrt(x**2 - 1)) - pi/2)*sqrt(x**2)/x
assert asec(x).rewrite(acsc) == -acsc(x) + pi/2
def test_acsc():
assert acsc(nan) == nan
assert acsc(1) == pi/2
assert acsc(-1) == -pi/2
assert acsc(oo) == 0
assert acsc(-oo) == 0
assert acsc(zoo) == 0
assert acsc(x).diff(x) == -1/(x**2*sqrt(1 - 1/x**2))
assert acsc(x).as_leading_term(x) == log(x)
assert acsc(x).rewrite(log) == -I*log(sqrt(1 - 1/x**2) + I/x)
assert acsc(x).rewrite(asin) == asin(1/x)
assert acsc(x).rewrite(acos) == -acos(1/x) + pi/2
assert acsc(x).rewrite(atan) == (-atan(sqrt(x**2 - 1)) + pi/2)*sqrt(x**2)/x
assert acsc(x).rewrite(acot) == (-acot(1/sqrt(x**2 - 1)) + pi/2)*sqrt(x**2)/x
assert acsc(x).rewrite(asec) == -asec(x) + pi/2
@XFAIL
@slow
def test_csc_rewrite_failing():
# Move these 2 tests to test_csc() once bugs fixed
# sin(x).rewrite(pow) raises RuntimeError: maximum recursion depth
# https://github.com/sympy/sympy/issues/7171
assert csc(x).rewrite(pow) == csc(x)
assert csc(x).rewrite(sqrt) == csc(x)
| bsd-3-clause | -7,621,424,771,226,412,000 | 32.559363 | 105 | 0.529572 | false |
JT5D/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 7 | 1768 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with a decision tree.
The :ref:`decision trees <tree>`
are used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, they learn local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and fit the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
from sklearn.tree import DecisionTreeRegressor
clf_1 = DecisionTreeRegressor(max_depth=2)
clf_2 = DecisionTreeRegressor(max_depth=5)
clf_3 = DecisionTreeRegressor(max_depth=8)
clf_1.fit(X, y)
clf_2.fit(X, y)
clf_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = clf_1.predict(X_test)
y_2 = clf_2.predict(X_test)
y_3 = clf_3.predict(X_test)
# Plot the results
import pylab as pl
pl.figure()
pl.scatter(y[:, 0], y[:, 1], c="k", label="data")
pl.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
pl.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
pl.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
pl.xlim([-6, 6])
pl.ylim([-6, 6])
pl.xlabel("data")
pl.ylabel("target")
pl.title("Multi-output Decision Tree Regression")
pl.legend()
pl.show()
| bsd-3-clause | 6,395,079,049,915,944,000 | 30.017544 | 76 | 0.630656 | false |
Neozaru/depot_tools | third_party/logilab/common/logging_ext.py | 20 | 6473 | # -*- coding: utf-8 -*-
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Extends the logging module from the standard library."""
__docformat__ = "restructuredtext en"
import os
import sys
import logging
from logilab.common.textutils import colorize_ansi
def set_log_methods(cls, logger):
"""bind standard logger's methods as methods on the class"""
cls.__logger = logger
for attr in ('debug', 'info', 'warning', 'error', 'critical', 'exception'):
setattr(cls, attr, getattr(logger, attr))
def xxx_cyan(record):
if 'XXX' in record.message:
return 'cyan'
class ColorFormatter(logging.Formatter):
"""
    A colorizing Formatter for the standard library's logging module.
    By default, CRITICAL and ERROR messages are colorized in red, WARNING in
    magenta, INFO in green and DEBUG in yellow.
    self.colors is customizable via the 'colors' constructor argument
    (a dictionary mapping level names to color names).
    self.colorfilters is a list of functions that take the LogRecord
    and return a color name or None.
"""
def __init__(self, fmt=None, datefmt=None, colors=None):
logging.Formatter.__init__(self, fmt, datefmt)
self.colorfilters = []
self.colors = {'CRITICAL': 'red',
'ERROR': 'red',
'WARNING': 'magenta',
'INFO': 'green',
'DEBUG': 'yellow',
}
if colors is not None:
assert isinstance(colors, dict)
self.colors.update(colors)
def format(self, record):
msg = logging.Formatter.format(self, record)
if record.levelname in self.colors:
color = self.colors[record.levelname]
return colorize_ansi(msg, color)
else:
for cf in self.colorfilters:
color = cf(record)
if color:
return colorize_ansi(msg, color)
return msg
def set_color_formatter(logger=None, **kw):
"""
    Install a color formatter on the 'logger'. If no logger is given, it
    defaults to the root logger.
    Any additional keyword argument is passed as-is to the ColorFormatter
    constructor.
"""
if logger is None:
logger = logging.getLogger()
if not logger.handlers:
logging.basicConfig()
format_msg = logger.handlers[0].formatter._fmt
fmt = ColorFormatter(format_msg, **kw)
fmt.colorfilters.append(xxx_cyan)
logger.handlers[0].setFormatter(fmt)
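# Illustrative usage sketch (not part of the original module; the 'myapp'
# logger name and the color override are assumptions for demonstration):
# a caller would typically install the colorizing formatter once logging is
# configured, e.g.
#
#     import logging
#     from logilab.common.logging_ext import set_color_formatter
#
#     logging.basicConfig()
#     set_color_formatter(colors={'INFO': 'blue'})
#     logging.getLogger('myapp').info('colorized output')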
LOG_FORMAT = '%(asctime)s - (%(name)s) %(levelname)s: %(message)s'
LOG_DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
def get_handler(debug=False, syslog=False, logfile=None, rotation_parameters=None):
"""get an apropriate handler according to given parameters"""
if os.environ.get('APYCOT_ROOT'):
handler = logging.StreamHandler(sys.stdout)
if debug:
handler = logging.StreamHandler()
elif logfile is None:
if syslog:
from logging import handlers
handler = handlers.SysLogHandler()
else:
handler = logging.StreamHandler()
else:
try:
if rotation_parameters is None:
handler = logging.FileHandler(logfile)
else:
from logging.handlers import TimedRotatingFileHandler
handler = TimedRotatingFileHandler(
logfile, **rotation_parameters)
except IOError:
handler = logging.StreamHandler()
return handler
def get_threshold(debug=False, logthreshold=None):
if logthreshold is None:
if debug:
logthreshold = logging.DEBUG
else:
logthreshold = logging.ERROR
elif isinstance(logthreshold, basestring):
logthreshold = getattr(logging, THRESHOLD_MAP.get(logthreshold,
logthreshold))
return logthreshold
def get_formatter(logformat=LOG_FORMAT, logdateformat=LOG_DATE_FORMAT):
isatty = hasattr(sys.__stdout__, 'isatty') and sys.__stdout__.isatty()
if isatty and sys.platform != 'win32':
fmt = ColorFormatter(logformat, logdateformat)
def col_fact(record):
if 'XXX' in record.message:
return 'cyan'
if 'kick' in record.message:
return 'red'
fmt.colorfilters.append(col_fact)
else:
fmt = logging.Formatter(logformat, logdateformat)
return fmt
def init_log(debug=False, syslog=False, logthreshold=None, logfile=None,
logformat=LOG_FORMAT, logdateformat=LOG_DATE_FORMAT, fmt=None,
rotation_parameters=None, handler=None):
"""init the log service"""
logger = logging.getLogger()
if handler is None:
handler = get_handler(debug, syslog, logfile, rotation_parameters)
    # logging only exposes addHandler and removeHandler (there is no
    # setHandler method), so replace the handler list directly
logger.handlers = [handler]
logthreshold = get_threshold(debug, logthreshold)
logger.setLevel(logthreshold)
if fmt is None:
if debug:
fmt = get_formatter(logformat=logformat, logdateformat=logdateformat)
else:
fmt = logging.Formatter(logformat, logdateformat)
handler.setFormatter(fmt)
return handler
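# Illustrative usage sketch (the log file path and rotation settings below
# are hypothetical, not taken from this module): an application would
# normally call init_log once at startup, e.g.
#
#     from logilab.common.logging_ext import init_log
#
#     init_log(debug=True)                    # colorized console output
#     init_log(logfile='/var/log/myapp.log',
#              rotation_parameters={'when': 'midnight', 'backupCount': 7})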
# map logilab.common.logger thresholds to logging thresholds
THRESHOLD_MAP = {'LOG_DEBUG': 'DEBUG',
'LOG_INFO': 'INFO',
'LOG_NOTICE': 'INFO',
'LOG_WARN': 'WARNING',
'LOG_WARNING': 'WARNING',
'LOG_ERR': 'ERROR',
'LOG_ERROR': 'ERROR',
'LOG_CRIT': 'CRITICAL',
}
| bsd-3-clause | -2,717,760,004,482,268,000 | 35.365169 | 83 | 0.621968 | false |
sasukeh/cinder | cinder/db/sqlalchemy/migrate_repo/versions/025_add_consistencygroup.py | 20 | 5047 | # Copyright (C) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate import ForeignKeyConstraint
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import ForeignKey, MetaData, String, Table
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# New table
consistencygroups = Table(
'consistencygroups', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', String(36), primary_key=True, nullable=False),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('host', String(length=255)),
Column('availability_zone', String(length=255)),
Column('name', String(length=255)),
Column('description', String(length=255)),
Column('volume_type_id', String(length=255)),
Column('status', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
consistencygroups.create()
# New table
cgsnapshots = Table(
'cgsnapshots', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', String(36), primary_key=True, nullable=False),
Column('consistencygroup_id', String(36),
ForeignKey('consistencygroups.id'),
nullable=False),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('name', String(length=255)),
Column('description', String(length=255)),
Column('status', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
cgsnapshots.create()
# Add column to volumes table
volumes = Table('volumes', meta, autoload=True)
consistencygroup_id = Column('consistencygroup_id', String(36),
ForeignKey('consistencygroups.id'))
volumes.create_column(consistencygroup_id)
volumes.update().values(consistencygroup_id=None).execute()
# Add column to snapshots table
snapshots = Table('snapshots', meta, autoload=True)
cgsnapshot_id = Column('cgsnapshot_id', String(36),
ForeignKey('cgsnapshots.id'))
snapshots.create_column(cgsnapshot_id)
snapshots.update().values(cgsnapshot_id=None).execute()
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# Drop column from snapshots table
if migrate_engine.name == 'mysql':
# MySQL cannot drop column cgsnapshot_id until the foreign key
# constraint is removed. So remove the foreign key first, and
# then drop the column.
table = Table('snapshots', meta, autoload=True)
ref_table = Table('snapshots', meta, autoload=True)
params = {'columns': [table.c['cgsnapshot_id']],
'refcolumns': [ref_table.c['id']],
'name': 'snapshots_ibfk_1'}
fkey = ForeignKeyConstraint(**params)
fkey.drop()
snapshots = Table('snapshots', meta, autoload=True)
cgsnapshot_id = snapshots.columns.cgsnapshot_id
snapshots.drop_column(cgsnapshot_id)
# Drop column from volumes table
if migrate_engine.name == 'mysql':
# MySQL cannot drop column consistencygroup_id until the foreign
# key constraint is removed. So remove the foreign key first,
# and then drop the column.
table = Table('volumes', meta, autoload=True)
ref_table = Table('volumes', meta, autoload=True)
params = {'columns': [table.c['consistencygroup_id']],
'refcolumns': [ref_table.c['id']],
'name': 'volumes_ibfk_1'}
fkey = ForeignKeyConstraint(**params)
fkey.drop()
volumes = Table('volumes', meta, autoload=True)
consistencygroup_id = volumes.columns.consistencygroup_id
volumes.drop_column(consistencygroup_id)
# Drop table
cgsnapshots = Table('cgsnapshots', meta, autoload=True)
cgsnapshots.drop()
# Drop table
consistencygroups = Table('consistencygroups', meta, autoload=True)
consistencygroups.drop()
| apache-2.0 | -3,883,090,861,318,809,600 | 37.526718 | 78 | 0.64573 | false |
JasonFruit/hymnal-tools | HtmlEmitter.py | 1 | 1657 | class HtmlEmitter(object):
def emit(self, s):
self.file.write(s)
def emit_line(self, s=""):
self.emit(s)
self.emit("\n")
def initialize(self, filename, title, author, date):
self.file = open(filename, "w")
self.emit_line("""<html>
<head>
<link rel="stylesheet" type="text/css" href="hymnal.css" />
<meta charset="utf-8" />
<title>%s, %s</title>
</head>
<body>""" % (title, author))
self.emit_line("""<h1 class="hymnal_title">%s</h1>""" % title)
self.emit_line("""<h2 class="hymnal_subtitle">%s (%s)</h2>""" % (author, date))
def emit_category(self, category):
self.emit_line("""<h3 class="category">%s</h3>""" % category)
def emit_header(self, num, meter, author):
self.emit_line("""<div class="hymn">""")
if author == "":
self.emit_line(
"""<span class="hymn_num">%s</span>. (<span class="meter">%s</span>)<br />""" %
(num, meter))
else:
self.emit_line(
"""<span class="hymn_num">%s</span>. (<span class="meter">%s</span>) <span class="author">%s</span><br />""" %
(num, meter, author))
def emit_footer(self):
self.emit_line("</div>")
def emit_stanza(self, stanza):
self.emit_line("""<div class="stanza">""")
self.emit("""<span class="stanza_num">%s</span>""" % stanza.num)
for line in stanza:
self.emit_line("%s<br />" % line)
self.emit_line("</div>")
def finalize(self):
self.emit_line("""</body>
</html>""")
self.file.close()
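# Illustrative usage sketch (the hymn data below is hypothetical;
# emit_stanza() expects an object with a `num` attribute that iterates over
# its lines):
#
#     emitter = HtmlEmitter()
#     emitter.initialize("hymnal.html", "Sample Hymnal", "Anonymous", "1850")
#     emitter.emit_category("Morning Hymns")
#     emitter.emit_header(num=1, meter="C.M.", author="I. Watts")
#     emitter.emit_stanza(stanza)   # repeated for each stanza of the hymn
#     emitter.emit_footer()
#     emitter.finalize()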
| mit | 3,116,201,792,270,743,000 | 33.520833 | 126 | 0.503923 | false |
genesi/u-boot-upstream | tools/patman/test.py | 7 | 7788 | #
# Copyright (c) 2011 The Chromium OS Authors.
#
# See file CREDITS for list of people who contributed to this
# project.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
import os
import tempfile
import unittest
import checkpatch
import gitutil
import patchstream
import series
class TestPatch(unittest.TestCase):
"""Test this program
TODO: Write tests for the rest of the functionality
"""
def testBasic(self):
"""Test basic filter operation"""
data='''
From 656c9a8c31fa65859d924cd21da920d6ba537fad Mon Sep 17 00:00:00 2001
From: Simon Glass <[email protected]>
Date: Thu, 28 Apr 2011 09:58:51 -0700
Subject: [PATCH (resend) 3/7] Tegra2: Add more clock support
This adds functions to enable/disable clocks and reset to on-chip peripherals.
BUG=chromium-os:13875
TEST=build U-Boot for Seaboard, boot
Change-Id: I80fe1d0c0b7dd10aa58ce5bb1d9290b6664d5413
Review URL: http://codereview.chromium.org/6900006
Signed-off-by: Simon Glass <[email protected]>
---
arch/arm/cpu/armv7/tegra2/Makefile | 2 +-
arch/arm/cpu/armv7/tegra2/ap20.c | 57 ++----
arch/arm/cpu/armv7/tegra2/clock.c | 163 +++++++++++++++++
'''
expected='''
From 656c9a8c31fa65859d924cd21da920d6ba537fad Mon Sep 17 00:00:00 2001
From: Simon Glass <[email protected]>
Date: Thu, 28 Apr 2011 09:58:51 -0700
Subject: [PATCH (resend) 3/7] Tegra2: Add more clock support
This adds functions to enable/disable clocks and reset to on-chip peripherals.
Signed-off-by: Simon Glass <[email protected]>
---
arch/arm/cpu/armv7/tegra2/Makefile | 2 +-
arch/arm/cpu/armv7/tegra2/ap20.c | 57 ++----
arch/arm/cpu/armv7/tegra2/clock.c | 163 +++++++++++++++++
'''
out = ''
inhandle, inname = tempfile.mkstemp()
infd = os.fdopen(inhandle, 'w')
infd.write(data)
infd.close()
exphandle, expname = tempfile.mkstemp()
expfd = os.fdopen(exphandle, 'w')
expfd.write(expected)
expfd.close()
patchstream.FixPatch(None, inname, series.Series(), None)
rc = os.system('diff -u %s %s' % (inname, expname))
self.assertEqual(rc, 0)
os.remove(inname)
os.remove(expname)
def GetData(self, data_type):
data='''
From 4924887af52713cabea78420eff03badea8f0035 Mon Sep 17 00:00:00 2001
From: Simon Glass <[email protected]>
Date: Thu, 7 Apr 2011 10:14:41 -0700
Subject: [PATCH 1/4] Add microsecond boot time measurement
This defines the basics of a new boot time measurement feature. This allows
logging of very accurate time measurements as the boot proceeds, by using
an available microsecond counter.
%s
---
README | 11 ++++++++
common/bootstage.c | 50 ++++++++++++++++++++++++++++++++++++
include/bootstage.h | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++
include/common.h | 8 ++++++
5 files changed, 141 insertions(+), 0 deletions(-)
create mode 100644 common/bootstage.c
create mode 100644 include/bootstage.h
diff --git a/README b/README
index 6f3748d..f9e4e65 100644
--- a/README
+++ b/README
@@ -2026,6 +2026,17 @@ The following options need to be configured:
example, some LED's) on your board. At the moment,
the following checkpoints are implemented:
+- Time boot progress
+ CONFIG_BOOTSTAGE
+
+ Define this option to enable microsecond boot stage timing
+ on supported platforms. For this to work your platform
+ needs to define a function timer_get_us() which returns the
+ number of microseconds since reset. This would normally
+ be done in your SOC or board timer.c file.
+
+ You can add calls to bootstage_mark() to set time markers.
+
- Standalone program support:
CONFIG_STANDALONE_LOAD_ADDR
diff --git a/common/bootstage.c b/common/bootstage.c
new file mode 100644
index 0000000..2234c87
--- /dev/null
+++ b/common/bootstage.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011, Google Inc. All rights reserved.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+
+/*
+ * This module records the progress of boot and arbitrary commands, and
+ * permits accurate timestamping of each. The records can optionally be
+ * passed to kernel in the ATAGs
+ */
+
+#include <common.h>
+
+
+struct bootstage_record {
+ uint32_t time_us;
+ const char *name;
+};
+
+static struct bootstage_record record[BOOTSTAGE_COUNT];
+
+uint32_t bootstage_mark(enum bootstage_id id, const char *name)
+{
+ struct bootstage_record *rec = &record[id];
+
+ /* Only record the first event for each */
+%sif (!rec->name) {
+ rec->time_us = (uint32_t)timer_get_us();
+ rec->name = name;
+ }
+%sreturn rec->time_us;
+}
--
1.7.3.1
'''
signoff = 'Signed-off-by: Simon Glass <[email protected]>\n'
tab = ' '
if data_type == 'good':
pass
elif data_type == 'no-signoff':
signoff = ''
elif data_type == 'spaces':
tab = ' '
else:
print 'not implemented'
return data % (signoff, tab, tab)
def SetupData(self, data_type):
inhandle, inname = tempfile.mkstemp()
infd = os.fdopen(inhandle, 'w')
data = self.GetData(data_type)
infd.write(data)
infd.close()
return inname
def testCheckpatch(self):
"""Test checkpatch operation"""
inf = self.SetupData('good')
result, problems, err, warn, lines, stdout = checkpatch.CheckPatch(inf)
self.assertEqual(result, True)
self.assertEqual(problems, [])
self.assertEqual(err, 0)
self.assertEqual(warn, 0)
self.assertEqual(lines, 67)
os.remove(inf)
inf = self.SetupData('no-signoff')
result, problems, err, warn, lines, stdout = checkpatch.CheckPatch(inf)
self.assertEqual(result, False)
self.assertEqual(len(problems), 1)
self.assertEqual(err, 1)
self.assertEqual(warn, 0)
self.assertEqual(lines, 67)
os.remove(inf)
inf = self.SetupData('spaces')
result, problems, err, warn, lines, stdout = checkpatch.CheckPatch(inf)
self.assertEqual(result, False)
self.assertEqual(len(problems), 2)
self.assertEqual(err, 0)
self.assertEqual(warn, 2)
self.assertEqual(lines, 67)
os.remove(inf)
if __name__ == "__main__":
unittest.main()
gitutil.RunTests()
| gpl-2.0 | 3,512,452,392,712,941,000 | 30.152 | 79 | 0.660375 | false |
pawelad/verse | tests/projects/test_views.py | 1 | 8264 | """
Test `projects.views` file
"""
from unittest.mock import MagicMock
from django.http import Http404
from django.utils.crypto import get_random_string
from rest_framework import viewsets, status
from rest_framework.reverse import reverse
from rest_framework.test import APIClient
import pytest
from github3.repos import Repository
from checkers.base import BaseVersionChecker, GitHubVersionChecker
from projects import utils
from projects import views
available_projects = {'python': MagicMock(spec=BaseVersionChecker)}
class TestProjectsVersionsViewSet:
"""
Tests for 'views.ProjectsVersionsViewSet'
"""
client = APIClient()
base_name = 'projects'
@pytest.fixture
def instance(self):
return views.ProjectsVersionsViewSet()
def test_view_inheritance(self, instance):
"""Test view inheritance name"""
assert isinstance(instance, viewsets.ReadOnlyModelViewSet)
@pytest.mark.parametrize('suffix, expected', [
('List', 'Projects list'),
('Latest', 'Latest project version'),
('Major', 'Latest major versions'),
('Minor', 'Latest minor versions'),
])
def test_view_get_view_name_method(self, instance, suffix, expected):
"""Test view `get_view_name()` method"""
instance.suffix = suffix
assert instance.get_view_name() == expected
def test_view_get_object_method(self, mocker, instance):
"""Test view `get_object()` method"""
mocker.patch('projects.views.AVAILABLE_CHECKERS', available_projects)
instance.kwargs = {'project': 'python'}
assert isinstance(instance.get_object(), MagicMock)
# Nonexistent project
instance.kwargs = {'project': get_random_string()}
with pytest.raises(Http404):
instance.get_object()
def test_view_list_method(self, mocker):
"""Test view `list()` method"""
projects = {get_random_string(): get_random_string()}
mocked_get_projects = mocker.patch(
'projects.views.utils.get_projects',
)
mocked_get_or_set = mocker.patch(
'projects.views.cache.get_or_set',
return_value=projects
)
url = reverse('{0.base_name}:list'.format(self))
response = self.client.get(url)
assert response.status_code == status.HTTP_200_OK
assert response.data == projects
mocked_get_or_set.assert_called_once_with(
key=utils.AVAILABLE_PROJECTS_KEY,
default=mocked_get_projects.return_value,
timeout=None,
)
def test_view_retrieve_method(self, mocker):
"""Test view `retrieve()` method"""
mocker.patch('projects.views.AVAILABLE_CHECKERS', available_projects)
project = available_projects['python']
latest_version = '0.1.1'
mocked_get_or_set = mocker.patch(
'projects.views.cache.get_or_set', return_value=latest_version,
)
mocked_key = mocker.patch(
'projects.tasks.utils.get_latest_version_key'
)
url = reverse('{0.base_name}:latest'.format(self), args=['python'])
response = self.client.get(url)
assert response.status_code == status.HTTP_200_OK
assert response.data == {
'latest': latest_version,
}
# Wrong project name
url = reverse(
'{0.base_name}:latest'.format(self), args=[get_random_string()],
)
response = self.client.get(url)
assert response.status_code == status.HTTP_404_NOT_FOUND
mocked_key.assert_called_once_with(project.slug)
mocked_get_or_set.assert_called_once_with(
key=mocked_key.return_value,
default=project.get_latest_version,
timeout=60 * 60,
)
def test_view_major_method(self, mocker):
"""Test view `major()` method"""
mocker.patch('projects.views.AVAILABLE_CHECKERS', available_projects)
project = available_projects['python']
latest_versions = {
'1': '1.2.3',
'0': '0.12.0',
}
mocked_get_or_set = mocker.patch(
'projects.views.cache.get_or_set',
return_value=latest_versions,
)
mocked_key = mocker.patch(
'projects.tasks.utils.get_latest_major_versions_key',
)
url = reverse('{0.base_name}:major'.format(self), args=['python'])
response = self.client.get(url)
assert response.status_code == status.HTTP_200_OK
assert response.data == latest_versions
# Wrong project name
url = reverse(
'{0.base_name}:major'.format(self), args=[get_random_string()],
)
response = self.client.get(url)
assert response.status_code == status.HTTP_404_NOT_FOUND
mocked_key.assert_called_once_with(project.slug)
mocked_get_or_set.assert_called_once_with(
key=mocked_key.return_value,
default=project.get_latest_major_versions,
timeout=60 * 60 * 6,
)
def test_view_minor_method(self, mocker):
"""Test view `minor()` method"""
mocker.patch('projects.views.AVAILABLE_CHECKERS', available_projects)
project = available_projects['python']
latest_versions = {
'1.2': '1.2.3',
'1.1': '1.1.4',
'1.0': '1.0.2',
}
mocked_get_or_set = mocker.patch(
'projects.views.cache.get_or_set',
return_value=latest_versions,
)
mocked_key = mocker.patch(
'projects.tasks.utils.get_latest_minor_versions_key',
)
url = reverse('{0.base_name}:minor'.format(self), args=['python'])
response = self.client.get(url)
assert response.status_code == status.HTTP_200_OK
assert response.data == latest_versions
# Wrong project name
url = reverse(
'{0.base_name}:minor'.format(self), args=[get_random_string()],
)
response = self.client.get(url)
assert response.status_code == status.HTTP_404_NOT_FOUND
mocked_key.assert_called_once_with(project.slug)
mocked_get_or_set.assert_called_once_with(
key=mocked_key.return_value,
default=project.get_latest_minor_versions,
timeout=60 * 60 * 6,
)
class TestGitHubProjectsVersionsViewSet:
"""
Tests for 'views.GitHubProjectsVersionsViewSet'
"""
client = APIClient()
base_name = 'github-projects'
@pytest.fixture
def instance(self):
return views.GitHubProjectsVersionsViewSet()
def test_view_inheritance(self, instance):
"""Test view inheritance name"""
assert isinstance(instance, viewsets.ReadOnlyModelViewSet)
@pytest.mark.parametrize('suffix, expected', [
('Latest', 'Latest GitHub repository version'),
('Major', 'Latest major versions'),
('Minor', 'Latest minor versions'),
])
def test_view_get_view_name_method(self, instance, suffix, expected):
"""Test view `get_view_name()` method"""
instance.suffix = suffix
assert instance.get_view_name() == expected
def test_view_get_object_method(self, mocker, instance):
"""Test view `get_object()` method"""
instance.kwargs = {
'owner': 'pawelad',
'repo': 'verse',
}
mocked_repo = MagicMock(autospec=Repository)
mocked_github_client = mocker.patch('projects.views.github_client')
mocked_github_client.repository.return_value = mocked_repo
checker = instance.get_object()
assert isinstance(checker, GitHubVersionChecker)
assert checker.slug == 'gh-pawelad-verse'
assert checker.homepage == 'https://github.com/pawelad/verse'
assert checker.repository == 'https://github.com/pawelad/verse'
# Nonexistent GitHub repository
mocked_github_client.repository.return_value = None
with pytest.raises(Http404):
instance.get_object()
def test_view_list_method(self, instance):
"""Test view `list()` method"""
with pytest.raises(Http404):
instance.list(request=MagicMock())
| apache-2.0 | -2,319,080,477,112,680,400 | 32.188755 | 77 | 0.611447 | false |
glaubitz/fs-uae-debian | arcade/fsgs/platforms/arcade/arcadeplatform.py | 2 | 1070 | from fsgs.platform import PlatformHandler
from fsgs.platforms.arcade.mamearcadedriver import MameArcadeDriver
from fsgs.platforms.loader import SimpleLoader
class ArcadePlatformHandler(PlatformHandler):
PLATFORM_NAME = "Arcade"
def __init__(self):
PlatformHandler.__init__(self)
def get_loader(self, fsgs):
return ArcadeLoader(fsgs)
def get_runner(self, fsgs):
return MameArcadeDriver(fsgs)
class ArcadeLoader(SimpleLoader):
def load_files(self, values):
# file_list = json.loads(values["file_list"])
# assert len(file_list) == 1
# self.config["cartridge"] = "sha1://{0}/{1}".format(
# file_list[0]["sha1"], file_list[0]["name"])
self.config["file_list"] = values["file_list"]
def load_extra(self, values):
if "refresh_rate" in values:
self.config["refresh_rate"] = values["refresh_rate"]
if "orientation" in values:
self.config["orientation"] = values["orientation"]
self.config["mame_rom_set"] = values["mame_rom_set"]
| gpl-2.0 | -7,945,367,685,215,314,000 | 31.424242 | 67 | 0.640187 | false |
philipwangdk/HPC | HPC_bitbucket/uwhpsc/lectures/lecture4/mysqrt.py | 2 | 1078 | """
Module for approximating sqrt.
More ...
"""
def sqrt2(x, debug=False):
"""
more details.
"""
from numpy import nan
if x==0.:
return 0.
elif x<0:
print "*** Error, x must be nonnegative"
return nan
assert x>0. and type(x) is float, "Unrecognized input"
s = 1.
kmax = 100
tol = 1.e-14
for k in range(kmax):
if debug:
print "Before iteration %s, s = %20.15f" % (k,s)
s0 = s
        s = 0.5 * (s + x/s)   # Newton's method update for f(s) = s**2 - x
delta_s = s - s0
if abs(delta_s / x) < tol:
break
if debug:
print "After %s iterations, s = %20.15f" % (k+1,s)
return s
def test():
from numpy import sqrt
xvalues = [0., 2., 100., 10000., 1.e-4]
for x in xvalues:
print "Testing with x = %20.15e" % x
s = sqrt2(x)
s_numpy = sqrt(x)
print " s = %20.15e, numpy.sqrt = %20.15e" \
% (s, s_numpy)
assert abs(s - s_numpy) < 1e-14, \
"Disagree for x = %20.15e" % x
| mit | -55,332,910,589,394,170 | 22.977778 | 60 | 0.455473 | false |
redhat-cip/tempest | tempest/scenario/manager.py | 2 | 58062 | # Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess
import netaddr
from oslo_log import log
import six
from tempest_lib.common.utils import misc as misc_utils
from tempest_lib import exceptions as lib_exc
from tempest.common import fixed_network
from tempest.common.utils import data_utils
from tempest.common.utils.linux import remote_client
from tempest.common import waiters
from tempest import config
from tempest import exceptions
from tempest.services.network import resources as net_resources
import tempest.test
CONF = config.CONF
LOG = log.getLogger(__name__)
class ScenarioTest(tempest.test.BaseTestCase):
"""Base class for scenario tests. Uses tempest own clients. """
credentials = ['primary']
@classmethod
def setup_clients(cls):
super(ScenarioTest, cls).setup_clients()
# Clients (in alphabetical order)
cls.flavors_client = cls.manager.flavors_client
cls.floating_ips_client = cls.manager.floating_ips_client
# Glance image client v1
cls.image_client = cls.manager.image_client
# Compute image client
cls.images_client = cls.manager.images_client
cls.keypairs_client = cls.manager.keypairs_client
# Nova security groups client
cls.security_groups_client = cls.manager.security_groups_client
cls.servers_client = cls.manager.servers_client
cls.volumes_client = cls.manager.volumes_client
cls.snapshots_client = cls.manager.snapshots_client
cls.interface_client = cls.manager.interfaces_client
# Neutron network client
cls.network_client = cls.manager.network_client
# Heat client
cls.orchestration_client = cls.manager.orchestration_client
# ## Methods to handle sync and async deletes
def setUp(self):
super(ScenarioTest, self).setUp()
self.cleanup_waits = []
# NOTE(mtreinish) This is safe to do in setUp instead of setUp class
# because scenario tests in the same test class should not share
# resources. If resources were shared between test cases then it
# should be a single scenario test instead of multiples.
# NOTE(yfried): this list is cleaned at the end of test_methods and
# not at the end of the class
self.addCleanup(self._wait_for_cleanups)
def delete_wrapper(self, delete_thing, *args, **kwargs):
"""Ignores NotFound exceptions for delete operations.
@param delete_thing: delete method of a resource. method will be
executed as delete_thing(*args, **kwargs)
"""
try:
# Tempest clients return dicts, so there is no common delete
# method available. Using a callable instead
delete_thing(*args, **kwargs)
except lib_exc.NotFound:
# If the resource is already missing, mission accomplished.
pass
def addCleanup_with_wait(self, waiter_callable, thing_id, thing_id_param,
cleanup_callable, cleanup_args=None,
cleanup_kwargs=None, ignore_error=True):
"""Adds wait for async resource deletion at the end of cleanups
@param waiter_callable: callable to wait for the resource to delete
@param thing_id: the id of the resource to be cleaned-up
@param thing_id_param: the name of the id param in the waiter
        @param cleanup_callable: method to pass to self.addCleanup with
            the following *cleanup_args, **cleanup_kwargs.
            Usually a delete method.
"""
if cleanup_args is None:
cleanup_args = []
if cleanup_kwargs is None:
cleanup_kwargs = {}
self.addCleanup(cleanup_callable, *cleanup_args, **cleanup_kwargs)
wait_dict = {
'waiter_callable': waiter_callable,
thing_id_param: thing_id
}
self.cleanup_waits.append(wait_dict)
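    # Illustrative usage sketch (mirrors the call made in create_volume()
    # below): a test that wants an asynchronous delete with a wait at the
    # end of cleanups registers it like this:
    #
    #     self.addCleanup_with_wait(
    #         waiter_callable=self.volumes_client.wait_for_resource_deletion,
    #         thing_id=volume['id'], thing_id_param='id',
    #         cleanup_callable=self.delete_wrapper,
    #         cleanup_args=[self.volumes_client.delete_volume, volume['id']])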
def _wait_for_cleanups(self):
"""To handle async delete actions, a list of waits is added
which will be iterated over as the last step of clearing the
cleanup queue. That way all the delete calls are made up front
and the tests won't succeed unless the deletes are eventually
successful. This is the same basic approach used in the api tests to
limit cleanup execution time except here it is multi-resource,
because of the nature of the scenario tests.
"""
for wait in self.cleanup_waits:
waiter_callable = wait.pop('waiter_callable')
waiter_callable(**wait)
# ## Test functions library
#
# The create_[resource] functions only return body and discard the
# resp part which is not used in scenario tests
def create_keypair(self, client=None):
if not client:
client = self.keypairs_client
name = data_utils.rand_name(self.__class__.__name__)
# We don't need to create a keypair by pubkey in scenario
body = client.create_keypair(name)
self.addCleanup(client.delete_keypair, name)
return body
def create_server(self, name=None, image=None, flavor=None,
wait_on_boot=True, wait_on_delete=True,
create_kwargs=None):
"""Creates VM instance.
@param image: image from which to create the instance
        @param wait_on_boot: wait for status ACTIVE before continuing
@param wait_on_delete: force synchronous delete on cleanup
@param create_kwargs: additional details for instance creation
@return: server dict
"""
if name is None:
name = data_utils.rand_name(self.__class__.__name__)
if image is None:
image = CONF.compute.image_ref
if flavor is None:
flavor = CONF.compute.flavor_ref
if create_kwargs is None:
create_kwargs = {}
network = self.get_tenant_network()
create_kwargs = fixed_network.set_networks_kwarg(network,
create_kwargs)
LOG.debug("Creating a server (name: %s, image: %s, flavor: %s)",
name, image, flavor)
server = self.servers_client.create_server(name, image, flavor,
**create_kwargs)
if wait_on_delete:
self.addCleanup(self.servers_client.wait_for_server_termination,
server['id'])
self.addCleanup_with_wait(
waiter_callable=self.servers_client.wait_for_server_termination,
thing_id=server['id'], thing_id_param='server_id',
cleanup_callable=self.delete_wrapper,
cleanup_args=[self.servers_client.delete_server, server['id']])
if wait_on_boot:
waiters.wait_for_server_status(self.servers_client,
server_id=server['id'],
status='ACTIVE')
# The instance retrieved on creation is missing network
# details, necessitating retrieval after it becomes active to
# ensure correct details.
server = self.servers_client.show_server(server['id'])
self.assertEqual(server['name'], name)
return server
def create_volume(self, size=None, name=None, snapshot_id=None,
imageRef=None, volume_type=None, wait_on_delete=True):
if name is None:
name = data_utils.rand_name(self.__class__.__name__)
volume = self.volumes_client.create_volume(
size=size, display_name=name, snapshot_id=snapshot_id,
imageRef=imageRef, volume_type=volume_type)
if wait_on_delete:
self.addCleanup(self.volumes_client.wait_for_resource_deletion,
volume['id'])
self.addCleanup(self.delete_wrapper,
self.volumes_client.delete_volume, volume['id'])
else:
self.addCleanup_with_wait(
waiter_callable=self.volumes_client.wait_for_resource_deletion,
thing_id=volume['id'], thing_id_param='id',
cleanup_callable=self.delete_wrapper,
cleanup_args=[self.volumes_client.delete_volume, volume['id']])
self.assertEqual(name, volume['display_name'])
self.volumes_client.wait_for_volume_status(volume['id'], 'available')
# The volume retrieved on creation has a non-up-to-date status.
# Retrieval after it becomes active ensures correct details.
volume = self.volumes_client.show_volume(volume['id'])
return volume
def _create_loginable_secgroup_rule(self, secgroup_id=None):
_client = self.security_groups_client
if secgroup_id is None:
sgs = _client.list_security_groups()
for sg in sgs:
if sg['name'] == 'default':
secgroup_id = sg['id']
# These rules are intended to permit inbound ssh and icmp
# traffic from all sources, so no group_id is provided.
# Setting a group_id would only permit traffic from ports
# belonging to the same security group.
rulesets = [
{
# ssh
'ip_proto': 'tcp',
'from_port': 22,
'to_port': 22,
'cidr': '0.0.0.0/0',
},
{
# ping
'ip_proto': 'icmp',
'from_port': -1,
'to_port': -1,
'cidr': '0.0.0.0/0',
}
]
rules = list()
for ruleset in rulesets:
sg_rule = _client.create_security_group_rule(secgroup_id,
**ruleset)
self.addCleanup(self.delete_wrapper,
_client.delete_security_group_rule,
sg_rule['id'])
rules.append(sg_rule)
return rules
def _create_security_group(self):
# Create security group
sg_name = data_utils.rand_name(self.__class__.__name__)
sg_desc = sg_name + " description"
secgroup = self.security_groups_client.create_security_group(
sg_name, sg_desc)
self.assertEqual(secgroup['name'], sg_name)
self.assertEqual(secgroup['description'], sg_desc)
self.addCleanup(self.delete_wrapper,
self.security_groups_client.delete_security_group,
secgroup['id'])
# Add rules to the security group
self._create_loginable_secgroup_rule(secgroup['id'])
return secgroup
def get_remote_client(self, server_or_ip, username=None, private_key=None,
log_console_of_servers=None):
"""Get a SSH client to a remote server
@param server_or_ip a server object as returned by Tempest compute
client or an IP address to connect to
@param username name of the Linux account on the remote server
@param private_key the SSH private key to use
@param log_console_of_servers a list of server objects. Each server
in the list will have its console printed in the logs in case the
SSH connection failed to be established
@return a RemoteClient object
"""
if isinstance(server_or_ip, six.string_types):
ip = server_or_ip
else:
addrs = server_or_ip['addresses'][CONF.compute.network_for_ssh]
try:
ip = (addr['addr'] for addr in addrs if
netaddr.valid_ipv4(addr['addr'])).next()
except StopIteration:
raise lib_exc.NotFound("No IPv4 addresses to use for SSH to "
"remote server.")
if username is None:
username = CONF.scenario.ssh_user
# Set this with 'keypair' or others to log in with keypair or
# username/password.
if CONF.compute.ssh_auth_method == 'keypair':
password = None
if private_key is None:
private_key = self.keypair['private_key']
else:
password = CONF.compute.image_ssh_password
private_key = None
linux_client = remote_client.RemoteClient(ip, username,
pkey=private_key,
password=password)
try:
linux_client.validate_authentication()
except Exception as e:
message = ('Initializing SSH connection to %(ip)s failed. '
'Error: %(error)s' % {'ip': ip, 'error': e})
caller = misc_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
LOG.exception(message)
# If we don't explicitly set for which servers we want to
# log the console output then all the servers will be logged.
# See the definition of _log_console_output()
self._log_console_output(log_console_of_servers)
raise
return linux_client
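    # Illustrative usage sketch (an assumed flow; the create_kwargs key below
    # is an assumption, not taken from this class): a scenario test typically
    # reaches an instance over its floating IP with the keypair created for
    # it, e.g.
    #
    #     keypair = self.create_keypair()
    #     server = self.create_server(
    #         create_kwargs={'key_name': keypair['name']})
    #     fip = self.create_floating_ip(server)
    #     ssh_client = self.get_remote_client(
    #         fip['ip'], private_key=keypair['private_key'])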
def _image_create(self, name, fmt, path,
disk_format=None, properties=None):
if properties is None:
properties = {}
name = data_utils.rand_name('%s-' % name)
image_file = open(path, 'rb')
self.addCleanup(image_file.close)
params = {
'name': name,
'container_format': fmt,
'disk_format': disk_format or fmt,
'is_public': 'False',
}
params['properties'] = properties
image = self.image_client.create_image(**params)
self.addCleanup(self.image_client.delete_image, image['id'])
self.assertEqual("queued", image['status'])
self.image_client.update_image(image['id'], data=image_file)
return image['id']
def glance_image_create(self):
img_path = CONF.scenario.img_dir + "/" + CONF.scenario.img_file
aki_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.aki_img_file
ari_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ari_img_file
ami_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ami_img_file
img_container_format = CONF.scenario.img_container_format
img_disk_format = CONF.scenario.img_disk_format
img_properties = CONF.scenario.img_properties
LOG.debug("paths: img: %s, container_fomat: %s, disk_format: %s, "
"properties: %s, ami: %s, ari: %s, aki: %s" %
(img_path, img_container_format, img_disk_format,
img_properties, ami_img_path, ari_img_path, aki_img_path))
try:
self.image = self._image_create('scenario-img',
img_container_format,
img_path,
disk_format=img_disk_format,
properties=img_properties)
except IOError:
LOG.debug("A qcow2 image was not found. Try to get a uec image.")
kernel = self._image_create('scenario-aki', 'aki', aki_img_path)
ramdisk = self._image_create('scenario-ari', 'ari', ari_img_path)
properties = {'kernel_id': kernel, 'ramdisk_id': ramdisk}
self.image = self._image_create('scenario-ami', 'ami',
path=ami_img_path,
properties=properties)
LOG.debug("image:%s" % self.image)
def _log_console_output(self, servers=None):
if not CONF.compute_feature_enabled.console_output:
LOG.debug('Console output not supported, cannot log')
return
if not servers:
servers = self.servers_client.list_servers()
servers = servers['servers']
for server in servers:
console_output = self.servers_client.get_console_output(
server['id'], length=None).data
LOG.debug('Console output for %s\nbody=\n%s',
server['id'], console_output)
def _log_net_info(self, exc):
# network debug is called as part of ssh init
if not isinstance(exc, lib_exc.SSHTimeout):
LOG.debug('Network information on a devstack host')
def create_server_snapshot(self, server, name=None):
# Glance client
_image_client = self.image_client
# Compute client
_images_client = self.images_client
if name is None:
name = data_utils.rand_name('scenario-snapshot')
LOG.debug("Creating a snapshot image for server: %s", server['name'])
image = _images_client.create_image(server['id'], name)
image_id = image.response['location'].split('images/')[1]
_image_client.wait_for_image_status(image_id, 'active')
self.addCleanup_with_wait(
waiter_callable=_image_client.wait_for_resource_deletion,
thing_id=image_id, thing_id_param='id',
cleanup_callable=self.delete_wrapper,
cleanup_args=[_image_client.delete_image, image_id])
snapshot_image = _image_client.get_image_meta(image_id)
image_name = snapshot_image['name']
self.assertEqual(name, image_name)
LOG.debug("Created snapshot image %s for server %s",
image_name, server['name'])
return snapshot_image
def nova_volume_attach(self):
volume = self.servers_client.attach_volume(
self.server['id'], self.volume['id'], '/dev/%s'
% CONF.compute.volume_device_name)
self.assertEqual(self.volume['id'], volume['id'])
self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
# Refresh the volume after the attachment
self.volume = self.volumes_client.show_volume(volume['id'])
def nova_volume_detach(self):
self.servers_client.detach_volume(self.server['id'], self.volume['id'])
self.volumes_client.wait_for_volume_status(self.volume['id'],
'available')
volume = self.volumes_client.show_volume(self.volume['id'])
self.assertEqual('available', volume['status'])
def rebuild_server(self, server_id, image=None,
preserve_ephemeral=False, wait=True,
rebuild_kwargs=None):
if image is None:
image = CONF.compute.image_ref
rebuild_kwargs = rebuild_kwargs or {}
LOG.debug("Rebuilding server (id: %s, image: %s, preserve eph: %s)",
server_id, image, preserve_ephemeral)
self.servers_client.rebuild(server_id=server_id, image_ref=image,
preserve_ephemeral=preserve_ephemeral,
**rebuild_kwargs)
if wait:
waiters.wait_for_server_status(self.servers_client,
server_id, 'ACTIVE')
def ping_ip_address(self, ip_address, should_succeed=True,
ping_timeout=None):
timeout = ping_timeout or CONF.compute.ping_timeout
cmd = ['ping', '-c1', '-w1', ip_address]
def ping():
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.communicate()
return (proc.returncode == 0) == should_succeed
return tempest.test.call_until_true(ping, timeout, 1)
def check_vm_connectivity(self, ip_address,
username=None,
private_key=None,
should_connect=True):
"""
:param ip_address: server to test against
:param username: server's ssh username
:param private_key: server's ssh private key to be used
:param should_connect: True/False indicates positive/negative test
positive - attempt ping and ssh
            negative - attempt ping and fail if it succeeds
        :raises: AssertionError if the result of the connectivity check does
not match the value of the should_connect param
"""
if should_connect:
msg = "Timed out waiting for %s to become reachable" % ip_address
else:
msg = "ip address %s is reachable" % ip_address
self.assertTrue(self.ping_ip_address(ip_address,
should_succeed=should_connect),
msg=msg)
if should_connect:
# no need to check ssh for negative connectivity
self.get_remote_client(ip_address, username, private_key)
def check_public_network_connectivity(self, ip_address, username,
private_key, should_connect=True,
msg=None, servers=None):
# The target login is assumed to have been configured for
# key-based authentication by cloud-init.
LOG.debug('checking network connections to IP %s with user: %s' %
(ip_address, username))
try:
self.check_vm_connectivity(ip_address,
username,
private_key,
should_connect=should_connect)
except Exception:
ex_msg = 'Public network connectivity check failed'
if msg:
ex_msg += ": " + msg
LOG.exception(ex_msg)
self._log_console_output(servers)
raise
def create_floating_ip(self, thing, pool_name=None):
"""Creates a floating IP and associates to a server using
Nova clients
"""
floating_ip = self.floating_ips_client.create_floating_ip(pool_name)
self.addCleanup(self.delete_wrapper,
self.floating_ips_client.delete_floating_ip,
floating_ip['id'])
self.floating_ips_client.associate_floating_ip_to_server(
floating_ip['ip'], thing['id'])
return floating_ip
class NetworkScenarioTest(ScenarioTest):
"""Base class for network scenario tests.
    This class provides helpers for network scenario tests, using the neutron
    API. Helpers from the ancestor class which use the nova network API are
    overridden with neutron equivalents.
    This class also enforces using Neutron instead of nova-network.
    Subclassed tests will be skipped if Neutron is not enabled.
"""
credentials = ['primary', 'admin']
@classmethod
def skip_checks(cls):
super(NetworkScenarioTest, cls).skip_checks()
if not CONF.service_available.neutron:
raise cls.skipException('Neutron not available')
@classmethod
def resource_setup(cls):
super(NetworkScenarioTest, cls).resource_setup()
cls.tenant_id = cls.manager.identity_client.tenant_id
def _create_network(self, client=None, tenant_id=None,
namestart='network-smoke-'):
if not client:
client = self.network_client
if not tenant_id:
tenant_id = client.tenant_id
name = data_utils.rand_name(namestart)
result = client.create_network(name=name, tenant_id=tenant_id)
network = net_resources.DeletableNetwork(client=client,
**result['network'])
self.assertEqual(network.name, name)
self.addCleanup(self.delete_wrapper, network.delete)
return network
def _list_networks(self, *args, **kwargs):
"""List networks using admin creds """
networks_list = self.admin_manager.network_client.list_networks(
*args, **kwargs)
return networks_list['networks']
def _list_subnets(self, *args, **kwargs):
"""List subnets using admin creds """
subnets_list = self.admin_manager.network_client.list_subnets(
*args, **kwargs)
return subnets_list['subnets']
def _list_routers(self, *args, **kwargs):
"""List routers using admin creds """
routers_list = self.admin_manager.network_client.list_routers(
*args, **kwargs)
return routers_list['routers']
def _list_ports(self, *args, **kwargs):
"""List ports using admin creds """
ports_list = self.admin_manager.network_client.list_ports(
*args, **kwargs)
return ports_list['ports']
def _create_subnet(self, network, client=None, namestart='subnet-smoke',
**kwargs):
"""
Create a subnet for the given network within the cidr block
configured for tenant networks.
"""
if not client:
client = self.network_client
def cidr_in_use(cidr, tenant_id):
"""
            :return: True if a subnet with the given cidr already exists in
                the tenant, False otherwise
"""
cidr_in_use = self._list_subnets(tenant_id=tenant_id, cidr=cidr)
return len(cidr_in_use) != 0
ip_version = kwargs.pop('ip_version', 4)
if ip_version == 6:
tenant_cidr = netaddr.IPNetwork(
CONF.network.tenant_network_v6_cidr)
num_bits = CONF.network.tenant_network_v6_mask_bits
else:
tenant_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
num_bits = CONF.network.tenant_network_mask_bits
result = None
str_cidr = None
# Repeatedly attempt subnet creation with sequential cidr
# blocks until an unallocated block is found.
for subnet_cidr in tenant_cidr.subnet(num_bits):
str_cidr = str(subnet_cidr)
if cidr_in_use(str_cidr, tenant_id=network.tenant_id):
continue
subnet = dict(
name=data_utils.rand_name(namestart),
network_id=network.id,
tenant_id=network.tenant_id,
cidr=str_cidr,
ip_version=ip_version,
**kwargs
)
try:
result = client.create_subnet(**subnet)
break
except lib_exc.Conflict as e:
is_overlapping_cidr = 'overlaps with another subnet' in str(e)
if not is_overlapping_cidr:
raise
self.assertIsNotNone(result, 'Unable to allocate tenant network')
subnet = net_resources.DeletableSubnet(client=client,
**result['subnet'])
self.assertEqual(subnet.cidr, str_cidr)
self.addCleanup(self.delete_wrapper, subnet.delete)
return subnet
def _create_port(self, network_id, client=None, namestart='port-quotatest',
**kwargs):
if not client:
client = self.network_client
name = data_utils.rand_name(namestart)
result = client.create_port(
name=name,
network_id=network_id,
**kwargs)
self.assertIsNotNone(result, 'Unable to allocate port')
port = net_resources.DeletablePort(client=client,
**result['port'])
self.addCleanup(self.delete_wrapper, port.delete)
return port
def _get_server_port_id_and_ip4(self, server, ip_addr=None):
ports = self._list_ports(device_id=server['id'],
fixed_ip=ip_addr)
self.assertEqual(len(ports), 1,
"Unable to determine which port to target.")
        # this port may have more than one ip address, as in the dual-stack
        # case when the port is created on 2 subnets
for ip46 in ports[0]['fixed_ips']:
ip = ip46['ip_address']
if netaddr.valid_ipv4(ip):
return ports[0]['id'], ip
def _get_network_by_name(self, network_name):
net = self._list_networks(name=network_name)
self.assertNotEqual(len(net), 0,
"Unable to get network by name: %s" % network_name)
return net_resources.AttributeDict(net[0])
def create_floating_ip(self, thing, external_network_id=None,
port_id=None, client=None):
"""Creates a floating IP and associates to a resource/port using
Neutron client
"""
if not external_network_id:
external_network_id = CONF.network.public_network_id
if not client:
client = self.network_client
if not port_id:
port_id, ip4 = self._get_server_port_id_and_ip4(thing)
else:
ip4 = None
result = client.create_floatingip(
floating_network_id=external_network_id,
port_id=port_id,
tenant_id=thing['tenant_id'],
fixed_ip_address=ip4
)
floating_ip = net_resources.DeletableFloatingIp(
client=client,
**result['floatingip'])
self.addCleanup(self.delete_wrapper, floating_ip.delete)
return floating_ip
def _associate_floating_ip(self, floating_ip, server):
port_id, _ = self._get_server_port_id_and_ip4(server)
floating_ip.update(port_id=port_id)
self.assertEqual(port_id, floating_ip.port_id)
return floating_ip
def _disassociate_floating_ip(self, floating_ip):
"""
:param floating_ip: type DeletableFloatingIp
"""
floating_ip.update(port_id=None)
self.assertIsNone(floating_ip.port_id)
return floating_ip
def check_floating_ip_status(self, floating_ip, status):
"""Verifies floatingip reaches the given status
        :param floating_ip: net_resources.DeletableFloatingIp floating IP
            whose status is checked
:param status: target status
:raises: AssertionError if status doesn't match
"""
def refresh():
floating_ip.refresh()
return status == floating_ip.status
tempest.test.call_until_true(refresh,
CONF.network.build_timeout,
CONF.network.build_interval)
self.assertEqual(status, floating_ip.status,
message="FloatingIP: {fp} is at status: {cst}. "
"failed to reach status: {st}"
.format(fp=floating_ip, cst=floating_ip.status,
st=status))
LOG.info("FloatingIP: {fp} is at status: {st}"
.format(fp=floating_ip, st=status))
def _check_tenant_network_connectivity(self, server,
username,
private_key,
should_connect=True,
servers_for_debug=None):
if not CONF.network.tenant_networks_reachable:
msg = 'Tenant networks not configured to be reachable.'
LOG.info(msg)
return
# The target login is assumed to have been configured for
# key-based authentication by cloud-init.
try:
for net_name, ip_addresses in six.iteritems(server['addresses']):
for ip_address in ip_addresses:
self.check_vm_connectivity(ip_address['addr'],
username,
private_key,
should_connect=should_connect)
except Exception as e:
LOG.exception('Tenant network connectivity check failed')
self._log_console_output(servers_for_debug)
self._log_net_info(e)
raise
def _check_remote_connectivity(self, source, dest, should_succeed=True):
"""
        check connectivity to a destination by pinging it over the source
        ssh connection
        :param source: RemoteClient: an ssh connection from which to ping
        :param dest: an IP to ping against
        :param should_succeed: boolean, whether the ping should succeed or not
        :returns: boolean -- True if the ping result matches should_succeed
"""
def ping_remote():
try:
source.ping_host(dest)
except lib_exc.SSHExecCommandFailed:
                LOG.warn('Failed to ping IP: %s via an ssh connection from: %s.'
% (dest, source.ssh_client.host))
return not should_succeed
return should_succeed
return tempest.test.call_until_true(ping_remote,
CONF.compute.ping_timeout,
1)
def _create_security_group(self, client=None, tenant_id=None,
namestart='secgroup-smoke'):
if client is None:
client = self.network_client
if tenant_id is None:
tenant_id = client.tenant_id
secgroup = self._create_empty_security_group(namestart=namestart,
client=client,
tenant_id=tenant_id)
# Add rules to the security group
rules = self._create_loginable_secgroup_rule(client=client,
secgroup=secgroup)
for rule in rules:
self.assertEqual(tenant_id, rule.tenant_id)
self.assertEqual(secgroup.id, rule.security_group_id)
return secgroup
def _create_empty_security_group(self, client=None, tenant_id=None,
namestart='secgroup-smoke'):
"""Create a security group without rules.
Default rules will be created:
- IPv4 egress to any
- IPv6 egress to any
:param tenant_id: secgroup will be created in this tenant
:returns: DeletableSecurityGroup -- containing the secgroup created
"""
if client is None:
client = self.network_client
if not tenant_id:
tenant_id = client.tenant_id
sg_name = data_utils.rand_name(namestart)
sg_desc = sg_name + " description"
sg_dict = dict(name=sg_name,
description=sg_desc)
sg_dict['tenant_id'] = tenant_id
result = client.create_security_group(**sg_dict)
secgroup = net_resources.DeletableSecurityGroup(
client=client,
**result['security_group']
)
self.assertEqual(secgroup.name, sg_name)
self.assertEqual(tenant_id, secgroup.tenant_id)
self.assertEqual(secgroup.description, sg_desc)
self.addCleanup(self.delete_wrapper, secgroup.delete)
return secgroup
def _default_security_group(self, client=None, tenant_id=None):
"""Get default secgroup for given tenant_id.
:returns: DeletableSecurityGroup -- default secgroup for given tenant
"""
if client is None:
client = self.network_client
if not tenant_id:
tenant_id = client.tenant_id
sgs = [
sg for sg in client.list_security_groups().values()[0]
if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
]
msg = "No default security group for tenant %s." % (tenant_id)
self.assertTrue(len(sgs) > 0, msg)
return net_resources.DeletableSecurityGroup(client=client,
**sgs[0])
def _create_security_group_rule(self, secgroup=None, client=None,
tenant_id=None, **kwargs):
"""Create a rule from a dictionary of rule parameters.
        Create a rule in a secgroup. If secgroup is not defined, the default
        secgroup in tenant_id will be searched for and used.
:param secgroup: type DeletableSecurityGroup.
:param tenant_id: if secgroup not passed -- the tenant in which to
search for default secgroup
:param kwargs: a dictionary containing rule parameters:
for example, to allow incoming ssh:
rule = {
direction: 'ingress'
protocol:'tcp',
port_range_min: 22,
port_range_max: 22
}
"""
if client is None:
client = self.network_client
if not tenant_id:
tenant_id = client.tenant_id
if secgroup is None:
secgroup = self._default_security_group(client=client,
tenant_id=tenant_id)
ruleset = dict(security_group_id=secgroup.id,
tenant_id=secgroup.tenant_id)
ruleset.update(kwargs)
sg_rule = client.create_security_group_rule(**ruleset)
sg_rule = net_resources.DeletableSecurityGroupRule(
client=client,
**sg_rule['security_group_rule']
)
self.addCleanup(self.delete_wrapper, sg_rule.delete)
self.assertEqual(secgroup.tenant_id, sg_rule.tenant_id)
self.assertEqual(secgroup.id, sg_rule.security_group_id)
return sg_rule
def _create_loginable_secgroup_rule(self, client=None, secgroup=None):
"""These rules are intended to permit inbound ssh and icmp
traffic from all sources, so no group_id is provided.
Setting a group_id would only permit traffic from ports
belonging to the same security group.
"""
if client is None:
client = self.network_client
rules = []
rulesets = [
dict(
# ssh
protocol='tcp',
port_range_min=22,
port_range_max=22,
),
dict(
# ping
protocol='icmp',
),
dict(
# ipv6-icmp for ping6
protocol='icmp',
ethertype='IPv6',
)
]
for ruleset in rulesets:
for r_direction in ['ingress', 'egress']:
ruleset['direction'] = r_direction
try:
sg_rule = self._create_security_group_rule(
client=client, secgroup=secgroup, **ruleset)
except lib_exc.Conflict as ex:
                    # if the rule already exists, skip it and continue
msg = 'Security group rule already exists'
if msg not in ex._error_string:
raise ex
else:
self.assertEqual(r_direction, sg_rule.direction)
rules.append(sg_rule)
return rules
def _ssh_to_server(self, server, private_key):
ssh_login = CONF.compute.image_ssh_user
return self.get_remote_client(server,
username=ssh_login,
private_key=private_key)
def _get_router(self, client=None, tenant_id=None):
"""Retrieve a router for the given tenant id.
If a public router has been configured, it will be returned.
If a public router has not been configured, but a public
network has, a tenant router will be created and returned that
routes traffic to the public network.
"""
if not client:
client = self.network_client
if not tenant_id:
tenant_id = client.tenant_id
router_id = CONF.network.public_router_id
network_id = CONF.network.public_network_id
if router_id:
body = client.show_router(router_id)
return net_resources.AttributeDict(**body['router'])
elif network_id:
router = self._create_router(client, tenant_id)
router.set_gateway(network_id)
return router
else:
raise Exception("Neither of 'public_router_id' or "
"'public_network_id' has been defined.")
def _create_router(self, client=None, tenant_id=None,
namestart='router-smoke'):
if not client:
client = self.network_client
if not tenant_id:
tenant_id = client.tenant_id
name = data_utils.rand_name(namestart)
result = client.create_router(name=name,
admin_state_up=True,
tenant_id=tenant_id)
router = net_resources.DeletableRouter(client=client,
**result['router'])
self.assertEqual(router.name, name)
self.addCleanup(self.delete_wrapper, router.delete)
return router
def _update_router_admin_state(self, router, admin_state_up):
router.update(admin_state_up=admin_state_up)
self.assertEqual(admin_state_up, router.admin_state_up)
def create_networks(self, client=None, tenant_id=None,
dns_nameservers=None):
"""Create a network with a subnet connected to a router.
The baremetal driver is a special case since all nodes are
on the same shared network.
:param client: network client to create resources with.
:param tenant_id: id of tenant to create resources in.
:param dns_nameservers: list of dns servers to send to subnet.
:returns: network, subnet, router
"""
if CONF.baremetal.driver_enabled:
# NOTE(Shrews): This exception is for environments where tenant
# credential isolation is available, but network separation is
# not (the current baremetal case). Likely can be removed when
# test account mgmt is reworked:
# https://blueprints.launchpad.net/tempest/+spec/test-accounts
if not CONF.compute.fixed_network_name:
m = 'fixed_network_name must be specified in config'
raise exceptions.InvalidConfiguration(m)
network = self._get_network_by_name(
CONF.compute.fixed_network_name)
router = None
subnet = None
else:
network = self._create_network(client=client, tenant_id=tenant_id)
router = self._get_router(client=client, tenant_id=tenant_id)
subnet_kwargs = dict(network=network, client=client)
# use explicit check because empty list is a valid option
if dns_nameservers is not None:
subnet_kwargs['dns_nameservers'] = dns_nameservers
subnet = self._create_subnet(**subnet_kwargs)
subnet.add_to_router(router.id)
return network, subnet, router
def create_server(self, name=None, image=None, flavor=None,
wait_on_boot=True, wait_on_delete=True,
create_kwargs=None):
vnic_type = CONF.network.port_vnic_type
# If vnic_type is configured create port for
# every network
if vnic_type:
ports = []
networks = []
create_port_body = {'binding:vnic_type': vnic_type,
'namestart': 'port-smoke'}
if create_kwargs:
net_client = create_kwargs.get("network_client",
self.network_client)
# Convert security group names to security group ids
# to pass to create_port
if create_kwargs.get('security_groups'):
security_groups = net_client.list_security_groups().get(
'security_groups')
sec_dict = dict([(s['name'], s['id'])
for s in security_groups])
sec_groups_names = [s['name'] for s in create_kwargs[
'security_groups']]
security_groups_ids = [sec_dict[s]
for s in sec_groups_names]
if security_groups_ids:
create_port_body[
'security_groups'] = security_groups_ids
networks = create_kwargs.get('networks')
else:
net_client = self.network_client
            # If no networks are passed to us, we look for the tenant's
            # private networks and create a port only if there is exactly
            # one private network. This is the same behaviour we would
            # expect when passing the call to the clients with no networks.
if not networks:
networks = net_client.list_networks(filters={
'router:external': False})
                self.assertEqual(1, len(networks),
                                 "Expected exactly one private network"
                                 " for the tenant")
for net in networks:
net_id = net['uuid']
port = self._create_port(network_id=net_id,
client=net_client,
**create_port_body)
ports.append({'port': port.id})
if ports:
create_kwargs['networks'] = ports
return super(NetworkScenarioTest, self).create_server(
name=name, image=image, flavor=flavor,
wait_on_boot=wait_on_boot, wait_on_delete=wait_on_delete,
create_kwargs=create_kwargs)
# power/provision states as of icehouse
class BaremetalPowerStates(object):
"""Possible power states of an Ironic node."""
POWER_ON = 'power on'
POWER_OFF = 'power off'
REBOOT = 'rebooting'
SUSPEND = 'suspended'
class BaremetalProvisionStates(object):
"""Possible provision states of an Ironic node."""
NOSTATE = None
INIT = 'initializing'
ACTIVE = 'active'
BUILDING = 'building'
DEPLOYWAIT = 'wait call-back'
DEPLOYING = 'deploying'
DEPLOYFAIL = 'deploy failed'
DEPLOYDONE = 'deploy complete'
DELETING = 'deleting'
DELETED = 'deleted'
ERROR = 'error'
class BaremetalScenarioTest(ScenarioTest):
credentials = ['primary', 'admin']
@classmethod
def skip_checks(cls):
super(BaremetalScenarioTest, cls).skip_checks()
if (not CONF.service_available.ironic or
not CONF.baremetal.driver_enabled):
msg = 'Ironic not available or Ironic compute driver not enabled'
raise cls.skipException(msg)
@classmethod
def setup_clients(cls):
super(BaremetalScenarioTest, cls).setup_clients()
cls.baremetal_client = cls.admin_manager.baremetal_client
@classmethod
def resource_setup(cls):
super(BaremetalScenarioTest, cls).resource_setup()
# allow any issues obtaining the node list to raise early
cls.baremetal_client.list_nodes()
def _node_state_timeout(self, node_id, state_attr,
target_states, timeout=10, interval=1):
if not isinstance(target_states, list):
target_states = [target_states]
def check_state():
node = self.get_node(node_id=node_id)
if node.get(state_attr) in target_states:
return True
return False
if not tempest.test.call_until_true(
check_state, timeout, interval):
msg = ("Timed out waiting for node %s to reach %s state(s) %s" %
(node_id, state_attr, target_states))
raise exceptions.TimeoutException(msg)
def wait_provisioning_state(self, node_id, state, timeout):
self._node_state_timeout(
node_id=node_id, state_attr='provision_state',
target_states=state, timeout=timeout)
def wait_power_state(self, node_id, state):
self._node_state_timeout(
node_id=node_id, state_attr='power_state',
target_states=state, timeout=CONF.baremetal.power_timeout)
def wait_node(self, instance_id):
"""Waits for a node to be associated with instance_id."""
def _get_node():
node = None
try:
node = self.get_node(instance_id=instance_id)
except lib_exc.NotFound:
pass
return node is not None
if not tempest.test.call_until_true(
_get_node, CONF.baremetal.association_timeout, 1):
msg = ('Timed out waiting to get Ironic node by instance id %s'
% instance_id)
raise exceptions.TimeoutException(msg)
def get_node(self, node_id=None, instance_id=None):
if node_id:
_, body = self.baremetal_client.show_node(node_id)
return body
elif instance_id:
_, body = self.baremetal_client.show_node_by_instance_uuid(
instance_id)
if body['nodes']:
return body['nodes'][0]
def get_ports(self, node_uuid):
ports = []
_, body = self.baremetal_client.list_node_ports(node_uuid)
for port in body['ports']:
_, p = self.baremetal_client.show_port(port['uuid'])
ports.append(p)
return ports
def add_keypair(self):
self.keypair = self.create_keypair()
def verify_connectivity(self, ip=None):
if ip:
dest = self.get_remote_client(ip)
else:
dest = self.get_remote_client(self.instance)
dest.validate_authentication()
def boot_instance(self):
create_kwargs = {
'key_name': self.keypair['name']
}
self.instance = self.create_server(
wait_on_boot=False, create_kwargs=create_kwargs)
self.wait_node(self.instance['id'])
self.node = self.get_node(instance_id=self.instance['id'])
self.wait_power_state(self.node['uuid'], BaremetalPowerStates.POWER_ON)
self.wait_provisioning_state(
self.node['uuid'],
[BaremetalProvisionStates.DEPLOYWAIT,
BaremetalProvisionStates.ACTIVE],
timeout=15)
self.wait_provisioning_state(self.node['uuid'],
BaremetalProvisionStates.ACTIVE,
timeout=CONF.baremetal.active_timeout)
waiters.wait_for_server_status(self.servers_client,
self.instance['id'], 'ACTIVE')
self.node = self.get_node(instance_id=self.instance['id'])
self.instance = self.servers_client.show_server(self.instance['id'])
def terminate_instance(self):
self.servers_client.delete_server(self.instance['id'])
self.wait_power_state(self.node['uuid'],
BaremetalPowerStates.POWER_OFF)
self.wait_provisioning_state(
self.node['uuid'],
BaremetalProvisionStates.NOSTATE,
timeout=CONF.baremetal.unprovision_timeout)
class EncryptionScenarioTest(ScenarioTest):
"""
Base class for encryption scenario tests
"""
credentials = ['primary', 'admin']
@classmethod
def setup_clients(cls):
super(EncryptionScenarioTest, cls).setup_clients()
cls.admin_volume_types_client = cls.os_adm.volume_types_client
def _wait_for_volume_status(self, status):
self.status_timeout(
self.volume_client.volumes, self.volume.id, status)
def nova_boot(self):
self.keypair = self.create_keypair()
create_kwargs = {'key_name': self.keypair['name']}
self.server = self.create_server(image=self.image,
create_kwargs=create_kwargs)
def create_volume_type(self, client=None, name=None):
if not client:
client = self.admin_volume_types_client
if not name:
name = 'generic'
randomized_name = data_utils.rand_name('scenario-type-' + name)
LOG.debug("Creating a volume type: %s", randomized_name)
body = client.create_volume_type(
randomized_name)
self.assertIn('id', body)
self.addCleanup(client.delete_volume_type, body['id'])
return body
def create_encryption_type(self, client=None, type_id=None, provider=None,
key_size=None, cipher=None,
control_location=None):
if not client:
client = self.admin_volume_types_client
if not type_id:
volume_type = self.create_volume_type()
type_id = volume_type['id']
LOG.debug("Creating an encryption type for volume type: %s", type_id)
client.create_encryption_type(
type_id, provider=provider, key_size=key_size, cipher=cipher,
control_location=control_location)
class SwiftScenarioTest(ScenarioTest):
"""
Provide harness to do Swift scenario tests.
Subclasses implement the tests that use the methods provided by this
class.
"""
@classmethod
def skip_checks(cls):
super(SwiftScenarioTest, cls).skip_checks()
if not CONF.service_available.swift:
skip_msg = ("%s skipped as swift is not available" %
cls.__name__)
raise cls.skipException(skip_msg)
@classmethod
def setup_credentials(cls):
cls.set_network_resources()
super(SwiftScenarioTest, cls).setup_credentials()
operator_role = CONF.object_storage.operator_role
cls.os_operator = cls.get_client_manager(roles=[operator_role])
@classmethod
def setup_clients(cls):
super(SwiftScenarioTest, cls).setup_clients()
# Clients for Swift
cls.account_client = cls.os_operator.account_client
cls.container_client = cls.os_operator.container_client
cls.object_client = cls.os_operator.object_client
def get_swift_stat(self):
"""get swift status for our user account."""
self.account_client.list_account_containers()
LOG.debug('Swift status information obtained successfully')
def create_container(self, container_name=None):
name = container_name or data_utils.rand_name(
'swift-scenario-container')
self.container_client.create_container(name)
# look for the container to assure it is created
self.list_and_check_container_objects(name)
LOG.debug('Container %s created' % (name))
self.addCleanup(self.delete_wrapper,
self.container_client.delete_container,
name)
return name
def delete_container(self, container_name):
self.container_client.delete_container(container_name)
LOG.debug('Container %s deleted' % (container_name))
def upload_object_to_container(self, container_name, obj_name=None):
obj_name = obj_name or data_utils.rand_name('swift-scenario-object')
obj_data = data_utils.arbitrary_string()
self.object_client.create_object(container_name, obj_name, obj_data)
self.addCleanup(self.delete_wrapper,
self.object_client.delete_object,
container_name,
obj_name)
return obj_name, obj_data
def delete_object(self, container_name, filename):
self.object_client.delete_object(container_name, filename)
self.list_and_check_container_objects(container_name,
not_present_obj=[filename])
def list_and_check_container_objects(self, container_name,
present_obj=None,
not_present_obj=None):
"""
List objects for a given container and assert which are present and
which are not.
"""
if present_obj is None:
present_obj = []
if not_present_obj is None:
not_present_obj = []
_, object_list = self.container_client.list_container_contents(
container_name)
if present_obj:
for obj in present_obj:
self.assertIn(obj, object_list)
if not_present_obj:
for obj in not_present_obj:
self.assertNotIn(obj, object_list)
def change_container_acl(self, container_name, acl):
metadata_param = {'metadata_prefix': 'x-container-',
'metadata': {'read': acl}}
self.container_client.update_container_metadata(container_name,
**metadata_param)
resp, _ = self.container_client.list_container_metadata(container_name)
self.assertEqual(resp['x-container-read'], acl)
def download_and_verify(self, container_name, obj_name, expected_data):
_, obj = self.object_client.get_object(container_name, obj_name)
self.assertEqual(obj, expected_data)
| apache-2.0 | 9,041,714,999,479,491,000 | 40.681263 | 79 | 0.569701 | false |
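# --------------------------------------------------------------------------
# Illustrative usage (not part of the dataset row above): a minimal sketch of
# how the NetworkScenarioTest helpers defined above are typically combined in
# a scenario test. It assumes the class is importable as
# tempest.scenario.manager.NetworkScenarioTest; the test class and method
# names below are hypothetical.
from tempest.scenario import manager
from tempest import test


class NetworkBasicSketch(manager.NetworkScenarioTest):

    @test.services('compute', 'network')
    def test_server_gets_active_floating_ip(self):
        # Build an isolated tenant network, subnet and router.
        network, subnet, router = self.create_networks()
        # Open ssh/icmp so later connectivity checks can pass.
        secgroup = self._create_security_group()
        # Boot a server on the new network with that security group.
        server = self.create_server(create_kwargs={
            'networks': [{'uuid': network['id']}],
            'security_groups': [{'name': secgroup['name']}],
        })
        # Attach a floating IP and wait until Neutron reports it ACTIVE.
        floating_ip = self.create_floating_ip(server)
        self.check_floating_ip_status(floating_ip, 'ACTIVE')
# --------------------------------------------------------------------------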
theperfectionist89/fotc | huntingAdvantages.py | 1 | 2409 | class Advantage:
mods = {
"Small":1,
"Medium":2,
"Large":3,
"Inedible":4,
"NPC":0
}
def modifyMeatDamage(self, hunt, attack):
pass
def modifyPeltDamage(self, hunt, attack):
pass
def modifySkills(self, hunt):
pass
class CarefulHunter(Advantage):
def modifyMeatDamage(self, hunt, attack):
if attack.weapon.DmgType == "Bludgeoning":
hunt.meatDmg = 1
def modifyPeltDamage(self, hunt, attack):
if attack.weapon.DmgType in ["Piercing","Slashing"]:
hunt.peltDmg = 1
class Gentle(Advantage):
def modifyPeltDamage(self, hunt, attack):
if hunt.start == True:
hunt.peltDmg = 0
class Humane(Advantage):
def modifyMeatDamage(self, hunt, attack):
if hunt.start == True:
hunt.meatDmg = 0
class LightStep(Advantage):
def modifySkills(self, hunt):
mod = self.mods[hunt.prey.Type]*3
hunt.pred.skills["Stealth"] += mod
class SwiftHunter(Advantage):
def modifySkills(self, hunt):
mod = self.mods[hunt.prey.Type]*3
hunt.pred.skills["Acrobatics"] += mod
class Brutal(Advantage):
def modifyMeatDamage(self, hunt, attack):
if hunt.start == True:
hunt.meatDmg *= 2
class Careless(Advantage):
def modifyPeltDamage(self, hunt, attack):
if hunt.start == True:
hunt.peltDmg *= 2
class Rowdy(Advantage):
def modifySkills(self, hunt):
mod = (5-self.mods[hunt.prey.Type])*3
hunt.pred.skills["Stealth"] -= mod
class SlowHunter(Advantage):
def modifySkills(self, hunt):
mod = (5-self.mods[hunt.prey.Type])*3
hunt.pred.skills["Acrobatics"] -= mod
class SloppyHunter(Advantage):
def modifyMeatDamage(self, hunt, attack):
if attack.weapon.DmgType in ["Piercing","Slashing"]:
hunt.meatDmg = 2
def modifyPeltDamage(self, hunt, attack):
if attack.weapon.DmgType == "Bludgeoning":
hunt.peltDmg = 2
class Slothful(Advantage):
pass
class Impatient(Advantage):
pass
#NPC Advantages
class Skittish(Advantage):
def modifySkills(self, hunt):
mod = round(hunt.prey.Level / 2,0)
hunt.prey.skills["Escape"] += mod
class Determined(Advantage):
def modifySkills(self, hunt):
mod = round(hunt.prey.Level / 2,0)
hunt.prey.skills["Escape"] -= 3
class Observant(Advantage):
def modifySkills(self, hunt):
mod = round(hunt.prey.Level / 2,0)
hunt.prey.skills["Perception"] += 3
class Blind(Advantage):
def modifySkills(self, hunt):
mod = round(hunt.prey.Level / 2,0)
hunt.prey.skills["Perception"] -= 3 | mit | -4,470,928,599,606,183,000 | 23.343434 | 54 | 0.690328 | false |
piotrek-golda/CivilHubIndependantCopy | topics/views.py | 3 | 9562 | # -*- coding: utf-8 -*-
import json, datetime
from dateutil.relativedelta import relativedelta
from django.db import transaction
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext as _
from django.utils.timesince import timesince
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.exceptions import PermissionDenied
from django.shortcuts import render, redirect, get_object_or_404
from django.views.generic import View, DetailView, ListView
from django.views.generic.edit import UpdateView
from django.views.decorators.http import require_http_methods, require_POST
from django.contrib.auth.decorators import login_required
from actstream import action
from places_core.mixins import LoginRequiredMixin
from places_core.permissions import is_moderator
from places_core.helpers import SimplePaginator, truncatehtml, truncatesmart
from maps.models import MapPointer
from locations.mixins import LocationContextMixin, SearchableListMixin
from locations.models import Location
from locations.links import LINKS_MAP as links
from .models import Discussion, Entry, EntryVote, Category
from .forms import DiscussionForm, ReplyForm, ConfirmDeleteForm
class TopicsContextMixin(LocationContextMixin):
""" """
def get_context_data(self):
context = super(TopicsContextMixin, self).get_context_data()
context['links'] = links['discussions']
return context
class DiscussionListView(TopicsContextMixin, SearchableListMixin):
""" """
model = Discussion
paginate_by = 25
def get_queryset(self):
qs = super(DiscussionListView, self).get_queryset()
status = self.request.GET.get('status', 'all')
if status.lower() == 'true':
qs = qs.filter(status=True)
elif status.lower() == 'false':
qs = qs.filter(status=False)
return qs.filter(question__icontains=self.request.GET.get('haystack', ''))
def get_context_data(self):
context = super(DiscussionListView, self).get_context_data()
context['categories'] = Category.objects.all()
return context
class DiscussionDetailView(DetailView):
""" Single discussion page as forum page. """
model = Discussion
def get_context_data(self, **kwargs):
from maps.forms import AjaxPointerForm
topic = super(DiscussionDetailView, self).get_object()
context = super(DiscussionDetailView, self).get_context_data(**kwargs)
replies = Entry.objects.filter(discussion=topic)
paginator = Paginator(replies, settings.PAGE_PAGINATION_LIMIT)
page = self.request.GET.get('page')
moderator = is_moderator(self.request.user, topic.location)
try:
context['replies'] = paginator.page(page)
except PageNotAnInteger:
context['replies'] = paginator.page(1)
except EmptyPage:
context['replies'] = paginator.page(paginator.num_pages)
context['form'] = ReplyForm(initial={
'discussion': topic.slug
})
context['title'] = topic.question
context['location'] = topic.location
context['map_markers'] = MapPointer.objects.filter(
content_type = ContentType.objects.get_for_model(self.object)
).filter(object_pk=self.object.pk)
if self.request.user == self.object.creator or moderator:
context['marker_form'] = AjaxPointerForm(initial={
'content_type': ContentType.objects.get_for_model(Discussion),
'object_pk' : self.object.pk,
})
context['is_moderator'] = moderator
context['links'] = links['discussions']
context['content_type'] = ContentType.objects.get_for_model(Discussion).pk
context['ct'] = ContentType.objects.get_for_model(Entry).pk
return context
class DiscussionUpdateView(LoginRequiredMixin, UpdateView):
""" Allow owner user to update and change their discussions. """
model = Discussion
form_class = DiscussionForm
template_name = 'locations/location_forum_create.html'
def get_context_data(self, **kwargs):
obj = super(DiscussionUpdateView, self).get_object()
context = super(DiscussionUpdateView, self).get_context_data(**kwargs)
moderator = is_moderator(self.request.user, obj.location)
if self.request.user != obj.creator and not moderator:
raise PermissionDenied
context['title'] = obj.question
context['subtitle'] = _('Edit this topic')
context['location'] = obj.location
context['links'] = links['discussions']
context['is_moderator'] = moderator
return context
class DeleteDiscussionView(LoginRequiredMixin, View):
""" Delete single discussion in 'classic' way. """
template_name = 'topics/delete.html'
def get(self, request, pk):
discussion = get_object_or_404(Discussion, pk=pk)
ctx = {
'form' : ConfirmDeleteForm(initial={'confirm':True}),
'title': _("Delete discussion"),
'location': discussion.location,
}
return render(request, self.template_name, ctx)
def post(self, request, pk):
discussion = get_object_or_404(Discussion, pk=pk)
try:
with transaction.commit_on_success(): discussion.delete()
ctx = {
'title': _("Entry deleted"),
'location': discussion.location,
}
return redirect(reverse('locations:discussions', kwargs={
'slug': discussion.location.slug
}))
except Exception as ex:
ctx = {
'title': _("Error"),
'error': str(ex),
'location': discussion.location,
}
return render(request, 'topics/delete-confirm.html', ctx)
class EntryUpdateView(LoginRequiredMixin, View):
""" Update entry in static form. """
def post(self, request, slug, pk):
entry = get_object_or_404(Entry, pk=pk)
entry.content = request.POST.get('content')
entry.save()
return redirect(request.META['HTTP_REFERER'] + '#reply-' + str(entry.pk))
@login_required
@require_POST
@transaction.non_atomic_requests
@transaction.autocommit
def delete_topic(request):
""" Delete topic from discussion list via AJAX request. """
pk = request.POST.get('object_pk')
if not pk:
return HttpResponse(json.dumps({
'success': False,
'message': _("No entry ID provided"),
'level': 'danger',
}))
try:
topic = Discussion.objects.get(pk=pk)
except Discussion.DoesNotExist as ex:
return HttpResponse(json.dumps({
'success': False,
'message': str(ex),
'level': 'danger',
}))
moderator = is_moderator(request.user, topic.location)
if request.user != topic.creator and not moderator:
return HttpResponse(json.dumps({
'success': False,
'message': _("Permission required!"),
'level': 'danger',
}))
try:
with transaction.commit_on_success(): topic.delete()
return HttpResponse(json.dumps({
'success': True,
'message': _("Entry deleted"),
'level': 'success',
}))
except Exception as ex:
return HttpResponse(json.dumps({
'success': False,
'message': str(ex),
'level': 'danger',
}))
def reply(request, slug):
""" Create forum reply. """
if request.method == 'POST' and request.POST:
post = request.POST
topic = Discussion.objects.get(slug=post['discussion'])
if not topic.status:
return HttpResponse(_('This discussion is closed.'))
entry = Entry(
content = post['content'],
creator = request.user,
discussion = topic,
)
entry.save()
action.send(
request.user,
action_object=entry,
target = topic,
verb= _('posted')
)
return HttpResponseRedirect(request.META['HTTP_REFERER'] + '#reply-' + str(entry.pk))
@login_required
@require_POST
@transaction.non_atomic_requests
@transaction.autocommit
def vote(request, pk):
""" Vote for reply. """
entry = Entry.objects.get(pk=pk)
vote = False if request.POST.get('vote') == 'false' else True
user = request.user
check = EntryVote.objects.filter(entry=entry).filter(user=user)
if not len(check):
entry_vote = EntryVote.objects.create(
entry = entry,
user = user,
vote = vote)
try:
entry_vote.save()
context = {
'success': True,
'message': _("Vote saved"),
'votes' : Entry.objects.get(pk=pk).calculate_votes(),
'level' : "success",
}
except Exception as ex:
context = {
'success': False,
'message': str(ex),
'level' : "danger",
}
else:
context = {
'success': False,
'message': _("You already voted on this entry."),
'level' : "warning",
}
return HttpResponse(json.dumps(context))
| gpl-3.0 | 2,802,770,483,311,767,000 | 35.219697 | 89 | 0.617235 | false |
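# --------------------------------------------------------------------------
# Illustrative usage (not part of the dataset row above): a minimal sketch of
# a urls.py that could route requests to the views defined in topics/views.py
# above. The patterns and URL names are hypothetical, not CivilHub's actual
# routing, and it assumes an older Django release where `patterns()` is still
# available.
from django.conf.urls import patterns, url
from topics import views

urlpatterns = patterns('',
    url(r'^$', views.DiscussionListView.as_view(), name='list'),
    url(r'^(?P<pk>\d+)/delete/$', views.DeleteDiscussionView.as_view(),
        name='delete'),
    url(r'^(?P<slug>[\w-]+)/$', views.DiscussionDetailView.as_view(),
        name='details'),
    url(r'^(?P<slug>[\w-]+)/reply/$', views.reply, name='reply'),
    url(r'^entry/(?P<pk>\d+)/vote/$', views.vote, name='vote'),
    url(r'^delete-topic/$', views.delete_topic, name='delete-topic'),
)
# --------------------------------------------------------------------------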