repo_name (stringlengths 5-100) | path (stringlengths 4-299) | copies (stringclasses, 990 values) | size (stringlengths 4-7) | content (stringlengths 666-1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
mlyundin/scikit-learn | examples/decomposition/plot_pca_iris.py | 253 | 1801 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)
x_surf = [X[:, 0].min(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].max()]
y_surf = [X[:, 0].max(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].min()]
x_surf = np.array(x_surf)
y_surf = np.array(y_surf)
v0 = pca.transform(pca.components_[0])
v0 /= v0[-1]
v1 = pca.transform(pca.components_[1])
v1 /= v1[-1]
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause | -5,259,778,432,817,994,000 | 24.714286 | 73 | 0.567778 | false |
scith/htpc-manager_ynh | sources/libs/sqlobject/declarative.py | 10 | 7263 | """
Declarative objects.
Declarative objects have a simple protocol: you can use classes in
lieu of instances and they are equivalent, and any keyword arguments
you give to the constructor will override those instance variables.
(So if a class is received, we'll simply instantiate an instance with
no arguments).
You can provide a variable __unpackargs__ (a list of strings), and if
the constructor is called with non-keyword arguments they will be
interpreted as the given keyword arguments.
If __unpackargs__ is ('*', name), then all the arguments will be put
in a variable by that name.
You can define a __classinit__(cls, new_attrs) method, which will be
called when the class is created (including subclasses). Note: you
can't use super() in __classinit__ because the class isn't bound to a
name. As an analog to __classinit__, Declarative adds
__instanceinit__ which is called with the same argument (new_attrs).
This is like __init__, but after __unpackargs__ and other factors have
been taken into account.
If __mutableattributes__ is defined as a sequence of strings, these
attributes will not be shared between superclasses and their
subclasses. E.g., if you have a class variable that contains a list
and you append to that list, changes to subclasses will affect
superclasses unless you add the attribute here.
Also defines classinstancemethod, which acts as either a class method
or an instance method depending on where it is called.
"""
import copy
import events
import itertools
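# Module-wide monotonic counter: every Declarative class and instance draws a
# 'declarative_count' from it, so definitions can later be ordered by creation.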
counter = itertools.count()
__all__ = ('classinstancemethod', 'DeclarativeMeta', 'Declarative')
class classinstancemethod(object):
"""
Acts like a class method when called from a class, like an
instance method when called by an instance. The method should
take two arguments, 'self' and 'cls'; one of these will be None
depending on how the method was called.
"""
def __init__(self, func):
self.func = func
def __get__(self, obj, type=None):
return _methodwrapper(self.func, obj=obj, type=type)
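# Usage sketch for classinstancemethod (illustrative; `Widget` is hypothetical):
#
#     class Widget(object):
#         @classinstancemethod
#         def describe(self, cls):
#             return 'instance' if self is not None else 'class'
#
#     Widget.describe()    # -> 'class'    (self is None, cls is Widget)
#     Widget().describe()  # -> 'instance' (self is the instance)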
class _methodwrapper(object):
def __init__(self, func, obj, type):
self.func = func
self.obj = obj
self.type = type
def __call__(self, *args, **kw):
assert not 'self' in kw and not 'cls' in kw, (
"You cannot use 'self' or 'cls' arguments to a "
"classinstancemethod")
return self.func(*((self.obj, self.type) + args), **kw)
def __repr__(self):
if self.obj is None:
return ('<bound class method %s.%s>'
% (self.type.__name__, self.func.func_name))
else:
return ('<bound method %s.%s of %r>'
% (self.type.__name__, self.func.func_name, self.obj))
class DeclarativeMeta(type):
def __new__(meta, class_name, bases, new_attrs):
post_funcs = []
early_funcs = []
events.send(events.ClassCreateSignal,
bases[0], class_name, bases, new_attrs,
post_funcs, early_funcs)
cls = type.__new__(meta, class_name, bases, new_attrs)
for func in early_funcs:
func(cls)
if '__classinit__' in new_attrs:
cls.__classinit__ = staticmethod(cls.__classinit__.im_func)
cls.__classinit__(cls, new_attrs)
for func in post_funcs:
func(cls)
return cls
class Declarative(object):
__unpackargs__ = ()
__mutableattributes__ = ()
__metaclass__ = DeclarativeMeta
__restrict_attributes__ = None
def __classinit__(cls, new_attrs):
cls.declarative_count = counter.next()
for name in cls.__mutableattributes__:
if name not in new_attrs:
                setattr(cls, name, copy.copy(getattr(cls, name)))
def __instanceinit__(self, new_attrs):
if self.__restrict_attributes__ is not None:
for name in new_attrs:
if name not in self.__restrict_attributes__:
raise TypeError(
'%s() got an unexpected keyword argument %r'
% (self.__class__.__name__, name))
for name, value in new_attrs.items():
setattr(self, name, value)
if 'declarative_count' not in new_attrs:
self.declarative_count = counter.next()
def __init__(self, *args, **kw):
if self.__unpackargs__ and self.__unpackargs__[0] == '*':
assert len(self.__unpackargs__) == 2, \
"When using __unpackargs__ = ('*', varname), you must only provide a single variable name (you gave %r)" % self.__unpackargs__
name = self.__unpackargs__[1]
if name in kw:
raise TypeError(
"keyword parameter '%s' was given by position and name"
% name)
kw[name] = args
else:
if len(args) > len(self.__unpackargs__):
raise TypeError(
'%s() takes at most %i arguments (%i given)'
% (self.__class__.__name__,
len(self.__unpackargs__),
len(args)))
for name, arg in zip(self.__unpackargs__, args):
if name in kw:
raise TypeError(
"keyword parameter '%s' was given by position and name"
% name)
kw[name] = arg
if '__alsocopy' in kw:
for name, value in kw['__alsocopy'].items():
if name not in kw:
if name in self.__mutableattributes__:
value = copy.copy(value)
kw[name] = value
del kw['__alsocopy']
self.__instanceinit__(kw)
def __call__(self, *args, **kw):
kw['__alsocopy'] = self.__dict__
return self.__class__(*args, **kw)
@classinstancemethod
def singleton(self, cls):
if self:
return self
name = '_%s__singleton' % cls.__name__
if not hasattr(cls, name):
setattr(cls, name, cls(declarative_count=cls.declarative_count))
return getattr(cls, name)
@classinstancemethod
def __repr__(self, cls):
if self:
name = '%s object' % self.__class__.__name__
v = self.__dict__.copy()
else:
name = '%s class' % cls.__name__
v = cls.__dict__.copy()
if 'declarative_count' in v:
name = '%s %i' % (name, v['declarative_count'])
del v['declarative_count']
# @@: simplifying repr:
#v = {}
names = v.keys()
args = []
for n in self._repr_vars(names):
args.append('%s=%r' % (n, v[n]))
if not args:
return '<%s>' % name
else:
return '<%s %s>' % (name, ' '.join(args))
@staticmethod
def _repr_vars(dictNames):
names = [n for n in dictNames
if not n.startswith('_')
and n != 'declarative_count']
names.sort()
return names
def setup_attributes(cls, new_attrs):
for name, value in new_attrs.items():
if hasattr(value, '__addtoclass__'):
value.__addtoclass__(cls, name)
| gpl-3.0 | 4,964,667,106,897,932,000 | 34.602941 | 145 | 0.561338 | false |
fredericlepied/ansible | lib/ansible/modules/network/vyos/vyos_user.py | 11 | 10403 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: vyos_user
version_added: "2.4"
author: "Trishna Guha (@trishnag)"
short_description: Manage the collection of local users on VyOS devices
description:
- This module provides declarative management of the local usernames
configured on network devices. It allows playbooks to manage
either individual usernames or the collection of usernames in the
current running config. It also supports purging usernames from the
configuration that are not explicitly defined.
options:
users:
description:
- The set of username objects to be configured on the remote
VyOS device. The list entries can either be the username or
a hash of username and properties. This argument is mutually
        exclusive with the C(name) argument (alias C(aggregate)).
name:
description:
- The username to be configured on the VyOS device.
This argument accepts a string value and is mutually exclusive
with the C(aggregate) argument.
Please note that this option is not same as C(provider username).
full_name:
description:
- The C(full_name) argument provides the full name of the user
account to be created on the remote device. This argument accepts
any text string value.
password:
description:
- The password to be configured on the VyOS device. The
password needs to be provided in clear and it will be encrypted
on the device.
Please note that this option is not same as C(provider password).
update_password:
description:
- Since passwords are encrypted in the device running config, this
argument will instruct the module when to change the password. When
set to C(always), the password will always be updated in the device
and when set to C(on_create) the password will be updated only if
the username is created.
default: always
choices: ['on_create', 'always']
level:
description:
- The C(level) argument configures the level of the user when logged
into the system. This argument accepts string values admin or operator.
purge:
description:
- Instructs the module to consider the
resource definition absolute. It will remove any previously
configured usernames on the device with the exception of the
`admin` user (the current defined set of users).
type: bool
default: false
state:
description:
- Configures the state of the username definition
as it relates to the device operational configuration. When set
to I(present), the username(s) should be configured in the device active
configuration and when set to I(absent) the username(s) should not be
in the device active configuration
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: create a new user
vyos_user:
name: ansible
password: password
state: present
- name: remove all users except admin
vyos_user:
purge: yes
- name: set multiple users to level operator
vyos_user:
users:
- name: netop
- name: netend
level: operator
state: present
- name: Change Password for User netop
vyos_user:
name: netop
password: "{{ new_password }}"
update_password: always
state: present
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- set system login user test level operator
- set system login user authentication plaintext-password password
"""
import re
from functools import partial
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vyos import get_config, load_config
from ansible.module_utils.six import iteritems
from ansible.module_utils.vyos import vyos_argument_spec, check_args
def validate_level(value, module):
if value not in ('admin', 'operator'):
module.fail_json(msg='level must be either admin or operator, got %s' % value)
def spec_to_commands(updates, module):
commands = list()
state = module.params['state']
update_password = module.params['update_password']
def needs_update(want, have, x):
return want.get(x) and (want.get(x) != have.get(x))
def add(command, want, x):
command.append('set system login user %s %s' % (want['name'], x))
for update in updates:
want, have = update
if want['state'] == 'absent':
commands.append('delete system login user %s' % want['name'])
continue
if needs_update(want, have, 'level'):
add(commands, want, "level %s" % want['level'])
if needs_update(want, have, 'full_name'):
add(commands, want, "full-name %s" % want['full_name'])
if needs_update(want, have, 'password'):
if update_password == 'always' or not have:
add(commands, want, 'authentication plaintext-password %s' % want['password'])
return commands
def parse_level(data):
match = re.search(r'level (\S+)', data, re.M)
if match:
level = match.group(1)[1:-1]
return level
def parse_full_name(data):
match = re.search(r'full-name (\S+)', data, re.M)
if match:
full_name = match.group(1)[1:-1]
return full_name
def config_to_dict(module):
data = get_config(module)
match = re.findall(r'^set system login user (\S+)', data, re.M)
if not match:
return list()
instances = list()
for user in set(match):
regex = r' %s .+$' % user
cfg = re.findall(regex, data, re.M)
cfg = '\n'.join(cfg)
obj = {
'name': user,
'state': 'present',
'password': None,
'level': parse_level(cfg),
'full_name': parse_full_name(cfg)
}
instances.append(obj)
return instances
def get_param_value(key, item, module):
# if key doesn't exist in the item, get it from module.params
if not item.get(key):
value = module.params[key]
# if key does exist, do a type check on it to validate it
else:
value_type = module.argument_spec[key].get('type', 'str')
type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type]
type_checker(item[key])
value = item[key]
# validate the param value (if validator func exists)
validator = globals().get('validate_%s' % key)
if all((value, validator)):
validator(value, module)
return value
def map_params_to_obj(module):
users = module.params['users']
if not users:
if not module.params['name'] and module.params['purge']:
return list()
elif not module.params['name']:
module.fail_json(msg='username is required')
else:
aggregate = [{'name': module.params['name']}]
else:
aggregate = list()
for item in users:
if not isinstance(item, dict):
aggregate.append({'name': item})
elif 'name' not in item:
module.fail_json(msg='name is required')
else:
aggregate.append(item)
objects = list()
for item in aggregate:
get_value = partial(get_param_value, item=item, module=module)
item['password'] = get_value('password')
item['full_name'] = get_value('full_name')
item['level'] = get_value('level')
item['state'] = get_value('state')
objects.append(item)
return objects
def update_objects(want, have):
updates = list()
for entry in want:
item = next((i for i in have if i['name'] == entry['name']), None)
if item is None:
updates.append((entry, {}))
elif item:
for key, value in iteritems(entry):
if value and value != item[key]:
updates.append((entry, item))
return updates
def main():
""" main entry point for module execution
"""
argument_spec = dict(
users=dict(type='list', aliases=['aggregate']),
name=dict(),
full_name=dict(),
level=dict(aliases=['role']),
password=dict(no_log=True),
update_password=dict(default='always', choices=['on_create', 'always']),
purge=dict(type='bool', default=False),
state=dict(default='present', choices=['present', 'absent'])
)
argument_spec.update(vyos_argument_spec)
mutually_exclusive = [('name', 'users')]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = config_to_dict(module)
commands = spec_to_commands(update_objects(want, have), module)
if module.params['purge']:
want_users = [x['name'] for x in want]
have_users = [x['name'] for x in have]
for item in set(have_users).difference(want_users):
commands.append('delete system login user %s' % item)
result['commands'] = commands
if commands:
commit = not module.check_mode
load_config(module, commands, commit=commit)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -1,746,141,781,751,737,000 | 30.620061 | 94 | 0.632221 | false |
jillesme/phantomjs | src/breakpad/src/third_party/protobuf/protobuf/examples/add_person.py | 432 | 1656 | #! /usr/bin/python
# See README.txt for information and build instructions.
import addressbook_pb2
import sys
# This function fills in a Person message based on user input.
def PromptForAddress(person):
person.id = int(raw_input("Enter person ID number: "))
person.name = raw_input("Enter name: ")
email = raw_input("Enter email address (blank for none): ")
if email != "":
person.email = email
while True:
number = raw_input("Enter a phone number (or leave blank to finish): ")
if number == "":
break
phone_number = person.phone.add()
phone_number.number = number
type = raw_input("Is this a mobile, home, or work phone? ")
if type == "mobile":
phone_number.type = addressbook_pb2.Person.MOBILE
elif type == "home":
phone_number.type = addressbook_pb2.Person.HOME
elif type == "work":
phone_number.type = addressbook_pb2.Person.WORK
else:
print "Unknown phone type; leaving as default value."
# Main procedure: Reads the entire address book from a file,
# adds one person based on user input, then writes it back out to the same
# file.
if len(sys.argv) != 2:
print "Usage:", sys.argv[0], "ADDRESS_BOOK_FILE"
sys.exit(-1)
address_book = addressbook_pb2.AddressBook()
# Read the existing address book.
try:
f = open(sys.argv[1], "rb")
address_book.ParseFromString(f.read())
f.close()
except IOError:
print sys.argv[1] + ": File not found. Creating a new file."
# Add an address.
PromptForAddress(address_book.person.add())
# Write the new address book back to disk.
f = open(sys.argv[1], "wb")
f.write(address_book.SerializeToString())
f.close()
| bsd-3-clause | -3,392,131,246,811,777,000 | 27.551724 | 76 | 0.676932 | false |
datascopeanalytics/traces | tests/test_docs.py | 1 | 1056 | import traces
from datetime import datetime
def test_quickstart():
time_series = traces.TimeSeries()
time_series[datetime(2042, 2, 1, 6, 0, 0)] = 0 # 6:00:00am
time_series[datetime(2042, 2, 1, 7, 45, 56)] = 1 # 7:45:56am
time_series[datetime(2042, 2, 1, 8, 51, 42)] = 0 # 8:51:42am
    time_series[datetime(2042, 2, 1, 12, 3, 56)] = 1 # 12:03:56pm
    time_series[datetime(2042, 2, 1, 12, 7, 13)] = 0 # 12:07:13pm
assert time_series[datetime(2042, 2, 1, 11, 0, 0)] == 0
distribution = time_series.distribution(
start=datetime(2042, 2, 1, 6, 0, 0), # 6:00am
end=datetime(2042, 2, 1, 13, 0, 0) # 1:00pm
)
assert distribution[1] == 0.16440476190476191
def test_reference():
cart = traces.TimeSeries()
cart[1.2] = {'broccoli'}
cart[1.7] = {'broccoli', 'apple'}
cart[2.2] = {'apple'}
cart[3.5] = {'apple', 'beets'}
assert cart[2] == {'broccoli', 'apple'}
assert cart[-1] is None
cart = traces.TimeSeries(default=set())
assert cart[-1] == set([])
| mit | 4,300,419,985,127,917,600 | 30.058824 | 67 | 0.573864 | false |
guolivar/totus-niwa | service/thirdparty/featureserver/FeatureServer/DataSource/DBM.py | 1 | 6212 | __author__ = "MetaCarta"
__copyright__ = "Copyright (c) 2006-2008 MetaCarta"
__license__ = "Clear BSD"
__version__ = "$Id: DBM.py 444 2008-03-19 01:35:35Z brentp $"
from FeatureServer.DataSource import DataSource
from FeatureServer.DataSource import Lock
from FeatureServer.Service.Action import Action
import anydbm
import UserDict
try:
import cPickle as pickle
except ImportError:
import pickle
class DBM (DataSource):
"""Simple datasource using the anydbm module and pickled datastructures."""
def __init__(self, name, writable = 0, lockfile = None, unique = None, **args):
DataSource.__init__(self, name, **args)
self.db = Recno( args["file"] )
self.append = self.db.append
self.writable = int(writable)
self.unique = unique
if self.writable and lockfile:
self.lock = Lock(lockfile)
else:
self.lock = None
def __iter__ (self):
return self.db.__iter__()
def begin (self):
if self.lock: return self.lock.lock()
def commit (self):
if hasattr(self.db, "sync"): self.db.sync()
if self.lock: self.lock.unlock()
def rollback (self):
if self.lock: self.lock.unlock()
def insert (self, action):
if self.unique:
action.id = self.insertUnique(action.feature)
else:
thunk = self.freeze_feature(action.feature)
action.id = self.append(thunk)
return self.select(action)
def insertUnique(self, feature):
if not feature.properties.has_key(self.unique):
raise Exception("Unique key (%s) missing from feature." % self.unique)
action = Action()
action.attributes[self.unique] = feature.properties[self.unique]
features = self.select(action)
if len(features) > 1:
raise Exception("There are two features with the unique key %s. Something's wrong with that." % feature.properties[self.unique])
thunk = self.freeze_feature(feature)
if len(features) == 0:
return self.append(thunk)
else:
self.db[features[0].id] = thunk
return features[0].id
def update (self, action):
self.db[action.id] = self.freeze_feature(action.feature)
return self.select(action)
def delete (self, action):
feature = action.feature
if action.id:
del self.db[action.id]
elif action.attributes:
for feat in self.select(action):
del self.db[feat.id]
return []
def select (self, action):
def _overlap (a, b):
return a[2] >= b[0] and \
b[2] >= a[0] and \
a[3] >= b[1] and \
b[3] >= a[1]
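        # (_overlap is a plain axis-aligned bounding-box intersection test on
        # [minx, miny, maxx, maxy] tuples: boxes overlap iff their extents
        # overlap on both axes)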
if action.id is not None:
feature = self.thaw_feature( self.db[action.id] )
feature.id = action.id
return [feature]
else:
result = []
count = action.maxfeatures
counter = 0
for id in self:
if counter < action.startfeature:
counter += 1
continue
thunk = self.db[id]
feature = self.thaw_feature(thunk)
feature.id = id
if action.bbox and not _overlap(action.bbox, feature.bbox):
continue
if action.attributes:
props = feature.properties
skip = False
for key, val in action.attributes.items():
if (key not in props and val is not None) or \
(key in props and str(props[key]) != val):
skip = True
break
if skip: continue
result.append(feature)
if count is not None:
count -= 1
if not count: break
return result
def freeze_feature (self, feature):
feature.bbox = feature.get_bbox()
return pickle.dumps(feature)
def thaw_feature (self, thunk):
return pickle.loads(thunk)
class Recno (object):
"""Class to handle managment of the database file."""
class Iterator (object):
def __init__ (self, recno, idx = 0):
self.recno = recno
self.idx = self.recno.max + 1
self.stopIdx = idx
def __iter__ (self):
return self
def next (self):
while True:
self.idx -= 1
if self.idx == 0 or self.idx == self.stopIdx:
raise StopIteration
if not self.recno.has_key(self.idx):
continue
return self.idx
def __init__(self, file):
self.file = file
self.max = 0
self.data = None
self.open()
def __getitem__ (self, key):
if not self.data:
self.open()
return self.data[str(key)]
def __setitem__ (self, key, val):
if not self.data:
self.open()
self.data[str(key)] = val
if key > self.max: self.max = key
def __delitem__ (self, key):
if not self.data:
self.open()
del self.data[str(key)]
def __len__ (self):
if not self.data:
self.open()
return len(self.data)
def __iter__ (self):
return self.Iterator(self)
def has_key (self, key):
if not self.data:
self.open()
return self.data.has_key(str(key))
def sync (self, reopen=True):
if not self.data:
self.open()
self.data["_"] = str(self.max)
del self.data
self.data = None
if reopen:
self.data = anydbm.open( self.file, "c" )
def __del__ (self):
self.sync(False)
def append (self, val):
self.max += 1
self.__setitem__(self.max, val)
return self.max
def open(self):
self.data = anydbm.open( self.file, "c" )
if self.data.has_key("_"):
self.max = int(self.data["_"])
| gpl-3.0 | -6,092,681,979,574,670,000 | 30.06 | 140 | 0.518191 | false |
GladeRom/android_external_chromium_org | tools/chrome_proxy/integration_tests/chrome_proxy_pagesets/top_20.py | 34 | 3245 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class Top20Page(page_module.Page):
def __init__(self, url, page_set, name=''):
super(Top20Page, self).__init__(url=url, page_set=page_set, name=name)
self.archive_data_file = '../data/chrome_proxy_top_20.json'
class Top20PageSet(page_set_module.PageSet):
""" Pages hand-picked for Chrome Proxy tests. """
def __init__(self):
super(Top20PageSet, self).__init__(
archive_data_file='../data/chrome_proxy_top_20.json')
# Why: top google property; a google tab is often open
self.AddPage(Top20Page('https://www.google.com/#hl=en&q=barack+obama',
self))
# Why: #3 (Alexa global)
self.AddPage(Top20Page('http://www.youtube.com', self))
# Why: #18 (Alexa global), Picked an interesting post
self.AddPage(Top20Page(
# pylint: disable=C0301
'http://en.blog.wordpress.com/2012/09/04/freshly-pressed-editors-picks-for-august-2012/',
self, 'Wordpress'))
# Why: top social,Public profile
self.AddPage(Top20Page('http://www.facebook.com/barackobama', self,
'Facebook'))
# Why: #12 (Alexa global),Public profile
self.AddPage(Top20Page('http://www.linkedin.com/in/linustorvalds', self,
'LinkedIn'))
# Why: #6 (Alexa) most visited worldwide,Picked an interesting page
self.AddPage(Top20Page('http://en.wikipedia.org/wiki/Wikipedia', self,
'Wikipedia (1 tab)'))
# Why: #8 (Alexa global),Picked an interesting page
self.AddPage(Top20Page('https://twitter.com/katyperry', self, 'Twitter'))
# Why: #37 (Alexa global)
self.AddPage(Top20Page('http://pinterest.com', self, 'Pinterest'))
# Why: #1 sports
self.AddPage(Top20Page('http://espn.go.com', self, 'ESPN'))
# Why: #1 news worldwide (Alexa global)
self.AddPage(Top20Page('http://news.yahoo.com', self))
# Why: #2 news worldwide
self.AddPage(Top20Page('http://www.cnn.com', self))
# Why: #7 (Alexa news); #27 total time spent,Picked interesting page
self.AddPage(Top20Page(
'http://www.weather.com/weather/right-now/Mountain+View+CA+94043',
self, 'Weather.com'))
# Why: #1 world commerce website by visits; #3 commerce in the US by time
# spent
self.AddPage(Top20Page('http://www.amazon.com', self))
# Why: #1 commerce website by time spent by users in US
self.AddPage(Top20Page('http://www.ebay.com', self))
# Why: #1 games according to Alexa (with actual games in it)
self.AddPage(Top20Page('http://games.yahoo.com', self))
# Why: #1 Alexa recreation
self.AddPage(Top20Page('http://booking.com', self))
# Why: #1 Alexa reference
self.AddPage(Top20Page('http://answers.yahoo.com', self))
# Why: #1 Alexa sports
self.AddPage(Top20Page('http://sports.yahoo.com/', self))
# Why: top tech blog
self.AddPage(Top20Page('http://techcrunch.com', self))
self.AddPage(Top20Page('http://www.nytimes.com', self))
| bsd-3-clause | 7,473,953,177,414,501,000 | 35.055556 | 95 | 0.651464 | false |
jkandasa/integration_tests | scripts/enable_internal_db.py | 8 | 1146 | #!/usr/bin/env python2
"""SSH in to a running appliance and set up an internal DB.
An optional region can be specified (default 0), and the script
will use the first available unpartitioned disk as the data volume
for postgresql.
Running this script against an already configured appliance is
unsupported, hilarity may ensue.
"""
import argparse
import sys
from cfme.utils.appliance import IPAppliance
def main():
parser = argparse.ArgumentParser(epilog=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('address',
help='hostname or ip address of target appliance')
parser.add_argument('--region', default=0, type=int,
help='region to assign to the new DB')
args = parser.parse_args()
print('Initializing Appliance Internal DB')
ip_a = IPAppliance(args.address)
status, out = ip_a.db.enable_internal(args.region)
if status != 0:
print('Enabling DB failed with error:')
print(out)
sys.exit(1)
else:
print('DB Enabled, evm watchdog should start the UI shortly.')
if __name__ == '__main__':
sys.exit(main())
| gpl-2.0 | 6,192,854,152,533,709,000 | 26.285714 | 70 | 0.690227 | false |
ryenus/vbox | src/VBox/HostServices/SharedOpenGL/crserverlib/get_sizes.py | 1 | 17633 | # Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
num_get_values = {
'GL_ACCUM_ALPHA_BITS' : 1,
'GL_ACCUM_BLUE_BITS' : 1,
'GL_ACCUM_CLEAR_VALUE': 4,
'GL_ACCUM_GREEN_BITS' : 1,
'GL_ACCUM_RED_BITS' : 1,
'GL_ALPHA_BIAS' : 1,
'GL_ALPHA_BITS' : 1,
'GL_ALPHA_SCALE' : 1,
'GL_ALPHA_TEST' : 1,
'GL_ALPHA_TEST_FUNC' : 1,
'GL_ALPHA_TEST_REF' : 1,
'GL_ATTRIB_STACK_DEPTH' : 1,
'GL_AUTO_NORMAL' : 1,
'GL_AUX_BUFFERS' : 1,
'GL_BLEND' : 1,
'GL_BLEND_DST' : 1,
'GL_BLEND_SRC' : 1,
'GL_BLUE_BIAS' : 1,
'GL_BLUE_BITS' : 1,
'GL_BLUE_SCALE' : 1,
'GL_CLIENT_ATTRIB_STACK_DEPTH' : 1,
'GL_CLIP_PLANE0' : 1,
'GL_CLIP_PLANE1' : 1,
'GL_CLIP_PLANE2' : 1,
'GL_CLIP_PLANE3' : 1,
'GL_CLIP_PLANE4' : 1,
'GL_CLIP_PLANE5' : 1,
'GL_COLOR_ARRAY' : 1,
'GL_COLOR_ARRAY_SIZE' : 1,
'GL_COLOR_ARRAY_STRIDE' : 1,
'GL_COLOR_ARRAY_TYPE' : 1,
'GL_COLOR_CLEAR_VALUE': 4,
'GL_COLOR_LOGIC_OP' : 1,
'GL_COLOR_MATERIAL' : 1,
'GL_COLOR_MATERIAL_FACE' : 1,
'GL_COLOR_MATERIAL_PARAMETER' : 1,
'GL_COLOR_MATRIX_STACK_DEPTH' : 1,
'GL_COLOR_WRITEMASK': 4,
'GL_CULL_FACE' : 1,
'GL_CULL_FACE_MODE' : 1,
'GL_CURRENT_COLOR': 4,
'GL_CURRENT_INDEX' : 1,
'GL_CURRENT_NORMAL': 3,
'GL_CURRENT_RASTER_COLOR': 4,
'GL_CURRENT_RASTER_DISTANCE' : 1,
'GL_CURRENT_RASTER_INDEX' : 1,
'GL_CURRENT_RASTER_POSITION': 4,
'GL_CURRENT_RASTER_POSITION_VALID' : 1,
'GL_CURRENT_RASTER_TEXTURE_COORDS': 4,
'GL_CURRENT_TEXTURE_COORDS': 4,
'GL_DEPTH_BIAS' : 1,
'GL_DEPTH_BITS' : 1,
'GL_DEPTH_CLEAR_VALUE' : 1,
'GL_DEPTH_FUNC' : 1,
'GL_DEPTH_RANGE': 2,
'GL_DEPTH_SCALE' : 1,
'GL_DEPTH_TEST' : 1,
'GL_DEPTH_WRITEMASK' : 1,
'GL_DITHER' : 1,
'GL_DOUBLEBUFFER' : 1,
'GL_DRAW_BUFFER' : 1,
'GL_EDGE_FLAG' : 1,
'GL_EDGE_FLAG_ARRAY' : 1,
'GL_EDGE_FLAG_ARRAY_STRIDE' : 1,
'GL_FEEDBACK_BUFFER_SIZE' : 1,
'GL_FEEDBACK_BUFFER_TYPE' : 1,
'GL_FOG' : 1,
'GL_FOG_COLOR': 4,
'GL_FOG_DENSITY' : 1,
'GL_FOG_END' : 1,
'GL_FOG_HINT' : 1,
'GL_FOG_INDEX' : 1,
'GL_FOG_MODE' : 1,
'GL_FOG_START' : 1,
'GL_FRONT_FACE' : 1,
'GL_GREEN_BIAS' : 1,
'GL_GREEN_BITS' : 1,
'GL_GREEN_SCALE' : 1,
'GL_INDEX_ARRAY' : 1,
'GL_INDEX_ARRAY_STRIDE' : 1,
'GL_INDEX_ARRAY_TYPE' : 1,
'GL_INDEX_BITS' : 1,
'GL_INDEX_CLEAR_VALUE' : 1,
'GL_INDEX_LOGIC_OP' : 1,
'GL_INDEX_MODE' : 1,
'GL_INDEX_OFFSET' : 1,
'GL_INDEX_SHIFT' : 1,
'GL_INDEX_WRITEMASK' : 1,
'GL_LIGHT0' : 1,
'GL_LIGHT1' : 1,
'GL_LIGHT2' : 1,
'GL_LIGHT3' : 1,
'GL_LIGHT4' : 1,
'GL_LIGHT5' : 1,
'GL_LIGHT6' : 1,
'GL_LIGHT7' : 1,
'GL_LIGHTING' : 1,
'GL_LIGHT_MODEL_AMBIENT': 4,
'GL_LIGHT_MODEL_LOCAL_VIEWER' : 1,
'GL_LIGHT_MODEL_TWO_SIDE' : 1,
'GL_LINE_SMOOTH' : 1,
'GL_LINE_SMOOTH_HINT' : 1,
'GL_LINE_STIPPLE' : 1,
'GL_LINE_STIPPLE_PATTERN' : 1,
'GL_LINE_STIPPLE_REPEAT' : 1,
'GL_LINE_WIDTH' : 1,
'GL_LINE_WIDTH_GRANULARITY' : 1,
'GL_LINE_WIDTH_RANGE': 2,
'GL_LIST_BASE' : 1,
'GL_LIST_INDEX' : 1,
'GL_LIST_MODE' : 1,
'GL_LOGIC_OP_MODE' : 1,
'GL_MAP1_COLOR_4' : 1,
'GL_MAP1_GRID_DOMAIN': 2,
'GL_MAP1_GRID_SEGMENTS' : 1,
'GL_MAP1_INDEX' : 1,
'GL_MAP1_NORMAL' : 1,
'GL_MAP1_TEXTURE_COORD_1' : 1,
'GL_MAP1_TEXTURE_COORD_2' : 1,
'GL_MAP1_TEXTURE_COORD_3' : 1,
'GL_MAP1_TEXTURE_COORD_4' : 1,
'GL_MAP1_VERTEX_3' : 1,
'GL_MAP1_VERTEX_4' : 1,
'GL_MAP2_COLOR_4' : 1,
'GL_MAP2_GRID_DOMAIN': 4,
'GL_MAP2_GRID_SEGMENTS': 2,
'GL_MAP2_INDEX' : 1,
'GL_MAP2_NORMAL' : 1,
'GL_MAP2_TEXTURE_COORD_1' : 1,
'GL_MAP2_TEXTURE_COORD_2' : 1,
'GL_MAP2_TEXTURE_COORD_3' : 1,
'GL_MAP2_TEXTURE_COORD_4' : 1,
'GL_MAP2_VERTEX_3' : 1,
'GL_MAP2_VERTEX_4' : 1,
'GL_MAP_COLOR' : 1,
'GL_MAP_STENCIL' : 1,
'GL_MATRIX_MODE' : 1,
'GL_MAX_CLIENT_ATTRIB_STACK_DEPTH' : 1,
'GL_MAX_ATTRIB_STACK_DEPTH' : 1,
'GL_MAX_CLIP_PLANES' : 1,
'GL_MAX_COLOR_MATRIX_STACK_DEPTH' : 1,
'GL_MAX_EVAL_ORDER' : 1,
'GL_MAX_LIGHTS' : 1,
'GL_MAX_LIST_NESTING' : 1,
'GL_MAX_MODELVIEW_STACK_DEPTH' : 1,
'GL_MAX_NAME_STACK_DEPTH' : 1,
'GL_MAX_PIXEL_MAP_TABLE' : 1,
'GL_MAX_PROJECTION_STACK_DEPTH' : 1,
'GL_MAX_TEXTURE_SIZE' : 1,
'GL_MAX_3D_TEXTURE_SIZE' : 1,
'GL_MAX_TEXTURE_STACK_DEPTH' : 1,
'GL_MAX_VIEWPORT_DIMS': 2,
'GL_MODELVIEW_MATRIX': 16,
'GL_MODELVIEW_STACK_DEPTH' : 1,
'GL_NAME_STACK_DEPTH' : 1,
'GL_NORMAL_ARRAY' : 1,
'GL_NORMAL_ARRAY_STRIDE' : 1,
'GL_NORMAL_ARRAY_TYPE' : 1,
'GL_NORMALIZE' : 1,
'GL_PACK_ALIGNMENT' : 1,
'GL_PACK_LSB_FIRST' : 1,
'GL_PACK_ROW_LENGTH' : 1,
'GL_PACK_SKIP_PIXELS' : 1,
'GL_PACK_SKIP_ROWS' : 1,
'GL_PACK_SWAP_BYTES' : 1,
'GL_PERSPECTIVE_CORRECTION_HINT' : 1,
'GL_PIXEL_MAP_A_TO_A_SIZE' : 1,
'GL_PIXEL_MAP_B_TO_B_SIZE' : 1,
'GL_PIXEL_MAP_G_TO_G_SIZE' : 1,
'GL_PIXEL_MAP_I_TO_A_SIZE' : 1,
'GL_PIXEL_MAP_I_TO_B_SIZE' : 1,
'GL_PIXEL_MAP_I_TO_G_SIZE' : 1,
'GL_PIXEL_MAP_I_TO_I_SIZE' : 1,
'GL_PIXEL_MAP_I_TO_R_SIZE' : 1,
'GL_PIXEL_MAP_R_TO_R_SIZE' : 1,
'GL_PIXEL_MAP_S_TO_S_SIZE' : 1,
'GL_POINT_SIZE' : 1,
'GL_POINT_SIZE_GRANULARITY' : 1,
'GL_POINT_SIZE_RANGE': 2,
'GL_POINT_SMOOTH' : 1,
'GL_POINT_SMOOTH_HINT' : 1,
'GL_POLYGON_MODE': 2,
'GL_POLYGON_OFFSET_FACTOR' : 1,
'GL_POLYGON_OFFSET_UNITS' : 1,
'GL_POLYGON_OFFSET_FILL' : 1,
'GL_POLYGON_OFFSET_LINE' : 1,
'GL_POLYGON_OFFSET_POINT' : 1,
'GL_POLYGON_SMOOTH' : 1,
'GL_POLYGON_SMOOTH_HINT' : 1,
'GL_POLYGON_STIPPLE' : 1,
'GL_PROJECTION_MATRIX': 16,
'GL_PROJECTION_STACK_DEPTH' : 1,
'GL_READ_BUFFER' : 1,
'GL_RED_BIAS' : 1,
'GL_RED_BITS' : 1,
'GL_RED_SCALE' : 1,
'GL_RENDER_MODE' : 1,
'GL_RGBA_MODE' : 1,
'GL_SCISSOR_BOX': 4,
'GL_SCISSOR_TEST' : 1,
'GL_SELECTION_BUFFER_SIZE' : 1,
'GL_SHADE_MODEL' : 1,
'GL_STENCIL_BITS' : 1,
'GL_STENCIL_CLEAR_VALUE' : 1,
'GL_STENCIL_FAIL' : 1,
'GL_STENCIL_FUNC' : 1,
'GL_STENCIL_PASS_DEPTH_FAIL' : 1,
'GL_STENCIL_PASS_DEPTH_PASS' : 1,
'GL_STENCIL_REF' : 1,
'GL_STENCIL_TEST' : 1,
'GL_STENCIL_VALUE_MASK' : 1,
'GL_STENCIL_WRITEMASK' : 1,
'GL_STEREO' : 1,
'GL_SUBPIXEL_BITS' : 1,
'GL_TEXTURE_1D' : 1,
'GL_TEXTURE_2D' : 1,
'GL_TEXTURE_BINDING_1D' : 1,
'GL_TEXTURE_BINDING_2D' : 1,
'GL_TEXTURE_BINDING_3D' : 1,
'GL_TEXTURE_COORD_ARRAY' : 1,
'GL_TEXTURE_COORD_ARRAY_SIZE' : 1,
'GL_TEXTURE_COORD_ARRAY_STRIDE' : 1,
'GL_TEXTURE_COORD_ARRAY_TYPE' : 1,
'GL_TEXTURE_ENV_COLOR': 4,
'GL_TEXTURE_ENV_MODE' : 1,
'GL_TEXTURE_GEN_Q' : 1,
'GL_TEXTURE_GEN_R' : 1,
'GL_TEXTURE_GEN_S' : 1,
'GL_TEXTURE_GEN_T' : 1,
'GL_TEXTURE_MATRIX': 16,
'GL_TEXTURE_STACK_DEPTH' : 1,
'GL_UNPACK_ALIGNMENT' : 1,
'GL_UNPACK_LSB_FIRST' : 1,
'GL_UNPACK_ROW_LENGTH' : 1,
'GL_UNPACK_SKIP_PIXELS' : 1,
'GL_UNPACK_SKIP_ROWS' : 1,
'GL_UNPACK_SWAP_BYTES' : 1,
'GL_VERTEX_ARRAY' : 1,
'GL_VERTEX_ARRAY_SIZE' : 1,
'GL_VERTEX_ARRAY_STRIDE' : 1,
'GL_VERTEX_ARRAY_TYPE' : 1,
'GL_VIEWPORT': 4,
'GL_ZOOM_X' : 1,
'GL_ZOOM_Y' : 1,
    # GL_ARB_imaging, which is part of OpenGL 1.2.1
'GL_COLOR_MATRIX' : 16,
'GL_COLOR_MATRIX_STACK_DEPTH' : 1,
'GL_COLOR_TABLE' : 1,
'GL_POST_CONVOLUTION_COLOR_TABLE' : 1,
'GL_POST_COLOR_MATRIX_COLOR_TABLE' : 1,
'GL_PROXY_COLOR_TABLE' : 1,
'GL_CONVOLUTION_1D' : 1,
'GL_CONVOLUTION_2D' : 1,
'GL_SEPARABLE_2D' : 1,
'GL_POST_CONVOLUTION_RED_SCALE' : 1,
'GL_POST_CONVOLUTION_GREEN_SCALE' : 1,
'GL_POST_CONVOLUTION_BLUE_SCALE' : 1,
'GL_POST_CONVOLUTION_ALPHA_SCALE' : 1,
'GL_POST_CONVOLUTION_RED_BIAS' : 1,
'GL_POST_CONVOLUTION_GREEN_BIAS' : 1,
'GL_POST_CONVOLUTION_BLUE_BIAS' : 1,
'GL_POST_CONVOLUTION_ALPHA_BIAS' : 1,
'GL_HISTOGRAM' : 1,
'GL_MINMAX' : 1,
'GL_MAX_COLOR_MATRIX_STACK_DEPTH' : 1,
'GL_MAX_CONVOLUTION_WIDTH' : 1,
'GL_MAX_CONVOLUTION_HEIGHT' : 1,
}
extensions_num_get_values = {
'GL_BLEND_COLOR_EXT': (4, 'CR_EXT_blend_color'),
'GL_BLEND_EQUATION_EXT': (1, 'CR_EXT_blend_minmax'),
'GL_BLEND_SRC_RGB_EXT': (1, 'CR_EXT_blend_func_separate'),
'GL_BLEND_DST_RGB_EXT': (1, 'CR_EXT_blend_func_separate'),
'GL_BLEND_SRC_ALPHA_EXT': (1, 'CR_EXT_blend_func_separate'),
'GL_BLEND_DST_ALPHA_EXT': (1, 'CR_EXT_blend_func_separate'),
'GL_FOG_DISTANCE_MODE_NV': (1, 'CR_NV_fog_distance'),
'GL_MAX_CUBE_MAP_TEXTURE_SIZE_ARB': (1, 'CR_ARB_texture_cube_map'),
'GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT': (1, 'CR_EXT_texture_filter_anisotropic'),
'GL_TEXTURE_BINDING_CUBE_MAP_ARB': (1, 'CR_ARB_texture_cube_map'),
'GL_TEXTURE_CUBE_MAP_ARB': (1, 'CR_ARB_texture_cube_map'),
'GL_ACTIVE_TEXTURE_ARB': (1, 'CR_ARB_multitexture'),
'GL_CLIENT_ACTIVE_TEXTURE_ARB': (1, 'CR_ARB_multitexture'),
'GL_MAX_TEXTURE_UNITS_ARB': (1, 'CR_ARB_multitexture'),
'GL_NUM_GENERAL_COMBINERS_NV': (1, 'CR_NV_register_combiners'),
'GL_MAX_GENERAL_COMBINERS_NV': (1, 'CR_NV_register_combiners'),
'GL_COLOR_SUM_CLAMP_NV': (1, 'CR_NV_register_combiners'),
'GL_CONSTANT_COLOR0_NV': (4, 'CR_NV_register_combiners'),
'GL_CONSTANT_COLOR1_NV': (4, 'CR_NV_register_combiners'),
'GL_PER_STAGE_CONSTANTS_NV': (1, 'CR_NV_register_combiners2'),
'GL_LIGHT_MODEL_COLOR_CONTROL_EXT': (1, 'CR_EXT_separate_specular_color'),
'GL_COLOR_SUM_EXT': (1, 'CR_EXT_secondary_color'),
'GL_CURRENT_SECONDARY_COLOR_EXT': (4, 'CR_EXT_secondary_color'),
'GL_SECONDARY_COLOR_ARRAY_SIZE_EXT': (1, 'CR_EXT_secondary_color'),
'GL_SECONDARY_COLOR_ARRAY_TYPE_EXT': (1, 'CR_EXT_secondary_color'),
'GL_SECONDARY_COLOR_ARRAY_STRIDE_EXT': (1, 'CR_EXT_secondary_color'),
'GL_RESCALE_NORMAL': (1, 'CR_OPENGL_VERSION_1_2'),
'GL_NUM_COMPRESSED_TEXTURE_FORMATS': (1, 'CR_ARB_texture_compression'),
'GL_TEXTURE_3D': (1, 'CR_OPENGL_VERSION_1_2'),
'GL_LIGHT_MODEL_COLOR_CONTROL': (1, 'CR_OPENGL_VERSION_1_2'),
'GL_UNPACK_IMAGE_HEIGHT': (1, 'CR_OPENGL_VERSION_1_2'),
'GL_UNPACK_SKIP_IMAGES': (1, 'CR_OPENGL_VERSION_1_2'),
'GL_PACK_IMAGE_HEIGHT': (1, 'CR_OPENGL_VERSION_1_2'),
'GL_PACK_SKIP_IMAGES': (1, 'CR_OPENGL_VERSION_1_2'),
'GL_ALIASED_POINT_SIZE_RANGE': (2, 'CR_OPENGL_VERSION_1_2'),
'GL_ALIASED_LINE_WIDTH_RANGE': (2, 'CR_OPENGL_VERSION_1_2'),
'GL_MAX_ELEMENTS_INDICES': (1, 'CR_OPENGL_VERSION_1_2'),
'GL_MAX_ELEMENTS_VERTICES': (1, 'CR_OPENGL_VERSION_1_2'),
'GL_MULTISAMPLE_ARB': (1, 'CR_ARB_multisample'),
'GL_SAMPLE_ALPHA_TO_COVERAGE_ARB': (1, 'CR_ARB_multisample'),
'GL_SAMPLE_ALPHA_TO_ONE_ARB': (1, 'CR_ARB_multisample'),
'GL_SAMPLE_COVERAGE_ARB': (1, 'CR_ARB_multisample'),
'GL_SAMPLE_BUFFERS_ARB': (1, 'CR_ARB_multisample'),
'GL_SAMPLES_ARB': (1, 'CR_ARB_multisample'),
'GL_SAMPLE_COVERAGE_VALUE_ARB': (1, 'CR_ARB_multisample'),
'GL_SAMPLE_COVERAGE_INVERT_ARB': (1, 'CR_ARB_multisample'),
'GL_POINT_SPRITE_ARB': (1, 'CR_ARB_point_sprite'),
'GL_MAX_TEXTURE_LOD_BIAS_EXT': (1, 'CR_EXT_texture_lod_bias'),
'GL_NUM_COMPRESSED_TEXTURE_FORMATS_ARB': (1, 'CR_ARB_texture_compression'),
'GL_PROGRAM_ERROR_POSITION_NV': (1, 'CR_NV_vertex_program'),
'GL_VERTEX_PROGRAM_BINDING_NV': (1, 'CR_NV_vertex_program'),
'GL_MAX_VERTEX_ATTRIBS_ARB': (1, 'CR_ARB_vertex_program'),
'GL_MAX_TEXTURE_COORDS_ARB': (1, 'CR_ARB_vertex_program'),
'GL_PROGRAM_ERROR_POSITION_NV': (1, 'CR_NV_fragment_program'),
'GL_FRAGMENT_PROGRAM_BINDING_NV': (1, 'CR_NV_fragment_program'),
'GL_MAX_RECTANGLE_TEXTURE_SIZE_NV': (1, 'CR_NV_texture_rectangle'),
'GL_TEXTURE_RECTANGLE_NV': (1, 'CR_NV_texture_rectangle'),
'GL_TEXTURE_BINDING_RECTANGLE_NV': (1, 'CR_NV_texture_rectangle'),
'GL_CLIP_VOLUME_CLIPPING_HINT_EXT' : (3, 'CR_EXT_clip_volume_hint'),
'GL_RASTER_POSITION_UNCLIPPED_IBM' : (1, 'CR_IBM_rasterpos_clip'),
'GL_GENERATE_MIPMAP_HINT_SGIS' : (1, 'CR_SGIS_generate_mipmap'),
'GL_CURRENT_FOG_COORDINATE_EXT' : (1, 'CR_EXT_fog_coord'),
'GL_FOG_COORDINATE_ARRAY_TYPE_EXT' : (1, 'CR_EXT_fog_coord'),
'GL_FOG_COORDINATE_ARRAY_STRIDE_EXT' : (1, 'CR_EXT_fog_coord'),
'GL_TRANSPOSE_COLOR_MATRIX_ARB': (16, 'CR_ARB_transpose_matrix'),
'GL_TRANSPOSE_MODELVIEW_MATRIX_ARB': (16, 'CR_ARB_transpose_matrix'),
'GL_TRANSPOSE_PROJECTION_MATRIX_ARB': (16, 'CR_ARB_transpose_matrix'),
'GL_TRANSPOSE_TEXTURE_MATRIX_ARB': (16, 'CR_ARB_transpose_matrix'),
'GL_ARRAY_BUFFER_BINDING_ARB': (1, 'CR_ARB_vertex_buffer_object'),
'GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB': (1, 'CR_ARB_vertex_buffer_object'),
'GL_VERTEX_ARRAY_BUFFER_BINDING_ARB': (1, 'CR_ARB_vertex_buffer_object'),
'GL_NORMAL_ARRAY_BUFFER_BINDING_ARB': (1, 'CR_ARB_vertex_buffer_object'),
'GL_COLOR_ARRAY_BUFFER_BINDING_ARB': (1, 'CR_ARB_vertex_buffer_object'),
'GL_INDEX_ARRAY_BUFFER_BINDING_ARB': (1, 'CR_ARB_vertex_buffer_object'),
'GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING_ARB': (1, 'CR_ARB_vertex_buffer_object'),
'GL_EDGE_FLAG_ARRAY_BUFFER_BINDING_ARB': (1, 'CR_ARB_vertex_buffer_object'),
'GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING_ARB': (1, 'CR_ARB_vertex_buffer_object'),
'GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING_ARB': (1, 'CR_ARB_vertex_buffer_object'),
'GL_WEIGHT_ARRAY_BUFFER_BINDING_ARB': (1, 'CR_ARB_vertex_buffer_object'),
'GL_MAX_TEXTURE_IMAGE_UNITS_ARB': (1, 'CR_ARB_fragment_program'),
    # We don't support GL_ARB_draw_buffers, but for some reason the ubuntu64 8.10 VM queries it on a macOS host
'GL_MAX_DRAW_BUFFERS_ARB': (1, 'VBOX'),
'GL_MAX_PROGRAM_MATRICES_ARB': (1, 'CR_ARB_vertex_program'),
'GL_MAX_PROGRAM_MATRIX_STACK_DEPTH_ARB': (1, 'CR_ARB_vertex_program'),
# Vertex shaders (2.0) #
'GL_MAX_VERTEX_UNIFORM_COMPONENTS': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_MAX_VARYING_FLOATS': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_MAX_VERTEX_ATTRIBS': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_MAX_TEXTURE_IMAGE_UNITS': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_MAX_TEXTURE_COORDS': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_VERTEX_PROGRAM_POINT_SIZE': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_VERTEX_PROGRAM_TWO_SIDE': (1, 'CR_OPENGL_VERSION_2_0'),
# Fragment shaders (2.0) #
'GL_MAX_FRAGMENT_UNIFORM_COMPONENTS': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_FRAGMENT_SHADER_DERIVATIVE_HINT': (1, 'CR_OPENGL_VERSION_2_0'),
# Draw buffers (2.0) #
'GL_MAX_DRAW_BUFFERS': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER0': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER1': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER2': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER3': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER4': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER5': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER6': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER7': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER8': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER9': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER10': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER11': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER12': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER13': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER14': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER15': (1, 'CR_OPENGL_VERSION_2_0'),
# Point sprite (2.0) #
'GL_POINT_SPRITE': (1, 'CR_OPENGL_VERSION_2_0'),
# Separate stencil (2.0) #
'GL_STENCIL_BACK_FUNC': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_STENCIL_BACK_REF': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_STENCIL_BACK_VALUE_MASK': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_STENCIL_BACK_FAIL': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_STENCIL_BACK_PASS_DEPTH_FAIL': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_STENCIL_BACK_PASS_DEPTH_PASS': (1, 'CR_OPENGL_VERSION_2_0'),
# Frame buffer object EXT #
'GL_FRAMEBUFFER_BINDING_EXT': (1, 'CR_EXT_framebuffer_object'),
'GL_RENDERBUFFER_BINDING_EXT': (1, 'CR_EXT_framebuffer_object'),
'GL_MAX_COLOR_ATTACHMENTS_EXT': (1, 'CR_EXT_framebuffer_object'),
'GL_MAX_RENDERBUFFER_SIZE_EXT': (1, 'CR_EXT_framebuffer_object'),
# ARB_shader_objects
'GL_CURRENT_PROGRAM': (1, 'CR_ARB_shader_objects'),
# EXT_framebuffer_blit
'GL_READ_FRAMEBUFFER_BINDING_EXT': (1, 'CR_EXT_framebuffer_blit'),
'GL_DRAW_FRAMEBUFFER_BINDING_EXT': (1, 'CR_EXT_framebuffer_blit'),
# EXT_stencil_two_side
'GL_ACTIVE_STENCIL_FACE_EXT': (1, 'CR_EXT_stencil_two_side'),
}
get_keys = num_get_values.keys() + extensions_num_get_values.keys()
get_keys.sort()
print "struct nv_struct { GLenum pname; unsigned int num_values; } num_values_array[] = {"
for key in get_keys:
try:
print '\t{ %s, %d },' % (key, num_get_values[key])
except KeyError:
(nv, ifdef) = extensions_num_get_values[key]
print '#ifdef %s' % ifdef
print '\t{ %s, %d },' % (key, nv)
print '#endif /* %s */' % ifdef
print "\t{ 0, 0 }"
print "};"
print """
static unsigned int __numValues( GLenum pname )
{
struct nv_struct *temp;
for (temp = num_values_array; temp->num_values != 0 ; temp++)
{
if (temp->pname == pname)
return temp->num_values;
}
crDebug( "Invalid pname to __numValues: 0x%x\\n", (int) pname );
return 0;
}
"""
| gpl-2.0 | 7,988,666,012,723,350,000 | 38.713964 | 105 | 0.595077 | false |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/frame/test_nonunique_indexes.py | 2 | 18038 | import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, MultiIndex, Series, date_range
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestDataFrameNonuniqueIndexes(TestData):
def test_column_dups_operations(self):
def check(result, expected=None):
if expected is not None:
assert_frame_equal(result, expected)
result.dtypes
str(result)
# assignment
# GH 3687
arr = np.random.randn(3, 2)
idx = list(range(2))
df = DataFrame(arr, columns=["A", "A"])
df.columns = idx
expected = DataFrame(arr, columns=idx)
check(df, expected)
idx = date_range("20130101", periods=4, freq="Q-NOV")
df = DataFrame(
[[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]], columns=["a", "a", "a", "a"]
)
df.columns = idx
expected = DataFrame([[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]], columns=idx)
check(df, expected)
# insert
df = DataFrame(
[[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]],
columns=["foo", "bar", "foo", "hello"],
)
df["string"] = "bah"
expected = DataFrame(
[[1, 1, 1, 5, "bah"], [1, 1, 2, 5, "bah"], [2, 1, 3, 5, "bah"]],
columns=["foo", "bar", "foo", "hello", "string"],
)
check(df, expected)
with pytest.raises(ValueError, match="Length of value"):
df.insert(0, "AnotherColumn", range(len(df.index) - 1))
# insert same dtype
df["foo2"] = 3
expected = DataFrame(
[[1, 1, 1, 5, "bah", 3], [1, 1, 2, 5, "bah", 3], [2, 1, 3, 5, "bah", 3]],
columns=["foo", "bar", "foo", "hello", "string", "foo2"],
)
check(df, expected)
# set (non-dup)
df["foo2"] = 4
expected = DataFrame(
[[1, 1, 1, 5, "bah", 4], [1, 1, 2, 5, "bah", 4], [2, 1, 3, 5, "bah", 4]],
columns=["foo", "bar", "foo", "hello", "string", "foo2"],
)
check(df, expected)
df["foo2"] = 3
# delete (non dup)
del df["bar"]
expected = DataFrame(
[[1, 1, 5, "bah", 3], [1, 2, 5, "bah", 3], [2, 3, 5, "bah", 3]],
columns=["foo", "foo", "hello", "string", "foo2"],
)
check(df, expected)
# try to delete again (its not consolidated)
del df["hello"]
expected = DataFrame(
[[1, 1, "bah", 3], [1, 2, "bah", 3], [2, 3, "bah", 3]],
columns=["foo", "foo", "string", "foo2"],
)
check(df, expected)
# consolidate
df = df._consolidate()
expected = DataFrame(
[[1, 1, "bah", 3], [1, 2, "bah", 3], [2, 3, "bah", 3]],
columns=["foo", "foo", "string", "foo2"],
)
check(df, expected)
# insert
df.insert(2, "new_col", 5.0)
expected = DataFrame(
[[1, 1, 5.0, "bah", 3], [1, 2, 5.0, "bah", 3], [2, 3, 5.0, "bah", 3]],
columns=["foo", "foo", "new_col", "string", "foo2"],
)
check(df, expected)
# insert a dup
with pytest.raises(ValueError, match="cannot insert"):
df.insert(2, "new_col", 4.0)
df.insert(2, "new_col", 4.0, allow_duplicates=True)
expected = DataFrame(
[
[1, 1, 4.0, 5.0, "bah", 3],
[1, 2, 4.0, 5.0, "bah", 3],
[2, 3, 4.0, 5.0, "bah", 3],
],
columns=["foo", "foo", "new_col", "new_col", "string", "foo2"],
)
check(df, expected)
# delete (dup)
del df["foo"]
expected = DataFrame(
[[4.0, 5.0, "bah", 3], [4.0, 5.0, "bah", 3], [4.0, 5.0, "bah", 3]],
columns=["new_col", "new_col", "string", "foo2"],
)
assert_frame_equal(df, expected)
# dup across dtypes
df = DataFrame(
[[1, 1, 1.0, 5], [1, 1, 2.0, 5], [2, 1, 3.0, 5]],
columns=["foo", "bar", "foo", "hello"],
)
check(df)
df["foo2"] = 7.0
expected = DataFrame(
[[1, 1, 1.0, 5, 7.0], [1, 1, 2.0, 5, 7.0], [2, 1, 3.0, 5, 7.0]],
columns=["foo", "bar", "foo", "hello", "foo2"],
)
check(df, expected)
result = df["foo"]
expected = DataFrame([[1, 1.0], [1, 2.0], [2, 3.0]], columns=["foo", "foo"])
check(result, expected)
# multiple replacements
df["foo"] = "string"
expected = DataFrame(
[
["string", 1, "string", 5, 7.0],
["string", 1, "string", 5, 7.0],
["string", 1, "string", 5, 7.0],
],
columns=["foo", "bar", "foo", "hello", "foo2"],
)
check(df, expected)
del df["foo"]
expected = DataFrame(
[[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "hello", "foo2"]
)
check(df, expected)
# values
df = DataFrame([[1, 2.5], [3, 4.5]], index=[1, 2], columns=["x", "x"])
result = df.values
expected = np.array([[1, 2.5], [3, 4.5]])
assert (result == expected).all().all()
# rename, GH 4403
df4 = DataFrame(
{"RT": [0.0454], "TClose": [22.02], "TExg": [0.0422]},
index=MultiIndex.from_tuples(
[(600809, 20130331)], names=["STK_ID", "RPT_Date"]
),
)
df5 = DataFrame(
{
"RPT_Date": [20120930, 20121231, 20130331],
"STK_ID": [600809] * 3,
"STK_Name": ["饡驦", "饡驦", "饡驦"],
"TClose": [38.05, 41.66, 30.01],
},
index=MultiIndex.from_tuples(
[(600809, 20120930), (600809, 20121231), (600809, 20130331)],
names=["STK_ID", "RPT_Date"],
),
)
k = pd.merge(df4, df5, how="inner", left_index=True, right_index=True)
result = k.rename(columns={"TClose_x": "TClose", "TClose_y": "QT_Close"})
str(result)
result.dtypes
expected = DataFrame(
[[0.0454, 22.02, 0.0422, 20130331, 600809, "饡驦", 30.01]],
columns=[
"RT",
"TClose",
"TExg",
"RPT_Date",
"STK_ID",
"STK_Name",
"QT_Close",
],
).set_index(["STK_ID", "RPT_Date"], drop=False)
assert_frame_equal(result, expected)
# reindex is invalid!
df = DataFrame(
[[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "a", "a"]
)
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df.reindex(columns=["bar"])
with pytest.raises(ValueError, match=msg):
df.reindex(columns=["bar", "foo"])
# drop
df = DataFrame(
[[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "a", "a"]
)
result = df.drop(["a"], axis=1)
expected = DataFrame([[1], [1], [1]], columns=["bar"])
check(result, expected)
result = df.drop("a", axis=1)
check(result, expected)
# describe
df = DataFrame(
[[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=["bar", "a", "a"],
dtype="float64",
)
result = df.describe()
s = df.iloc[:, 0].describe()
expected = pd.concat([s, s, s], keys=df.columns, axis=1)
check(result, expected)
# check column dups with index equal and not equal to df's index
df = DataFrame(
np.random.randn(5, 3),
index=["a", "b", "c", "d", "e"],
columns=["A", "B", "A"],
)
for index in [df.index, pd.Index(list("edcba"))]:
this_df = df.copy()
expected_ser = pd.Series(index.values, index=this_df.index)
expected_df = DataFrame(
{"A": expected_ser, "B": this_df["B"], "A": expected_ser},
columns=["A", "B", "A"],
)
this_df["A"] = index
check(this_df, expected_df)
# operations
for op in ["__add__", "__mul__", "__sub__", "__truediv__"]:
df = DataFrame(dict(A=np.arange(10), B=np.random.rand(10)))
expected = getattr(df, op)(df)
expected.columns = ["A", "A"]
df.columns = ["A", "A"]
result = getattr(df, op)(df)
check(result, expected)
# multiple assignments that change dtypes
# the location indexer is a slice
# GH 6120
df = DataFrame(np.random.randn(5, 2), columns=["that", "that"])
expected = DataFrame(1.0, index=range(5), columns=["that", "that"])
df["that"] = 1.0
check(df, expected)
df = DataFrame(np.random.rand(5, 2), columns=["that", "that"])
expected = DataFrame(1, index=range(5), columns=["that", "that"])
df["that"] = 1
check(df, expected)
def test_column_dups2(self):
# drop buggy GH 6240
df = DataFrame(
{
"A": np.random.randn(5),
"B": np.random.randn(5),
"C": np.random.randn(5),
"D": ["a", "b", "c", "d", "e"],
}
)
expected = df.take([0, 1, 1], axis=1)
df2 = df.take([2, 0, 1, 2, 1], axis=1)
result = df2.drop("C", axis=1)
assert_frame_equal(result, expected)
# dropna
df = DataFrame(
{
"A": np.random.randn(5),
"B": np.random.randn(5),
"C": np.random.randn(5),
"D": ["a", "b", "c", "d", "e"],
}
)
df.iloc[2, [0, 1, 2]] = np.nan
df.iloc[0, 0] = np.nan
df.iloc[1, 1] = np.nan
df.iloc[:, 3] = np.nan
expected = df.dropna(subset=["A", "B", "C"], how="all")
expected.columns = ["A", "A", "B", "C"]
df.columns = ["A", "A", "B", "C"]
result = df.dropna(subset=["A", "C"], how="all")
assert_frame_equal(result, expected)
def test_column_dups_indexing(self):
def check(result, expected=None):
if expected is not None:
assert_frame_equal(result, expected)
result.dtypes
str(result)
# boolean indexing
# GH 4879
dups = ["A", "A", "C", "D"]
df = DataFrame(
np.arange(12).reshape(3, 4), columns=["A", "B", "C", "D"], dtype="float64"
)
expected = df[df.C > 6]
expected.columns = dups
df = DataFrame(np.arange(12).reshape(3, 4), columns=dups, dtype="float64")
result = df[df.C > 6]
check(result, expected)
# where
df = DataFrame(
np.arange(12).reshape(3, 4), columns=["A", "B", "C", "D"], dtype="float64"
)
expected = df[df > 6]
expected.columns = dups
df = DataFrame(np.arange(12).reshape(3, 4), columns=dups, dtype="float64")
result = df[df > 6]
check(result, expected)
# boolean with the duplicate raises
df = DataFrame(np.arange(12).reshape(3, 4), columns=dups, dtype="float64")
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df[df.A > 6]
# dup aligning operations should work
# GH 5185
df1 = DataFrame([1, 2, 3, 4, 5], index=[1, 2, 1, 2, 3])
df2 = DataFrame([1, 2, 3], index=[1, 2, 3])
expected = DataFrame([0, 2, 0, 2, 2], index=[1, 1, 2, 2, 3])
result = df1.sub(df2)
assert_frame_equal(result, expected)
# equality
df1 = DataFrame([[1, 2], [2, np.nan], [3, 4], [4, 4]], columns=["A", "B"])
df2 = DataFrame([[0, 1], [2, 4], [2, np.nan], [4, 5]], columns=["A", "A"])
# not-comparing like-labelled
msg = "Can only compare identically-labeled DataFrame objects"
with pytest.raises(ValueError, match=msg):
df1 == df2
df1r = df1.reindex_like(df2)
result = df1r == df2
expected = DataFrame(
[[False, True], [True, False], [False, False], [True, False]],
columns=["A", "A"],
)
assert_frame_equal(result, expected)
# mixed column selection
# GH 5639
dfbool = DataFrame(
{
"one": Series([True, True, False], index=["a", "b", "c"]),
"two": Series([False, False, True, False], index=["a", "b", "c", "d"]),
"three": Series([False, True, True, True], index=["a", "b", "c", "d"]),
}
)
expected = pd.concat([dfbool["one"], dfbool["three"], dfbool["one"]], axis=1)
result = dfbool[["one", "three", "one"]]
check(result, expected)
# multi-axis dups
# GH 6121
df = DataFrame(
np.arange(25.0).reshape(5, 5),
index=["a", "b", "c", "d", "e"],
columns=["A", "B", "C", "D", "E"],
)
z = df[["A", "C", "A"]].copy()
expected = z.loc[["a", "c", "a"]]
df = DataFrame(
np.arange(25.0).reshape(5, 5),
index=["a", "b", "c", "d", "e"],
columns=["A", "B", "C", "D", "E"],
)
z = df[["A", "C", "A"]]
result = z.loc[["a", "c", "a"]]
check(result, expected)
def test_column_dups_indexing2(self):
# GH 8363
# datetime ops with a non-unique index
df = DataFrame(
{"A": np.arange(5, dtype="int64"), "B": np.arange(1, 6, dtype="int64")},
index=[2, 2, 3, 3, 4],
)
result = df.B - df.A
expected = Series(1, index=[2, 2, 3, 3, 4])
assert_series_equal(result, expected)
df = DataFrame(
{
"A": date_range("20130101", periods=5),
"B": date_range("20130101 09:00:00", periods=5),
},
index=[2, 2, 3, 3, 4],
)
result = df.B - df.A
expected = Series(pd.Timedelta("9 hours"), index=[2, 2, 3, 3, 4])
assert_series_equal(result, expected)
def test_columns_with_dups(self):
# GH 3468 related
# basic
df = DataFrame([[1, 2]], columns=["a", "a"])
df.columns = ["a", "a.1"]
str(df)
expected = DataFrame([[1, 2]], columns=["a", "a.1"])
assert_frame_equal(df, expected)
df = DataFrame([[1, 2, 3]], columns=["b", "a", "a"])
df.columns = ["b", "a", "a.1"]
str(df)
expected = DataFrame([[1, 2, 3]], columns=["b", "a", "a.1"])
assert_frame_equal(df, expected)
# with a dup index
df = DataFrame([[1, 2]], columns=["a", "a"])
df.columns = ["b", "b"]
str(df)
expected = DataFrame([[1, 2]], columns=["b", "b"])
assert_frame_equal(df, expected)
# multi-dtype
df = DataFrame(
[[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]],
columns=["a", "a", "b", "b", "d", "c", "c"],
)
df.columns = list("ABCDEFG")
str(df)
expected = DataFrame(
[[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]], columns=list("ABCDEFG")
)
assert_frame_equal(df, expected)
df = DataFrame([[1, 2, "foo", "bar"]], columns=["a", "a", "a", "a"])
df.columns = ["a", "a.1", "a.2", "a.3"]
str(df)
expected = DataFrame([[1, 2, "foo", "bar"]], columns=["a", "a.1", "a.2", "a.3"])
assert_frame_equal(df, expected)
# dups across blocks
df_float = DataFrame(np.random.randn(10, 3), dtype="float64")
df_int = DataFrame(np.random.randn(10, 3), dtype="int64")
df_bool = DataFrame(True, index=df_float.index, columns=df_float.columns)
df_object = DataFrame("foo", index=df_float.index, columns=df_float.columns)
df_dt = DataFrame(
pd.Timestamp("20010101"), index=df_float.index, columns=df_float.columns
)
df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1)
assert len(df._data._blknos) == len(df.columns)
assert len(df._data._blklocs) == len(df.columns)
# testing iloc
for i in range(len(df.columns)):
df.iloc[:, i]
# dup columns across dtype GH 2079/2194
vals = [[1, -1, 2.0], [2, -2, 3.0]]
rs = DataFrame(vals, columns=["A", "A", "B"])
xp = DataFrame(vals)
xp.columns = ["A", "A", "B"]
assert_frame_equal(rs, xp)
def test_values_duplicates(self):
df = DataFrame(
[[1, 2, "a", "b"], [1, 2, "a", "b"]], columns=["one", "one", "two", "two"]
)
result = df.values
expected = np.array([[1, 2, "a", "b"], [1, 2, "a", "b"]], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_set_value_by_index(self):
# See gh-12344
df = DataFrame(np.arange(9).reshape(3, 3).T)
df.columns = list("AAA")
expected = df.iloc[:, 2]
df.iloc[:, 0] = 3
assert_series_equal(df.iloc[:, 2], expected)
df = DataFrame(np.arange(9).reshape(3, 3).T)
df.columns = [2, float(2), str(2)]
expected = df.iloc[:, 1]
df.iloc[:, 0] = 3
assert_series_equal(df.iloc[:, 1], expected)
def test_insert_with_columns_dups(self):
# GH 14291
df = pd.DataFrame()
df.insert(0, "A", ["g", "h", "i"], allow_duplicates=True)
df.insert(0, "A", ["d", "e", "f"], allow_duplicates=True)
df.insert(0, "A", ["a", "b", "c"], allow_duplicates=True)
exp = pd.DataFrame(
[["a", "d", "g"], ["b", "e", "h"], ["c", "f", "i"]], columns=["A", "A", "A"]
)
assert_frame_equal(df, exp)
| apache-2.0 | 293,897,945,920,869,200 | 33.132576 | 88 | 0.455943 | false |
ZuluPro/libcloud | libcloud/dns/drivers/onapp.py | 10 | 10684 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OnApp DNS Driver
"""
__all__ = [
'OnAppDNSDriver'
]
import json
from libcloud.common.onapp import OnAppConnection
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.base import DNSDriver, Zone, Record
DEFAULT_ZONE_TTL = 1200
class OnAppDNSDriver(DNSDriver):
type = Provider.ONAPP
name = 'OnApp'
website = 'http://onapp.com/'
connectionCls = OnAppConnection
RECORD_TYPE_MAP = {
RecordType.SOA: 'SOA',
RecordType.NS: 'NS',
RecordType.A: 'A',
RecordType.AAAA: 'AAAA',
RecordType.CNAME: 'CNAME',
RecordType.MX: 'MX',
RecordType.TXT: 'TXT',
RecordType.SRV: 'SRV',
}
def list_zones(self):
"""
Return a list of zones.
:return: ``list`` of :class:`Zone`
"""
response = self.connection.request('/dns_zones.json')
zones = self._to_zones(response.object)
return zones
def get_zone(self, zone_id):
"""
Return a Zone instance.
:param zone_id: ID of the required zone
:type zone_id: ``str``
:rtype: :class:`Zone`
"""
response = self.connection.request('/dns_zones/%s.json' % zone_id)
zone = self._to_zone(response.object)
return zone
def create_zone(self, domain, type='master', ttl=None, extra=None):
"""
Create a new zone.
:param domain: Zone domain name (e.g. example.com)
:type domain: ``str``
:param type: Zone type (All zones are master by design).
:type type: ``str``
:param ttl: TTL for new records. (This is not really used)
:type ttl: ``int``
:param extra: Extra attributes (set auto_populate: 0 if you
don't want to auto populate with existing DNS records). (optional)
:type extra: ``dict``
:rtype: :class:`Zone`
For more info, please see:
https://docs.onapp.com/display/52API/Add+DNS+Zone
"""
dns_zone = {'name': domain}
if extra is not None:
dns_zone.update(extra)
dns_zone_data = json.dumps({'dns_zone': dns_zone})
response = self.connection.request(
'/dns_zones.json',
method='POST',
headers={"Content-type": "application/json"},
data=dns_zone_data)
zone = self._to_zone(response.object)
return zone
def delete_zone(self, zone):
"""
Delete a zone.
Note: This will also delete all the records belonging to this zone.
:param zone: Zone to delete.
:type zone: :class:`Zone`
:rtype: ``bool``
"""
self.connection.request(
'/dns_zones/%s.json' % zone.id,
method='DELETE')
return True
def list_records(self, zone):
"""
Return a list of records for the provided zone.
:param zone: Zone to list records for.
:type zone: :class:`Zone`
:return: ``list`` of :class:`Record`
"""
response = self.connection.request(
'/dns_zones/%s/records.json' % zone.id)
dns_records = response.object['dns_zone']['records']
records = self._to_records(dns_records, zone)
return records
def get_record(self, zone_id, record_id):
"""
Return a Record instance.
:param zone_id: ID of the required zone
:type zone_id: ``str``
:param record_id: ID of the required record
:type record_id: ``str``
:rtype: :class:`Record`
"""
response = self.connection.request('/dns_zones/%s/records/%s.json' %
(zone_id, record_id))
record = self._to_record(response.object, zone_id=zone_id)
return record
def create_record(self, name, zone, type, data, extra=None):
"""
Create a new record.
:param name: Record name without the domain name (e.g. www).
Note: If you want to create a record for a base domain
name, you should specify empty string ('') for this
argument.
:type name: ``str``
:param zone: Zone where the requested record is created.
:type zone: :class:`Zone`
:param type: DNS record type (A, AAAA, ...).
:type type: :class:`RecordType`
:param data: Data for the record (depends on the record type).
Used only for A and AAAA record types.
:type data: ``str``
:param extra: Extra attributes (driver specific). (optional)
:type extra: ``dict``
:rtype: :class:`Record`
For more info, please see:
https://docs.onapp.com/display/52API/Add+DNS+Record
"""
dns_record = self._format_record(name, type, data, extra)
dns_record_data = json.dumps({'dns_record': dns_record})
response = self.connection.request(
'/dns_zones/%s/records.json' % zone.id,
method='POST',
headers={"Content-type": "application/json"},
data=dns_record_data)
record = self._to_record(response.object, zone=zone)
return record
def update_record(self, record, name, type, data, extra=None):
"""
Update an existing record.
:param record: Record to update.
:type record: :class:`Record`
:param name: Record name without the domain name (e.g. www).
Note: If you want to create a record for a base domain
name, you should specify empty string ('') for this
argument.
:type name: ``str``
:param type: DNS record type (A, AAAA, ...).
:type type: :class:`RecordType`
:param data: Data for the record (depends on the record type).
Used only for A and AAAA record types.
:type data: ``str``
:param extra: (optional) Extra attributes (driver specific).
:type extra: ``dict``
:rtype: :class:`Record`
For more info, please see:
https://docs.onapp.com/display/52API/Edit+DNS+Records
"""
zone = record.zone
dns_record = self._format_record(name, type, data, extra)
dns_record_data = json.dumps({'dns_record': dns_record})
self.connection.request(
'/dns_zones/%s/records/%s.json' % (zone.id, record.id),
method='PUT',
headers={"Content-type": "application/json"},
data=dns_record_data)
record = self.get_record(zone.id, record.id)
return record
def delete_record(self, record):
"""
Delete a record.
:param record: Record to delete.
:type record: :class:`Record`
:rtype: ``bool``
For more info, please see:
https://docs.onapp.com/display/52API/Delete+DNS+Record
"""
zone_id = record.zone.id
self.connection.request('/dns_zones/%s/records/%s.json' % (zone_id,
record.id), method='DELETE')
return True
#
# Helper methods
#
def _format_record(self, name, type, data, extra):
        if name == '':
            # '@' is the DNS shorthand for the zone apex (the base domain)
            name = '@'
if extra is None:
extra = {}
record_type = self.RECORD_TYPE_MAP[type]
new_record = {
'name': name,
'ttl': extra.get('ttl', DEFAULT_ZONE_TTL),
'type': record_type
}
if type == RecordType.MX:
additions = {
'priority': extra.get('priority', 1),
'hostname': extra.get('hostname')
}
elif type == RecordType.SRV:
additions = {
'port': extra.get('port'),
'weight': extra.get('weight', 1),
'priority': extra.get('priority', 1),
'hostname': extra.get('hostname')
}
elif type == RecordType.A:
additions = {'ip': data}
elif type == RecordType.CNAME:
additions = {'hostname': extra.get('hostname')}
elif type == RecordType.AAAA:
additions = {'ip': data}
elif type == RecordType.TXT:
additions = {'txt': extra.get('txt')}
elif type == RecordType.NS:
additions = {'hostname': extra.get('hostname')}
new_record.update(additions)
return new_record
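    # Illustrative sketch (values assumed, not part of the driver): for an MX
    # record, _format_record('mail', RecordType.MX, None,
    # {'priority': 10, 'hostname': 'mx.example.com'}) builds a dict like
    # {'name': 'mail', 'ttl': 1200, 'type': 'MX',
    #  'priority': 10, 'hostname': 'mx.example.com'}
    # since ttl falls back to DEFAULT_ZONE_TTL when absent from extra.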
def _to_zones(self, data):
zones = []
for zone in data:
_zone = self._to_zone(zone)
zones.append(_zone)
return zones
def _to_zone(self, data):
dns_zone = data.get('dns_zone')
id = dns_zone.get('id')
name = dns_zone.get('name')
extra = {'user_id': dns_zone.get('user_id'),
'cdn_reference': dns_zone.get('cdn_reference'),
'created_at': dns_zone.get('created_at'),
'updated_at': dns_zone.get('updated_at')}
type = 'master'
return Zone(id=id, domain=name, type=type, ttl=DEFAULT_ZONE_TTL,
driver=self, extra=extra)
def _to_records(self, data, zone):
records = []
data = data.values()
for data_type in data:
for item in data_type:
record = self._to_record(item, zone=zone)
records.append(record)
records.sort(key=lambda x: x.id, reverse=False)
return records
def _to_record(self, data, zone_id=None, zone=None):
if not zone: # We need zone_id or zone
zone = self.get_zone(zone_id)
record = data.get('dns_record')
id = record.get('id')
name = record.get('name')
type = record.get('type')
ttl = record.get('ttl', None)
return Record(id=id, name=name, type=type, data=record, zone=zone,
driver=self, ttl=ttl, extra={})
| apache-2.0 | 3,356,851,954,810,707,500 | 31.180723 | 76 | 0.55672 | false |
gistic/PublicSpatialImpala | thirdparty/thrift-0.9.0/lib/py/src/TSerialization.py | 184 | 1387 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from protocol import TBinaryProtocol
from transport import TTransport
def serialize(thrift_object,
protocol_factory=TBinaryProtocol.TBinaryProtocolFactory()):
transport = TTransport.TMemoryBuffer()
protocol = protocol_factory.getProtocol(transport)
thrift_object.write(protocol)
return transport.getvalue()
def deserialize(base,
buf,
protocol_factory=TBinaryProtocol.TBinaryProtocolFactory()):
transport = TTransport.TMemoryBuffer(buf)
protocol = protocol_factory.getProtocol(transport)
base.read(protocol)
return base
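# Minimal round-trip sketch. `MyStruct` is an assumed Thrift-generated
# struct used purely for illustration, not part of this module:
#
#   buf = serialize(MyStruct(field=1))
#   obj = deserialize(MyStruct(), buf)
#
# Both calls default to the compact binary protocol via
# TBinaryProtocolFactory unless another protocol_factory is supplied.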
| apache-2.0 | -3,834,667,191,983,747,600 | 35.5 | 75 | 0.753425 | false |
gabrielcnr/pytest | testing/test_pluginmanager.py | 4 | 12014 | import pytest
import py
import os
from _pytest.config import get_config, PytestPluginManager
from _pytest.main import EXIT_NOTESTSCOLLECTED
@pytest.fixture
def pytestpm():
return PytestPluginManager()
class TestPytestPluginInteractions:
def test_addhooks_conftestplugin(self, testdir):
testdir.makepyfile(newhooks="""
def pytest_myhook(xyz):
"new hook"
""")
conf = testdir.makeconftest("""
import sys ; sys.path.insert(0, '.')
import newhooks
def pytest_addhooks(pluginmanager):
pluginmanager.addhooks(newhooks)
def pytest_myhook(xyz):
return xyz + 1
""")
config = get_config()
pm = config.pluginmanager
pm.hook.pytest_addhooks.call_historic(
kwargs=dict(pluginmanager=config.pluginmanager))
config.pluginmanager._importconftest(conf)
#print(config.pluginmanager.get_plugins())
res = config.hook.pytest_myhook(xyz=10)
assert res == [11]
def test_addhooks_nohooks(self, testdir):
testdir.makeconftest("""
import sys
def pytest_addhooks(pluginmanager):
pluginmanager.addhooks(sys)
""")
res = testdir.runpytest()
assert res.ret != 0
res.stderr.fnmatch_lines([
"*did not find*sys*"
])
def test_namespace_early_from_import(self, testdir):
p = testdir.makepyfile("""
from pytest import Item
from pytest import Item as Item2
assert Item is Item2
""")
result = testdir.runpython(p)
assert result.ret == 0
def test_do_ext_namespace(self, testdir):
testdir.makeconftest("""
def pytest_namespace():
return {'hello': 'world'}
""")
p = testdir.makepyfile("""
from pytest import hello
import pytest
def test_hello():
assert hello == "world"
assert 'hello' in pytest.__all__
""")
reprec = testdir.inline_run(p)
reprec.assertoutcome(passed=1)
def test_do_option_postinitialize(self, testdir):
config = testdir.parseconfigure()
assert not hasattr(config.option, 'test123')
p = testdir.makepyfile("""
def pytest_addoption(parser):
parser.addoption('--test123', action="store_true",
default=True)
""")
config.pluginmanager._importconftest(p)
assert config.option.test123
def test_configure(self, testdir):
config = testdir.parseconfig()
l = []
class A:
def pytest_configure(self, config):
l.append(self)
config.pluginmanager.register(A())
assert len(l) == 0
config._do_configure()
assert len(l) == 1
config.pluginmanager.register(A()) # leads to a configured() plugin
assert len(l) == 2
assert l[0] != l[1]
config._ensure_unconfigure()
config.pluginmanager.register(A())
assert len(l) == 2
def test_hook_tracing(self):
pytestpm = get_config().pluginmanager # fully initialized with plugins
saveindent = []
class api1:
def pytest_plugin_registered(self):
saveindent.append(pytestpm.trace.root.indent)
class api2:
def pytest_plugin_registered(self):
saveindent.append(pytestpm.trace.root.indent)
raise ValueError()
l = []
pytestpm.trace.root.setwriter(l.append)
undo = pytestpm.enable_tracing()
try:
indent = pytestpm.trace.root.indent
p = api1()
pytestpm.register(p)
assert pytestpm.trace.root.indent == indent
assert len(l) >= 2
assert 'pytest_plugin_registered' in l[0]
assert 'finish' in l[1]
l[:] = []
with pytest.raises(ValueError):
pytestpm.register(api2())
assert pytestpm.trace.root.indent == indent
assert saveindent[0] > indent
finally:
undo()
def test_warn_on_deprecated_multicall(self, pytestpm):
warnings = []
class get_warnings:
def pytest_logwarning(self, message):
warnings.append(message)
class Plugin:
def pytest_configure(self, __multicall__):
pass
pytestpm.register(get_warnings())
before = list(warnings)
pytestpm.register(Plugin())
assert len(warnings) == len(before) + 1
assert "deprecated" in warnings[-1]
def test_warn_on_deprecated_addhooks(self, pytestpm):
warnings = []
class get_warnings:
def pytest_logwarning(self, code, fslocation, message, nodeid):
warnings.append(message)
class Plugin:
def pytest_testhook():
pass
pytestpm.register(get_warnings())
before = list(warnings)
pytestpm.addhooks(Plugin())
assert len(warnings) == len(before) + 1
assert "deprecated" in warnings[-1]
def test_namespace_has_default_and_env_plugins(testdir):
p = testdir.makepyfile("""
import pytest
pytest.mark
""")
result = testdir.runpython(p)
assert result.ret == 0
def test_default_markers(testdir):
result = testdir.runpytest("--markers")
result.stdout.fnmatch_lines([
"*tryfirst*first*",
"*trylast*last*",
])
def test_importplugin_issue375(testdir, pytestpm):
testdir.syspathinsert(testdir.tmpdir)
testdir.makepyfile(qwe="import aaaa")
with pytest.raises(ImportError) as excinfo:
pytestpm.import_plugin("qwe")
assert "qwe" not in str(excinfo.value)
assert "aaaa" in str(excinfo.value)
class TestPytestPluginManager:
def test_register_imported_modules(self):
pm = PytestPluginManager()
mod = py.std.types.ModuleType("x.y.pytest_hello")
pm.register(mod)
assert pm.is_registered(mod)
l = pm.get_plugins()
assert mod in l
pytest.raises(ValueError, "pm.register(mod)")
pytest.raises(ValueError, lambda: pm.register(mod))
#assert not pm.is_registered(mod2)
assert pm.get_plugins() == l
def test_canonical_import(self, monkeypatch):
mod = py.std.types.ModuleType("pytest_xyz")
monkeypatch.setitem(py.std.sys.modules, 'pytest_xyz', mod)
pm = PytestPluginManager()
pm.import_plugin('pytest_xyz')
assert pm.get_plugin('pytest_xyz') == mod
assert pm.is_registered(mod)
def test_consider_module(self, testdir, pytestpm):
testdir.syspathinsert()
testdir.makepyfile(pytest_p1="#")
testdir.makepyfile(pytest_p2="#")
mod = py.std.types.ModuleType("temp")
mod.pytest_plugins = ["pytest_p1", "pytest_p2"]
pytestpm.consider_module(mod)
assert pytestpm.get_plugin("pytest_p1").__name__ == "pytest_p1"
assert pytestpm.get_plugin("pytest_p2").__name__ == "pytest_p2"
def test_consider_module_import_module(self, testdir):
pytestpm = get_config().pluginmanager
mod = py.std.types.ModuleType("x")
mod.pytest_plugins = "pytest_a"
aplugin = testdir.makepyfile(pytest_a="#")
reprec = testdir.make_hook_recorder(pytestpm)
#syspath.prepend(aplugin.dirpath())
py.std.sys.path.insert(0, str(aplugin.dirpath()))
pytestpm.consider_module(mod)
call = reprec.getcall(pytestpm.hook.pytest_plugin_registered.name)
assert call.plugin.__name__ == "pytest_a"
# check that it is not registered twice
pytestpm.consider_module(mod)
l = reprec.getcalls("pytest_plugin_registered")
assert len(l) == 1
def test_consider_env_fails_to_import(self, monkeypatch, pytestpm):
monkeypatch.setenv('PYTEST_PLUGINS', 'nonexisting', prepend=",")
with pytest.raises(ImportError):
pytestpm.consider_env()
def test_plugin_skip(self, testdir, monkeypatch):
p = testdir.makepyfile(skipping1="""
import pytest
pytest.skip("hello")
""")
p.copy(p.dirpath("skipping2.py"))
monkeypatch.setenv("PYTEST_PLUGINS", "skipping2")
result = testdir.runpytest("-rw", "-p", "skipping1", syspathinsert=True)
assert result.ret == EXIT_NOTESTSCOLLECTED
result.stdout.fnmatch_lines([
"WI1*skipped plugin*skipping1*hello*",
"WI1*skipped plugin*skipping2*hello*",
])
def test_consider_env_plugin_instantiation(self, testdir, monkeypatch, pytestpm):
testdir.syspathinsert()
testdir.makepyfile(xy123="#")
monkeypatch.setitem(os.environ, 'PYTEST_PLUGINS', 'xy123')
l1 = len(pytestpm.get_plugins())
pytestpm.consider_env()
l2 = len(pytestpm.get_plugins())
assert l2 == l1 + 1
assert pytestpm.get_plugin('xy123')
pytestpm.consider_env()
l3 = len(pytestpm.get_plugins())
assert l2 == l3
def test_pluginmanager_ENV_startup(self, testdir, monkeypatch):
testdir.makepyfile(pytest_x500="#")
p = testdir.makepyfile("""
import pytest
def test_hello(pytestconfig):
plugin = pytestconfig.pluginmanager.get_plugin('pytest_x500')
assert plugin is not None
""")
monkeypatch.setenv('PYTEST_PLUGINS', 'pytest_x500', prepend=",")
result = testdir.runpytest(p, syspathinsert=True)
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_import_plugin_importname(self, testdir, pytestpm):
pytest.raises(ImportError, 'pytestpm.import_plugin("qweqwex.y")')
pytest.raises(ImportError, 'pytestpm.import_plugin("pytest_qweqwx.y")')
testdir.syspathinsert()
pluginname = "pytest_hello"
testdir.makepyfile(**{pluginname: ""})
pytestpm.import_plugin("pytest_hello")
len1 = len(pytestpm.get_plugins())
pytestpm.import_plugin("pytest_hello")
len2 = len(pytestpm.get_plugins())
assert len1 == len2
plugin1 = pytestpm.get_plugin("pytest_hello")
assert plugin1.__name__.endswith('pytest_hello')
plugin2 = pytestpm.get_plugin("pytest_hello")
assert plugin2 is plugin1
def test_import_plugin_dotted_name(self, testdir, pytestpm):
pytest.raises(ImportError, 'pytestpm.import_plugin("qweqwex.y")')
pytest.raises(ImportError, 'pytestpm.import_plugin("pytest_qweqwex.y")')
testdir.syspathinsert()
testdir.mkpydir("pkg").join("plug.py").write("x=3")
pluginname = "pkg.plug"
pytestpm.import_plugin(pluginname)
mod = pytestpm.get_plugin("pkg.plug")
assert mod.x == 3
def test_consider_conftest_deps(self, testdir, pytestpm):
mod = testdir.makepyfile("pytest_plugins='xyz'").pyimport()
with pytest.raises(ImportError):
pytestpm.consider_conftest(mod)
class TestPytestPluginManagerBootstrapming:
def test_preparse_args(self, pytestpm):
pytest.raises(ImportError, lambda:
pytestpm.consider_preparse(["xyz", "-p", "hello123"]))
def test_plugin_prevent_register(self, pytestpm):
pytestpm.consider_preparse(["xyz", "-p", "no:abc"])
l1 = pytestpm.get_plugins()
pytestpm.register(42, name="abc")
l2 = pytestpm.get_plugins()
assert len(l2) == len(l1)
assert 42 not in l2
    def test_plugin_prevent_register_unregistered_already_registered(self, pytestpm):
pytestpm.register(42, name="abc")
l1 = pytestpm.get_plugins()
assert 42 in l1
pytestpm.consider_preparse(["xyz", "-p", "no:abc"])
l2 = pytestpm.get_plugins()
assert 42 not in l2
| mit | -3,061,002,859,826,017,000 | 34.755952 | 85 | 0.598385 | false |
dzbarsky/servo | components/script/dom/bindings/codegen/parser/tests/test_special_methods_uniqueness.py | 241 | 1433 | import WebIDL
def WebIDLTest(parser, harness):
threw = False
try:
parser.parse("""
interface SpecialMethodUniqueness1 {
getter deleter boolean (DOMString name);
getter boolean (DOMString name);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodUniqueness1 {
deleter boolean (DOMString name);
getter deleter boolean (DOMString name);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodUniqueness1 {
setter creator boolean (DOMString name);
creator boolean (DOMString name);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodUniqueness1 {
setter boolean (DOMString name);
creator setter boolean (DOMString name);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
| mpl-2.0 | -8,941,170,251,327,639,000 | 22.112903 | 54 | 0.530356 | false |
linktlh/Toontown-journey | toontown/coghq/LaserGameRoll.py | 4 | 1977 | import random
from direct.distributed import ClockDelta
from direct.task import Task
from toontown.coghq import LaserGameBase
class LaserGameRoll(LaserGameBase.LaserGameBase):
def __init__(self, funcSuccess, funcFail, funcSendGrid, funcSetGrid):
LaserGameBase.LaserGameBase.__init__(self, funcSuccess, funcFail, funcSendGrid, funcSetGrid)
self.setGridSize(5, 5)
self.blankGrid()
def win(self):
if not self.finshed:
self.blankGrid()
self.funcSendGrid()
LaserGameBase.LaserGameBase.win(self)
def lose(self):
self.blankGrid()
self.funcSendGrid()
LaserGameBase.LaserGameBase.lose(self)
def startGrid(self):
LaserGameBase.LaserGameBase.startGrid(self)
for column in xrange(0, self.gridNumX):
for row in xrange(0, self.gridNumY):
tile = random.choice([
10,
13])
self.gridData[column][row] = tile
for column in xrange(0, self.gridNumX):
self.gridData[column][self.gridNumY - 1] = 12
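        # Observed tile codes (exact meanings live in the client-side game
        # assets): 10 and 13 are the two toggleable states that hit() flips
        # between, and 12 marks every tile in the bottom row; the round is won
        # once no 10s and 13s coexist (see checkForWin below).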
def hit(self, hitX, hitY, oldx = -1, oldy = -1):
if self.finshed:
return None
if self.gridData[hitX][hitY] == 10:
self.gridData[hitX][hitY] = 13
elif self.gridData[hitX][hitY] == 13:
self.gridData[hitX][hitY] = 10
if self.checkForWin():
self.win()
else:
self.funcSendGrid()
def checkForWin(self):
count1 = 0
count2 = 0
for column in xrange(0, self.gridNumX):
for row in xrange(0, self.gridNumY):
if self.gridData[column][row] == 10:
count1 += 1
continue
if self.gridData[column][row] == 13:
count2 += 1
continue
if count1 and count2:
return 0
else:
return 1
| apache-2.0 | 6,616,388,150,910,226,000 | 24.675325 | 100 | 0.549823 | false |
framon/samba | buildtools/wafadmin/Runner.py | 16 | 5556 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2008 (ita)
"Execute the tasks"
import os, sys, random, time, threading, traceback
try: from Queue import Queue
except ImportError: from queue import Queue
import Build, Utils, Logs, Options
from Logs import debug, error
from Constants import *
GAP = 15
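# GAP bounds how far the producer runs ahead of the consumers: the producer
# stops to collect finished tasks whenever more than numjobs + GAP of them
# are in flight (see Parallel.refill_task_list below).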
run_old = threading.Thread.run
def run(*args, **kwargs):
try:
run_old(*args, **kwargs)
except (KeyboardInterrupt, SystemExit):
raise
except:
sys.excepthook(*sys.exc_info())
threading.Thread.run = run
def process_task(tsk):
m = tsk.master
if m.stop:
m.out.put(tsk)
return
try:
tsk.generator.bld.printout(tsk.display())
if tsk.__class__.stat: ret = tsk.__class__.stat(tsk)
# actual call to task's run() function
else: ret = tsk.call_run()
except Exception, e:
tsk.err_msg = Utils.ex_stack()
tsk.hasrun = EXCEPTION
# TODO cleanup
m.error_handler(tsk)
m.out.put(tsk)
return
if ret:
tsk.err_code = ret
tsk.hasrun = CRASHED
else:
try:
tsk.post_run()
except Utils.WafError:
pass
except Exception:
tsk.err_msg = Utils.ex_stack()
tsk.hasrun = EXCEPTION
else:
tsk.hasrun = SUCCESS
if tsk.hasrun != SUCCESS:
m.error_handler(tsk)
m.out.put(tsk)
class TaskConsumer(threading.Thread):
ready = Queue(0)
consumers = []
def __init__(self):
threading.Thread.__init__(self)
self.setDaemon(1)
self.start()
def run(self):
try:
self.loop()
except:
pass
def loop(self):
while 1:
tsk = TaskConsumer.ready.get()
process_task(tsk)
class Parallel(object):
"""
keep the consumer threads busy, and avoid consuming cpu cycles
when no more tasks can be added (end of the build, etc)
"""
def __init__(self, bld, j=2):
# number of consumers
self.numjobs = j
self.manager = bld.task_manager
self.manager.current_group = 0
self.total = self.manager.total()
# tasks waiting to be processed - IMPORTANT
self.outstanding = []
self.maxjobs = MAXJOBS
# tasks that are awaiting for another task to complete
self.frozen = []
# tasks returned by the consumers
self.out = Queue(0)
self.count = 0 # tasks not in the producer area
self.processed = 1 # progress indicator
self.stop = False # error condition to stop the build
self.error = False # error flag
def get_next(self):
"override this method to schedule the tasks in a particular order"
if not self.outstanding:
return None
return self.outstanding.pop(0)
def postpone(self, tsk):
"override this method to schedule the tasks in a particular order"
# TODO consider using a deque instead
if random.randint(0, 1):
self.frozen.insert(0, tsk)
else:
self.frozen.append(tsk)
def refill_task_list(self):
"called to set the next group of tasks"
while self.count > self.numjobs + GAP or self.count >= self.maxjobs:
self.get_out()
while not self.outstanding:
if self.count:
self.get_out()
if self.frozen:
self.outstanding += self.frozen
self.frozen = []
elif not self.count:
(jobs, tmp) = self.manager.get_next_set()
if jobs != None: self.maxjobs = jobs
if tmp: self.outstanding += tmp
break
def get_out(self):
"the tasks that are put to execute are all collected using get_out"
ret = self.out.get()
self.manager.add_finished(ret)
if not self.stop and getattr(ret, 'more_tasks', None):
self.outstanding += ret.more_tasks
self.total += len(ret.more_tasks)
self.count -= 1
def error_handler(self, tsk):
"by default, errors make the build stop (not thread safe so be careful)"
if not Options.options.keep:
self.stop = True
self.error = True
def start(self):
"execute the tasks"
if TaskConsumer.consumers:
# the worker pool is usually loaded lazily (see below)
# in case it is re-used with a different value of numjobs:
while len(TaskConsumer.consumers) < self.numjobs:
TaskConsumer.consumers.append(TaskConsumer())
while not self.stop:
self.refill_task_list()
# consider the next task
tsk = self.get_next()
if not tsk:
if self.count:
# tasks may add new ones after they are run
continue
else:
# no tasks to run, no tasks running, time to exit
break
if tsk.hasrun:
# if the task is marked as "run", just skip it
self.processed += 1
self.manager.add_finished(tsk)
continue
try:
st = tsk.runnable_status()
except Exception, e:
self.processed += 1
if self.stop and not Options.options.keep:
tsk.hasrun = SKIPPED
self.manager.add_finished(tsk)
continue
self.error_handler(tsk)
self.manager.add_finished(tsk)
tsk.hasrun = EXCEPTION
tsk.err_msg = Utils.ex_stack()
continue
if st == ASK_LATER:
self.postpone(tsk)
elif st == SKIP_ME:
self.processed += 1
tsk.hasrun = SKIPPED
self.manager.add_finished(tsk)
else:
# run me: put the task in ready queue
tsk.position = (self.processed, self.total)
self.count += 1
tsk.master = self
self.processed += 1
if self.numjobs == 1:
process_task(tsk)
else:
TaskConsumer.ready.put(tsk)
# create the consumer threads only if there is something to consume
if not TaskConsumer.consumers:
TaskConsumer.consumers = [TaskConsumer() for i in xrange(self.numjobs)]
# self.count represents the tasks that have been made available to the consumer threads
# collect all the tasks after an error else the message may be incomplete
while self.error and self.count:
self.get_out()
#print loop
assert (self.count == 0 or self.stop)
| gpl-3.0 | 8,707,639,019,741,558,000 | 22.542373 | 89 | 0.676386 | false |
mchasal/compose | compose/project.py | 15 | 12276 | from __future__ import unicode_literals
from __future__ import absolute_import
from functools import reduce
import logging
from docker.errors import APIError
from .config import get_service_name_from_net, ConfigurationError
from .const import DEFAULT_TIMEOUT, LABEL_PROJECT, LABEL_SERVICE, LABEL_ONE_OFF
from .container import Container
from .legacy import check_for_legacy_containers
from .service import Service
from .utils import parallel_execute
log = logging.getLogger(__name__)
def sort_service_dicts(services):
# Topological sort (Cormen/Tarjan algorithm).
unmarked = services[:]
temporary_marked = set()
sorted_services = []
def get_service_names(links):
return [link.split(':')[0] for link in links]
def get_service_dependents(service_dict, services):
name = service_dict['name']
return [
service for service in services
if (name in get_service_names(service.get('links', [])) or
name in service.get('volumes_from', []) or
name == get_service_name_from_net(service.get('net')))
]
def visit(n):
if n['name'] in temporary_marked:
if n['name'] in get_service_names(n.get('links', [])):
raise DependencyError('A service can not link to itself: %s' % n['name'])
if n['name'] in n.get('volumes_from', []):
raise DependencyError('A service can not mount itself as volume: %s' % n['name'])
else:
raise DependencyError('Circular import between %s' % ' and '.join(temporary_marked))
if n in unmarked:
temporary_marked.add(n['name'])
for m in get_service_dependents(n, services):
visit(m)
temporary_marked.remove(n['name'])
unmarked.remove(n)
sorted_services.insert(0, n)
while unmarked:
visit(unmarked[-1])
return sorted_services
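# Illustrative ordering (service dicts assumed, not from this module): given
# web = {'name': 'web', 'links': ['db']} and db = {'name': 'db'},
# sort_service_dicts([web, db]) returns [db, web] -- a dependency is always
# emitted before the services that link to it, mount its volumes, or share
# its network.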
class Project(object):
"""
A collection of services.
"""
def __init__(self, name, services, client):
self.name = name
self.services = services
self.client = client
def labels(self, one_off=False):
return [
'{0}={1}'.format(LABEL_PROJECT, self.name),
'{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False"),
]
@classmethod
def from_dicts(cls, name, service_dicts, client):
"""
        Construct a Project from a list of dicts representing services.
"""
project = cls(name, [], client)
for service_dict in sort_service_dicts(service_dicts):
links = project.get_links(service_dict)
volumes_from = project.get_volumes_from(service_dict)
net = project.get_net(service_dict)
project.services.append(Service(client=client, project=name, links=links, net=net,
volumes_from=volumes_from, **service_dict))
return project
@property
def service_names(self):
return [service.name for service in self.services]
def get_service(self, name):
"""
Retrieve a service by name. Raises NoSuchService
if the named service does not exist.
"""
for service in self.services:
if service.name == name:
return service
raise NoSuchService(name)
def validate_service_names(self, service_names):
"""
Validate that the given list of service names only contains valid
services. Raises NoSuchService if one of the names is invalid.
"""
valid_names = self.service_names
for name in service_names:
if name not in valid_names:
raise NoSuchService(name)
def get_services(self, service_names=None, include_deps=False):
"""
Returns a list of this project's services filtered
by the provided list of names, or all services if service_names is None
or [].
If include_deps is specified, returns a list including the dependencies for
service_names, in order of dependency.
Preserves the original order of self.services where possible,
reordering as needed to resolve dependencies.
Raises NoSuchService if any of the named services do not exist.
"""
if service_names is None or len(service_names) == 0:
return self.get_services(
service_names=self.service_names,
include_deps=include_deps
)
else:
unsorted = [self.get_service(name) for name in service_names]
services = [s for s in self.services if s in unsorted]
if include_deps:
services = reduce(self._inject_deps, services, [])
            uniques = []
            for s in services:
                if s not in uniques:
                    uniques.append(s)
            return uniques
def get_links(self, service_dict):
links = []
if 'links' in service_dict:
for link in service_dict.get('links', []):
if ':' in link:
service_name, link_name = link.split(':', 1)
else:
service_name, link_name = link, None
try:
links.append((self.get_service(service_name), link_name))
except NoSuchService:
raise ConfigurationError('Service "%s" has a link to service "%s" which does not exist.' % (service_dict['name'], service_name))
del service_dict['links']
return links
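    # Link strings follow compose syntax: 'db' links the db service under its
    # own name (alias None), while 'db:database' aliases it as 'database', so
    # this method yields (service, alias) pairs such as (db_service, 'database').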
def get_volumes_from(self, service_dict):
volumes_from = []
if 'volumes_from' in service_dict:
for volume_name in service_dict.get('volumes_from', []):
try:
service = self.get_service(volume_name)
volumes_from.append(service)
except NoSuchService:
try:
container = Container.from_id(self.client, volume_name)
volumes_from.append(container)
except APIError:
raise ConfigurationError('Service "%s" mounts volumes from "%s", which is not the name of a service or container.' % (service_dict['name'], volume_name))
del service_dict['volumes_from']
return volumes_from
def get_net(self, service_dict):
if 'net' in service_dict:
net_name = get_service_name_from_net(service_dict.get('net'))
if net_name:
try:
net = self.get_service(net_name)
except NoSuchService:
try:
net = Container.from_id(self.client, net_name)
except APIError:
raise ConfigurationError('Service "%s" is trying to use the network of "%s", which is not the name of a service or container.' % (service_dict['name'], net_name))
else:
net = service_dict['net']
del service_dict['net']
else:
net = None
return net
def start(self, service_names=None, **options):
for service in self.get_services(service_names):
service.start(**options)
def stop(self, service_names=None, **options):
parallel_execute(
objects=self.containers(service_names),
obj_callable=lambda c: c.stop(**options),
msg_index=lambda c: c.name,
msg="Stopping"
)
def kill(self, service_names=None, **options):
parallel_execute(
objects=self.containers(service_names),
obj_callable=lambda c: c.kill(**options),
msg_index=lambda c: c.name,
msg="Killing"
)
def remove_stopped(self, service_names=None, **options):
all_containers = self.containers(service_names, stopped=True)
stopped_containers = [c for c in all_containers if not c.is_running]
parallel_execute(
objects=stopped_containers,
obj_callable=lambda c: c.remove(**options),
msg_index=lambda c: c.name,
msg="Removing"
)
def restart(self, service_names=None, **options):
for service in self.get_services(service_names):
service.restart(**options)
def build(self, service_names=None, no_cache=False):
for service in self.get_services(service_names):
if service.can_be_built():
service.build(no_cache)
else:
log.info('%s uses an image, skipping' % service.name)
def up(self,
service_names=None,
start_deps=True,
allow_recreate=True,
force_recreate=False,
do_build=True,
timeout=DEFAULT_TIMEOUT):
if force_recreate and not allow_recreate:
raise ValueError("force_recreate and allow_recreate are in conflict")
services = self.get_services(service_names, include_deps=start_deps)
for service in services:
service.remove_duplicate_containers()
plans = self._get_convergence_plans(
services,
allow_recreate=allow_recreate,
force_recreate=force_recreate,
)
return [
container
for service in services
for container in service.execute_convergence_plan(
plans[service.name],
do_build=do_build,
timeout=timeout
)
]
def _get_convergence_plans(self,
services,
allow_recreate=True,
force_recreate=False):
plans = {}
for service in services:
updated_dependencies = [
name
for name in service.get_dependency_names()
if name in plans
and plans[name].action == 'recreate'
]
if updated_dependencies and allow_recreate:
log.debug(
'%s has upstream changes (%s)',
service.name, ", ".join(updated_dependencies),
)
plan = service.convergence_plan(
allow_recreate=allow_recreate,
force_recreate=True,
)
else:
plan = service.convergence_plan(
allow_recreate=allow_recreate,
force_recreate=force_recreate,
)
plans[service.name] = plan
return plans
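    # Recreate decisions cascade: if service A's plan is 'recreate' and B
    # depends on A, B is planned with force_recreate=True so B picks up A's
    # new container even when B itself is unchanged.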
def pull(self, service_names=None):
for service in self.get_services(service_names, include_deps=True):
service.pull()
def containers(self, service_names=None, stopped=False, one_off=False):
if service_names:
self.validate_service_names(service_names)
else:
service_names = self.service_names
containers = [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=stopped,
filters={'label': self.labels(one_off=one_off)})]
def matches_service_names(container):
return container.labels.get(LABEL_SERVICE) in service_names
if not containers:
check_for_legacy_containers(
self.client,
self.name,
self.service_names,
)
return filter(matches_service_names, containers)
def _inject_deps(self, acc, service):
dep_names = service.get_dependency_names()
if len(dep_names) > 0:
dep_services = self.get_services(
service_names=list(set(dep_names)),
include_deps=True
)
else:
dep_services = []
dep_services.append(service)
return acc + dep_services
class NoSuchService(Exception):
def __init__(self, name):
self.name = name
self.msg = "No such service: %s" % self.name
def __str__(self):
return self.msg
class DependencyError(ConfigurationError):
pass
| apache-2.0 | -3,246,921,781,161,121,000 | 33.483146 | 186 | 0.560362 | false |
mahendra-r/edx-platform | lms/djangoapps/certificates/tests/test_queue.py | 43 | 5953 | # -*- coding: utf-8 -*-
"""Tests for the XQueue certificates interface. """
from contextlib import contextmanager
import json
from mock import patch, Mock
from nose.plugins.attrib import attr
from django.test import TestCase
from django.test.utils import override_settings
from opaque_keys.edx.locator import CourseLocator
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from xmodule.modulestore.tests.factories import CourseFactory
# It is really unfortunate that we are using the XQueue client
# code from the capa library. In the future, we should move this
# into a shared library. We import it here so we can mock it
# and verify that items are being correctly added to the queue
# in our `XQueueCertInterface` implementation.
from capa.xqueue_interface import XQueueInterface
from certificates.queue import XQueueCertInterface
from certificates.models import (
ExampleCertificateSet,
ExampleCertificate,
GeneratedCertificate,
CertificateStatuses,
)
@attr('shard_1')
@override_settings(CERT_QUEUE='certificates')
class XQueueCertInterfaceAddCertificateTest(ModuleStoreTestCase):
"""Test the "add to queue" operation of the XQueue interface. """
def setUp(self):
super(XQueueCertInterfaceAddCertificateTest, self).setUp()
self.user = UserFactory.create()
self.course = CourseFactory.create()
self.enrollment = CourseEnrollmentFactory(
user=self.user,
course_id=self.course.id,
is_active=True,
mode="honor",
)
self.xqueue = XQueueCertInterface()
def test_add_cert_callback_url(self):
with patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75})):
with patch.object(XQueueInterface, 'send_to_queue') as mock_send:
mock_send.return_value = (0, None)
self.xqueue.add_cert(self.user, self.course.id)
# Verify that the task was sent to the queue with the correct callback URL
self.assertTrue(mock_send.called)
__, kwargs = mock_send.call_args_list[0]
actual_header = json.loads(kwargs['header'])
self.assertIn('https://edx.org/update_certificate?key=', actual_header['lms_callback_url'])
def test_no_create_action_in_queue_for_html_view_certs(self):
"""
Tests there is no certificate create message in the queue if generate_pdf is False
"""
with patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75})):
with patch.object(XQueueInterface, 'send_to_queue') as mock_send:
self.xqueue.add_cert(self.user, self.course.id, generate_pdf=False)
# Verify that add_cert method does not add message to queue
self.assertFalse(mock_send.called)
certificate = GeneratedCertificate.objects.get(user=self.user, course_id=self.course.id)
self.assertEqual(certificate.status, CertificateStatuses.downloadable)
self.assertIsNotNone(certificate.verify_uuid)
@attr('shard_1')
@override_settings(CERT_QUEUE='certificates')
class XQueueCertInterfaceExampleCertificateTest(TestCase):
"""Tests for the XQueue interface for certificate generation. """
COURSE_KEY = CourseLocator(org='test', course='test', run='test')
TEMPLATE = 'test.pdf'
DESCRIPTION = 'test'
ERROR_MSG = 'Kaboom!'
def setUp(self):
super(XQueueCertInterfaceExampleCertificateTest, self).setUp()
self.xqueue = XQueueCertInterface()
def test_add_example_cert(self):
cert = self._create_example_cert()
with self._mock_xqueue() as mock_send:
self.xqueue.add_example_cert(cert)
# Verify that the correct payload was sent to the XQueue
self._assert_queue_task(mock_send, cert)
# Verify the certificate status
self.assertEqual(cert.status, ExampleCertificate.STATUS_STARTED)
def test_add_example_cert_error(self):
cert = self._create_example_cert()
with self._mock_xqueue(success=False):
self.xqueue.add_example_cert(cert)
# Verify the error status of the certificate
self.assertEqual(cert.status, ExampleCertificate.STATUS_ERROR)
self.assertIn(self.ERROR_MSG, cert.error_reason)
def _create_example_cert(self):
"""Create an example certificate. """
cert_set = ExampleCertificateSet.objects.create(course_key=self.COURSE_KEY)
return ExampleCertificate.objects.create(
example_cert_set=cert_set,
description=self.DESCRIPTION,
template=self.TEMPLATE
)
@contextmanager
def _mock_xqueue(self, success=True):
"""Mock the XQueue method for sending a task to the queue. """
with patch.object(XQueueInterface, 'send_to_queue') as mock_send:
mock_send.return_value = (0, None) if success else (1, self.ERROR_MSG)
yield mock_send
def _assert_queue_task(self, mock_send, cert):
"""Check that the task was added to the queue. """
expected_header = {
'lms_key': cert.access_key,
'lms_callback_url': 'https://edx.org/update_example_certificate?key={key}'.format(key=cert.uuid),
'queue_name': 'certificates'
}
expected_body = {
'action': 'create',
'username': cert.uuid,
'name': u'John Doë',
'course_id': unicode(self.COURSE_KEY),
'template_pdf': 'test.pdf',
'example_certificate': True
}
self.assertTrue(mock_send.called)
__, kwargs = mock_send.call_args_list[0]
actual_header = json.loads(kwargs['header'])
actual_body = json.loads(kwargs['body'])
self.assertEqual(expected_header, actual_header)
self.assertEqual(expected_body, actual_body)
| agpl-3.0 | 6,997,514,362,479,994,000 | 38.157895 | 109 | 0.669523 | false |
linkedin/indextank-service | storefront/boto/s3/bucketlistresultset.py | 3 | 2380 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
def bucket_lister(bucket, prefix='', delimiter='', marker='', headers=None):
"""
    A generator function for listing keys in a bucket, requesting further
    pages of results from S3 as the iteration consumes each batch.
"""
more_results = True
k = None
while more_results:
rs = bucket.get_all_keys(prefix=prefix, marker=marker,
delimiter=delimiter, headers=headers)
for k in rs:
yield k
if k:
marker = k.name
        more_results = rs.is_truncated
class BucketListResultSet:
"""
A resultset for listing keys within a bucket. Uses the bucket_lister
generator function and implements the iterator interface. This
transparently handles the results paging from S3 so even if you have
many thousands of keys within the bucket you can iterate over all
keys in a reasonably efficient manner.
"""
def __init__(self, bucket=None, prefix='', delimiter='', marker='', headers=None):
self.bucket = bucket
self.prefix = prefix
self.delimiter = delimiter
self.marker = marker
self.headers = headers
def __iter__(self):
return bucket_lister(self.bucket, prefix=self.prefix,
delimiter=self.delimiter, marker=self.marker, headers=self.headers)
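# Typical usage sketch (bucket and prefix are illustrative); iterating the
# result set pages through S3 transparently via bucket_lister above:
#
#   for key in BucketListResultSet(bucket, prefix='logs/'):
#       print key.name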
| apache-2.0 | -6,267,443,312,373,075,000 | 40.754386 | 96 | 0.687395 | false |
ingenieroariel/geonode | geonode/upload/forms.py | 1 | 8421 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import os
import files
import tempfile
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from geonode import geoserver, qgis_server
from geonode.layers.forms import JSONField
from geonode.upload.models import UploadFile
from geonode.geoserver.helpers import ogc_server_settings
from geonode.utils import check_ogc_backend
class UploadFileForm(forms.ModelForm):
class Meta:
model = UploadFile
fields = '__all__'
class LayerUploadForm(forms.Form):
base_file = forms.FileField()
dbf_file = forms.FileField(required=False)
shx_file = forms.FileField(required=False)
prj_file = forms.FileField(required=False)
xml_file = forms.FileField(required=False)
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
sld_file = forms.FileField(required=False)
if check_ogc_backend(qgis_server.BACKEND_PACKAGE):
qml_file = forms.FileField(required=False)
geogig = forms.BooleanField(required=False)
geogig_store = forms.CharField(required=False)
time = forms.BooleanField(required=False)
mosaic = forms.BooleanField(required=False)
append_to_mosaic_opts = forms.BooleanField(required=False)
append_to_mosaic_name = forms.CharField(required=False)
mosaic_time_regex = forms.CharField(required=False)
mosaic_time_value = forms.CharField(required=False)
time_presentation = forms.CharField(required=False)
time_presentation_res = forms.IntegerField(required=False)
time_presentation_default_value = forms.CharField(required=False)
time_presentation_reference_value = forms.CharField(required=False)
abstract = forms.CharField(required=False)
layer_title = forms.CharField(required=False)
permissions = JSONField()
metadata_uploaded_preserve = forms.BooleanField(required=False)
metadata_upload_form = forms.BooleanField(required=False)
style_upload_form = forms.BooleanField(required=False)
spatial_files = [
"base_file",
"dbf_file",
"shx_file",
"prj_file",
"xml_file",
]
# Adding style file based on the backend
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
spatial_files.append('sld_file')
if check_ogc_backend(qgis_server.BACKEND_PACKAGE):
spatial_files.append('qml_file')
spatial_files = tuple(spatial_files)
def clean(self):
requires_datastore = () if ogc_server_settings.DATASTORE else (
'.csv',
'.kml')
types = [t for t in files.types if t.code not in requires_datastore]
def supported_type(ext):
return any([t.matches(ext) for t in types])
cleaned = super(LayerUploadForm, self).clean()
base_name, base_ext = os.path.splitext(cleaned["base_file"].name)
if base_ext.lower() == '.zip':
# for now, no verification, but this could be unified
pass
elif not supported_type(base_ext.lower()[1:]):
supported = " , ".join([t.name for t in types])
raise forms.ValidationError(
"%s files are supported. You uploaded a %s file" %
(supported, base_ext))
if base_ext.lower() == ".shp":
dbf_file = cleaned["dbf_file"]
shx_file = cleaned["shx_file"]
if dbf_file is None or shx_file is None:
raise forms.ValidationError(
"When uploading Shapefiles, .SHX and .DBF files are also required.")
dbf_name, __ = os.path.splitext(dbf_file.name)
shx_name, __ = os.path.splitext(shx_file.name)
if dbf_name != base_name or shx_name != base_name:
raise forms.ValidationError(
"It looks like you're uploading "
"components from different Shapefiles. Please "
"double-check your file selections.")
if cleaned["prj_file"] is not None:
prj_file = cleaned["prj_file"].name
if os.path.splitext(prj_file)[0] != base_name:
raise forms.ValidationError(
"It looks like you're "
"uploading components from different Shapefiles. "
"Please double-check your file selections.")
return cleaned
def write_files(self):
tempdir = tempfile.mkdtemp(dir=settings.FILE_UPLOAD_TEMP_DIR)
for field in self.spatial_files:
f = self.cleaned_data[field]
if f is not None:
path = os.path.join(tempdir, f.name)
with open(path, 'wb') as writable:
for c in f.chunks():
writable.write(c)
absolute_base_file = os.path.join(tempdir,
self.cleaned_data["base_file"].name)
return tempdir, absolute_base_file
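# write_files (above) stages an upload for the import pipeline: it copies
# every submitted spatial file into a fresh temporary directory under
# FILE_UPLOAD_TEMP_DIR and returns (tempdir, absolute_base_file) so callers
# can hand the on-disk paths to the configured OGC backend.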
class TimeForm(forms.Form):
presentation_strategy = forms.CharField(required=False)
precision_value = forms.IntegerField(required=False)
precision_step = forms.ChoiceField(required=False, choices=[
('years',) * 2,
('months',) * 2,
('days',) * 2,
('hours',) * 2,
('minutes',) * 2,
('seconds',) * 2
])
def __init__(self, *args, **kwargs):
# have to remove these from kwargs or Form gets mad
time_names = kwargs.pop('time_names', None)
text_names = kwargs.pop('text_names', None)
year_names = kwargs.pop('year_names', None)
super(TimeForm, self).__init__(*args, **kwargs)
self._build_choice('time_attribute', time_names)
self._build_choice('end_time_attribute', time_names)
self._build_choice('text_attribute', text_names)
self._build_choice('end_text_attribute', text_names)
widget = forms.TextInput(attrs={'placeholder': 'Custom Format'})
if text_names:
self.fields['text_attribute_format'] = forms.CharField(
required=False, widget=widget)
self.fields['end_text_attribute_format'] = forms.CharField(
required=False, widget=widget)
self._build_choice('year_attribute', year_names)
self._build_choice('end_year_attribute', year_names)
def _resolve_attribute_and_type(self, *name_and_types):
return [(self.cleaned_data[n], t) for n, t in name_and_types
if self.cleaned_data.get(n, None)]
def _build_choice(self, att, names):
if names:
names.sort()
choices = [('', '<None>')] + [(a, a) for a in names]
self.fields[att] = forms.ChoiceField(
choices=choices, required=False)
def clean(self):
starts = self._resolve_attribute_and_type(
('time_attribute', 'Date'),
('text_attribute', 'Text'),
('year_attribute', 'Number'),
)
if len(starts) > 1:
raise ValidationError('multiple start attributes')
ends = self._resolve_attribute_and_type(
('end_time_attribute', 'Date'),
('end_text_attribute', 'Text'),
('end_year_attribute', 'Number'),
)
if len(ends) > 1:
raise ValidationError('multiple end attributes')
if len(starts) > 0:
self.cleaned_data['start_attribute'] = starts[0]
if len(ends) > 0:
self.cleaned_data['end_attribute'] = ends[0]
return self.cleaned_data
# @todo implement clean
class SRSForm(forms.Form):
srs = forms.CharField(required=True)
| gpl-3.0 | 492,939,210,307,070,340 | 38.721698 | 88 | 0.606698 | false |
stanxii/wen9000 | node.js/node_modules/npm/node_modules/node-gyp/legacy/tools/gyp/pylib/gyp/generator/msvs.py | 41 | 109235 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import ntpath
import os
import posixpath
import re
import subprocess
import sys
import gyp.common
import gyp.easy_xml as easy_xml
import gyp.MSVSNew as MSVSNew
import gyp.MSVSProject as MSVSProject
import gyp.MSVSSettings as MSVSSettings
import gyp.MSVSToolFile as MSVSToolFile
import gyp.MSVSUserFile as MSVSUserFile
import gyp.MSVSVersion as MSVSVersion
# Regular expression for validating Visual Studio GUIDs. If the GUID
# contains lowercase hex letters, MSVS will be fine. However,
# IncrediBuild BuildConsole will parse the solution file, but then
# silently skip building the target causing hard to track down errors.
# Note that this only happens with the BuildConsole, and does not occur
# if IncrediBuild is executed from inside Visual Studio. This regex
# validates that the string looks like a GUID with all uppercase hex
# letters.
VALID_MSVS_GUID_CHARS = re.compile('^[A-F0-9\-]+$')
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '.exe',
'STATIC_LIB_PREFIX': '',
'SHARED_LIB_PREFIX': '',
'STATIC_LIB_SUFFIX': '.lib',
'SHARED_LIB_SUFFIX': '.dll',
'INTERMEDIATE_DIR': '$(IntDir)',
'SHARED_INTERMEDIATE_DIR': '$(OutDir)/obj/global_intermediate',
'OS': 'win',
'PRODUCT_DIR': '$(OutDir)',
# TODO(jeanluc) The way we currently generate libraries makes Visual
# Studio 2010 unhappy. We get a lot of warnings like:
# warning MSB8012: TargetPath(...\Debug\gles2_c_lib.lib) does not match
# the Library's OutputFile property value (...\Debug\lib\gles2_c_lib.lib).
# This may cause your project to build incorrectly. To correct this,
# please make sure that $(OutDir), $(TargetName) and $(TargetExt) property
# values match the value specified in %(Lib.OutputFile).
# Despite the warnings, this compile correctly. It would be nice to get rid
# of the warnings.
# TODO(jeanluc) I had: 'LIB_DIR': '$(OutDir)lib',
'LIB_DIR': '$(OutDir)/lib',
'RULE_INPUT_ROOT': '$(InputName)',
'RULE_INPUT_DIRNAME': '$(InputDir)',
'RULE_INPUT_EXT': '$(InputExt)',
'RULE_INPUT_NAME': '$(InputFileName)',
'RULE_INPUT_PATH': '$(InputPath)',
'CONFIGURATION_NAME': '$(ConfigurationName)',
}
# The msvs specific sections that hold paths
generator_additional_path_sections = [
'msvs_cygwin_dirs',
'msvs_props',
]
generator_additional_non_configuration_keys = [
'msvs_cygwin_dirs',
'msvs_cygwin_shell',
'msvs_shard',
]
# List of precompiled header related keys.
precomp_keys = [
'msvs_precompiled_header',
'msvs_precompiled_source',
]
cached_username = None
cached_domain = None
# TODO(gspencer): Switch the os.environ calls to be
# win32api.GetDomainName() and win32api.GetUserName() once the
# python version in depot_tools has been updated to work on Vista
# 64-bit.
def _GetDomainAndUserName():
if sys.platform not in ('win32', 'cygwin'):
return ('DOMAIN', 'USERNAME')
global cached_username
global cached_domain
if not cached_domain or not cached_username:
domain = os.environ.get('USERDOMAIN')
username = os.environ.get('USERNAME')
if not domain or not username:
call = subprocess.Popen(['net', 'config', 'Workstation'],
stdout=subprocess.PIPE)
config = call.communicate()[0]
username_re = re.compile('^User name\s+(\S+)', re.MULTILINE)
username_match = username_re.search(config)
if username_match:
username = username_match.group(1)
domain_re = re.compile('^Logon domain\s+(\S+)', re.MULTILINE)
domain_match = domain_re.search(config)
if domain_match:
domain = domain_match.group(1)
cached_domain = domain
cached_username = username
return (cached_domain, cached_username)
fixpath_prefix = None
def _NormalizedSource(source):
"""Normalize the path.
But not if that gets rid of a variable, as this may expand to something
larger than one directory.
Arguments:
source: The path to be normalized.
Returns:
The normalized path.
"""
normalized = os.path.normpath(source)
if source.count('$') == normalized.count('$'):
source = normalized
return source
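# Illustrative sketch (hypothetical inputs, kept in comments so nothing runs
# at import time; the first result assumes a Windows host):
#   >>> _NormalizedSource('foo/./bar')
#   'foo\\bar'
#   >>> _NormalizedSource('$(IntDir)/../foo')
#   '$(IntDir)/../foo'
# The second input is returned unchanged: normpath would fold the $(IntDir)
# variable away, so the '$' counts no longer match.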
def _FixPath(path):
"""Convert paths to a form that will make sense in a vcproj file.
Arguments:
path: The path to convert, may contain / etc.
Returns:
The path with all slashes made into backslashes.
"""
if fixpath_prefix and path and not os.path.isabs(path) and not path[0] == '$':
path = os.path.join(fixpath_prefix, path)
path = path.replace('/', '\\')
path = _NormalizedSource(path)
if path and path[-1] == '\\':
path = path[:-1]
return path
def _FixPaths(paths):
"""Fix each of the paths of the list."""
return [_FixPath(i) for i in paths]
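# Illustrative sketch (hypothetical inputs; assumes fixpath_prefix is None):
#   >>> _FixPath('$(OutDir)/gen/foo.cc')
#   '$(OutDir)\\gen\\foo.cc'
#   >>> _FixPaths(['a/b/', 'c.h'])
#   ['a\\b', 'c.h']
# Paths starting with '$' are never joined to the prefix, and a trailing
# backslash is stripped.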
def _ConvertSourcesToFilterHierarchy(sources, prefix=None, excluded=None,
list_excluded=True):
"""Converts a list split source file paths into a vcproj folder hierarchy.
Arguments:
sources: A list of source file paths split.
prefix: A list of source file path layers meant to be prepended to each of
the sources.
excluded: A set of excluded files.
list_excluded: Whether excluded files should be listed in an
'_excluded_files' folder.
Returns:
A hierarchy of filenames and MSVSProject.Filter objects that matches the
layout of the source tree.
For example:
_ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']],
prefix=['joe'])
-->
[MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']),
MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])]
"""
if not prefix: prefix = []
result = []
excluded_result = []
folders = dict()
# Gather files into the final result, excluded, or folders.
for s in sources:
if len(s) == 1:
filename = _NormalizedSource('\\'.join(prefix + s))
if filename in excluded:
excluded_result.append(filename)
else:
result.append(filename)
else:
if not folders.get(s[0]):
folders[s[0]] = []
folders[s[0]].append(s[1:])
# Add a folder for excluded files.
if excluded_result and list_excluded:
excluded_folder = MSVSProject.Filter('_excluded_files',
contents=excluded_result)
result.append(excluded_folder)
# Populate all the folders.
for f in folders:
contents = _ConvertSourcesToFilterHierarchy(folders[f], prefix=prefix + [f],
excluded=excluded,
list_excluded=list_excluded)
contents = MSVSProject.Filter(f, contents=contents)
result.append(contents)
return result
def _ToolAppend(tools, tool_name, setting, value, only_if_unset=False):
if not value: return
# TODO(bradnelson): ugly hack, fix this more generally!!!
if 'Directories' in setting or 'Dependencies' in setting:
if type(value) == str:
value = value.replace('/', '\\')
else:
value = [i.replace('/', '\\') for i in value]
if not tools.get(tool_name):
tools[tool_name] = dict()
tool = tools[tool_name]
if tool.get(setting):
if only_if_unset: return
if type(tool[setting]) == list:
tool[setting] += value
else:
raise TypeError(
'Appending "%s" to a non-list setting "%s" for tool "%s" is '
'not allowed, previous value: %s' % (
value, setting, tool_name, str(tool[setting])))
else:
tool[setting] = value
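# Illustrative sketch (hypothetical settings):
#   >>> tools = {}
#   >>> _ToolAppend(tools, 'VCCLCompilerTool',
#   ...             'AdditionalIncludeDirectories', ['a/b'])
#   >>> _ToolAppend(tools, 'VCCLCompilerTool',
#   ...             'AdditionalIncludeDirectories', ['c'])
#   >>> tools
#   {'VCCLCompilerTool': {'AdditionalIncludeDirectories': ['a\\b', 'c']}}
# Note the '/' -> '\\' fixup applied to 'Directories'/'Dependencies' settings.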
def _ConfigPlatform(config_data):
return config_data.get('msvs_configuration_platform', 'Win32')
def _ConfigBaseName(config_name, platform_name):
if config_name.endswith('_' + platform_name):
return config_name[0:-len(platform_name)-1]
else:
return config_name
def _ConfigFullName(config_name, config_data):
platform_name = _ConfigPlatform(config_data)
return '%s|%s' % (_ConfigBaseName(config_name, platform_name), platform_name)
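# Illustrative sketch (hypothetical configurations):
#   >>> _ConfigFullName('Debug', {})
#   'Debug|Win32'
#   >>> _ConfigFullName('Debug_x64', {'msvs_configuration_platform': 'x64'})
#   'Debug|x64'
# The platform defaults to 'Win32', and a trailing '_<platform>' suffix on the
# configuration name is folded away.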
def _BuildCommandLineForRuleRaw(spec, cmd, cygwin_shell, has_input_path,
quote_cmd):
if [x for x in cmd if '$(InputDir)' in x]:
input_dir_preamble = (
'set INPUTDIR=$(InputDir)\n'
'set INPUTDIR=%INPUTDIR:$(ProjectDir)=%\n'
'set INPUTDIR=%INPUTDIR:~0,-1%\n'
)
else:
input_dir_preamble = ''
if cygwin_shell:
# Find path to cygwin.
cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
# Prepare command.
direct_cmd = cmd
direct_cmd = [i.replace('$(IntDir)',
'`cygpath -m "${INTDIR}"`') for i in direct_cmd]
direct_cmd = [i.replace('$(OutDir)',
'`cygpath -m "${OUTDIR}"`') for i in direct_cmd]
direct_cmd = [i.replace('$(InputDir)',
'`cygpath -m "${INPUTDIR}"`') for i in direct_cmd]
if has_input_path:
direct_cmd = [i.replace('$(InputPath)',
'`cygpath -m "${INPUTPATH}"`')
for i in direct_cmd]
direct_cmd = ['"%s"' % i for i in direct_cmd]
direct_cmd = [i.replace('"', '\\"') for i in direct_cmd]
#direct_cmd = gyp.common.EncodePOSIXShellList(direct_cmd)
direct_cmd = ' '.join(direct_cmd)
# TODO(quote): regularize quoting path names throughout the module
cmd = (
'call "$(ProjectDir)%(cygwin_dir)s\\setup_env.bat" && '
'set CYGWIN=nontsec&& ')
if direct_cmd.find('NUMBER_OF_PROCESSORS') >= 0:
cmd += 'set /a NUMBER_OF_PROCESSORS_PLUS_1=%%NUMBER_OF_PROCESSORS%%+1&& '
if direct_cmd.find('INTDIR') >= 0:
cmd += 'set INTDIR=$(IntDir)&& '
if direct_cmd.find('OUTDIR') >= 0:
cmd += 'set OUTDIR=$(OutDir)&& '
if has_input_path and direct_cmd.find('INPUTPATH') >= 0:
cmd += 'set INPUTPATH=$(InputPath) && '
cmd += 'bash -c "%(cmd)s"'
cmd = cmd % {'cygwin_dir': cygwin_dir,
'cmd': direct_cmd}
return input_dir_preamble + cmd
else:
# Convert cat --> type to mimic unix.
if cmd[0] == 'cat':
command = ['type']
else:
command = [cmd[0].replace('/', '\\')]
# Fix the paths
# If the argument starts with a slash, it's probably a command line switch
arguments = [i.startswith('/') and i or _FixPath(i) for i in cmd[1:]]
arguments = [i.replace('$(InputDir)', '%INPUTDIR%') for i in arguments]
if quote_cmd:
# Support a mode for using cmd directly.
# Convert any paths to native form (first element is used directly).
# TODO(quote): regularize quoting path names throughout the module
arguments = ['"%s"' % i for i in arguments]
# Collapse into a single command.
return input_dir_preamble + ' '.join(command + arguments)
def _BuildCommandLineForRule(spec, rule, has_input_path):
# Find path to cygwin.
cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
# Currently this weird argument munging is used to duplicate the way a
# python script would need to be run as part of the chrome tree.
# Eventually we should add some sort of rule_default option to set this
# per project. For now the behavior chrome needs is the default.
mcs = rule.get('msvs_cygwin_shell')
if mcs is None:
mcs = int(spec.get('msvs_cygwin_shell', 1))
elif isinstance(mcs, str):
mcs = int(mcs)
quote_cmd = int(rule.get('msvs_quote_cmd', 1))
return _BuildCommandLineForRuleRaw(spec, rule['action'], mcs, has_input_path,
quote_cmd)
def _AddActionStep(actions_dict, inputs, outputs, description, command):
"""Merge action into an existing list of actions.
Care must be taken so that actions which have overlapping inputs either don't
get assigned to the same input, or get collapsed into one.
Arguments:
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
inputs: list of inputs
outputs: list of outputs
description: description of the action
command: command line to execute
"""
# Require there to be at least one input (call sites will ensure this).
assert inputs
action = {
'inputs': inputs,
'outputs': outputs,
'description': description,
'command': command,
}
# Pick where to stick this action.
# While less than optimal in terms of build time, attach them to the first
# input for now.
chosen_input = inputs[0]
# Add it there.
if chosen_input not in actions_dict:
actions_dict[chosen_input] = []
actions_dict[chosen_input].append(action)
def _AddCustomBuildToolForMSVS(p, spec, primary_input,
inputs, outputs, description, cmd):
"""Add a custom build tool to execute something.
Arguments:
p: the target project
spec: the target project dict
primary_input: input file to attach the build tool to
inputs: list of inputs
outputs: list of outputs
description: description of the action
cmd: command line to execute
"""
inputs = _FixPaths(inputs)
outputs = _FixPaths(outputs)
tool = MSVSProject.Tool(
'VCCustomBuildTool',
{'Description': description,
'AdditionalDependencies': ';'.join(inputs),
'Outputs': ';'.join(outputs),
'CommandLine': cmd,
})
# Add to the properties of primary input for each config.
for config_name, c_data in spec['configurations'].iteritems():
p.AddFileConfig(_FixPath(primary_input),
_ConfigFullName(config_name, c_data), tools=[tool])
def _AddAccumulatedActionsToMSVS(p, spec, actions_dict):
"""Add actions accumulated into an actions_dict, merging as needed.
Arguments:
p: the target project
spec: the target project dict
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
"""
for primary_input in actions_dict:
inputs = set()
outputs = set()
descriptions = []
commands = []
for action in actions_dict[primary_input]:
inputs.update(set(action['inputs']))
outputs.update(set(action['outputs']))
descriptions.append(action['description'])
commands.append(action['command'])
# Add the custom build step for one input file.
description = ', and also '.join(descriptions)
command = '\r\n'.join(commands)
_AddCustomBuildToolForMSVS(p, spec,
primary_input=primary_input,
inputs=inputs,
outputs=outputs,
description=description,
cmd=command)
def _RuleExpandPath(path, input_file):
"""Given the input file to which a rule applied, string substitute a path.
Arguments:
path: a path to string expand
input_file: the file to which the rule applied.
Returns:
The string substituted path.
"""
path = path.replace('$(InputName)',
os.path.splitext(os.path.split(input_file)[1])[0])
path = path.replace('$(InputDir)', os.path.dirname(input_file))
path = path.replace('$(InputExt)',
os.path.splitext(os.path.split(input_file)[1])[1])
path = path.replace('$(InputFileName)', os.path.split(input_file)[1])
path = path.replace('$(InputPath)', input_file)
return path
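# Illustrative sketch (hypothetical rule output path and trigger file):
#   >>> _RuleExpandPath('$(InputName)_h.h', 'dir/file.idl')
#   'file_h.h'
#   >>> _RuleExpandPath('$(InputDir)/$(InputFileName).out', 'dir/file.idl')
#   'dir/file.idl.out'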
def _FindRuleTriggerFiles(rule, sources):
"""Find the list of files which a particular rule applies to.
Arguments:
rule: the rule in question
sources: the set of all known source files for this project
Returns:
The list of sources that trigger a particular rule.
"""
rule_ext = rule['extension']
return [s for s in sources if s.endswith('.' + rule_ext)]
def _RuleInputsAndOutputs(rule, trigger_file):
"""Find the inputs and outputs generated by a rule.
Arguments:
rule: the rule in question.
trigger_file: the main trigger for this rule.
Returns:
The pair of (inputs, outputs) involved in this rule.
"""
raw_inputs = _FixPaths(rule.get('inputs', []))
raw_outputs = _FixPaths(rule.get('outputs', []))
inputs = set()
outputs = set()
inputs.add(trigger_file)
for i in raw_inputs:
inputs.add(_RuleExpandPath(i, trigger_file))
for o in raw_outputs:
outputs.add(_RuleExpandPath(o, trigger_file))
return (inputs, outputs)
def _GenerateNativeRulesForMSVS(p, rules, output_dir, spec, options):
"""Generate a native rules file.
Arguments:
p: the target project
rules: the set of rules to include
output_dir: the directory in which the project/gyp resides
spec: the project dict
options: global generator options
"""
rules_filename = '%s%s.rules' % (spec['target_name'],
options.suffix)
rules_file = MSVSToolFile.Writer(os.path.join(output_dir, rules_filename),
spec['target_name'])
# Add each rule.
for r in rules:
rule_name = r['rule_name']
rule_ext = r['extension']
inputs = _FixPaths(r.get('inputs', []))
outputs = _FixPaths(r.get('outputs', []))
cmd = _BuildCommandLineForRule(spec, r, has_input_path=True)
rules_file.AddCustomBuildRule(name=rule_name,
description=r.get('message', rule_name),
extensions=[rule_ext],
additional_dependencies=inputs,
outputs=outputs,
cmd=cmd)
# Write out rules file.
rules_file.WriteIfChanged()
# Add rules file to project.
p.AddToolFile(rules_filename)
def _Cygwinify(path):
path = path.replace('$(OutDir)', '$(OutDirCygwin)')
path = path.replace('$(IntDir)', '$(IntDirCygwin)')
return path
def _GenerateExternalRules(rules, output_dir, spec,
sources, options, actions_to_add):
"""Generate an external makefile to do a set of rules.
Arguments:
rules: the list of rules to include
output_dir: path containing project and gyp files
spec: project specification data
sources: set of sources known
options: global generator options
actions_to_add: The list of actions we will add to.
"""
filename = '%s_rules%s.mk' % (spec['target_name'], options.suffix)
mk_file = gyp.common.WriteOnDiff(os.path.join(output_dir, filename))
# Find cygwin style versions of some paths.
mk_file.write('OutDirCygwin:=$(shell cygpath -u "$(OutDir)")\n')
mk_file.write('IntDirCygwin:=$(shell cygpath -u "$(IntDir)")\n')
# Gather stuff needed to emit all: target.
all_inputs = set()
all_outputs = set()
all_output_dirs = set()
first_outputs = []
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
all_inputs.update(set(inputs))
all_outputs.update(set(outputs))
# Only use one target from each rule as the dependency for
# 'all' so we don't try to build each rule multiple times.
first_outputs.append(list(outputs)[0])
# Get the unique output directories for this rule.
output_dirs = [os.path.split(i)[0] for i in outputs]
for od in output_dirs:
all_output_dirs.add(od)
first_outputs_cyg = [_Cygwinify(i) for i in first_outputs]
# Write out all: target, including mkdir for each output directory.
mk_file.write('all: %s\n' % ' '.join(first_outputs_cyg))
for od in all_output_dirs:
if od:
mk_file.write('\tmkdir -p `cygpath -u "%s"`\n' % od)
mk_file.write('\n')
# Define how each output is generated.
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
# Get all the inputs and outputs for this rule for this trigger file.
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
inputs = [_Cygwinify(i) for i in inputs]
outputs = [_Cygwinify(i) for i in outputs]
# Prepare the command line for this rule.
cmd = [_RuleExpandPath(c, tf) for c in rule['action']]
cmd = ['"%s"' % i for i in cmd]
cmd = ' '.join(cmd)
# Add it to the makefile.
mk_file.write('%s: %s\n' % (' '.join(outputs), ' '.join(inputs)))
mk_file.write('\t%s\n\n' % cmd)
# Close up the file.
mk_file.close()
# Add makefile to list of sources.
sources.add(filename)
# Add a build action to call makefile.
cmd = ['make',
'OutDir=$(OutDir)',
'IntDir=$(IntDir)',
'-j', '${NUMBER_OF_PROCESSORS_PLUS_1}',
'-f', filename]
cmd = _BuildCommandLineForRuleRaw(spec, cmd, True, False, True)
# Insert makefile as 0'th input, so it gets the action attached there,
# as this is easier to understand from within the IDE.
all_inputs = list(all_inputs)
all_inputs.insert(0, filename)
_AddActionStep(actions_to_add,
inputs=_FixPaths(all_inputs),
outputs=_FixPaths(all_outputs),
description='Running %s' % cmd,
command=cmd)
def _EscapeEnvironmentVariableExpansion(s):
"""Escapes % characters.
Escapes any % characters so that Windows-style environment variable
expansions will leave them alone.
See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile
to understand why we have to do this.
Args:
s: The string to be escaped.
Returns:
The escaped string.
"""
s = s.replace('%', '%%')
return s
quote_replacer_regex = re.compile(r'(\\*)"')
def _EscapeCommandLineArgumentForMSVS(s):
"""Escapes a Windows command-line argument.
So that the Win32 CommandLineToArgv function will turn the escaped result back
into the original string.
See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
("Parsing C++ Command-Line Arguments") to understand why we have to do
this.
Args:
s: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a literal quote, CommandLineToArgv requires an odd number of
# backslashes preceding it, and it produces half as many literal backslashes
# (rounded down). So we need to produce 2n+1 backslashes.
return 2 * match.group(1) + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex.sub(_Replace, s)
# Now add unescaped quotes so that any whitespace is interpreted literally.
s = '"' + s + '"'
return s
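# Illustrative sketch (hypothetical argument):
#   >>> _EscapeCommandLineArgumentForMSVS('say "hi"')
#   '"say \\"hi\\""'
# i.e. each embedded quote is backslash-escaped and the whole argument is then
# quoted, so CommandLineToArgv recovers the original string 'say "hi"'.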
delimiters_replacer_regex = re.compile(r'(\\*)([,;]+)')
def _EscapeVCProjCommandLineArgListItem(s):
"""Escapes command line arguments for MSVS.
The VCProj format stores string lists in a single string using commas and
semi-colons as separators, which must be quoted if they are to be
interpreted literally. However, command-line arguments may already have
quotes, and the VCProj parser is ignorant of the backslash escaping
convention used by CommandLineToArgv, so the command-line quotes and the
VCProj quotes may not be the same quotes. So to store a general
command-line argument in a VCProj list, we need to parse the existing
quoting according to VCProj's convention and quote any delimiters that are
not already quoted by that convention. The quotes that we add will also be
seen by CommandLineToArgv, so if backslashes precede them then we also have
to escape those backslashes according to the CommandLineToArgv
convention.
Args:
s: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a non-literal quote, CommandLineToArgv requires an even number of
# backslashes preceding it, and it produces half as many literal
# backslashes. So we need to produce 2n backslashes.
return 2 * match.group(1) + '"' + match.group(2) + '"'
segments = s.split('"')
# The unquoted segments are at the even-numbered indices.
for i in range(0, len(segments), 2):
segments[i] = delimiters_replacer_regex.sub(_Replace, segments[i])
# Concatenate back into a single string
s = '"'.join(segments)
if len(segments) % 2 == 0:
# String ends while still quoted according to VCProj's convention. This
# means the delimiter and the next list item that follow this one in the
# .vcproj file will be misinterpreted as part of this item. There is nothing
# we can do about this. Adding an extra quote would correct the problem in
# the VCProj but cause the same problem on the final command-line. Moving
# the item to the end of the list does work, but that's only possible if
# there's only one such item. Let's just warn the user.
print >> sys.stderr, ('Warning: MSVS may misinterpret the odd number of ' +
'quotes in ' + s)
return s
def _EscapeCppDefineForMSVS(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = _EscapeEnvironmentVariableExpansion(s)
s = _EscapeCommandLineArgumentForMSVS(s)
s = _EscapeVCProjCommandLineArgListItem(s)
return s
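# Illustrative sketch (hypothetical defines, composing the three escapes):
#   >>> _EscapeCppDefineForMSVS('FOO=100%')
#   '"FOO=100%%"'
#   >>> _EscapeCppDefineForMSVS('MSG=hello world')
#   '"MSG=hello world"'
# The '%' is doubled first, then the whole define is argument-quoted.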
quote_replacer_regex2 = re.compile(r'(\\+)"')
def _EscapeCommandLineArgumentForMSBuild(s):
"""Escapes a Windows command-line argument for use by MSBuild."""
def _Replace(match):
return (len(match.group(1))/2*4)*'\\' + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex2.sub(_Replace, s)
return s
def _EscapeMSBuildSpecialCharacters(s):
escape_dictionary = {
'%': '%25',
'$': '%24',
'@': '%40',
"'": '%27',
';': '%3B',
'?': '%3F',
'*': '%2A'
}
result = ''.join([escape_dictionary.get(c, c) for c in s])
return result
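# Illustrative sketch (hypothetical input):
#   >>> _EscapeMSBuildSpecialCharacters('100% of $(X)')
#   '100%25 of %24(X)'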
def _EscapeCppDefineForMSBuild(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = _EscapeEnvironmentVariableExpansion(s)
s = _EscapeCommandLineArgumentForMSBuild(s)
s = _EscapeMSBuildSpecialCharacters(s)
return s
def _GenerateRulesForMSVS(p, output_dir, options, spec,
sources, excluded_sources,
actions_to_add):
"""Generate all the rules for a particular project.
Arguments:
p: the project
output_dir: directory to emit rules to
options: global options passed to the generator
spec: the specification for this project
sources: the set of all known source files in this project
excluded_sources: the set of sources excluded from normal processing
actions_to_add: deferred list of actions to add in
"""
rules = spec.get('rules', [])
rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
# Handle rules that use a native rules file.
if rules_native:
_GenerateNativeRulesForMSVS(p, rules_native, output_dir, spec, options)
# Handle external rules (non-native rules).
if rules_external:
_GenerateExternalRules(rules_external, output_dir, spec,
sources, options, actions_to_add)
_AdjustSourcesForRules(rules, sources, excluded_sources)
def _AdjustSourcesForRules(rules, sources, excluded_sources):
# Add outputs generated by each rule (if applicable).
for rule in rules:
# Done if not processing outputs as sources.
if int(rule.get('process_outputs_as_sources', False)):
# Add in the outputs from this rule.
trigger_files = _FindRuleTriggerFiles(rule, sources)
for trigger_file in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, trigger_file)
inputs = set(_FixPaths(inputs))
outputs = set(_FixPaths(outputs))
inputs.remove(_FixPath(trigger_file))
sources.update(inputs)
excluded_sources.update(inputs)
sources.update(outputs)
def _FilterActionsFromExcluded(excluded_sources, actions_to_add):
"""Take inputs with actions attached out of the list of exclusions.
Arguments:
excluded_sources: list of source files not to be built.
actions_to_add: dict of actions keyed on source file they're attached to.
Returns:
excluded_sources with files that have actions attached removed.
"""
must_keep = set(_FixPaths(actions_to_add.keys()))
return [s for s in excluded_sources if s not in must_keep]
def _GetDefaultConfiguration(spec):
return spec['configurations'][spec['default_configuration']]
def _GetGuidOfProject(proj_path, spec):
"""Get the guid for the project.
Arguments:
proj_path: Path of the vcproj or vcxproj file to generate.
spec: The target dictionary containing the properties of the target.
Returns:
the guid.
Raises:
ValueError: if the specified GUID is invalid.
"""
# Pluck out the default configuration.
default_config = _GetDefaultConfiguration(spec)
# Decide the guid of the project.
guid = default_config.get('msvs_guid')
if guid:
if VALID_MSVS_GUID_CHARS.match(guid) is None:
raise ValueError('Invalid MSVS guid: "%s". Must match regex: "%s".' %
(guid, VALID_MSVS_GUID_CHARS.pattern))
guid = '{%s}' % guid
guid = guid or MSVSNew.MakeGuid(proj_path)
return guid
def _GenerateProject(project, options, version, generator_flags):
"""Generates a vcproj file.
Arguments:
project: the MSVSProject object.
options: global generator options.
version: the MSVSVersion object.
generator_flags: dict of generator-specific flags.
"""
default_config = _GetDefaultConfiguration(project.spec)
# Skip emitting anything if told to with msvs_existing_vcproj option.
if default_config.get('msvs_existing_vcproj'):
return
if version.UsesVcxproj():
_GenerateMSBuildProject(project, options, version, generator_flags)
else:
_GenerateMSVSProject(project, options, version, generator_flags)
def _GenerateMSVSProject(project, options, version, generator_flags):
"""Generates a .vcproj file. It may create .rules and .user files too.
Arguments:
project: The project object we will generate the file for.
options: Global options passed to the generator.
version: The VisualStudioVersion object.
generator_flags: dict of generator-specific flags.
"""
spec = project.spec
vcproj_dir = os.path.dirname(project.path)
if vcproj_dir and not os.path.exists(vcproj_dir):
os.makedirs(vcproj_dir)
platforms = _GetUniquePlatforms(spec)
p = MSVSProject.Writer(project.path, version, spec['target_name'],
project.guid, platforms)
# Get directory project file is in.
project_dir = os.path.split(project.path)[0]
gyp_path = _NormalizedSource(project.build_file)
relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
config_type = _GetMSVSConfigurationType(spec, project.build_file)
for config_name, config in spec['configurations'].iteritems():
_AddConfigurationToMSVSProject(p, spec, config_type, config_name, config)
# Prepare list of sources and excluded sources.
gyp_file = os.path.split(project.build_file)[1]
sources, excluded_sources = _PrepareListOfSources(spec, gyp_file)
# Add rules.
actions_to_add = {}
_GenerateRulesForMSVS(p, project_dir, options, spec,
sources, excluded_sources,
actions_to_add)
list_excluded = generator_flags.get('msvs_list_excluded_files', True)
sources, excluded_sources, excluded_idl = (
_AdjustSourcesAndConvertToFilterHierarchy(
spec, options, project_dir, sources, excluded_sources, list_excluded))
# Add in files.
_VerifySourcesExist(sources, project_dir)
p.AddFiles(sources)
_AddToolFilesToMSVS(p, spec)
_HandlePreCompiledHeaders(p, sources, spec)
_AddActions(actions_to_add, spec, relative_path_of_gyp_file)
_AddCopies(actions_to_add, spec)
_WriteMSVSUserFile(project.path, version, spec)
# NOTE: this stanza must appear after all actions have been decided.
# Don't exclude sources with actions attached, or they won't run.
excluded_sources = _FilterActionsFromExcluded(
excluded_sources, actions_to_add)
_ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
list_excluded)
_AddAccumulatedActionsToMSVS(p, spec, actions_to_add)
# Write it out.
p.WriteIfChanged()
def _GetUniquePlatforms(spec):
"""Returns the list of unique platforms for this spec, e.g. ['Win32', ...].
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
The list of unique platform names used by this spec's configurations.
"""
# Gather list of unique platforms.
platforms = set()
for configuration in spec['configurations']:
platforms.add(_ConfigPlatform(spec['configurations'][configuration]))
platforms = list(platforms)
return platforms
def _CreateMSVSUserFile(proj_path, version, spec):
"""Generates a .user file for the user running this Gyp program.
Arguments:
proj_path: The path of the project file being created. The .user file
shares the same path (with an appropriate suffix).
version: The VisualStudioVersion object.
spec: The target dictionary containing the properties of the target.
Returns:
The MSVSUserFile object created.
"""
(domain, username) = _GetDomainAndUserName()
vcuser_filename = '.'.join([proj_path, domain, username, 'user'])
user_file = MSVSUserFile.Writer(vcuser_filename, version,
spec['target_name'])
return user_file
def _GetMSVSConfigurationType(spec, build_file):
"""Returns the configuration type for this project.
It's a number defined by Microsoft. May raise an exception.
Args:
spec: The target dictionary containing the properties of the target.
build_file: The path of the gyp file.
Returns:
An integer, the configuration type.
"""
try:
config_type = {
'executable': '1', # .exe
'shared_library': '2', # .dll
'loadable_module': '2', # .dll
'static_library': '4', # .lib
'none': '10', # Utility type
}[spec['type']]
except KeyError:
if spec.get('type'):
raise Exception('Target type %s is not a valid target type for '
'target %s in %s.' %
(spec['type'], spec['target_name'], build_file))
else:
raise Exception('Missing type field for target %s in %s.' %
(spec['target_name'], build_file))
return config_type
def _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config):
"""Adds a configuration to the MSVS project.
Many settings in a vcproj file are specific to a configuration. This
function generates the main part of the vcproj file that is configuration
specific.
Arguments:
p: The target project being generated.
spec: The target dictionary containing the properties of the target.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
config: The dictionary that defines the special processing to be done
for this configuration.
"""
# Get the information for this configuration
include_dirs, resource_include_dirs = _GetIncludeDirs(config)
libraries = _GetLibraries(spec)
out_file, vc_tool, _ = _GetOutputFilePathAndTool(spec)
defines = _GetDefines(config)
defines = [_EscapeCppDefineForMSVS(d) for d in defines]
disabled_warnings = _GetDisabledWarnings(config)
prebuild = config.get('msvs_prebuild')
postbuild = config.get('msvs_postbuild')
def_file = _GetModuleDefinition(spec)
precompiled_header = config.get('msvs_precompiled_header')
# Prepare the list of tools as a dictionary.
tools = dict()
# Add in user specified msvs_settings.
msvs_settings = config.get('msvs_settings', {})
MSVSSettings.ValidateMSVSSettings(msvs_settings)
for tool in msvs_settings:
settings = config['msvs_settings'][tool]
for setting in settings:
_ToolAppend(tools, tool, setting, settings[setting])
# Add the information to the appropriate tool
_ToolAppend(tools, 'VCCLCompilerTool',
'AdditionalIncludeDirectories', include_dirs)
_ToolAppend(tools, 'VCResourceCompilerTool',
'AdditionalIncludeDirectories', resource_include_dirs)
# Add in libraries.
_ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', libraries)
if out_file:
_ToolAppend(tools, vc_tool, 'OutputFile', out_file, only_if_unset=True)
# Add defines.
_ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', defines)
_ToolAppend(tools, 'VCResourceCompilerTool', 'PreprocessorDefinitions',
defines)
# Change program database directory to prevent collisions.
_ToolAppend(tools, 'VCCLCompilerTool', 'ProgramDataBaseFileName',
'$(IntDir)\\$(ProjectName)\\vc80.pdb', only_if_unset=True)
# Add disabled warnings.
_ToolAppend(tools, 'VCCLCompilerTool',
'DisableSpecificWarnings', disabled_warnings)
# Add Pre-build.
_ToolAppend(tools, 'VCPreBuildEventTool', 'CommandLine', prebuild)
# Add Post-build.
_ToolAppend(tools, 'VCPostBuildEventTool', 'CommandLine', postbuild)
# Turn on precompiled headers if appropriate.
if precompiled_header:
precompiled_header = os.path.split(precompiled_header)[1]
_ToolAppend(tools, 'VCCLCompilerTool', 'UsePrecompiledHeader', '2')
_ToolAppend(tools, 'VCCLCompilerTool',
'PrecompiledHeaderThrough', precompiled_header)
_ToolAppend(tools, 'VCCLCompilerTool',
'ForcedIncludeFiles', precompiled_header)
# Loadable modules don't generate import libraries;
# tell dependent projects to not expect one.
if spec['type'] == 'loadable_module':
_ToolAppend(tools, 'VCLinkerTool', 'IgnoreImportLibrary', 'true')
# Set the module definition file if any.
if def_file:
_ToolAppend(tools, 'VCLinkerTool', 'ModuleDefinitionFile', def_file)
_AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name)
def _GetIncludeDirs(config):
"""Returns the list of directories to be used for #include directives.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of directory paths.
"""
# TODO(bradnelson): include_dirs should really be flexible enough not to
# require this sort of thing.
include_dirs = (
config.get('include_dirs', []) +
config.get('msvs_system_include_dirs', []))
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
include_dirs = _FixPaths(include_dirs)
resource_include_dirs = _FixPaths(resource_include_dirs)
return include_dirs, resource_include_dirs
def _GetLibraries(spec):
"""Returns the list of libraries for this configuration.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
The list of directory paths.
"""
libraries = spec.get('libraries', [])
# Strip out -l, as it is not used on windows (but is needed so we can pass
# in libraries that are assumed to be in the default library path).
# Also remove duplicate entries, leaving only the last duplicate, while
# preserving order.
found = set()
unique_libraries_list = []
for entry in reversed(libraries):
library = re.sub('^\-l', '', entry)
if library not in found:
found.add(library)
unique_libraries_list.append(library)
unique_libraries_list.reverse()
return unique_libraries_list
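# Illustrative sketch (hypothetical spec):
#   >>> _GetLibraries({'libraries': ['a.lib', '-lb.lib', 'a.lib']})
#   ['b.lib', 'a.lib']
# The '-l' prefix is stripped, and only the last duplicate of 'a.lib' survives
# while relative order is otherwise preserved.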
def _GetOutputFilePathAndTool(spec):
"""Returns the path and tool to use for this target.
Figures out the path of the file this spec will create and the name of
the VC tool that will create it.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A triple of (file path, name of the vc tool, name of the msbuild tool)
"""
# Select a name for the output file.
out_file = ''
vc_tool = ''
msbuild_tool = ''
output_file_map = {
'executable': ('VCLinkerTool', 'Link', '$(OutDir)\\', '.exe'),
'shared_library': ('VCLinkerTool', 'Link', '$(OutDir)\\', '.dll'),
'loadable_module': ('VCLinkerTool', 'Link', '$(OutDir)\\', '.dll'),
# TODO(jeanluc) If we want to avoid the MSB8012 warnings in
# VisualStudio 2010, we will have to change the value of $(OutDir)
# to contain the \lib suffix, rather than doing it as below.
'static_library': ('VCLibrarianTool', 'Lib', '$(OutDir)\\lib\\', '.lib'),
}
output_file_props = output_file_map.get(spec['type'])
if output_file_props and int(spec.get('msvs_auto_output_file', 1)):
vc_tool, msbuild_tool, out_dir, suffix = output_file_props
out_dir = spec.get('product_dir', out_dir)
product_extension = spec.get('product_extension')
if product_extension:
suffix = '.' + product_extension
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
out_file = ntpath.join(out_dir, prefix + product_name + suffix)
return out_file, vc_tool, msbuild_tool
def _GetDefines(config):
"""Returns the list of preprocessor definitions for this configuration.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of preprocessor definitions.
"""
defines = []
for d in config.get('defines', []):
if type(d) == list:
fd = '='.join([str(dpart) for dpart in d])
else:
fd = str(d)
defines.append(fd)
return defines
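# Illustrative sketch (hypothetical config):
#   >>> _GetDefines({'defines': ['NDEBUG', ['VERSION', 2]]})
#   ['NDEBUG', 'VERSION=2']
# List-form defines become NAME=value strings.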
def _GetDisabledWarnings(config):
return [str(i) for i in config.get('msvs_disabled_warnings', [])]
def _GetModuleDefinition(spec):
def_file = ''
if spec['type'] in ['shared_library', 'loadable_module']:
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
def_file = _FixPath(def_files[0])
elif def_files:
raise ValueError(
'Multiple module definition files in one target, target %s lists '
'multiple .def files: %s' % (
spec['target_name'], ' '.join(def_files)))
return def_file
def _ConvertToolsToExpectedForm(tools):
"""Convert tools to a form expected by Visual Studio.
Arguments:
tools: A dictionary of settings; the tool name is the key.
Returns:
A list of Tool objects.
"""
tool_list = []
for tool, settings in tools.iteritems():
# Collapse settings with lists.
settings_fixed = {}
for setting, value in settings.iteritems():
if type(value) == list:
if ((tool == 'VCLinkerTool' and
setting == 'AdditionalDependencies') or
setting == 'AdditionalOptions'):
settings_fixed[setting] = ' '.join(value)
else:
settings_fixed[setting] = ';'.join(value)
else:
settings_fixed[setting] = value
# Add in this tool.
tool_list.append(MSVSProject.Tool(tool, settings_fixed))
return tool_list
def _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name):
"""Add to the project file the configuration specified by config.
Arguments:
p: The target project being generated.
spec: the target project dict.
tools: A dictionary of settings; the tool name is the key.
config: The dictionary that defines the special processing to be done
for this configuration.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
"""
attributes = _GetMSVSAttributes(spec, config, config_type)
# Add in this configuration.
tool_list = _ConvertToolsToExpectedForm(tools)
p.AddConfig(_ConfigFullName(config_name, config),
attrs=attributes, tools=tool_list)
def _GetMSVSAttributes(spec, config, config_type):
# Prepare configuration attributes.
prepared_attrs = {}
source_attrs = config.get('msvs_configuration_attributes', {})
for a in source_attrs:
prepared_attrs[a] = source_attrs[a]
# Add props files.
vsprops_dirs = config.get('msvs_props', [])
vsprops_dirs = _FixPaths(vsprops_dirs)
if vsprops_dirs:
prepared_attrs['InheritedPropertySheets'] = ';'.join(vsprops_dirs)
# Set configuration type.
prepared_attrs['ConfigurationType'] = config_type
output_dir = prepared_attrs.get('OutputDirectory',
'$(SolutionDir)$(ConfigurationName)')
# TODO(jeanluc) If we want to avoid the MSB8012 warning, we should
# add code like the following to place libraries in their own directory.
# if config_type == '4':
# output_dir = spec.get('product_dir', output_dir + '\\lib')
prepared_attrs['OutputDirectory'] = output_dir
if 'IntermediateDirectory' not in prepared_attrs:
intermediate = '$(ConfigurationName)\\obj\\$(ProjectName)'
prepared_attrs['IntermediateDirectory'] = intermediate
return prepared_attrs
def _AddNormalizedSources(sources_set, sources_array):
sources = [_NormalizedSource(s) for s in sources_array]
sources_set.update(set(sources))
def _PrepareListOfSources(spec, gyp_file):
"""Prepare list of sources and excluded sources.
Besides the sources specified directly in the spec, adds the gyp file so
that a change to it will cause a re-compile. Also adds appropriate sources
for actions and copies. Assumes later stage will un-exclude files which
have custom build steps attached.
Arguments:
spec: The target dictionary containing the properties of the target.
gyp_file: The name of the gyp file.
Returns:
A pair of (list of sources, list of excluded sources).
The sources will be relative to the gyp file.
"""
sources = set()
_AddNormalizedSources(sources, spec.get('sources', []))
excluded_sources = set()
# Add in the gyp file.
sources.add(gyp_file)
# Add in 'action' inputs and outputs.
for a in spec.get('actions', []):
inputs = a.get('inputs', [])
inputs = [_NormalizedSource(i) for i in inputs]
# Add all inputs to sources and excluded sources.
inputs = set(inputs)
sources.update(inputs)
excluded_sources.update(inputs)
if int(a.get('process_outputs_as_sources', False)):
_AddNormalizedSources(sources, a.get('outputs', []))
# Add in 'copies' inputs and outputs.
for cpy in spec.get('copies', []):
_AddNormalizedSources(sources, cpy.get('files', []))
return (sources, excluded_sources)
def _AdjustSourcesAndConvertToFilterHierarchy(
spec, options, gyp_dir, sources, excluded_sources, list_excluded):
"""Adjusts the list of sources and excluded sources.
Also converts the sets to lists.
Arguments:
spec: The target dictionary containing the properties of the target.
options: Global generator options.
gyp_dir: The path to the gyp file being processed.
sources: A set of sources to be included for this project.
excluded_sources: A set of sources to be excluded for this project.
list_excluded: Whether or not to list excluded files in the project file.
Returns:
A trio of (list of sources, list of excluded sources,
path of excluded IDL file)
"""
# Exclude excluded sources coming into the generator.
excluded_sources.update(set(spec.get('sources_excluded', [])))
# Add excluded sources into sources for good measure.
sources.update(excluded_sources)
# Convert to proper windows form.
# NOTE: sources goes from being a set to a list here.
# NOTE: excluded_sources goes from being a set to a list here.
sources = _FixPaths(sources)
# Convert to proper windows form.
excluded_sources = _FixPaths(excluded_sources)
excluded_idl = _IdlFilesHandledNonNatively(spec, sources)
precompiled_related = _GetPrecompileRelatedFiles(spec)
# Find the excluded ones, minus the precompiled header related ones.
fully_excluded = [i for i in excluded_sources if i not in precompiled_related]
# Convert to folders and the right slashes.
sources = [i.split('\\') for i in sources]
sources = _ConvertSourcesToFilterHierarchy(sources, excluded=fully_excluded,
list_excluded=list_excluded)
return sources, excluded_sources, excluded_idl
def _IdlFilesHandledNonNatively(spec, sources):
# If any non-native rules use 'idl' as an extension exclude idl files.
# Gather a list here to use later.
using_idl = False
for rule in spec.get('rules', []):
if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)):
using_idl = True
break
if using_idl:
excluded_idl = [i for i in sources if i.endswith('.idl')]
else:
excluded_idl = []
return excluded_idl
def _GetPrecompileRelatedFiles(spec):
# Gather a list of precompiled header related sources.
precompiled_related = []
for _, config in spec['configurations'].iteritems():
for k in precomp_keys:
f = config.get(k)
if f:
precompiled_related.append(_FixPath(f))
return precompiled_related
def _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
list_excluded):
exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
for file_name, excluded_configs in exclusions.iteritems():
if (not list_excluded and
len(excluded_configs) == len(spec['configurations'])):
# If we're not listing excluded files, then they won't appear in the
# project, so don't try to configure them to be excluded.
pass
else:
for config_name, config in excluded_configs:
p.AddFileConfig(file_name, _ConfigFullName(config_name, config),
{'ExcludedFromBuild': 'true'})
def _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl):
exclusions = {}
# Exclude excluded sources from being built.
for f in excluded_sources:
excluded_configs = []
for config_name, config in spec['configurations'].iteritems():
precomped = [_FixPath(config.get(i, '')) for i in precomp_keys]
# Don't do this for ones that are precompiled header related.
if f not in precomped:
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
# If any non-native rules use 'idl' as an extension exclude idl files.
# Exclude them now.
for f in excluded_idl:
excluded_configs = []
for config_name, config in spec['configurations'].iteritems():
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
return exclusions
def _AddToolFilesToMSVS(p, spec):
# Add in tool files (rules).
tool_files = set()
for _, config in spec['configurations'].iteritems():
for f in config.get('msvs_tool_files', []):
tool_files.add(f)
for f in tool_files:
p.AddToolFile(f)
def _HandlePreCompiledHeaders(p, sources, spec):
# Pre-compiled header source stubs need a different compiler flag
# (generate precompiled header) and any source file not of the same
# kind (i.e. C vs. C++) as the precompiled header source stub needs
# to have use of precompiled headers disabled.
extensions_excluded_from_precompile = []
for config_name, config in spec['configurations'].iteritems():
source = config.get('msvs_precompiled_source')
if source:
source = _FixPath(source)
# UsePrecompiledHeader=1 for if using precompiled headers.
tool = MSVSProject.Tool('VCCLCompilerTool',
{'UsePrecompiledHeader': '1'})
p.AddFileConfig(source, _ConfigFullName(config_name, config),
{}, tools=[tool])
basename, extension = os.path.splitext(source)
if extension == '.c':
extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
else:
extensions_excluded_from_precompile = ['.c']
def DisableForSourceTree(source_tree):
for source in source_tree:
if isinstance(source, MSVSProject.Filter):
DisableForSourceTree(source.contents)
else:
basename, extension = os.path.splitext(source)
if extension in extensions_excluded_from_precompile:
for config_name, config in spec['configurations'].iteritems():
tool = MSVSProject.Tool('VCCLCompilerTool',
{'UsePrecompiledHeader': '0',
'ForcedIncludeFiles': '$(NOINHERIT)'})
p.AddFileConfig(_FixPath(source),
_ConfigFullName(config_name, config),
{}, tools=[tool])
# Do nothing if there was no precompiled source.
if extensions_excluded_from_precompile:
DisableForSourceTree(sources)
def _AddActions(actions_to_add, spec, relative_path_of_gyp_file):
# Add actions.
actions = spec.get('actions', [])
for a in actions:
cmd = _BuildCommandLineForRule(spec, a, has_input_path=False)
# Attach actions to the gyp file if nothing else is there.
inputs = a.get('inputs') or [relative_path_of_gyp_file]
# Add the action.
_AddActionStep(actions_to_add,
inputs=inputs,
outputs=a.get('outputs', []),
description=a.get('message', a['action_name']),
command=cmd)
def _WriteMSVSUserFile(project_path, version, spec):
# Add run_as and test targets.
if 'run_as' in spec:
run_as = spec['run_as']
action = run_as.get('action', [])
environment = run_as.get('environment', [])
working_directory = run_as.get('working_directory', '.')
elif int(spec.get('test', 0)):
action = ['$(TargetPath)', '--gtest_print_time']
environment = []
working_directory = '.'
else:
return # Nothing to add
# Write out the user file.
user_file = _CreateMSVSUserFile(project_path, version, spec)
for config_name, c_data in spec['configurations'].iteritems():
user_file.AddDebugSettings(_ConfigFullName(config_name, c_data),
action, environment, working_directory)
user_file.WriteIfChanged()
def _AddCopies(actions_to_add, spec):
copies = _GetCopies(spec)
for inputs, outputs, cmd, description in copies:
_AddActionStep(actions_to_add, inputs=inputs, outputs=outputs,
description=description, command=cmd)
def _GetCopies(spec):
copies = []
# Add copies.
for cpy in spec.get('copies', []):
for src in cpy.get('files', []):
dst = os.path.join(cpy['destination'], os.path.basename(src))
# _AddCustomBuildToolForMSVS() will call _FixPath() on the inputs and
# outputs, so do the same for our generated command line.
if src.endswith('/'):
src_bare = src[:-1]
base_dir = posixpath.split(src_bare)[0]
outer_dir = posixpath.split(src_bare)[1]
cmd = 'cd "%s" && xcopy /e /f /y "%s" "%s\\%s\\"' % (
_FixPath(base_dir), outer_dir, _FixPath(dst), outer_dir)
copies.append(([src], ['dummy_copies', dst], cmd,
'Copying %s to %s' % (src, dst)))
else:
cmd = 'mkdir "%s" 2>nul & set ERRORLEVEL=0 & copy /Y "%s" "%s"' % (
_FixPath(cpy['destination']), _FixPath(src), _FixPath(dst))
copies.append(([src], [dst], cmd, 'Copying %s to %s' % (src, dst)))
return copies
def _GetPathDict(root, path):
# |path| will eventually be empty (in the recursive calls) if it was initially
# relative; otherwise it will eventually end up as '\', 'D:\', etc.
if not path or path.endswith(os.sep):
return root
parent, folder = os.path.split(path)
parent_dict = _GetPathDict(root, parent)
if folder not in parent_dict:
parent_dict[folder] = dict()
return parent_dict[folder]
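# Illustrative sketch (hypothetical relative path):
#   >>> root = {}
#   >>> _GetPathDict(root, 'a/b') is root['a']['b']
#   True
#   >>> root
#   {'a': {'b': {}}}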
def _DictsToFolders(base_path, bucket, flat):
# Convert to folders recursively.
children = []
for folder, contents in bucket.iteritems():
if type(contents) == dict:
folder_children = _DictsToFolders(os.path.join(base_path, folder),
contents, flat)
if flat:
children += folder_children
else:
folder_children = MSVSNew.MSVSFolder(os.path.join(base_path, folder),
name='(' + folder + ')',
entries=folder_children)
children.append(folder_children)
else:
children.append(contents)
return children
def _CollapseSingles(parent, node):
# Recursively explore the tree of dicts looking for projects which are
# the sole item in a folder which has the same name as the project. Bring
# such projects up one level.
if (type(node) == dict and
len(node) == 1 and
node.keys()[0] == parent + '.vcproj'):
return node[node.keys()[0]]
if type(node) != dict:
return node
for child in node:
node[child] = _CollapseSingles(child, node[child])
return node
def _GatherSolutionFolders(sln_projects, project_objects, flat):
root = {}
# Convert into a tree of dicts on path.
for p in sln_projects:
gyp_file, target = gyp.common.ParseQualifiedTarget(p)[0:2]
gyp_dir = os.path.dirname(gyp_file)
path_dict = _GetPathDict(root, gyp_dir)
path_dict[target + '.vcproj'] = project_objects[p]
# Walk down from the top until we hit a folder that has more than one entry.
# In practice, this strips the top-level "src/" dir from the hierarchy in
# the solution.
while len(root) == 1 and type(root[root.keys()[0]]) == dict:
root = root[root.keys()[0]]
# Collapse singles.
root = _CollapseSingles('', root)
# Merge buckets until everything is a root entry.
return _DictsToFolders('', root, flat)
def _GetPathOfProject(qualified_target, spec, options, msvs_version):
default_config = _GetDefaultConfiguration(spec)
proj_filename = default_config.get('msvs_existing_vcproj')
if not proj_filename:
proj_filename = (spec['target_name'] + options.suffix +
msvs_version.ProjectExtension())
build_file = gyp.common.BuildFile(qualified_target)
proj_path = os.path.join(os.path.split(build_file)[0], proj_filename)
fix_prefix = None
if options.generator_output:
project_dir_path = os.path.dirname(os.path.abspath(proj_path))
proj_path = os.path.join(options.generator_output, proj_path)
fix_prefix = gyp.common.RelativePath(project_dir_path,
os.path.dirname(proj_path))
return proj_path, fix_prefix
def _GetPlatformOverridesOfProject(spec):
# Prepare a dict indicating which project configurations are used for which
# solution configurations for this target.
config_platform_overrides = {}
for config_name, c in spec['configurations'].iteritems():
config_fullname = _ConfigFullName(config_name, c)
platform = c.get('msvs_target_platform', _ConfigPlatform(c))
fixed_config_fullname = '%s|%s' % (
_ConfigBaseName(config_name, _ConfigPlatform(c)), platform)
config_platform_overrides[config_fullname] = fixed_config_fullname
return config_platform_overrides
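# Illustrative sketch (hypothetical spec): a configuration that targets x64
# while the solution configuration stays on the default Win32 platform.
#   >>> spec = {'configurations': {'Debug': {'msvs_target_platform': 'x64'}}}
#   >>> _GetPlatformOverridesOfProject(spec)
#   {'Debug|Win32': 'Debug|x64'}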
def _CreateProjectObjects(target_list, target_dicts, options, msvs_version):
"""Create a MSVSProject object for the targets found in target list.
Arguments:
target_list: the list of targets to generate project objects for.
target_dicts: the dictionary of specifications.
options: global generator options.
msvs_version: the MSVSVersion object.
Returns:
A set of created projects, keyed by target.
"""
global fixpath_prefix
# Generate each project.
projects = {}
for qualified_target in target_list:
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise Exception(
'Multiple toolsets not supported in msvs build (target %s)' %
qualified_target)
proj_path, fixpath_prefix = _GetPathOfProject(qualified_target, spec,
options, msvs_version)
guid = _GetGuidOfProject(proj_path, spec)
overrides = _GetPlatformOverridesOfProject(spec)
build_file = gyp.common.BuildFile(qualified_target)
# Create object for this project.
obj = MSVSNew.MSVSProject(
_FixPath(proj_path),
name=spec['target_name'],
guid=guid,
spec=spec,
build_file=build_file,
config_platform_overrides=overrides,
fixpath_prefix=fixpath_prefix)
projects[qualified_target] = obj
# Set all the dependencies
for project in projects.values():
deps = project.spec.get('dependencies', [])
deps = [projects[d] for d in deps]
project.set_dependencies(deps)
return projects
def CalculateVariables(default_variables, params):
"""Generated variables that require params to be known."""
generator_flags = params.get('generator_flags', {})
# Select project file format version (if unset, default to auto detecting).
msvs_version = MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'))
# Stash msvs_version for later (so we don't have to probe the system twice).
params['msvs_version'] = msvs_version
# Set a variable so conditions can be based on msvs_version.
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
# process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
# contains the actual word size of the system when running through WOW64).
if (os.environ.get('PROCESSOR_ARCHITECTURE', '').find('64') >= 0 or
os.environ.get('PROCESSOR_ARCHITEW6432', '').find('64') >= 0):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
def _ShardName(name, number):
"""Add a shard number to the end of a target.
Arguments:
name: name of the target (foo#target)
number: shard number
Returns:
Target name with shard added (foo_1#target)
"""
parts = name.rsplit('#', 1)
parts[0] = '%s_%d' % (parts[0], number)
return '#'.join(parts)
def _ShardTargets(target_list, target_dicts):
"""Shard some targets apart to work around the linker's limits.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
Returns:
Tuple of the new sharded versions of the inputs.
"""
# Gather the targets to shard, and how many pieces.
targets_to_shard = {}
for t in target_dicts:
shards = int(target_dicts[t].get('msvs_shard', 0))
if shards:
targets_to_shard[t] = shards
# Shard target_list.
new_target_list = []
for t in target_list:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
new_target_list.append(_ShardName(t, i))
else:
new_target_list.append(t)
# Shard target_dict.
new_target_dicts = {}
for t in target_dicts:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
name = _ShardName(t, i)
new_target_dicts[name] = copy.copy(target_dicts[t])
new_target_dicts[name]['target_name'] = _ShardName(
new_target_dicts[name]['target_name'], i)
sources = new_target_dicts[name].get('sources', [])
new_sources = []
for pos in range(i, len(sources), targets_to_shard[t]):
new_sources.append(sources[pos])
new_target_dicts[name]['sources'] = new_sources
else:
new_target_dicts[t] = target_dicts[t]
# Shard dependencies.
for t in new_target_dicts:
dependencies = copy.copy(new_target_dicts[t].get('dependencies', []))
new_dependencies = []
for d in dependencies:
if d in targets_to_shard:
for i in range(targets_to_shard[d]):
new_dependencies.append(_ShardName(d, i))
else:
new_dependencies.append(d)
new_target_dicts[t]['dependencies'] = new_dependencies
return (new_target_list, new_target_dicts)
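# Illustrative sketch (hypothetical target): with 'msvs_shard': 2, a target
# 'base/base.gyp:lib#target' becomes lib_0 and lib_1, and its sources are
# dealt out round-robin:
#   sources ['s0.cc', 's1.cc', 's2.cc', 's3.cc']
#   -> lib_0 gets ['s0.cc', 's2.cc'], lib_1 gets ['s1.cc', 's3.cc']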
def GenerateOutput(target_list, target_dicts, data, params):
"""Generate .sln and .vcproj files.
This is the entry point for this generator.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dictionary containing per .gyp data.
params: Dictionary of global generator parameters, including 'options'
and 'generator_flags'.
"""
global fixpath_prefix
options = params['options']
# Get the project file format version back out of where we stashed it in
# GeneratorCalculatedVariables.
msvs_version = params['msvs_version']
generator_flags = params.get('generator_flags', {})
# Optionally shard targets marked with 'msvs_shard': SHARD_COUNT.
(target_list, target_dicts) = _ShardTargets(target_list, target_dicts)
# Prepare the set of configurations.
configs = set()
for qualified_target in target_list:
spec = target_dicts[qualified_target]
for config_name, config in spec['configurations'].iteritems():
configs.add(_ConfigFullName(config_name, config))
configs = list(configs)
# Figure out all the projects that will be generated and their guids
project_objects = _CreateProjectObjects(target_list, target_dicts, options,
msvs_version)
# Generate each project.
for project in project_objects.values():
fixpath_prefix = project.fixpath_prefix
_GenerateProject(project, options, msvs_version, generator_flags)
fixpath_prefix = None
for build_file in data:
# Validate build_file extension
if build_file[-4:] != '.gyp':
continue
sln_path = build_file[:-4] + options.suffix + '.sln'
if options.generator_output:
sln_path = os.path.join(options.generator_output, sln_path)
# Get projects in the solution, and their dependents.
sln_projects = gyp.common.BuildFileTargets(target_list, build_file)
sln_projects += gyp.common.DeepDependencyTargets(target_dicts, sln_projects)
# Create folder hierarchy.
root_entries = _GatherSolutionFolders(
sln_projects, project_objects, flat=msvs_version.FlatSolution())
# Create solution.
sln = MSVSNew.MSVSSolution(sln_path,
entries=root_entries,
variants=configs,
websiteProperties=False,
version=msvs_version)
sln.Write()
def _GenerateMSBuildFiltersFile(filters_path, source_files,
extension_to_rule_name):
"""Generate the filters file.
This file is used by Visual Studio to organize the presentation of source
files into folders.
Arguments:
filters_path: The path of the file to be created.
source_files: The hierarchical structure of all the sources.
extension_to_rule_name: A dictionary mapping file extensions to rules.
"""
filter_group = []
source_group = []
_AppendFiltersForMSBuild('', source_files, extension_to_rule_name,
filter_group, source_group)
if filter_group:
content = ['Project',
{'ToolsVersion': '4.0',
'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
},
['ItemGroup'] + filter_group,
['ItemGroup'] + source_group
]
easy_xml.WriteXmlIfChanged(content, filters_path)
elif os.path.exists(filters_path):
# We don't need this filter anymore. Delete the old filter file.
os.unlink(filters_path)
def _AppendFiltersForMSBuild(parent_filter_name, sources,
extension_to_rule_name,
filter_group, source_group):
"""Creates the list of filters and sources to be added in the filter file.
Args:
parent_filter_name: The name of the filter under which the sources are
found.
sources: The hierarchy of filters and sources to process.
extension_to_rule_name: A dictionary mapping file extensions to rules.
filter_group: The list to which filter entries will be appended.
source_group: The list to which source entries will be appended.
"""
for source in sources:
if isinstance(source, MSVSProject.Filter):
# We have a sub-filter. Create the name of that sub-filter.
if not parent_filter_name:
filter_name = source.name
else:
filter_name = '%s\\%s' % (parent_filter_name, source.name)
# Add the filter to the group.
filter_group.append(
['Filter', {'Include': filter_name},
['UniqueIdentifier', MSVSNew.MakeGuid(source.name)]])
# Recurse and add its dependents.
_AppendFiltersForMSBuild(filter_name, source.contents,
extension_to_rule_name,
filter_group, source_group)
else:
# It's a source. Create a source entry.
_, element = _MapFileToMsBuildSourceType(source, extension_to_rule_name)
source_entry = [element, {'Include': source}]
# Specify the filter it is part of, if any.
if parent_filter_name:
source_entry.append(['Filter', parent_filter_name])
source_group.append(source_entry)
def _MapFileToMsBuildSourceType(source, extension_to_rule_name):
"""Returns the group and element type of the source file.
Arguments:
source: The source file name.
extension_to_rule_name: A dictionary mapping file extensions to rules.
Returns:
A pair of (group this file should be part of, the label of element)
"""
_, ext = os.path.splitext(source)
if ext in extension_to_rule_name:
group = 'rule'
element = extension_to_rule_name[ext]
elif ext in ['.cc', '.cpp', '.c', '.cxx']:
group = 'compile'
element = 'ClCompile'
elif ext in ['.h', '.hxx']:
group = 'include'
element = 'ClInclude'
elif ext == '.rc':
group = 'resource'
element = 'ResourceCompile'
elif ext == '.idl':
group = 'midl'
element = 'Midl'
else:
group = 'none'
element = 'None'
return (group, element)
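# For example (hypothetical inputs, with no custom rules registered):
#   _MapFileToMsBuildSourceType('main.cc', {})   -> ('compile', 'ClCompile')
#   _MapFileToMsBuildSourceType('app.rc', {})    -> ('resource', 'ResourceCompile')
#   _MapFileToMsBuildSourceType('notes.txt', {}) -> ('none', 'None')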
def _GenerateRulesForMSBuild(output_dir, options, spec,
sources, excluded_sources,
props_files_of_rules, targets_files_of_rules,
actions_to_add, extension_to_rule_name):
# MSBuild rules are implemented using three files: an XML file, a .targets
# file and a .props file.
# See http://blogs.msdn.com/b/vcblog/archive/2010/04/21/quick-help-on-vs2010-custom-build-rule.aspx
# for more details.
rules = spec.get('rules', [])
rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
msbuild_rules = []
for rule in rules_native:
msbuild_rule = MSBuildRule(rule, spec)
msbuild_rules.append(msbuild_rule)
extension_to_rule_name[msbuild_rule.extension] = msbuild_rule.rule_name
if msbuild_rules:
base = spec['target_name'] + options.suffix
props_name = base + '.props'
targets_name = base + '.targets'
xml_name = base + '.xml'
props_files_of_rules.add(props_name)
targets_files_of_rules.add(targets_name)
props_path = os.path.join(output_dir, props_name)
targets_path = os.path.join(output_dir, targets_name)
xml_path = os.path.join(output_dir, xml_name)
_GenerateMSBuildRulePropsFile(props_path, msbuild_rules)
_GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules)
_GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules)
if rules_external:
_GenerateExternalRules(rules_external, output_dir, spec,
sources, options, actions_to_add)
_AdjustSourcesForRules(rules, sources, excluded_sources)
class MSBuildRule(object):
"""Used to store information used to generate an MSBuild rule.
Attributes:
rule_name: The rule name, sanitized to use in XML.
target_name: The name of the target.
after_targets: The name of the AfterTargets element.
before_targets: The name of the BeforeTargets element.
depends_on: The name of the DependsOn element.
compute_output: The name of the ComputeOutput element.
dirs_to_make: The name of the DirsToMake element.
tlog: The name of the _tlog element.
extension: The extension this rule applies to.
description: The message displayed when this rule is invoked.
additional_dependencies: A string listing additional dependencies.
outputs: The outputs of this rule.
command: The command used to run the rule.
"""
def __init__(self, rule, spec):
self.display_name = rule['rule_name']
    # Make sure the rule name contains only word characters
    # (letters, digits and underscores).
self.rule_name = re.sub(r'\W', '_', self.display_name)
# Create the various element names, following the example set by the
# Visual Studio 2008 to 2010 conversion. I don't know if VS2010
# is sensitive to the exact names.
self.target_name = '_' + self.rule_name
self.after_targets = self.rule_name + 'AfterTargets'
self.before_targets = self.rule_name + 'BeforeTargets'
self.depends_on = self.rule_name + 'DependsOn'
self.compute_output = 'Compute%sOutput' % self.rule_name
self.dirs_to_make = self.rule_name + 'DirsToMake'
self.tlog = self.rule_name + '_tlog'
self.extension = rule['extension']
if not self.extension.startswith('.'):
self.extension = '.' + self.extension
self.description = MSVSSettings.ConvertVCMacrosToMSBuild(
rule.get('message', self.rule_name))
old_additional_dependencies = _FixPaths(rule.get('inputs', []))
self.additional_dependencies = (
';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
for i in old_additional_dependencies]))
old_outputs = _FixPaths(rule.get('outputs', []))
self.outputs = ';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
for i in old_outputs])
old_command = _BuildCommandLineForRule(spec, rule, has_input_path=True)
self.command = MSVSSettings.ConvertVCMacrosToMSBuild(old_command)
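# As a sketch of the naming scheme: a hypothetical rule named 'Grit It!'
# gets rule_name 'Grit_It_', and the derived element names become
# target_name '_Grit_It_', before_targets 'Grit_It_BeforeTargets',
# after_targets 'Grit_It_AfterTargets', depends_on 'Grit_It_DependsOn'
# and compute_output 'ComputeGrit_It_Output'.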
def _GenerateMSBuildRulePropsFile(props_path, msbuild_rules):
"""Generate the .props file."""
content = ['Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'}]
for rule in msbuild_rules:
content.extend([
['PropertyGroup',
{'Condition': "'$(%s)' == '' and '$(%s)' == '' and "
"'$(ConfigurationType)' != 'Makefile'" % (rule.before_targets,
rule.after_targets)
},
[rule.before_targets, 'Midl'],
[rule.after_targets, 'CustomBuild'],
],
['PropertyGroup',
[rule.depends_on,
{'Condition': "'$(ConfigurationType)' != 'Makefile'"},
'_SelectedFiles;$(%s)' % rule.depends_on
],
],
['ItemDefinitionGroup',
[rule.rule_name,
['CommandLineTemplate', rule.command],
['Outputs', rule.outputs],
['ExecutionDescription', rule.description],
['AdditionalDependencies', rule.additional_dependencies],
],
]
])
easy_xml.WriteXmlIfChanged(content, props_path)
def _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules):
"""Generate the .targets file."""
content = ['Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
}
]
item_group = [
'ItemGroup',
['PropertyPageSchema',
{'Include': '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'}
]
]
for rule in msbuild_rules:
item_group.append(
['AvailableItemName',
{'Include': rule.rule_name},
['Targets', rule.target_name],
])
content.append(item_group)
for rule in msbuild_rules:
content.append(
['UsingTask',
{'TaskName': rule.rule_name,
'TaskFactory': 'XamlTaskFactory',
'AssemblyName': 'Microsoft.Build.Tasks.v4.0'
},
['Task', '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'],
])
for rule in msbuild_rules:
rule_name = rule.rule_name
target_outputs = '%%(%s.Outputs)' % rule_name
target_inputs = ('%%(%s.Identity);%%(%s.AdditionalDependencies);'
'$(MSBuildProjectFile)') % (rule_name, rule_name)
rule_inputs = '%%(%s.Identity)' % rule_name
extension_condition = ("'%(Extension)'=='.obj' or "
"'%(Extension)'=='.res' or "
"'%(Extension)'=='.rsc' or "
"'%(Extension)'=='.lib'")
remove_section = [
'ItemGroup',
{'Condition': "'@(SelectedFiles)' != ''"},
[rule_name,
{'Remove': '@(%s)' % rule_name,
'Condition': "'%(Identity)' != '@(SelectedFiles)'"
}
]
]
logging_section = [
'ItemGroup',
[rule.tlog,
{'Include': '%%(%s.Outputs)' % rule_name,
'Condition': ("'%%(%s.Outputs)' != '' and "
"'%%(%s.ExcludedFromBuild)' != 'true'" %
(rule_name, rule_name))
},
['Source', "@(%s, '|')" % rule_name],
],
]
message_section = [
'Message',
{'Importance': 'High',
'Text': '%%(%s.ExecutionDescription)' % rule_name
}
]
write_lines_section = [
'WriteLinesToFile',
{'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
"'true'" % (rule.tlog, rule.tlog),
'File': '$(IntDir)$(ProjectName).write.1.tlog',
'Lines': "^%%(%s.Source);@(%s->'%%(Fullpath)')" % (rule.tlog,
rule.tlog)
}
]
command_and_input_section = [
rule_name,
{'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
"'true'" % (rule_name, rule_name),
'CommandLineTemplate': '%%(%s.CommandLineTemplate)' % rule_name,
'AdditionalOptions': '%%(%s.AdditionalOptions)' % rule_name,
'Inputs': rule_inputs
}
]
content.extend([
['Target',
{'Name': rule.target_name,
'BeforeTargets': '$(%s)' % rule.before_targets,
'AfterTargets': '$(%s)' % rule.after_targets,
'Condition': "'@(%s)' != ''" % rule_name,
'DependsOnTargets': '$(%s);%s' % (rule.depends_on,
rule.compute_output),
'Outputs': target_outputs,
'Inputs': target_inputs
},
remove_section,
logging_section,
message_section,
write_lines_section,
command_and_input_section,
],
['PropertyGroup',
['ComputeLinkInputsTargets',
'$(ComputeLinkInputsTargets);',
'%s;' % rule.compute_output
],
['ComputeLibInputsTargets',
'$(ComputeLibInputsTargets);',
'%s;' % rule.compute_output
],
],
['Target',
{'Name': rule.compute_output,
'Condition': "'@(%s)' != ''" % rule_name
},
['ItemGroup',
[rule.dirs_to_make,
{'Condition': "'@(%s)' != '' and "
"'%%(%s.ExcludedFromBuild)' != 'true'" % (rule_name, rule_name),
'Include': '%%(%s.Outputs)' % rule_name
}
],
['Link',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
['Lib',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
['ImpLib',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
],
['MakeDir',
{'Directories': ("@(%s->'%%(RootDir)%%(Directory)')" %
rule.dirs_to_make)
}
]
],
])
easy_xml.WriteXmlIfChanged(content, targets_path)
def _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules):
  """Generate the .xml file."""
content = [
'ProjectSchemaDefinitions',
{'xmlns': ('clr-namespace:Microsoft.Build.Framework.XamlTypes;'
'assembly=Microsoft.Build.Framework'),
'xmlns:x': 'http://schemas.microsoft.com/winfx/2006/xaml',
'xmlns:sys': 'clr-namespace:System;assembly=mscorlib',
'xmlns:transformCallback':
'Microsoft.Cpp.Dev10.ConvertPropertyCallback'
}
]
for rule in msbuild_rules:
content.extend([
['Rule',
{'Name': rule.rule_name,
'PageTemplate': 'tool',
'DisplayName': rule.display_name,
'Order': '200'
},
['Rule.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'ItemType': rule.rule_name
}
]
],
['Rule.Categories',
['Category',
{'Name': 'General'},
['Category.DisplayName',
['sys:String', 'General'],
],
],
['Category',
{'Name': 'Command Line',
'Subtype': 'CommandLine'
},
['Category.DisplayName',
['sys:String', 'Command Line'],
],
],
],
['StringListProperty',
{'Name': 'Inputs',
'Category': 'Command Line',
'IsRequired': 'true',
'Switch': ' '
},
['StringListProperty.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'ItemType': rule.rule_name,
'SourceType': 'Item'
}
]
],
],
['StringProperty',
{'Name': 'CommandLineTemplate',
'DisplayName': 'Command Line',
'Visible': 'False',
'IncludeInCommandLine': 'False'
}
],
['DynamicEnumProperty',
{'Name': rule.before_targets,
'Category': 'General',
'EnumProvider': 'Targets',
'IncludeInCommandLine': 'False'
},
['DynamicEnumProperty.DisplayName',
['sys:String', 'Execute Before'],
],
['DynamicEnumProperty.Description',
['sys:String', 'Specifies the targets for the build customization'
' to run before.'
],
],
['DynamicEnumProperty.ProviderSettings',
['NameValuePair',
{'Name': 'Exclude',
'Value': '^%s|^Compute' % rule.before_targets
}
]
],
['DynamicEnumProperty.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'HasConfigurationCondition': 'true'
}
]
],
],
['DynamicEnumProperty',
{'Name': rule.after_targets,
'Category': 'General',
'EnumProvider': 'Targets',
'IncludeInCommandLine': 'False'
},
['DynamicEnumProperty.DisplayName',
['sys:String', 'Execute After'],
],
['DynamicEnumProperty.Description',
['sys:String', ('Specifies the targets for the build customization'
' to run after.')
],
],
['DynamicEnumProperty.ProviderSettings',
['NameValuePair',
{'Name': 'Exclude',
'Value': '^%s|^Compute' % rule.after_targets
}
]
],
['DynamicEnumProperty.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'ItemType': '',
'HasConfigurationCondition': 'true'
}
]
],
],
['StringListProperty',
{'Name': 'Outputs',
'DisplayName': 'Outputs',
'Visible': 'False',
'IncludeInCommandLine': 'False'
}
],
['StringProperty',
{'Name': 'ExecutionDescription',
'DisplayName': 'Execution Description',
'Visible': 'False',
'IncludeInCommandLine': 'False'
}
],
['StringListProperty',
{'Name': 'AdditionalDependencies',
'DisplayName': 'Additional Dependencies',
'IncludeInCommandLine': 'False',
'Visible': 'false'
}
],
['StringProperty',
{'Subtype': 'AdditionalOptions',
'Name': 'AdditionalOptions',
'Category': 'Command Line'
},
['StringProperty.DisplayName',
['sys:String', 'Additional Options'],
],
['StringProperty.Description',
['sys:String', 'Additional Options'],
],
],
],
['ItemType',
{'Name': rule.rule_name,
'DisplayName': rule.display_name
}
],
['FileExtension',
{'Name': '*' + rule.extension,
'ContentType': rule.rule_name
}
],
['ContentType',
{'Name': rule.rule_name,
'DisplayName': '',
'ItemType': rule.rule_name
}
]
])
easy_xml.WriteXmlIfChanged(content, xml_path)
def _GetConfigurationAndPlatform(name, settings):
configuration = name.rsplit('_', 1)[0]
platform = settings.get('msvs_configuration_platform', 'Win32')
return (configuration, platform)
def _GetConfigurationCondition(name, settings):
return (r"'$(Configuration)|$(Platform)'=='%s|%s'" %
_GetConfigurationAndPlatform(name, settings))
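# For example, a configuration named 'Debug_x64' whose settings carry
# 'msvs_configuration_platform': 'x64' yields the condition string:
#   "'$(Configuration)|$(Platform)'=='Debug|x64'"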
def _GetMSBuildProjectConfigurations(configurations):
group = ['ItemGroup', {'Label': 'ProjectConfigurations'}]
for (name, settings) in sorted(configurations.iteritems()):
configuration, platform = _GetConfigurationAndPlatform(name, settings)
designation = '%s|%s' % (configuration, platform)
group.append(
['ProjectConfiguration', {'Include': designation},
['Configuration', configuration],
['Platform', platform]])
return [group]
def _GetMSBuildGlobalProperties(spec, guid, gyp_file_name):
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
target_name = prefix + product_name
namespace = os.path.splitext(gyp_file_name)[0]
return [
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', guid],
['Keyword', 'Win32Proj'],
['RootNamespace', namespace],
['TargetName', target_name],
]
]
def _GetMSBuildConfigurationDetails(spec, build_file):
properties = {}
for name, settings in spec['configurations'].iteritems():
msbuild_attributes = _GetMSBuildAttributes(spec, settings, build_file)
condition = _GetConfigurationCondition(name, settings)
character_set = msbuild_attributes.get('CharacterSet')
_AddConditionalProperty(properties, condition, 'ConfigurationType',
msbuild_attributes['ConfigurationType'])
if character_set:
_AddConditionalProperty(properties, condition, 'CharacterSet',
character_set)
return _GetMSBuildPropertyGroup(spec, 'Configuration', properties)
def _GetMSBuildPropertySheets(configurations):
user_props = r'$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props'
return [
['ImportGroup',
{'Label': 'PropertySheets'},
['Import',
{'Project': user_props,
'Condition': "exists('%s')" % user_props,
'Label': 'LocalAppDataPlatform'
}
]
]
]
def _GetMSBuildAttributes(spec, config, build_file):
# Use the MSVS attributes and convert them. In the future, we may want to
# support Gyp files specifying 'msbuild_configuration_attributes' directly.
config_type = _GetMSVSConfigurationType(spec, build_file)
msvs_attributes = _GetMSVSAttributes(spec, config, config_type)
msbuild_attributes = {}
for a in msvs_attributes:
if a in ['IntermediateDirectory', 'OutputDirectory']:
directory = MSVSSettings.ConvertVCMacrosToMSBuild(msvs_attributes[a])
if not directory.endswith('\\'):
directory += '\\'
msbuild_attributes[a] = directory
elif a == 'CharacterSet':
msbuild_attributes[a] = {
'0': 'MultiByte',
'1': 'Unicode'
}[msvs_attributes[a]]
elif a == 'ConfigurationType':
msbuild_attributes[a] = {
'1': 'Application',
'2': 'DynamicLibrary',
'4': 'StaticLibrary',
'10': 'Utility'
}[msvs_attributes[a]]
else:
print 'Warning: Do not know how to convert MSVS attribute ' + a
return msbuild_attributes
def _GetMSBuildConfigurationGlobalProperties(spec, configurations, build_file):
# TODO(jeanluc) We could optimize out the following and do it only if
# there are actions.
# TODO(jeanluc) Handle the equivalent of setting 'CYGWIN=nontsec'.
new_paths = []
cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])[0]
if cygwin_dirs:
cyg_path = '$(MSBuildProjectDirectory)\\%s\\bin\\' % _FixPath(cygwin_dirs)
new_paths.append(cyg_path)
# TODO(jeanluc) Change the convention to have both a cygwin_dir and a
# python_dir.
python_path = cyg_path.replace('cygwin\\bin', 'python_26')
new_paths.append(python_path)
if new_paths:
new_paths = '$(ExecutablePath);' + ';'.join(new_paths)
properties = {}
for (name, configuration) in sorted(configurations.iteritems()):
condition = _GetConfigurationCondition(name, configuration)
attributes = _GetMSBuildAttributes(spec, configuration, build_file)
msbuild_settings = configuration['finalized_msbuild_settings']
_AddConditionalProperty(properties, condition, 'IntDir',
attributes['IntermediateDirectory'])
_AddConditionalProperty(properties, condition, 'OutDir',
attributes['OutputDirectory'])
if new_paths:
_AddConditionalProperty(properties, condition, 'ExecutablePath',
new_paths)
tool_settings = msbuild_settings.get('', {})
for name, value in sorted(tool_settings.iteritems()):
formatted_value = _GetValueFormattedForMSBuild('', name, value)
_AddConditionalProperty(properties, condition, name, formatted_value)
return _GetMSBuildPropertyGroup(spec, None, properties)
def _AddConditionalProperty(properties, condition, name, value):
"""Adds a property / conditional value pair to a dictionary.
Arguments:
properties: The dictionary to be modified. The key is the name of the
property. The value is itself a dictionary; its key is the value and
      the value a list of conditions for which this value is true.
condition: The condition under which the named property has the value.
name: The name of the property.
value: The value of the property.
"""
if name not in properties:
properties[name] = {}
values = properties[name]
if value not in values:
values[value] = []
conditions = values[value]
conditions.append(condition)
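# After a few calls the dictionary has this shape (values illustrative):
#   properties = {
#     'IntDir': {'Debug\\obj\\':   [<Debug condition>],
#                'Release\\obj\\': [<Release condition>]},
#   }
# i.e. property name -> value -> list of conditions under which it holds.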
def _GetMSBuildPropertyGroup(spec, label, properties):
"""Returns a PropertyGroup definition for the specified properties.
Arguments:
spec: The target project dict.
label: An optional label for the PropertyGroup.
properties: The dictionary to be converted. The key is the name of the
property. The value is itself a dictionary; its key is the value and
      the value a list of conditions for which this value is true.
"""
group = ['PropertyGroup']
if label:
group.append({'Label': label})
num_configurations = len(spec['configurations'])
for name, values in sorted(properties.iteritems()):
for value, conditions in sorted(values.iteritems()):
if len(conditions) == num_configurations:
        # If the value is the same for all configurations,
# just add one unconditional entry.
group.append([name, value])
else:
for condition in conditions:
group.append([name, {'Condition': condition}, value])
return [group]
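# With two configurations that share 'ConfigurationType' but differ on
# 'CharacterSet', the returned group would look like (illustrative):
#   [['PropertyGroup', {'Label': 'Configuration'},
#     ['ConfigurationType', 'Application'],
#     ['CharacterSet', {'Condition': "...=='Debug|Win32'"}, 'Unicode'],
#     ['CharacterSet', {'Condition': "...=='Release|Win32'"}, 'MultiByte']]]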
def _GetMSBuildToolSettingsSections(spec, configurations):
groups = []
for (name, configuration) in sorted(configurations.iteritems()):
msbuild_settings = configuration['finalized_msbuild_settings']
group = ['ItemDefinitionGroup',
{'Condition': _GetConfigurationCondition(name, configuration)}
]
for tool_name, tool_settings in sorted(msbuild_settings.iteritems()):
# Skip the tool named '' which is a holder of global settings handled
# by _GetMSBuildConfigurationGlobalProperties.
if tool_name:
if tool_settings:
tool = [tool_name]
for name, value in sorted(tool_settings.iteritems()):
formatted_value = _GetValueFormattedForMSBuild(tool_name, name,
value)
tool.append([name, formatted_value])
group.append(tool)
groups.append(group)
return groups
def _FinalizeMSBuildSettings(spec, configuration):
if 'msbuild_settings' in configuration:
converted = False
msbuild_settings = configuration['msbuild_settings']
MSVSSettings.ValidateMSBuildSettings(msbuild_settings)
else:
converted = True
msvs_settings = configuration.get('msvs_settings', {})
msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(msvs_settings)
include_dirs, resource_include_dirs = _GetIncludeDirs(configuration)
libraries = _GetLibraries(spec)
out_file, _, msbuild_tool = _GetOutputFilePathAndTool(spec)
defines = _GetDefines(configuration)
if converted:
# Visual Studio 2010 has TR1
defines = [d for d in defines if d != '_HAS_TR1=0']
# Warn of ignored settings
ignored_settings = ['msvs_prebuild', 'msvs_postbuild', 'msvs_tool_files']
for ignored_setting in ignored_settings:
value = configuration.get(ignored_setting)
if value:
print ('Warning: The automatic conversion to MSBuild does not handle '
'%s. Ignoring setting of %s' % (ignored_setting, str(value)))
defines = [_EscapeCppDefineForMSBuild(d) for d in defines]
disabled_warnings = _GetDisabledWarnings(configuration)
# TODO(jeanluc) Validate & warn that we don't translate
# prebuild = configuration.get('msvs_prebuild')
# postbuild = configuration.get('msvs_postbuild')
def_file = _GetModuleDefinition(spec)
precompiled_header = configuration.get('msvs_precompiled_header')
# Add the information to the appropriate tool
# TODO(jeanluc) We could optimize and generate these settings only if
# the corresponding files are found, e.g. don't generate ResourceCompile
# if you don't have any resources.
_ToolAppend(msbuild_settings, 'ClCompile',
'AdditionalIncludeDirectories', include_dirs)
_ToolAppend(msbuild_settings, 'ResourceCompile',
'AdditionalIncludeDirectories', resource_include_dirs)
# Add in libraries.
_ToolAppend(msbuild_settings, 'Link', 'AdditionalDependencies', libraries)
if out_file:
_ToolAppend(msbuild_settings, msbuild_tool, 'OutputFile', out_file,
only_if_unset=True)
# Add defines.
_ToolAppend(msbuild_settings, 'ClCompile',
'PreprocessorDefinitions', defines)
_ToolAppend(msbuild_settings, 'ResourceCompile',
'PreprocessorDefinitions', defines)
# Add disabled warnings.
_ToolAppend(msbuild_settings, 'ClCompile',
'DisableSpecificWarnings', disabled_warnings)
# Turn on precompiled headers if appropriate.
if precompiled_header:
precompiled_header = os.path.split(precompiled_header)[1]
_ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'Use')
_ToolAppend(msbuild_settings, 'ClCompile',
'PrecompiledHeaderFile', precompiled_header)
_ToolAppend(msbuild_settings, 'ClCompile',
'ForcedIncludeFiles', precompiled_header)
# Loadable modules don't generate import libraries;
# tell dependent projects to not expect one.
if spec['type'] == 'loadable_module':
_ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'true')
# Set the module definition file if any.
if def_file:
_ToolAppend(msbuild_settings, 'Link', 'ModuleDefinitionFile', def_file)
configuration['finalized_msbuild_settings'] = msbuild_settings
def _GetValueFormattedForMSBuild(tool_name, name, value):
if type(value) == list:
    # For some settings, VS2010 does not automatically extend the settings
# TODO(jeanluc) Is this what we want?
if name in ['AdditionalDependencies',
'AdditionalIncludeDirectories',
'AdditionalLibraryDirectories',
'AdditionalOptions',
'DelayLoadDLLs',
'DisableSpecificWarnings',
'PreprocessorDefinitions']:
value.append('%%(%s)' % name)
# For most tools, entries in a list should be separated with ';' but some
# settings use a space. Check for those first.
exceptions = {
'ClCompile': ['AdditionalOptions'],
'Link': ['AdditionalOptions'],
'Lib': ['AdditionalOptions']}
if tool_name in exceptions and name in exceptions[tool_name]:
char = ' '
else:
char = ';'
formatted_value = char.join(
[MSVSSettings.ConvertVCMacrosToMSBuild(i) for i in value])
else:
formatted_value = MSVSSettings.ConvertVCMacrosToMSBuild(value)
return formatted_value
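# For instance (hypothetical values):
#   _GetValueFormattedForMSBuild('ClCompile', 'PreprocessorDefinitions',
#                                ['FOO', 'BAR'])
# returns 'FOO;BAR;%(PreprocessorDefinitions)': the list is ';'-joined and
# the MSBuild inheritance macro is appended; 'AdditionalOptions' for
# ClCompile/Link/Lib would instead be joined with spaces.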
def _VerifySourcesExist(sources, root_dir):
"""Verifies that all source files exist on disk.
Checks that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation but no otherwise
visible errors.
Arguments:
sources: A recursive list of Filter/file names.
root_dir: The root directory for the relative path names.
"""
for source in sources:
if isinstance(source, MSVSProject.Filter):
_VerifySourcesExist(source.contents, root_dir)
else:
if '$' not in source:
full_path = os.path.join(root_dir, source)
if not os.path.exists(full_path):
print 'Warning: Missing input file ' + full_path + ' pwd=' +\
os.getcwd()
def _GetMSBuildSources(spec, sources, exclusions, extension_to_rule_name,
actions_spec, sources_handled_by_action, list_excluded):
groups = ['none', 'midl', 'include', 'compile', 'resource', 'rule']
grouped_sources = {}
for g in groups:
grouped_sources[g] = []
_AddSources2(spec, sources, exclusions, grouped_sources,
extension_to_rule_name, sources_handled_by_action, list_excluded)
sources = []
for g in groups:
if grouped_sources[g]:
sources.append(['ItemGroup'] + grouped_sources[g])
if actions_spec:
sources.append(['ItemGroup'] + actions_spec)
return sources
def _AddSources2(spec, sources, exclusions, grouped_sources,
extension_to_rule_name, sources_handled_by_action,
list_excluded):
extensions_excluded_from_precompile = []
for source in sources:
if isinstance(source, MSVSProject.Filter):
_AddSources2(spec, source.contents, exclusions, grouped_sources,
extension_to_rule_name, sources_handled_by_action,
list_excluded)
else:
      if source not in sources_handled_by_action:
detail = []
excluded_configurations = exclusions.get(source, [])
if len(excluded_configurations) == len(spec['configurations']):
detail.append(['ExcludedFromBuild', 'true'])
else:
for config_name, configuration in sorted(excluded_configurations):
condition = _GetConfigurationCondition(config_name, configuration)
detail.append(['ExcludedFromBuild',
{'Condition': condition},
'true'])
# Add precompile if needed
for config_name, configuration in spec['configurations'].iteritems():
precompiled_source = configuration.get('msvs_precompiled_source', '')
if precompiled_source != '':
precompiled_source = _FixPath(precompiled_source)
if not extensions_excluded_from_precompile:
# If the precompiled header is generated by a C source, we must
# not try to use it for C++ sources, and vice versa.
basename, extension = os.path.splitext(precompiled_source)
if extension == '.c':
extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
else:
extensions_excluded_from_precompile = ['.c']
if precompiled_source == source:
condition = _GetConfigurationCondition(config_name, configuration)
detail.append(['PrecompiledHeader',
{'Condition': condition},
'Create'
])
else:
# Turn off precompiled header usage for source files of a
# different type than the file that generated the
# precompiled header.
for extension in extensions_excluded_from_precompile:
if source.endswith(extension):
detail.append(['PrecompiledHeader', ''])
detail.append(['ForcedIncludeFiles', ''])
group, element = _MapFileToMsBuildSourceType(source,
extension_to_rule_name)
grouped_sources[group].append([element, {'Include': source}] + detail)
def _GetMSBuildProjectReferences(project):
references = []
if project.dependencies:
group = ['ItemGroup']
for dependency in project.dependencies:
guid = dependency.guid
project_dir = os.path.split(project.path)[0]
relative_path = gyp.common.RelativePath(dependency.path, project_dir)
project_ref = ['ProjectReference',
{'Include': relative_path},
['Project', guid],
['ReferenceOutputAssembly', 'false']
]
for config in dependency.spec.get('configurations', {}).itervalues():
# If it's disabled in any config, turn it off in the reference.
if config.get('msvs_2010_disable_uldi_when_referenced', 0):
project_ref.append(['UseLibraryDependencyInputs', 'false'])
break
group.append(project_ref)
references.append(group)
return references
def _GenerateMSBuildProject(project, options, version, generator_flags):
spec = project.spec
configurations = spec['configurations']
project_dir, project_file_name = os.path.split(project.path)
msbuildproj_dir = os.path.dirname(project.path)
if msbuildproj_dir and not os.path.exists(msbuildproj_dir):
os.makedirs(msbuildproj_dir)
# Prepare list of sources and excluded sources.
gyp_path = _NormalizedSource(project.build_file)
relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
gyp_file = os.path.split(project.build_file)[1]
sources, excluded_sources = _PrepareListOfSources(spec, gyp_file)
# Add rules.
actions_to_add = {}
props_files_of_rules = set()
targets_files_of_rules = set()
extension_to_rule_name = {}
list_excluded = generator_flags.get('msvs_list_excluded_files', True)
_GenerateRulesForMSBuild(project_dir, options, spec,
sources, excluded_sources,
props_files_of_rules, targets_files_of_rules,
actions_to_add, extension_to_rule_name)
sources, excluded_sources, excluded_idl = (
_AdjustSourcesAndConvertToFilterHierarchy(spec, options,
project_dir, sources,
excluded_sources,
list_excluded))
_AddActions(actions_to_add, spec, project.build_file)
_AddCopies(actions_to_add, spec)
# NOTE: this stanza must appear after all actions have been decided.
  # Don't exclude sources with actions attached, or they won't run.
excluded_sources = _FilterActionsFromExcluded(
excluded_sources, actions_to_add)
exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
actions_spec, sources_handled_by_action = _GenerateActionsForMSBuild(
spec, actions_to_add)
_GenerateMSBuildFiltersFile(project.path + '.filters', sources,
extension_to_rule_name)
_VerifySourcesExist(sources, project_dir)
for (_, configuration) in configurations.iteritems():
_FinalizeMSBuildSettings(spec, configuration)
# Add attributes to root element
import_default_section = [
['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.Default.props'}]]
import_cpp_props_section = [
['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.props'}]]
import_cpp_targets_section = [
['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.targets'}]]
macro_section = [['PropertyGroup', {'Label': 'UserMacros'}]]
content = [
'Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003',
'ToolsVersion': version.ProjectVersion(),
'DefaultTargets': 'Build'
}]
content += _GetMSBuildProjectConfigurations(configurations)
content += _GetMSBuildGlobalProperties(spec, project.guid, project_file_name)
content += import_default_section
content += _GetMSBuildConfigurationDetails(spec, project.build_file)
content += import_cpp_props_section
content += _GetMSBuildExtensions(props_files_of_rules)
content += _GetMSBuildPropertySheets(configurations)
content += macro_section
content += _GetMSBuildConfigurationGlobalProperties(spec, configurations,
project.build_file)
content += _GetMSBuildToolSettingsSections(spec, configurations)
content += _GetMSBuildSources(
spec, sources, exclusions, extension_to_rule_name, actions_spec,
sources_handled_by_action, list_excluded)
content += _GetMSBuildProjectReferences(project)
content += import_cpp_targets_section
content += _GetMSBuildExtensionTargets(targets_files_of_rules)
# TODO(jeanluc) File a bug to get rid of runas. We had in MSVS:
# has_run_as = _WriteMSVSUserFile(project.path, version, spec)
easy_xml.WriteXmlIfChanged(content, project.path)
def _GetMSBuildExtensions(props_files_of_rules):
extensions = ['ImportGroup', {'Label': 'ExtensionSettings'}]
for props_file in props_files_of_rules:
extensions.append(['Import', {'Project': props_file}])
return [extensions]
def _GetMSBuildExtensionTargets(targets_files_of_rules):
targets_node = ['ImportGroup', {'Label': 'ExtensionTargets'}]
for targets_file in sorted(targets_files_of_rules):
targets_node.append(['Import', {'Project': targets_file}])
return [targets_node]
def _GenerateActionsForMSBuild(spec, actions_to_add):
"""Add actions accumulated into an actions_to_add, merging as needed.
Arguments:
spec: the target project dict
actions_to_add: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
Returns:
A pair of (action specification, the sources handled by this action).
"""
sources_handled_by_action = set()
actions_spec = []
for primary_input, actions in actions_to_add.iteritems():
inputs = set()
outputs = set()
descriptions = []
commands = []
for action in actions:
inputs.update(set(action['inputs']))
outputs.update(set(action['outputs']))
descriptions.append(action['description'])
cmd = action['command']
# For most actions, add 'call' so that actions that invoke batch files
# return and continue executing. msbuild_use_call provides a way to
# disable this but I have not seen any adverse effect from doing that
# for everything.
if action.get('msbuild_use_call', True):
cmd = 'call ' + cmd
commands.append(cmd)
# Add the custom build action for one input file.
description = ', and also '.join(descriptions)
command = ' && '.join(commands)
_AddMSBuildAction(spec,
primary_input,
inputs,
outputs,
command,
description,
sources_handled_by_action,
actions_spec)
return actions_spec, sources_handled_by_action
def _AddMSBuildAction(spec, primary_input, inputs, outputs, cmd, description,
sources_handled_by_action, actions_spec):
command = MSVSSettings.ConvertVCMacrosToMSBuild(cmd)
primary_input = _FixPath(primary_input)
inputs_array = _FixPaths(inputs)
outputs_array = _FixPaths(outputs)
additional_inputs = ';'.join([i for i in inputs_array
if i != primary_input])
outputs = ';'.join(outputs_array)
sources_handled_by_action.add(primary_input)
action_spec = ['CustomBuild', {'Include': primary_input}]
action_spec.extend(
# TODO(jeanluc) 'Document' for all or just if as_sources?
[['FileType', 'Document'],
['Command', command],
['Message', description],
['Outputs', outputs]
])
if additional_inputs:
action_spec.append(['AdditionalInputs', additional_inputs])
actions_spec.append(action_spec)
| lgpl-2.1 | -8,163,682,286,098,596,000 | 36.218058 | 134 | 0.641049 | false |
vancepym/ogre | Tools/Blender2.6Export/ogre_mesh_exporter/global_properties.py | 16 | 9771 | # ##### BEGIN MIT LICENSE BLOCK #####
# Copyright (C) 2011 by Lih-Hern Pang
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ##### END MIT LICENSE BLOCK #####
import bpy, os, sys, configparser
from bpy.props import *
STATIC_CONFIG_FILENAME = "ogre_mesh_exporter.cfg"
class SelectedObject(bpy.types.PropertyGroup):
name = StringProperty(name = "Name", default = "Unknown", options = set())
objectName = StringProperty(name = "Object", default = "Unknown", options = set())
class SelectedObjectList(bpy.types.PropertyGroup):
def onSelectionChanged(self, context):
# Set the selected object as active.
bpy.context.scene.objects.active = bpy.data.objects[self.collection[self.collectionIndex].objectName]
collection = CollectionProperty(type = SelectedObject, options = set())
collectionIndex = IntProperty(min = -1, default = -1, options = set(), update=onSelectionChanged)
class GlobalProperties(bpy.types.PropertyGroup):
# ##############################################
# Material Properties
exportMaterials = BoolProperty(
name = "Export Materials",
description = "Enable/Disable material file exporting.",
default = True,
options = set()
)
materialFile = StringProperty(
name = "Material File",
description = "File name of material.",
default = "Scene.material",
options = set()
)
copyTextures = BoolProperty(
name = "Copy Textures",
description = "Copy textures to export path.",
default = False,
options = set()
)
materialExportMode = EnumProperty(
name = "Material Export Mode",
description = "Diffrent Material Export Modes.",
items = (("rend", "Rendering Materials", "Export using rendering materials."),
("game", "Game Engine Materials", "Export using game engine materials."),
("custom", "Custom Materials", "Export using custom template based materials."),
),
default = "rend",
options = set()
)
templatePath = StringProperty(
name = "Template Path",
description = "Path to material templates for generating custom materials.",
subtype = "DIR_PATH",
options = set()
)
# ##############################################
# Mesh Properties
exportMeshes = BoolProperty(
name = "Export Meshes",
description = "Enable/Disable mesh & skeleton file exporting.",
default = True,
options = set()
)
exportPath = StringProperty(
name = "Export Path",
description = "Path to export files.",
subtype = "DIR_PATH",
options = set()
)
fixUpAxisToY = BoolProperty(
name = "Fix Up Axis to Y",
description = "Fix up axis as Y instead of Z.",
default = True,
options = set()
)
requireMaterials = BoolProperty(
name = "Require Materials",
description = "Generate Error message when part of a mesh is not assigned with a material.",
default = True,
options = set()
)
applyModifiers = BoolProperty(
name = "Apply Modifiers",
description = "Apply mesh modifiers before export. (Slow and may break vertex order for morph targets!)",
default = False,
options = set()
)
skeletonNameFollowMesh = BoolProperty(
name = "Skeleton Name Follow Mesh",
description = "Use mesh name for exported skeleton name instead of the armature name.",
default = True,
options = set()
)
runOgreXMLConverter = BoolProperty(
name = "OgreXMLConverter",
description = "Run OgreXMLConverter on exported XML files.",
default = True,
options = set()
)
# ##############################################
# XML Converter Properties
# This is only a temporary property for editing due to blender's limitation of it's dynamic properties.
# The true value is stored in the globally shared config file.
# This means that this value will be the same for all blend file opened.
ogreXMLConverterPath = StringProperty(
name = "Ogre XML Converter Path",
description = "Path to OgreXMLConverter.",
subtype = "FILE_PATH",
options = {'SKIP_SAVE'}
)
# Saved to the shared config file as above.
ogreXMLConverterAdditionalArg = StringProperty(
name = "Additional Arguments",
description = "Additional Arguments outside of the provided options below. Note that this is shared across all blend files.",
options = {'SKIP_SAVE'}
)
useXMLConverterOptions = BoolProperty(
name = "Use XML Converter Options",
description = "Use the settings set by this XML converter option. These options are saved in blend file. If you want a globally shared option, please uncheck this and use the 'Additional Arguments' option.",
default = True,
options = {'SKIP_SAVE'}
)
extremityPoints = IntProperty(
name = "Extremity Points",
description = "Generate no more than num eXtremes for every submesh. (For submesh render sorting when using alpha materials on submesh)",
soft_min = 0,
soft_max = 65536,
options = {'SKIP_SAVE'}
)
edgeLists = BoolProperty(
name = "Edge Lists",
description = "Generate edge lists. (Useful for outlining or doing stencil shadows)",
default = False,
options = {'SKIP_SAVE'}
)
tangent = BoolProperty(
name = "Tangent",
description = "Generate tangent.",
default = False,
options = {'SKIP_SAVE'}
)
tangentSemantic = EnumProperty(
name = "Tangent Semantic",
description = "Tangent Semantic to use.",
items=(("uvw", "uvw", "Use UV semantic."),
("tangent", "tangent", "Use tangent semantic."),
),
default= "tangent",
options = {'SKIP_SAVE'}
)
tangentSize = EnumProperty(
name = "Tangent Size",
description = "Size of tangent.",
items=(("4", "4 component (parity)", "Use 4 component tangent where 4th component is parity."),
("3", "3 component", "Use 3 component tangent."),
),
default= "3",
options = {'SKIP_SAVE'}
)
splitMirrored = BoolProperty(
name = "Split Mirrored",
description = "Split tangent vertices at UV mirror points.",
default = False,
options = {'SKIP_SAVE'}
)
splitRotated = BoolProperty(
name = "Split Rotated",
description = "Split tangent vertices where basis is rotated > 90 degrees.",
default = False,
options = {'SKIP_SAVE'}
)
reorganiseVertBuff = BoolProperty(
name = "Reorganise Vertex Buffers",
description = "Reorganise vertex buffer to make it GPU vertex cache friendly.",
default = True,
options = {'SKIP_SAVE'}
)
optimiseAnimation = BoolProperty(
name = "Optimise Animation",
description = "Optimise out redundant tracks & keyframes.",
default = True,
options = {'SKIP_SAVE'}
)
# ##############################################
# Log properties.
logPageSize = IntProperty(
name = "Log Page Size",
description = "Size of a visible log page",
default = 10,
options = {'SKIP_SAVE'}
)
logPercentage = IntProperty(
name = "Log Percentage",
description = "Log progress",
default = 100, min = 0, max = 100,
subtype = 'PERCENTAGE',
options = {'SKIP_SAVE'}
)
# ##############################################
# temporary collection for listing selected meshes.
selectedObjectList = PointerProperty(type = SelectedObjectList)
def onDummyTrueChanged(self, context):
# Never let Dummy change.
self.dummyTrue = True
def onDummyFalseChanged(self, context):
# Never let Dummy change.
self.dummyFalse = False
# Dummy property for tab use. (NEVER SET)
dummyTrue = BoolProperty(
default = True,
update = onDummyTrueChanged,
options = {'SKIP_SAVE'})
# Dummy property for label box use. (NEVER SET)
dummyFalse = BoolProperty(
default = False,
update = onDummyFalseChanged,
options = {'SKIP_SAVE'})
# Load static data from config file.
def loadStaticConfig():
global OGRE_XML_CONVERTERPATH
global_settings = bpy.context.scene.ogre_mesh_exporter
# load static data from config file.
config_path = bpy.utils.user_resource('CONFIG')
config_filepath = os.path.join(config_path, STATIC_CONFIG_FILENAME)
config = configparser.ConfigParser()
config.read(config_filepath)
if sys.platform.startswith('win'):
global_settings.ogreXMLConverterPath = _parseConfig(config, "PATHS", "OgreXMLConverter", "C:\\OgreCommandLineTools\\OgreXmlConverter.exe")
elif sys.platform.startswith('linux'):
global_settings.ogreXMLConverterPath = _parseConfig(config, "PATHS", "OgreXMLConverter", "/usr/bin/OgreXMLConverter")
# Parse static config data.
def _parseConfig(config, section, key, default):
try:
return config.get(section, key)
except configparser.Error:
return default
# Save static data to config file.
def saveStaticConfig():
global_settings = bpy.context.scene.ogre_mesh_exporter
config_path = bpy.utils.user_resource('CONFIG')
config_filepath = os.path.join(config_path, STATIC_CONFIG_FILENAME)
	config = configparser.ConfigParser()
	# Read any existing settings first, then overwrite with the current value;
	# reading after set() would let an old file value clobber the new path.
	config.read(config_filepath)
	if not config.has_section("PATHS"):
		config.add_section("PATHS")
	config.set("PATHS", "OgreXMLConverter", global_settings.ogreXMLConverterPath)
with open(config_filepath, 'w') as configfile:
config.write(configfile)
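# The written ogre_mesh_exporter.cfg then looks roughly like this (note that
# ConfigParser lowercases option names on output; the path is illustrative):
#   [PATHS]
#   ogrexmlconverter = /usr/bin/OgreXMLConverter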
| mit | -5,635,659,593,871,601,000 | 33.284211 | 209 | 0.701361 | false |
LittleLama/Sick-Beard-BoxCar2 | cherrypy/lib/static.py | 39 | 14178 | import logging
import mimetypes
mimetypes.init()
mimetypes.types_map['.dwg'] = 'image/x-dwg'
mimetypes.types_map['.ico'] = 'image/x-icon'
mimetypes.types_map['.bz2'] = 'application/x-bzip2'
mimetypes.types_map['.gz'] = 'application/x-gzip'
import os
import re
import stat
import time
from urllib import unquote
import cherrypy
from cherrypy.lib import cptools, httputil, file_generator_limited
def serve_file(path, content_type=None, disposition=None, name=None, debug=False):
"""Set status, headers, and body in order to serve the given path.
The Content-Type header will be set to the content_type arg, if provided.
If not provided, the Content-Type will be guessed by the file extension
of the 'path' argument.
If disposition is not None, the Content-Disposition header will be set
to "<disposition>; filename=<name>". If name is None, it will be set
to the basename of path. If disposition is None, no Content-Disposition
header will be written.
"""
response = cherrypy.serving.response
# If path is relative, users should fix it by making path absolute.
# That is, CherryPy should not guess where the application root is.
# It certainly should *not* use cwd (since CP may be invoked from a
# variety of paths). If using tools.staticdir, you can make your relative
# paths become absolute by supplying a value for "tools.staticdir.root".
if not os.path.isabs(path):
msg = "'%s' is not an absolute path." % path
if debug:
cherrypy.log(msg, 'TOOLS.STATICFILE')
raise ValueError(msg)
try:
st = os.stat(path)
except OSError:
if debug:
cherrypy.log('os.stat(%r) failed' % path, 'TOOLS.STATIC')
raise cherrypy.NotFound()
# Check if path is a directory.
if stat.S_ISDIR(st.st_mode):
# Let the caller deal with it as they like.
if debug:
cherrypy.log('%r is a directory' % path, 'TOOLS.STATIC')
raise cherrypy.NotFound()
# Set the Last-Modified response header, so that
# modified-since validation code can work.
response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
cptools.validate_since()
if content_type is None:
# Set content-type based on filename extension
ext = ""
i = path.rfind('.')
if i != -1:
ext = path[i:].lower()
content_type = mimetypes.types_map.get(ext, None)
if content_type is not None:
response.headers['Content-Type'] = content_type
if debug:
cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')
cd = None
if disposition is not None:
if name is None:
name = os.path.basename(path)
cd = '%s; filename="%s"' % (disposition, name)
response.headers["Content-Disposition"] = cd
if debug:
cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')
# Set Content-Length and use an iterable (file object)
# this way CP won't load the whole file in memory
content_length = st.st_size
fileobj = open(path, 'rb')
return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
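# A minimal usage sketch (the path is hypothetical), e.g. from a handler:
#   return serve_file('/srv/files/report.pdf', 'application/pdf',
#                     'attachment', 'report.pdf')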
def serve_fileobj(fileobj, content_type=None, disposition=None, name=None,
debug=False):
"""Set status, headers, and body in order to serve the given file object.
The Content-Type header will be set to the content_type arg, if provided.
If disposition is not None, the Content-Disposition header will be set
to "<disposition>; filename=<name>". If name is None, 'filename' will
not be set. If disposition is None, no Content-Disposition header will
be written.
CAUTION: If the request contains a 'Range' header, one or more seek()s will
be performed on the file object. This may cause undesired behavior if
the file object is not seekable. It could also produce undesired results
if the caller set the read position of the file object prior to calling
serve_fileobj(), expecting that the data would be served starting from that
position.
"""
response = cherrypy.serving.response
try:
st = os.fstat(fileobj.fileno())
except AttributeError:
if debug:
cherrypy.log('os has no fstat attribute', 'TOOLS.STATIC')
content_length = None
else:
# Set the Last-Modified response header, so that
# modified-since validation code can work.
response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
cptools.validate_since()
content_length = st.st_size
if content_type is not None:
response.headers['Content-Type'] = content_type
if debug:
cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')
cd = None
if disposition is not None:
if name is None:
cd = disposition
else:
cd = '%s; filename="%s"' % (disposition, name)
response.headers["Content-Disposition"] = cd
if debug:
cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')
return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
def _serve_fileobj(fileobj, content_type, content_length, debug=False):
"""Internal. Set response.body to the given file object, perhaps ranged."""
response = cherrypy.serving.response
# HTTP/1.0 didn't have Range/Accept-Ranges headers, or the 206 code
request = cherrypy.serving.request
if request.protocol >= (1, 1):
response.headers["Accept-Ranges"] = "bytes"
r = httputil.get_ranges(request.headers.get('Range'), content_length)
if r == []:
response.headers['Content-Range'] = "bytes */%s" % content_length
message = "Invalid Range (first-byte-pos greater than Content-Length)"
if debug:
cherrypy.log(message, 'TOOLS.STATIC')
raise cherrypy.HTTPError(416, message)
if r:
if len(r) == 1:
# Return a single-part response.
start, stop = r[0]
if stop > content_length:
stop = content_length
r_len = stop - start
if debug:
cherrypy.log('Single part; start: %r, stop: %r' % (start, stop),
'TOOLS.STATIC')
response.status = "206 Partial Content"
response.headers['Content-Range'] = (
"bytes %s-%s/%s" % (start, stop - 1, content_length))
response.headers['Content-Length'] = r_len
fileobj.seek(start)
response.body = file_generator_limited(fileobj, r_len)
else:
# Return a multipart/byteranges response.
response.status = "206 Partial Content"
import mimetools
boundary = mimetools.choose_boundary()
ct = "multipart/byteranges; boundary=%s" % boundary
response.headers['Content-Type'] = ct
if "Content-Length" in response.headers:
# Delete Content-Length header so finalize() recalcs it.
del response.headers["Content-Length"]
def file_ranges():
# Apache compatibility:
yield "\r\n"
for start, stop in r:
if debug:
cherrypy.log('Multipart; start: %r, stop: %r' % (start, stop),
'TOOLS.STATIC')
yield "--" + boundary
yield "\r\nContent-type: %s" % content_type
yield ("\r\nContent-range: bytes %s-%s/%s\r\n\r\n"
% (start, stop - 1, content_length))
fileobj.seek(start)
for chunk in file_generator_limited(fileobj, stop - start):
yield chunk
yield "\r\n"
# Final boundary
yield "--" + boundary + "--"
# Apache compatibility:
yield "\r\n"
response.body = file_ranges()
return response.body
else:
if debug:
cherrypy.log('No byteranges requested', 'TOOLS.STATIC')
# Set Content-Length and use an iterable (file object)
# this way CP won't load the whole file in memory
response.headers['Content-Length'] = content_length
response.body = fileobj
return response.body
def serve_download(path, name=None):
"""Serve 'path' as an application/x-download attachment."""
# This is such a common idiom I felt it deserved its own wrapper.
return serve_file(path, "application/x-download", "attachment", name)
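# E.g. serve_download('/srv/files/archive.zip', name='archive.zip') (path
# illustrative) makes browsers offer a "Save as..." dialog for the file.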
def _attempt(filename, content_types, debug=False):
if debug:
cherrypy.log('Attempting %r (content_types %r)' %
(filename, content_types), 'TOOLS.STATICDIR')
try:
# you can set the content types for a
# complete directory per extension
content_type = None
if content_types:
r, ext = os.path.splitext(filename)
content_type = content_types.get(ext[1:], None)
serve_file(filename, content_type=content_type, debug=debug)
return True
except cherrypy.NotFound:
# If we didn't find the static file, continue handling the
# request. We might find a dynamic handler instead.
if debug:
cherrypy.log('NotFound', 'TOOLS.STATICFILE')
return False
def staticdir(section, dir, root="", match="", content_types=None, index="",
debug=False):
"""Serve a static resource from the given (root +) dir.
If 'match' is given, request.path_info will be searched for the given
regular expression before attempting to serve static content.
If content_types is given, it should be a Python dictionary of
{file-extension: content-type} pairs, where 'file-extension' is
a string (e.g. "gif") and 'content-type' is the value to write
out in the Content-Type response header (e.g. "image/gif").
If 'index' is provided, it should be the (relative) name of a file to
serve for directory requests. For example, if the dir argument is
'/home/me', the Request-URI is 'myapp', and the index arg is
'index.html', the file '/home/me/myapp/index.html' will be sought.
"""
request = cherrypy.serving.request
if request.method not in ('GET', 'HEAD'):
if debug:
cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICDIR')
return False
if match and not re.search(match, request.path_info):
if debug:
cherrypy.log('request.path_info %r does not match pattern %r' %
(request.path_info, match), 'TOOLS.STATICDIR')
return False
# Allow the use of '~' to refer to a user's home directory.
dir = os.path.expanduser(dir)
# If dir is relative, make absolute using "root".
if not os.path.isabs(dir):
if not root:
msg = "Static dir requires an absolute dir (or root)."
if debug:
cherrypy.log(msg, 'TOOLS.STATICDIR')
raise ValueError(msg)
dir = os.path.join(root, dir)
# Determine where we are in the object tree relative to 'section'
# (where the static tool was defined).
if section == 'global':
section = "/"
section = section.rstrip(r"\/")
branch = request.path_info[len(section) + 1:]
branch = unquote(branch.lstrip(r"\/"))
# If branch is "", filename will end in a slash
filename = os.path.join(dir, branch)
if debug:
cherrypy.log('Checking file %r to fulfill %r' %
(filename, request.path_info), 'TOOLS.STATICDIR')
# There's a chance that the branch pulled from the URL might
# have ".." or similar uplevel attacks in it. Check that the final
# filename is a child of dir.
if not os.path.normpath(filename).startswith(os.path.normpath(dir)):
raise cherrypy.HTTPError(403) # Forbidden
handled = _attempt(filename, content_types)
if not handled:
# Check for an index file if a folder was requested.
if index:
handled = _attempt(os.path.join(filename, index), content_types)
if handled:
request.is_index = filename[-1] in (r"\/")
return handled
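# staticdir is normally enabled via config rather than called directly;
# an illustrative setup:
#   [/static]
#   tools.staticdir.on = True
#   tools.staticdir.root = '/var/www'
#   tools.staticdir.dir = 'static'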
def staticfile(filename, root=None, match="", content_types=None, debug=False):
"""Serve a static resource from the given (root +) filename.
If 'match' is given, request.path_info will be searched for the given
regular expression before attempting to serve static content.
If content_types is given, it should be a Python dictionary of
{file-extension: content-type} pairs, where 'file-extension' is
a string (e.g. "gif") and 'content-type' is the value to write
out in the Content-Type response header (e.g. "image/gif").
"""
request = cherrypy.serving.request
if request.method not in ('GET', 'HEAD'):
if debug:
cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICFILE')
return False
if match and not re.search(match, request.path_info):
if debug:
cherrypy.log('request.path_info %r does not match pattern %r' %
(request.path_info, match), 'TOOLS.STATICFILE')
return False
# If filename is relative, make absolute using "root".
if not os.path.isabs(filename):
if not root:
msg = "Static tool requires an absolute filename (got '%s')." % filename
if debug:
cherrypy.log(msg, 'TOOLS.STATICFILE')
raise ValueError(msg)
filename = os.path.join(root, filename)
return _attempt(filename, content_types, debug=debug)
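# Illustrative config for serving a single file:
#   [/favicon.ico]
#   tools.staticfile.on = True
#   tools.staticfile.filename = '/var/www/favicon.ico'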
| gpl-3.0 | 3,149,126,068,219,001,300 | 39.976879 | 90 | 0.598886 | false |
havt/odoo | addons/account/product.py | 374 | 2897 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class product_category(osv.osv):
_inherit = "product.category"
_columns = {
'property_account_income_categ': fields.property(
type='many2one',
relation='account.account',
string="Income Account",
help="This account will be used for invoices to value sales."),
'property_account_expense_categ': fields.property(
type='many2one',
relation='account.account',
string="Expense Account",
help="This account will be used for invoices to value expenses."),
}
#----------------------------------------------------------
# Products
#----------------------------------------------------------
class product_template(osv.osv):
_inherit = "product.template"
_columns = {
'taxes_id': fields.many2many('account.tax', 'product_taxes_rel',
'prod_id', 'tax_id', 'Customer Taxes',
domain=[('parent_id','=',False),('type_tax_use','in',['sale','all'])]),
'supplier_taxes_id': fields.many2many('account.tax',
'product_supplier_taxes_rel', 'prod_id', 'tax_id',
'Supplier Taxes', domain=[('parent_id', '=', False),('type_tax_use','in',['purchase','all'])]),
'property_account_income': fields.property(
type='many2one',
relation='account.account',
string="Income Account",
help="This account will be used for invoices instead of the default one to value sales for the current product."),
'property_account_expense': fields.property(
type='many2one',
relation='account.account',
string="Expense Account",
help="This account will be used for invoices instead of the default one to value expenses for the current product."),
}
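# Illustration (added sketch, not actual module code): invoice creation
# resolves the income account with a product-to-category fallback, roughly:
#   account = product.property_account_income or \
#             product.categ_id.property_account_income_categ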
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -4,300,921,492,807,791,600 | 43.569231 | 129 | 0.577149 | false |
open-io/oio-swift | tests/unit/controllers/test_container.py | 1 | 5114 | import unittest
from mock import patch
from mock import MagicMock as Mock
from oioswift.common.ring import FakeRing
from oioswift import server as proxy_server
from swift.common.swob import Request
from swift.proxy.controllers.base import headers_to_container_info
from swift.common.request_helpers import get_sys_meta_prefix
from tests.unit import FakeStorageAPI, debug_logger
class TestContainerController(unittest.TestCase):
def setUp(self):
self.logger = debug_logger('proxy-server')
self.storage = FakeStorageAPI(logger=self.logger)
self.storage.account.account_show = Mock(return_value={
'containers': 0,
'objects': 0,
'bytes': 0,
'ctime': 0,
'metadata': {}})
self.account_info = {
'status': 200,
'container_count': '10',
'total_object_count': '100',
'bytes': '1000',
'meta': {},
'sysmeta': {}
}
self.app = proxy_server.Application(
{'sds_namespace': 'NS'}, account_ring=FakeRing(),
container_ring=FakeRing(), storage=self.storage,
logger=self.logger)
class FakeAccountInfoContainerController(
proxy_server.ContainerController):
def account_info(controller, *args, **kwargs):
patch_path = 'swift.proxy.controllers.base.get_info'
with patch(patch_path) as mock_get_info:
mock_get_info.return_value = dict(self.account_info)
return super(FakeAccountInfoContainerController,
controller).account_info(
*args, **kwargs)
_orig_get_controller = self.app.get_controller
def wrapped_get_controller(*args, **kwargs):
with patch('swift.proxy.server.ContainerController',
new=FakeAccountInfoContainerController):
return _orig_get_controller(*args, **kwargs)
self.app.get_controller = wrapped_get_controller
def test_container_info(self):
req = Request.blank('/v1/a/c', {'PATH_INFO': '/v1/a/c'}, method='HEAD')
self.storage.container.container_show = Mock(return_value={})
resp = req.get_response(self.app)
self.assertEqual(2, resp.status_int // 100)
self.assertIn('swift.infocache', resp.environ)
self.assertIn('container/a/c', resp.environ['swift.infocache'])
self.assertEqual(
headers_to_container_info(resp.headers, resp.status_int),
resp.environ['swift.infocache']['container/a/c'])
def test_swift_owner(self):
owner_headers = {'properties': {
'x-container-read': 'value', 'x-container-write': 'value',
'x-container-sync-key': 'value', 'x-container-sync-to': 'value'}}
req = Request.blank('/v1/a/c', method='HEAD')
self.storage.container.container_get_properties = Mock(
return_value=owner_headers)
resp = req.get_response(self.app)
self.assertEqual(2, resp.status_int // 100)
for k in owner_headers['properties']:
self.assertTrue(k not in resp.headers)
req = Request.blank(
'/v1/a/c', environ={'swift_owner': True}, method='HEAD')
self.storage.container.container_get_properties = Mock(
return_value=owner_headers)
resp = req.get_response(self.app)
self.assertEqual(2, resp.status_int // 100)
for k in owner_headers['properties']:
self.assertIn(k, resp.headers)
def test_sys_meta_headers_PUT(self):
sys_meta_key = '%stest' % get_sys_meta_prefix('container')
sys_meta_key = sys_meta_key.title()
user_meta_key = 'X-Container-Meta-Test'
hdrs_in = {sys_meta_key: 'foo',
user_meta_key: 'bar',
'x-timestamp': '1.0'}
req = Request.blank('/v1/a/c', headers=hdrs_in, method='PUT')
self.storage.container.container_create = Mock()
req.get_response(self.app)
meta = \
self.storage.container.container_create.call_args[1]['properties']
self.assertEqual(meta[sys_meta_key], 'foo')
self.assertEqual(meta[user_meta_key], 'bar')
def test_sys_meta_headers_POST(self):
# check that headers in sys meta namespace make it through
# the container controller
sys_meta_key = '%stest' % get_sys_meta_prefix('container')
sys_meta_key = sys_meta_key.title()
user_meta_key = 'X-Container-Meta-Test'
hdrs_in = {sys_meta_key: 'foo',
user_meta_key: 'bar',
'x-timestamp': '1.0'}
req = Request.blank('/v1/a/c', headers=hdrs_in, method='POST')
self.storage.container.container_set_properties = Mock(
return_value="")
req.get_response(self.app)
meta = self.storage.container.container_set_properties.call_args[0][2]
self.assertEqual(meta[sys_meta_key], 'foo')
self.assertEqual(meta[user_meta_key], 'bar')
| apache-2.0 | -9,132,456,947,462,701,000 | 41.264463 | 79 | 0.592687 | false |
libcrosswind/libcrosswind | platform/windows/compilers/x64/TDM-GCC-64/gdb64/bin/lib/idlelib/idle_test/test_calltips.py | 27 | 7140 | import unittest
import idlelib.CallTips as ct
CTi = ct.CallTips() # needed for get_entity test in 2.7
import textwrap
import types
import warnings
default_tip = ''
# Test Class TC is used in multiple get_argspec test methods
class TC(object):
'doc'
tip = "(ai=None, *args)"
def __init__(self, ai=None, *b): 'doc'
__init__.tip = "(self, ai=None, *args)"
def t1(self): 'doc'
t1.tip = "(self)"
def t2(self, ai, b=None): 'doc'
t2.tip = "(self, ai, b=None)"
def t3(self, ai, *args): 'doc'
t3.tip = "(self, ai, *args)"
def t4(self, *args): 'doc'
t4.tip = "(self, *args)"
def t5(self, ai, b=None, *args, **kw): 'doc'
t5.tip = "(self, ai, b=None, *args, **kwargs)"
def t6(no, self): 'doc'
t6.tip = "(no, self)"
def __call__(self, ci): 'doc'
__call__.tip = "(self, ci)"
# attaching .tip to wrapped methods does not work
@classmethod
def cm(cls, a): 'doc'
@staticmethod
def sm(b): 'doc'
tc = TC()
signature = ct.get_arg_text # 2.7 and 3.x use different functions
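# Illustration (added, doctest-style; assumes docstrings are not stripped):
#   >>> signature(TC.t2)
#   '(self, ai, b=None)\ndoc'
#   >>> signature(tc.t1)    # bound method: the 'self' parameter is removed
#   '()\ndoc'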
class Get_signatureTest(unittest.TestCase):
# The signature function must return a string, even if blank.
# Test a variety of objects to be sure that none cause it to raise
# (quite aside from getting as correct an answer as possible).
# The tests of builtins may break if the docstrings change,
# but a red buildbot is better than a user crash (as has happened).
# For a simple mismatch, change the expected output to the actual.
def test_builtins(self):
# 2.7 puts '()\n' where 3.x does not, other minor differences
# Python class that inherits builtin methods
class List(list): "List() doc"
# Simulate builtin with no docstring for default argspec test
class SB: __call__ = None
def gtest(obj, out):
self.assertEqual(signature(obj), out)
if List.__doc__ is not None:
gtest(List, '()\n' + List.__doc__)
gtest(list.__new__,
'T.__new__(S, ...) -> a new object with type S, a subtype of T')
gtest(list.__init__,
'x.__init__(...) initializes x; see help(type(x)) for signature')
append_doc = "L.append(object) -- append object to end"
gtest(list.append, append_doc)
gtest([].append, append_doc)
gtest(List.append, append_doc)
gtest(types.MethodType, '()\ninstancemethod(function, instance, class)')
gtest(SB(), default_tip)
def test_signature_wrap(self):
# This is also a test of an old-style class
if textwrap.TextWrapper.__doc__ is not None:
self.assertEqual(signature(textwrap.TextWrapper), '''\
(width=70, initial_indent='', subsequent_indent='', expand_tabs=True,
replace_whitespace=True, fix_sentence_endings=False, break_long_words=True,
drop_whitespace=True, break_on_hyphens=True)''')
def test_docline_truncation(self):
def f(): pass
f.__doc__ = 'a'*300
self.assertEqual(signature(f), '()\n' + 'a' * (ct._MAX_COLS-3) + '...')
def test_multiline_docstring(self):
# Test fewer lines than max.
self.assertEqual(signature(list),
"()\nlist() -> new empty list\n"
"list(iterable) -> new list initialized from iterable's items")
# Test max lines and line (currently) too long.
def f():
pass
s = 'a\nb\nc\nd\n'
f.__doc__ = s + 300 * 'e' + 'f'
self.assertEqual(signature(f),
'()\n' + s + (ct._MAX_COLS - 3) * 'e' + '...')
def test_functions(self):
def t1(): 'doc'
t1.tip = "()"
def t2(a, b=None): 'doc'
t2.tip = "(a, b=None)"
def t3(a, *args): 'doc'
t3.tip = "(a, *args)"
def t4(*args): 'doc'
t4.tip = "(*args)"
def t5(a, b=None, *args, **kwds): 'doc'
t5.tip = "(a, b=None, *args, **kwargs)"
doc = '\ndoc' if t1.__doc__ is not None else ''
for func in (t1, t2, t3, t4, t5, TC):
self.assertEqual(signature(func), func.tip + doc)
def test_methods(self):
doc = '\ndoc' if TC.__doc__ is not None else ''
for meth in (TC.t1, TC.t2, TC.t3, TC.t4, TC.t5, TC.t6, TC.__call__):
self.assertEqual(signature(meth), meth.tip + doc)
self.assertEqual(signature(TC.cm), "(a)" + doc)
self.assertEqual(signature(TC.sm), "(b)" + doc)
def test_bound_methods(self):
# test that first parameter is correctly removed from argspec
doc = '\ndoc' if TC.__doc__ is not None else ''
for meth, mtip in ((tc.t1, "()"), (tc.t4, "(*args)"), (tc.t6, "(self)"),
(tc.__call__, '(ci)'), (tc, '(ci)'), (TC.cm, "(a)"),):
self.assertEqual(signature(meth), mtip + doc)
def test_starred_parameter(self):
# test that starred first parameter is *not* removed from argspec
class C:
def m1(*args): pass
def m2(**kwds): pass
def f1(args, kwargs, *a, **k): pass
def f2(args, kwargs, args1, kwargs1, *a, **k): pass
c = C()
self.assertEqual(signature(C.m1), '(*args)')
self.assertEqual(signature(c.m1), '(*args)')
self.assertEqual(signature(C.m2), '(**kwargs)')
self.assertEqual(signature(c.m2), '(**kwargs)')
self.assertEqual(signature(f1), '(args, kwargs, *args1, **kwargs1)')
self.assertEqual(signature(f2),
'(args, kwargs, args1, kwargs1, *args2, **kwargs2)')
def test_no_docstring(self):
def nd(s): pass
TC.nd = nd
self.assertEqual(signature(nd), "(s)")
self.assertEqual(signature(TC.nd), "(s)")
self.assertEqual(signature(tc.nd), "()")
def test_attribute_exception(self):
class NoCall(object):
def __getattr__(self, name):
raise BaseException
class Call(NoCall):
def __call__(self, ci):
pass
for meth, mtip in ((NoCall, '()'), (Call, '()'),
(NoCall(), ''), (Call(), '(ci)')):
self.assertEqual(signature(meth), mtip)
def test_non_callables(self):
for obj in (0, 0.0, '0', b'0', [], {}):
self.assertEqual(signature(obj), '')
class Get_entityTest(unittest.TestCase):
# In 3.x, get_entity changed from 'instance method' to module function
# since 'self' not used. Use dummy instance until change 2.7 also.
def test_bad_entity(self):
self.assertIsNone(CTi.get_entity('1//0'))
def test_good_entity(self):
self.assertIs(CTi.get_entity('int'), int)
class Py2Test(unittest.TestCase):
def test_paramtuple_float(self):
# 18539: (a,b) becomes '.0' in code object; change that but not 0.0
with warnings.catch_warnings():
# Suppess message of py3 deprecation of parameter unpacking
warnings.simplefilter("ignore")
exec "def f((a,b), c=0.0): pass"
self.assertEqual(signature(f), '(<tuple>, c=0.0)')
if __name__ == '__main__':
unittest.main(verbosity=2, exit=False)
| gpl-3.0 | 5,536,222,590,827,771,000 | 37.594595 | 82 | 0.558683 | false |
mogoweb/chromium-crosswalk | remoting/tools/verify_resources.py | 25 | 5148 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Verifies that GRD resource files define all the strings used by a given
set of source files. For file formats where it is not possible to infer which
strings represent message identifiers, localized strings should be explicitly
annotated with the string "i18n-content", for example:
LocalizeString(/*i18n-content*/"PRODUCT_NAME");
This script also recognises localized strings in HTML and manifest.json files:
HTML: i18n-content="PRODUCT_NAME"
or i18n-value-name-1="BUTTON_NAME"
or i18n-title="TOOLTIP_NAME"
manifest.json: __MSG_PRODUCT_NAME__
Note that these forms must be exact; extra spaces are not permitted, though
either single or double quotes are recognized.
In addition, the script checks that all the messages are still in use; if
this is not the case then a warning is issued, but the script still succeeds.
"""
import json
import os
import optparse
import re
import sys
import xml.dom.minidom as minidom
WARNING_MESSAGE = """
To remove this warning, either remove the unused tags from
resource files, add the files that use the tags listed above to
remoting.gyp, or annotate existing uses of those tags with the
prefix /*i18n-content*/
"""
def LoadTagsFromGrd(filename):
xml = minidom.parse(filename)
tags = []
msgs_and_structs = xml.getElementsByTagName("message")
msgs_and_structs.extend(xml.getElementsByTagName("structure"))
for res in msgs_and_structs:
name = res.getAttribute("name")
if not name or not name.startswith("IDR_"):
raise Exception("Tag name doesn't start with IDR_: %s" % name)
tags.append(name[4:])
return tags
def ExtractTagFromLine(file_type, line):
"""Extract a tag from a line of HTML, C++, JS or JSON."""
if file_type == "html":
# HTML-style (tags)
m = re.search('i18n-content=[\'"]([^\'"]*)[\'"]', line)
if m: return m.group(1)
# HTML-style (titles)
m = re.search('i18n-title=[\'"]([^\'"]*)[\'"]', line)
if m: return m.group(1)
# HTML-style (substitutions)
m = re.search('i18n-value-name-[1-9]=[\'"]([^\'"]*)[\'"]', line)
if m: return m.group(1)
elif file_type == 'js':
# Javascript style
m = re.search('/\*i18n-content\*/[\'"]([^\`"]*)[\'"]', line)
if m: return m.group(1)
elif file_type == 'cc' or file_type == 'mm':
# C++ style
m = re.search('IDR_([A-Z0-9_]*)', line)
if m: return m.group(1)
m = re.search('/\*i18n-content\*/["]([^\`"]*)["]', line)
if m: return m.group(1)
elif file_type == 'json':
# Manifest style
m = re.search('__MSG_(.*)__', line)
if m: return m.group(1)
elif file_type == 'jinja2':
# Jinja2 template file
m = re.search('\{\%\s+trans\s+\%\}([A-Z0-9_]+)\{\%\s+endtrans\s+\%\}', line)
if m: return m.group(1)
return None
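# Illustration (added, doctest-style) of the forms recognised above:
#   >>> ExtractTagFromLine('html', '<div i18n-content="PRODUCT_NAME">')
#   'PRODUCT_NAME'
#   >>> ExtractTagFromLine('cc', 'GetString(IDR_PRODUCT_NAME);')
#   'PRODUCT_NAME'
#   >>> ExtractTagFromLine('json', '"name": "__MSG_PRODUCT_NAME__"')
#   'PRODUCT_NAME'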
def VerifyFile(filename, messages, used_tags):
"""
Parse |filename|, looking for tags and report any that are not included in
|messages|. Return True if all tags are present and correct, or False if
any are missing. If no tags are found, print a warning message and return
True.
"""
base_name, extension = os.path.splitext(filename)
extension = extension[1:]
if extension not in ['js', 'cc', 'html', 'json', 'jinja2', 'mm']:
raise Exception("Unknown file type: %s" % extension)
result = True
matches = False
f = open(filename, 'r')
lines = f.readlines()
for i in xrange(0, len(lines)):
tag = ExtractTagFromLine(extension, lines[i])
if tag:
tag = tag.upper()
used_tags.add(tag)
matches = True
if not tag in messages:
result = False
print '%s/%s:%d: error: Undefined tag: %s' % \
(os.getcwd(), filename, i + 1, tag)
if not matches:
print '%s/%s:0: warning: No tags found' % (os.getcwd(), filename)
f.close()
return result
def main():
parser = optparse.OptionParser(
usage='Usage: %prog [options...] [source_file...]')
parser.add_option('-t', '--touch', dest='touch',
help='File to touch when finished.')
parser.add_option('-r', '--grd', dest='grd', action='append',
help='grd file')
options, args = parser.parse_args()
if not options.touch:
print '-t is not specified.'
return 1
if len(options.grd) == 0 or len(args) == 0:
print 'At least one GRD file needs to be specified.'
return 1
resources = []
for f in options.grd:
resources.extend(LoadTagsFromGrd(f))
used_tags = set([])
exit_code = 0
for f in args:
if not VerifyFile(f, resources, used_tags):
exit_code = 1
warnings = False
for tag in resources:
if tag not in used_tags:
print ('%s/%s:0: warning: %s is defined but not used') % \
(os.getcwd(), sys.argv[2], tag)
warnings = True
if warnings:
print WARNING_MESSAGE
if exit_code == 0:
f = open(options.touch, 'a')
f.close()
os.utime(options.touch, None)
return exit_code
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | 7,024,852,471,400,968,000 | 30.582822 | 80 | 0.632284 | false |
hydroshare/hydroshare | theme/templatetags/comments_tags.py | 1 | 3173 |
from future.builtins import int
from collections import defaultdict
from django.core.urlresolvers import reverse
from django.template.defaultfilters import linebreaksbr, urlize
from mezzanine import template
from mezzanine.conf import settings
from theme.forms import ThreadedCommentForm
from mezzanine.generic.models import ThreadedComment
from mezzanine.utils.importing import import_dotted_path
register = template.Library()
@register.inclusion_tag("generic/includes/comments.html", takes_context=True)
def comments_for(context, obj):
"""
Provides a generic context variable name for the object that
comments are being rendered for.
"""
form = ThreadedCommentForm(context["request"], obj, auto_id=True)
try:
context["posted_comment_form"]
except KeyError:
context["posted_comment_form"] = form
context["unposted_comment_form"] = form
context["comment_url"] = reverse("comment")
context["object_for_comments"] = obj
return context.flatten()
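# Illustration (added): in a template this tag is invoked once per object,
# e.g. {% comments_for blog_post %}, rendering the comment thread and the
# posting form for that object.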
@register.inclusion_tag("generic/includes/comment.html", takes_context=True)
def comment_thread(context, parent):
"""
Return a list of child comments for the given parent, storing all
comments in a dict in the context when first called, using parents
as keys for retrieval on subsequent recursive calls from the
comments template.
"""
if "all_comments" not in context:
comments = defaultdict(list)
if "request" in context and context["request"].user.is_staff:
comments_queryset = parent.comments.all()
else:
comments_queryset = parent.comments.visible()
for comment in comments_queryset.select_related("user"):
comments[comment.replied_to_id].append(comment)
context["all_comments"] = comments
parent_id = parent.id if isinstance(parent, ThreadedComment) else None
try:
replied_to = int(context["request"].POST["replied_to"])
except KeyError:
replied_to = 0
context.update({
"comments_for_thread": context["all_comments"].get(parent_id, []),
"no_comments": parent_id is None and not context["all_comments"],
"replied_to": replied_to,
})
return context.flatten()
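# Illustration (added sketch, ids invented): with comments c1 and c2 at top
# level and c3 replying to c1, the cache built above is
#   context["all_comments"] == {None: [c1, c2], 1: [c3]}
# so each recursive template call looks up children by the parent's id.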
@register.inclusion_tag("admin/includes/recent_comments.html", takes_context=True)
def recent_comments(context):
"""
Dashboard widget for displaying recent comments.
"""
latest = context["settings"].COMMENTS_NUM_LATEST
comments = ThreadedComment.objects.all().select_related("user")
context["comments"] = comments.order_by("-id")[:latest]
return context.flatten()
@register.filter
def comment_filter(comment_text):
"""
Passed comment text to be rendered through the function defined
by the ``COMMENT_FILTER`` setting. If no function is defined
(the default), Django's ``linebreaksbr`` filter is used.
"""
filter_func = settings.COMMENT_FILTER
if not filter_func:
def filter_func(s):
return linebreaksbr(s, autoescape=True)
elif not callable(filter_func):
filter_func = import_dotted_path(filter_func)
return filter_func(comment_text)
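# Illustration (added): a project can swap in its own filter via settings,
# e.g. (hypothetical dotted path):
#   COMMENT_FILTER = "myproject.utils.render_comment_markdown"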
| bsd-3-clause | -8,272,091,699,591,252,000 | 34.255556 | 82 | 0.698078 | false |
gilbertpilz/solum | solum/tests/objects/test_pipeline.py | 2 | 2831 | # Copyright 2014 - Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from solum.objects import registry
from solum.objects.sqlalchemy import execution
from solum.objects.sqlalchemy import pipeline
from solum.tests import base
from solum.tests import utils
class TestPipeline(base.BaseTestCase):
def setUp(self):
super(TestPipeline, self).setUp()
self.db = self.useFixture(utils.Database())
self.ctx = utils.dummy_context()
self.data = [{'project_id': 'project_id_blah',
'uuid': 'ce43e347f0b0422825245b3e5f140a81cef6e65b',
'user_id': 'fred',
'name': 'pipeline1',
'description': 'test pipeline',
'trigger_id': 'trigger-uuid-1234',
'tags': 'pipeline tags',
'plan_id': 'plan_id_1'}]
utils.create_models_from_data(pipeline.Pipeline, self.data, self.ctx)
def test_objects_registered(self):
self.assertTrue(registry.Pipeline)
self.assertTrue(registry.PipelineList)
def test_get_all(self):
lst = pipeline.PipelineList()
self.assertEqual(1, len(lst.get_all(self.ctx)))
def test_check_data(self):
ta = pipeline.Pipeline().get_by_id(self.ctx, self.data[0]['id'])
for key, value in self.data[0].items():
self.assertEqual(value, getattr(ta, key))
def test_check_data_by_trigger_id(self):
ta = pipeline.Pipeline().get_by_trigger_id(self.ctx, self.data[0][
'trigger_id'])
for key, value in self.data[0].items():
self.assertEqual(value, getattr(ta, key))
def test_last_execution(self):
ta = pipeline.Pipeline().get_by_id(self.ctx, self.data[0]['id'])
# add executions
ex1 = execution.Execution()
ex1.uuid = 'first'
ex1.pipeline_id = ta.id
ex1.create(self.ctx)
ex2 = execution.Execution()
ex2.uuid = 'second'
ex2.pipeline_id = ta.id
ex2.create(self.ctx)
extest = ta.last_execution()
self.assertEqual('second', extest.uuid)
def test_last_execution_none(self):
ta = pipeline.Pipeline().get_by_id(self.ctx, self.data[0]['id'])
extest = ta.last_execution()
self.assertIsNone(extest)
| apache-2.0 | 1,428,878,072,190,870,300 | 36.25 | 77 | 0.626987 | false |
PLyczkowski/Sticky-Keymap | 2.74/python/lib/tarfile.py | 80 | 90965 | #!/usr/bin/env python3
#-------------------------------------------------------------------
# tarfile.py
#-------------------------------------------------------------------
# Copyright (C) 2002 Lars Gustaebel <[email protected]>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""Read from and write to tar format archives.
"""
version = "0.9.0"
__author__ = "Lars Gust\u00e4bel ([email protected])"
__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $"
__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $"
__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend."
#---------
# Imports
#---------
import sys
import os
import io
import shutil
import stat
import time
import struct
import copy
import re
try:
import grp, pwd
except ImportError:
grp = pwd = None
# os.symlink on Windows prior to 6.0 raises NotImplementedError
symlink_exception = (AttributeError, NotImplementedError)
try:
# OSError (winerror=1314) will be raised if the caller does not hold the
# SeCreateSymbolicLinkPrivilege privilege
symlink_exception += (OSError,)
except NameError:
pass
# from tarfile import *
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
#---------------------------------------------------------
# tar constants
#---------------------------------------------------------
NUL = b"\0" # the null character
BLOCKSIZE = 512 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20 # length of records
GNU_MAGIC = b"ustar  \0" # magic gnu tar string (8-byte magic + version)
POSIX_MAGIC = b"ustar\x0000" # magic posix tar string
LENGTH_NAME = 100 # maximum length of a filename
LENGTH_LINK = 100 # maximum length of a linkname
LENGTH_PREFIX = 155 # maximum length of the prefix field
REGTYPE = b"0" # regular file
AREGTYPE = b"\0" # regular file
LNKTYPE = b"1" # link (inside tarfile)
SYMTYPE = b"2" # symbolic link
CHRTYPE = b"3" # character special device
BLKTYPE = b"4" # block special device
DIRTYPE = b"5" # directory
FIFOTYPE = b"6" # fifo special device
CONTTYPE = b"7" # contiguous file
GNUTYPE_LONGNAME = b"L" # GNU tar longname
GNUTYPE_LONGLINK = b"K" # GNU tar longlink
GNUTYPE_SPARSE = b"S" # GNU tar sparse file
XHDTYPE = b"x" # POSIX.1-2001 extended header
XGLTYPE = b"g" # POSIX.1-2001 global header
SOLARIS_XHDTYPE = b"X" # Solaris extended header
USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1 # GNU tar format
PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT
#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
SYMTYPE, DIRTYPE, FIFOTYPE,
CONTTYPE, CHRTYPE, BLKTYPE,
GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# File types that will be treated as a regular file.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
CONTTYPE, GNUTYPE_SPARSE)
# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
"uid", "gid", "uname", "gname")
# Fields from a pax header that are affected by hdrcharset.
PAX_NAME_FIELDS = {"path", "linkpath", "uname", "gname"}
# Fields in a pax header that are numbers, all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
"atime": float,
"ctime": float,
"mtime": float,
"uid": int,
"gid": int,
"size": int
}
#---------------------------------------------------------
# initialization
#---------------------------------------------------------
if os.name in ("nt", "ce"):
ENCODING = "utf-8"
else:
ENCODING = sys.getfilesystemencoding()
#---------------------------------------------------------
# Some useful functions
#---------------------------------------------------------
def stn(s, length, encoding, errors):
"""Convert a string to a null-terminated bytes object.
"""
s = s.encode(encoding, errors)
return s[:length] + (length - len(s)) * NUL
def nts(s, encoding, errors):
"""Convert a null-terminated bytes object to a string.
"""
p = s.find(b"\0")
if p != -1:
s = s[:p]
return s.decode(encoding, errors)
def nti(s):
"""Convert a number field to a python number.
"""
# There are two possible encodings for a number field, see
# itn() below.
if s[0] in (0o200, 0o377):
n = 0
for i in range(len(s) - 1):
n <<= 8
n += s[i + 1]
if s[0] == 0o377:
n = -(256 ** (len(s) - 1) - n)
else:
try:
n = int(nts(s, "ascii", "strict") or "0", 8)
except ValueError:
raise InvalidHeaderError("invalid header")
return n
def itn(n, digits=8, format=DEFAULT_FORMAT):
"""Convert a python number to a number field.
"""
# POSIX 1003.1-1988 requires numbers to be encoded as a string of
# octal digits followed by a null-byte, this allows values up to
# (8**(digits-1))-1. GNU tar allows storing numbers greater than
# that if necessary. A leading 0o200 or 0o377 byte indicate this
# particular encoding, the following digits-1 bytes are a big-endian
# base-256 representation. This allows values up to (256**(digits-1))-1.
# A 0o200 byte indicates a positive number, a 0o377 byte a negative
# number.
if 0 <= n < 8 ** (digits - 1):
s = bytes("%0*o" % (digits - 1, int(n)), "ascii") + NUL
elif format == GNU_FORMAT and -256 ** (digits - 1) <= n < 256 ** (digits - 1):
if n >= 0:
s = bytearray([0o200])
else:
s = bytearray([0o377])
n = 256 ** digits + n
for i in range(digits - 1):
s.insert(1, n & 0o377)
n >>= 8
else:
raise ValueError("overflow in number field")
return s
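# Illustration (added, doctest-style): both encodings round-trip through
# nti() above:
#   >>> itn(0o755)
#   b'0000755\x00'
#   >>> itn(8 ** 7, 8, GNU_FORMAT)     # too large for 7 octal digits
#   bytearray(b'\x80\x00\x00\x00\x00 \x00\x00')
#   >>> nti(itn(8 ** 7, 8, GNU_FORMAT))
#   2097152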
def calc_chksums(buf):
"""Calculate the checksum for a member's header by summing up all
characters except for the chksum field which is treated as if
it was filled with spaces. According to the GNU tar sources,
some tars (Sun and NeXT) calculate chksum with signed char,
which will be different if there are chars in the buffer with
the high bit set. So we calculate two checksums, unsigned and
signed.
"""
unsigned_chksum = 256 + sum(struct.unpack_from("148B8x356B", buf))
signed_chksum = 256 + sum(struct.unpack_from("148b8x356b", buf))
return unsigned_chksum, signed_chksum
def copyfileobj(src, dst, length=None):
"""Copy length bytes from fileobj src to fileobj dst.
If length is None, copy the entire content.
"""
if length == 0:
return
if length is None:
shutil.copyfileobj(src, dst)
return
BUFSIZE = 16 * 1024
blocks, remainder = divmod(length, BUFSIZE)
for b in range(blocks):
buf = src.read(BUFSIZE)
if len(buf) < BUFSIZE:
raise OSError("end of file reached")
dst.write(buf)
if remainder != 0:
buf = src.read(remainder)
if len(buf) < remainder:
raise OSError("end of file reached")
dst.write(buf)
return
def filemode(mode):
"""Deprecated in this location; use stat.filemode."""
import warnings
warnings.warn("deprecated in favor of stat.filemode",
DeprecationWarning, 2)
return stat.filemode(mode)
def _safe_print(s):
encoding = getattr(sys.stdout, 'encoding', None)
if encoding is not None:
s = s.encode(encoding, 'backslashreplace').decode(encoding)
print(s, end=' ')
class TarError(Exception):
"""Base exception."""
pass
class ExtractError(TarError):
"""General exception for extract errors."""
pass
class ReadError(TarError):
"""Exception for unreadable tar archives."""
pass
class CompressionError(TarError):
"""Exception for unavailable compression methods."""
pass
class StreamError(TarError):
"""Exception for unsupported operations on stream-like TarFiles."""
pass
class HeaderError(TarError):
"""Base exception for header errors."""
pass
class EmptyHeaderError(HeaderError):
"""Exception for empty headers."""
pass
class TruncatedHeaderError(HeaderError):
"""Exception for truncated headers."""
pass
class EOFHeaderError(HeaderError):
"""Exception for end of file headers."""
pass
class InvalidHeaderError(HeaderError):
"""Exception for invalid headers."""
pass
class SubsequentHeaderError(HeaderError):
"""Exception for missing and invalid extended headers."""
pass
#---------------------------
# internal stream interface
#---------------------------
class _LowLevelFile:
"""Low-level file object. Supports reading and writing.
It is used instead of a regular file object for streaming
access.
"""
def __init__(self, name, mode):
mode = {
"r": os.O_RDONLY,
"w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
}[mode]
if hasattr(os, "O_BINARY"):
mode |= os.O_BINARY
self.fd = os.open(name, mode, 0o666)
def close(self):
os.close(self.fd)
def read(self, size):
return os.read(self.fd, size)
def write(self, s):
os.write(self.fd, s)
class _Stream:
"""Class that serves as an adapter between TarFile and
a stream-like object. The stream-like object only
needs to have a read() or write() method and is accessed
blockwise. Use of gzip or bzip2 compression is possible.
A stream-like object could be for example: sys.stdin,
sys.stdout, a socket, a tape device etc.
_Stream is intended to be used only internally.
"""
def __init__(self, name, mode, comptype, fileobj, bufsize):
"""Construct a _Stream object.
"""
self._extfileobj = True
if fileobj is None:
fileobj = _LowLevelFile(name, mode)
self._extfileobj = False
if comptype == '*':
# Enable transparent compression detection for the
# stream interface
fileobj = _StreamProxy(fileobj)
comptype = fileobj.getcomptype()
self.name = name or ""
self.mode = mode
self.comptype = comptype
self.fileobj = fileobj
self.bufsize = bufsize
self.buf = b""
self.pos = 0
self.closed = False
try:
if comptype == "gz":
try:
import zlib
except ImportError:
raise CompressionError("zlib module is not available")
self.zlib = zlib
self.crc = zlib.crc32(b"")
if mode == "r":
self._init_read_gz()
self.exception = zlib.error
else:
self._init_write_gz()
elif comptype == "bz2":
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if mode == "r":
self.dbuf = b""
self.cmp = bz2.BZ2Decompressor()
self.exception = OSError
else:
self.cmp = bz2.BZ2Compressor()
elif comptype == "xz":
try:
import lzma
except ImportError:
raise CompressionError("lzma module is not available")
if mode == "r":
self.dbuf = b""
self.cmp = lzma.LZMADecompressor()
self.exception = lzma.LZMAError
else:
self.cmp = lzma.LZMACompressor()
elif comptype != "tar":
raise CompressionError("unknown compression type %r" % comptype)
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
def __del__(self):
if hasattr(self, "closed") and not self.closed:
self.close()
def _init_write_gz(self):
"""Initialize for writing with gzip compression.
"""
self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
-self.zlib.MAX_WBITS,
self.zlib.DEF_MEM_LEVEL,
0)
timestamp = struct.pack("<L", int(time.time()))
self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
if self.name.endswith(".gz"):
self.name = self.name[:-3]
# RFC1952 says we must use ISO-8859-1 for the FNAME field.
self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
def write(self, s):
"""Write string s to the stream.
"""
if self.comptype == "gz":
self.crc = self.zlib.crc32(s, self.crc)
self.pos += len(s)
if self.comptype != "tar":
s = self.cmp.compress(s)
self.__write(s)
def __write(self, s):
"""Write string s to the stream if a whole new block
is ready to be written.
"""
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
def close(self):
"""Close the _Stream object. No operation should be
done on it afterwards.
"""
if self.closed:
return
if self.mode == "w" and self.comptype != "tar":
self.buf += self.cmp.flush()
if self.mode == "w" and self.buf:
self.fileobj.write(self.buf)
self.buf = b""
if self.comptype == "gz":
# The native zlib crc is an unsigned 32-bit integer, but
# the Python wrapper implicitly casts that to a signed C
# long. So, on a 32-bit box self.crc may "look negative",
# while the same crc on a 64-bit box may "look positive".
# To avoid irksome warnings from the `struct` module, force
# it to look positive on all boxes.
self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff))
self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def _init_read_gz(self):
"""Initialize for reading a gzip compressed fileobj.
"""
self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
self.dbuf = b""
# taken from gzip.GzipFile with some alterations
if self.__read(2) != b"\037\213":
raise ReadError("not a gzip file")
if self.__read(1) != b"\010":
raise CompressionError("unsupported compression method")
flag = ord(self.__read(1))
self.__read(6)
if flag & 4:
xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
self.read(xlen)
if flag & 8:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 16:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 2:
self.__read(2)
def tell(self):
"""Return the stream's file pointer position.
"""
return self.pos
def seek(self, pos=0):
"""Set the stream's file pointer to pos. Negative seeking
is forbidden.
"""
if pos - self.pos >= 0:
blocks, remainder = divmod(pos - self.pos, self.bufsize)
for i in range(blocks):
self.read(self.bufsize)
self.read(remainder)
else:
raise StreamError("seeking backwards is not allowed")
return self.pos
def read(self, size=None):
"""Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.
"""
if size is None:
t = []
while True:
buf = self._read(self.bufsize)
if not buf:
break
t.append(buf)
buf = "".join(t)
else:
buf = self._read(size)
self.pos += len(buf)
return buf
def _read(self, size):
"""Return size bytes from the stream.
"""
if self.comptype == "tar":
return self.__read(size)
c = len(self.dbuf)
while c < size:
buf = self.__read(self.bufsize)
if not buf:
break
try:
buf = self.cmp.decompress(buf)
except self.exception:
raise ReadError("invalid compressed data")
self.dbuf += buf
c += len(buf)
buf = self.dbuf[:size]
self.dbuf = self.dbuf[size:]
return buf
def __read(self, size):
"""Return size bytes from stream. If internal buffer is empty,
read another block from the stream.
"""
c = len(self.buf)
while c < size:
buf = self.fileobj.read(self.bufsize)
if not buf:
break
self.buf += buf
c += len(buf)
buf = self.buf[:size]
self.buf = self.buf[size:]
return buf
# class _Stream
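# Illustration (added): _Stream backs the non-seekable "streaming" modes of
# tarfile.open(), e.g. (filename is a stand-in):
#
#   with tarfile.open("backup.tar.gz", mode="r|gz") as tar:
#       for member in tar:       # members must be read strictly in order
#           f = tar.extractfile(member)
#
# With mode "r|*", _StreamProxy below sniffs the first block to pick the
# decompressor.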
class _StreamProxy(object):
"""Small proxy class that enables transparent compression
detection for the Stream interface (mode 'r|*').
"""
def __init__(self, fileobj):
self.fileobj = fileobj
self.buf = self.fileobj.read(BLOCKSIZE)
def read(self, size):
self.read = self.fileobj.read
return self.buf
def getcomptype(self):
if self.buf.startswith(b"\x1f\x8b\x08"):
return "gz"
elif self.buf[0:3] == b"BZh" and self.buf[4:10] == b"1AY&SY":
return "bz2"
elif self.buf.startswith((b"\x5d\x00\x00\x80", b"\xfd7zXZ")):
return "xz"
else:
return "tar"
def close(self):
self.fileobj.close()
# class StreamProxy
#------------------------
# Extraction file object
#------------------------
class _FileInFile(object):
"""A thin wrapper around an existing file object that
provides a part of its data as an individual file
object.
"""
def __init__(self, fileobj, offset, size, blockinfo=None):
self.fileobj = fileobj
self.offset = offset
self.size = size
self.position = 0
self.name = getattr(fileobj, "name", None)
self.closed = False
if blockinfo is None:
blockinfo = [(0, size)]
# Construct a map with data and zero blocks.
self.map_index = 0
self.map = []
lastpos = 0
realpos = self.offset
for offset, size in blockinfo:
if offset > lastpos:
self.map.append((False, lastpos, offset, None))
self.map.append((True, offset, offset + size, realpos))
realpos += size
lastpos = offset + size
if lastpos < self.size:
self.map.append((False, lastpos, self.size, None))
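        # Worked example (added): for offset=1024, size=16 and
        # blockinfo=[(0, 2), (10, 3)] the loop above yields
        #   [(True, 0, 2, 1024), (False, 2, 10, None),
        #    (True, 10, 13, 1026), (False, 13, 16, None)]
        # i.e. data blocks remember their real position in the archive,
        # while hole blocks read back as NUL bytes.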
def flush(self):
pass
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return self.fileobj.seekable()
def tell(self):
"""Return the current file position.
"""
return self.position
def seek(self, position, whence=io.SEEK_SET):
"""Seek to a position in the file.
"""
if whence == io.SEEK_SET:
self.position = min(max(position, 0), self.size)
elif whence == io.SEEK_CUR:
if position < 0:
self.position = max(self.position + position, 0)
else:
self.position = min(self.position + position, self.size)
elif whence == io.SEEK_END:
self.position = max(min(self.size + position, self.size), 0)
else:
raise ValueError("Invalid argument")
return self.position
def read(self, size=None):
"""Read data from the file.
"""
if size is None:
size = self.size - self.position
else:
size = min(size, self.size - self.position)
buf = b""
while size > 0:
while True:
data, start, stop, offset = self.map[self.map_index]
if start <= self.position < stop:
break
else:
self.map_index += 1
if self.map_index == len(self.map):
self.map_index = 0
length = min(size, stop - self.position)
if data:
self.fileobj.seek(offset + (self.position - start))
buf += self.fileobj.read(length)
else:
buf += NUL * length
size -= length
self.position += length
return buf
def readinto(self, b):
buf = self.read(len(b))
b[:len(buf)] = buf
return len(buf)
def close(self):
self.closed = True
#class _FileInFile
class ExFileObject(io.BufferedReader):
def __init__(self, tarfile, tarinfo):
fileobj = _FileInFile(tarfile.fileobj, tarinfo.offset_data,
tarinfo.size, tarinfo.sparse)
super().__init__(fileobj)
#class ExFileObject
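# Illustration (added): TarFile.extractfile() wraps a member in ExFileObject,
# so it reads like an ordinary binary file (names are stand-ins):
#
#   with tarfile.open("backup.tar") as tar:
#       data = tar.extractfile("etc/hosts").read()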
#------------------
# Exported Classes
#------------------
class TarInfo(object):
"""Informational class which holds the details about an
archive member given by a tar header block.
TarInfo objects are returned by TarFile.getmember(),
TarFile.getmembers() and TarFile.gettarinfo() and are
usually created internally.
"""
__slots__ = ("name", "mode", "uid", "gid", "size", "mtime",
"chksum", "type", "linkname", "uname", "gname",
"devmajor", "devminor",
"offset", "offset_data", "pax_headers", "sparse",
"tarfile", "_sparse_structs", "_link_target")
def __init__(self, name=""):
"""Construct a TarInfo object. name is the optional name
of the member.
"""
self.name = name # member name
self.mode = 0o644 # file permissions
self.uid = 0 # user id
self.gid = 0 # group id
self.size = 0 # file size
self.mtime = 0 # modification time
self.chksum = 0 # header checksum
self.type = REGTYPE # member type
self.linkname = "" # link name
self.uname = "" # user name
self.gname = "" # group name
self.devmajor = 0 # device major number
self.devminor = 0 # device minor number
self.offset = 0 # the tar header starts here
self.offset_data = 0 # the file's data starts here
self.sparse = None # sparse member information
self.pax_headers = {} # pax header information
# In pax headers the "name" and "linkname" field are called
# "path" and "linkpath".
def _getpath(self):
return self.name
def _setpath(self, name):
self.name = name
path = property(_getpath, _setpath)
def _getlinkpath(self):
return self.linkname
def _setlinkpath(self, linkname):
self.linkname = linkname
linkpath = property(_getlinkpath, _setlinkpath)
def __repr__(self):
return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
def get_info(self):
"""Return the TarInfo's attributes as a dictionary.
"""
info = {
"name": self.name,
"mode": self.mode & 0o7777,
"uid": self.uid,
"gid": self.gid,
"size": self.size,
"mtime": self.mtime,
"chksum": self.chksum,
"type": self.type,
"linkname": self.linkname,
"uname": self.uname,
"gname": self.gname,
"devmajor": self.devmajor,
"devminor": self.devminor
}
if info["type"] == DIRTYPE and not info["name"].endswith("/"):
info["name"] += "/"
return info
def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"):
"""Return a tar header as a string of 512 byte blocks.
"""
info = self.get_info()
if format == USTAR_FORMAT:
return self.create_ustar_header(info, encoding, errors)
elif format == GNU_FORMAT:
return self.create_gnu_header(info, encoding, errors)
elif format == PAX_FORMAT:
return self.create_pax_header(info, encoding)
else:
raise ValueError("invalid format")
def create_ustar_header(self, info, encoding, errors):
"""Return the object as a ustar header block.
"""
info["magic"] = POSIX_MAGIC
if len(info["linkname"]) > LENGTH_LINK:
raise ValueError("linkname is too long")
if len(info["name"]) > LENGTH_NAME:
info["prefix"], info["name"] = self._posix_split_name(info["name"])
return self._create_header(info, USTAR_FORMAT, encoding, errors)
def create_gnu_header(self, info, encoding, errors):
"""Return the object as a GNU header block sequence.
"""
info["magic"] = GNU_MAGIC
buf = b""
if len(info["linkname"]) > LENGTH_LINK:
buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors)
if len(info["name"]) > LENGTH_NAME:
buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors)
return buf + self._create_header(info, GNU_FORMAT, encoding, errors)
def create_pax_header(self, info, encoding):
"""Return the object as a ustar header block. If it cannot be
represented this way, prepend a pax extended header sequence
with supplement information.
"""
info["magic"] = POSIX_MAGIC
pax_headers = self.pax_headers.copy()
# Test string fields for values that exceed the field length or cannot
# be represented in ASCII encoding.
for name, hname, length in (
("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
("uname", "uname", 32), ("gname", "gname", 32)):
if hname in pax_headers:
# The pax header has priority.
continue
# Try to encode the string as ASCII.
try:
info[name].encode("ascii", "strict")
except UnicodeEncodeError:
pax_headers[hname] = info[name]
continue
if len(info[name]) > length:
pax_headers[hname] = info[name]
# Test number fields for values that exceed the field limit or values
# that like to be stored as float.
for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
if name in pax_headers:
# The pax header has priority. Avoid overflow.
info[name] = 0
continue
val = info[name]
if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
pax_headers[name] = str(val)
info[name] = 0
# Create a pax extended header if necessary.
if pax_headers:
buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding)
else:
buf = b""
return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
@classmethod
def create_pax_global_header(cls, pax_headers):
"""Return the object as a pax global header block sequence.
"""
return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf-8")
def _posix_split_name(self, name):
"""Split a name longer than 100 chars into a prefix
and a name part.
"""
prefix = name[:LENGTH_PREFIX + 1]
while prefix and prefix[-1] != "/":
prefix = prefix[:-1]
name = name[len(prefix):]
prefix = prefix[:-1]
if not prefix or len(name) > LENGTH_NAME:
raise ValueError("name is too long")
return prefix, name
@staticmethod
def _create_header(info, format, encoding, errors):
"""Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants.
"""
parts = [
stn(info.get("name", ""), 100, encoding, errors),
itn(info.get("mode", 0) & 0o7777, 8, format),
itn(info.get("uid", 0), 8, format),
itn(info.get("gid", 0), 8, format),
itn(info.get("size", 0), 12, format),
itn(info.get("mtime", 0), 12, format),
b" ", # checksum field
info.get("type", REGTYPE),
stn(info.get("linkname", ""), 100, encoding, errors),
info.get("magic", POSIX_MAGIC),
stn(info.get("uname", ""), 32, encoding, errors),
stn(info.get("gname", ""), 32, encoding, errors),
itn(info.get("devmajor", 0), 8, format),
itn(info.get("devminor", 0), 8, format),
stn(info.get("prefix", ""), 155, encoding, errors)
]
buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts))
chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
buf = buf[:-364] + bytes("%06o\0" % chksum, "ascii") + buf[-357:]
return buf
@staticmethod
def _create_payload(payload):
"""Return the string payload filled with zero bytes
up to the next 512 byte border.
"""
blocks, remainder = divmod(len(payload), BLOCKSIZE)
if remainder > 0:
payload += (BLOCKSIZE - remainder) * NUL
return payload
@classmethod
def _create_gnu_long_header(cls, name, type, encoding, errors):
"""Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
for name.
"""
name = name.encode(encoding, errors) + NUL
info = {}
info["name"] = "././@LongLink"
info["type"] = type
info["size"] = len(name)
info["magic"] = GNU_MAGIC
# create extended header + name blocks.
return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \
cls._create_payload(name)
@classmethod
def _create_pax_generic_header(cls, pax_headers, type, encoding):
"""Return a POSIX.1-2008 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be strings.
"""
# Check if one of the fields contains surrogate characters and thereby
# forces hdrcharset=BINARY, see _proc_pax() for more information.
binary = False
for keyword, value in pax_headers.items():
try:
value.encode("utf-8", "strict")
except UnicodeEncodeError:
binary = True
break
records = b""
if binary:
# Put the hdrcharset field at the beginning of the header.
records += b"21 hdrcharset=BINARY\n"
for keyword, value in pax_headers.items():
keyword = keyword.encode("utf-8")
if binary:
# Try to restore the original byte representation of `value'.
# Needless to say, that the encoding must match the string.
value = value.encode(encoding, "surrogateescape")
else:
value = value.encode("utf-8")
l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
n = p = 0
while True:
n = l + len(str(p))
if n == p:
break
p = n
records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"
# We use a hardcoded "././@PaxHeader" name like star does
# instead of the one that POSIX recommends.
info = {}
info["name"] = "././@PaxHeader"
info["type"] = type
info["size"] = len(records)
info["magic"] = POSIX_MAGIC
# Create pax header + record blocks.
return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \
cls._create_payload(records)
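        # Worked example (added): for keyword "path" and value "foo",
        # l = len("path") + len("foo") + 3 = 10 and the length loop above
        # converges on p = 12, because "12 path=foo\n" is itself 12 bytes.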
@classmethod
def frombuf(cls, buf, encoding, errors):
"""Construct a TarInfo object from a 512 byte bytes object.
"""
if len(buf) == 0:
raise EmptyHeaderError("empty header")
if len(buf) != BLOCKSIZE:
raise TruncatedHeaderError("truncated header")
if buf.count(NUL) == BLOCKSIZE:
raise EOFHeaderError("end of file header")
chksum = nti(buf[148:156])
if chksum not in calc_chksums(buf):
raise InvalidHeaderError("bad checksum")
obj = cls()
obj.name = nts(buf[0:100], encoding, errors)
obj.mode = nti(buf[100:108])
obj.uid = nti(buf[108:116])
obj.gid = nti(buf[116:124])
obj.size = nti(buf[124:136])
obj.mtime = nti(buf[136:148])
obj.chksum = chksum
obj.type = buf[156:157]
obj.linkname = nts(buf[157:257], encoding, errors)
obj.uname = nts(buf[265:297], encoding, errors)
obj.gname = nts(buf[297:329], encoding, errors)
obj.devmajor = nti(buf[329:337])
obj.devminor = nti(buf[337:345])
prefix = nts(buf[345:500], encoding, errors)
# Old V7 tar format represents a directory as a regular
# file with a trailing slash.
if obj.type == AREGTYPE and obj.name.endswith("/"):
obj.type = DIRTYPE
# The old GNU sparse format occupies some of the unused
# space in the buffer for up to 4 sparse structures.
        # Save them for later processing in _proc_sparse().
if obj.type == GNUTYPE_SPARSE:
pos = 386
structs = []
for i in range(4):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[482])
origsize = nti(buf[483:495])
obj._sparse_structs = (structs, isextended, origsize)
# Remove redundant slashes from directories.
if obj.isdir():
obj.name = obj.name.rstrip("/")
# Reconstruct a ustar longname.
if prefix and obj.type not in GNU_TYPES:
obj.name = prefix + "/" + obj.name
return obj
@classmethod
def fromtarfile(cls, tarfile):
"""Return the next TarInfo object from TarFile object
tarfile.
"""
buf = tarfile.fileobj.read(BLOCKSIZE)
obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
return obj._proc_member(tarfile)
#--------------------------------------------------------------------------
# The following are methods that are called depending on the type of a
# member. The entry point is _proc_member() which can be overridden in a
# subclass to add custom _proc_*() methods. A _proc_*() method MUST
# implement the following
# operations:
# 1. Set self.offset_data to the position where the data blocks begin,
# if there is data that follows.
# 2. Set tarfile.offset to the position where the next member's header will
# begin.
# 3. Return self or another valid TarInfo object.
def _proc_member(self, tarfile):
"""Choose the right processing method depending on
the type and call it.
"""
if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
return self._proc_gnulong(tarfile)
elif self.type == GNUTYPE_SPARSE:
return self._proc_sparse(tarfile)
elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
return self._proc_pax(tarfile)
else:
return self._proc_builtin(tarfile)
def _proc_builtin(self, tarfile):
"""Process a builtin type or an unknown type which
will be treated as a regular file.
"""
self.offset_data = tarfile.fileobj.tell()
offset = self.offset_data
if self.isreg() or self.type not in SUPPORTED_TYPES:
# Skip the following data blocks.
offset += self._block(self.size)
tarfile.offset = offset
# Patch the TarInfo object with saved global
# header information.
self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
return self
def _proc_gnulong(self, tarfile):
"""Process the blocks that hold a GNU longname
or longlink member.
"""
buf = tarfile.fileobj.read(self._block(self.size))
# Fetch the next header and process it.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Patch the TarInfo object from the next header with
# the longname information.
next.offset = self.offset
if self.type == GNUTYPE_LONGNAME:
next.name = nts(buf, tarfile.encoding, tarfile.errors)
elif self.type == GNUTYPE_LONGLINK:
next.linkname = nts(buf, tarfile.encoding, tarfile.errors)
return next
def _proc_sparse(self, tarfile):
"""Process a GNU sparse header plus extra headers.
"""
# We already collected some sparse structures in frombuf().
structs, isextended, origsize = self._sparse_structs
del self._sparse_structs
# Collect sparse structures from extended header blocks.
while isextended:
buf = tarfile.fileobj.read(BLOCKSIZE)
pos = 0
for i in range(21):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset and numbytes:
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[504])
self.sparse = structs
self.offset_data = tarfile.fileobj.tell()
tarfile.offset = self.offset_data + self._block(self.size)
self.size = origsize
return self
def _proc_pax(self, tarfile):
"""Process an extended or global header as described in
POSIX.1-2008.
"""
# Read the header information.
buf = tarfile.fileobj.read(self._block(self.size))
# A pax header stores supplemental information for either
# the following file (extended) or all following files
# (global).
if self.type == XGLTYPE:
pax_headers = tarfile.pax_headers
else:
pax_headers = tarfile.pax_headers.copy()
# Check if the pax header contains a hdrcharset field. This tells us
# the encoding of the path, linkpath, uname and gname fields. Normally,
        # these fields are UTF-8 encoded, but POSIX.1-2008 allows tar
        # implementations to store them as raw binary strings if the
        # translation to UTF-8 fails.
match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf)
if match is not None:
pax_headers["hdrcharset"] = match.group(1).decode("utf-8")
# For the time being, we don't care about anything other than "BINARY".
# The only other value that is currently allowed by the standard is
# "ISO-IR 10646 2000 UTF-8" in other words UTF-8.
hdrcharset = pax_headers.get("hdrcharset")
if hdrcharset == "BINARY":
encoding = tarfile.encoding
else:
encoding = "utf-8"
# Parse pax header information. A record looks like that:
# "%d %s=%s\n" % (length, keyword, value). length is the size
# of the complete record including the length field itself and
# the newline. keyword and value are both UTF-8 encoded strings.
regex = re.compile(br"(\d+) ([^=]+)=")
pos = 0
while True:
match = regex.match(buf, pos)
if not match:
break
length, keyword = match.groups()
length = int(length)
value = buf[match.end(2) + 1:match.start(1) + length - 1]
# Normally, we could just use "utf-8" as the encoding and "strict"
# as the error handler, but we better not take the risk. For
# example, GNU tar <= 1.23 is known to store filenames it cannot
# translate to UTF-8 as raw strings (unfortunately without a
# hdrcharset=BINARY header).
# We first try the strict standard encoding, and if that fails we
# fall back on the user's encoding and error handler.
keyword = self._decode_pax_field(keyword, "utf-8", "utf-8",
tarfile.errors)
if keyword in PAX_NAME_FIELDS:
value = self._decode_pax_field(value, encoding, tarfile.encoding,
tarfile.errors)
else:
value = self._decode_pax_field(value, "utf-8", "utf-8",
tarfile.errors)
pax_headers[keyword] = value
pos += length
# Fetch the next header.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Process GNU sparse information.
if "GNU.sparse.map" in pax_headers:
# GNU extended sparse format version 0.1.
self._proc_gnusparse_01(next, pax_headers)
elif "GNU.sparse.size" in pax_headers:
# GNU extended sparse format version 0.0.
self._proc_gnusparse_00(next, pax_headers, buf)
elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
# GNU extended sparse format version 1.0.
self._proc_gnusparse_10(next, pax_headers, tarfile)
if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
# Patch the TarInfo object with the extended header info.
next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
next.offset = self.offset
if "size" in pax_headers:
# If the extended header replaces the size field,
# we need to recalculate the offset where the next
# header starts.
offset = next.offset_data
if next.isreg() or next.type not in SUPPORTED_TYPES:
offset += next._block(next.size)
tarfile.offset = offset
return next
def _proc_gnusparse_00(self, next, pax_headers, buf):
"""Process a GNU tar extended sparse header, version 0.0.
"""
offsets = []
for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
offsets.append(int(match.group(1)))
numbytes = []
for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
numbytes.append(int(match.group(1)))
next.sparse = list(zip(offsets, numbytes))
def _proc_gnusparse_01(self, next, pax_headers):
"""Process a GNU tar extended sparse header, version 0.1.
"""
sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
next.sparse = list(zip(sparse[::2], sparse[1::2]))
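        # For example, a map value of "0,512,4096,512" yields
        # [(0, 512), (4096, 512)]: two 512-byte data runs at those offsets.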
def _proc_gnusparse_10(self, next, pax_headers, tarfile):
"""Process a GNU tar extended sparse header, version 1.0.
"""
fields = None
sparse = []
buf = tarfile.fileobj.read(BLOCKSIZE)
fields, buf = buf.split(b"\n", 1)
fields = int(fields)
while len(sparse) < fields * 2:
if b"\n" not in buf:
buf += tarfile.fileobj.read(BLOCKSIZE)
number, buf = buf.split(b"\n", 1)
sparse.append(int(number))
next.offset_data = tarfile.fileobj.tell()
next.sparse = list(zip(sparse[::2], sparse[1::2]))
def _apply_pax_info(self, pax_headers, encoding, errors):
"""Replace fields with supplemental information from a previous
pax extended or global header.
"""
for keyword, value in pax_headers.items():
if keyword == "GNU.sparse.name":
setattr(self, "path", value)
elif keyword == "GNU.sparse.size":
setattr(self, "size", int(value))
elif keyword == "GNU.sparse.realsize":
setattr(self, "size", int(value))
elif keyword in PAX_FIELDS:
if keyword in PAX_NUMBER_FIELDS:
try:
value = PAX_NUMBER_FIELDS[keyword](value)
except ValueError:
value = 0
if keyword == "path":
value = value.rstrip("/")
setattr(self, keyword, value)
self.pax_headers = pax_headers.copy()
def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
"""Decode a single field from a pax record.
"""
try:
return value.decode(encoding, "strict")
except UnicodeDecodeError:
return value.decode(fallback_encoding, fallback_errors)
def _block(self, count):
"""Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
"""
blocks, remainder = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
return blocks * BLOCKSIZE
def isreg(self):
return self.type in REGULAR_TYPES
def isfile(self):
return self.isreg()
def isdir(self):
return self.type == DIRTYPE
def issym(self):
return self.type == SYMTYPE
def islnk(self):
return self.type == LNKTYPE
def ischr(self):
return self.type == CHRTYPE
def isblk(self):
return self.type == BLKTYPE
def isfifo(self):
return self.type == FIFOTYPE
def issparse(self):
return self.sparse is not None
def isdev(self):
return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
# class TarInfo
class TarFile(object):
"""The TarFile Class provides an interface to tar archives.
"""
debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)
dereference = False # If true, add content of linked file to the
# tar file, else the link.
ignore_zeros = False # If true, skips empty or invalid blocks and
# continues processing.
errorlevel = 1 # If 0, fatal errors only appear in debug
# messages (if debug >= 0). If > 0, errors
# are passed to the caller as exceptions.
format = DEFAULT_FORMAT # The format to use when creating an archive.
encoding = ENCODING # Encoding for 8-bit character strings.
errors = None # Error handler for unicode conversion.
tarinfo = TarInfo # The default TarInfo class to use.
fileobject = ExFileObject # The file-object for extractfile().
def __init__(self, name=None, mode="r", fileobj=None, format=None,
tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None):
"""Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
read from an existing archive, 'a' to append data to an existing
file or 'w' to create a new file overwriting an existing one. `mode'
defaults to 'r'.
If `fileobj' is given, it is used for reading or writing data. If it
can be determined, `mode' is overridden by `fileobj's mode.
`fileobj' is not closed, when TarFile is closed.
"""
modes = {"r": "rb", "a": "r+b", "w": "wb"}
if mode not in modes:
raise ValueError("mode must be 'r', 'a' or 'w'")
self.mode = mode
self._mode = modes[mode]
if not fileobj:
if self.mode == "a" and not os.path.exists(name):
# Create nonexistent files in append mode.
self.mode = "w"
self._mode = "wb"
fileobj = bltn_open(name, self._mode)
self._extfileobj = False
else:
if (name is None and hasattr(fileobj, "name") and
isinstance(fileobj.name, (str, bytes))):
name = fileobj.name
if hasattr(fileobj, "mode"):
self._mode = fileobj.mode
self._extfileobj = True
self.name = os.path.abspath(name) if name else None
self.fileobj = fileobj
# Init attributes.
if format is not None:
self.format = format
if tarinfo is not None:
self.tarinfo = tarinfo
if dereference is not None:
self.dereference = dereference
if ignore_zeros is not None:
self.ignore_zeros = ignore_zeros
if encoding is not None:
self.encoding = encoding
self.errors = errors
if pax_headers is not None and self.format == PAX_FORMAT:
self.pax_headers = pax_headers
else:
self.pax_headers = {}
if debug is not None:
self.debug = debug
if errorlevel is not None:
self.errorlevel = errorlevel
# Init datastructures.
self.closed = False
self.members = [] # list of members as TarInfo objects
self._loaded = False # flag if all members have been read
self.offset = self.fileobj.tell()
# current position in the archive file
self.inodes = {} # dictionary caching the inodes of
# archive members already added
try:
if self.mode == "r":
self.firstmember = None
self.firstmember = self.next()
if self.mode == "a":
# Move to the end of the archive,
# before the first empty block.
while True:
self.fileobj.seek(self.offset)
try:
tarinfo = self.tarinfo.fromtarfile(self)
self.members.append(tarinfo)
except EOFHeaderError:
self.fileobj.seek(self.offset)
break
except HeaderError as e:
raise ReadError(str(e))
if self.mode in "aw":
self._loaded = True
if self.pax_headers:
buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
self.fileobj.write(buf)
self.offset += len(buf)
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
#--------------------------------------------------------------------------
# Below are the classmethods which act as alternate constructors to the
# TarFile class. The open() method is the only one that is needed for
# public use; it is the "super"-constructor and is able to select an
# adequate "sub"-constructor for a particular compression using the mapping
# from OPEN_METH.
#
# This concept allows one to subclass TarFile without losing the comfort of
# the super-constructor. A sub-constructor is registered and made available
# by adding it to the mapping in OPEN_METH.
@classmethod
def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
"""Open a tar archive for reading, writing or appending. Return
an appropriate TarFile class.
mode:
'r' or 'r:*' open for reading with transparent compression
'r:' open for reading exclusively uncompressed
'r:gz' open for reading with gzip compression
'r:bz2' open for reading with bzip2 compression
'r:xz' open for reading with lzma compression
'a' or 'a:' open for appending, creating the file if necessary
'w' or 'w:' open for writing without compression
'w:gz' open for writing with gzip compression
'w:bz2' open for writing with bzip2 compression
'w:xz' open for writing with lzma compression
'r|*' open a stream of tar blocks with transparent compression
'r|' open an uncompressed stream of tar blocks for reading
'r|gz' open a gzip compressed stream of tar blocks
'r|bz2' open a bzip2 compressed stream of tar blocks
'r|xz' open an lzma compressed stream of tar blocks
'w|' open an uncompressed stream for writing
'w|gz' open a gzip compressed stream for writing
'w|bz2' open a bzip2 compressed stream for writing
'w|xz' open an lzma compressed stream for writing
"""
if not name and not fileobj:
raise ValueError("nothing to open")
if mode in ("r", "r:*"):
# Find out which *open() is appropriate for opening the file.
for comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
if fileobj is not None:
saved_pos = fileobj.tell()
try:
return func(name, "r", fileobj, **kwargs)
except (ReadError, CompressionError) as e:
if fileobj is not None:
fileobj.seek(saved_pos)
continue
raise ReadError("file could not be opened successfully")
elif ":" in mode:
filemode, comptype = mode.split(":", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
# Select the *open() function according to
# given compression.
if comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
else:
raise CompressionError("unknown compression type %r" % comptype)
return func(name, filemode, fileobj, **kwargs)
elif "|" in mode:
filemode, comptype = mode.split("|", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
if filemode not in ("r", "w"):
raise ValueError("mode must be 'r' or 'w'")
stream = _Stream(name, filemode, comptype, fileobj, bufsize)
try:
t = cls(name, filemode, stream, **kwargs)
except:
stream.close()
raise
t._extfileobj = False
return t
elif mode in ("a", "w"):
return cls.taropen(name, mode, fileobj, **kwargs)
raise ValueError("undiscernible mode")
@classmethod
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
"""Open uncompressed tar archive name for reading or writing.
"""
if mode not in ("r", "a", "w"):
raise ValueError("mode must be 'r', 'a' or 'w'")
return cls(name, mode, fileobj, **kwargs)
@classmethod
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open gzip compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if mode not in ("r", "w"):
raise ValueError("mode must be 'r' or 'w'")
try:
import gzip
gzip.GzipFile
except (ImportError, AttributeError):
raise CompressionError("gzip module is not available")
try:
fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj)
except OSError:
if fileobj is not None and mode == 'r':
raise ReadError("not a gzip file")
raise
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except OSError:
fileobj.close()
if mode == 'r':
raise ReadError("not a gzip file")
raise
except:
fileobj.close()
raise
t._extfileobj = False
return t
@classmethod
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open bzip2 compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if mode not in ("r", "w"):
raise ValueError("mode must be 'r' or 'w'.")
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
fileobj = bz2.BZ2File(fileobj or name, mode,
compresslevel=compresslevel)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (OSError, EOFError):
fileobj.close()
if mode == 'r':
raise ReadError("not a bzip2 file")
raise
except:
fileobj.close()
raise
t._extfileobj = False
return t
@classmethod
def xzopen(cls, name, mode="r", fileobj=None, preset=None, **kwargs):
"""Open lzma compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if mode not in ("r", "w"):
raise ValueError("mode must be 'r' or 'w'")
try:
import lzma
except ImportError:
raise CompressionError("lzma module is not available")
fileobj = lzma.LZMAFile(fileobj or name, mode, preset=preset)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (lzma.LZMAError, EOFError):
fileobj.close()
if mode == 'r':
raise ReadError("not an lzma file")
raise
except:
fileobj.close()
raise
t._extfileobj = False
return t
# All *open() methods are registered here.
OPEN_METH = {
"tar": "taropen", # uncompressed tar
"gz": "gzopen", # gzip compressed tar
"bz2": "bz2open", # bzip2 compressed tar
"xz": "xzopen" # lzma compressed tar
}
#--------------------------------------------------------------------------
# The public methods which TarFile provides:
def close(self):
"""Close the TarFile. In write-mode, two finishing zero blocks are
appended to the archive.
"""
if self.closed:
return
if self.mode in "aw":
self.fileobj.write(NUL * (BLOCKSIZE * 2))
self.offset += (BLOCKSIZE * 2)
# fill up the end with zero-blocks
# (like option -b20 for tar does)
blocks, remainder = divmod(self.offset, RECORDSIZE)
if remainder > 0:
self.fileobj.write(NUL * (RECORDSIZE - remainder))
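                # e.g. an archive ending at offset 3072 gets 7168 zero bytes
                # appended to reach the next 10240-byte record boundary.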
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def getmember(self, name):
"""Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.
"""
tarinfo = self._getmember(name)
if tarinfo is None:
raise KeyError("filename %r not found" % name)
return tarinfo
def getmembers(self):
"""Return the members of the archive as a list of TarInfo objects. The
list has the same order as the members in the archive.
"""
self._check()
if not self._loaded: # if we want to obtain a list of
self._load() # all members, we first have to
# scan the whole archive.
return self.members
def getnames(self):
"""Return the members of the archive as a list of their names. It has
the same order as the list returned by getmembers().
"""
return [tarinfo.name for tarinfo in self.getmembers()]
def gettarinfo(self, name=None, arcname=None, fileobj=None):
"""Create a TarInfo object for either the file `name' or the file
object `fileobj' (using os.fstat on its file descriptor). You can
modify some of the TarInfo's attributes before you add it using
addfile(). If given, `arcname' specifies an alternative name for the
file in the archive.
"""
self._check("aw")
# When fileobj is given, replace name by
# fileobj's real name.
if fileobj is not None:
name = fileobj.name
# Building the name of the member in the archive.
        # Backward slashes are converted to forward slashes;
        # absolute paths are turned into relative paths.
if arcname is None:
arcname = name
drv, arcname = os.path.splitdrive(arcname)
arcname = arcname.replace(os.sep, "/")
arcname = arcname.lstrip("/")
# Now, fill the TarInfo object with
# information specific for the file.
tarinfo = self.tarinfo()
tarinfo.tarfile = self
# Use os.stat or os.lstat, depending on platform
# and if symlinks shall be resolved.
if fileobj is None:
if hasattr(os, "lstat") and not self.dereference:
statres = os.lstat(name)
else:
statres = os.stat(name)
else:
statres = os.fstat(fileobj.fileno())
linkname = ""
stmd = statres.st_mode
if stat.S_ISREG(stmd):
inode = (statres.st_ino, statres.st_dev)
if not self.dereference and statres.st_nlink > 1 and \
inode in self.inodes and arcname != self.inodes[inode]:
# Is it a hardlink to an already
# archived file?
type = LNKTYPE
linkname = self.inodes[inode]
else:
                # The inode is added only if it's valid.
# For win32 it is always 0.
type = REGTYPE
if inode[0]:
self.inodes[inode] = arcname
elif stat.S_ISDIR(stmd):
type = DIRTYPE
elif stat.S_ISFIFO(stmd):
type = FIFOTYPE
elif stat.S_ISLNK(stmd):
type = SYMTYPE
linkname = os.readlink(name)
elif stat.S_ISCHR(stmd):
type = CHRTYPE
elif stat.S_ISBLK(stmd):
type = BLKTYPE
else:
return None
# Fill the TarInfo object with all
# information we can get.
tarinfo.name = arcname
tarinfo.mode = stmd
tarinfo.uid = statres.st_uid
tarinfo.gid = statres.st_gid
if type == REGTYPE:
tarinfo.size = statres.st_size
else:
tarinfo.size = 0
tarinfo.mtime = statres.st_mtime
tarinfo.type = type
tarinfo.linkname = linkname
if pwd:
try:
tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
except KeyError:
pass
if grp:
try:
tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
except KeyError:
pass
if type in (CHRTYPE, BLKTYPE):
if hasattr(os, "major") and hasattr(os, "minor"):
tarinfo.devmajor = os.major(statres.st_rdev)
tarinfo.devminor = os.minor(statres.st_rdev)
return tarinfo
def list(self, verbose=True):
"""Print a table of contents to sys.stdout. If `verbose' is False, only
the names of the members are printed. If it is True, an `ls -l'-like
output is produced.
"""
self._check()
for tarinfo in self:
if verbose:
_safe_print(stat.filemode(tarinfo.mode))
_safe_print("%s/%s" % (tarinfo.uname or tarinfo.uid,
tarinfo.gname or tarinfo.gid))
if tarinfo.ischr() or tarinfo.isblk():
_safe_print("%10s" %
("%d,%d" % (tarinfo.devmajor, tarinfo.devminor)))
else:
_safe_print("%10d" % tarinfo.size)
_safe_print("%d-%02d-%02d %02d:%02d:%02d" \
% time.localtime(tarinfo.mtime)[:6])
_safe_print(tarinfo.name + ("/" if tarinfo.isdir() else ""))
if verbose:
if tarinfo.issym():
_safe_print("-> " + tarinfo.linkname)
if tarinfo.islnk():
_safe_print("link to " + tarinfo.linkname)
print()
def add(self, name, arcname=None, recursive=True, exclude=None, *, filter=None):
"""Add the file `name' to the archive. `name' may be any type of file
(directory, fifo, symbolic link, etc.). If given, `arcname'
specifies an alternative name for the file in the archive.
Directories are added recursively by default. This can be avoided by
setting `recursive' to False. `exclude' is a function that should
return True for each filename to be excluded. `filter' is a function
        that expects a TarInfo object argument and returns the changed
        TarInfo object; if it returns None, the TarInfo object will be
        excluded from the archive.
"""
self._check("aw")
if arcname is None:
arcname = name
# Exclude pathnames.
if exclude is not None:
import warnings
warnings.warn("use the filter argument instead",
DeprecationWarning, 2)
if exclude(name):
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Skip if somebody tries to archive the archive...
if self.name is not None and os.path.abspath(name) == self.name:
self._dbg(2, "tarfile: Skipped %r" % name)
return
self._dbg(1, name)
# Create a TarInfo object from the file.
tarinfo = self.gettarinfo(name, arcname)
if tarinfo is None:
self._dbg(1, "tarfile: Unsupported type %r" % name)
return
# Change or exclude the TarInfo object.
if filter is not None:
tarinfo = filter(tarinfo)
if tarinfo is None:
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Append the tar header and data to the archive.
if tarinfo.isreg():
with bltn_open(name, "rb") as f:
self.addfile(tarinfo, f)
elif tarinfo.isdir():
self.addfile(tarinfo)
if recursive:
for f in os.listdir(name):
self.add(os.path.join(name, f), os.path.join(arcname, f),
recursive, exclude, filter=filter)
else:
self.addfile(tarinfo)
def addfile(self, tarinfo, fileobj=None):
"""Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
given, tarinfo.size bytes are read from it and added to the archive.
You can create TarInfo objects using gettarinfo().
On Windows platforms, `fileobj' should always be opened with mode
'rb' to avoid irritation about the file size.
"""
self._check("aw")
tarinfo = copy.copy(tarinfo)
buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
self.fileobj.write(buf)
self.offset += len(buf)
# If there's data to follow, append it.
if fileobj is not None:
copyfileobj(fileobj, self.fileobj, tarinfo.size)
blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
if remainder > 0:
self.fileobj.write(NUL * (BLOCKSIZE - remainder))
blocks += 1
self.offset += blocks * BLOCKSIZE
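            # e.g. a 612-byte payload is padded with 412 NULs so that it
            # occupies exactly two 512-byte blocks (1024 bytes).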
self.members.append(tarinfo)
def extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 0o700
# Do not set_attrs directories, as we will do that further down
self.extract(tarinfo, path, set_attrs=not tarinfo.isdir())
# Reverse sort directories.
directories.sort(key=lambda a: a.name)
directories.reverse()
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extract(self, member, path="", set_attrs=True):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a TarInfo object. You can
specify a different directory using `path'. File attributes (owner,
mtime, mode) are set unless `set_attrs' is False.
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
# Prepare the link target for makelink().
if tarinfo.islnk():
tarinfo._link_target = os.path.join(path, tarinfo.linkname)
try:
self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
set_attrs=set_attrs)
except OSError as e:
if self.errorlevel > 0:
raise
else:
if e.filename is None:
self._dbg(1, "tarfile: %s" % e.strerror)
else:
self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extractfile(self, member):
"""Extract a member from the archive as a file object. `member' may be
a filename or a TarInfo object. If `member' is a regular file or a
link, an io.BufferedReader object is returned. Otherwise, None is
returned.
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
if tarinfo.isreg() or tarinfo.type not in SUPPORTED_TYPES:
# Members with unknown types are treated as regular files.
return self.fileobject(self, tarinfo)
elif tarinfo.islnk() or tarinfo.issym():
if isinstance(self.fileobj, _Stream):
# A small but ugly workaround for the case that someone tries
# to extract a (sym)link as a file-object from a non-seekable
# stream of tar blocks.
raise StreamError("cannot extract (sym)link as file object")
else:
# A (sym)link's file object is its target's file object.
return self.extractfile(self._find_link_target(tarinfo))
else:
# If there's no data associated with the member (directory, chrdev,
# blkdev, etc.), return None instead of a file object.
return None
def _extract_member(self, tarinfo, targetpath, set_attrs=True):
"""Extract the TarInfo object tarinfo to a physical
file called targetpath.
"""
# Fetch the TarInfo object for the given name
# and build the destination pathname, replacing
# forward slashes to platform specific separators.
targetpath = targetpath.rstrip("/")
targetpath = targetpath.replace("/", os.sep)
# Create all upper directories.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
# Create directories that are not part of the archive with
# default permissions.
os.makedirs(upperdirs)
if tarinfo.islnk() or tarinfo.issym():
self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
else:
self._dbg(1, tarinfo.name)
if tarinfo.isreg():
self.makefile(tarinfo, targetpath)
elif tarinfo.isdir():
self.makedir(tarinfo, targetpath)
elif tarinfo.isfifo():
self.makefifo(tarinfo, targetpath)
elif tarinfo.ischr() or tarinfo.isblk():
self.makedev(tarinfo, targetpath)
elif tarinfo.islnk() or tarinfo.issym():
self.makelink(tarinfo, targetpath)
elif tarinfo.type not in SUPPORTED_TYPES:
self.makeunknown(tarinfo, targetpath)
else:
self.makefile(tarinfo, targetpath)
if set_attrs:
self.chown(tarinfo, targetpath)
if not tarinfo.issym():
self.chmod(tarinfo, targetpath)
self.utime(tarinfo, targetpath)
#--------------------------------------------------------------------------
# Below are the different file methods. They are called via
# _extract_member() when extract() is called. They can be replaced in a
# subclass to implement other functionality.
def makedir(self, tarinfo, targetpath):
"""Make a directory called targetpath.
"""
try:
# Use a safe mode for the directory, the real mode is set
# later in _extract_member().
os.mkdir(targetpath, 0o700)
except FileExistsError:
pass
def makefile(self, tarinfo, targetpath):
"""Make a file called targetpath.
"""
source = self.fileobj
source.seek(tarinfo.offset_data)
with bltn_open(targetpath, "wb") as target:
if tarinfo.sparse is not None:
for offset, size in tarinfo.sparse:
target.seek(offset)
copyfileobj(source, target, size)
else:
copyfileobj(source, target, tarinfo.size)
target.seek(tarinfo.size)
target.truncate()
def makeunknown(self, tarinfo, targetpath):
"""Make a file from a TarInfo object with an unknown type
at targetpath.
"""
self.makefile(tarinfo, targetpath)
self._dbg(1, "tarfile: Unknown file type %r, " \
"extracted as regular file." % tarinfo.type)
def makefifo(self, tarinfo, targetpath):
"""Make a fifo called targetpath.
"""
if hasattr(os, "mkfifo"):
os.mkfifo(targetpath)
else:
raise ExtractError("fifo not supported by system")
def makedev(self, tarinfo, targetpath):
"""Make a character or block device called targetpath.
"""
if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
raise ExtractError("special devices not supported by system")
mode = tarinfo.mode
if tarinfo.isblk():
mode |= stat.S_IFBLK
else:
mode |= stat.S_IFCHR
os.mknod(targetpath, mode,
os.makedev(tarinfo.devmajor, tarinfo.devminor))
def makelink(self, tarinfo, targetpath):
"""Make a (symbolic) link called targetpath. If it cannot be created
(platform limitation), we try to make a copy of the referenced file
instead of a link.
"""
try:
# For systems that support symbolic and hard links.
if tarinfo.issym():
os.symlink(tarinfo.linkname, targetpath)
else:
# See extract().
if os.path.exists(tarinfo._link_target):
os.link(tarinfo._link_target, targetpath)
else:
self._extract_member(self._find_link_target(tarinfo),
targetpath)
except symlink_exception:
try:
self._extract_member(self._find_link_target(tarinfo),
targetpath)
except KeyError:
raise ExtractError("unable to resolve link inside archive")
def chown(self, tarinfo, targetpath):
"""Set owner of targetpath according to tarinfo.
"""
if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
# We have to be root to do so.
try:
g = grp.getgrnam(tarinfo.gname)[2]
except KeyError:
g = tarinfo.gid
try:
u = pwd.getpwnam(tarinfo.uname)[2]
except KeyError:
u = tarinfo.uid
try:
if tarinfo.issym() and hasattr(os, "lchown"):
os.lchown(targetpath, u, g)
else:
os.chown(targetpath, u, g)
except OSError as e:
raise ExtractError("could not change owner")
def chmod(self, tarinfo, targetpath):
"""Set file permissions of targetpath according to tarinfo.
"""
if hasattr(os, 'chmod'):
try:
os.chmod(targetpath, tarinfo.mode)
except OSError as e:
raise ExtractError("could not change mode")
def utime(self, tarinfo, targetpath):
"""Set modification time of targetpath according to tarinfo.
"""
if not hasattr(os, 'utime'):
return
try:
os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
except OSError as e:
raise ExtractError("could not change modification time")
#--------------------------------------------------------------------------
def next(self):
"""Return the next member of the archive as a TarInfo object, when
TarFile is opened for reading. Return None if there is no more
available.
"""
self._check("ra")
if self.firstmember is not None:
m = self.firstmember
self.firstmember = None
return m
# Read the next block.
self.fileobj.seek(self.offset)
tarinfo = None
while True:
try:
tarinfo = self.tarinfo.fromtarfile(self)
except EOFHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
except InvalidHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
elif self.offset == 0:
raise ReadError(str(e))
except EmptyHeaderError:
if self.offset == 0:
raise ReadError("empty file")
except TruncatedHeaderError as e:
if self.offset == 0:
raise ReadError(str(e))
except SubsequentHeaderError as e:
raise ReadError(str(e))
break
if tarinfo is not None:
self.members.append(tarinfo)
else:
self._loaded = True
return tarinfo
#--------------------------------------------------------------------------
# Little helper methods:
def _getmember(self, name, tarinfo=None, normalize=False):
"""Find an archive member by name from bottom to top.
If tarinfo is given, it is used as the starting point.
"""
# Ensure that all members have been loaded.
members = self.getmembers()
# Limit the member search list up to tarinfo.
if tarinfo is not None:
members = members[:members.index(tarinfo)]
if normalize:
name = os.path.normpath(name)
for member in reversed(members):
if normalize:
member_name = os.path.normpath(member.name)
else:
member_name = member.name
if name == member_name:
return member
def _load(self):
"""Read through the entire archive file and look for readable
members.
"""
while True:
tarinfo = self.next()
if tarinfo is None:
break
self._loaded = True
def _check(self, mode=None):
"""Check if TarFile is still open, and if the operation's mode
corresponds to TarFile's mode.
"""
if self.closed:
raise OSError("%s is closed" % self.__class__.__name__)
if mode is not None and self.mode not in mode:
raise OSError("bad operation for mode %r" % self.mode)
def _find_link_target(self, tarinfo):
"""Find the target member of a symlink or hardlink member in the
archive.
"""
if tarinfo.issym():
# Always search the entire archive.
linkname = "/".join(filter(None, (os.path.dirname(tarinfo.name), tarinfo.linkname)))
limit = None
else:
# Search the archive before the link, because a hard link is
# just a reference to an already archived file.
linkname = tarinfo.linkname
limit = tarinfo
member = self._getmember(linkname, tarinfo=limit, normalize=True)
if member is None:
raise KeyError("linkname %r not found" % linkname)
return member
def __iter__(self):
"""Provide an iterator object.
"""
if self._loaded:
return iter(self.members)
else:
return TarIter(self)
def _dbg(self, level, msg):
"""Write debugging output to sys.stderr.
"""
if level <= self.debug:
print(msg, file=sys.stderr)
def __enter__(self):
self._check()
return self
def __exit__(self, type, value, traceback):
if type is None:
self.close()
else:
# An exception occurred. We must not call close() because
# it would try to write end-of-archive blocks and padding.
if not self._extfileobj:
self.fileobj.close()
self.closed = True
# class TarFile
class TarIter:
"""Iterator Class.
for tarinfo in TarFile(...):
suite...
"""
def __init__(self, tarfile):
"""Construct a TarIter object.
"""
self.tarfile = tarfile
self.index = 0
def __iter__(self):
"""Return iterator object.
"""
return self
def __next__(self):
"""Return the next item using TarFile's next() method.
When all members have been read, set TarFile as _loaded.
"""
# Fix for SF #1100429: Under rare circumstances it can
# happen that getmembers() is called during iteration,
# which will cause TarIter to stop prematurely.
if self.index == 0 and self.tarfile.firstmember is not None:
tarinfo = self.tarfile.next()
elif self.index < len(self.tarfile.members):
tarinfo = self.tarfile.members[self.index]
elif not self.tarfile._loaded:
tarinfo = self.tarfile.next()
if not tarinfo:
self.tarfile._loaded = True
raise StopIteration
else:
raise StopIteration
self.index += 1
return tarinfo
#--------------------
# exported functions
#--------------------
def is_tarfile(name):
"""Return True if name points to a tar archive that we
are able to handle, else return False.
"""
try:
t = open(name)
t.close()
return True
except TarError:
return False
bltn_open = open
open = TarFile.open
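# --- Illustrative usage sketch (not part of the original module) ---
# A minimal in-memory round trip through the API above: write one member
# with TarInfo/addfile(), then read it back. The helper name _demo_roundtrip
# and the member name "hello.txt" are arbitrary examples.
def _demo_roundtrip():
    import io
    data = b"hello world\n"
    buf = io.BytesIO()
    # Write an uncompressed archive containing a single regular file.
    with TarFile.open(fileobj=buf, mode="w") as tf:
        info = TarInfo(name="hello.txt")
        info.size = len(data)
        tf.addfile(info, io.BytesIO(data))
    buf.seek(0)
    # Reopen with transparent format detection and read the member back.
    with TarFile.open(fileobj=buf, mode="r") as tf:
        member = tf.getmember("hello.txt")
        assert tf.extractfile(member).read() == data
    return buf.getvalue()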
def main():
import argparse
description = 'A simple command line interface for tarfile module.'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-v', '--verbose', action='store_true', default=False,
help='Verbose output')
group = parser.add_mutually_exclusive_group()
group.add_argument('-l', '--list', metavar='<tarfile>',
help='Show listing of a tarfile')
group.add_argument('-e', '--extract', nargs='+',
metavar=('<tarfile>', '<output_dir>'),
help='Extract tarfile into target dir')
group.add_argument('-c', '--create', nargs='+',
metavar=('<name>', '<file>'),
help='Create tarfile from sources')
group.add_argument('-t', '--test', metavar='<tarfile>',
help='Test if a tarfile is valid')
args = parser.parse_args()
if args.test:
src = args.test
if is_tarfile(src):
with open(src, 'r') as tar:
tar.getmembers()
print(tar.getmembers(), file=sys.stderr)
if args.verbose:
print('{!r} is a tar archive.'.format(src))
else:
parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
elif args.list:
src = args.list
if is_tarfile(src):
with TarFile.open(src, 'r:*') as tf:
tf.list(verbose=args.verbose)
else:
parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
elif args.extract:
if len(args.extract) == 1:
src = args.extract[0]
curdir = os.curdir
elif len(args.extract) == 2:
src, curdir = args.extract
else:
parser.exit(1, parser.format_help())
if is_tarfile(src):
with TarFile.open(src, 'r:*') as tf:
tf.extractall(path=curdir)
if args.verbose:
if curdir == '.':
msg = '{!r} file is extracted.'.format(src)
else:
msg = ('{!r} file is extracted '
'into {!r} directory.').format(src, curdir)
print(msg)
else:
parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
elif args.create:
tar_name = args.create.pop(0)
_, ext = os.path.splitext(tar_name)
compressions = {
# gz
'gz': 'gz',
'tgz': 'gz',
# xz
'xz': 'xz',
'txz': 'xz',
# bz2
'bz2': 'bz2',
'tbz': 'bz2',
'tbz2': 'bz2',
'tb2': 'bz2',
}
tar_mode = 'w:' + compressions[ext] if ext in compressions else 'w'
tar_files = args.create
with TarFile.open(tar_name, tar_mode) as tf:
for file_name in tar_files:
tf.add(file_name)
if args.verbose:
print('{!r} file created.'.format(tar_name))
else:
parser.exit(1, parser.format_help())
if __name__ == '__main__':
main()
| gpl-2.0 | -5,541,616,673,173,590,000 | 35.111552 | 103 | 0.540285 | false |
marcoantoniooliveira/labweb | oscar/lib/python2.7/site-packages/whoosh/query/terms.py | 39 | 17708 | # Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import division
import copy
import fnmatch
import re
from collections import defaultdict
from whoosh import matching
from whoosh.analysis import Token
from whoosh.compat import bytes_type, text_type, u
from whoosh.lang.morph_en import variations
from whoosh.query import qcore
class Term(qcore.Query):
"""Matches documents containing the given term (fieldname+text pair).
>>> Term("content", u"render")
"""
__inittypes__ = dict(fieldname=str, text=text_type, boost=float)
def __init__(self, fieldname, text, boost=1.0, minquality=None):
self.fieldname = fieldname
self.text = text
self.boost = boost
self.minquality = minquality
def __eq__(self, other):
return (other
and self.__class__ is other.__class__
and self.fieldname == other.fieldname
and self.text == other.text
and self.boost == other.boost)
def __repr__(self):
r = "%s(%r, %r" % (self.__class__.__name__, self.fieldname, self.text)
if self.boost != 1.0:
r += ", boost=%s" % self.boost
r += ")"
return r
def __unicode__(self):
text = self.text
if isinstance(text, bytes_type):
try:
text = text.decode("ascii")
except UnicodeDecodeError:
text = repr(text)
t = u("%s:%s") % (self.fieldname, text)
if self.boost != 1:
t += u("^") + text_type(self.boost)
return t
__str__ = __unicode__
def __hash__(self):
return hash(self.fieldname) ^ hash(self.text) ^ hash(self.boost)
def has_terms(self):
return True
def tokens(self, boost=1.0):
yield Token(fieldname=self.fieldname, text=self.text,
boost=boost * self.boost, startchar=self.startchar,
endchar=self.endchar, chars=True)
def terms(self, phrases=False):
if self.field():
yield (self.field(), self.text)
def replace(self, fieldname, oldtext, newtext):
q = copy.copy(self)
if q.fieldname == fieldname and q.text == oldtext:
q.text = newtext
return q
def estimate_size(self, ixreader):
fieldname = self.fieldname
if fieldname not in ixreader.schema:
return 0
field = ixreader.schema[fieldname]
try:
text = field.to_bytes(self.text)
except ValueError:
return 0
return ixreader.doc_frequency(fieldname, text)
def matcher(self, searcher, context=None):
fieldname = self.fieldname
text = self.text
if fieldname not in searcher.schema:
return matching.NullMatcher()
field = searcher.schema[fieldname]
try:
text = field.to_bytes(text)
except ValueError:
return matching.NullMatcher()
if (self.fieldname, text) in searcher.reader():
if context is None:
w = searcher.weighting
else:
w = context.weighting
m = searcher.postings(self.fieldname, text, weighting=w)
if self.minquality:
m.set_min_quality(self.minquality)
if self.boost != 1.0:
m = matching.WrappingMatcher(m, boost=self.boost)
return m
else:
return matching.NullMatcher()
class MultiTerm(qcore.Query):
"""Abstract base class for queries that operate on multiple terms in the
same field.
"""
constantscore = False
def _btexts(self, ixreader):
raise NotImplementedError(self.__class__.__name__)
def expanded_terms(self, ixreader, phrases=False):
fieldname = self.field()
if fieldname:
for btext in self._btexts(ixreader):
yield (fieldname, btext)
def tokens(self, boost=1.0, exreader=None):
fieldname = self.field()
if exreader is None:
btexts = [self.text]
else:
btexts = self._btexts(exreader)
for btext in btexts:
yield Token(fieldname=fieldname, text=btext,
boost=boost * self.boost, startchar=self.startchar,
endchar=self.endchar, chars=True)
def simplify(self, ixreader):
fieldname = self.field()
if fieldname not in ixreader.schema:
return qcore.NullQuery()
field = ixreader.schema[fieldname]
existing = []
for btext in sorted(set(self._btexts(ixreader))):
text = field.from_bytes(btext)
existing.append(Term(fieldname, text, boost=self.boost))
if len(existing) == 1:
return existing[0]
elif existing:
from whoosh.query import Or
return Or(existing)
else:
return qcore.NullQuery
def estimate_size(self, ixreader):
fieldname = self.field()
return sum(ixreader.doc_frequency(fieldname, btext)
for btext in self._btexts(ixreader))
def estimate_min_size(self, ixreader):
fieldname = self.field()
return min(ixreader.doc_frequency(fieldname, text)
for text in self._btexts(ixreader))
def matcher(self, searcher, context=None):
from whoosh.query import Or
fieldname = self.field()
constantscore = self.constantscore
reader = searcher.reader()
qs = [Term(fieldname, word) for word in self._btexts(reader)]
if not qs:
return matching.NullMatcher()
if len(qs) == 1:
# If there's only one term, just use it
m = qs[0].matcher(searcher, context)
else:
if constantscore:
# To tell the sub-query that score doesn't matter, set weighting
# to None
if context:
context = context.set(weighting=None)
else:
from whoosh.searching import SearchContext
context = SearchContext(weighting=None)
# Or the terms together
m = Or(qs, boost=self.boost).matcher(searcher, context)
return m
class PatternQuery(MultiTerm):
"""An intermediate base class for common methods of Prefix and Wildcard.
"""
__inittypes__ = dict(fieldname=str, text=text_type, boost=float)
def __init__(self, fieldname, text, boost=1.0, constantscore=True):
self.fieldname = fieldname
self.text = text
self.boost = boost
self.constantscore = constantscore
def __eq__(self, other):
return (other and self.__class__ is other.__class__
and self.fieldname == other.fieldname
and self.text == other.text and self.boost == other.boost
and self.constantscore == other.constantscore)
def __repr__(self):
r = "%s(%r, %r" % (self.__class__.__name__, self.fieldname, self.text)
if self.boost != 1:
r += ", boost=%s" % self.boost
r += ")"
return r
def __hash__(self):
return (hash(self.fieldname) ^ hash(self.text) ^ hash(self.boost)
^ hash(self.constantscore))
def _get_pattern(self):
raise NotImplementedError
def _find_prefix(self, text):
# Subclasses/instances should set the SPECIAL_CHARS attribute to a set
# of characters that mark the end of the literal prefix
specialchars = self.SPECIAL_CHARS
i = 0
for i, char in enumerate(text):
if char in specialchars:
break
return text[:i]
def _btexts(self, ixreader):
field = ixreader.schema[self.fieldname]
exp = re.compile(self._get_pattern())
prefix = self._find_prefix(self.text)
if prefix:
candidates = ixreader.expand_prefix(self.fieldname, prefix)
else:
candidates = ixreader.lexicon(self.fieldname)
from_bytes = field.from_bytes
for btext in candidates:
text = from_bytes(btext)
if exp.match(text):
yield btext
class Prefix(PatternQuery):
"""Matches documents that contain any terms that start with the given text.
>>> # Match documents containing words starting with 'comp'
>>> Prefix("content", u"comp")
"""
def __unicode__(self):
return "%s:%s*" % (self.fieldname, self.text)
__str__ = __unicode__
def _btexts(self, ixreader):
return ixreader.expand_prefix(self.fieldname, self.text)
def matcher(self, searcher, context=None):
if self.text == "":
from whoosh.query import Every
eq = Every(self.fieldname, boost=self.boost)
return eq.matcher(searcher, context)
else:
return PatternQuery.matcher(self, searcher, context)
class Wildcard(PatternQuery):
"""Matches documents that contain any terms that match a "glob" pattern.
See the Python ``fnmatch`` module for information about globs.
>>> Wildcard("content", u"in*f?x")
"""
SPECIAL_CHARS = frozenset("*?[")
def __unicode__(self):
return "%s:%s" % (self.fieldname, self.text)
__str__ = __unicode__
def _get_pattern(self):
return fnmatch.translate(self.text)
def normalize(self):
# If there are no wildcard characters in this "wildcard", turn it into
# a simple Term
text = self.text
if text == "*":
from whoosh.query import Every
return Every(self.fieldname, boost=self.boost)
if "*" not in text and "?" not in text:
# If no wildcard chars, convert to a normal term.
return Term(self.fieldname, self.text, boost=self.boost)
elif ("?" not in text and text.endswith("*")
and text.find("*") == len(text) - 1):
# If the only wildcard char is an asterisk at the end, convert to a
# Prefix query.
return Prefix(self.fieldname, self.text[:-1], boost=self.boost)
else:
return self
def matcher(self, searcher, context=None):
if self.text == "*":
from whoosh.query import Every
eq = Every(self.fieldname, boost=self.boost)
return eq.matcher(searcher, context)
else:
return PatternQuery.matcher(self, searcher, context)
# _btexts() implemented in PatternQuery
class Regex(PatternQuery):
"""Matches documents that contain any terms that match a regular
expression. See the Python ``re`` module for information about regular
expressions.
"""
SPECIAL_CHARS = frozenset("{}()[].?*+^$\\")
def __unicode__(self):
return '%s:r"%s"' % (self.fieldname, self.text)
__str__ = __unicode__
def _get_pattern(self):
return self.text
def _find_prefix(self, text):
if "|" in text:
return ""
if text.startswith("^"):
text = text[1:]
elif text.startswith("\\A"):
text = text[2:]
prefix = PatternQuery._find_prefix(self, text)
lp = len(prefix)
if lp < len(text) and text[lp] in "*?":
# we stripped something starting from * or ? - they both MAY mean
# "0 times". As we had stripped starting from FIRST special char,
# that implies there were only ordinary chars left of it. Thus,
# the very last of them is not part of the real prefix:
prefix = prefix[:-1]
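            # e.g. for "abc*def" the usable prefix is "ab", not "abc",
            # because "c*" may match zero occurrences of "c".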
return prefix
def matcher(self, searcher, context=None):
if self.text == ".*":
from whoosh.query import Every
eq = Every(self.fieldname, boost=self.boost)
return eq.matcher(searcher, context)
else:
return PatternQuery.matcher(self, searcher, context)
# _btexts() implemented in PatternQuery
class ExpandingTerm(MultiTerm):
"""Intermediate base class for queries such as FuzzyTerm and Variations
that expand into multiple queries, but come from a single term.
"""
def has_terms(self):
return True
def terms(self, phrases=False):
if self.field():
yield (self.field(), self.text)
class FuzzyTerm(ExpandingTerm):
"""Matches documents containing words similar to the given term.
"""
__inittypes__ = dict(fieldname=str, text=text_type, boost=float,
maxdist=float, prefixlength=int)
def __init__(self, fieldname, text, boost=1.0, maxdist=1,
prefixlength=1, constantscore=True):
"""
:param fieldname: The name of the field to search.
:param text: The text to search for.
:param boost: A boost factor to apply to scores of documents matching
this query.
:param maxdist: The maximum edit distance from the given text.
:param prefixlength: The matched terms must share this many initial
characters with 'text'. For example, if text is "light" and
prefixlength is 2, then only terms starting with "li" are checked
for similarity.
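        For example, FuzzyTerm("content", "light", maxdist=1,
        prefixlength=2) matches indexed terms that start with "li" and are
        within one edit of "light", such as "light" and "lights".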
"""
self.fieldname = fieldname
self.text = text
self.boost = boost
self.maxdist = maxdist
self.prefixlength = prefixlength
self.constantscore = constantscore
def __eq__(self, other):
return (other and self.__class__ is other.__class__
and self.fieldname == other.fieldname
and self.text == other.text
and self.maxdist == other.maxdist
and self.prefixlength == other.prefixlength
and self.boost == other.boost
and self.constantscore == other.constantscore)
def __repr__(self):
r = "%s(%r, %r, boost=%f, maxdist=%d, prefixlength=%d)"
return r % (self.__class__.__name__, self.fieldname, self.text,
self.boost, self.maxdist, self.prefixlength)
def __unicode__(self):
r = u("%s:%s") % (self.fieldname, self.text) + u("~")
if self.maxdist > 1:
r += u("%d") % self.maxdist
if self.boost != 1.0:
r += u("^%f") % self.boost
return r
__str__ = __unicode__
def __hash__(self):
return (hash(self.fieldname) ^ hash(self.text) ^ hash(self.boost)
^ hash(self.maxdist) ^ hash(self.prefixlength)
^ hash(self.constantscore))
def _btexts(self, ixreader):
return ixreader.terms_within(self.fieldname, self.text, self.maxdist,
prefix=self.prefixlength)
class Variations(ExpandingTerm):
"""Query that automatically searches for morphological variations of the
given word in the same field.
"""
def __init__(self, fieldname, text, boost=1.0):
self.fieldname = fieldname
self.text = text
self.boost = boost
def __repr__(self):
r = "%s(%r, %r" % (self.__class__.__name__, self.fieldname, self.text)
if self.boost != 1:
r += ", boost=%s" % self.boost
r += ")"
return r
def __eq__(self, other):
return (other and self.__class__ is other.__class__
and self.fieldname == other.fieldname
and self.text == other.text and self.boost == other.boost)
def __hash__(self):
return hash(self.fieldname) ^ hash(self.text) ^ hash(self.boost)
def _btexts(self, ixreader):
fieldname = self.fieldname
to_bytes = ixreader.schema[fieldname].to_bytes
for word in variations(self.text):
try:
btext = to_bytes(word)
except ValueError:
continue
if (fieldname, btext) in ixreader:
yield btext
def __unicode__(self):
return u("%s:<%s>") % (self.fieldname, self.text)
__str__ = __unicode__
def replace(self, fieldname, oldtext, newtext):
q = copy.copy(self)
if q.fieldname == fieldname and q.text == oldtext:
q.text = newtext
return q
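# --- Illustrative usage sketch (not part of the original module) ---
# Hedged examples of how the classes above compose; no index is needed,
# since normalize() and __unicode__ are pure query rewrites.
def _demo_queries():
    # A bare "*" wildcard normalizes to an Every query over the field.
    assert Wildcard("content", u("*")).normalize().__class__.__name__ == "Every"
    # A trailing asterisk with no other special chars becomes a Prefix.
    assert isinstance(Wildcard("content", u("rend*")).normalize(), Prefix)
    # A pattern with no wildcard characters collapses to a plain Term.
    assert isinstance(Wildcard("content", u("render")).normalize(), Term)
    # Boosts show up in the string form, e.g. "content:render^2.0".
    return text_type(Term("content", u("render"), boost=2.0))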
| bsd-3-clause | -7,170,904,261,926,309,000 | 32.537879 | 80 | 0.589169 | false |
duncanwp/iris | lib/iris/experimental/__init__.py | 17 | 1074 | # (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Experimental code can be introduced to Iris through this package.
Changes to experimental code may be more extensive than in the rest of the
codebase. The code is expected to graduate, eventually, to "full status".
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
| lgpl-3.0 | -4,453,205,441,519,182,000 | 40.307692 | 74 | 0.75419 | false |
veger/ansible | lib/ansible/modules/cloud/azure/azure_rm_containerinstance_facts.py | 33 | 9452 | #!/usr/bin/python
#
# Copyright (c) 2017 Zim Kalinowski, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_containerinstance_facts
version_added: "2.8"
short_description: Get Azure Container Instance facts.
description:
- Get facts of Container Instance.
options:
resource_group:
description:
- The name of the resource group.
required: True
name:
description:
- The name of the container instance.
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Get specific Container Instance facts
azure_rm_containerinstance_facts:
resource_group: resource_group_name
name: container_group_name
- name: List Container Instances in a specified resource group name
azure_rm_containerinstance_facts:
resource_group: resource_group_name
'''
RETURN = '''
container_groups:
description: A list of Container Instance dictionaries.
returned: always
type: complex
contains:
id:
description:
- The resource id.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/demo/providers/Microsoft.ContainerInstance/containerGroups/my
containers"
resource_group:
description:
- Resource group where the container exists.
returned: always
type: str
sample: testrg
name:
description:
- The resource name.
returned: always
type: str
sample: mycontainers
location:
description:
- The resource location.
returned: always
type: str
sample: westus
os_type:
description:
- The OS type of containers.
returned: always
type: str
sample: linux
ip_address:
description:
- IP address of the container instance.
returned: always
type: str
sample: 173.15.18.1
ports:
description:
- List of ports exposed by the container instance.
returned: always
type: list
sample: [ 80, 81 ]
containers:
description:
- The containers within the container group.
returned: always
type: complex
sample: containers
contains:
name:
description:
- The name of the container instance.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/demo/providers/Microsoft.ContainerInstance/containerGroups/my
containers"
image:
description:
- The container image name.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/demo/providers/Microsoft.ContainerInstance/containerGroups/my
containers"
memory:
description:
- The required memory of the containers in GB.
returned: always
type: float
sample: 1.5
cpu:
description:
- The required number of CPU cores of the containers.
returned: always
type: int
sample: 1
ports:
description:
- List of ports exposed within the container group.
returned: always
type: list
sample: [ 80, 81 ]
tags:
description: Tags assigned to the resource. Dictionary of string:string pairs.
type: dict
sample: { "tag1": "abc" }
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from azure.mgmt.containerinstance import ContainerInstanceManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMContainerInstanceFacts(AzureRMModuleBase):
def __init__(self):
        # define user inputs into the argument spec
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str'
),
tags=dict(
type='list'
)
)
# store the results of the module operation
self.results = dict(
changed=False,
ansible_facts=dict()
)
self.resource_group = None
self.name = None
super(AzureRMContainerInstanceFacts, self).__init__(self.module_arg_spec, supports_tags=False)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if (self.name is not None):
self.results['containerinstances'] = self.get()
elif (self.resource_group is not None):
self.results['containerinstances'] = self.list_by_resource_group()
else:
self.results['containerinstances'] = self.list_all()
return self.results
def get(self):
response = None
results = []
try:
response = self.containerinstance_client.container_groups.get(resource_group_name=self.resource_group,
container_group_name=self.name)
self.log("Response : {0}".format(response))
except CloudError as e:
self.log('Could not get facts for Container Instances.')
if response is not None and self.has_tags(response.tags, self.tags):
results.append(self.format_item(response))
return results
def list_by_resource_group(self):
response = None
results = []
try:
response = self.containerinstance_client.container_groups.list_by_resource_group(resource_group_name=self.resource_group)
self.log("Response : {0}".format(response))
except CloudError as e:
self.fail('Could not list facts for Container Instances.')
if response is not None:
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(self.format_item(item))
return results
def list_all(self):
response = None
results = []
try:
response = self.containerinstance_client.container_groups.list()
self.log("Response : {0}".format(response))
except CloudError as e:
self.fail('Could not list facts for Container Instances.')
if response is not None:
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(self.format_item(item))
return results
def format_item(self, item):
d = item.as_dict()
containers = d['containers']
ports = d['ip_address']['ports']
resource_group = d['id'].split('resourceGroups/')[1].split('/')[0]
for port_index in range(len(ports)):
ports[port_index] = ports[port_index]['port']
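        # e.g. [{'port': 80}, {'port': 81}] flattens to [80, 81]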
for container_index in range(len(containers)):
old_container = containers[container_index]
new_container = {
'name': old_container['name'],
'image': old_container['image'],
'memory': old_container['resources']['requests']['memory_in_gb'],
'cpu': old_container['resources']['requests']['cpu'],
'ports': []
}
for port_index in range(len(old_container['ports'])):
new_container['ports'].append(old_container['ports'][port_index]['port'])
containers[container_index] = new_container
d = {
'id': d['id'],
'resource_group': resource_group,
'name': d['name'],
'os_type': d['os_type'],
'ip_address': 'public' if d['ip_address']['type'] == 'Public' else 'none',
'ports': ports,
'location': d['location'],
'containers': containers,
'tags': d.get('tags', None)
}
return d
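# --- Illustrative sketch (not part of the original module) ---
# format_item() only reads plain dict data, so its flattening behaviour can
# be shown without Azure credentials. The stub class and the raw dict below
# are made-up examples shaped like ContainerGroup.as_dict() output.
def _demo_format_item():
    class _Stub(object):
        def __init__(self, d):
            self._d = d
        def as_dict(self):
            return self._d
    raw = {
        'id': ('/subscriptions/xxxx/resourceGroups/demo/providers/'
               'Microsoft.ContainerInstance/containerGroups/mycontainers'),
        'name': 'mycontainers',
        'os_type': 'Linux',
        'location': 'westus',
        'ip_address': {'type': 'Public', 'ports': [{'port': 80}]},
        'containers': [{
            'name': 'web',
            'image': 'nginx',
            'ports': [{'port': 80}],
            'resources': {'requests': {'memory_in_gb': 1.5, 'cpu': 1}},
        }],
        'tags': {'env': 'demo'},
    }
    # format_item never touches self, so under Python 3 it can be called
    # unbound with None in place of an instance.
    facts = AzureRMContainerInstanceFacts.format_item(None, _Stub(raw))
    assert facts['ports'] == [80] and facts['resource_group'] == 'demo'
    return facts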
def main():
AzureRMContainerInstanceFacts()
if __name__ == '__main__':
main()
| gpl-3.0 | 6,795,814,746,785,511,000 | 32.164912 | 157 | 0.544964 | false |
chirilo/remo | vendor-local/lib/python/rest_framework/utils/serializer_helpers.py | 18 | 3752 | from __future__ import unicode_literals
import collections
from rest_framework.compat import OrderedDict, unicode_to_repr
class ReturnDict(OrderedDict):
"""
    Return object from `serializer.data` for the `Serializer` class.
Includes a backlink to the serializer instance for renderers
to use if they need richer field information.
"""
def __init__(self, *args, **kwargs):
self.serializer = kwargs.pop('serializer')
super(ReturnDict, self).__init__(*args, **kwargs)
def copy(self):
return ReturnDict(self, serializer=self.serializer)
def __repr__(self):
return dict.__repr__(self)
def __reduce__(self):
# Pickling these objects will drop the .serializer backlink,
# but preserve the raw data.
return (dict, (dict(self),))
class ReturnList(list):
"""
    Return object from `serializer.data` for the `SerializerList` class.
Includes a backlink to the serializer instance for renderers
to use if they need richer field information.
"""
def __init__(self, *args, **kwargs):
self.serializer = kwargs.pop('serializer')
super(ReturnList, self).__init__(*args, **kwargs)
def __repr__(self):
return list.__repr__(self)
def __reduce__(self):
# Pickling these objects will drop the .serializer backlink,
# but preserve the raw data.
return (list, (list(self),))
class BoundField(object):
"""
A field object that also includes `.value` and `.error` properties.
Returned when iterating over a serializer instance,
providing an API similar to Django forms and form fields.
"""
def __init__(self, field, value, errors, prefix=''):
self._field = field
self.value = value
self.errors = errors
self.name = prefix + self.field_name
def __getattr__(self, attr_name):
return getattr(self._field, attr_name)
@property
def _proxy_class(self):
return self._field.__class__
def __repr__(self):
return unicode_to_repr('<%s value=%s errors=%s>' % (
self.__class__.__name__, self.value, self.errors
))
class NestedBoundField(BoundField):
"""
This `BoundField` additionally implements __iter__ and __getitem__
in order to support nested bound fields. This class is the type of
`BoundField` that is used for serializer fields.
"""
def __iter__(self):
for field in self.fields.values():
yield self[field.field_name]
def __getitem__(self, key):
field = self.fields[key]
value = self.value.get(key) if self.value else None
error = self.errors.get(key) if self.errors else None
if hasattr(field, 'fields'):
return NestedBoundField(field, value, error, prefix=self.name + '.')
return BoundField(field, value, error, prefix=self.name + '.')
class BindingDict(collections.MutableMapping):
"""
This dict-like object is used to store fields on a serializer.
This ensures that whenever fields are added to the serializer we call
`field.bind()` so that the `field_name` and `parent` attributes
can be set correctly.
"""
def __init__(self, serializer):
self.serializer = serializer
self.fields = OrderedDict()
def __setitem__(self, key, field):
self.fields[key] = field
field.bind(field_name=key, parent=self.serializer)
def __getitem__(self, key):
return self.fields[key]
def __delitem__(self, key):
del self.fields[key]
def __iter__(self):
return iter(self.fields)
def __len__(self):
return len(self.fields)
def __repr__(self):
return dict.__repr__(self.fields)
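# Minimal usage sketch (illustrative, not part of this module): iterating a
# serializer instance yields BoundField objects, giving a Django-forms-like API:
#
#     for field in serializer:
#         print(field.name, field.value, field.errors)
#
# A nested serializer field comes back as a NestedBoundField, which supports
# the same iteration and key lookup one level down.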
| bsd-3-clause | 2,045,609,699,276,944,400 | 30.266667 | 80 | 0.624733 | false |
krdlab/ansible-modules-core | cloud/amazon/ec2_key.py | 51 | 7841 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_key
version_added: "1.5"
short_description: maintain an ec2 key pair.
description:
- maintains ec2 key pairs. This module has a dependency on python-boto >= 2.5
options:
name:
description:
- Name of the key pair.
required: true
key_material:
description:
- Public key material.
required: false
state:
description:
- create or delete keypair
required: false
default: 'present'
aliases: []
wait:
description:
- Wait for the specified action to complete before returning.
required: false
default: false
aliases: []
version_added: "1.6"
wait_timeout:
description:
- How long before wait gives up, in seconds
required: false
default: 300
aliases: []
version_added: "1.6"
extends_documentation_fragment:
- aws
- ec2
author: "Vincent Viallet (@zbal)"
'''
EXAMPLES = '''
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Creates a new ec2 key pair named `example` if not present, returns generated
# private key
- name: example ec2 key
ec2_key:
name: example
# Creates a new ec2 key pair named `example` if not present using provided key
# material. This could use the 'file' lookup plugin to pull this off disk.
- name: example2 ec2 key
ec2_key:
name: example2
key_material: 'ssh-rsa AAAAxyz...== [email protected]'
state: present
# Creates a new ec2 key pair named `example` if not present using provided key
# material
- name: example3 ec2 key
ec2_key:
name: example3
key_material: "{{ item }}"
with_file: /path/to/public_key.id_rsa.pub
# Removes ec2 key pair by name
- name: remove example key
ec2_key:
name: example
state: absent
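# Creates a key pair and waits until it is available, using the documented
# 'wait' options (the name and timeout here are only illustrative)
- name: example4 ec2 key with wait
  ec2_key:
    name: example4
    wait: yes
    wait_timeout: 60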
'''
try:
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
import random
import string
import time
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
key_material=dict(required=False),
state = dict(default='present', choices=['present', 'absent']),
wait = dict(type='bool', default=False),
wait_timeout = dict(default=300),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
name = module.params['name']
state = module.params.get('state')
key_material = module.params.get('key_material')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
changed = False
ec2 = ec2_connect(module)
# find the key if present
key = ec2.get_key_pair(name)
# Ensure requested key is absent
if state == 'absent':
if key:
'''found a match, delete it'''
if not module.check_mode:
try:
key.delete()
if wait:
start = time.time()
action_complete = False
while (time.time() - start) < wait_timeout:
if not ec2.get_key_pair(name):
action_complete = True
break
time.sleep(1)
if not action_complete:
module.fail_json(msg="timed out while waiting for the key to be removed")
except Exception, e:
module.fail_json(msg="Unable to delete key pair '%s' - %s" % (key, e))
key = None
changed = True
# Ensure requested key is present
elif state == 'present':
if key:
# existing key found
if key_material:
# EC2's fingerprints are non-trivial to generate, so push this key
# to a temporary name and make ec2 calculate the fingerprint for us.
#
# http://blog.jbrowne.com/?p=23
# https://forums.aws.amazon.com/thread.jspa?messageID=352828
# find an unused name
test = 'empty'
while test:
randomchars = [random.choice(string.ascii_letters + string.digits) for x in range(0,10)]
tmpkeyname = "ansible-" + ''.join(randomchars)
test = ec2.get_key_pair(tmpkeyname)
# create tmp key
tmpkey = ec2.import_key_pair(tmpkeyname, key_material)
# get tmp key fingerprint
tmpfingerprint = tmpkey.fingerprint
# delete tmp key
tmpkey.delete()
if key.fingerprint != tmpfingerprint:
if not module.check_mode:
key.delete()
key = ec2.import_key_pair(name, key_material)
if wait:
start = time.time()
action_complete = False
while (time.time() - start) < wait_timeout:
if ec2.get_key_pair(name):
action_complete = True
break
time.sleep(1)
if not action_complete:
module.fail_json(msg="timed out while waiting for the key to be re-created")
changed = True
pass
# if the key doesn't exist, create it now
else:
'''no match found, create it'''
if not module.check_mode:
if key_material:
'''We are providing the key, need to import'''
key = ec2.import_key_pair(name, key_material)
else:
'''
No material provided, let AWS handle the key creation and
retrieve the private key
'''
key = ec2.create_key_pair(name)
if wait:
start = time.time()
action_complete = False
while (time.time() - start) < wait_timeout:
if ec2.get_key_pair(name):
action_complete = True
break
time.sleep(1)
if not action_complete:
module.fail_json(msg="timed out while waiting for the key to be created")
changed = True
if key:
data = {
'name': key.name,
'fingerprint': key.fingerprint
}
if key.material:
data.update({'private_key': key.material})
module.exit_json(changed=changed, key=data)
else:
module.exit_json(changed=changed, key=None)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 | -1,606,291,014,715,016,700 | 31.135246 | 108 | 0.546869 | false |
yx91490/pgcli | pgcli/packages/counter.py | 20 | 6273 | #copied from http://code.activestate.com/recipes/576611-counter-class/
from operator import itemgetter
from heapq import nlargest
from itertools import repeat, ifilter
class Counter(dict):
'''Dict subclass for counting hashable objects. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> Counter('zyzygy')
Counter({'y': 3, 'z': 2, 'g': 1})
'''
def __init__(self, iterable=None, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
self.update(iterable, **kwds)
def __missing__(self, key):
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abracadabra').most_common(3)
[('a', 5), ('r', 2), ('b', 2)]
'''
if n is None:
return sorted(self.iteritems(), key=itemgetter(1), reverse=True)
return nlargest(n, self.iteritems(), key=itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
If an element's count has been set to zero or is a negative number,
elements() will ignore it.
'''
for elem, count in self.iteritems():
for _ in repeat(None, count):
yield elem
# Override dict methods where the meaning changes for Counter objects.
@classmethod
def fromkeys(cls, iterable, v=None):
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(self, iterable=None, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
if iterable is not None:
if hasattr(iterable, 'iteritems'):
if self:
self_get = self.get
for elem, count in iterable.iteritems():
self[elem] = self_get(elem, 0) + count
else:
dict.update(self, iterable) # fast path when counter is empty
else:
self_get = self.get
for elem in iterable:
self[elem] = self_get(elem, 0) + 1
if kwds:
self.update(kwds)
def copy(self):
'Like dict.copy() but returns a Counter instance instead of a dict.'
return Counter(self)
def __delitem__(self, elem):
'Like dict.__delitem__() but does not raise KeyError for missing values.'
if elem in self:
dict.__delitem__(self, elem)
def __repr__(self):
if not self:
return '%s()' % self.__class__.__name__
items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
return '%s({%s})' % (self.__class__.__name__, items)
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
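    # For example (illustrative, consistent with __add__ below):
    #     >>> Counter(a=2, b=-1) + Counter()
    #     Counter({'a': 2})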
def __add__(self, other):
'''Add counts from two counters.
>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem in set(self) | set(other):
newcount = self[elem] + other[elem]
if newcount > 0:
result[elem] = newcount
return result
def __sub__(self, other):
''' Subtract count, but keep only results with positive counts.
>>> Counter('abbbc') - Counter('bccd')
Counter({'b': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem in set(self) | set(other):
newcount = self[elem] - other[elem]
if newcount > 0:
result[elem] = newcount
return result
def __or__(self, other):
'''Union is the maximum of value in either of the input counters.
>>> Counter('abbb') | Counter('bcc')
Counter({'b': 3, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
_max = max
result = Counter()
for elem in set(self) | set(other):
newcount = _max(self[elem], other[elem])
if newcount > 0:
result[elem] = newcount
return result
def __and__(self, other):
''' Intersection is the minimum of corresponding counts.
>>> Counter('abbb') & Counter('bcc')
Counter({'b': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
_min = min
result = Counter()
if len(self) < len(other):
self, other = other, self
for elem in ifilter(self.__contains__, other):
newcount = _min(self[elem], other[elem])
if newcount > 0:
result[elem] = newcount
return result
| bsd-3-clause | 746,782,599,030,463,100 | 32.545455 | 85 | 0.535629 | false |
popazerty/enigma2-4.3 | lib/python/Components/UsageConfig.py | 2 | 55113 | import os
from time import time
from enigma import eDVBDB, eEPGCache, setTunerTypePriorityOrder, setPreferredTuner, setSpinnerOnOff, setEnableTtCachingOnOff, eEnv, Misc_Options, eBackgroundFileEraser, eServiceEvent
from Components.About import about
from Components.Harddisk import harddiskmanager
from config import ConfigSubsection, ConfigYesNo, config, ConfigSelection, ConfigText, ConfigNumber, ConfigSet, ConfigLocations, NoSave, ConfigClock, ConfigInteger, ConfigBoolean, ConfigPassword, ConfigIP, ConfigSlider, ConfigSelectionNumber
from Tools.Directories import resolveFilename, SCOPE_HDD, SCOPE_TIMESHIFT, SCOPE_AUTORECORD, SCOPE_SYSETC, defaultRecordingLocation, fileExists
from boxbranding import getBoxType, getMachineBuild, getMachineName, getBrandOEM
from Components.NimManager import nimmanager
from Components.ServiceList import refreshServiceList
from SystemInfo import SystemInfo
from Tools.HardwareInfo import HardwareInfo
def InitUsageConfig():
config.misc.useNTPminutes = ConfigSelection(default = "30", choices = [("30", "30" + " " +_("minutes")), ("60", _("Hour")), ("1440", _("Once per day"))])
if getBrandOEM() == 'vuplus':
config.misc.remotecontrol_text_support = ConfigYesNo(default = True)
else:
config.misc.remotecontrol_text_support = ConfigYesNo(default = False)
config.workaround = ConfigSubsection()
config.workaround.deeprecord = ConfigYesNo(default = False)
config.usage = ConfigSubsection()
config.usage.shutdownOK = ConfigBoolean(default = True)
config.usage.shutdownNOK_action = ConfigSelection(default = "normal", choices = [("normal", _("just boot")), ("standby", _("goto standby")), ("deepstandby", _("goto deep-standby"))])
config.usage.boot_action = ConfigSelection(default = "normal", choices = [("normal", _("just boot")), ("standby", _("goto standby"))])
config.usage.showdish = ConfigSelection(default = "flashing", choices = [("flashing", _("Flashing")), ("normal", _("Not Flashing")), ("off", _("Off"))])
config.usage.multibouquet = ConfigYesNo(default = True)
config.usage.alternative_number_mode = ConfigYesNo(default = False)
def alternativeNumberModeChange(configElement):
eDVBDB.getInstance().setNumberingMode(configElement.value)
refreshServiceList()
config.usage.alternative_number_mode.addNotifier(alternativeNumberModeChange)
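	# Note: addNotifier() registers a callback that the config machinery runs
	# whenever the element's value changes (and, by default, once immediately
	# at registration); with immediate_feedback=False the call is deferred
	# until the new value is actually confirmed/saved. So the line above
	# re-runs alternativeNumberModeChange() on every change of the setting.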
config.usage.crypto_icon_mode = ConfigSelection(default = "0", choices = [("0", _("None")), ("1", _("Left from servicename")), ("2", _("Right from servicename"))])
config.usage.crypto_icon_mode.addNotifier(refreshServiceList)
config.usage.panicbutton = ConfigYesNo(default = False)
config.usage.servicetype_icon_mode = ConfigSelection(default = "0", choices = [("0", _("None")), ("1", _("Left from servicename")), ("2", _("Right from servicename"))])
config.usage.servicetype_icon_mode.addNotifier(refreshServiceList)
choicelist = [("-1", _("Divide")), ("0", _("Disable"))]
for i in range(100,1300,100):
choicelist.append(("%d" % i, ngettext("%d pixel wide", "%d pixels wide", i) % i))
config.usage.servicelist_column = ConfigSelection(default="0", choices=choicelist)
config.usage.servicelist_column.addNotifier(refreshServiceList)
config.usage.service_icon_enable = ConfigYesNo(default = False)
config.usage.service_icon_enable.addNotifier(refreshServiceList)
config.usage.servicelist_cursor_behavior = ConfigSelection(default = "keep", choices = [
("standard", _("Standard")),
("keep", _("Keep service")),
("reverseB", _("Reverse bouquet buttons")),
("keep reverseB", _("Keep service") + " + " + _("Reverse bouquet buttons"))])
config.usage.servicelist_keep_service = ConfigYesNo(default = True)
config.usage.multiepg_ask_bouquet = ConfigYesNo(default = False)
config.usage.showpicon = ConfigYesNo(default = True)
config.usage.show_dvdplayer = ConfigYesNo(default = False)
config.usage.quickzap_bouquet_change = ConfigYesNo(default = False)
config.usage.e1like_radio_mode = ConfigYesNo(default = True)
choicelist = []
for i in range(1, 11):
choicelist.append(("%d" % i, ngettext("%d second", "%d seconds", i) % i))
config.usage.infobar_timeout = ConfigSelection(default = "5", choices = [("0", _("No timeout"))] + choicelist)
config.usage.show_infobar_on_zap = ConfigYesNo(default = True)
config.usage.show_infobar_on_skip = ConfigYesNo(default = True)
config.usage.show_infobar_on_event_change = ConfigYesNo(default = False)
config.usage.show_infobar_channel_number = ConfigYesNo(default = False)
config.usage.show_infobar_lite = ConfigYesNo(default = False)
config.usage.show_second_infobar = ConfigSelection(default = "2", choices = [("0", _("Off")), ("1", _("Event Info")), ("2", _("2nd Infobar INFO"))])
config.usage.second_infobar_timeout = ConfigSelection(default = "0", choices = [("0", _("No timeout"))] + choicelist)
def showsecondinfobarChanged(configElement):
if config.usage.show_second_infobar.value != "INFOBAREPG":
SystemInfo["InfoBarEpg"] = True
else:
SystemInfo["InfoBarEpg"] = False
config.usage.show_second_infobar.addNotifier(showsecondinfobarChanged, immediate_feedback = True)
config.usage.infobar_frontend_source = ConfigSelection(default = "tuner", choices = [("settings", _("LameDB")), ("tuner", _("Tuner"))])
config.usage.show_picon_bkgrn = ConfigSelection(default = "transparent", choices = [("none", _("Disabled")), ("transparent", _("Transparent")), ("blue", _("Blue")), ("red", _("Red")), ("black", _("Black")), ("white", _("White")), ("lightgrey", _("Light Grey")), ("grey", _("Grey"))])
config.usage.show_spinner = ConfigYesNo(default = True)
config.usage.enable_tt_caching = ConfigYesNo(default = True)
config.usage.sort_settings = ConfigYesNo(default = False)
config.usage.sort_menus = ConfigYesNo(default = False)
config.usage.sort_pluginlist = ConfigYesNo(default = True)
config.usage.sort_extensionslist = ConfigYesNo(default = False)
config.usage.movieplayer_pvrstate = ConfigYesNo(default = False)
choicelist = []
for i in (10, 30):
choicelist.append(("%d" % i, ngettext("%d second", "%d seconds", i) % i))
for i in (60, 120, 300, 600, 1200, 1800):
m = i / 60
choicelist.append(("%d" % i, ngettext("%d minute", "%d minutes", m) % m))
for i in (3600, 7200, 14400):
h = i / 3600
choicelist.append(("%d" % i, ngettext("%d hour", "%d hours", h) % h))
config.usage.hdd_standby = ConfigSelection(default = "60", choices = [("0", _("No standby"))] + choicelist)
config.usage.output_12V = ConfigSelection(default = "do not change", choices = [
("do not change", _("Do not change")), ("off", _("Off")), ("on", _("On")) ])
config.usage.pip_zero_button = ConfigSelection(default = "swapstop", choices = [
("standard", _("Standard")), ("swap", _("Swap PiP and main picture")),
("swapstop", _("Move PiP to main picture")), ("stop", _("Stop PiP")) ])
config.usage.pip_hideOnExit = ConfigSelection(default = "without popup", choices = [
("no", _("No")), ("popup", _("With popup")), ("without popup", _("Without popup")) ])
if not os.path.exists(resolveFilename(SCOPE_HDD)):
try:
os.mkdir(resolveFilename(SCOPE_HDD),0755)
except:
pass
config.usage.default_path = ConfigText(default = resolveFilename(SCOPE_HDD))
if not config.usage.default_path.value.endswith('/'):
tmpvalue = config.usage.default_path.value
config.usage.default_path.setValue(tmpvalue + '/')
config.usage.default_path.save()
def defaultpathChanged(configElement):
tmpvalue = config.usage.default_path.value
try:
if not os.path.exists(tmpvalue):
os.system("mkdir -p %s" %tmpvalue)
except:
print "Failed to create recording path: %s" %tmpvalue
if not config.usage.default_path.value.endswith('/'):
config.usage.default_path.setValue(tmpvalue + '/')
config.usage.default_path.save()
config.usage.default_path.addNotifier(defaultpathChanged, immediate_feedback = False)
config.usage.timer_path = ConfigText(default = "<default>")
config.usage.instantrec_path = ConfigText(default = "<default>")
if not os.path.exists(resolveFilename(SCOPE_TIMESHIFT)):
try:
os.mkdir(resolveFilename(SCOPE_TIMESHIFT),0755)
except:
pass
config.usage.timeshift_path = ConfigText(default = resolveFilename(SCOPE_TIMESHIFT))
	if not config.usage.timeshift_path.value.endswith('/'):
tmpvalue = config.usage.timeshift_path.value
config.usage.timeshift_path.setValue(tmpvalue + '/')
config.usage.timeshift_path.save()
def timeshiftpathChanged(configElement):
if not config.usage.timeshift_path.value.endswith('/'):
tmpvalue = config.usage.timeshift_path.value
config.usage.timeshift_path.setValue(tmpvalue + '/')
config.usage.timeshift_path.save()
config.usage.timeshift_path.addNotifier(timeshiftpathChanged, immediate_feedback = False)
config.usage.allowed_timeshift_paths = ConfigLocations(default = [resolveFilename(SCOPE_TIMESHIFT)])
if not os.path.exists(resolveFilename(SCOPE_AUTORECORD)):
try:
os.mkdir(resolveFilename(SCOPE_AUTORECORD),0755)
except:
pass
config.usage.autorecord_path = ConfigText(default = resolveFilename(SCOPE_AUTORECORD))
	if not config.usage.autorecord_path.value.endswith('/'):
tmpvalue = config.usage.autorecord_path.value
config.usage.autorecord_path.setValue(tmpvalue + '/')
config.usage.autorecord_path.save()
def autorecordpathChanged(configElement):
if not config.usage.autorecord_path.value.endswith('/'):
tmpvalue = config.usage.autorecord_path.value
config.usage.autorecord_path.setValue(tmpvalue + '/')
config.usage.autorecord_path.save()
config.usage.autorecord_path.addNotifier(autorecordpathChanged, immediate_feedback = False)
config.usage.allowed_autorecord_paths = ConfigLocations(default = [resolveFilename(SCOPE_AUTORECORD)])
config.usage.movielist_trashcan = ConfigYesNo(default=True)
config.usage.movielist_trashcan_network_clean = ConfigYesNo(default=False)
config.usage.movielist_trashcan_days = ConfigSelectionNumber(min = 1, max = 31, stepwidth = 1, default = 8, wraparound = True)
config.usage.movielist_trashcan_reserve = ConfigNumber(default = 40)
config.usage.on_movie_start = ConfigSelection(default = "ask", choices = [
("ask", _("Ask user")), ("resume", _("Resume from last position")), ("beginning", _("Start from the beginning")) ])
config.usage.on_movie_stop = ConfigSelection(default = "movielist", choices = [
("ask", _("Ask user")), ("movielist", _("Return to movie list")), ("quit", _("Return to previous service")) ])
config.usage.on_movie_eof = ConfigSelection(default = "movielist", choices = [
("ask", _("Ask user")), ("movielist", _("Return to movie list")), ("quit", _("Return to previous service")), ("pause", _("Pause movie at end")), ("playlist", _("Play next (return to movie list)")),
("playlistquit", _("Play next (return to previous service)")), ("loop", _("Continues play (loop)")), ("repeatcurrent", _("Repeat"))])
config.usage.next_movie_msg = ConfigYesNo(default = True)
config.usage.leave_movieplayer_onExit = ConfigSelection(default = "no", choices = [
("no", _("No")), ("popup", _("With popup")), ("without popup", _("Without popup")) ])
config.usage.setup_level = ConfigSelection(default = "expert", choices = [
("simple", _("Simple")),
("intermediate", _("Intermediate")),
("expert", _("Expert")) ])
config.usage.on_long_powerpress = ConfigSelection(default = "show_menu", choices = [
("show_menu", _("Show shutdown menu")),
("shutdown", _("Immediate shutdown")),
("standby", _("Standby")) ] )
config.usage.on_short_powerpress = ConfigSelection(default = "standby", choices = [
("show_menu", _("Show shutdown menu")),
("shutdown", _("Immediate shutdown")),
("standby", _("Standby")) ] )
choicelist = [("0", "Disabled")]
for i in (5, 30, 60, 300, 600, 900, 1200, 1800, 2700, 3600):
if i < 60:
m = ngettext("%d second", "%d seconds", i) % i
else:
m = abs(i / 60)
m = ngettext("%d minute", "%d minutes", m) % m
choicelist.append(("%d" % i, m))
config.usage.screen_saver = ConfigSelection(default = "0", choices = choicelist)
config.usage.check_timeshift = ConfigYesNo(default = True)
config.usage.alternatives_priority = ConfigSelection(default = "0", choices = [
("0", "DVB-S/-C/-T"),
("1", "DVB-S/-T/-C"),
("2", "DVB-C/-S/-T"),
("3", "DVB-C/-T/-S"),
("4", "DVB-T/-C/-S"),
("5", "DVB-T/-S/-C") ])
nims = [("-1", _("auto"))]
rec_nims = [("-2", _("Disabled")), ("-1", _("auto"))]
for x in nimmanager.nim_slots:
nims.append((str(x.slot), x.getSlotName()))
rec_nims.append((str(x.slot), x.getSlotName()))
config.usage.frontend_priority = ConfigSelection(default = "-1", choices = nims)
config.usage.recording_frontend_priority = ConfigSelection(default = "-2", choices = rec_nims)
config.misc.disable_background_scan = ConfigYesNo(default = False)
config.usage.jobtaksextensions = ConfigYesNo(default = True)
config.usage.servicenum_fontsize = ConfigSelectionNumber(default = 0, stepwidth = 1, min = -8, max = 10, wraparound = True)
config.usage.servicename_fontsize = ConfigSelectionNumber(default = 2, stepwidth = 1, min = -8, max = 10, wraparound = True)
config.usage.serviceinfo_fontsize = ConfigSelectionNumber(default = 0, stepwidth = 1, min = -8, max = 10, wraparound = True)
config.usage.serviceitems_per_page = ConfigSelectionNumber(default = 20, stepwidth = 1, min = 3, max = 40, wraparound = True)
config.usage.show_servicelist = ConfigYesNo(default = True)
config.usage.servicelist_mode = ConfigSelection(default = "standard", choices = [
("standard", _("Standard")),
("simple", _("Simple")) ] )
config.usage.servicelistpreview_mode = ConfigYesNo(default = False)
config.usage.tvradiobutton_mode = ConfigSelection(default="BouquetList", choices = [
("ChannelList", _("Channel List")),
("BouquetList", _("Bouquet List")),
("MovieList", _("Movie List"))])
config.usage.channelbutton_mode = ConfigSelection(default="0", choices = [
("0", _("Just change channels")),
("1", _("Channel List")),
("2", _("Bouquet List"))])
config.usage.updownbutton_mode = ConfigSelection(default="1", choices = [
("0", _("Just change channels")),
("1", _("Channel List")),
("2", _("Just change channels revert"))])
config.usage.leftrightbutton_mode = ConfigSelection(default="0", choices = [
("0", _("Just change channels")),
("1", _("Channel List"))])
config.usage.okbutton_mode = ConfigSelection(default="0", choices = [
("0", _("InfoBar")),
("1", _("Channel List"))])
config.usage.show_bouquetalways = ConfigYesNo(default = False)
config.usage.show_event_progress_in_servicelist = ConfigSelection(default = 'barright', choices = [
('barleft', _("Progress bar left")),
('barright', _("Progress bar right")),
('percleft', _("Percentage left")),
('percright', _("Percentage right")),
('no', _("No")) ])
config.usage.show_channel_numbers_in_servicelist = ConfigYesNo(default = True)
config.usage.show_channel_jump_in_servicelist = ConfigSelection(default="alpha", choices = [
("quick", _("Quick Actions")),
("alpha", _("Alpha")),
("number", _("Number"))])
config.usage.show_event_progress_in_servicelist.addNotifier(refreshServiceList)
config.usage.show_channel_numbers_in_servicelist.addNotifier(refreshServiceList)
config.usage.blinking_display_clock_during_recording = ConfigYesNo(default = False)
config.usage.blinking_rec_symbol_during_recording = ConfigYesNo(default = False)
config.usage.show_message_when_recording_starts = ConfigYesNo(default = True)
	config.usage.load_length_of_movies_in_movielist = ConfigYesNo(default = True)
config.usage.show_icons_in_movielist = ConfigSelection(default = 'i', choices = [
('o', _("Off")),
('p', _("Progress")),
('s', _("Small progress")),
('i', _("Icons")),
])
config.usage.movielist_unseen = ConfigYesNo(default = True)
config.usage.swap_snr_on_osd = ConfigYesNo(default = False)
config.usage.swap_time_display_on_osd = ConfigSelection(default = "0", choices = [("0", _("Skin Setting")), ("1", _("Mins")), ("2", _("Mins Secs")), ("3", _("Hours Mins")), ("4", _("Hours Mins Secs")), ("5", _("Percentage"))])
config.usage.swap_media_time_display_on_osd = ConfigSelection(default = "0", choices = [("0", _("Skin Setting")), ("1", _("Mins")), ("2", _("Mins Secs")), ("3", _("Hours Mins")), ("4", _("Hours Mins Secs")), ("5", _("Percentage"))])
config.usage.swap_time_remaining_on_osd = ConfigSelection(default = "0", choices = [("0", _("Remaining")), ("1", _("Elapsed")), ("2", _("Elapsed & Remaining")), ("3", _("Remaining & Elapsed"))])
config.usage.elapsed_time_positive_osd = ConfigYesNo(default = False)
config.usage.swap_time_display_on_vfd = ConfigSelection(default = "0", choices = [("0", _("Skin Setting")), ("1", _("Mins")), ("2", _("Mins Secs")), ("3", _("Hours Mins")), ("4", _("Hours Mins Secs")), ("5", _("Percentage"))])
config.usage.swap_media_time_display_on_vfd = ConfigSelection(default = "0", choices = [("0", _("Skin Setting")), ("1", _("Mins")), ("2", _("Mins Secs")), ("3", _("Hours Mins")), ("4", _("Hours Mins Secs")), ("5", _("Percentage"))])
config.usage.swap_time_remaining_on_vfd = ConfigSelection(default = "0", choices = [("0", _("Remaining")), ("1", _("Elapsed")), ("2", _("Elapsed & Remaining")), ("3", _("Remaining & Elapsed"))])
config.usage.elapsed_time_positive_vfd = ConfigYesNo(default = False)
config.usage.lcd_scroll_delay = ConfigSelection(default = "10000", choices = [
("10000", "10 " + _("seconds")),
("20000", "20 " + _("seconds")),
("30000", "30 " + _("seconds")),
("60000", "1 " + _("minute")),
("300000", "5 " + _("minutes")),
("noscrolling", _("off"))])
config.usage.lcd_scroll_speed = ConfigSelection(default = "300", choices = [
("500", _("slow")),
("300", _("normal")),
("100", _("fast"))])
def SpinnerOnOffChanged(configElement):
setSpinnerOnOff(int(configElement.value))
config.usage.show_spinner.addNotifier(SpinnerOnOffChanged)
def EnableTtCachingChanged(configElement):
setEnableTtCachingOnOff(int(configElement.value))
config.usage.enable_tt_caching.addNotifier(EnableTtCachingChanged)
def TunerTypePriorityOrderChanged(configElement):
setTunerTypePriorityOrder(int(configElement.value))
config.usage.alternatives_priority.addNotifier(TunerTypePriorityOrderChanged, immediate_feedback=False)
def PreferredTunerChanged(configElement):
setPreferredTuner(int(configElement.value))
config.usage.frontend_priority.addNotifier(PreferredTunerChanged)
config.usage.hide_zap_errors = ConfigYesNo(default = True)
config.usage.hide_ci_messages = ConfigYesNo(default = True)
config.usage.show_cryptoinfo = ConfigSelection([("0", _("Off")),("1", _("One line")),("2", _("Two lines"))], "2")
config.usage.show_eit_nownext = ConfigYesNo(default = True)
config.usage.show_vcr_scart = ConfigYesNo(default = False)
config.epg = ConfigSubsection()
config.epg.eit = ConfigYesNo(default = True)
config.epg.mhw = ConfigYesNo(default = False)
config.epg.freesat = ConfigYesNo(default = True)
config.epg.viasat = ConfigYesNo(default = True)
config.epg.netmed = ConfigYesNo(default = True)
config.epg.virgin = ConfigYesNo(default = False)
def EpgSettingsChanged(configElement):
from enigma import eEPGCache
mask = 0xffffffff
if not config.epg.eit.value:
mask &= ~(eEPGCache.NOWNEXT | eEPGCache.SCHEDULE | eEPGCache.SCHEDULE_OTHER)
if not config.epg.mhw.value:
mask &= ~eEPGCache.MHW
if not config.epg.freesat.value:
mask &= ~(eEPGCache.FREESAT_NOWNEXT | eEPGCache.FREESAT_SCHEDULE | eEPGCache.FREESAT_SCHEDULE_OTHER)
if not config.epg.viasat.value:
mask &= ~eEPGCache.VIASAT
if not config.epg.netmed.value:
mask &= ~(eEPGCache.NETMED_SCHEDULE | eEPGCache.NETMED_SCHEDULE_OTHER)
if not config.epg.virgin.value:
mask &= ~(eEPGCache.VIRGIN_NOWNEXT | eEPGCache.VIRGIN_SCHEDULE)
eEPGCache.getInstance().setEpgSources(mask)
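	# Each EPG source corresponds to one or more bits in the mask: starting
	# from all bits set (0xffffffff) and clearing the bits of disabled sources
	# means, for example, that disabling only MHW yields
	# mask == 0xffffffff & ~eEPGCache.MHW.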
config.epg.eit.addNotifier(EpgSettingsChanged)
config.epg.mhw.addNotifier(EpgSettingsChanged)
config.epg.freesat.addNotifier(EpgSettingsChanged)
config.epg.viasat.addNotifier(EpgSettingsChanged)
config.epg.netmed.addNotifier(EpgSettingsChanged)
config.epg.virgin.addNotifier(EpgSettingsChanged)
config.epg.histminutes = ConfigSelectionNumber(min = 0, max = 120, stepwidth = 15, default = 0, wraparound = True)
def EpgHistorySecondsChanged(configElement):
eEPGCache.getInstance().setEpgHistorySeconds(config.epg.histminutes.value*60)
config.epg.histminutes.addNotifier(EpgHistorySecondsChanged)
config.epg.cacheloadsched = ConfigYesNo(default = False)
config.epg.cachesavesched = ConfigYesNo(default = False)
def EpgCacheLoadSchedChanged(configElement):
import EpgLoadSave
EpgLoadSave.EpgCacheLoadCheck()
def EpgCacheSaveSchedChanged(configElement):
import EpgLoadSave
EpgLoadSave.EpgCacheSaveCheck()
config.epg.cacheloadsched.addNotifier(EpgCacheLoadSchedChanged, immediate_feedback = False)
config.epg.cachesavesched.addNotifier(EpgCacheSaveSchedChanged, immediate_feedback = False)
config.epg.cacheloadtimer = ConfigSelectionNumber(default = 24, stepwidth = 1, min = 1, max = 24, wraparound = True)
config.epg.cachesavetimer = ConfigSelectionNumber(default = 24, stepwidth = 1, min = 1, max = 24, wraparound = True)
config.osd.dst_left = ConfigSelectionNumber(default = 0, stepwidth = 1, min = 0, max = 720, wraparound = False)
config.osd.dst_width = ConfigSelectionNumber(default = 720, stepwidth = 1, min = 0, max = 720, wraparound = False)
config.osd.dst_top = ConfigSelectionNumber(default = 0, stepwidth = 1, min = 0, max = 576, wraparound = False)
config.osd.dst_height = ConfigSelectionNumber(default = 576, stepwidth = 1, min = 0, max = 576, wraparound = False)
config.osd.alpha = ConfigSelectionNumber(default = 255, stepwidth = 1, min = 0, max = 255, wraparound = False)
config.osd.alpha_teletext = ConfigSelectionNumber(default = 255, stepwidth = 1, min = 0, max = 255, wraparound = False)
config.av.osd_alpha = NoSave(ConfigNumber(default = 255))
config.osd.threeDmode = ConfigSelection([("off", _("Off")), ("auto", _("Auto")), ("sidebyside", _("Side by Side")),("topandbottom", _("Top and Bottom"))], "auto")
config.osd.threeDznorm = ConfigSlider(default = 50, increment = 1, limits = (0, 100))
config.osd.show3dextensions = ConfigYesNo(default = False)
choiceoptions = [("mode1", _("Mode 1")), ("mode2", _("Mode 2"))]
config.osd.threeDsetmode = ConfigSelection(default = 'mode1' , choices = choiceoptions )
	hddchoices = [('/etc/enigma2/', 'Internal Flash')]
	for p in harddiskmanager.getMountedPartitions():
		if os.path.exists(p.mountpoint):
			d = os.path.normpath(p.mountpoint)
			if p.mountpoint != '/':
				hddchoices.append((p.mountpoint, d))
	config.misc.epgcachepath = ConfigSelection(default = '/etc/enigma2/', choices = hddchoices)
config.misc.epgcachefilename = ConfigText(default='epg', fixed_size=False)
config.misc.epgcache_filename = ConfigText(default = (config.misc.epgcachepath.value + config.misc.epgcachefilename.value.replace('.dat','') + '.dat'))
def EpgCacheChanged(configElement):
config.misc.epgcache_filename.setValue(os.path.join(config.misc.epgcachepath.value, config.misc.epgcachefilename.value.replace('.dat','') + '.dat'))
config.misc.epgcache_filename.save()
eEPGCache.getInstance().setCacheFile(config.misc.epgcache_filename.value)
epgcache = eEPGCache.getInstance()
epgcache.save()
if not config.misc.epgcache_filename.value.startswith("/etc/enigma2/"):
if os.path.exists('/etc/enigma2/' + config.misc.epgcachefilename.value.replace('.dat','') + '.dat'):
os.remove('/etc/enigma2/' + config.misc.epgcachefilename.value.replace('.dat','') + '.dat')
config.misc.epgcachepath.addNotifier(EpgCacheChanged, immediate_feedback = False)
config.misc.epgcachefilename.addNotifier(EpgCacheChanged, immediate_feedback = False)
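	# Illustrative effect of EpgCacheChanged() above: with epgcachepath set to
	# '/media/hdd/' (a made-up mount point) and epgcachefilename 'epg', the
	# cache file becomes '/media/hdd/epg.dat' and a stale copy at
	# '/etc/enigma2/epg.dat' is removed.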
config.misc.showradiopic = ConfigYesNo(default = True)
config.misc.bootvideo = ConfigYesNo(default = False)
def setHDDStandby(configElement):
for hdd in harddiskmanager.HDDList():
hdd[1].setIdleTime(int(configElement.value))
config.usage.hdd_standby.addNotifier(setHDDStandby, immediate_feedback=False)
if SystemInfo["12V_Output"]:
def set12VOutput(configElement):
Misc_Options.getInstance().set_12V_output(configElement.value == "on" and 1 or 0)
config.usage.output_12V.addNotifier(set12VOutput, immediate_feedback=False)
config.usage.keymap = ConfigText(default = eEnv.resolve("${datadir}/enigma2/keymap.xml"))
if getMachineName().lower().startswith('xp') or getMachineName().lower().startswith('lx') or getBoxType().startswith('atemio'):
if fileExists(eEnv.resolve("${datadir}/enigma2/keymap.usr")):
config.usage.keymap = ConfigSelection(default = eEnv.resolve("${datadir}/enigma2/keymap.xpe"), choices = [
(eEnv.resolve("${datadir}/enigma2/keymap.xml"), _("Default keymap - keymap.xml")),
(eEnv.resolve("${datadir}/enigma2/keymap.xpe"), _("Xpeed keymap - keymap.xpe")),
(eEnv.resolve("${datadir}/enigma2/keymap.usr"), _("User keymap - keymap.usr")),
(eEnv.resolve("${datadir}/enigma2/keymap.ntr"), _("Neutrino keymap - keymap.ntr")),
(eEnv.resolve("${datadir}/enigma2/keymap.u80"), _("U80 keymap - keymap.u80"))])
else:
config.usage.keymap = ConfigSelection(default = eEnv.resolve("${datadir}/enigma2/keymap.xpe"), choices = [
(eEnv.resolve("${datadir}/enigma2/keymap.xml"), _("Default keymap - keymap.xml")),
(eEnv.resolve("${datadir}/enigma2/keymap.xpe"), _("Xpeed keymap - keymap.xpe")),
(eEnv.resolve("${datadir}/enigma2/keymap.ntr"), _("Neutrino keymap - keymap.ntr")),
(eEnv.resolve("${datadir}/enigma2/keymap.u80"), _("U80 keymap - keymap.u80"))])
else:
if fileExists(eEnv.resolve("${datadir}/enigma2/keymap.usr")):
config.usage.keymap = ConfigSelection(default = eEnv.resolve("${datadir}/enigma2/keymap.xml"), choices = [
(eEnv.resolve("${datadir}/enigma2/keymap.xml"), _("Default keymap - keymap.xml")),
(eEnv.resolve("${datadir}/enigma2/keymap.usr"), _("User keymap - keymap.usr")),
(eEnv.resolve("${datadir}/enigma2/keymap.ntr"), _("Neutrino keymap - keymap.ntr")),
(eEnv.resolve("${datadir}/enigma2/keymap.u80"), _("U80 keymap - keymap.u80"))])
else:
config.usage.keymap = ConfigSelection(default = eEnv.resolve("${datadir}/enigma2/keymap.xml"), choices = [
(eEnv.resolve("${datadir}/enigma2/keymap.xml"), _("Default keymap - keymap.xml")),
(eEnv.resolve("${datadir}/enigma2/keymap.ntr"), _("Neutrino keymap - keymap.ntr")),
(eEnv.resolve("${datadir}/enigma2/keymap.u80"), _("U80 keymap - keymap.u80"))])
config.network = ConfigSubsection()
if SystemInfo["WakeOnLAN"]:
def wakeOnLANChanged(configElement):
if getBoxType() in ('et10000', 'gbquadplus', 'gbquad', 'gb800ueplus', 'gb800seplus', 'gbultraue', 'gbultrase', 'gbipbox', 'quadbox2400', 'mutant2400'):
open(SystemInfo["WakeOnLAN"], "w").write(configElement.value and "on" or "off")
else:
open(SystemInfo["WakeOnLAN"], "w").write(configElement.value and "enable" or "disable")
config.network.wol = ConfigYesNo(default = False)
config.network.wol.addNotifier(wakeOnLANChanged)
config.network.AFP_autostart = ConfigYesNo(default = False)
config.network.NFS_autostart = ConfigYesNo(default = False)
config.network.OpenVPN_autostart = ConfigYesNo(default = False)
config.network.Samba_autostart = ConfigYesNo(default = True)
config.network.Inadyn_autostart = ConfigYesNo(default = False)
config.network.uShare_autostart = ConfigYesNo(default = False)
config.softwareupdate = ConfigSubsection()
config.softwareupdate.autosettingsbackup = ConfigYesNo(default = False)
config.softwareupdate.autoimagebackup = ConfigYesNo(default = False)
config.softwareupdate.check = ConfigYesNo(default = False)
config.softwareupdate.checktimer = ConfigSelectionNumber(min = 1, max = 48, stepwidth = 1, default = 24, wraparound = True)
config.softwareupdate.updatelastcheck = ConfigInteger(default=0)
config.softwareupdate.updatefound = NoSave(ConfigBoolean(default = False))
config.softwareupdate.updatebeta = ConfigYesNo(default = False)
config.softwareupdate.updateisunstable = ConfigYesNo(default = False)
config.timeshift = ConfigSubsection()
choicelist = [("0", "Disabled")]
for i in (2, 3, 4, 5, 10, 20, 30):
choicelist.append(("%d" % i, ngettext("%d second", "%d seconds", i) % i))
for i in (60, 120, 300):
m = i / 60
choicelist.append(("%d" % i, ngettext("%d minute", "%d minutes", m) % m))
config.timeshift.startdelay = ConfigSelection(default = "0", choices = choicelist)
config.timeshift.showinfobar = ConfigYesNo(default = True)
config.timeshift.stopwhilerecording = ConfigYesNo(default = False)
config.timeshift.favoriteSaveAction = ConfigSelection([("askuser", _("Ask user")),("savetimeshift", _("Save and stop")),("savetimeshiftandrecord", _("Save and record")),("noSave", _("Don't save"))], "askuser")
config.timeshift.autorecord = ConfigYesNo(default = False)
config.timeshift.isRecording = NoSave(ConfigYesNo(default = False))
config.timeshift.timeshiftMaxHours = ConfigSelectionNumber(min = 1, max = 999, stepwidth = 1, default = 12, wraparound = True)
config.timeshift.deleteAfterZap = ConfigYesNo(default = True)
config.seek = ConfigSubsection()
config.seek.baractivation = ConfigSelection([("leftright", _("Long Left/Right")),("ffrw", _("Long << / >>"))], "leftright")
config.seek.sensibility = ConfigSelectionNumber(min = 1, max = 10, stepwidth = 1, default = 10, wraparound = True)
config.seek.selfdefined_13 = ConfigSelectionNumber(min = 1, max = 300, stepwidth = 1, default = 15, wraparound = True)
config.seek.selfdefined_46 = ConfigSelectionNumber(min = 1, max = 600, stepwidth = 1, default = 60, wraparound = True)
config.seek.selfdefined_79 = ConfigSelectionNumber(min = 1, max = 1200, stepwidth = 1, default = 300, wraparound = True)
config.seek.speeds_forward = ConfigSet(default=[2, 4, 8, 16, 32, 64, 128], choices=[2, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128])
config.seek.speeds_backward = ConfigSet(default=[2, 4, 8, 16, 32, 64, 128], choices=[1, 2, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128])
config.seek.speeds_slowmotion = ConfigSet(default=[2, 4, 8], choices=[2, 4, 6, 8, 12, 16, 25])
config.seek.enter_forward = ConfigSelection(default = "2", choices = ["2", "4", "6", "8", "12", "16", "24", "32", "48", "64", "96", "128"])
config.seek.enter_backward = ConfigSelection(default = "1", choices = ["1", "2", "4", "6", "8", "12", "16", "24", "32", "48", "64", "96", "128"])
config.seek.on_pause = ConfigSelection(default = "play", choices = [
("play", _("Play")),
("step", _("Single step (GOP)")),
("last", _("Last speed")) ])
config.crash = ConfigSubsection()
config.crash.details = ConfigYesNo(default = True)
config.crash.enabledebug = ConfigYesNo(default = False)
config.crash.debugloglimit = ConfigSelectionNumber(min = 1, max = 10, stepwidth = 1, default = 4, wraparound = True)
config.crash.daysloglimit = ConfigSelectionNumber(min = 1, max = 30, stepwidth = 1, default = 8, wraparound = True)
config.crash.sizeloglimit = ConfigSelectionNumber(min = 1, max = 20, stepwidth = 1, default = 10, wraparound = True)
debugpath = [('/home/root/logs/', '/home/root/')]
for p in harddiskmanager.getMountedPartitions():
if os.path.exists(p.mountpoint):
d = os.path.normpath(p.mountpoint)
if p.mountpoint != '/':
debugpath.append((p.mountpoint + 'logs/', d))
config.crash.debug_path = ConfigSelection(default = "/home/root/logs/", choices = debugpath)
def updatedebug_path(configElement):
if not os.path.exists(config.crash.debug_path.value):
os.mkdir(config.crash.debug_path.value,0755)
config.crash.debug_path.addNotifier(updatedebug_path, immediate_feedback = False)
config.usage.timerlist_finished_timer_position = ConfigSelection(default = "end", choices = [("beginning", _("at beginning")), ("end", _("at end"))])
def updateEnterForward(configElement):
if not configElement.value:
configElement.value = [2]
updateChoices(config.seek.enter_forward, configElement.value)
config.seek.speeds_forward.addNotifier(updateEnterForward, immediate_feedback = False)
def updateEnterBackward(configElement):
if not configElement.value:
configElement.value = [2]
updateChoices(config.seek.enter_backward, configElement.value)
config.seek.speeds_backward.addNotifier(updateEnterBackward, immediate_feedback = False)
def updateEraseSpeed(el):
eBackgroundFileEraser.getInstance().setEraseSpeed(int(el.value))
def updateEraseFlags(el):
eBackgroundFileEraser.getInstance().setEraseFlags(int(el.value))
config.misc.erase_speed = ConfigSelection(default="20", choices = [
("10", "10 MB/s"),
("20", "20 MB/s"),
("50", "50 MB/s"),
("100", "100 MB/s")])
config.misc.erase_speed.addNotifier(updateEraseSpeed, immediate_feedback = False)
config.misc.erase_flags = ConfigSelection(default="1", choices = [
("0", _("Disable")),
("1", _("Internal hdd only")),
("3", _("Everywhere"))])
config.misc.erase_flags.addNotifier(updateEraseFlags, immediate_feedback = False)
if SystemInfo["ZapMode"]:
		def setZapmode(el):
			# write the zap mode via a context manager (avoids shadowing the
			# builtin 'file' and guarantees the handle is closed)
			with open(zapfile, "w") as f:
				f.write(el.value)
if os.path.exists("/proc/stb/video/zapping_mode"):
zapfile = "/proc/stb/video/zapping_mode"
else:
zapfile = "/proc/stb/video/zapmode"
zapoptions = [("mute", _("Black screen")), ("hold", _("Hold screen")), ("mutetilllock", _("Black screen till locked")), ("holdtilllock", _("Hold till locked"))]
config.misc.zapmode = ConfigSelection(default = "mute", choices = zapoptions )
config.misc.zapmode.addNotifier(setZapmode, immediate_feedback = False)
config.usage.historymode = ConfigSelection(default = "1", choices = [("1", _("Show menu")), ("0", _("Just zap")), ("2", _("Show Zap-History Browser"))])
config.usage.bookmarkmode = ConfigSelection(default = "0", choices = [("1", _("Show EMC")), ("0", _("Show Movielist")), ("2", _("Show Simple Movie List"))])
config.subtitles = ConfigSubsection()
config.subtitles.ttx_subtitle_colors = ConfigSelection(default = "1", choices = [
("0", _("original")),
("1", _("white")),
("2", _("yellow")) ])
config.subtitles.ttx_subtitle_original_position = ConfigYesNo(default = False)
config.subtitles.subtitle_position = ConfigSelection( choices = ["0", "10", "20", "30", "40", "50", "60", "70", "80", "90", "100", "150", "200", "250", "300", "350", "400", "450"], default = "50")
config.subtitles.subtitle_alignment = ConfigSelection(choices = [("left", _("left")), ("center", _("center")), ("right", _("right"))], default = "center")
config.subtitles.subtitle_rewrap = ConfigYesNo(default = False)
config.subtitles.subtitle_borderwidth = ConfigSelection(choices = ["1", "2", "3", "4", "5"], default = "3")
config.subtitles.subtitle_fontsize = ConfigSelection(choices = ["16", "18", "20", "22", "24", "26", "28", "30", "32", "34", "36", "38", "40", "42", "44", "46", "48", "50", "52", "54"], default = "34")
subtitle_delay_choicelist = []
for i in range(-900000, 1845000, 45000):
if i == 0:
subtitle_delay_choicelist.append(("0", _("No delay")))
else:
subtitle_delay_choicelist.append(("%d" % i, "%2.1f sec" % (i / 90000.)))
config.subtitles.subtitle_noPTSrecordingdelay = ConfigSelection(default = "315000", choices = subtitle_delay_choicelist)
config.subtitles.dvb_subtitles_yellow = ConfigYesNo(default = False)
config.subtitles.dvb_subtitles_original_position = ConfigSelection(default = "0", choices = [("0", _("Original")), ("1", _("Fixed")), ("2", _("Relative"))])
config.subtitles.dvb_subtitles_centered = ConfigYesNo(default = True)
config.subtitles.subtitle_bad_timing_delay = ConfigSelection(default = "0", choices = subtitle_delay_choicelist)
config.subtitles.dvb_subtitles_backtrans = ConfigSelection(default = "0", choices = [
("0", _("No transparency")),
("25", "10%"),
("50", "20%"),
("75", "30%"),
("100", "40%"),
("125", "50%"),
("150", "60%"),
("175", "70%"),
("200", "80%"),
("225", "90%"),
("255", _("Full transparency"))])
config.subtitles.pango_subtitle_colors = ConfigSelection(default = "0", choices = [
("0", _("alternative")),
("1", _("white")),
("2", _("yellow")) ])
config.subtitles.pango_subtitles_delay = ConfigSelection(default = "0", choices = subtitle_delay_choicelist)
config.subtitles.pango_subtitles_fps = ConfigSelection(default = "1", choices = [
("1", _("Original")),
("23976", _("23.976")),
("24000", _("24")),
("25000", _("25")),
("29970", _("29.97")),
("30000", _("30"))])
config.subtitles.pango_autoturnon = ConfigYesNo(default = True)
config.autolanguage = ConfigSubsection()
audio_language_choices=[
("---", _("None")),
("und", _("Undetermined")),
("orj dos ory org esl qaa und mis mul ORY ORJ Audio_ORJ", _("Original")),
("ara", _("Arabic")),
("eus baq", _("Basque")),
("bul", _("Bulgarian")),
("hrv", _("Croatian")),
("ces cze", _("Czech")),
("dan", _("Danish")),
("dut ndl Dutch", _("Dutch")),
("eng qaa Englisch", _("English")),
("est", _("Estonian")),
("fin", _("Finnish")),
("fra fre", _("French")),
("deu ger", _("German")),
("ell gre", _("Greek")),
("heb", _("Hebrew")),
("hun", _("Hungarian")),
("ita", _("Italian")),
("lav", _("Latvian")),
("lit", _("Lithuanian")),
("ltz", _("Luxembourgish")),
("nor", _("Norwegian")),
("pol", _("Polish")),
("por", _("Portuguese")),
("fas per", _("Persian")),
("ron rum", _("Romanian")),
("rus", _("Russian")),
("srp", _("Serbian")),
("slk slo", _("Slovak")),
("slv", _("Slovenian")),
("spa", _("Spanish")),
("swe", _("Swedish")),
("tha", _("Thai")),
("tur Audio_TUR", _("Turkish"))]
def setEpgLanguage(configElement):
eServiceEvent.setEPGLanguage(configElement.value)
config.autolanguage.audio_epglanguage = ConfigSelection(audio_language_choices[:1] + audio_language_choices [2:], default="---")
config.autolanguage.audio_epglanguage.addNotifier(setEpgLanguage)
def setEpgLanguageAlternative(configElement):
eServiceEvent.setEPGLanguageAlternative(configElement.value)
config.autolanguage.audio_epglanguage_alternative = ConfigSelection(audio_language_choices[:1] + audio_language_choices [2:], default="---")
config.autolanguage.audio_epglanguage_alternative.addNotifier(setEpgLanguageAlternative)
config.autolanguage.audio_autoselect1 = ConfigSelection(choices=audio_language_choices, default="---")
config.autolanguage.audio_autoselect2 = ConfigSelection(choices=audio_language_choices, default="---")
config.autolanguage.audio_autoselect3 = ConfigSelection(choices=audio_language_choices, default="---")
config.autolanguage.audio_autoselect4 = ConfigSelection(choices=audio_language_choices, default="---")
config.autolanguage.audio_defaultac3 = ConfigYesNo(default = True)
config.autolanguage.audio_defaultddp = ConfigYesNo(default = False)
config.autolanguage.audio_usecache = ConfigYesNo(default = True)
subtitle_language_choices = audio_language_choices[:1] + audio_language_choices [2:]
config.autolanguage.subtitle_autoselect1 = ConfigSelection(choices=subtitle_language_choices, default="---")
config.autolanguage.subtitle_autoselect2 = ConfigSelection(choices=subtitle_language_choices, default="---")
config.autolanguage.subtitle_autoselect3 = ConfigSelection(choices=subtitle_language_choices, default="---")
config.autolanguage.subtitle_autoselect4 = ConfigSelection(choices=subtitle_language_choices, default="---")
config.autolanguage.subtitle_hearingimpaired = ConfigYesNo(default = False)
config.autolanguage.subtitle_defaultimpaired = ConfigYesNo(default = False)
config.autolanguage.subtitle_defaultdvb = ConfigYesNo(default = False)
config.autolanguage.subtitle_usecache = ConfigYesNo(default = True)
config.autolanguage.equal_languages = ConfigSelection(default = "15", choices = [
("0", _("None")),("1", "1"),("2", "2"),("3", "1,2"),
("4", "3"),("5", "1,3"),("6", "2,3"),("7", "1,2,3"),
("8", "4"),("9", "1,4"),("10", "2,4"),("11", "1,2,4"),
("12", "3,4"),("13", "1,3,4"),("14", "2,3,4"),("15", _("All"))])
config.logmanager = ConfigSubsection()
config.logmanager.showinextensions = ConfigYesNo(default = False)
config.logmanager.user = ConfigText(default='', fixed_size=False)
config.logmanager.useremail = ConfigText(default='', fixed_size=False)
config.logmanager.usersendcopy = ConfigYesNo(default = True)
config.logmanager.path = ConfigText(default = "/")
config.logmanager.additionalinfo = NoSave(ConfigText(default = ""))
config.logmanager.sentfiles = ConfigLocations(default='')
config.plisettings = ConfigSubsection()
config.plisettings.Subservice = ConfigYesNo(default = True)
config.plisettings.ShowPressedButtons = ConfigYesNo(default = False)
config.plisettings.ColouredButtons = ConfigYesNo(default = True)
config.plisettings.InfoBarEpg_mode = ConfigSelection(default="0", choices = [
("0", _("as plugin in extended bar")),
("1", _("with long OK press")),
("2", _("with exit button")),
("3", _("with left/right buttons"))])
if fileExists("/usr/lib/enigma2/python/Plugins/Extensions/CoolTVGuide/plugin.pyo"):
config.plisettings.PLIEPG_mode = ConfigSelection(default="cooltvguide", choices = [
("pliepg", _("Show Graphical EPG")),
("single", _("Show Single EPG")),
("multi", _("Show Multi EPG")),
("eventview", _("Show Eventview")),
("cooltvguide", _("Show CoolTVGuide")),
("etportal", _("Show EtPortal"))])
config.plisettings.PLIINFO_mode = ConfigSelection(default="coolinfoguide", choices = [
("eventview", _("Show Eventview")),
("epgpress", _("Show EPG")),
("single", _("Show Single EPG")),
("coolsingleguide", _("Show CoolSingleGuide")),
("coolinfoguide", _("Show CoolInfoGuide")),
("cooltvguide", _("Show CoolTVGuide")),
("etportal", _("Show EtPortal"))])
config.plisettings.PLIFAV_mode = ConfigSelection(default="coolinfoguide", choices = [
("eventview", _("Show Eventview")),
("showfavourites", _("Show Favourites")),
("epgpress", _("Show EPG")),
("single", _("Show Single EPG")),
("coolsingleguide", _("Show CoolSingleGuide")),
("coolinfoguide", _("Show CoolInfoGuide")),
("cooltvguide", _("Show CoolTVGuide")),
("emc", _("Show Enhanced Movie Center")),
("mediaportal", _("Show Media Portal")),
("dreamplex", _("Show DreamPlex")),
("etportal", _("Show EtPortal"))])
else:
config.plisettings.PLIEPG_mode = ConfigSelection(default="pliepg", choices = [
("pliepg", _("Show Graphical EPG")),
("single", _("Show Single EPG")),
("multi", _("Show Multi EPG")),
("eventview", _("Show Eventview"))])
config.plisettings.PLIINFO_mode = ConfigSelection(default="eventview", choices = [
("eventview", _("Show Eventview")),
("epgpress", _("Show EPG")),
("single", _("Show Single EPG"))])
config.plisettings.PLIFAV_mode = ConfigSelection(default="eventview", choices = [
("eventview", _("Show Eventview")),
("epgpress", _("Show EPG")),
("single", _("Show Single EPG"))])
config.epgselection = ConfigSubsection()
config.epgselection.sort = ConfigSelection(default="0", choices = [("0", _("Time")),("1", _("Alphanumeric"))])
config.epgselection.overjump = ConfigYesNo(default = False)
config.epgselection.infobar_type_mode = ConfigSelection(choices = [("graphics",_("Multi EPG")), ("single", _("Single EPG"))], default = "graphics")
if SystemInfo.get("NumVideoDecoders", 1) > 1:
config.epgselection.infobar_preview_mode = ConfigSelection(choices = [("0",_("Disabled")), ("1", _("Fullscreen")), ("2", _("PiP"))], default = "1")
else:
config.epgselection.infobar_preview_mode = ConfigSelection(choices = [("0",_("Disabled")), ("1", _("Fullscreen"))], default = "1")
config.epgselection.infobar_ok = ConfigSelection(choices = [("Zap",_("Zap")), ("Zap + Exit", _("Zap + Exit"))], default = "Zap")
config.epgselection.infobar_oklong = ConfigSelection(choices = [("Zap",_("Zap")), ("Zap + Exit", _("Zap + Exit"))], default = "Zap + Exit")
config.epgselection.infobar_itemsperpage = ConfigSelectionNumber(default = 2, stepwidth = 1, min = 1, max = 4, wraparound = True)
if SystemInfo.get("NumVideoDecoders", 1) > 1:
if HardwareInfo().is_nextgen():
previewdefault = "2"
else:
previewdefault = "1"
config.epgselection.infobar_preview_mode = ConfigSelection(choices = [("0",_("Disabled")), ("1", _("Fullscreen")), ("2", _("PiP"))], default = previewdefault)
else:
config.epgselection.infobar_preview_mode = ConfigSelection(choices = [("0",_("Disabled")), ("1", _("Fullscreen"))], default = "1")
config.epgselection.infobar_roundto = ConfigSelection(default = "15", choices = [("15", _("%d minutes") % 15), ("30", _("%d minutes") % 30), ("60", _("%d minutes") % 60)])
config.epgselection.infobar_prevtime = ConfigClock(default = time())
config.epgselection.infobar_prevtimeperiod = ConfigSelection(default = "300", choices = [("60", _("%d minutes") % 60), ("90", _("%d minutes") % 90), ("120", _("%d minutes") % 120), ("150", _("%d minutes") % 150), ("180", _("%d minutes") % 180), ("210", _("%d minutes") % 210), ("240", _("%d minutes") % 240), ("270", _("%d minutes") % 270), ("300", _("%d minutes") % 300)])
config.epgselection.infobar_primetimehour = ConfigSelectionNumber(default = 20, stepwidth = 1, min = 00, max = 23, wraparound = True)
config.epgselection.infobar_primetimemins = ConfigSelectionNumber(default = 00, stepwidth = 1, min = 00, max = 59, wraparound = True)
config.epgselection.infobar_servicetitle_mode = ConfigSelection(default = "servicename", choices = [("servicename", _("Service Name")),("picon", _("Picon")),("picon+servicename", _("Picon and Service Name")) ])
config.epgselection.infobar_servfs = ConfigSelectionNumber(default = 0, stepwidth = 1, min = -8, max = 10, wraparound = True)
config.epgselection.infobar_eventfs = ConfigSelectionNumber(default = 0, stepwidth = 1, min = -8, max = 10, wraparound = True)
config.epgselection.infobar_timelinefs = ConfigSelectionNumber(default = 0, stepwidth = 1, min = -8, max = 10, wraparound = True)
config.epgselection.infobar_timeline24h = ConfigYesNo(default = True)
config.epgselection.infobar_servicewidth = ConfigSelectionNumber(default = 250, stepwidth = 1, min = 70, max = 500, wraparound = True)
config.epgselection.infobar_piconwidth = ConfigSelectionNumber(default = 100, stepwidth = 1, min = 70, max = 500, wraparound = True)
config.epgselection.infobar_infowidth = ConfigSelectionNumber(default = 50, stepwidth = 25, min = 0, max = 150, wraparound = True)
config.epgselection.enhanced_preview_mode = ConfigYesNo(default = True)
config.epgselection.enhanced_ok = ConfigSelection(choices = [("Zap",_("Zap")), ("Zap + Exit", _("Zap + Exit"))], default = "Zap")
config.epgselection.enhanced_oklong = ConfigSelection(choices = [("Zap",_("Zap")), ("Zap + Exit", _("Zap + Exit"))], default = "Zap + Exit")
config.epgselection.enhanced_eventfs = ConfigSelectionNumber(default = 0, stepwidth = 1, min = -8, max = 10, wraparound = True)
config.epgselection.enhanced_itemsperpage = ConfigSelectionNumber(default = 18, stepwidth = 1, min = 12, max = 40, wraparound = True)
config.epgselection.multi_showbouquet = ConfigYesNo(default = False)
config.epgselection.multi_preview_mode = ConfigYesNo(default = True)
config.epgselection.multi_ok = ConfigSelection(choices = [("Zap",_("Zap")), ("Zap + Exit", _("Zap + Exit"))], default = "Zap")
config.epgselection.multi_oklong = ConfigSelection(choices = [("Zap",_("Zap")), ("Zap + Exit", _("Zap + Exit"))], default = "Zap + Exit")
config.epgselection.multi_eventfs = ConfigSelectionNumber(default = 0, stepwidth = 1, min = -8, max = 10, wraparound = True)
config.epgselection.multi_itemsperpage = ConfigSelectionNumber(default = 18, stepwidth = 1, min = 12, max = 40, wraparound = True)
config.epgselection.graph_showbouquet = ConfigYesNo(default = False)
config.epgselection.graph_preview_mode = ConfigYesNo(default = True)
config.epgselection.graph_type_mode = ConfigSelection(choices = [("graphics",_("Graphics")), ("text", _("Text"))], default = "graphics")
config.epgselection.graph_ok = ConfigSelection(choices = [("Zap",_("Zap")), ("Zap + Exit", _("Zap + Exit"))], default = "Zap")
config.epgselection.graph_oklong = ConfigSelection(choices = [("Zap",_("Zap")), ("Zap + Exit", _("Zap + Exit"))], default = "Zap + Exit")
config.epgselection.graph_info = ConfigSelection(choices = [("Channel Info", _("Channel Info")), ("Single EPG", _("Single EPG"))], default = "Channel Info")
config.epgselection.graph_infolong = ConfigSelection(choices = [("Channel Info", _("Channel Info")), ("Single EPG", _("Single EPG"))], default = "Single EPG")
config.epgselection.graph_roundto = ConfigSelection(default = "15", choices = [("15", _("%d minutes") % 15), ("30", _("%d minutes") % 30), ("60", _("%d minutes") % 60)])
config.epgselection.graph_prevtime = ConfigClock(default = time())
config.epgselection.graph_prevtimeperiod = ConfigSelection(default = "180", choices = [("60", _("%d minutes") % 60), ("90", _("%d minutes") % 90), ("120", _("%d minutes") % 120), ("150", _("%d minutes") % 150), ("180", _("%d minutes") % 180), ("210", _("%d minutes") % 210), ("240", _("%d minutes") % 240), ("270", _("%d minutes") % 270), ("300", _("%d minutes") % 300)])
config.epgselection.graph_primetimehour = ConfigSelectionNumber(default = 20, stepwidth = 1, min = 00, max = 23, wraparound = True)
config.epgselection.graph_primetimemins = ConfigSelectionNumber(default = 00, stepwidth = 1, min = 00, max = 59, wraparound = True)
config.epgselection.graph_servicetitle_mode = ConfigSelection(default = "picon+servicename", choices = [("servicename", _("Service Name")),("picon", _("Picon")),("picon+servicename", _("Picon and Service Name")) ])
config.epgselection.graph_channel1 = ConfigYesNo(default = False)
config.epgselection.graph_servfs = ConfigSelectionNumber(default = 0, stepwidth = 1, min = -8, max = 10, wraparound = True)
config.epgselection.graph_eventfs = ConfigSelectionNumber(default = 0, stepwidth = 1, min = -8, max = 10, wraparound = True)
config.epgselection.graph_timelinefs = ConfigSelectionNumber(default = 0, stepwidth = 1, min = -8, max = 10, wraparound = True)
config.epgselection.graph_timeline24h = ConfigYesNo(default = True)
config.epgselection.graph_itemsperpage = ConfigSelectionNumber(default = 8, stepwidth = 1, min = 3, max = 16, wraparound = True)
config.epgselection.graph_pig = ConfigYesNo(default = True)
config.epgselection.graph_heightswitch = NoSave(ConfigYesNo(default = False))
config.epgselection.graph_servicewidth = ConfigSelectionNumber(default = 250, stepwidth = 1, min = 70, max = 500, wraparound = True)
config.epgselection.graph_piconwidth = ConfigSelectionNumber(default = 100, stepwidth = 1, min = 70, max = 500, wraparound = True)
config.epgselection.graph_infowidth = ConfigSelectionNumber(default = 50, stepwidth = 25, min = 0, max = 150, wraparound = True)
softcams = sorted(filter(lambda x: x.startswith('softcam.'), os.listdir("/etc/init.d/")))
config.oscaminfo = ConfigSubsection()
config.oscaminfo.showInExtensions = ConfigYesNo(default=False)
config.oscaminfo.userdatafromconf = ConfigYesNo(default = False)
config.oscaminfo.autoupdate = ConfigYesNo(default = False)
config.oscaminfo.username = ConfigText(default = "username", fixed_size = False, visible_width=12)
config.oscaminfo.password = ConfigPassword(default = "password", fixed_size = False)
config.oscaminfo.ip = ConfigIP( default = [ 127,0,0,1 ], auto_jump=True)
config.oscaminfo.port = ConfigInteger(default = 16002, limits=(0,65535) )
config.oscaminfo.intervall = ConfigSelectionNumber(min = 1, max = 600, stepwidth = 1, default = 10, wraparound = True)
SystemInfo["OScamInstalled"] = False
config.cccaminfo = ConfigSubsection()
config.cccaminfo.showInExtensions = ConfigYesNo(default=False)
config.cccaminfo.serverNameLength = ConfigSelectionNumber(min = 10, max = 100, stepwidth = 1, default = 22, wraparound = True)
config.cccaminfo.name = ConfigText(default="Profile", fixed_size=False)
config.cccaminfo.ip = ConfigText(default="192.168.2.12", fixed_size=False)
config.cccaminfo.username = ConfigText(default="", fixed_size=False)
config.cccaminfo.password = ConfigText(default="", fixed_size=False)
config.cccaminfo.port = ConfigInteger(default=16001, limits=(1, 65535))
config.cccaminfo.profile = ConfigText(default="", fixed_size=False)
config.cccaminfo.ecmInfoEnabled = ConfigYesNo(default=True)
config.cccaminfo.ecmInfoTime = ConfigSelectionNumber(min = 1, max = 10, stepwidth = 1, default = 5, wraparound = True)
config.cccaminfo.ecmInfoForceHide = ConfigYesNo(default=True)
config.cccaminfo.ecmInfoPositionX = ConfigInteger(default=50)
config.cccaminfo.ecmInfoPositionY = ConfigInteger(default=50)
config.cccaminfo.blacklist = ConfigText(default="/media/cf/CCcamInfo.blacklisted", fixed_size=False)
config.cccaminfo.profiles = ConfigText(default="/media/cf/CCcamInfo.profiles", fixed_size=False)
SystemInfo["CCcamInstalled"] = False
if os.path.islink('/etc/init.d/softcam'):
for softcam in softcams:
if "cccam" in os.readlink('/etc/init.d/softcam').lower():
config.cccaminfo.showInExtensions = ConfigYesNo(default=True)
SystemInfo["CCcamInstalled"] = True
elif "oscam" in os.readlink('/etc/init.d/softcam').lower():
config.oscaminfo.showInExtensions = ConfigYesNo(default=True)
SystemInfo["OScamInstalled"] = True
config.streaming = ConfigSubsection()
config.streaming.stream_ecm = ConfigYesNo(default = False)
config.streaming.descramble = ConfigYesNo(default = True)
config.streaming.stream_eit = ConfigYesNo(default = True)
config.streaming.stream_ait = ConfigYesNo(default = True)
config.pluginbrowser = ConfigSubsection()
config.pluginbrowser.po = ConfigYesNo(default = False)
config.pluginbrowser.src = ConfigYesNo(default = False)
def updateChoices(sel, choices):
	# Keep a numeric ConfigSelection valid when its choice list changes:
	# if the stored value is no longer offered, fall back to the next
	# lower available choice.
	if choices:
		defval = None
		val = int(sel.value)
		if val not in choices:
			tmp = choices[:]
			tmp.reverse()
			for x in tmp:
				if x < val:
					defval = str(x)
					break
		sel.setChoices(map(str, choices), defval)
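# e.g. updateChoices(config.somenumericsetting, [1, 2, 5, 10]) keeps the
# stored value when it is still offered and otherwise snaps to the next
# lower choice (the attribute name here is illustrative).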
def preferredPath(path):
if config.usage.setup_level.index < 2 or path == "<default>":
return None # config.usage.default_path.value, but delay lookup until usage
elif path == "<current>":
return config.movielist.last_videodir.value
elif path == "<timer>":
return config.movielist.last_timer_videodir.value
else:
return path
def preferredTimerPath():
return preferredPath(config.usage.timer_path.value)
def preferredInstantRecordPath():
return preferredPath(config.usage.instantrec_path.value)
def defaultMoviePath():
return defaultRecordingLocation(config.usage.default_path.value)
def refreshServiceList(configElement = None):
from Screens.InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if InfoBarInstance is not None:
servicelist = InfoBarInstance.servicelist
if servicelist:
servicelist.setMode()
| gpl-2.0 | -1,456,264,806,924,437,200 | 58.389009 | 374 | 0.6947 | false |
Peter92/MouseTrack | mousetracks/utils/sockets.py | 1 | 2511 | """This is part of the Mouse Tracks Python application.
Source: https://github.com/Peter92/MouseTracks
"""
#Easy to use wrappers for sockets
from __future__ import absolute_import
import psutil
import socket
import struct
from select import select
from .compatibility import pickle
def send_msg(sock, msg):
    """Prefix each message with its length before sending."""
    msg = pickle.dumps(msg)
    msg = struct.pack('>I', len(msg)) + msg
    sock.sendall(msg)
def recv_msg(sock):
"""Receive the message."""
#Read message length
raw_msglen = recvall(sock, 4)
if not raw_msglen:
return None
msglen = struct.unpack('>I', raw_msglen)[0]
#Read message data
return pickle.loads(recvall(sock, msglen))
def recvall(sock, n):
    """Receive exactly n bytes, or return None if the connection closed."""
    data = b''
    while len(data) < n:
        packet = sock.recv(n - len(data))
        if not packet:
            return None
        data += packet
    return data
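# A minimal round-trip sketch of the framing used above (the host/port and
# payload are illustrative, not part of this module):
#
#     server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     server.bind(('localhost', 0))
#     server.listen(1)
#     client = socket.create_connection(server.getsockname())
#     conn, _ = server.accept()
#     send_msg(client, {'event': 'ping'})
#     recv_msg(conn)  # -> {'event': 'ping'}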
def msg_empty(sock):
    """Return True if the socket has no data waiting to be read."""
    return not select([sock], [], [], 0)[0]
def get_ip(sock):
"""Get the IP address the socket is bound to."""
return sock.getsockname()[0]
def get_port(sock):
"""Get the port the socket is bound to."""
return sock.getsockname()[1]
def get_free_port():
"""Find a free port resulting from using port 0."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0))
port = sock.getsockname()[1]
sock.close()
return port
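# e.g. port = get_free_port(); note the OS may hand the same port to
# another process between this call and a later bind (an inherent race
# in the port-0 trick).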
def force_close_port(port, process_name=None):
    """Terminate a process that is bound to a port.

    The process name can be set (e.g. "python"), in which case any
    process whose name doesn't start with it is ignored.
    """
for proc in psutil.process_iter():
for conn in proc.connections():
if conn.laddr[1] == port:
#Don't close if it belongs to SYSTEM
#On windows using .username() results in AccessDenied
#TODO: Needs testing on other operating systems
try:
proc.username()
except psutil.AccessDenied:
pass
else:
if process_name is None or proc.name().startswith(process_name):
try:
proc.kill()
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass | gpl-3.0 | 1,274,394,929,446,219,000 | 26.010753 | 84 | 0.581442 | false |
namccart/gnuradio | gr-digital/python/digital/qa_burst_shaper.py | 18 | 14818 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks, digital
import pmt
import numpy as np
import sys
def make_length_tag(offset, length):
return gr.python_to_tag({'offset' : offset,
'key' : pmt.intern('packet_len'),
'value' : pmt.from_long(length),
'srcid' : pmt.intern('qa_burst_shaper')})
def make_tag(offset, key, value):
return gr.python_to_tag({'offset' : offset,
'key' : pmt.intern(key),
'value' : value,
'srcid' : pmt.intern('qa_burst_shaper')})
def compare_tags(a, b):
return a.offset == b.offset and pmt.equal(a.key, b.key) and \
pmt.equal(a.value, b.value)
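# A quick sketch of how these helpers are used below (offsets and lengths
# are sample values): a length tag marks where a burst starts and how many
# items it spans, and gr.python_to_tag turns the dict into a gr.tag_t.
#
#     make_length_tag(0, 20)                    # 20-item burst at offset 0
#     make_tag(5, 'head', pmt.intern('tag1'))   # auxiliary stream tag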
class qa_burst_shaper (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_ff (self):
prepad = 10
postpad = 10
length = 20
data = np.ones(length + 10) # need 10 more to push things through
window = np.concatenate((-2.0*np.ones(5), -4.0*np.ones(5)))
tags = (make_length_tag(0, length),)
expected = np.concatenate((np.zeros(prepad), window[0:5],
np.ones(length - len(window)), window[5:10],
np.zeros(postpad)))
etag = make_length_tag(0, length + prepad + postpad)
# flowgraph
source = blocks.vector_source_f(data, tags=tags)
shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
post_padding=postpad)
sink = blocks.vector_sink_f()
self.tb.connect(source, shaper, sink)
self.tb.run ()
# checks
self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
self.assertTrue(compare_tags(sink.tags()[0], etag))
def test_cc (self):
prepad = 10
postpad = 10
length = 20
data = np.ones(length + 10,
dtype=complex) # need 10 more to push things through
window = np.concatenate((-2.0*np.ones(5, dtype=complex),
-4.0*np.ones(5, dtype=complex)))
tags = (make_length_tag(0, length),)
expected = np.concatenate((np.zeros(prepad, dtype=complex), window[0:5],
np.ones(length - len(window), dtype=complex),
window[5:10], np.zeros(postpad,
dtype=complex)))
etag = make_length_tag(0, length + prepad + postpad)
# flowgraph
source = blocks.vector_source_c(data, tags=tags)
shaper = digital.burst_shaper_cc(window, pre_padding=prepad,
post_padding=postpad)
sink = blocks.vector_sink_c()
self.tb.connect(source, shaper, sink)
self.tb.run ()
# checks
self.assertComplexTuplesAlmostEqual(sink.data(), expected, 6)
self.assertTrue(compare_tags(sink.tags()[0], etag))
def test_ff_with_phasing (self):
prepad = 10
postpad = 10
length = 20
data = np.ones(length + 10) # need 10 more to push things through
window = np.concatenate((-2.0*np.ones(5), -4.0*np.ones(5)))
tags = (make_length_tag(0, length),)
phasing = np.zeros(5)
for i in xrange(5):
phasing[i] = ((-1.0)**i)
expected = np.concatenate((np.zeros(prepad), phasing*window[0:5],
np.ones(length), phasing*window[5:10],
np.zeros(postpad)))
etag = make_length_tag(0, length + prepad + postpad + len(window))
# flowgraph
source = blocks.vector_source_f(data, tags=tags)
shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
post_padding=postpad,
insert_phasing=True)
sink = blocks.vector_sink_f()
self.tb.connect(source, shaper, sink)
self.tb.run ()
# checks
self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
self.assertTrue(compare_tags(sink.tags()[0], etag))
def test_cc_with_phasing (self):
prepad = 10
postpad = 10
length = 20
data = np.ones(length + 10,
dtype=complex) # need 10 more to push things through
window = np.concatenate((-2.0*np.ones(5, dtype=complex),
-4.0*np.ones(5, dtype=complex)))
tags = (make_length_tag(0, length),)
phasing = np.zeros(5, dtype=complex)
for i in xrange(5):
phasing[i] = complex((-1.0)**i)
expected = np.concatenate((np.zeros(prepad, dtype=complex),
phasing*window[0:5],
np.ones(length, dtype=complex),
phasing*window[5:10],
np.zeros(postpad, dtype=complex)))
etag = make_length_tag(0, length + prepad + postpad + len(window))
# flowgraph
source = blocks.vector_source_c(data, tags=tags)
shaper = digital.burst_shaper_cc(window, pre_padding=prepad,
post_padding=postpad,
insert_phasing=True)
sink = blocks.vector_sink_c()
self.tb.connect(source, shaper, sink)
self.tb.run ()
# checks
self.assertComplexTuplesAlmostEqual(sink.data(), expected, 6)
self.assertTrue(compare_tags(sink.tags()[0], etag))
def test_odd_window (self):
prepad = 10
postpad = 10
length = 20
data = np.ones(length + 10) # need 10 more to push things through
window = np.concatenate((-2.0*np.ones(5), -3.0*np.ones(1),
-4.0*np.ones(5)))
tags = (make_length_tag(0, length),)
expected = np.concatenate((np.zeros(prepad), window[0:6],
np.ones(length - len(window) - 1),
window[5:11], np.zeros(postpad)))
etag = make_length_tag(0, length + prepad + postpad)
# flowgraph
source = blocks.vector_source_f(data, tags=tags)
shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
post_padding=postpad)
sink = blocks.vector_sink_f()
self.tb.connect(source, shaper, sink)
self.tb.run ()
# checks
self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
self.assertTrue(compare_tags(sink.tags()[0], etag))
def test_short_burst (self):
prepad = 10
postpad = 10
length = 9
data = np.ones(length + 10) # need 10 more to push things through
window = np.concatenate((-2.0*np.ones(5), -3.0*np.ones(1),
-4.0*np.ones(5)))
tags = (make_length_tag(0, length),)
expected = np.concatenate((np.zeros(prepad), window[0:4],
np.ones(1), window[5:9],
np.zeros(postpad)))
etag = make_length_tag(0, length + prepad + postpad)
# flowgraph
source = blocks.vector_source_f(data, tags=tags)
shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
post_padding=postpad)
sink = blocks.vector_sink_f()
self.tb.connect(source, shaper, sink)
self.tb.run ()
# checks
self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
self.assertTrue(compare_tags(sink.tags()[0], etag))
def test_consecutive_bursts (self):
prepad = 10
postpad = 10
length1 = 15
length2 = 25
data = np.concatenate((np.ones(length1), -1.0*np.ones(length2),
np.zeros(10))) # need 10 more to push things through
window = np.concatenate((-2.0*np.ones(5), -4.0*np.ones(5)))
tags = (make_length_tag(0, length1), make_length_tag(length1, length2))
expected = np.concatenate((np.zeros(prepad), window[0:5],
np.ones(length1 - len(window)), window[5:10],
np.zeros(postpad + prepad), -1.0*window[0:5],
-1.0*np.ones(length2 - len(window)),
-1.0*window[5:10], np.zeros(postpad)))
etags = (make_length_tag(0, length1 + prepad + postpad),
make_length_tag(length1 + prepad + postpad,
length2 + prepad + postpad))
# flowgraph
source = blocks.vector_source_f(data, tags=tags)
shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
post_padding=postpad)
sink = blocks.vector_sink_f()
self.tb.connect(source, shaper, sink)
self.tb.run ()
# checks
self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
for i in xrange(len(etags)):
self.assertTrue(compare_tags(sink.tags()[i], etags[i]))
def test_tag_gap (self):
prepad = 10
postpad = 10
length = 20
gap_len = 5
data = np.arange(2*length + 10,
dtype=float) # need 10 more to push things through
window = np.concatenate((-2.0*np.ones(5), -4.0*np.ones(5)))
ewindow = window * np.array([1,-1,1,-1,1,1,-1,1,-1,1],dtype=float)
tags = (make_length_tag(0, length),
make_length_tag(length + gap_len, length))
expected = np.concatenate((np.zeros(prepad), ewindow[0:5],
np.arange(0, length, dtype=float),
ewindow[5:10], np.zeros(postpad),
np.zeros(prepad), ewindow[0:5],
np.arange(length + gap_len,
2*length + gap_len, dtype=float),
ewindow[5:10], np.zeros(postpad)))
burst_len = length + len(window) + prepad + postpad
etags = (make_length_tag(0, burst_len),
make_length_tag(burst_len, burst_len))
# flowgraph
source = blocks.vector_source_f(data, tags=tags)
shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
post_padding=postpad,
insert_phasing=True)
sink = blocks.vector_sink_f()
self.tb.connect(source, shaper, sink)
self.tb.run ()
# checks
self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
for i in xrange(len(etags)):
self.assertTrue(compare_tags(sink.tags()[i], etags[i]))
def test_tag_propagation (self):
prepad = 10
postpad = 10
length1 = 15
length2 = 25
gap_len = 5
lentag1_offset = 0
lentag2_offset = length1 + gap_len
tag1_offset = 0 # accompanies first length tag
tag2_offset = length1 + gap_len # accompanies second length tag
tag3_offset = 2 # in ramp-up state
tag4_offset = length1 + 2 # in gap; tag will be dropped
tag5_offset = length1 + gap_len + 7 # in copy state
data = np.concatenate((np.ones(length1), np.zeros(gap_len),
-1.0*np.ones(length2), np.zeros(10)))
window = np.concatenate((-2.0*np.ones(5), -4.0*np.ones(5)))
tags = (make_length_tag(lentag1_offset, length1),
make_length_tag(lentag2_offset, length2),
make_tag(tag1_offset, 'head', pmt.intern('tag1')),
make_tag(tag2_offset, 'head', pmt.intern('tag2')),
make_tag(tag3_offset, 'body', pmt.intern('tag3')),
make_tag(tag4_offset, 'body', pmt.intern('tag4')),
make_tag(tag5_offset, 'body', pmt.intern('tag5')))
expected = np.concatenate((np.zeros(prepad), window[0:5],
np.ones(length1 - len(window)), window[5:10],
np.zeros(postpad + prepad), -1.0*window[0:5],
-1.0*np.ones(length2 - len(window)),
-1.0*window[5:10], np.zeros(postpad)))
elentag1_offset = 0
elentag2_offset = length1 + prepad + postpad
etag1_offset = 0
etag2_offset = elentag2_offset
etag3_offset = prepad + tag3_offset
etag5_offset = 2*prepad + postpad + tag5_offset - gap_len
etags = (make_length_tag(elentag1_offset, length1 + prepad + postpad),
make_length_tag(elentag2_offset, length2 + prepad + postpad),
make_tag(etag1_offset, 'head', pmt.intern('tag1')),
make_tag(etag2_offset, 'head', pmt.intern('tag2')),
make_tag(etag3_offset, 'body', pmt.intern('tag3')),
make_tag(etag5_offset, 'body', pmt.intern('tag5')))
# flowgraph
source = blocks.vector_source_f(data, tags=tags)
shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
post_padding=postpad)
sink = blocks.vector_sink_f()
self.tb.connect(source, shaper, sink)
self.tb.run ()
# checks
self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
for x, y in zip(sorted(sink.tags(), key=gr.tag_t_offset_compare_key()),
sorted(etags, key=gr.tag_t_offset_compare_key())):
self.assertTrue(compare_tags(x, y))
if __name__ == '__main__':
gr_unittest.run(qa_burst_shaper, "qa_burst_shaper.xml")
| gpl-3.0 | 8,686,335,505,416,577,000 | 42.582353 | 83 | 0.526994 | false |
pombredanne/product-definition-center | pdc/apps/release/lib.py | 3 | 6715 | #
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from django.core.exceptions import ValidationError
from django.db import transaction
import json
import productmd
from productmd.common import create_release_id
from pdc.apps.common import hacks as common_hacks
from pdc.apps.common import models as common_models
from . import models
def _maybe_log(request, created, obj):
"""
Optionally create an entry in changeset.
"""
if created:
request.changeset.add(obj.__class__.__name__,
obj.pk,
'null',
json.dumps(obj.export()))
def _logged_get_or_create(request, model, **kwargs):
"""
Wrapper around `get_or_create` that also creates an entry in changeset.
"""
obj, created = model.objects.get_or_create(**kwargs)
_maybe_log(request, created, obj)
return obj, created
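# A hedged illustration of the wrapper above (the field values are examples):
#
#     product, created = _logged_get_or_create(
#         request, models.Product, name='Fedora', short='fedora')
#
# behaves like models.Product.objects.get_or_create(...), but also records
# newly created objects in request.changeset.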
def get_or_create_integrated_release(request, orig_release, release):
"""
Given a PDC release and a release retrieved from compose info specified in
a variant, return the release for integrated layered product. The Product,
ProductVersion and BaseProduct instances will also be created if necessary.
"""
integrated_base_product, _ = _logged_get_or_create(
request, models.BaseProduct,
name=orig_release.name,
short=orig_release.short,
version=orig_release.version.split('.')[0],
release_type=orig_release.release_type
)
integrated_product, _ = _logged_get_or_create(
request, models.Product,
name=release.name,
short=release.short.lower()
)
integrated_product_version, _ = _logged_get_or_create(
request, models.ProductVersion,
product=integrated_product,
name=release.name,
short=release.short.lower(),
version=release.version.split('.')[0]
)
try:
integrated_release, _ = _logged_get_or_create(
request, models.Release,
name=release.name,
short=release.short.lower(),
release_type=orig_release.release_type,
version=release.version,
base_product=integrated_base_product,
integrated_with=orig_release,
product_version=integrated_product_version
)
except ValidationError:
release_id = create_release_id(
release.short.lower(),
release.version,
orig_release.release_type.short,
integrated_base_product.short,
integrated_base_product.version,
integrated_base_product.release_type.short,
)
msg = ('Failed to create release {} for integrated layered product.' +
' A conflicting release already exists.' +
' There is likely a version mismatch between the imported' +
' release and its layered integrated product in the composeinfo.')
raise ValidationError(msg.format(release_id))
return integrated_release
@transaction.atomic
def release__import_from_composeinfo(request, composeinfo_json):
"""
Import release including variants and architectures from composeinfo json.
"""
ci = productmd.composeinfo.ComposeInfo()
common_hacks.deserialize_wrapper(ci.deserialize, composeinfo_json)
if ci.release.is_layered:
release_type_obj = models.ReleaseType.objects.get(short=getattr(ci.base_product, "type", "ga"))
base_product_obj, _ = _logged_get_or_create(
request, models.BaseProduct,
name=ci.base_product.name,
short=ci.base_product.short.lower(),
version=ci.base_product.version,
release_type=release_type_obj,
)
else:
base_product_obj = None
product_obj, _ = _logged_get_or_create(
request, models.Product,
name=ci.release.name,
short=ci.release.short.lower()
)
product_version_obj, _ = _logged_get_or_create(
request, models.ProductVersion,
product=product_obj,
name=ci.release.name,
short=ci.release.short.lower(),
version=ci.release.major_version
)
release_type_obj = models.ReleaseType.objects.get(short=getattr(ci.release, "type", "ga"))
release_obj, _ = _logged_get_or_create(
request, models.Release,
name=ci.release.name,
short=ci.release.short.lower(),
version=ci.release.version,
base_product=base_product_obj,
release_type=release_type_obj,
product_version=product_version_obj,
)
# if not created:
# raise RuntimeError("Release already exists: %s" % release_obj)
# We can't log variants immediately after they are created, as their export
# includes architectures. Therefore they are collected in this list and
# logged once import is done. This also nicely abstracts integrated
# variants that may not be present.
add_to_changelog = []
for variant in ci.variants.get_variants(recursive=True):
variant_type = models.VariantType.objects.get(name=variant.type)
release = variant.release
integrated_variant = None
if release.name:
integrated_release = get_or_create_integrated_release(
request,
release_obj,
release
)
integrated_variant, created = models.Variant.objects.get_or_create(
release=integrated_release,
variant_id=variant.id,
variant_uid=variant.uid,
variant_name=variant.name,
variant_type=models.VariantType.objects.get(name='variant')
)
if created:
add_to_changelog.append(integrated_variant)
variant_obj, created = models.Variant.objects.get_or_create(
release=release_obj,
variant_id=variant.id,
variant_uid=variant.uid,
variant_name=variant.name,
variant_type=variant_type,
)
if created:
add_to_changelog.append(variant_obj)
for arch in variant.arches:
arch_obj = common_models.Arch.objects.get(name=arch)
var_arch_obj, _ = models.VariantArch.objects.get_or_create(
arch=arch_obj,
variant=variant_obj
)
if integrated_variant:
models.VariantArch.objects.get_or_create(
arch=arch_obj,
variant=integrated_variant
)
for obj in add_to_changelog:
_maybe_log(request, True, obj)
return release_obj
| mit | -1,883,109,596,949,143,300 | 34.909091 | 103 | 0.618168 | false |
drufat/sympy | sympy/functions/special/tests/test_gamma_functions.py | 33 | 16080 | from sympy import (
Symbol, gamma, I, oo, nan, zoo, factorial, sqrt, Rational, log,
polygamma, EulerGamma, pi, uppergamma, S, expand_func, loggamma, sin,
cos, O, lowergamma, exp, erf, erfc, exp_polar, harmonic, zeta,conjugate)
from sympy.core.function import ArgumentIndexError
from sympy.utilities.randtest import (test_derivative_numerically as td,
random_complex_number as randcplx,
verify_numerically as tn)
from sympy.utilities.pytest import raises
x = Symbol('x')
y = Symbol('y')
n = Symbol('n', integer=True)
w = Symbol('w', real=True)
def test_gamma():
assert gamma(nan) == nan
assert gamma(oo) == oo
assert gamma(-100) == zoo
assert gamma(0) == zoo
assert gamma(1) == 1
assert gamma(2) == 1
assert gamma(3) == 2
assert gamma(102) == factorial(101)
assert gamma(Rational(1, 2)) == sqrt(pi)
assert gamma(Rational(3, 2)) == Rational(1, 2)*sqrt(pi)
assert gamma(Rational(5, 2)) == Rational(3, 4)*sqrt(pi)
assert gamma(Rational(7, 2)) == Rational(15, 8)*sqrt(pi)
assert gamma(Rational(-1, 2)) == -2*sqrt(pi)
assert gamma(Rational(-3, 2)) == Rational(4, 3)*sqrt(pi)
assert gamma(Rational(-5, 2)) == -Rational(8, 15)*sqrt(pi)
assert gamma(Rational(-15, 2)) == Rational(256, 2027025)*sqrt(pi)
assert gamma(Rational(
-11, 8)).expand(func=True) == Rational(64, 33)*gamma(Rational(5, 8))
assert gamma(Rational(
-10, 3)).expand(func=True) == Rational(81, 280)*gamma(Rational(2, 3))
assert gamma(Rational(
14, 3)).expand(func=True) == Rational(880, 81)*gamma(Rational(2, 3))
assert gamma(Rational(
17, 7)).expand(func=True) == Rational(30, 49)*gamma(Rational(3, 7))
assert gamma(Rational(
19, 8)).expand(func=True) == Rational(33, 64)*gamma(Rational(3, 8))
assert gamma(x).diff(x) == gamma(x)*polygamma(0, x)
assert gamma(x - 1).expand(func=True) == gamma(x)/(x - 1)
assert gamma(x + 2).expand(func=True, mul=False) == x*(x + 1)*gamma(x)
assert conjugate(gamma(x)) == gamma(conjugate(x))
assert expand_func(gamma(x + Rational(3, 2))) == \
(x + Rational(1, 2))*gamma(x + Rational(1, 2))
assert expand_func(gamma(x - Rational(1, 2))) == \
gamma(Rational(1, 2) + x)/(x - Rational(1, 2))
# Test a bug:
assert expand_func(gamma(x + Rational(3, 4))) == gamma(x + Rational(3, 4))
assert gamma(3*exp_polar(I*pi)/4).is_nonnegative is False
assert gamma(3*exp_polar(I*pi)/4).is_nonpositive is True
# Issue 8526
k = Symbol('k', integer=True, nonnegative=True)
assert isinstance(gamma(k), gamma)
assert gamma(-k) == zoo
def test_gamma_rewrite():
assert gamma(n).rewrite(factorial) == factorial(n - 1)
def test_gamma_series():
assert gamma(x + 1).series(x, 0, 3) == \
1 - EulerGamma*x + x**2*(EulerGamma**2/2 + pi**2/12) + O(x**3)
assert gamma(x).series(x, -1, 3) == \
-1/(x + 1) + EulerGamma - 1 + (x + 1)*(-1 - pi**2/12 - EulerGamma**2/2 + \
EulerGamma) + (x + 1)**2*(-1 - pi**2/12 - EulerGamma**2/2 + EulerGamma**3/6 - \
polygamma(2, 1)/6 + EulerGamma*pi**2/12 + EulerGamma) + O((x + 1)**3, (x, -1))
def tn_branch(s, func):
    # Numerically check the branch jump of func(s, z) across the negative
    # real axis: the difference taken with polar arguments should agree
    # with the difference evaluated just above and below the cut.
    from sympy import I, pi, exp_polar
    from random import uniform
    c = uniform(1, 5)
    expr = func(s, c*exp_polar(I*pi)) - func(s, c*exp_polar(-I*pi))
    eps = 1e-15
    expr2 = func(s + eps, -c + eps*I) - func(s + eps, -c - eps*I)
    return abs(expr.n() - expr2.n()).n() < 1e-10
def test_lowergamma():
from sympy import meijerg, exp_polar, I, expint
assert lowergamma(x, y).diff(y) == y**(x - 1)*exp(-y)
assert td(lowergamma(randcplx(), y), y)
assert td(lowergamma(x, randcplx()), x)
assert lowergamma(x, y).diff(x) == \
gamma(x)*polygamma(0, x) - uppergamma(x, y)*log(y) \
- meijerg([], [1, 1], [0, 0, x], [], y)
assert lowergamma(S.Half, x) == sqrt(pi)*erf(sqrt(x))
assert not lowergamma(S.Half - 3, x).has(lowergamma)
assert not lowergamma(S.Half + 3, x).has(lowergamma)
assert lowergamma(S.Half, x, evaluate=False).has(lowergamma)
assert tn(lowergamma(S.Half + 3, x, evaluate=False),
lowergamma(S.Half + 3, x), x)
assert tn(lowergamma(S.Half - 3, x, evaluate=False),
lowergamma(S.Half - 3, x), x)
assert tn_branch(-3, lowergamma)
assert tn_branch(-4, lowergamma)
assert tn_branch(S(1)/3, lowergamma)
assert tn_branch(pi, lowergamma)
assert lowergamma(3, exp_polar(4*pi*I)*x) == lowergamma(3, x)
assert lowergamma(y, exp_polar(5*pi*I)*x) == \
exp(4*I*pi*y)*lowergamma(y, x*exp_polar(pi*I))
assert lowergamma(-2, exp_polar(5*pi*I)*x) == \
lowergamma(-2, x*exp_polar(I*pi)) + 2*pi*I
assert conjugate(lowergamma(x, y)) == lowergamma(conjugate(x), conjugate(y))
assert conjugate(lowergamma(x, 0)) == conjugate(lowergamma(x, 0))
assert conjugate(lowergamma(x, -oo)) == conjugate(lowergamma(x, -oo))
assert lowergamma(
x, y).rewrite(expint) == -y**x*expint(-x + 1, y) + gamma(x)
k = Symbol('k', integer=True)
assert lowergamma(
k, y).rewrite(expint) == -y**k*expint(-k + 1, y) + gamma(k)
k = Symbol('k', integer=True, positive=False)
assert lowergamma(k, y).rewrite(expint) == lowergamma(k, y)
assert lowergamma(x, y).rewrite(uppergamma) == gamma(x) - uppergamma(x, y)
def test_uppergamma():
from sympy import meijerg, exp_polar, I, expint
assert uppergamma(4, 0) == 6
assert uppergamma(x, y).diff(y) == -y**(x - 1)*exp(-y)
assert td(uppergamma(randcplx(), y), y)
assert uppergamma(x, y).diff(x) == \
uppergamma(x, y)*log(y) + meijerg([], [1, 1], [0, 0, x], [], y)
assert td(uppergamma(x, randcplx()), x)
assert uppergamma(S.Half, x) == sqrt(pi)*erfc(sqrt(x))
assert not uppergamma(S.Half - 3, x).has(uppergamma)
assert not uppergamma(S.Half + 3, x).has(uppergamma)
assert uppergamma(S.Half, x, evaluate=False).has(uppergamma)
assert tn(uppergamma(S.Half + 3, x, evaluate=False),
uppergamma(S.Half + 3, x), x)
assert tn(uppergamma(S.Half - 3, x, evaluate=False),
uppergamma(S.Half - 3, x), x)
assert tn_branch(-3, uppergamma)
assert tn_branch(-4, uppergamma)
assert tn_branch(S(1)/3, uppergamma)
assert tn_branch(pi, uppergamma)
assert uppergamma(3, exp_polar(4*pi*I)*x) == uppergamma(3, x)
assert uppergamma(y, exp_polar(5*pi*I)*x) == \
exp(4*I*pi*y)*uppergamma(y, x*exp_polar(pi*I)) + \
gamma(y)*(1 - exp(4*pi*I*y))
assert uppergamma(-2, exp_polar(5*pi*I)*x) == \
uppergamma(-2, x*exp_polar(I*pi)) - 2*pi*I
assert uppergamma(-2, x) == expint(3, x)/x**2
assert conjugate(uppergamma(x, y)) == uppergamma(conjugate(x), conjugate(y))
assert conjugate(uppergamma(x, 0)) == gamma(conjugate(x))
assert conjugate(uppergamma(x, -oo)) == conjugate(uppergamma(x, -oo))
assert uppergamma(x, y).rewrite(expint) == y**x*expint(-x + 1, y)
assert uppergamma(x, y).rewrite(lowergamma) == gamma(x) - lowergamma(x, y)
def test_polygamma():
from sympy import I
assert polygamma(n, nan) == nan
assert polygamma(0, oo) == oo
assert polygamma(0, -oo) == oo
assert polygamma(0, I*oo) == oo
assert polygamma(0, -I*oo) == oo
assert polygamma(1, oo) == 0
assert polygamma(5, oo) == 0
    assert polygamma(0, -9) == zoo
assert polygamma(0, -1) == zoo
assert polygamma(0, 0) == zoo
assert polygamma(0, 1) == -EulerGamma
assert polygamma(0, 7) == Rational(49, 20) - EulerGamma
assert polygamma(1, 1) == pi**2/6
assert polygamma(1, 2) == pi**2/6 - 1
assert polygamma(1, 3) == pi**2/6 - Rational(5, 4)
assert polygamma(3, 1) == pi**4 / 15
assert polygamma(3, 5) == 6*(Rational(-22369, 20736) + pi**4/90)
assert polygamma(5, 1) == 8 * pi**6 / 63
    def t(m, n):
        # polygamma(0, m/n) should reduce to a closed form (no residual
        # polygamma) that agrees numerically with direct evaluation.
        x = S(m)/n
        r = polygamma(0, x)
        if r.has(polygamma):
            return False
        return abs(polygamma(0, x.n()).n() - r.n()).n() < 1e-10
assert t(1, 2)
assert t(3, 2)
assert t(-1, 2)
assert t(1, 4)
assert t(-3, 4)
assert t(1, 3)
assert t(4, 3)
assert t(3, 4)
assert t(2, 3)
assert polygamma(0, x).rewrite(zeta) == polygamma(0, x)
assert polygamma(1, x).rewrite(zeta) == zeta(2, x)
assert polygamma(2, x).rewrite(zeta) == -2*zeta(3, x)
assert polygamma(3, 7*x).diff(x) == 7*polygamma(4, 7*x)
assert polygamma(0, x).rewrite(harmonic) == harmonic(x - 1) - EulerGamma
assert polygamma(2, x).rewrite(harmonic) == 2*harmonic(x - 1, 3) - 2*zeta(3)
ni = Symbol("n", integer=True)
assert polygamma(ni, x).rewrite(harmonic) == (-1)**(ni + 1)*(-harmonic(x - 1, ni + 1)
+ zeta(ni + 1))*factorial(ni)
# Polygamma of non-negative integer order is unbranched:
from sympy import exp_polar
k = Symbol('n', integer=True, nonnegative=True)
assert polygamma(k, exp_polar(2*I*pi)*x) == polygamma(k, x)
# but negative integers are branched!
k = Symbol('n', integer=True)
assert polygamma(k, exp_polar(2*I*pi)*x).args == (k, exp_polar(2*I*pi)*x)
# Polygamma of order -1 is loggamma:
assert polygamma(-1, x) == loggamma(x)
# But smaller orders are iterated integrals and don't have a special name
assert polygamma(-2, x).func is polygamma
# Test a bug
assert polygamma(0, -x).expand(func=True) == polygamma(0, -x)
def test_polygamma_expand_func():
assert polygamma(0, x).expand(func=True) == polygamma(0, x)
assert polygamma(0, 2*x).expand(func=True) == \
polygamma(0, x)/2 + polygamma(0, Rational(1, 2) + x)/2 + log(2)
assert polygamma(1, 2*x).expand(func=True) == \
polygamma(1, x)/4 + polygamma(1, Rational(1, 2) + x)/4
assert polygamma(2, x).expand(func=True) == \
polygamma(2, x)
assert polygamma(0, -1 + x).expand(func=True) == \
polygamma(0, x) - 1/(x - 1)
assert polygamma(0, 1 + x).expand(func=True) == \
1/x + polygamma(0, x )
assert polygamma(0, 2 + x).expand(func=True) == \
1/x + 1/(1 + x) + polygamma(0, x)
assert polygamma(0, 3 + x).expand(func=True) == \
polygamma(0, x) + 1/x + 1/(1 + x) + 1/(2 + x)
assert polygamma(0, 4 + x).expand(func=True) == \
polygamma(0, x) + 1/x + 1/(1 + x) + 1/(2 + x) + 1/(3 + x)
assert polygamma(1, 1 + x).expand(func=True) == \
polygamma(1, x) - 1/x**2
assert polygamma(1, 2 + x).expand(func=True, multinomial=False) == \
polygamma(1, x) - 1/x**2 - 1/(1 + x)**2
assert polygamma(1, 3 + x).expand(func=True, multinomial=False) == \
polygamma(1, x) - 1/x**2 - 1/(1 + x)**2 - 1/(2 + x)**2
assert polygamma(1, 4 + x).expand(func=True, multinomial=False) == \
polygamma(1, x) - 1/x**2 - 1/(1 + x)**2 - \
1/(2 + x)**2 - 1/(3 + x)**2
assert polygamma(0, x + y).expand(func=True) == \
polygamma(0, x + y)
assert polygamma(1, x + y).expand(func=True) == \
polygamma(1, x + y)
assert polygamma(1, 3 + 4*x + y).expand(func=True, multinomial=False) == \
polygamma(1, y + 4*x) - 1/(y + 4*x)**2 - \
1/(1 + y + 4*x)**2 - 1/(2 + y + 4*x)**2
assert polygamma(3, 3 + 4*x + y).expand(func=True, multinomial=False) == \
polygamma(3, y + 4*x) - 6/(y + 4*x)**4 - \
6/(1 + y + 4*x)**4 - 6/(2 + y + 4*x)**4
assert polygamma(3, 4*x + y + 1).expand(func=True, multinomial=False) == \
polygamma(3, y + 4*x) - 6/(y + 4*x)**4
e = polygamma(3, 4*x + y + S(3)/2)
assert e.expand(func=True) == e
e = polygamma(3, x + y + S(3)/4)
assert e.expand(func=True, basic=False) == e
def test_loggamma():
raises(TypeError, lambda: loggamma(2, 3))
raises(ArgumentIndexError, lambda: loggamma(x).fdiff(2))
assert loggamma(-1) == oo
assert loggamma(-2) == oo
assert loggamma(0) == oo
assert loggamma(1) == 0
assert loggamma(2) == 0
assert loggamma(3) == log(2)
assert loggamma(4) == log(6)
n = Symbol("n", integer=True, positive=True)
assert loggamma(n) == log(gamma(n))
assert loggamma(-n) == oo
assert loggamma(n/2) == log(2**(-n + 1)*sqrt(pi)*gamma(n)/gamma(n/2 + S.Half))
from sympy import I
assert loggamma(oo) == oo
assert loggamma(-oo) == zoo
assert loggamma(I*oo) == zoo
assert loggamma(-I*oo) == zoo
assert loggamma(zoo) == zoo
assert loggamma(nan) == nan
L = loggamma(S(16)/3)
E = -5*log(3) + loggamma(S(1)/3) + log(4) + log(7) + log(10) + log(13)
assert expand_func(L).doit() == E
assert L.n() == E.n()
L = loggamma(19/S(4))
E = -4*log(4) + loggamma(S(3)/4) + log(3) + log(7) + log(11) + log(15)
assert expand_func(L).doit() == E
assert L.n() == E.n()
L = loggamma(S(23)/7)
E = -3*log(7) + log(2) + loggamma(S(2)/7) + log(9) + log(16)
assert expand_func(L).doit() == E
assert L.n() == E.n()
L = loggamma(19/S(4)-7)
E = -log(9) - log(5) + loggamma(S(3)/4) + 3*log(4) - 3*I*pi
assert expand_func(L).doit() == E
assert L.n() == E.n()
L = loggamma(23/S(7)-6)
E = -log(19) - log(12) - log(5) + loggamma(S(2)/7) + 3*log(7) - 3*I*pi
assert expand_func(L).doit() == E
assert L.n() == E.n()
assert loggamma(x).diff(x) == polygamma(0, x)
s1 = loggamma(1/(x + sin(x)) + cos(x)).nseries(x, n=4)
s2 = (-log(2*x) - 1)/(2*x) - log(x/pi)/2 + (4 - log(2*x))*x/24 + O(x**2) + \
log(x)*x**2/2
assert (s1 - s2).expand(force=True).removeO() == 0
s1 = loggamma(1/x).series(x)
s2 = (1/x - S(1)/2)*log(1/x) - 1/x + log(2*pi)/2 + \
x/12 - x**3/360 + x**5/1260 + O(x**7)
assert ((s1 - s2).expand(force=True)).removeO() == 0
assert loggamma(x).rewrite('intractable') == log(gamma(x))
s1 = loggamma(x).series(x)
assert s1 == -log(x) - EulerGamma*x + pi**2*x**2/12 + x**3*polygamma(2, 1)/6 + \
pi**4*x**4/360 + x**5*polygamma(4, 1)/120 + O(x**6)
assert s1 == loggamma(x).rewrite('intractable').series(x)
assert conjugate(loggamma(x)) == loggamma(conjugate(x))
assert conjugate(loggamma(0)) == conjugate(loggamma(0))
assert conjugate(loggamma(1)) == loggamma(conjugate(1))
assert conjugate(loggamma(-oo)) == conjugate(loggamma(-oo))
assert loggamma(x).is_real is None
y, z = Symbol('y', real=True), Symbol('z', imaginary=True)
assert loggamma(y).is_real
assert loggamma(z).is_real is False
def tN(N, M):
assert loggamma(1/x)._eval_nseries(x, n=N).getn() == M
tN(0, 0)
tN(1, 1)
tN(2, 3)
tN(3, 3)
tN(4, 5)
tN(5, 5)
def test_polygamma_expansion():
# A. & S., pa. 259 and 260
assert polygamma(0, 1/x).nseries(x, n=3) == \
-log(x) - x/2 - x**2/12 + O(x**4)
assert polygamma(1, 1/x).series(x, n=5) == \
x + x**2/2 + x**3/6 + O(x**5)
assert polygamma(3, 1/x).nseries(x, n=11) == \
2*x**3 + 3*x**4 + 2*x**5 - x**7 + 4*x**9/3 + O(x**11)
def test_issue_8657():
n = Symbol('n', negative=True, integer=True)
m = Symbol('m', integer=True)
o = Symbol('o', positive=True)
p = Symbol('p', negative=True, integer=False)
assert gamma(n).is_real is None
assert gamma(m).is_real is None
assert gamma(o).is_real is True
assert gamma(p).is_real is True
assert gamma(w).is_real is None
def test_issue_8524():
x = Symbol('x', positive=True)
y = Symbol('y', negative=True)
z = Symbol('z', positive=False)
p = Symbol('p', negative=False)
q = Symbol('q', integer=True)
r = Symbol('r', integer=False)
e = Symbol('e', even=True, negative=True)
assert gamma(x).is_positive is True
assert gamma(y).is_positive is None
assert gamma(z).is_positive is None
assert gamma(p).is_positive is None
assert gamma(q).is_positive is None
assert gamma(r).is_positive is None
assert gamma(e + S.Half).is_positive is True
assert gamma(e - S.Half).is_positive is False
| bsd-3-clause | 4,201,365,259,345,325,600 | 36.835294 | 94 | 0.574005 | false |
ajjl/ITK | Modules/ThirdParty/pygccxml/src/pygccxml/declarations/typedef.py | 12 | 1454 | # Copyright 2014 Insight Software Consortium.
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
"""
defines class that describes C++ typedef declaration
"""
from . import declaration
from . import dependencies
class typedef_t(declaration.declaration_t):
"""describes C++ typedef declaration"""
def __init__(self, name='', type=None):
"""creates class that describes C++ typedef"""
declaration.declaration_t.__init__(self, name)
self._type = type
def _get__cmp__items(self):
"""implementation details"""
return [self.type]
def __eq__(self, other):
if not declaration.declaration_t.__eq__(self, other):
return False
return self.type == other.type
    def __hash__(self):
        return super(typedef_t, self).__hash__()
@property
def type(self):
"""reference to the original :class:`type <type_t>`"""
return self._type
@type.setter
def type(self, type):
self._type = type
def i_depend_on_them(self, recursive=True):
return [dependencies.dependency_info_t(self, self.type)]
@property
def byte_size(self):
"Size of this type in bytes @type: int"
return self._type.byte_size
@property
def byte_align(self):
"alignment of this type in bytes @type: int"
return self._type.byte_align
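# A hypothetical usage sketch (the aliased type object is assumed to come
# from the surrounding declarations model):
#
#     td = typedef_t(name='size_type', type=some_unsigned_int_type)
#     td.byte_size    # delegates to the aliased type
#     td.byte_align   # likewise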
| apache-2.0 | -7,225,133,819,483,987,000 | 25.436364 | 64 | 0.624484 | false |
december-soul/beets-plugin-importplaycount | importplaycount.py | 1 | 3144 | # coding=utf-8
# Copyright 2014, Rafael Bodill http://github.com/rafi
# vim: set ts=8 sw=4 tw=80 et :
import logging
import requests
import json
from beets.plugins import BeetsPlugin
from beets import ui
from beets import dbcore
from beets import config
from pprint import pprint
from beets.dbcore import types
log = logging.getLogger('beets')
api_url = 'http://ws.audioscrobbler.com/2.0/?method=track.getInfo&mbid=%s&api_key=%s&format=json'
api_url2 = 'http://ws.audioscrobbler.com/2.0/?method=track.getInfo&artist=%s&track=%s&api_key=%s&format=json'
class LastImportPlugin(BeetsPlugin):
def __init__(self):
super(LastImportPlugin, self).__init__()
config['lastfm'].add({
'user': '',
'api_key': '',
})
self.item_types = {
'lastfm_playcount': types.INTEGER,
'lastfm_listeners': types.INTEGER,
}
def commands(self):
cmd = ui.Subcommand('importplaycount',
help='import global last.fm play-count')
def func(lib, opts, args):
import_lastfm(self,lib,args)
cmd.func = func
return [cmd]
def import_lastfm(self, lib, args):
api_key = config['lastfm']['api_key']
if not api_key:
raise ui.UserError('You must specify an api_key for importplaycount')
log.info('Fetching last.fm play count')
for album in lib.albums():
for track in album.items():
count = int(track.get('lastfm_playcount', 0))
listeners = int(track.get('lastfm_listeners', 0))
if count == 0:
try:
page = fetch_track(track.mb_trackid, api_key)
if "track" not in page:
log.error(u'not found by mbid, try search by name')
page = fetch_track2(track.artist, track.title, api_key)
if "track" in page:
if "playcount" in page["track"]:
try:
new_count = int(page["track"]["playcount"].strip())
except ValueError:
new_count = 0
log.info(u'error convert playcount {0}'.format(page["track"]["playcount"]))
try:
new_listeners = int(page["track"]["listeners"].strip())
except ValueError:
new_listeners = 0
log.info(u'error convert listeners {0}'.format(page["track"]["listeners"]))
log.info(u'{0.artist} - {0.title}: \r\t\t\t\t\t\t\t\t change playcount from {1} to {2} \r\t\t\t\t\t\t\t\t\t\t\t\t\t\t listeners from {3} to {4}'.format(track, count, new_count, listeners, new_listeners))
track['lastfm_playcount'] = new_count
track['lastfm_listeners'] = new_listeners
track.store()
else:
log.error(u'skip {0.artist} - {0.title} Track not found on lastfm, error'.format(track))
pprint(page)
else:
log.error(u'skip {0.artist} - {0.title} Track not found on lastfm'.format(track))
except ValueError:
log.error(u'error {0.artist} - {0.title} Track not found on lastfm'.format(track))
log.info('importplaycount: ... done!')
def fetch_track(mbid, api_key):
return requests.get(api_url % (mbid, api_key)).json()
def fetch_track2(artist, title, api_key):
return requests.get(api_url2 % (artist, title, api_key)).json()
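# A hedged usage sketch (the artist, title and key are placeholders):
#
#     info = fetch_track2('Some Artist', 'Some Title', 'YOUR_API_KEY')
#     info['track']['playcount']  # string count, parsed with int() above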
| gpl-2.0 | -5,922,102,897,227,746,000 | 33.173913 | 208 | 0.626272 | false |
kindy61/mako | test/test_filters.py | 1 | 7765 | # -*- coding: utf-8 -*-
from mako.template import Template
import unittest
from util import result_lines, flatten_result
class FilterTest(unittest.TestCase):
def test_basic(self):
t = Template("""
${x | myfilter}
""")
assert flatten_result(t.render(x="this is x", myfilter=lambda t: "MYFILTER->%s<-MYFILTER" % t)) == "MYFILTER->this is x<-MYFILTER"
def test_expr(self):
"""test filters that are themselves expressions"""
t = Template("""
${x | myfilter(y)}
""")
def myfilter(y):
return lambda x: "MYFILTER->%s<-%s" % (x, y)
assert flatten_result(t.render(x="this is x", myfilter=myfilter, y="this is y")) == "MYFILTER->this is x<-this is y"
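        # i.e. the filter expression is evaluated first and its result is
        # called with the rendered value -- roughly myfilter(y)(unicode(x))
        # in this py2-era codebase (a sketch, not the exact generated code).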
def test_convert_str(self):
"""test that string conversion happens in expressions before sending to filters"""
t = Template("""
${x | trim}
""")
assert flatten_result(t.render(x=5)) == "5"
def test_def(self):
t = Template("""
<%def name="foo()" filter="myfilter">
this is foo
</%def>
${foo()}
""")
assert flatten_result(t.render(x="this is x", myfilter=lambda t: "MYFILTER->%s<-MYFILTER" % t)) == "MYFILTER-> this is foo <-MYFILTER"
def test_import(self):
t = Template("""
<%!
from mako import filters
%>\
trim this string: ${" some string to trim " | filters.trim} continue\
""")
assert t.render().strip()=="trim this string: some string to trim continue"
def test_import_2(self):
t = Template("""
trim this string: ${" some string to trim " | filters.trim} continue\
""", imports=["from mako import filters"])
#print t.code
assert t.render().strip()=="trim this string: some string to trim continue"
def test_encode_filter(self):
t = Template("""# coding: utf-8
some stuff.... ${x}
""", default_filters=['decode.utf8'])
#print t.code
assert t.render_unicode(x="voix m’a réveillé").strip() == u"some stuff.... voix m’a réveillé"
def test_custom_default(self):
t = Template("""
<%!
def myfilter(x):
return "->" + x + "<-"
%>
hi ${'there'}
""", default_filters=['myfilter'])
assert t.render().strip()=="hi ->there<-"
def test_global(self):
t = Template("""
<%page expression_filter="h"/>
${"<tag>this is html</tag>"}
""")
assert t.render().strip() == "<tag>this is html</tag>"
def test_nflag(self):
t = Template("""
${"<tag>this is html</tag>" | n}
""", default_filters=['h', 'unicode'])
assert t.render().strip() == "<tag>this is html</tag>"
t = Template("""
<%page expression_filter="h"/>
${"<tag>this is html</tag>" | n}
""")
assert t.render().strip() == "<tag>this is html</tag>"
t = Template("""
<%page expression_filter="h"/>
${"<tag>this is html</tag>" | n, h}
""")
assert t.render().strip() == "<tag>this is html</tag>"
def testnonexpression(self):
t = Template("""
<%!
def a(text):
return "this is a"
def b(text):
return "this is b"
%>
${foo()}
<%def name="foo()" buffered="True">
this is text
</%def>
""", buffer_filters=['a'])
assert t.render().strip() == "this is a"
t = Template("""
<%!
def a(text):
return "this is a"
def b(text):
return "this is b"
%>
${'hi'}
${foo()}
<%def name="foo()" buffered="True">
this is text
</%def>
""", buffer_filters=['a'], default_filters=['b'])
assert flatten_result(t.render()) == "this is b this is b"
t = Template("""
<%!
class Foo(object):
foo = True
def __str__(self):
return "this is a"
def a(text):
return Foo()
def b(text):
if hasattr(text, 'foo'):
return str(text)
else:
return "this is b"
%>
${'hi'}
${foo()}
<%def name="foo()" buffered="True">
this is text
</%def>
""", buffer_filters=['a'], default_filters=['b'])
assert flatten_result(t.render()) == "this is b this is a"
t = Template("""
<%!
def a(text):
return "this is a"
def b(text):
return "this is b"
%>
${foo()}
${bar()}
<%def name="foo()" filter="b">
this is text
</%def>
<%def name="bar()" filter="b" buffered="True">
this is text
</%def>
""", buffer_filters=['a'])
assert flatten_result(t.render()) == "this is b this is a"
def test_builtins(self):
t = Template("""
${"this is <text>" | h}
""")
assert flatten_result(t.render()) == "this is <text>"
t = Template("""
http://foo.com/arg1=${"hi! this is a string." | u}
""")
assert flatten_result(t.render()) == "http://foo.com/arg1=hi%21+this+is+a+string."
class BufferTest(unittest.TestCase):
def test_buffered_def(self):
t = Template("""
<%def name="foo()" buffered="True">
this is foo
</%def>
${"hi->" + foo() + "<-hi"}
""")
assert flatten_result(t.render()) == "hi-> this is foo <-hi"
def test_unbuffered_def(self):
t = Template("""
<%def name="foo()" buffered="False">
this is foo
</%def>
${"hi->" + foo() + "<-hi"}
""")
assert flatten_result(t.render()) == "this is foo hi-><-hi"
def test_capture(self):
t = Template("""
<%def name="foo()" buffered="False">
this is foo
</%def>
${"hi->" + capture(foo) + "<-hi"}
""")
assert flatten_result(t.render()) == "hi-> this is foo <-hi"
def test_capture_exception(self):
template = Template("""
<%def name="a()">
this is a
<%
raise TypeError("hi")
%>
</%def>
<%
c = capture(a)
%>
a->${c}<-a
""")
try:
template.render()
assert False
except TypeError:
assert True
def test_buffered_exception(self):
template = Template("""
<%def name="a()" buffered="True">
<%
raise TypeError("hi")
%>
</%def>
${a()}
""")
try:
print template.render()
assert False
except TypeError:
assert True
def test_capture_ccall(self):
t = Template("""
<%def name="foo()">
<%
x = capture(caller.body)
%>
this is foo. body: ${x}
</%def>
<%call expr="foo()">
ccall body
</%call>
""")
#print t.render()
assert flatten_result(t.render()) == "this is foo. body: ccall body"
| mit | -7,199,949,501,323,523,000 | 28.161654 | 142 | 0.437282 | false |
minorua/QGIS | python/plugins/processing/core/ProcessingResults.py | 15 | 1581 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ProcessingResults.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
from qgis.PyQt.QtCore import QObject, pyqtSignal
class ProcessingResults(QObject):
resultAdded = pyqtSignal()
results = []
def addResult(self, icon, name, timestamp, result):
self.results.append(Result(icon, name, timestamp, result))
self.resultAdded.emit()
def getResults(self):
return self.results
class Result:
def __init__(self, icon, name, timestamp, filename):
self.icon = icon
self.name = name
self.timestamp = timestamp
self.filename = filename
resultsList = ProcessingResults()
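# A minimal usage sketch (the icon, timestamp and slot name are illustrative):
#
#     resultsList.resultAdded.connect(refreshResultsDialog)
#     resultsList.addResult(icon, 'Buffer output', timestamp, '/tmp/out.shp')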
| gpl-2.0 | 8,691,816,453,463,786,000 | 30.62 | 75 | 0.455408 | false |
engdan77/edoAutoHomeMobile | twisted/protocols/htb.py | 51 | 9330 | # -*- test-case-name: twisted.test.test_htb -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Hierarchical Token Bucket traffic shaping.
Patterned after U{Martin Devera's Hierarchical Token Bucket traffic
shaper for the Linux kernel<http://luxik.cdi.cz/~devik/qos/htb/>}.
@seealso: U{HTB Linux queuing discipline manual - user guide
<http://luxik.cdi.cz/~devik/qos/htb/manual/userg.htm>}
@seealso: U{Token Bucket Filter in Linux Advanced Routing & Traffic Control
HOWTO<http://lartc.org/howto/lartc.qdisc.classless.html#AEN682>}
"""
# TODO: Investigate whether we should be using os.times()[-1] instead of
# time.time. time.time, it has been pointed out, can go backwards. Is
# the same true of os.times?
from time import time
from zope.interface import implements, Interface
from twisted.protocols import pcp
class Bucket:
"""
Implementation of a Token bucket.
A bucket can hold a certain number of tokens and it drains over time.
@cvar maxburst: The maximum number of tokens that the bucket can
hold at any given time. If this is C{None}, the bucket has
an infinite size.
@type maxburst: C{int}
@cvar rate: The rate at which the bucket drains, in number
of tokens per second. If the rate is C{None}, the bucket
drains instantaneously.
@type rate: C{int}
"""
maxburst = None
rate = None
_refcount = 0
def __init__(self, parentBucket=None):
"""
Create a L{Bucket} that may have a parent L{Bucket}.
@param parentBucket: If a parent Bucket is specified,
all L{add} and L{drip} operations on this L{Bucket}
will be applied on the parent L{Bucket} as well.
@type parentBucket: L{Bucket}
"""
self.content = 0
self.parentBucket = parentBucket
self.lastDrip = time()
def add(self, amount):
"""
Adds tokens to the L{Bucket} and its C{parentBucket}.
This will add as many of the C{amount} tokens as will fit into both
this L{Bucket} and its C{parentBucket}.
@param amount: The number of tokens to try to add.
@type amount: C{int}
@returns: The number of tokens that actually fit.
@returntype: C{int}
"""
self.drip()
if self.maxburst is None:
allowable = amount
else:
allowable = min(amount, self.maxburst - self.content)
if self.parentBucket is not None:
allowable = self.parentBucket.add(allowable)
self.content += allowable
return allowable
def drip(self):
"""
Let some of the bucket drain.
The L{Bucket} drains at the rate specified by the class
variable C{rate}.
@returns: C{True} if the bucket is empty after this drip.
@returntype: C{bool}
"""
if self.parentBucket is not None:
self.parentBucket.drip()
if self.rate is None:
self.content = 0
else:
now = time()
deltaTime = now - self.lastDrip
deltaTokens = deltaTime * self.rate
self.content = max(0, self.content - deltaTokens)
self.lastDrip = now
return self.content == 0
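# Hedged illustration (not part of the original module): the token arithmetic
# above, exercised on a small subclass; the rate and burst numbers are made up.
def _exampleBucketUsage():
    class SlowBucket(Bucket):
        maxburst = 100  # at most 100 tokens may be queued
        rate = 10       # drains 10 tokens per second
    b = SlowBucket()
    accepted = b.add(150)  # only 100 fit, limited by maxburst
    empty = b.drip()       # False until roughly ten seconds have drained
    return accepted, empty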
class IBucketFilter(Interface):
def getBucketFor(*somethings, **some_kw):
"""
Return a L{Bucket} corresponding to the provided parameters.
@returntype: L{Bucket}
"""
class HierarchicalBucketFilter:
"""
Filter things into buckets that can be nested.
@cvar bucketFactory: Class of buckets to make.
@type bucketFactory: L{Bucket}
@cvar sweepInterval: Seconds between sweeping out the bucket cache.
@type sweepInterval: C{int}
"""
implements(IBucketFilter)
bucketFactory = Bucket
sweepInterval = None
def __init__(self, parentFilter=None):
self.buckets = {}
self.parentFilter = parentFilter
self.lastSweep = time()
def getBucketFor(self, *a, **kw):
"""
Find or create a L{Bucket} corresponding to the provided parameters.
Any parameters are passed on to L{getBucketKey}, from them it
decides which bucket you get.
@returntype: L{Bucket}
"""
if ((self.sweepInterval is not None)
and ((time() - self.lastSweep) > self.sweepInterval)):
self.sweep()
if self.parentFilter:
parentBucket = self.parentFilter.getBucketFor(self, *a, **kw)
else:
parentBucket = None
key = self.getBucketKey(*a, **kw)
bucket = self.buckets.get(key)
if bucket is None:
bucket = self.bucketFactory(parentBucket)
self.buckets[key] = bucket
return bucket
def getBucketKey(self, *a, **kw):
"""
Construct a key based on the input parameters to choose a L{Bucket}.
The default implementation returns the same key for all
arguments. Override this method to provide L{Bucket} selection.
@returns: Something to be used as a key in the bucket cache.
"""
return None
def sweep(self):
"""
Remove empty buckets.
"""
for key, bucket in self.buckets.items():
bucket_is_empty = bucket.drip()
if (bucket._refcount == 0) and bucket_is_empty:
del self.buckets[key]
self.lastSweep = time()
class FilterByHost(HierarchicalBucketFilter):
"""
A Hierarchical Bucket filter with a L{Bucket} for each host.
"""
sweepInterval = 60 * 20
def getBucketKey(self, transport):
return transport.getPeer()[1]
class FilterByServer(HierarchicalBucketFilter):
"""
A Hierarchical Bucket filter with a L{Bucket} for each service.
"""
sweepInterval = None
def getBucketKey(self, transport):
return transport.getHost()[2]
class ShapedConsumer(pcp.ProducerConsumerProxy):
"""
Wraps a C{Consumer} and shapes the rate at which it receives data.
"""
# Providing a Pull interface means I don't have to try to schedule
# traffic with callLaters.
iAmStreaming = False
def __init__(self, consumer, bucket):
pcp.ProducerConsumerProxy.__init__(self, consumer)
self.bucket = bucket
self.bucket._refcount += 1
def _writeSomeData(self, data):
# In practice, this actually results in obscene amounts of
# overhead, as a result of generating lots and lots of packets
# with twelve-byte payloads. We may need to do a version of
# this with scheduled writes after all.
amount = self.bucket.add(len(data))
return pcp.ProducerConsumerProxy._writeSomeData(self, data[:amount])
def stopProducing(self):
pcp.ProducerConsumerProxy.stopProducing(self)
self.bucket._refcount -= 1
class ShapedTransport(ShapedConsumer):
"""
Wraps a C{Transport} and shapes the rate at which it receives data.
This is a L{ShapedConsumer} with a little bit of magic to provide for
the case where the consumer it wraps is also a C{Transport} and people
will be attempting to access attributes this does not proxy as a
C{Consumer} (e.g. C{loseConnection}).
"""
# Ugh. We only wanted to filter IConsumer, not ITransport.
iAmStreaming = False
def __getattr__(self, name):
# Because people will be doing things like .getPeer and
# .loseConnection on me.
return getattr(self.consumer, name)
class ShapedProtocolFactory:
"""
Dispense C{Protocols} with traffic shaping on their transports.
Usage::
myserver = SomeFactory()
myserver.protocol = ShapedProtocolFactory(myserver.protocol,
bucketFilter)
    Where C{SomeFactory} is a L{twisted.internet.protocol.Factory}, and
C{bucketFilter} is an instance of L{HierarchicalBucketFilter}.
"""
def __init__(self, protoClass, bucketFilter):
"""
Tell me what to wrap and where to get buckets.
@param protoClass: The class of C{Protocol} this will generate
wrapped instances of.
@type protoClass: L{Protocol<twisted.internet.interfaces.IProtocol>}
class
@param bucketFilter: The filter which will determine how
traffic is shaped.
@type bucketFilter: L{HierarchicalBucketFilter}.
"""
# More precisely, protoClass can be any callable that will return
# instances of something that implements IProtocol.
self.protocol = protoClass
self.bucketFilter = bucketFilter
def __call__(self, *a, **kw):
"""
Make a C{Protocol} instance with a shaped transport.
Any parameters will be passed on to the protocol's initializer.
@returns: A C{Protocol} instance with a L{ShapedTransport}.
"""
proto = self.protocol(*a, **kw)
origMakeConnection = proto.makeConnection
def makeConnection(transport):
bucket = self.bucketFilter.getBucketFor(transport)
shapedTransport = ShapedTransport(transport, bucket)
return origMakeConnection(shapedTransport)
proto.makeConnection = makeConnection
return proto
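# --- Hedged wiring sketch (not part of the original module) ---
# A typical combination of the pieces above: shape each client host to a
# made-up rate by wrapping a factory's protocol.  ``EchoFactory`` and the
# numbers are illustrative assumptions, not Twisted APIs.
#
#     from twisted.internet import reactor
#     class WebClientBucket(Bucket):
#         maxburst = 32768   # bytes of allowed burst
#         rate = 16384       # bytes per second
#     webFilter = FilterByHost()
#     webFilter.bucketFactory = WebClientBucket
#     factory = EchoFactory()
#     factory.protocol = ShapedProtocolFactory(factory.protocol, webFilter)
#     reactor.listenTCP(8080, factory)
#     reactor.run()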
| mit | 8,967,728,040,087,093,000 | 30.414141 | 77 | 0.633762 | false |
acsone/Arelle | arelle/plugin/xbrlDB/XbrlSemanticJsonDB.py | 2 | 49520 | '''
XbrlSemanticJsonDB.py implements an JSON database interface for Arelle, based
on a concrete realization of the Abstract Model PWD 2.0 layer. This is a semantic
representation of XBRL information.
This module may save directly to a JSON Server (TBD) or to append to a file of JSON.
This module provides the execution context for saving a dts and instances in
XBRL JSON graph. It may be loaded by Arelle's RSS feed, or by individual
DTS and instances opened by interactive or command line/web service mode.
Example dialog or command line parameters for operation:
host: the supporting host for JSON Server or "jsonFile" to append to a JSON file
port: the host port (80 is default) if a JSON Server
user, password: if needed for server
database: the top level path segment for the JSON Server or disk file path if jsonFile
timeout:
(c) Copyright 2013 Mark V Systems Limited, California US, All rights reserved.
Mark V copyright applies to this software, which is licensed according to the terms of Arelle(r).
to do:
1) add AMTF cube regions (dimensions)
HF - don't believe this is either feasible or has a use case in a graph model
2) check existence of (shared) documents and contained elements before adding
3) tuple structure declaration (particles in elements of data dictionary?)
4) tuple structure (instance facts)
5) add footnote resources to relationships (and test with EDInet footnote references)
6) test some filings with text blocks (shred them?) (30mB - 50mB sized text blocks?)
7) add mappings to, or any missing relationships, of Charlie's financial model
'''
import os, io, time, json, socket, logging, zlib, datetime
from arelle.ModelDtsObject import ModelConcept, ModelResource, ModelRelationship
from arelle.ModelInstanceObject import ModelFact
from arelle.ModelDocument import Type
from arelle import XbrlConst, XmlUtil, UrlUtil
import urllib.request
from urllib.error import HTTPError, URLError
from lxml import etree
from decimal import Decimal
TRACEJSONFILE = None
#TRACEJSONFILE = r"c:\temp\jsonDBtrace.log" # uncomment to trace JSON on connection (very big file!!!)
JSONFILE_HOSTNAME = "jsonFile"
def insertIntoDB(modelXbrl,
user=None, password=None, host=None, port=None, database=None, timeout=None,
product=None, rssItem=None, **kwargs):
jsondb = None
try:
jsondb = XbrlSemanticJsonDatabaseConnection(modelXbrl, user, password, host, port, database, timeout)
jsondb.insertXbrl(rssItem=rssItem)
jsondb.close()
except Exception as ex:
if jsondb is not None:
try:
jsondb.close(rollback=True)
except Exception as ex2:
pass
raise # reraise original exception with original traceback
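# Hedged invocation sketch (not part of the original module): append a loaded
# model to a JSON disk file instead of posting to a server; the output path is
# a made-up example.
#
#     insertIntoDB(modelXbrl,
#                  host=JSONFILE_HOSTNAME,      # "jsonFile"
#                  database="/tmp/filings.json")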
def isDBPort(host, port, db, timeout=10):
if host == JSONFILE_HOSTNAME:
return True
    # determine whether a JSON server answers on this port
    t = 2
    while t < timeout:
        try:
            conn = urllib.request.urlopen("http://{0}:{1}/{2}/status".format(host, port or '80', db), timeout=t)
            return True # the server answered the status request
        except HTTPError:
            return False # something spoke HTTP but rejected the status request
        except URLError:
            return False # something is listening but it is not the JSON server
        except socket.timeout:
            t = t + 2 # relax - retry with a longer timeout
return False
# top level JSON Graph object keynames
FILINGS = "filings"
DOCUMENTS = "documents"
def modelObjectDocumentUri(modelObject):
return UrlUtil.ensureUrl(modelObject.modelDocument.uri)
def modelObjectUri(modelObject):
return '#'.join((modelObjectDocumentUri(modelObject),
XmlUtil.elementFragmentIdentifier(modelObject)))
def qnameUri(qname, sep='#'):
return sep.join((qname.namespaceURI, qname.localName))
def qnamePrefix_Name(qname, sep=':'):
    # substitute standard prefixes for commonly-defaulted xmlns namespaces
prefix = {XbrlConst.xsd: 'xsd',
XbrlConst.xml: 'xml',
XbrlConst.xbrli: 'xbrli',
XbrlConst.link: 'link',
XbrlConst.gen: 'gen',
XbrlConst.xlink: 'xlink'
}.get(qname.namespaceURI, qname.prefix)
return sep.join((prefix, qname.localName))
def modelObjectQnameUri(modelObject, sep='#'):
return qnameUri(modelObject.qname, sep)
def modelObjectNameUri(modelObject, sep='#'):
return '#'.join((modelObjectDocumentUri(modelObject),
modelObject.name)) # for schema definitions with name attribute
class XJDBException(Exception):
def __init__(self, code, message, **kwargs ):
self.code = code
self.message = message
self.kwargs = kwargs
self.args = ( self.__repr__(), )
def __repr__(self):
return _('[{0}] exception: {1}').format(self.code, self.message % self.kwargs)
def jsonDefaultEncoder(obj):
if isinstance(obj, Decimal):
return float(obj)
elif isinstance(obj, (datetime.date, datetime.datetime)):
return XmlUtil.dateunionValue(obj)
raise TypeError("Type {} is not supported for json output".format(type(obj).__name__))
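# Hedged illustration (not part of the original module): jsonDefaultEncoder
# lets json.dumps serialize the Decimal and date/datetime values that occur in
# XBRL facts.
def _exampleJsonEncode():
    return json.dumps({"value": Decimal("42.5"),
                       "endDate": datetime.date(2012, 12, 31)},
                      default=jsonDefaultEncoder)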
class XbrlSemanticJsonDatabaseConnection():
def __init__(self, modelXbrl, user, password, host, port, database, timeout):
self.modelXbrl = modelXbrl
self.disclosureSystem = modelXbrl.modelManager.disclosureSystem
#self.conn = RexProConnection(host, int(port or '8182'), (database or 'emptygraph'),
# user=user, password=password)
self.isJsonFile = host == JSONFILE_HOSTNAME
if self.isJsonFile:
self.jsonFile = database
else:
connectionUrl = "http://{0}:{1}".format(host, port or '80')
self.url = connectionUrl + '/' + database
# Create an OpenerDirector with support for Basic HTTP Authentication...
auth_handler = urllib.request.HTTPBasicAuthHandler()
if user:
auth_handler.add_password(realm=None,
uri=connectionUrl,
user=user,
passwd=password)
self.conn = urllib.request.build_opener(auth_handler)
self.timeout = timeout or 60
self.verticePropTypes = {}
def close(self, rollback=False):
try:
if not self.isJsonFile:
self.conn.close()
self.__dict__.clear() # dereference everything
except Exception as ex:
self.__dict__.clear() # dereference everything
raise
@property
def isClosed(self):
return not bool(self.__dict__) # closed when dict is empty
def showStatus(self, msg, clearAfter=None):
self.modelXbrl.modelManager.showStatus(msg, clearAfter)
def execute(self, activity, graph=None, query=None):
if graph is not None:
headers = {'User-agent': 'Arelle/1.0',
'Accept': 'application/json',
'Content-Type': "text/json; charset='UTF-8'"}
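            # note: _STR_UNICODE is not imported here; it appears to be a
            # Python 2/3 string-compatibility shim that arelle's PythonUtil
            # injects into builtins (assumption based on arelle conventions)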
data = _STR_UNICODE(json.dumps(graph,
sort_keys=True, # allow comparability of json files
ensure_ascii=False,
indent=2,
default=jsonDefaultEncoder)) # might not be unicode in 2.7
elif query is not None:
headers = {'User-agent': 'Arelle/1.0',
'Accept': 'application/json'}
data = ("query=" + query)
else:
return None
        # the trace file records every request payload and can grow very large
if TRACEJSONFILE:
with io.open(TRACEJSONFILE, 'at', encoding='utf-8') as fh:
fh.write("\n\n>>> sent: \n")
fh.write(data)
if self.isJsonFile and data is not None:
with io.open(self.jsonFile, 'at', encoding='utf-8') as fh:
fh.write(data)
return None
if graph is not None or query is not None:
url = self.url + "/json"
request = urllib.request.Request(url,
data=data.encode('utf-8'),
headers=headers)
try:
with self.conn.open(request, timeout=self.timeout) as fp:
results = fp.read().decode('utf-8')
try:
results = json.loads(results)
except ValueError:
pass # leave results as string
except HTTPError as err:
results = err.fp.read().decode('utf-8')
if TRACEJSONFILE:
with io.open(TRACEJSONFILE, "a", encoding='utf-8') as fh:
fh.write("\n\n>>> received: \n{0}".format(str(results)))
if isinstance(results, str) and query is not None:
parser = etree.HTMLParser()
htmlDoc = etree.parse(io.StringIO(results), parser)
body = htmlDoc.find("//body")
if body is not None:
error = "".join(text for text in body.itertext())
else:
error = results
raise XJDBException("jsonDB:DatabaseError",
_("%(activity)s not successful: %(error)s"),
activity=activity, error=error)
return results
def commit(self, graph):
self.execute("Saving RDF Graph", graph=graph)
def loadGraphRootVertices(self):
self.showStatus("Load/Create graph root vertices")
pass
def getDBsize(self):
self.showStatus("Get database size")
return 0
def insertXbrl(self, rssItem):
try:
# must also have default dimensions loaded
from arelle import ValidateXbrlDimensions
ValidateXbrlDimensions.loadDimensionDefaults(self.modelXbrl)
#initialVcount, initialEcount = self.getDBsize() # don't include in timing, very slow
startedAt = time.time()
# find pre-existing documents in server database
self.identifyPreexistingDocuments()
g = {FILINGS:{},
DOCUMENTS:{}}
self.insertSchema(g)
# self.load() this done in the verify step
self.insertFiling(rssItem,g)
self.insertDocuments(g)
self.insertDataDictionary() # XML namespaces types aspects
#self.insertRelationshipTypeSets()
#self.insertResourceRoleSets()
#self.insertAspectValues()
self.modelXbrl.profileStat(_("XbrlSemanticJsonDB: DTS insertion"), time.time() - startedAt)
startedAt = time.time()
self.insertDataPoints()
self.modelXbrl.profileStat(_("XbrlSemanticJsonDB: data points insertion"), time.time() - startedAt)
startedAt = time.time()
self.insertRelationshipSets()
self.modelXbrl.profileStat(_("XbrlSemanticJsonDB: Relationships insertion"), time.time() - startedAt)
self.insertValidationResults()
self.modelXbrl.profileStat(_("XbrlSemanticJsonDB: Validation results insertion"), time.time() - startedAt)
#startedAt = time.time()
#self.insertValidCombinations()
#self.modelXbrl.profileStat(_("XbrlSemanticJsonDB: Valid Combinations insertion"), time.time() - startedAt)
self.showStatus("Committing entries")
self.commit(g)
self.modelXbrl.profileStat(_("XbrlSemanticJsonDB: insertion committed"), time.time() - startedAt)
#finalVcount, finalEcount = self.getDBsize()
#self.modelXbrl.modelManager.addToLog("added vertices: {0}, edges: {1}, total vertices: {2}, edges: {3}".format(
# finalVcount - initialVcount, finalEcount - initialEcount, finalVcount, finalEcount))
self.showStatus("DB insertion completed", clearAfter=5000)
except Exception as ex:
self.showStatus("DB insertion failed due to exception", clearAfter=5000)
raise
def insertSchema(self, g):
if True: # if schema not defined
self.showStatus("insert schema")
# Filings schema
# Aspect schema
# Relationships schema
# DataPoints schema
def insertFiling(self, rssItem, g):
self.showStatus("insert filing")
# accession graph -> document vertices
new_filing = {'documents': []}
if self.modelXbrl.modelDocument.creationSoftwareComment:
new_filing['creation_software'] = self.modelXbrl.modelDocument.creationSoftwareComment
datetimeNow = datetime.datetime.now()
datetimeNowStr = XmlUtil.dateunionValue(datetimeNow)
entryUri = modelObjectDocumentUri(self.modelXbrl)
if rssItem is not None: # sec accession
# set self.
new_filing['filingType'] = "SEC filing"
# for an RSS Feed entry from SEC, use rss item's accession information
new_filing['filingNumber'] = filingNumber = rssItem.accessionNumber
new_filing['acceptedTimestamp'] = XmlUtil.dateunionValue(rssItem.acceptanceDatetime)
new_filing['filingDate'] = XmlUtil.dateunionValue(rssItem.filingDate)
new_filing['entityId'] = rssItem.cikNumber
new_filing['entityName'] = rssItem.companyName
new_filing['SICCode'] = rssItem.assignedSic
new_filing['SECHtmlUrl'] = rssItem.htmlUrl
new_filing['entryUrl'] = rssItem.url
self.filingURI = rssItem.htmlUrl
else:
# not an RSS Feed item, make up our own accession ID (the time in seconds of epoch)
intNow = int(time.time())
new_filing['filingNumber'] = filingNumber = str(intNow)
            self.filingId = intNow # filing id made up from the current time (not from an SEC filing)
new_filing['filingType'] = "independent filing"
new_filing['acceptedTimestamp'] = datetimeNowStr
new_filing['filingDate'] = datetimeNowStr
new_filing['entryUrl'] = UrlUtil.ensureUrl(self.modelXbrl.fileSource.url)
self.filingURI = filingNumber
g[FILINGS][self.filingURI] = new_filing
self.filing = new_filing
# for now only one report per filing (but SEC may have multiple in future, such as form SD)
self.reportURI = modelObjectDocumentUri(self.modelXbrl)
self.report = {'filing': self.filingURI,
'aspectProxies': {},
'relationshipSets': {},
'dataPoints': {},
'messages': {}}
new_filing['reports'] = {self.reportURI: self.report}
# relationshipSets are a dts property
self.relationshipSets = [(arcrole, ELR, linkqname, arcqname)
for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys()
if ELR and (arcrole.startswith("XBRL-") or (linkqname and arcqname))]
def identifyPreexistingDocuments(self):
self.existingDocumentUris = set()
if not self.isJsonFile:
docFilters = []
for modelDocument in self.modelXbrl.urlDocs.values():
if modelDocument.type == Type.SCHEMA:
docFilters.append('STR(?doc) = "{}"'.format(UrlUtil.ensureUrl(modelDocument.uri)))
results = self.execute(
# TBD: fix up for Mongo DB query
"select",
query="""
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX DTS: <http://xbrl.org/2013/rdf/DTS/>
SELECT distinct ?doc WHERE { ?doc rdf:type DTS:Document
FILTER( """ + '\n|| '.join(docFilters) + ") .}")
try:
for result in results['results']['bindings']:
doc = result['doc']
if doc.get('type') == 'uri':
self.existingDocumentUris.add(doc['value'])
except KeyError:
pass # no existingDocumentUris
def insertDocuments(self,g):
# accession->documents
#
self.showStatus("insert documents")
documents = self.documents = g[DOCUMENTS]
for modelDocument in self.modelXbrl.urlDocs.values():
docUri = modelObjectDocumentUri(modelDocument)
if docUri not in self.existingDocumentUris:
documents[docUri] = {
'url': docUri,
'documentType': Type.typeName[modelDocument.type],
'references': [modelObjectDocumentUri(doc)
for doc, ref in modelDocument.referencesDocument.items()
if doc.inDTS and ref.referenceTypes & {"href", "import", "include"}],
'resources': {}
}
self.filing['documents'].append(docUri)
if modelDocument.uri == self.modelXbrl.modelDocument.uri: # entry document
self.report['entryPoint'] = docUri
def conceptsUsed(self):
conceptsUsed = set(f.qname for f in self.modelXbrl.factsInInstance)
for cntx in self.modelXbrl.contexts.values():
for dim in cntx.qnameDims.values():
conceptsUsed.add(dim.dimensionQname)
if dim.isExplicit:
conceptsUsed.add(dim.memberQname)
else:
conceptsUsed.add(dim.typedMember.qname)
for defaultDim, defaultDimMember in self.modelXbrl.qnameDimensionDefaults.items():
conceptsUsed.add(defaultDim)
conceptsUsed.add(defaultDimMember)
for roleTypes in (self.modelXbrl.roleTypes, self.modelXbrl.arcroleTypes):
for modelRoleTypes in roleTypes.values():
for modelRoleType in modelRoleTypes:
for qn in modelRoleType.usedOns:
conceptsUsed.add(qn)
for relationshipSetKey in self.relationshipSets:
relationshipSet = self.modelXbrl.relationshipSet(*relationshipSetKey)
for rel in relationshipSet.modelRelationships:
if isinstance(rel.fromModelObject, ModelConcept):
conceptsUsed.add(rel.fromModelObject)
if isinstance(rel.toModelObject, ModelConcept):
conceptsUsed.add(rel.toModelObject)
for qn in (XbrlConst.qnXbrliIdentifier, XbrlConst.qnXbrliPeriod, XbrlConst.qnXbrliUnit):
conceptsUsed.add(qn)
conceptsUsed -= {None} # remove None if in conceptsUsed
return conceptsUsed
def insertDataDictionary(self):
# separate graph
# document-> dataTypeSet -> dataType
# do all schema dataTypeSet vertices
self.type_id = {}
self.aspect_id = {}
self.aspect_proxy = {}
self.aspect_proxy_uri = {}
self.roleType_id = {}
self.arcroleType_id = {}
'''
if any((not self.document_isNew[modelDocument.uri])
for modelDocument in self.modelXbrl.urlDocs.values()):
conceptsUsed = self.conceptsUsed()
'''
conceptsUsed = self.conceptsUsed()
for modelDocument in self.modelXbrl.urlDocs.values():
self.showStatus("insert DataDictionary " + modelDocument.basename)
docUri = modelObjectDocumentUri(modelDocument)
document = self.documents[docUri]
# don't re-output existing documents
if modelDocument.type == Type.SCHEMA:
isNewDocument = True # self.document_isNew[modelDocument.uri]
modelConcepts = [modelConcept
for modelConcept in self.modelXbrl.qnameConcepts.values()
if modelConcept.modelDocument is modelDocument and
(isNewDocument or modelConcept in conceptsUsed)]
if docUri not in self.existingDocumentUris:
# adding document as new
document['dataTypes'] = dataTypes = {}
                    for modelType in self.modelXbrl.qnameTypes.values():
                        if modelType.modelDocument is modelDocument:
                            dataTypes[modelType.name] = dataType = {
                                'dataType': modelObjectNameUri(modelType),
                                'document': modelObjectDocumentUri(modelType),
                                'url': modelObjectUri(modelType),
                                'namespaceURI': modelType.qname.namespaceURI,
                                'localName': modelType.name,
                                }
                            xbrliBaseType = modelType.baseXbrliTypeQname
                            if not isinstance(xbrliBaseType, (tuple,list)):
                                xbrliBaseType = (xbrliBaseType,)
                            for baseType in xbrliBaseType:
                                if baseType is not None:
                                    dataType['baseType'] = qnameUri(baseType)
                                    if baseType.namespaceURI == "http://www.w3.org/2001/XMLSchema":
                                        dataType['baseXsdType'] = qnameUri(baseType)
                            typeDerivedFrom = modelType.typeDerivedFrom
                            if not isinstance(typeDerivedFrom, (tuple,list)): # list if a union
                                typeDerivedFrom = (typeDerivedFrom,)
                            for dt in typeDerivedFrom:
                                if dt is not None:
                                    dataType['derivedFrom'] = modelObjectNameUri(dt)
                            for prop in ('isTextBlock', 'isDomainItemType'):
                                propertyValue = getattr(modelType, prop, None)
                                if propertyValue:
                                    dataType[prop] = propertyValue
                    document['aspects'] = aspects = {}
                    for modelConcept in modelConcepts:
                        aspects[modelConcept.name] = aspect = {
                            'document': modelObjectDocumentUri(modelConcept),
                            'url': modelObjectUri(modelConcept),
                            'namespaceURI': modelConcept.qname.namespaceURI,
                            'localName': modelConcept.name,
                            'isAbstract': modelConcept.isAbstract
                            }
                        if modelConcept.periodType:
                            aspect['periodType'] = modelConcept.periodType
                        if modelConcept.balance:
                            aspect['balance'] = modelConcept.balance
                        for prop in ('isItem', 'isTuple', 'isLinkPart',
                                     'isNumeric', 'isMonetary', 'isExplicitDimension',
                                     'isDimensionItem', 'isPrimaryItem',
                                     'isTypedDimension', 'isDomainMember', 'isHypercubeItem',
                                     'isShares', 'isTextBlock', 'isNillable'):
                            propertyValue = getattr(modelConcept, prop, None)
                            if propertyValue:
                                aspect[prop] = propertyValue
                        conceptType = modelConcept.type
                        if conceptType is not None:
                            aspect['dataType'] = modelObjectNameUri(conceptType)
                        substitutionGroup = modelConcept.substitutionGroup
                        if substitutionGroup is not None:
                            aspect['substitutionGroup'] = modelObjectNameUri(substitutionGroup)
                    document['roleTypes'] = roleTypes = {}
                    for modelRoleTypes in self.modelXbrl.roleTypes.values():
                        for modelRoleType in modelRoleTypes:
                            roleTypes[modelRoleType.roleURI] = roleType = {
                                'document': modelObjectDocumentUri(modelRoleType),
                                'url': modelObjectUri(modelRoleType),
                                'roleURI': modelRoleType.roleURI,
                                'definition': modelRoleType.definition,
                                'usedOn': [modelObjectUri(self.modelXbrl.qnameConcepts[qn])
                                           for qn in modelRoleType.usedOns]
                                }
                    document['arcroleTypes'] = arcroleTypes = {}
                    for modelArcroleTypes in self.modelXbrl.arcroleTypes.values():
                        for modelArcroleType in modelArcroleTypes:
                            arcroleTypes[modelArcroleType.arcroleURI] = arcroleType = {
                                'document': modelObjectDocumentUri(modelArcroleType),
                                'url': modelObjectUri(modelArcroleType),
                                'arcroleURI': modelArcroleType.arcroleURI,
                                'definition': modelArcroleType.definition,
                                'usedOn': [modelObjectUri(self.modelXbrl.qnameConcepts[qn])
                                           for qn in modelArcroleType.usedOns],
                                'cyclesAllowed': modelArcroleType.cyclesAllowed
                                }
activity = "Insert data dictionary types, aspects, roles, and arcroles for " + modelDocument.uri
'''
def insertValidCombinations(self):
# document-> validCombinationsSet-> cubes
self.showStatus("insert ValidCombinations")
drsELRs = set(ELR
for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.values()
if arcrole == XbrlConst.all)
hasHcRels = self.modelXbrl.relationshipSet(XbrlConst.all).modelRelationships
hcConcepts = set(hasHcRel.toModelObject for hasHcRel in hasHcRels)
# one cube region per head pri item with multiple cube regions
for hcConcept in hcConcepts:
# include any other concepts in this pri item with clean inheritance
for drsELR in drsELRs:
# each ELR is another cube region
for allRel in val.modelXbrl.relationshipSet(XbrlConst.all, ELR)
drsPriItems(val, fromELR, fromPriItem
             ... this becomes an unwieldy large model; don't see a use case for compiling it out
'''
def insertAspectProxies(self, qnames):
aspectQnames = [qname
for qname in qnames
if qname not in self.aspect_proxy_uri and qname in self.modelXbrl.qnameConcepts]
for qname in aspectQnames:
self.insertAspectProxy(qname, qnamePrefix_Name(qname))
def insertAspectProxy(self, aspectQName, aspectProxyUri):
concept = self.modelXbrl.qnameConcepts[aspectQName]
self.report['aspectProxies'][aspectProxyUri] = aspectProxy = {
'report': self.reportURI,
'document': modelObjectDocumentUri(concept),
'name': concept.name
}
self.aspect_proxy[aspectQName] = aspectProxy
self.aspect_proxy_uri[aspectQName] = aspectProxyUri
return aspectProxy
def aspectQnameProxy(self, qname):
if hasattr(qname, "modelDocument"):
return self.aspect_proxy.get(qname.qname)
elif qname in self.modelXbrl.qnameConcepts:
return self.aspect_proxy.get(qname)
return None
def aspectQnameProxyId(self, qname):
if hasattr(qname, "modelDocument"):
return self.aspect_proxy_uri.get(qname.qname)
elif qname in self.modelXbrl.qnameConcepts:
return self.aspect_proxy_uri.get(qname)
return None
def insertDataPoints(self):
# separate graph
# document-> dataTypeSet -> dataType
self.showStatus("insert DataPoints")
# note these initial aspects Qnames used also must be in conceptsUsed above
dimensions = [] # index by hash of dimension
dimensionIds = {} # index for dimension
if self.modelXbrl.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL):
contextAspectValueSelections = {} # contexts processed already
unitIDs = set() # units processed already
periodProxies = {}
entityIdentifierAspectProxies = {}
dataPoints = self.report['dataPoints']
for fact in self.modelXbrl.factsInInstance:
self.insertAspectProxies( (fact.qname,) )
factId = XmlUtil.elementFragmentIdentifier(fact)
dataPoints[factId] = dataPoint = {
'document': modelObjectDocumentUri(fact),
'id': factId,
'sourceLine': fact.sourceline,
'dataPointUrl': modelObjectUri(fact),
'baseItem': self.aspectQnameProxyId(fact.qname)
}
context = fact.context
concept = fact.concept
if context is not None:
if context.entityIdentifier not in entityIdentifierAspectProxies:
entityScheme, entityIdentifier = context.entityIdentifier
entityIdentifierAspectProxy = "{}/{}".format(
qnamePrefix_Name(XbrlConst.qnXbrliIdentifier),
entityIdentifier)
e = self.insertAspectProxy(XbrlConst.qnXbrliIdentifier, entityIdentifierAspectProxy)
e['scheme'] = entityScheme
e['identifier'] = entityIdentifier
entityIdentifierAspectProxies[context.entityIdentifier] = entityIdentifierAspectProxy
                    else:
                        entityIdentifierAspectProxy = entityIdentifierAspectProxies[context.entityIdentifier]
                    dataPoint['entityIdentifier'] = entityIdentifierAspectProxy
                    if context.isForeverPeriod:
                        period = "forever"
                    elif context.isInstantPeriod:
                        endDate = XmlUtil.dateunionValue(context.instantDatetime, subtractOneDay=True).replace(':','_')
                        period = "instant/{}".format(endDate)
                    else:
                        startDate = XmlUtil.dateunionValue(context.startDatetime).replace(':','_')
                        endDate = XmlUtil.dateunionValue(context.endDatetime, subtractOneDay=True).replace(':','_')
                        period = "duration/{}/{}".format(startDate, endDate)
                    if period not in periodProxies:
                        periodProxy = "{}/{}".format(
                            qnamePrefix_Name(XbrlConst.qnXbrliPeriod),
                            period)
                        p = self.insertAspectProxy(XbrlConst.qnXbrliPeriod, periodProxy)
                        p['isForever'] = context.isForeverPeriod
                        p['isInstant'] = context.isInstantPeriod
                        if context.isStartEndPeriod:
                            d = context.startDatetime
                            if d.hour == 0 and d.minute == 0 and d.second == 0:
                                d = d.date()
                            p['startDate'] = d
                        if context.isStartEndPeriod or context.isInstantPeriod:
                            d = context.endDatetime
                            if d.hour == 0 and d.minute == 0 and d.second == 0:
                                d = (d - datetime.timedelta(1)).date()
                            p['endDate'] = d
                        periodProxies[period] = periodProxy
                    else:
                        periodProxy = periodProxies[period]
                    dataPoint['period'] = periodProxy
                    dataPoint['contextUrl'] = modelObjectUri(context)
                    dataPoint['contextId'] = context.id
                    if context.id not in contextAspectValueSelections:
                        contextAspectValueSelections[context.id] = contextAspectValueSelection = []
                        for dimVal in context.qnameDims.values():
                            dim = qnamePrefix_Name(dimVal.dimensionQname)
                            if dimVal.isExplicit:
                                self.insertAspectProxies( (dimVal.memberQname,) ) # need immediate use of proxy
                                v = self.aspectQnameProxyId(dimVal.memberQname)
                            else:
                                v = dimVal.typedMember.stringValue
                            dimProxy = "{}/{}".format(dim, v)
                            d = self.insertAspectProxy(dimVal.dimensionQname, dimProxy)
                            contextAspectValueSelection.append(dimProxy)
                            d['aspect'] = dim
                            if dimVal.isExplicit:
                                d['aspectValue'] = v
                            else:
                                d['typedValue'] = v
                    else:
                        contextAspectValueSelection = contextAspectValueSelections[context.id]
                    dataPoint['aspectValueSelections'] = contextAspectValueSelection
if fact.isNumeric:
if fact.precision == "INF":
dataPoint['precision'] = "INF"
elif fact.precision is not None:
dataPoint['precision'] = fact.precision
if fact.decimals == "INF":
dataPoint['decimals'] = "INF"
elif fact.decimals is not None:
dataPoint['decimals'] = fact.decimals
if fact.unit is not None:
unit = fact.unit
unitProxy = "{}/{}".format(
qnamePrefix_Name(XbrlConst.qnXbrliUnit),
unit.id)
dataPoint['unit'] = unitProxy
if unit.id not in unitIDs:
unitIDs.add(unit.id)
u = self.insertAspectProxy(XbrlConst.qnXbrliUnit, unitProxy)
u['unitId'] = unit.id
mults, divs = unit.measures
u['multiplyMeasures'] = [qnameUri(qn) for qn in mults]
if divs:
u['divideMeasures'] = [qnameUri(qn) for qn in divs]
if fact.xmlLang is None and fact.concept is not None and fact.concept.baseXsdType is not None:
dataPoint['value'] = fact.xValue
# The insert with base XSD type but no language
elif fact.xmlLang is not None:
# assuming string type with language
dataPoint['language'] = fact.xmlLang
dataPoint['value'] = fact.value
else:
# Otherwise insert as plain liternal with no language or datatype
dataPoint['value'] = fact.value
if fact.modelTupleFacts:
dataPoint['tuple'] = [XmlUtil.elementFragmentIdentifier(tupleFact)
for tupleFact in fact.modelTupleFacts]
def resourceId(self,i):
return "<{accessionPrefix}resource/{i}>".format(accessionPrefix=self.thisAccessionPrefix,
i=i)
def insertRelationshipSets(self):
self.showStatus("insert relationship sets")
aspectQnamesUsed = set()
for i, relationshipSetKey in enumerate(self.relationshipSets):
arcrole, linkrole, linkqname, arcqname = relationshipSetKey
if linkqname:
aspectQnamesUsed.add(linkqname)
if arcqname:
aspectQnamesUsed.add(arcqname)
self.insertAspectProxies(aspectQnamesUsed)
relationshipSets = self.report['relationshipSets']
relSetIds = {}
for i, relationshipSetKey in enumerate(self.relationshipSets):
arcrole, linkrole, linkqname, arcqname = relationshipSetKey
if arcrole not in ("XBRL-formulae", "Table-rendering", "XBRL-footnotes") and linkrole and linkqname and arcqname:
# skip paths and qnames for now (add later for non-SEC)
relSetId = "{}/{}".format(
os.path.basename(arcrole),
os.path.basename(linkrole))
relSetIds[relationshipSetKey] = relSetId
relationshipSets[relSetId] = relationshipSet = {
'arcrole': arcrole,
'linkrole': linkrole,
'arcname': self.aspectQnameProxyId(arcqname),
'linkname': self.aspectQnameProxyId(linkqname),
'report': self.reportURI,
'roots': [],
'relationships': []
}
# do tree walk to build relationships with depth annotated, no targetRole navigation
relE = [] # fromV, toV, label
resources = set()
aspectQnamesUsed = set()
resourceIDs = {} # index by object
def walkTree(rels, parentRelId, seq, depth, relationshipSetKey, relationshipSet, visited, relSetId, doVertices):
for rel in rels:
if rel not in visited:
visited.add(rel)
if not doVertices:
_relProp = {'seq': seq,
'depth': depth,
'order': rel.orderDecimal,
'priority': rel.priority,
'relSetId': relSetId
}
if isinstance(rel.fromModelObject, ModelConcept):
if doVertices:
aspectQnamesUsed.add(rel.fromModelObject.qname)
sourceUri = True
else:
sourceQname = rel.fromModelObject.qname
sourceUri = self.aspectQnameProxyId(sourceQname)
sourceId = qnamePrefix_Name(rel.fromModelObject.qname)
else:
sourceUri = None # tbd
toModelObject = rel.toModelObject
if isinstance(toModelObject, ModelConcept):
if doVertices:
aspectQnamesUsed.add(toModelObject.qname)
targetUri = True
else:
targetUri = self.aspectQnameProxyId(toModelObject.qname)
targetId = qnamePrefix_Name(toModelObject.qname)
elif isinstance(toModelObject, ModelResource):
if doVertices:
resources.add(toModelObject)
targetUri = 0 # just can't be None, but doesn't matter on doVertices pass
else:
if rel.preferredLabel:
_relProp['preferredLabel'] = rel.preferredLabel
if rel.arcrole in (XbrlConst.all, XbrlConst.notAll):
_relProp['cubeClosed'] = rel.closed
elif rel.arcrole in (XbrlConst.dimensionDomain, XbrlConst.domainMember):
_relProp['aspectValueUsable'] = rel.usable
elif rel.arcrole == XbrlConst.summationItem:
_relProp['weight'] = rel.weightDecimal
if relationshipSet.arcrole == "XBRL-dimensions":
_relProp['arcrole'] = rel.arcrole
if toModelObject.role:
_relProp['resourceRole'] = toModelObject.role
targetUri = modelObjectUri(toModelObject)
targetId = toModelObject.modelDocument.basename + '#' + XmlUtil.elementFragmentIdentifier(toModelObject)
else:
targetUri = None # tbd
if sourceUri is not None and targetUri is not None:
targetRelSetId = relSetId
targetRelSetKey = relationshipSetKey
if relationshipSet.arcrole == "XBRL-dimensions" and rel.targetRole:
targetRelSet = self.modelXbrl.relationshipSet(relationshipSet.arcrole, rel.targetRole)
for i, relSetKey in enumerate(self.relationshipSets):
arcrole, ELR, linkqname, arcqname = relSetKey
if arcrole == "XBRL-dimensions" and ELR == rel.targetRole:
                                    targetRelSetId = relSetIds[relSetKey]
targetRelSetKey = relSetKey
break
if not doVertices:
_relProp['targetLinkrole'] = rel.targetRole
                                _relProp['targetRelSet'] = targetRelSetId
else:
targetRelSetKey = relationshipSetKey
targetRelSet = relationshipSet
if doVertices:
relId = None
else:
_relProp['from'] = sourceUri
_relProp['fromQname'] = sourceQname
_relProp['to'] = targetUri
_arcrole = os.path.basename(rel.arcrole)
relId = "{}/{}/{}/{}".format(
_arcrole,
os.path.basename(rel.linkrole),
sourceId,
targetId)
_relProp['relId'] = relId
_relProp['relSetKey'] = relationshipSetKey
relE.append(_relProp)
seq += 1
seq = walkTree(targetRelSet.fromModelObject(toModelObject), relId, seq, depth+1, targetRelSetKey, targetRelSet, visited, targetRelSetId, doVertices)
visited.remove(rel)
return seq
for doVertices in range(1,-1,-1): # pass 0 = vertices, pass 1 = edges
for i, relationshipSetKey in enumerate(self.relationshipSets):
arcrole, linkrole, linkqname, arcqname = relationshipSetKey
if arcrole not in ("XBRL-formulae", "Table-rendering", "XBRL-footnotes") and linkrole and linkqname and arcqname:
relSetId = relSetIds[relationshipSetKey]
relationshipSet = self.modelXbrl.relationshipSet(*relationshipSetKey)
seq = 1
for rootConcept in relationshipSet.rootConcepts:
seq = walkTree(relationshipSet.fromModelObject(rootConcept), None, seq, 1, relationshipSetKey, relationshipSet, set(), relSetId, doVertices)
if doVertices:
if resources:
for resource in resources:
resourceUri = modelObjectUri(resource)
r = {'url': resourceUri,
'value': resource.stringValue
}
if resource.xmlLang:
r['language'] = resource.xmlLang
if resource.role:
r['role'] = resource.role
self.documents[modelObjectDocumentUri(resource)]['resources'][
XmlUtil.elementFragmentIdentifier(resource)] = r
self.insertAspectProxies(aspectQnamesUsed)
else:
for j, rel in enumerate(relE):
relId = rel['relId']
relSetId = rel['relSetId']
relSet = relationshipSets[relSetId]
r = dict((k,v)
for k,v in rel.items()
if k not in ('relId', 'relPredicate', 'relSetId', 'relSetKey', 'fromQname'))
relSet['relationships'].append(r)
if rel.get('depth', 0) == 1:
relSet['roots'].append(r)
sourceQname = rel['fromQname']
if sourceQname in self.aspect_proxy:
self.aspect_proxy[sourceQname] \
.setdefault('relationships', {}) \
.setdefault(rel['relSetId'], []) \
.append(rel['to'])
# TBD: do we want to link resources to the dts (by role, class, or otherwise?)
            resourceIDs.clear() # dereference objects
resources = None
def insertValidationResults(self):
logEntries = []
for handler in logging.getLogger("arelle").handlers:
if hasattr(handler, "dbHandlerLogEntries"):
logEntries = handler.dbHandlerLogEntries()
break
messages = []
messageRefs = [] # direct link to objects
for i, logEntry in enumerate(logEntries):
messageId = "message/{}".format(i+1)
self.report['messages'][messageId] = m = {
'code': logEntry['code'],
'level': logEntry['level'],
'value': logEntry['message']['text'],
'report': self.reportURI,
'messageId': messageId
}
# capture message ref's
for ref in logEntry['refs']:
modelObject = self.modelXbrl.modelObject(ref.get('objectId',''))
# for now just find a concept
aspectObj = None
if isinstance(modelObject, ModelFact):
factId = XmlUtil.elementFragmentIdentifier(modelObject)
dataPoint = self.report['dataPoints'][factId]
dataPoint.setdefault('messages', []).append(messageId)
elif isinstance(modelObject, ModelConcept):
# be sure there's a proxy
                self.insertAspectProxies( (modelObject.qname,)) # need immediate use of proxy
self.aspectQnameProxy(modelObject.qname).setdefault('messages', []).append(messageId)
elif isinstance(modelObject, ModelRelationship):
''' TBD
sourceId = qnamePrefix_Name(modelObject.fromModelObject.qname)
toModelObject = modelObject.toModelObject
if isinstance(toModelObject, ModelConcept):
targetId = qnamePrefix_Name(toModelObject.qname)
elif isinstance(toModelObject, ModelResource):
targetId = toModelObject.modelDocument.basename + '#' + XmlUtil.elementFragmentIdentifier(toModelObject)
else:
continue
objUri = URIRef("{}/Relationship/{}/{}/{}/{}".format(
self.reportURI,
os.path.basename(modelObject.arcrole),
os.path.basename(modelObject.linkrole),
sourceId,
targetId) )
'''
else:
continue
if messages:
self.showStatus("insert validation messages")
| apache-2.0 | 1,309,872,667,584,775,700 | 50.583333 | 172 | 0.529261 | false |
akretion/pos-addons | tg_partner_firstname/tg_partner_firstname.py | 11 | 2548 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 - Thierry Godin. All Rights Reserved
# @author Thierry Godin <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import openerp
from openerp import netsvc, tools, pooler
from openerp.osv import fields, osv
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class inherit_res_partner(osv.osv):
_name='res.partner'
_inherit='res.partner'
def write(self, cr, uid, ids, vals, context=None):
v_name = None
v_firstname = None
if vals.get('name'):
# name to Uppercase
v_name = vals['name'].strip()
vals['name'] = v_name.upper()
if vals.get('firstname'):
# firstname capitalized
v_firstname = vals['firstname'].strip()
vals['firstname'] = v_firstname.title()
result = super(inherit_res_partner,self).write(cr, uid, ids, vals, context=context)
return result
def create(self, cr, uid, vals, context=None):
v_name = None
v_firstname = None
if vals.get('name'):
# name to Uppercase
v_name = vals['name'].strip()
vals['name'] = v_name.upper()
if vals.get('firstname'):
# firstname capitalized
v_firstname = vals['firstname'].strip()
vals['firstname'] = v_firstname.title()
result = super(inherit_res_partner,self).create(cr, uid, vals, context=context)
return result
_columns = {
'firstname' : fields.char('Firstname', size=128),
}
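# Hedged illustration (not part of the original module): the normalisation the
# overrides above apply to incoming vals, shown on plain strings.
def _example_normalisation():
    raw_name, raw_firstname = '  dupont ', ' jean-pierre '
    name = raw_name.strip().upper()            # -> 'DUPONT'
    firstname = raw_firstname.strip().title()  # -> 'Jean-Pierre'
    return name, firstname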
| lgpl-3.0 | -6,088,853,987,410,746,000 | 32.90411 | 91 | 0.570251 | false |
purism/pdak | dak/dakdb/update76.py | 8 | 1666 | #!/usr/bin/env python
# coding=utf8
"""
Add list of closed bugs to changes table
@contact: Debian FTP Master <[email protected]>
@copyright: 2012 Ansgar Burchardt <[email protected]>
@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
################################################################################
import psycopg2
from daklib.dak_exceptions import DBUpdateError
from daklib.config import Config
################################################################################
def do_update(self):
print __doc__
try:
cnf = Config()
c = self.db.cursor()
c.execute("ALTER TABLE changes ADD COLUMN closes TEXT[]")
c.execute("UPDATE config SET value = '76' WHERE name = 'db_revision'")
self.db.commit()
except psycopg2.ProgrammingError as msg:
self.db.rollback()
raise DBUpdateError('Unable to apply sick update 76, rollback issued. Error message: {0}'.format(msg))
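################################################################################
# Hedged illustration (not part of the original update): after this revision a
# changes row can record the bugs it closes, e.g. (made-up values):
#
#     UPDATE changes SET closes = ARRAY['123456', '654321'] WHERE id = 42;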
| gpl-2.0 | -1,609,357,548,722,367,200 | 34.446809 | 110 | 0.653661 | false |
antsant/namebench | nb_third_party/graphy/backends/google_chart_api/__init__.py | 205 | 2023 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Backend which can generate charts using the Google Chart API."""
from graphy import line_chart
from graphy import bar_chart
from graphy import pie_chart
from graphy.backends.google_chart_api import encoders
def _GetChartFactory(chart_class, display_class):
"""Create a factory method for instantiating charts with displays.
Returns a method which, when called, will create & return a chart with
chart.display already populated.
"""
def Inner(*args, **kwargs):
chart = chart_class(*args, **kwargs)
chart.display = display_class(chart)
return chart
return Inner
# These helper methods make it easy to get chart objects with display
# objects already setup. For example, this:
# chart = google_chart_api.LineChart()
# is equivalent to:
# chart = line_chart.LineChart()
# chart.display = google_chart_api.encoders.LineChartEncoder(chart)
#
# (If there's some chart type for which a helper method isn't available, you
# can always just instantiate the correct encoder manually, like in the 2nd
# example above).
# TODO: fix these so they have nice docs in ipython (give them __doc__)
LineChart = _GetChartFactory(line_chart.LineChart, encoders.LineChartEncoder)
Sparkline = _GetChartFactory(line_chart.Sparkline, encoders.SparklineEncoder)
BarChart = _GetChartFactory(bar_chart.BarChart, encoders.BarChartEncoder)
PieChart = _GetChartFactory(pie_chart.PieChart, encoders.PieChartEncoder)
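# Hedged example (assumes graphy's documented chart API): the factory wiring
# above means chart.display is ready as soon as the chart is created.
def _ExampleChartUrl():
  chart = LineChart([1, 2, 3, 5, 8, 13])
  return chart.display.Url(320, 100)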
| apache-2.0 | 4,969,626,415,838,115,000 | 39.46 | 77 | 0.760751 | false |
papoteur-mga/mageiaSync | setup.py | 1 | 1249 | from distutils.core import setup
import os
LOCALE_DIR = '/usr/share/locale'
locales = []
if os.path.exists('po/locale'):
for lang in os.listdir('po/locale'):
locales.append(os.path.join(lang, 'LC_MESSAGES'))
data_files = [("share/applications/", ["share/applications/mageiasync.desktop"]),
("share/icons/hicolor/scalable/apps", ["share/icons/mageiasync.svg"])
] + [(os.path.join(LOCALE_DIR, locale),
[os.path.join('po', 'locale', locale, 'mageiasync.mo')])
for locale in locales]
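# Illustration (not part of the original script): for a French translation the
# comprehension above yields an entry such as
#   ('/usr/share/locale/fr/LC_MESSAGES',
#    ['po/locale/fr/LC_MESSAGES/mageiasync.mo'])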
setup(
name = 'mageiasync',
version = '0.1.2',
packages = ['mageiaSync'],
scripts = ['mageiasync'],
license = 'GNU General Public License v3 (GPLv3)',
url = 'https://github.com/papoteur-mga/mageiaSync',
    description = 'This tool downloads ISO images from a mirror or the Mageia testing server.',
long_description = 'This tool uses rsync with a GUI',
platforms = ['Linux'],
author = 'Papoteur',
author_email = '[email protected]',
maintainer = 'david_david',
maintainer_email = '[email protected]',
data_files = data_files,
)
| gpl-3.0 | -8,755,315,272,496,259,000 | 36.848485 | 93 | 0.591673 | false |
abstract-open-solutions/OCB | addons/edi/__init__.py | 437 | 1157 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import controllers
from . import models
from . import edi_service
from .models.edi import EDIMixin, edi
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,229,211,939,604,810,000 | 43.5 | 78 | 0.630078 | false |
ehashman/oh-mainline | vendor/packages/Django/tests/regressiontests/context_processors/tests.py | 96 | 1344 | """
Tests for Django's bundled context processors.
"""
from django.test import TestCase
class RequestContextProcessorTests(TestCase):
"""
Tests for the ``django.core.context_processors.request`` processor.
"""
urls = 'regressiontests.context_processors.urls'
def test_request_attributes(self):
"""
Test that the request object is available in the template and that its
attributes can't be overridden by GET and POST parameters (#3828).
"""
url = '/request_attrs/'
# We should have the request object in the template.
response = self.client.get(url)
self.assertContains(response, 'Have request')
# Test is_secure.
response = self.client.get(url)
self.assertContains(response, 'Not secure')
response = self.client.get(url, {'is_secure': 'blah'})
self.assertContains(response, 'Not secure')
response = self.client.post(url, {'is_secure': 'blah'})
self.assertContains(response, 'Not secure')
# Test path.
response = self.client.get(url)
self.assertContains(response, url)
response = self.client.get(url, {'path': '/blah/'})
self.assertContains(response, url)
response = self.client.post(url, {'path': '/blah/'})
self.assertContains(response, url)
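# Hedged illustration (not part of the original tests): the processor under
# test ships with Django and is enabled via settings, e.g.
#
#     TEMPLATE_CONTEXT_PROCESSORS = (
#         'django.core.context_processors.request',
#     )
#
# after which templates can use {{ request.path }} or {{ request.is_secure }}.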
| agpl-3.0 | -4,363,970,325,847,624,700 | 35.324324 | 78 | 0.636161 | false |
tectronics/cortex-vfx | test/IECore/SWAReaderTest.py | 12 | 5143 | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
class SWAReaderTest( unittest.TestCase ) :
def testConstruction( self ) :
r = IECore.SWAReader()
self.assertEqual( r["fileName"].getTypedValue(), "" )
r = IECore.SWAReader( "test/IECore/data/swaFiles/test.swa" )
self.assertEqual( r["fileName"].getTypedValue(), "test/IECore/data/swaFiles/test.swa" )
def testReading( self ) :
r = IECore.SWAReader( "test/IECore/data/swaFiles/test.swa" )
o = r.read()
IECore.ObjectWriter( o, "/tmp/trees4.cob" ).write()
self.failUnless( o.isInstanceOf( IECore.PointsPrimitive.staticTypeId() ) )
self.assertEqual( o.numPoints, 5 + 6 )
self.failUnless( o.arePrimitiveVariablesValid() )
self.failUnless( "P" in o )
self.failUnless( "xAxis" in o )
self.failUnless( "yAxis" in o )
self.failUnless( "zAxis" in o )
self.failUnless( "scale" in o )
self.failUnless( "treeName" in o )
self.failUnless( "treeNameIndices" in o )
self.assertEqual( o["P"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( o["xAxis"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( o["yAxis"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( o["zAxis"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( o["scale"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( o["treeNameIndices"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( o["treeName"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.failUnless( isinstance( o["P"].data, IECore.V3fVectorData ) )
self.failUnless( isinstance( o["xAxis"].data, IECore.V3fVectorData ) )
self.failUnless( isinstance( o["yAxis"].data, IECore.V3fVectorData ) )
self.failUnless( isinstance( o["zAxis"].data, IECore.V3fVectorData ) )
self.failUnless( isinstance( o["scale"].data, IECore.FloatVectorData ) )
self.failUnless( isinstance( o["treeNameIndices"].data, IECore.IntVectorData ) )
self.failUnless( isinstance( o["treeName"].data, IECore.StringVectorData ) )
self.assertEqual( o["treeName"].data, IECore.StringVectorData( [ "Acacia_RT", "BroadLeaf_HighDetail" ] ) )
self.assertEqual( o["P"].data[0], IECore.V3f( 3750.05, 1556.86, -2149.22 ) )
self.assertEqual( o["yAxis"].data[0], IECore.V3f( 0.0176831, 0.998519, 0.0514542 ) )
self.assertEqual( o["xAxis"].data[0], IECore.V3f( 0.0179192, -0.0517705, 0.998498 ) )
self.assertEqual( o["zAxis"].data[0], o["xAxis"].data[0].cross( o["yAxis"].data[0] ) )
self.assertAlmostEqual( o["scale"].data[0], 6.4516, 6 )
self.assertAlmostEqual( o["scale"].data[1], 6.7, 6 )
self.assertEqual( o["treeNameIndices"].data, IECore.IntVectorData( [ 0 ] * 5 + [ 1 ] * 6 ) )
def testCanRead( self ) :
self.failUnless( IECore.SWAReader.canRead( "test/IECore/data/swaFiles/test.swa" ) )
		self.failIf( IECore.SWAReader.canRead( "test/IECore/data/exrFiles/carPark.exr" ) )
self.failIf( IECore.SWAReader.canRead( "test/IECore/data/idxFiles/test.idx" ) )
self.failIf( IECore.SWAReader.canRead( "test/IECore/data/empty" ) )
def testRegistration( self ) :
r = IECore.Reader.create( "test/IECore/data/swaFiles/test.swa" )
self.failUnless( isinstance( r, IECore.SWAReader ) )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | 1,978,072,474,059,754,000 | 47.065421 | 108 | 0.702703 | false |
foodsnag/foodsnag-web | config.py | 1 | 1242 | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
APP_NAME = 'FoodSnag'
MAILER_NAME = 'Snagger'
MAILER_EMAIL = '[email protected]'
MG_KEY = os.environ.get('MG_KEY')
MG_URL = 'https://api.mailgun.net/v3/sandbox86fa708b0be84193924a6900094a11cf.mailgun.org'
SECRET_KEY = os.environ.get('SECRET_KEY') or 'G00DB33F'
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
    DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
DEBUG = True
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
class ProductionConfig(Config):
MG_URL = 'https://api.mailgun.net/v3/mg.foodsnag.com'
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
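# Hedged usage sketch (assumes a Flask application factory; not part of the
# original file):
#
#     from flask import Flask
#
#     def create_app(config_name='default'):
#         app = Flask(__name__)
#         app.config.from_object(config[config_name])
#         config[config_name].init_app(app)
#         return app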
| mit | -8,743,636,698,525,172,000 | 26.6 | 93 | 0.663446 | false |
fr34k8/paimei | pida/basic_block.py | 7 | 9145 | #
# PIDA Basic Block
# Copyright (C) 2006 Pedram Amini <[email protected]>
#
# $Id: basic_block.py 194 2007-04-05 15:31:53Z cameron $
#
# This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this program; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
'''
@author: Pedram Amini
@license: GNU General Public License 2.0 or later
@contact: [email protected]
@organization: www.openrce.org
'''
try:
from idaapi import *
from idautils import *
from idc import *
except:
pass
import pgraph
from instruction import *
from defines import *
class basic_block (pgraph.node):
'''
'''
id = None
ea_start = None
ea_end = None
depth = None
analysis = None
function = None
instructions = {}
num_instructions = 0
ext = {}
####################################################################################################################
def __init__ (self, ea_start, ea_end, depth=DEPTH_FULL, analysis=ANALYSIS_NONE, function=None):
'''
Analyze the basic block from ea_start to ea_end.
@see: defines.py
@type ea_start: DWORD
@param ea_start: Effective address of start of basic block (inclusive)
@type ea_end: DWORD
@param ea_end: Effective address of end of basic block (inclusive)
@type depth: Integer
@param depth: (Optional, Def=DEPTH_FULL) How deep to analyze the module
@type analysis: Integer
@param analysis: (Optional, Def=ANALYSIS_NONE) Which extra analysis options to enable
@type function: pida.function
@param function: (Optional, Def=None) Pointer to parent function container
'''
# run the parent classes initialization routine first.
super(basic_block, self).__init__(ea_start)
heads = [head for head in Heads(ea_start, ea_end + 1) if isCode(GetFlags(head))]
self.id = ea_start
self.ea_start = ea_start
self.ea_end = ea_end
self.depth = depth
self.analysis = analysis
self.function = function
self.num_instructions = len(heads)
self.instructions = {}
self.ext = {}
# convenience alias.
self.nodes = self.instructions
# bubble up the instruction count to the function. this is in a try except block to catch situations where the
# analysis was not bubbled down from a function.
try:
self.function.num_instructions += self.num_instructions
except:
pass
if self.depth & DEPTH_INSTRUCTIONS:
for ea in heads:
                self.instructions[ea] = instruction(ea, self.analysis, self)
####################################################################################################################
def overwrites_register (self, register):
'''
Indicates if the given register is modified by this block.
@type register: String
@param register: The text representation of the register
@rtype: Boolean
@return: True if the register is modified by any instruction in this block.
'''
for ins in self.instructions.values():
if ins.overwrites_register(register):
return True
return False
####################################################################################################################
def ordered_instructions(self):
'''
TODO: deprecated by sorted_instructions().
'''
temp = [key for key in self.instructions.keys()]
temp.sort()
return [self.instructions[key] for key in temp]
####################################################################################################################
def render_node_gml (self, graph):
'''
Overload the default node.render_node_gml() routine to create a custom label. Pass control to the default
node renderer and then return the merged content.
@rtype: String
@return: Contents of rendered node.
'''
self.label = "<span style='font-family: Courier New; font-size: 10pt; color: #000000'>"
self.label += "<p><font color=#004080><b>%08x</b></font></p>" % self.ea_start
self.gml_height = 45
for instruction in self.sorted_instructions():
colored_instruction = instruction.disasm.split()
if colored_instruction[0] == "call":
colored_instruction[0] = "<font color=#FF8040>" + colored_instruction[0] + "</font>"
else:
colored_instruction[0] = "<font color=#004080>" + colored_instruction[0] + "</font>"
colored_instruction = " ".join(colored_instruction)
self.label += "<font color=#999999>%08x</font> %s<br>" % (instruction.ea, colored_instruction)
try: instruction_length = len(instruction.disasm)
except: instruction_length = 0
try: comment_length = len(instruction.comment)
except: comment_length = 0
required_width = (instruction_length + comment_length + 10) * 10
if required_width > self.gml_width:
self.gml_width = required_width
self.gml_height += 20
self.label += "</span>"
return super(basic_block, self).render_node_gml(graph)
####################################################################################################################
def render_node_graphviz (self, graph):
'''
Overload the default node.render_node_graphviz() routine to create a custom label. Pass control to the default
node renderer and then return the merged content.
@type graph: pgraph.graph
@param graph: Top level graph object containing the current node
@rtype: pydot.Node()
@return: Pydot object representing node
'''
self.label = ""
self.shape = "box"
for instruction in self.sorted_instructions():
self.label += "%08x %s\\n" % (instruction.ea, instruction.disasm)
return super(basic_block, self).render_node_graphviz(graph)
####################################################################################################################
def render_node_udraw (self, graph):
'''
Overload the default node.render_node_udraw() routine to create a custom label. Pass control to the default
node renderer and then return the merged content.
@type graph: pgraph.graph
@param graph: Top level graph object containing the current node
@rtype: String
@return: Contents of rendered node.
'''
self.label = ""
for instruction in self.sorted_instructions():
self.label += "%08x %s\\n" % (instruction.ea, instruction.disasm)
return super(basic_block, self).render_node_udraw(graph)
####################################################################################################################
def render_node_udraw_update (self):
'''
Overload the default node.render_node_udraw_update() routine to create a custom label. Pass control to the
default node renderer and then return the merged content.
@rtype: String
@return: Contents of rendered node.
'''
self.label = ""
for instruction in self.sorted_instructions():
self.label += "%08x %s\\n" % (instruction.ea, instruction.disasm)
return super(basic_block, self).render_node_udraw_update()
####################################################################################################################
def sorted_instructions (self):
'''
Return a list of the instructions within the graph, sorted by id.
@rtype: List
@return: List of instructions, sorted by id.
'''
instruction_keys = self.instructions.keys()
instruction_keys.sort()
return [self.instructions[key] for key in instruction_keys] | gpl-2.0 | -4,282,683,101,429,553,700 | 34.883065 | 120 | 0.528923 | false |
ProfessionalIT/professionalit-webiste | sdk/google_appengine/lib/grizzled/grizzled/proxy.py | 19 | 3984 | # NOTE: Documentation is intended to be processed by epydoc and contains
# epydoc markup.
"""
Overview
========
The ``grizzled.forwarder`` module contains classes that make building proxies
easier.
"""
from __future__ import absolute_import
__docformat__ = "restructuredtext en"
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import logging
from types import MethodType
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
__all__ = ['Forwarder']
# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Logging
# ---------------------------------------------------------------------------
log = logging.getLogger('grizzled.proxy')
# ---------------------------------------------------------------------------
# Public classes
# ---------------------------------------------------------------------------
class Forwarder(object):
"""
The ``grizzled.forwarder.Forwarder`` class is intended to be used as
a mixin, to make it easier for classes to forward calls to another
    class. To mix ``Forwarder`` into a class, simply include it as
one of the base classes.
**WARNING**: ``Forwarder`` intercepts calls to ``__getattr__``, so
don't mix it in if your class is already overriding ``__getattr__``.
Examples
--------
Forward all unimplemented methods to a file:
.. python::
from grizzled.forwarder import Forwarder
class MyFileWrapper(Forwarder):
def __init__(self, file):
Forwarder.__init__(self, file)
w = MyFileWrapper(open('/tmp/foo'))
for line in w.readlines():
print line
Forward all unimplemented calls, *except* ``name``, to the specified
object. Calls to ``name`` will raise an ``AttributeError``:
.. python::
from grizzled.forwarder import Forwarder
class MyFileWrapper(Forwarder):
def __init__(self, file):
Forwarder.__init__(self, file, 'name')
"""
def __init__(self, wrapped, *exceptions):
"""
Initialize a new ``Forwarder`` that will pass unimplemented calls
(method calls, attribute accesses, etc.) to the specified object.
:Parameters:
wrapped : object
the object to which to pass unknown attributes
exceptions : str
one or more names (as separate arguments) of methods
that should not be intercepted (and will, therefore,
result in ``AttributeError`` exceptions if invoked,
absent any other intervention).
"""
self._wrapped = wrapped
        self._exceptions = list(exceptions) # arg tuple to list
def __getattr__(self, name):
# Now that we've intercepted __getattr__, we can't access our own
# attributes directly. Use __getattribute__ to access them.
obj = self.__getattribute__('_wrapped')
exceptions = self.__getattribute__('_exceptions')
if (obj is None) or (name in exceptions):
self.__getattribute__(name)
else:
try:
attr = getattr(obj, name)
                if isinstance(attr, MethodType):
                    # Rebind the method so it is invoked against this proxy.
                    return MethodType(attr.im_func, self, obj.__class__)
else:
return attr
except AttributeError:
# Recast error message as being from this class.
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, name))
| lgpl-3.0 | 6,374,495,788,170,516,000 | 32.762712 | 80 | 0.473394 | false |
Mariusz1970/enigma2 | lib/python/Screens/Ipkg.py | 65 | 3790 | from Components.ActionMap import ActionMap
from Components.Ipkg import IpkgComponent
from Components.Label import Label
from Components.Slider import Slider
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from enigma import eTimer
class Ipkg(Screen):
def __init__(self, session, cmdList=None):
if not cmdList: cmdList = []
Screen.__init__(self, session)
self.cmdList = cmdList
self.sliderPackages = {}
self.slider = Slider(0, len(cmdList))
self["slider"] = self.slider
self.activityslider = Slider(0, 100)
self["activityslider"] = self.activityslider
self.status = Label(_("Preparing... Please wait"))
self["status"] = self.status
self.package = Label()
self["package"] = self.package
self.packages = 0
self.error = 0
self.processed_packages = []
self.activity = 0
self.activityTimer = eTimer()
self.activityTimer.callback.append(self.doActivityTimer)
#self.activityTimer.start(100, False)
self.ipkg = IpkgComponent()
self.ipkg.addCallback(self.ipkgCallback)
self.runningCmd = None
self.runNextCmd()
self["actions"] = ActionMap(["WizardActions"],
{
"ok": self.exit,
"back": self.exit
}, -1)
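	# cmdList is expected to hold (command, args-dict) pairs; an illustrative
	# sketch (the package name is assumed, constants are from Components.Ipkg):
	#   [(IpkgComponent.CMD_UPDATE, {}),
	#    (IpkgComponent.CMD_INSTALL, {"package": "enigma2-plugin-extensions-foo"})]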
def runNextCmd(self):
if self.runningCmd is None:
self.runningCmd = 0
else:
self.runningCmd += 1
print len(self.cmdList), self.runningCmd
if len(self.cmdList) - 1 < self.runningCmd:
self.activityslider.setValue(0)
self.slider.setValue(len(self.cmdList))
self.package.setText("")
self.status.setText(ngettext("Done - Installed, upgraded or removed %d package (%s)", "Done - Installed, upgraded or removed %d packages (%s)", self.packages) % (self.packages, ngettext("with %d error", "with %d errors", self.error) % self.error))
return False
else:
cmd = self.cmdList[self.runningCmd]
self.slider.setValue(self.runningCmd)
self.ipkg.startCmd(cmd[0], args = cmd[1])
self.startActivityTimer()
def doActivityTimer(self):
if not self.ipkg.isRunning():
self.stopActivityTimer()
else:
self.activity += 1
if self.activity == 100:
self.activity = 0
self.activityslider.setValue(self.activity)
def startActivityTimer(self):
self.activityTimer.start(100, False)
def stopActivityTimer(self):
self.activityTimer.stop()
def ipkgCallback(self, event, param):
if event == IpkgComponent.EVENT_DOWNLOAD:
self.status.setText(_("Downloading"))
elif event == IpkgComponent.EVENT_UPGRADE:
if self.sliderPackages.has_key(param):
self.slider.setValue(self.sliderPackages[param])
self.package.setText(param)
self.status.setText(_("Upgrading"))
if not param in self.processed_packages:
self.processed_packages.append(param)
self.packages += 1
elif event == IpkgComponent.EVENT_INSTALL:
self.package.setText(param)
self.status.setText(_("Installing"))
if not param in self.processed_packages:
self.processed_packages.append(param)
self.packages += 1
elif event == IpkgComponent.EVENT_REMOVE:
self.package.setText(param)
self.status.setText(_("Removing"))
if not param in self.processed_packages:
self.processed_packages.append(param)
self.packages += 1
elif event == IpkgComponent.EVENT_CONFIGURING:
self.package.setText(param)
self.status.setText(_("Configuring"))
elif event == IpkgComponent.EVENT_ERROR:
self.error += 1
elif event == IpkgComponent.EVENT_DONE:
self.runNextCmd()
elif event == IpkgComponent.EVENT_MODIFIED:
self.session.openWithCallback(
self.modificationCallback,
MessageBox,
_("A configuration file (%s) was modified since Installation.\nDo you want to keep your version?") % param
)
def modificationCallback(self, res):
self.ipkg.write(res and "N" or "Y")
def exit(self):
if not self.ipkg.isRunning():
self.close()
| gpl-2.0 | 2,037,522,467,855,543,800 | 29.564516 | 250 | 0.713984 | false |
avirshup/DockerMake | test/helpers.py | 1 | 5035 | import os
import io
import tarfile
import sys
import pytest
import docker.errors
__client = None
if sys.version_info.major == 2:
file_not_found_error = IOError
else:
file_not_found_error = FileNotFoundError
def get_client():
"""
Returns:
docker.DockerClient
"""
global __client
if __client is None:
__client = docker.from_env()
return __client
def creates_images(*imgnames):
""" Creates fixtures to make sure to remove named images after (and before, if necessary)
running a test
"""
@pytest.fixture
def fixture():
client = get_client()
_clean_ctrs_and_imgs(imgnames, client)
yield
_clean_ctrs_and_imgs(imgnames, client)
return fixture
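# Illustrative usage (a sketch; 'temp/img' is an assumed tag, not part of this
# suite). pytest registers the fixture under the module attribute name and
# injects it into a test by parameter name:
#   remove_temp_img = creates_images('temp/img')
#
#   def test_build(remove_temp_img):
#       client = get_client()
#       ...  # create 'temp/img'; the fixture removes it again on teardown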
def _clean_ctrs_and_imgs(imgnames, client):
to_clean = []
for img in imgnames:
if ":" in img:
to_clean.append(img)
else:
for img_obj in client.images.list(img):
to_clean.extend(img_obj.tags)
for name in to_clean:
try:
client.images.remove(name, force=True)
except docker.errors.ImageNotFound:
pass
@pytest.fixture
def experimental_daemon():
_skip_if_daemon_experimental_mode_is(False)
@pytest.fixture
def non_experimental_daemon():
_skip_if_daemon_experimental_mode_is(True)
def _skip_if_daemon_experimental_mode_is(skip_if_on):
client = get_client()
version = client.version()
if version.get("Experimental", False) == skip_if_on:
pytest.skip(
"This test requires a docker daemon with experimental mode *%s*"
% ("disabled" if skip_if_on else "enabled")
)
def assert_file_content(imgname, path, expected_content):
""" Asserts that an image exists with a file at the
specified path containing the specified content
"""
try:
actual_content = get_file_content(imgname, path)
except docker.errors.NotFound:
assert False, "File %s not found" % path
assert actual_content.strip() == expected_content.strip()
def file_exists(imgname, path):
try:
get_file_content(imgname, path)
except docker.errors.NotFound:
return False
else:
return True
def get_file_content(imgname, path):
client = get_client()
try:
image = client.images.get(imgname)
except (docker.errors.ImageNotFound, docker.errors.APIError) as exc:
assert False, "Image %s not found: %s" % (imgname, exc)
container = client.containers.create(image)
tarstream, stat = container.get_archive(path)
content = b"".join(tarstream)
container.remove()
tf = tarfile.open(fileobj=io.BytesIO(content))
val = tf.extractfile(os.path.basename(path)).read().decode("utf-8")
return val
def find_files_in_layers(img, files, tmpdir=None):
""" Scans an image's layers looking for specific files.
There's no API for this, so it's brittle. We're looking at
every layer stored internally for a given image. The solution here just uses `docker save`
to dump the layers to disk and examine them. This was written to parse the format of the
tarfile from docker 18.03.1; I'm not sure how stable this is, either backwards or forwards.
Note that this is used for TESTING ONLY, it's not part of the actual code (right now)
Args:
img (str): image id or name
files (List[str]): list of paths to look for
tmpdir (str): temporary directory to save
Returns:
dict[str, List[str]]: Dict storing the layers each file is present in
"""
import tempfile
import json
client = get_client()
result = {f: [] for f in files}
if tmpdir is None:
tmpdir = tempfile.mkdtemp()
img = client.images.get(img)
tarpath = os.path.join(tmpdir, "image.tar")
with open(tarpath, "wb") as tf:
for chunk in img.save():
tf.write(chunk)
with tarfile.open(tarpath, "r") as tf:
mf_obj = tf.extractfile("manifest.json")
manifest = json.loads(mf_obj.read().decode("utf-8"))
assert len(manifest) == 1
for path_to_layer_tar in manifest[0]["Layers"]:
layer_tar_buffer = tf.extractfile(path_to_layer_tar)
with tarfile.open("r", fileobj=layer_tar_buffer) as layertar:
layer_results = _scan_tar(layertar, files)
for f in layer_results:
result[f].append(path_to_layer_tar[: -len("layer.tar")])
return result
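# Illustrative call (a sketch; 'demo:latest' and the probed paths are assumed):
#   hits = find_files_in_layers('demo:latest', ['/etc/hostname', '/opt/app'])
#   # hits maps each path to the layer directories (within the saved tarball)
#   # in which the file occurs, e.g. hits['/etc/hostname'] -> ['<layer-id>/']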
def _scan_tar(tarobj, files):
""" Scans a tar object for specific files.
Args:
tarobj (tarfile.TarFile): tar object
files (List[str]): list of paths to look for
Returns:
List[str]: list of the files present (out of the requested paths)
"""
result = []
for f in files:
try:
tf = tarobj.extractfile(f.lstrip("/"))
except (KeyError, file_not_found_error):
continue
if tf is not None:
result.append(f)
return result
| apache-2.0 | 231,213,801,155,634,050 | 26.513661 | 95 | 0.625223 | false |
fujunwei/chromium-crosswalk | tools/auto_bisect/bisect_results_test.py | 8 | 12580 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from bisect_results import BisectResults
import source_control
class MockDepotRegistry(object):
def ChangeToDepotDir(self, depot):
pass
class MockRevisionState(object):
def __init__(self, revision, index, depot='chromium', value=None,
perf_time=0, build_time=0, passed='?', external=None):
self.depot = depot
self.revision = revision
self.index = index
self.value = value
self.perf_time = perf_time
self.build_time = build_time
self.passed = passed
self.external = external
class MockBisectState(object):
def __init__(self):
self.mock_revision_states = []
mock_bad_val = {'values': [100, 105, 95]}
for i, rev in enumerate(['a', 'b']):
mock_rev_state = MockRevisionState(rev, i, value=mock_bad_val, passed=0)
self.mock_revision_states.append(mock_rev_state)
mock_good_val = {'values': [1, 2, 3]}
for i, rev in enumerate(['c', 'd', 'e'], start=2):
mock_rev_state = MockRevisionState(rev, i, value=mock_good_val, passed=1)
self.mock_revision_states.append(mock_rev_state)
def GetRevisionStates(self):
return self.mock_revision_states
class MockBisectOptions(object):
def __init__(self):
self.repeat_test_count = 3
class BisectResultsTest(unittest.TestCase):
def setUp(self):
self.mock_bisect_state = MockBisectState()
self.mock_depot_registry = MockDepotRegistry()
self.mock_opts = MockBisectOptions()
self.mock_warnings = []
self.original_getcwd = os.getcwd
self.original_chdir = os.chdir
self.original_query_revision_info = source_control.QueryRevisionInfo
os.getcwd = lambda: '/path'
os.chdir = lambda _: None
revision_infos = {'b': {'test': 'b'}, 'c': {'test': 'c'}}
source_control.QueryRevisionInfo = lambda rev: revision_infos[rev]
def tearDown(self):
os.getcwd = self.original_getcwd
os.chdir = self.original_chdir
source_control.QueryRevisionInfo = self.original_query_revision_info
def _AssertConfidence(self, score, bad_values, good_values):
"""Checks whether the given sets of values have a given confidence score.
The score represents our confidence that the two sets of values wouldn't
be as different as they are just by chance; that is, that some real change
occurred between the two sets of values.
Args:
score: Expected confidence score.
bad_values: First list of numbers.
good_values: Second list of numbers.
"""
confidence = BisectResults.ConfidenceScore(bad_values, good_values)
self.assertEqual(score, confidence)
def testConfidenceScoreIsZeroOnTooFewLists(self):
self._AssertConfidence(0.0, [], [1, 2])
self._AssertConfidence(0.0, [1, 2], [])
self._AssertConfidence(0.0, [1], [1, 2])
self._AssertConfidence(0.0, [1, 2], [1])
def testConfidenceScore_ZeroConfidence(self):
# The good and bad sets contain the same values, so the confidence that
# they're different should be zero.
self._AssertConfidence(0.0, [4, 5, 7, 6, 8, 7], [8, 7, 6, 7, 5, 4])
def testConfidenceScore_MediumConfidence(self):
self._AssertConfidence(80.0, [0, 1, 1, 1, 2, 2], [1, 1, 1, 3, 3, 4])
def testConfidenceScore_HighConfidence(self):
self._AssertConfidence(95.0, [0, 1, 1, 1, 2, 2], [1, 2, 2, 3, 3, 4])
def testConfidenceScore_VeryHighConfidence(self):
# Confidence is high if the two sets of values have no internal variance.
self._AssertConfidence(99.9, [1, 1, 1, 1], [1.2, 1.2, 1.2, 1.2])
self._AssertConfidence(99.9, [1, 1, 1, 1], [1.01, 1.01, 1.01, 1.01])
def testConfidenceScore_UnbalancedSampleSize(self):
# The second set of numbers only contains one number, so confidence is 0.
self._AssertConfidence(0.0, [1.1, 1.2, 1.1, 1.2, 1.0, 1.3, 1.2], [1.4])
def testConfidenceScore_EmptySample(self):
# Confidence is zero if either or both samples are empty.
self._AssertConfidence(0.0, [], [])
self._AssertConfidence(0.0, [], [1.1, 1.2, 1.1, 1.2, 1.0, 1.3, 1.2, 1.3])
self._AssertConfidence(0.0, [1.1, 1.2, 1.1, 1.2, 1.0, 1.3, 1.2, 1.3], [])
def testConfidenceScore_FunctionalTestResults(self):
self._AssertConfidence(80.0, [1, 1, 0, 1, 1, 1, 0, 1], [0, 0, 1, 0, 1, 0])
self._AssertConfidence(99.9, [1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0])
def testConfidenceScore_RealWorldCases(self):
"""This method contains a set of data from actual bisect results.
The confidence scores asserted below were all copied from the actual
results, so the purpose of this test method is mainly to show what the
results for real cases are, and compare when we change the confidence
score function in the future.
"""
self._AssertConfidence(80, [133, 130, 132, 132, 130, 129], [129, 129, 125])
self._AssertConfidence(99.5, [668, 667], [498, 498, 499])
self._AssertConfidence(80, [67, 68], [65, 65, 67])
self._AssertConfidence(0, [514], [514])
self._AssertConfidence(90, [616, 613, 607, 615], [617, 619, 619, 617])
self._AssertConfidence(0, [3.5, 5.8, 4.7, 3.5, 3.6], [2.8])
self._AssertConfidence(90, [3, 3, 3], [2, 2, 2, 3])
self._AssertConfidence(0, [1999004, 1999627], [223355])
self._AssertConfidence(90, [1040, 934, 961], [876, 875, 789])
self._AssertConfidence(90, [309, 305, 304], [302, 302, 299, 303, 298])
def testCorrectlyFindsBreakingRange(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[0].passed = 0
revision_states[1].passed = 0
revision_states[2].passed = 1
revision_states[3].passed = 1
revision_states[4].passed = 1
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(revision_states[2], results.first_working_revision)
self.assertEqual(revision_states[1], results.last_broken_revision)
def testCorrectlyFindsBreakingRangeNotInOrder(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[0].passed = 0
revision_states[1].passed = 1
revision_states[2].passed = 0
revision_states[3].passed = 1
revision_states[4].passed = 1
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(revision_states[1], results.first_working_revision)
self.assertEqual(revision_states[2], results.last_broken_revision)
def testCorrectlyFindsBreakingRangeIncompleteBisect(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[0].passed = 0
revision_states[1].passed = 0
revision_states[2].passed = '?'
revision_states[3].passed = 1
revision_states[4].passed = 1
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(revision_states[3], results.first_working_revision)
self.assertEqual(revision_states[1], results.last_broken_revision)
def testFindBreakingRangeAllPassed(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[0].passed = 1
revision_states[1].passed = 1
revision_states[2].passed = 1
revision_states[3].passed = 1
revision_states[4].passed = 1
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(revision_states[0], results.first_working_revision)
self.assertIsNone(results.last_broken_revision)
def testFindBreakingRangeNonePassed(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[0].passed = 0
revision_states[1].passed = 0
revision_states[2].passed = 0
revision_states[3].passed = 0
revision_states[4].passed = 0
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertIsNone(results.first_working_revision)
self.assertEqual(revision_states[4], results.last_broken_revision)
def testCorrectlyComputesRegressionStatistics(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[0].passed = 0
revision_states[0].value = {'values': [1000, 999, 998]}
revision_states[1].passed = 0
revision_states[1].value = {'values': [980, 1000, 999]}
revision_states[2].passed = 1
revision_states[2].value = {'values': [50, 45, 55]}
revision_states[3].passed = 1
revision_states[3].value = {'values': [45, 56, 45]}
revision_states[4].passed = 1
revision_states[4].value = {'values': [51, 41, 58]}
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertAlmostEqual(99.9, results.confidence)
self.assertAlmostEqual(1909.86547085, results.regression_size)
self.assertAlmostEqual(7.16625904, results.regression_std_err)
def testFindsCulpritRevisions(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[1].depot = 'chromium'
revision_states[2].depot = 'webkit'
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(1, len(results.culprit_revisions))
self.assertEqual(('b', {'test': 'b'}, 'chromium'),
results.culprit_revisions[0])
def testFindsOtherRegressions(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[0].passed = 0
revision_states[0].value = {'values': [100, 100, 100]}
revision_states[1].passed = 0
revision_states[1].value = {'values': [100, 100, 100]}
revision_states[2].passed = 1
revision_states[2].value = {'values': [10, 10, 10]}
revision_states[3].passed = 1
revision_states[3].value = {'values': [100, 100, 100]}
revision_states[4].passed = 1
revision_states[4].value = {'values': [60, 60, 60]}
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
expected_regressions = [[revision_states[2], revision_states[1], 99.9],
[revision_states[4], revision_states[3], 80.0]]
self.assertEqual(expected_regressions, results.other_regressions)
def testNoResultBasedWarningsForNormalState(self):
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(0, len(results.warnings))
def testWarningForMultipleCulpritRevisions(self):
self.mock_bisect_state.mock_revision_states[2].passed = 'Skipped'
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(1, len(results.warnings))
def testWarningForTooLowRetryLimit(self):
self.mock_opts.repeat_test_count = 1
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(1, len(results.warnings))
def testWarningForTooLowConfidence(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[2].value = {'values': [95, 90, 90]}
revision_states[3].value = {'values': [95, 90, 90]}
revision_states[4].value = {'values': [95, 90, 90]}
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertGreater(results.confidence, 0)
self.assertEqual(1, len(results.warnings))
def testWarningForZeroConfidence(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[2].value = {'values': [100, 105, 95]}
revision_states[3].value = {'values': [100, 105, 95]}
revision_states[4].value = {'values': [100, 105, 95]}
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(0, results.confidence)
self.assertEqual(1, len(results.warnings))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -6,049,962,066,716,451,000 | 40.655629 | 79 | 0.671224 | false |
eonpatapon/rally | rally/plugins/openstack/scenarios/ceilometer/queries.py | 14 | 4521 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from rally import consts
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.ceilometer import utils as ceiloutils
from rally.task import validation
class CeilometerQueries(ceiloutils.CeilometerScenario):
"""Benchmark scenarios for Ceilometer Queries API."""
@validation.required_services(consts.Service.CEILOMETER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["ceilometer"]})
def create_and_query_alarms(self, meter_name, threshold, filter=None,
orderby=None, limit=None, **kwargs):
"""Create an alarm and then query it with specific parameters.
This scenario tests POST /v2/query/alarms
An alarm is first created and then fetched using the input query.
:param meter_name: specifies meter name of alarm
:param threshold: specifies alarm threshold
:param filter: optional filter query dictionary
:param orderby: optional param for specifying ordering of results
:param limit: optional param for maximum number of results returned
:param kwargs: optional parameters for alarm creation
"""
if filter:
filter = json.dumps(filter)
self._create_alarm(meter_name, threshold, kwargs)
self._query_alarms(filter, orderby, limit)
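    # An illustrative filter argument (an assumption, not taken from rally),
    # using Ceilometer's complex-query operators; it is serialized to JSON
    # above before being sent:
    #   {"and": [{"=": {"type": "threshold"}}, {"!=": {"state": "ok"}}]}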
@validation.required_services(consts.Service.CEILOMETER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["ceilometer"]})
def create_and_query_alarm_history(self, meter_name, threshold,
orderby=None, limit=None, **kwargs):
"""Create an alarm and then query for its history.
This scenario tests POST /v2/query/alarms/history
An alarm is first created and then its alarm_id is used to fetch the
history of that specific alarm.
:param meter_name: specifies meter name of alarm
:param threshold: specifies alarm threshold
:param orderby: optional param for specifying ordering of results
:param limit: optional param for maximum number of results returned
:param kwargs: optional parameters for alarm creation
"""
alarm = self._create_alarm(meter_name, threshold, kwargs)
alarm_filter = json.dumps({"=": {"alarm_id": alarm.alarm_id}})
self._query_alarm_history(alarm_filter, orderby, limit)
@validation.required_services(consts.Service.CEILOMETER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["ceilometer"]})
def create_and_query_samples(self, counter_name, counter_type,
counter_unit, counter_volume, resource_id,
filter=None, orderby=None, limit=None,
**kwargs):
"""Create a sample and then query it with specific parameters.
This scenario tests POST /v2/query/samples
A sample is first created and then fetched using the input query.
:param counter_name: specifies name of the counter
:param counter_type: specifies type of the counter
:param counter_unit: specifies unit of the counter
:param counter_volume: specifies volume of the counter
:param resource_id: specifies resource id for the sample created
:param filter: optional filter query dictionary
:param orderby: optional param for specifying ordering of results
:param limit: optional param for maximum number of results returned
:param kwargs: parameters for sample creation
"""
self._create_sample(counter_name, counter_type, counter_unit,
counter_volume, resource_id, **kwargs)
if filter:
filter = json.dumps(filter)
self._query_samples(filter, orderby, limit)
| apache-2.0 | 4,352,998,968,820,633,000 | 45.608247 | 78 | 0.677063 | false |
dneg/gaffer | python/GafferUI/TransformPlugValueWidget.py | 5 | 2819 | ##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
class TransformPlugValueWidget( GafferUI.CompoundPlugValueWidget ) :
def __init__( self, plug, collapsed=True, label=None, **kw ) :
GafferUI.CompoundPlugValueWidget.__init__( self, plug, collapsed, label, self.__summary )
@staticmethod
def __summary( plug ) :
info = []
translate = plug["translate"].getValue()
if translate[0] != 0 or translate[1] != 0 or translate[2] != 0 :
info.append( "Translate " + str( translate ) )
rotate = plug["rotate"].getValue()
if rotate[0] != 0 or rotate[1] != 0 or rotate[2] != 0 :
info.append( "Rotate " + str( rotate ) )
scale = plug["scale"].getValue()
if scale[0] != 1 or scale[1] != 1 or scale[2] != 1 :
if scale[0] != scale[1] or scale[0] != scale[2] :
info.append( "Scale " + str( scale ) )
else :
info.append( "Scale %0g" % scale[0] )
return ", ".join( info )
GafferUI.PlugValueWidget.registerType( Gaffer.TransformPlug.staticTypeId(), TransformPlugValueWidget )
| bsd-3-clause | 3,923,988,517,844,417,500 | 39.855072 | 102 | 0.660163 | false |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/pycrypto-2.3-py2.7-macosx-10.10-x86_64.egg/Crypto/SelfTest/Hash/test_MD2.py | 11 | 2251 | # -*- coding: utf-8 -*-
#
# SelfTest/Hash/MD2.py: Self-test for the MD2 hash function
#
# Written in 2008 by Dwayne C. Litzenberger <[email protected]>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Hash.MD2"""
__revision__ = "$Id$"
# This is a list of (expected_result, input[, description]) tuples.
test_data = [
# Test vectors from RFC 1319
('8350e5a3e24c153df2275c9f80692773', '', "'' (empty string)"),
('32ec01ec4a6dac72c0ab96fb34c0b5d1', 'a'),
('da853b0d3f88d99b30283a69e6ded6bb', 'abc'),
('ab4f496bfb2a530b219ff33031fe06b0', 'message digest'),
('4e8ddff3650292ab5a4108c3aa47940b', 'abcdefghijklmnopqrstuvwxyz',
'a-z'),
('da33def2a42df13975352846c30338cd',
'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789',
'A-Z, a-z, 0-9'),
('d5976f79d83d3a0dc9806c3c66f3efd8',
'1234567890123456789012345678901234567890123456'
+ '7890123456789012345678901234567890',
"'1234567890' * 8"),
]
def get_tests(config={}):
from Crypto.Hash import MD2
from common import make_hash_tests
return make_hash_tests(MD2, "MD2", test_data)
if __name__ == '__main__':
import unittest
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| gpl-2.0 | 6,609,686,230,667,523,000 | 36.516667 | 73 | 0.673034 | false |
rrahn/gdf_tools | include/seqan/util/py_lib/seqan/dddoc/core.py | 18 | 37011 | #!/usr/bin/env python
import copy
import operator
import os
import os.path
import pickle
import string
import sys
# Constant for C++ files.
FILETYPE_CPP = 2
# Constant for DDDOC files.
FILETYPE_DDDOC = 1
# Constant for none of the above.
FILETYPE_OTHER = 0
SOURCE_ENCODING = 'iso8859-1'
# Extension of C++ files.
CPP_EXTS = ['c', 'C', 'cpp', 'CPP', 'c++', 'C++', 'h', 'H', 'hpp', 'HPP',
'h++', 'H++']
# Extensions of DDDOC files.
DDDOC_EXTS = ['dddoc', 'DDDOC']
# List of ignored directory names.
IGNORED_DIRS = ['CSV', '.svn', 'seeds2', 'find2', 'cmake']
DATA = None
ID = 0
# Text attribute node keys.
TEXT_ATTRIBUTE_KEYS = set(['text', 'table', 'tableheader', 'code', 'console', 'section',
'subsection', 'image', 'contents', 'note', 'file', 'snippet',
'output'])
# Nodes having paths matching the following patterns are considered text
# container nodes. Their children having only one more component which is in
# TEXT_ATTRIBUTE_KEYS are processed in a special way. The last component is
# replaced with 'text' and their content is prefixed by "type=$key:" where $key
# is the original key. The content of the text container nodes is prefixed with
# "type=$text:" and moved to a child with key 'text'.
TEXT_CONTAINER_PATHS = [
'Indexpage.*.description',
'Page.*.description',
'Page.*.summary',
'Page.*.glossary.*',
'Function.*.example',
'Function.*.summary',
'Function.*.description',
'Function.*.remarks',
'Function.*.status',
'Class.*.example',
'Class.*.summary',
'Class.*.description',
'Class.*.remarks',
'Class.*.status',
'Metafunction.*.example',
'Metafunction.*.summary',
'Metafunction.*.description',
'Metafunction.*.remarks',
'Metafunction.*.status',
'Memfunc.*.example',
'Memfunc.*.summary',
'Memfunc.*.description',
'Memfunc.*.remarks',
'Memfunc.*.status',
'Memvar.*.example',
'Memvar.*.summary',
'Memvar.*.description',
'Memvar.*.remarks',
'Memvar.*.status',
'Macro.*.example',
'Macro.*.summary',
'Macro.*.description',
'Macro.*.remarks',
'Macro.*.status',
'Enum.*.example',
'Enum.*.summary',
'Enum.*.description',
'Enum.*.remarks',
'Enum.*.status',
'Spec.*.example',
'Spec.*.summary',
'Spec.*.description',
'Spec.*.remarks',
'Spec.*.status',
'Shortcut.*.example',
'Shortcut.*.summary',
'Shortcut.*.description',
'Shortcut.*.remarks',
'Shortcut.*.status',
'Tag.*.example',
'Tag.*.summary',
'Tag.*.description',
'Tag.*.remarks',
'Tag.*.status',
'Typedef.*.example',
'Typedef.*.summary',
'Typedef.*.description',
'Typedef.*.remarks',
'Typedef.*.status',
'Demo.*.summary',
'Demo.*.description',
'Demo.*.remarks',
'Demo.*.output',
'Adaption.*.example',
'Adaption.*.summary',
'Adaption.*.description',
'Adaption.*.remarks',
'Adaption.*.status',
'Concept.*.example',
'Concept.*.summary',
'Concept.*.description',
'Concept.*.remarks',
'Concept.*.status',
]
def _pathsMatch(path1, path2):
"""Compare two paths with wildcards."""
if not type(path1) is list:
path1 = splitKeys(path1[int(path1[0] == '.'):], '.') # Strip leading '.', if any.
if not type(path2) is list:
path2 = splitKeys(path2[int(path2[0] == '.'):], '.')
if len(path1) != len(path2):
return False
for i, p1 in enumerate(path1):
p2 = path2[i]
if not (p1 == '*' or p2 == '*' or p1 == p2):
return False
return True
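# A couple of illustrative calls (matching is component-wise, '*' wildcards a
# single component, and one leading '.' on string paths is stripped):
#   _pathsMatch('Page.*.summary', '.Page.Glossary.summary')  # True
#   _pathsMatch('Page.*', 'Page.Glossary.summary')           # False: lengths differ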
def transformDddocEntry(entry):
"""Performs the text container node transformations.
Returns list of entries to add if any.
"""
for path in TEXT_CONTAINER_PATHS:
if _pathsMatch(path, entry.path) and entry.content: # Is text container.
new_entry = copy.deepcopy(entry)
new_entry.content = 'type=text:' + entry.content
entry.content = ''
return [new_entry] # Done.
if not entry.path[-1] in TEXT_ATTRIBUTE_KEYS:
continue # Skip if last component does not match.
if not _pathsMatch(path, entry.path[:-1]):
continue # Skip non-matching path.
# If we reach here, it is a text node.
## print 'TRANSFORMING ', entry
last = entry.path[-1]
entry.path = entry.path[:-1]
entry.content = 'type=' + last + ':' + entry.content
## print ' to ', entry
return [] # Done
return [] # No updates.
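# Worked example (illustrative): an entry with path
# ['Function', 'length', 'summary', 'text'] and content 'Returns the length.'
# matches 'Function.*.summary' through its 'text' leaf, so it is rewritten in
# place to path ['Function', 'length', 'summary'] with content
# 'type=text:Returns the length.'. A matching container entry that itself
# carries text instead spawns a copy prefixed with 'type=text:' while the
# original's content is cleared.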
class FileCache(object):
"""Simple file contents cache.
Maps paths to (mtime, file contents) pairs.
Attrs:
path Path to the cache file.
content Dict with cache content mapping file name to pair of mtime
and data associated with the cache.
"""
def __init__(self, path):
self.path = path
self.content = {}
self._tryLoad()
def _tryLoad(self):
try:
with open(self.path, 'rb') as f:
self.content = pickle.load(f)
except:
print >>sys.stderr, 'Could not load cache %s' % self.path
return False
print >>sys.stderr, 'Successfully loaded cache %s' % self.path
return True
def flush(self):
"""Store the cache to its file."""
try:
with open(self.path, 'wb') as f:
pickle.dump(self.content, f)
except:
print >>sys.stderr, 'Could not store cache %s' % self.path
return False
print >>sys.stderr, 'Successfully stored cache %s' % self.path
return True
def has_key(self, key):
"""Returns True if the cache has data for this key."""
return self.content.has_key(key)
def isFresh(self, filename):
"""Returns True if the cache is fresh.
The cache is fresh if the file at the given path is not newer than the
data in the cache.
"""
if not self.has_key(filename):
return False
mtime = os.stat(filename).st_mtime
return mtime >= self.content[filename][0]
def get(self, key, defaultValue=None):
"""Return content of the given entry."""
return self.content.get(key, (None, defaultValue))[1]
def set(self, filename, value):
"""Set cache content and mtime."""
mtime = os.stat(filename).st_mtime
self.content[filename] = (mtime, value)
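# Minimal usage sketch ('cache.bin' and 'some.h' are assumed example paths;
# set() stats the file, so it must exist):
#   cache = FileCache('cache.bin')
#   if not cache.isFresh('some.h'):
#       cache.set('some.h', open('some.h').read())
#   data = cache.get('some.h')
#   cache.flush()  # persist for the next run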
class DddocEntry(object):
def __init__(self, path, content, filename, line_no_begin, line_no_end):
self.path = path
self.content = content
self.filename = filename
self.line_no_begin = line_no_begin
self.line_no_end = line_no_end
def __str__(self):
tpl = ('DddocEntry(path=%s, content=%s, filename=%s, line_no_begin=%s, '
'line_no_end=%s)')
values = (self.path, self.content, self.filename, self.line_no_begin,
self.line_no_end)
return tpl % tuple(map(repr, values))
def __repr__(self):
return self.__str__()
@classmethod
def cmpPathLocation(klass, lhs, rhs):
"""Comparator, by entry path then filename and line number."""
lhs_t = (lhs.path, lhs.filename, lhs.line_no_begin)
rhs_t = (rhs.path, rhs.filename, rhs.line_no_begin)
if lhs_t < rhs_t:
return -1
elif lhs_t > rhs_t:
return 1
else:
return 0
def splitKeys(text, delimiters, limit=None, _cache={}):
"""Splitting that considers escaping of keys using quotes.
    >>> splitKeys('.Adaption.\'std::string\'.summary', '.')
['', 'Adaption', '\'std::string\'', 'summary']
"""
if '\u0001' in text:
text = text.split('\u0001', 1)[0] # Remove optional label, used in inheritance.
    if _cache.has_key((text, delimiters, limit)):
        return _cache[(text, delimiters, limit)]
count = 0
current = []
result = []
str_delimiter = None
for i in range(0, len(text)):
# Handle text in strings.
if str_delimiter:
if text[i] == str_delimiter:
str_delimiter = None
current.append(text[i])
continue
elif text[i] in '\'"':
str_delimiter = text[i]
current.append(text[i])
continue
# Handle non-in-string text.
if text[i] in delimiters:
result.append(''.join(current))
current = []
count += 1
if limit and count >= limit:
result.append(text[i+1:])
                _cache[(text, delimiters, limit)] = result
return result
else:
current.append(text[i])
result.append(''.join(current))
    _cache[(text, delimiters, limit)] = result
return result
def cleanPath(path_arr):
"""Takes a list with a path and cleans its element.
Cleaning its element currently only consists in removing singel and double
quotes.
"""
def _cleanPathElement(x):
return x.strip().replace('\'', '').replace('"', '')
return map(_cleanPathElement, path_arr)
class FileLoader(object):
"""File loader helper class.
Attrs:
cache FileCache to use for caching.
entries List of DddocEntries objects.
"""
def __init__(self, cache):
self.cache = cache
self.entries = []
def _loadDDDOCFile(self, filename, cache): # TODO(holtgrew): Make Top-Level Function?
# Try to load from cache.
if cache.isFresh(filename):
return cache.get(filename)
# Load file.
with open(filename, 'rb') as f:
text = [x.decode(SOURCE_ENCODING).encode("ascii", "xmlcharrefreplace") for x in f.readlines()]
cache.set(filename, text)
return text
def _loadCPPFile(self, filename, cache): # TODO(holtgrew): Make Top-Level Function?
if cache.isFresh(filename):
return cache.get(filename)
# TODO(holtgrew): This looks overly complicated.
f = open(filename)
lines = [x.decode(SOURCE_ENCODING).encode("ascii", "xmlcharrefreplace") for x in f.readlines()]
f.close()
ret = []
#test for SEQAN_NO_DDDOC
for line in lines:
if line.find("SEQAN_NO_DDDOC") >= 0:
cache.set(filename, ret)
return ret;
incomment = False
innextcomment = False
inextract = False
for line in lines:
line = line.rstrip()
str_line = ""
if len(line) == 0:
if not innextcomment and not incomment:
str_line = "."
else:
str_line = " "
while len(line) > 0 :
if innextcomment:
if line[len(line)-1] == "\\" :
if inextract: str_line += line[: len(line)-1]
else:
if inextract: str_line += line
innextcomment = False
break
elif incomment:
pos1 = line.find("*/")
if pos1 < 0:
if inextract: str_line += line;
break;
else:
if inextract:
str_line += line[:pos1];
line = line[pos1 + 3:];
else:
line = line[pos1 + 2:];
incomment = False;
else:
pos1 = line.find("/*")
pos2 = line.find("//")
pos3 = line.find('"')
if (pos1 >= 0) and ((pos2 < 0) or (pos1 < pos2)) and ((pos3 < 0) or (pos1 < pos3)):
pos9 = line.find("*/", pos1 + 2)
if (len(line) > pos1 + 2):
inextract = (line[pos1 + 2] == "/") or (line[pos1 + 2] == "*")
else:
inextract = False
if pos9 < 0 :
if inextract: str_line += line[pos1 + 3:]
incomment = True
break
else:
if inextract:
                                str_line += line[pos1 + 3: pos9]
line = line[pos9 + 3:]
else:
line = line[pos9 + 2:]
elif (pos2 >= 0) and ((pos3 < 0) or (pos2 < pos3)):
pos2b = pos2 + 2;
while ((pos2b < len(line)) and ((line[pos2b] == "/") or (line[pos2b] == "*"))):
pos2b += 1
inextract = (pos2b > pos2 + 2)
if line[len(line)-1] == "\\" :
if inextract: str_line += line[pos2b: len(line)-1]
innextcomment = True
else:
if inextract: str_line += line[pos2b:]
break
elif pos3 >= 0:
pos9 = line.find('"', pos3 + 2)
if pos9 < 0:
line = line[pos9+1:]
break
else:
break
else:
break
ret = ret + [str_line]
cache.set(filename, ret)
return ret
def _getFileType(self, filename): # TODO(holtgrew): Make Top-Level Function?
"""Determines file type from filename.
Determines the file type from the extension of the given filename.
        For example, _getFileType('test.cpp') and _getFileType('path/file.h')
        return FILETYPE_CPP, while _getFileType('test.dddoc') returns
        FILETYPE_DDDOC.
Args:
filename Filename to parse.
Returns:
One of {FILETYPE_CPP, FILETYPE_DDDOC, FILETYPE_OTHER}, depending
on the extension of filename.
"""
# Get file extension.
base, ext = os.path.splitext(filename)
if ext[1:] in CPP_EXTS:
return FILETYPE_CPP
elif ext[1:] in DDDOC_EXTS:
return FILETYPE_DDDOC
else:
return FILETYPE_OTHER
def _loadFile(self, filename):
"""Load the file with the given filename.
The line is then split into DDDoc entries, unwrapping entries that span
more than one line. Finally, the keys are expanded, and surrounding
whitespace is stripped.
"""
## print filename
# Load file contents, through a cache.
file_type = self._getFileType(filename)
if file_type == FILETYPE_CPP:
text = self._loadCPPFile(filename, self.cache)
elif file_type == FILETYPE_DDDOC:
text = self._loadDDDOCFile(filename, self.cache)
        else:
            raise ValueError("Unknown file type of file %s." % filename)
text.append('.')
## print 'LOADING', filename
## print '\n'.join(text)
# Process all lines in the input, join lines that do not begin with a
# dot with the previous ones. This allows the wrapping of lines.
str = False
dddoc_entries = [] # [(path, filename, begin line no, end line no)]
line_no_begin, line_no_end = 1, 1
for line in text:
## if line and line != '.':
## print 'LINE', line
line_no_end += 1
if not line:
continue
if line[0] == '.':
if str is not False and str[0] == '.' and str != '.' and str.strip(): # Skip empty dummy lines.
dddoc_entries.append([str, filename, line_no_begin, line_no_end])
## print dddoc_entries[-1]
line_no_begin = line_no_end
str = line
if str == '.':
str = False
elif str:
if str[-1] != '\n':
str += '\n'
str += line
# Now, expand the keys of dddoc_entries, e.g. dddoc_entries[i][0].
# TODO(holtgrew): Consider escaping of keys here.
stack = []
stack_len_sum = 0
for entry in dddoc_entries:
## print 'ENTRY', entry
## print 'stack=%s' % (stack)
# Split out $key:$value of the entry and $the.$path.$elements from $key.
maybe_pair = splitKeys(entry[0].strip(), ':', 1)
if len(maybe_pair) == 2:
                key, value = maybe_pair
else:
key, value = entry[0].strip(), ''
path = splitKeys(key, '.')[1:]
# Count empty entries in the path.
## print ' ', path
empty_count = reduce(operator.add, [1 for x in path if not x], 0)
## print ' empty_count', empty_count
if empty_count <= len(stack):
stack = stack[:empty_count]
stack_len_sum = reduce(operator.add, map(len, stack), 0)
stack.append(path[empty_count:])
stack_len_sum += len(stack[-1])
path = reduce(operator.add, stack, [])
# Remove any leading and trailing whitespace from value and compute
# updated begin and end line no.
line_count = len(value.splitlines())
value_no_leading = value.lstrip()
line_count2 = len(value_no_leading.splitlines())
line_no_begin = entry[2] + line_count - line_count2
value_no_trailing = value_no_leading.rstrip()
line_count3 = len(value_no_trailing.splitlines())
line_no_end = entry[3] - line_count2 + line_count3
# Store the DDDoc entry.
if path:
self.entries.append(DddocEntry(cleanPath(path), value_no_trailing, filename, line_no_begin, line_no_end))
new_entries = transformDddocEntry(self.entries[-1])
## if new_entries:
## print 'NEW ENTRIES', new_entries
self.entries += new_entries
## print self.entries[-1]
def run(self, search_path):
"""Call parseFile() on files.
All files below search_path will be searched that have file type
FILETYPE_CPP or FILETYPE_DOC as determined by getFileType().
Directories with names of IGNORED_DIRS are skipped.
Args:
search_path String, path to search files under.
"""
for root, dirs, files in os.walk(search_path):
# Parse all files.
for file in files:
if os.path.basename(file).startswith('.'):
                    continue # Skip hidden files.
path = os.path.join(root, file)
if self._getFileType(path) in [FILETYPE_CPP, FILETYPE_DDDOC]:
self._loadFile(path)
# Exclude ignored diretories.
for ignored in IGNORED_DIRS:
if ignored in dirs:
dirs.remove(ignored)
class DddocTreeNode(object):
"""Represents one entry in the DddocTree.
Attrs:
tree The DddocTree that the node belongs to.
key The key of this child, last element of path.
path The full path to the child.
entry Range [beg, end) of DddocEntry that this node represents.
children dict with the children as key/value pairs.
texts Array of strings with the texts.
"""
    def __init__(self, tree, key, path, entry, children=None):
self.tree = tree
self.key = key
self.path = path
self.entry = entry
        self.children = children if children is not None else {}
self.texts = []
def text(self, spacer=' '):
return spacer.join(self.texts)
def __str__(self):
"""Returns dump for the whole tree in a user-readable manner."""
def _str(node, level=0, prefix=''):
space = ' ' * level
if prefix:
prefix = prefix + ' --> '
res = '%s %sDddocTreeNode(key=%s, texts=%s)' % (space, prefix, repr(node.key), repr(node.texts))
for k, child in node.children.iteritems():
res += '\n' + _str(child, level + 1, k)
return res
return _str(self)
def dump(self, stream=sys.stdout):
"""Debug recursive dumping of a tree node."""
print >>stream, self
class DddocTree(object):
"""Tree with the information from the DDDoc contents.
Attrs:
entries The raw entries.
root The root DddocTreeNode.
glossary_nodes List of nodes that contain glossary entries. Built
in finalize().
"""
def __init__(self, entries):
self.entries = entries
#for e in self.entries:
# print e
self.root = DddocTreeNode(self, 'ROOT', [], (0, 0), self._buildSubtree([], 0, len(entries), 0))
self.cache = None
self.glossary_nodes = []
## self.root.dump()
## for entry in self.entries:
## print entry.path, entry.content
def _enableFindCache(self):
if self.cache is None:
self.cache = {}
def finalize(self):
"""Called after tree will not be modified any more.
Enables caching and builds some indices.
"""
self._enableFindCache()
print >>sys.stderr, 'Indexing Glossary Pages'
if 'Page' in self.root.children:
for key, node in self.root.children['Page'].children.iteritems():
if 'glossary' in node.children:
self.glossary_nodes.append(node.children['glossary'])
print >>sys.stderr, ' Found Page.%s' % node.key
def _buildSubtree(self, path, begin_index, end_index, level):
# First, identify the entries belonging to each node (entry.path[i] are
# equal for i = level, inductively, also i <= level).
prev_key = None
prev_beg = None
subseqs = []
for i in range(begin_index, end_index):
if prev_key != self.entries[i].path[level]:
if prev_key != None:
subseqs.append((prev_beg, i))
prev_key = self.entries[i].path[level]
prev_beg = i
if prev_key != None and prev_beg != end_index: # Handle last.
subseqs.append((prev_beg, end_index))
# Now, subseqs contains a sequence of contiguous half-open intervals.
# Each contains the data for one tree node. There is a possibly empty
# sequence of leading entries with paths of length level + 1 containing
# the data for the current level node. The rest is for the level below.
result = {}
for (b, c) in subseqs:
assert b != c
# Split into entries for this and for next level: [a, b); [b, c).
a = b # [a, b) will be for this vertex.
while b < c and len(self.entries[b].path) == level + 1:
b += 1
# Compute node.
path = self.entries[a].path[:(level + 1)]
key = path[level]
node = DddocTreeNode(self, key, path, (a, b))
## print 'new node', key
for i in range(a, b):
if self.entries[i].content:
node.texts.append(self.entries[i].content)
# Compute subtree.
node.children = self._buildSubtree(path, b, c, level + 1)
result[key] = node
return result
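    # Illustrative: entries with paths ['Page', 'Intro'] and
    # ['Page', 'Intro', 'summary'] yield a 'Page' node whose child 'Intro'
    # holds the first entry's content in its texts and has a 'summary' child
    # for the second.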
def find(self, path):
"""Query tree for a DddocTreeNode.
The argument path can either be a dot-separated string or a list with
this information. If path is a string then one optional leading dot is
optional. Returns None if nothing could be found.
tree.find(['path', 'to', 'node'])
tree.find('path.to.node')
tree.find('.path.to.node')
"""
## print 'FIND(%s)' % repr(path)
# Try to retrieve from cache if there is a cache.
if not self.cache is None:
if not type(path) is str:
key = '.'.join(path)
else:
key = path
if self.cache.has_key(key):
return self.cache[key]
# Split path if is string, ignore leading dot if any.
if type(path) is str:
path = splitKeys(path, '.')
if path and path[0] == '':
path = path[1:]
# Now, query the tree.
def findRecurse(node, path):
"""Helper function that searches for the node with given path."""
if not path:
return node
if not node.children.has_key(path[0]):
return None
return findRecurse(node.children[path[0]], path[1:])
res = findRecurse(self.root, path)
if not self.cache is None:
self.cache['.'.join(path)] = res
return res
# Paths where the inline summary is moved into a .summary child. See
# documentation of processInlineSummaries() for details.
SUMMARY_PATHS = [
'*.*.param.*',
'*.*.returns',
'*.*.tag.*',
'*.*.value.*',
'*.*.returns.param.*', # TODO(holtgrew): Used for metafunctions, could be improved.
'Adaption.*',
'Class.*',
'Concept.*',
'Demo.*',
'Enum.*',
'Function.*',
'Macro.*',
'Memfunc.*',
'Metafunction.*',
'Shortcut.*',
'Spec.*',
'Tag.*',
]
# TODO(holtgrew): Also use for generateAutomaticReferences()
def _matchTreesInNode(tree, node, path, func, block_paths=[['globals']], level=0):
"""Calls func on nodes matching path."""
## print ' ' * level, '_matchTreesInNode(tree', node.path, path, func, level, ')'
if path:
if path[0] == '*':
for child in node.children.itervalues():
_matchTreesInNode(tree, child, path[1:], func, block_paths, level+1)
else:
if node.children.has_key(path[0]):
_matchTreesInNode(tree, node.children[path[0]], path[1:], func, block_paths, level+1)
else:
for block_path in block_paths:
## print node.path[:len(block_path)], '==', block_path
if node.path[:len(block_path)] == block_path:
return # Path is blocked.
func(node)
def processInlineSummaries(tree, paths):
"""Move inline documentation to .summary subpaths.
    For nodes matching the given paths, inline values are moved to .summary
    subnodes for greater consistency and lower variance.
E.g. the following:
.Function.f.param.x:This is param x.
will be transformed into
.Function.f.param.x
..summary:This is param x.
"""
# First, collect nodes for the summary transfer.
collected_nodes = set()
def f(node):
if node.texts:
collected_nodes.add(node)
for path in paths:
_matchTreesInNode(tree, tree.root, splitKeys(path, '.'), f)
# Then, move the inline summaries into a summary node.
for node in collected_nodes:
if not 'summary' in node.children: # Create node if necessary.
summaryNode = DddocTreeNode(tree, 'summary', node.path + ['summary'], (-2,-2))
node.children['summary'] = summaryNode
node.children['summary'].texts += node.texts
node.texts = []
def generateAutomaticReferences(tree):
"""Interpret the globals.relations entries."""
print >>sys.stderr, 'Generating Automatic References'
relations_node = tree.find('globals.relations')
if not relations_node:
return # Empty, do nothing.
# We first collect all automatic links, scheduled to be added later.
additions = []
def appendToAdditions(node):
for node_path in node.texts:
node_path = splitKeys(node_path, '.')
## print ' ' * level, ' ', node_path
res = tree.find(node_path)
## print ' ' * level, ' ', res is not None
if not res:
continue # Not found, Skip # TODO(holtgrew): Warning?
            # NOTE: 'key' is resolved late, from the enclosing loop below, at
            # the time appendToAdditions is actually called.
            additions.append((res.path + [key], '.'.join(node.path[:2])))
for key, node in relations_node.children.iteritems():
## print 'GENERATE', key, node
        for txt in node.texts:
            path = splitKeys(txt, '.')
            _matchTreesInNode(tree, tree.root, path, appendToAdditions)
# Now, add these additions. This circumvents problems leading to infinite
# recursions.
for path, text in additions:
res = tree.find(path)
if not res:
parent = tree.find(path[:-1])
assert parent
res = DddocTreeNode(tree, path[-1], path, None)
parent.children[path[-1]] = res
if not text in res.texts:
res.texts.append(text)
def generateInheritedElements(tree):
"""Push through inheritances."""
print >>sys.stderr, 'Linking Inherited Entities'
inherit_node = tree.find('globals.inherit')
# Contains children: $TARGET_FIELD:$THROUGH_FIELD.$SOURCE_FIELD
all_paths = set()
depends_on = {}
inheritance_rules = []
# First build a dependency graph.
for target_field, child in inherit_node.children.items():
for txt in child.texts:
arr = splitKeys(txt, '.')
through_field = arr[0]
if len(arr) > 1:
source_field = arr[1]
else:
source_field = target_field
inheritance_rules.append((target_field, through_field, source_field))
def registerDependencies(node):
all_paths.add('.'.join(node.path))
if not through_field in node.children:
return
for path in node.children[through_field].texts:
pth = '.'.join(node.path)
depends_on.setdefault(pth, set()).add(path)
_matchTreesInNode(tree, tree.root, ['*', '*'], registerDependencies)
## print 'ALL PATHS', all_paths
# Now, push through references by inheritance for all paths that are not
# linked to and not completed yet.
done = set()
to_do = all_paths - done - set(depends_on.keys())
while to_do:
        # Process all paths that are not completed and have no unresolved
        # dependencies.
# Actually perform the preprocessing.
for target_path in to_do:
for target_field, through_field, source_field in inheritance_rules:
target_node = tree.find(target_path)
if not through_field in target_node.children:
continue # Skip if no source children.
## print 'TRYING', target_path, through_field, source_field
for source_path in target_node.children[through_field].texts:
source_node = tree.find(source_path)
if not source_field in source_node.children:
continue # Skip if no source field.
for path in source_node.children[source_field].texts:
if not '\u0001' in path: # We use this ugly hack to add the inheritance source here.
path = path + '\u0001' + '.'.join(source_node.path)
# If necessary then create child in target node.
if not target_field in target_node.children:
target_node.children[target_field] = DddocTreeNode(tree, target_field, target_node.path + [target_field], source_node.children[source_field].entry)
# Copy over path.
target_node.children[target_field].texts.append(path)
## print ' appending', path
# Clear out the stuff that we completed.
to_delete = []
for key in depends_on: # Clear out all done.
depends_on[key] -= to_do
if not depends_on[key]:
to_delete.append(key)
for key in to_delete:
del depends_on[key]
        done |= to_do # Add done.
        to_do = all_paths - done - set(depends_on.keys())
    if depends_on:
        # The worklist drained although dependencies remain unresolved; this
        # indicates a cyclic (or dangling) dependency.
        print >>sys.stderr, 'WARNING: unresolved dependencies remain (cyclic?): %s' % ', '.join(depends_on.keys())
def removeDuplicateTexts(tree):
"""Remove duplicates from texts members.
Suffixes starting with '\u0001' are ignored for the comparisons
and strings with these suffixes are preferred.
"""
##print 'remove duplicates'
def recurse(node):
in_cleaned = {}
cleaned = []
for txt in node.texts:
clean = txt
pos = txt.find('\u0001')
if pos != -1:
clean = txt[:pos]
##print cleaned, repr(clean)
if clean in in_cleaned:
                # 'clean' always has the suffix stripped, so test the raw
                # 'txt' to prefer the variant carrying the source suffix.
                if '\u0001' in txt and not '\u0001' in cleaned[in_cleaned[clean]]:
                    cleaned[in_cleaned[clean]] = txt
else:
in_cleaned[clean] = len(cleaned)
cleaned.append(txt)
node.texts = cleaned
for child in node.children.itervalues():
recurse(child)
for child in tree.root.children.itervalues():
recurse(child)
# TODO(holtgrew): If needed, this could easily be generalized.
def buildByTypeAndCatIndex(tree):
"""Build an index into the given DddocTree.
The index will be a two-dimensional dict, mapping (first element of path,
value of cat field) to a list of nodes in the DddocTree.
"""
result = {}
def recurse(result, path, node):
## print path, node.path
if len(path) == 2:
if node.children.has_key('cat'):
for cat in node.children['cat'].texts:
result.setdefault(path[0], {}).setdefault(cat, []).append(node)
else:
result.setdefault(path[0], {})[path[1]] = node
if len(path) < 2:
for key, child in node.children.iteritems():
recurse(result, path + [key], child)
for key, child in tree.root.children.iteritems():
recurse(result, [key], child)
## for k1, v1 in result.iteritems():
## for k2, v2 in v1.iteritems():
## print 'k1=%s\tk2=%s\tv=%s' % (k1, k2, [x.path for x in v2])
return result
class ErrorLogger(object):
def __init__(self):
self.error_count = 0
def invalidReference(self, txt, locations):
self.error_count += 1
if not locations:
print >>sys.stderr, 'ERROR: Invalid Reference %s in unknown location (sorry).' % txt
else:
print >>sys.stderr, 'ERROR: Invalid Reference %s in one of the following locations:' % txt
for filename, line in locations:
print >>sys.stderr, ' %s:%s' % (filename, line)
class App(object):
"""Application object for DDDoc.
Provides a facade to the functionality of the core module.
Usage:
app = App()
app.loadFiles([<files>])
app.loadFiles([<files>])
app.loadingComplete()
Attrs:
data The global state Data object.
"""
def __init__(self):
"""Initialize object members."""
self.cache = FileCache('dddoc_cache.bin')
self.file_loader = FileLoader(self.cache)
self.dddoc_tree = None
self.error_logger = ErrorLogger()
def loadFiles(self, path):
"""Load the files with the given file name."""
self.file_loader.run(path)
def loadingComplete(self):
"""Initialize data object.
This method is called after all calls to loadFiles().
"""
# Save the cache to disk again.
self.cache.flush()
# Sort Dddoc Entries and build tree.
self.file_loader.entries.sort(cmp=DddocEntry.cmpPathLocation)
self.dddoc_tree = DddocTree(self.file_loader.entries)
# Generate automatic references.
generateAutomaticReferences(self.dddoc_tree)
# Perform inheritance as configured in global configuration.
generateInheritedElements(self.dddoc_tree)
# Clean duplicates from 'texts' members
removeDuplicateTexts(self.dddoc_tree)
# Move inline summaries into .summary children.
processInlineSummaries(self.dddoc_tree, SUMMARY_PATHS)
# Finally, after all modifications, enable caching and build indices in
# tree.
self.dddoc_tree.finalize()
def getNextId(self):
"""Returns an identifier.
Each id is only returned once.
"""
assert False, "For future use."
self.next_id += 1
return self.next_id - 1
| gpl-3.0 | -5,976,380,026,296,472,000 | 34.828654 | 175 | 0.538921 | false |
bmhatfield/Diamond | src/collectors/slony/test/testslony.py | 31 | 3625 | #!/usr/bin/python
# coding=utf-8
###############################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import run_only
from mock import patch
from slony import SlonyCollector
def run_only_if_psycopg2_is_available(func):
try:
import psycopg2
except ImportError:
psycopg2 = None
pred = lambda: psycopg2 is not None
return run_only(func, pred)
class TestSlonyCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('SlonyCollector', {})
self.collector = SlonyCollector(config, None)
def test_import(self):
self.assertTrue(SlonyCollector)
@run_only_if_psycopg2_is_available
@patch.object(SlonyCollector, '_get_stats_by_database')
@patch.object(SlonyCollector, 'publish')
def test_default(self, publish, _get_stats_by_database):
_get_stats_by_database.return_value = [('foo', 7)]
self.collector.collect()
_get_stats_by_database.assert_called_with(
'localhost',
5432,
'postgres',
'postgres',
'postgres',
'_postgres',
'Node [0-9]+ - postgres@localhost',
)
self.assertPublished(publish, 'foo', 7)
@run_only_if_psycopg2_is_available
@patch.object(SlonyCollector, '_get_stats_by_database')
@patch.object(SlonyCollector, 'publish')
def test_instances(self, publish, _get_stats_by_database):
def side_effect(host, port, user, pwd, slony_db, slony_schema, node):
if (slony_db, slony_schema) == ('postgres', '_postgres'):
return [('foo', 7)]
elif (slony_db, slony_schema) == ('data', '_data'):
return [('bar', 14)]
_get_stats_by_database.side_effect = side_effect
config = get_collector_config('SlonyCollector', {
'instances': {
'alpha': {
'slony_db': 'postgres',
'slony_schema': '_postgres',
},
'beta': {
'slony_db': 'data',
'slony_schema': '_data',
},
}
})
collector = SlonyCollector(config, None)
collector.collect()
self.assertPublished(publish, 'foo', 7)
self.assertPublished(publish, 'bar', 14)
@run_only_if_psycopg2_is_available
@patch.object(SlonyCollector, '_get_stats_by_database')
def test_override_user_password_nodestr(self, _get_stats_by_database):
config = get_collector_config('SlonyCollector', {
'instances': {
'alpha': {
'slony_db': 'postgres',
'slony_schema': '_postgres',
'user': 'postgres',
'password': 'postgres',
'slony_node_string': '(.*)',
},
'beta': {
'slony_db': 'data',
'slony_schema': '_data',
'user': 'data',
'password': 'data',
'slony_node_string': 'Node (.*)',
},
}
})
collector = SlonyCollector(config, None)
collector.collect()
_get_stats_by_database.assert_any_call(
'localhost', 5432, 'postgres', 'postgres',
'postgres', '_postgres', '(.*)'
)
_get_stats_by_database.assert_any_call(
'localhost', 5432, 'data', 'data',
'data', '_data', 'Node (.*)'
)
| mit | -5,769,736,619,534,606,000 | 30.798246 | 79 | 0.510069 | false |
operasoftware/presto-testo | wpt/websockets/autobahn/oberstet-Autobahn-643d2ee/demo/streaming/frame_based_server.py | 4 | 1803 | ###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import hashlib
from twisted.internet import reactor
from autobahn.websocket import WebSocketServerFactory, WebSocketServerProtocol, listenWS
class FrameBasedHashServerProtocol(WebSocketServerProtocol):
"""
Frame-based WebSockets server that computes a running SHA-256 for message data
received. It will respond after every frame received with the digest computed
   up to that point. It can receive messages consisting of an unlimited number
   of frames. The digest is reset upon each new message.
"""
def onMessageBegin(self, opcode):
self.sha256 = hashlib.sha256()
def onMessageFrame(self, frame, reserved):
self.sha256.update(str(frame))
digest = self.sha256.hexdigest()
self.sendMessage(digest)
print "Sent digest for frame: %s" % digest
def onMessageEnd(self):
pass
if __name__ == '__main__':
factory = WebSocketServerFactory("ws://localhost:9000")
factory.protocol = FrameBasedHashServerProtocol
listenWS(factory)
reactor.run()
| bsd-3-clause | 5,910,651,103,912,731,000 | 34.795918 | 88 | 0.6467 | false |
gsssrao/MozDef | docker/conf/config.py | 14 | 1961 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
#
# Contributors:
# Anthony Verez [email protected]
from celery.schedules import crontab, timedelta
import time
import logging
ALERTS = {
'bro_intel.AlertBroIntel': {'schedule': crontab(minute='*/1')},
'bro_notice.AlertBroNotice': {'schedule': crontab(minute='*/1')},
'bruteforce_ssh.AlertBruteforceSsh': {'schedule': crontab(minute='*/1')},
'cloudtrail.AlertCloudtrail': {'schedule': crontab(minute='*/1')},
'fail2ban.AlertFail2ban': {'schedule': crontab(minute='*/1')},
}
RABBITMQ = {
'mqserver': 'localhost',
'mquser': 'guest',
'mqpassword': 'guest',
'mqport': 5672,
'alertexchange': 'alerts',
'alertqueue': 'mozdef.alert'
}
ES = {
'servers': ['http://localhost:9200']
}
OPTIONS = {
'defaulttimezone': 'UTC',
}
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'simple': {
'format': '%(levelname)s %(message)s',
'datefmt': '%y %b %d, %H:%M:%S',
},
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d: %(message)s'
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'celery': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'celery.log',
'formatter': 'standard',
'maxBytes': 1024 * 1024 * 100, # 100 mb
},
},
'loggers': {
'celery': {
'handlers': ['celery', 'console'],
'level': 'DEBUG',
},
}
}
logging.Formatter.converter = time.gmtime
| mpl-2.0 | -3,990,829,244,373,288,000 | 25.146667 | 97 | 0.559408 | false |
janeyuhui/volatilitux | core/config.py | 8 | 4033 |
from .kernel_struct import *
from .raw_dump import *
from .fingerprint import *
from xml.dom.minidom import parse, Document
import os.path
CONFIG_VERSION = 1.0
class Config:
init_task = None
arch = None
offsets = {}
dumpfile = None
debug = False
@classmethod
def setDebug(cls, debug=False):
cls.debug = debug
@classmethod
def setDumpFile(cls, file):
RawDump.setFile(file)
        cls.dumpfile = file
@classmethod
def fingerprint(cls, dumpConfig=None):
"""
        Fingerprint the given raw memory dump. Save the configuration in a file (dumpConfig) if needed.
"""
res = Fingerprint(cls.dumpfile, cls.debug)
if(not res.valid):
raise Exception("Unable to fingerprint the given dumpfile. Please use a configuration file.")
cls.setArch(res.arch_name)
cls.init_task = res.init_task
for struct_name, offsets in res.offsets.items():
for field_name, offset in offsets.items():
cls.setOffset(struct_name, field_name, offset)
# Save config
if(dumpConfig is not None):
if os.path.exists(dumpConfig):
raise Exception("The provided filename already exists.")
f = open(dumpConfig, "w")
doc = Document()
config = doc.createElement("config")
config.setAttribute("version", str(CONFIG_VERSION))
doc.appendChild(config)
# init_task address
init_task = doc.createElement("init_task")
init_task.setAttribute("address", "0x%08x" % cls.init_task)
config.appendChild(init_task)
# architecture
arch = doc.createElement("arch")
arch.setAttribute("name", res.arch_name)
config.appendChild(arch)
# structures
for struct_name, offsets in res.offsets.items():
struct = doc.createElement("struct")
struct.setAttribute("name", struct_name)
for field_name, offset in offsets.items():
field = doc.createElement("field")
field.setAttribute("name", field_name)
field.setAttribute("offset", str(offset))
struct.appendChild(field)
config.appendChild(struct)
# Generate XML and write it
xml = doc.toprettyxml(indent=" ")
f.write(xml)
f.close()
print "Configuration exported to %s." % dumpConfig
@classmethod
def setArch(cls, arch):
try:
cls.arch = __import__('mm.arch.'+arch, globals(), locals(), 'va_to_pa')
except:
raise Exception("Invalid arch specified.")
@classmethod
def setOffset(cls, struct_name, field_name, field_offset):
cls_obj = STRUCTS[struct_name]
cls_obj.setFieldOffset(field_name, field_offset)
@classmethod
def setConfigFile(cls, file):
# parse the file
o = parse(file)
# Get init_task addr and the architecure
res = o.getElementsByTagName("init_task")
if(len(res) == 0):
raise Exception("No init_task tag found in config file")
if(not res[0].getAttribute("address")):
raise Exception("Missing address field in init_task tag")
cls.init_task = int(res[0].getAttribute("address"), 16)
res = o.getElementsByTagName("arch")
if(len(res) == 0):
raise Exception("No arch tag found in config file")
cls.setArch(res[0].getAttribute("name"))
# Get the structure field offsets
for s in o.getElementsByTagName("struct"):
struct_name = s.getAttribute("name")
if not struct_name in STRUCTS:
raise Exception("Invalid struct name '%s'" % struct_name)
            # Use a local name so we do not shadow this classmethod's cls parameter.
            struct_cls = STRUCTS[struct_name]
            for f in s.getElementsByTagName("field"):
                field_name = f.getAttribute("name")
                field_offset = int(f.getAttribute("offset"))
                if not field_name in struct_cls.fields_classes:
                    raise Exception("Invalid field '%s' for struct '%s'" % (field_name, struct_name))
                struct_cls.setFieldOffset(field_name, field_offset)
| gpl-2.0 | -3,633,211,295,116,639,000 | 26.073826 | 102 | 0.617158 | false |
tquizzle/Sick-Beard | sickbeard/clients/transmission.py | 30 | 4145 | # Author: Mr_Orange <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import re
import json
from base64 import b64encode
import sickbeard
from sickbeard.clients.generic import GenericClient
class TransmissionAPI(GenericClient):
def __init__(self, host=None, username=None, password=None, custom_url=None): #TODO : plug that custom_url argument to live data
super(TransmissionAPI, self).__init__('Transmission', host, username, password)
self.custom_url = sickbeard.TORRENT_CUSTOM_URL if custom_url is None else custom_url
if self.custom_url:
self.url = self.host + 'rpc'
else:
self.url = self.host + 'transmission/rpc'
def _get_auth(self):
post_data = json.dumps({'method': 'session-get',})
try:
self.response = self.session.post(self.url, data=post_data.encode('utf-8'))
self.auth = re.search('X-Transmission-Session-Id:\s*(\w+)', self.response.text).group(1)
except:
return None
self.session.headers.update({'x-transmission-session-id': self.auth})
#Validating Transmission authorization
post_data = json.dumps({'arguments': {},
'method': 'session-get',
})
self._request(method='post', data=post_data)
return self.auth
def _add_torrent_uri(self, result):
arguments = { 'filename': result.url,
'paused': 1 if sickbeard.TORRENT_PAUSED else 0,
'download-dir': sickbeard.TORRENT_PATH
}
post_data = json.dumps({ 'arguments': arguments,
'method': 'torrent-add',
})
self._request(method='post', data=post_data)
return self.response.json['result'] == "success"
def _add_torrent_file(self, result):
arguments = { 'metainfo': b64encode(result.content),
'paused': 1 if sickbeard.TORRENT_PAUSED else 0,
'download-dir': sickbeard.TORRENT_PATH
}
post_data = json.dumps({'arguments': arguments,
'method': 'torrent-add',
})
self._request(method='post', data=post_data)
return self.response.json['result'] == "success"
def _set_torrent_ratio(self, result):
torrent_id = self._get_torrent_hash(result)
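        # Transmission's RPC spec defines seedRatioMode as: 0 = follow the
        # global settings, 1 = stop seeding at seedRatioLimit, 2 = seed
        # regardless of ratio. The mapping below follows that convention.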
if sickbeard.TORRENT_RATIO == '':
# Use global settings
ratio = None
mode = 0
elif float(sickbeard.TORRENT_RATIO) == 0:
ratio = 0
mode = 2
elif float(sickbeard.TORRENT_RATIO) > 0:
ratio = float(sickbeard.TORRENT_RATIO)
mode = 1 # Stop seeding at seedRatioLimit
arguments = { 'ids': [torrent_id],
'seedRatioLimit': ratio,
'seedRatioMode': mode
}
post_data = json.dumps({'arguments': arguments,
'method': 'torrent-set',
})
self._request(method='post', data=post_data)
return self.response.json['result'] == "success"
api = TransmissionAPI()
| gpl-3.0 | 8,628,570,561,661,526,000 | 36.342342 | 132 | 0.552473 | false |
kevclarx/ansible | lib/ansible/modules/packaging/os/openbsd_pkg.py | 50 | 26182 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Patrik Lundin <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: openbsd_pkg
author: "Patrik Lundin (@eest)"
version_added: "1.1"
short_description: Manage packages on OpenBSD.
description:
- Manage packages on OpenBSD using the pkg tools.
requirements: [ "python >= 2.5" ]
options:
name:
required: true
description:
- Name of the package.
state:
required: true
choices: [ present, latest, absent ]
description:
- C(present) will make sure the package is installed.
C(latest) will make sure the latest version of the package is installed.
C(absent) will make sure the specified package is not installed.
build:
required: false
choices: [ yes, no ]
default: no
description:
- Build the package from source instead of downloading and installing
a binary. Requires that the port source tree is already installed.
Automatically builds and installs the 'sqlports' package, if it is
not already installed.
version_added: "2.1"
ports_dir:
required: false
default: /usr/ports
description:
- When used in combination with the 'build' option, allows overriding
the default ports source directory.
version_added: "2.1"
clean:
required: false
choices: [ yes, no ]
default: no
description:
- When updating or removing packages, delete the extra configuration
file(s) in the old packages which are annotated with @extra in
the packaging-list.
version_added: "2.3"
quick:
required: false
choices: [ yes, no ]
default: no
description:
- Replace or delete packages quickly; do not bother with checksums
before removing normal files.
version_added: "2.3"
'''
EXAMPLES = '''
# Make sure nmap is installed
- openbsd_pkg:
name: nmap
state: present
# Make sure nmap is the latest version
- openbsd_pkg:
name: nmap
state: latest
# Make sure nmap is not installed
- openbsd_pkg:
name: nmap
state: absent
# Make sure nmap is installed, build it from source if it is not
- openbsd_pkg:
name: nmap
state: present
build: yes
# Specify a pkg flavour with '--'
- openbsd_pkg:
name: vim--no_x11
state: present
# Specify the default flavour to avoid ambiguity errors
- openbsd_pkg:
name: vim--
state: present
# Specify a package branch (requires at least OpenBSD 6.0)
- openbsd_pkg:
name: python%3.5
state: present
# Update all packages on the system
- openbsd_pkg:
name: '*'
state: latest
# Purge a package and its configuration files
- openbsd_pkg:
    name: mpd
    clean: yes
    state: absent

# Quickly remove a package without checking checksums
- openbsd_pkg:
    name: qt5
    quick: yes
    state: absent
'''
import os
import platform
import re
import shlex
import sqlite3
from distutils.version import StrictVersion
# Function used for executing commands.
def execute_command(cmd, module):
# Break command line into arguments.
# This makes run_command() use shell=False which we need to not cause shell
# expansion of special characters like '*'.
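    # For example (illustrative): 'pkg_info -Iq inst:nmap-*' becomes
    # ['pkg_info', '-Iq', 'inst:nmap-*'] and the glob reaches pkg_info intact.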
cmd_args = shlex.split(cmd)
return module.run_command(cmd_args)
# Function used to find out if a package is currently installed.
def get_package_state(names, pkg_spec, module):
info_cmd = 'pkg_info -Iq'
for name in names:
command = "%s inst:%s" % (info_cmd, name)
rc, stdout, stderr = execute_command(command, module)
if stderr:
module.fail_json(msg="failed in get_package_state(): " + stderr)
if stdout:
# If the requested package name is just a stem, like "python", we may
# find multiple packages with that name.
pkg_spec[name]['installed_names'] = [installed_name for installed_name in stdout.splitlines()]
module.debug("get_package_state(): installed_names = %s" % pkg_spec[name]['installed_names'])
pkg_spec[name]['installed_state'] = True
else:
pkg_spec[name]['installed_state'] = False
# Function used to make sure a package is present.
def package_present(names, pkg_spec, module):
build = module.params['build']
for name in names:
# It is possible package_present() has been called from package_latest().
# In that case we do not want to operate on the whole list of names,
# only the leftovers.
if pkg_spec['package_latest_leftovers']:
if name not in pkg_spec['package_latest_leftovers']:
module.debug("package_present(): ignoring '%s' which is not a package_latest() leftover" % name)
continue
else:
module.debug("package_present(): handling package_latest() leftovers, installing '%s'" % name)
if module.check_mode:
install_cmd = 'pkg_add -Imn'
else:
if build is True:
port_dir = "%s/%s" % (module.params['ports_dir'], get_package_source_path(name, pkg_spec, module))
if os.path.isdir(port_dir):
if pkg_spec[name]['flavor']:
flavors = pkg_spec[name]['flavor'].replace('-', ' ')
install_cmd = "cd %s && make clean=depends && FLAVOR=\"%s\" make install && make clean=depends" % (port_dir, flavors)
elif pkg_spec[name]['subpackage']:
install_cmd = "cd %s && make clean=depends && SUBPACKAGE=\"%s\" make install && make clean=depends" % (port_dir,
pkg_spec[name]['subpackage'])
else:
install_cmd = "cd %s && make install && make clean=depends" % (port_dir)
else:
module.fail_json(msg="the port source directory %s does not exist" % (port_dir))
else:
install_cmd = 'pkg_add -Im'
if pkg_spec[name]['installed_state'] is False:
# Attempt to install the package
if build is True and not module.check_mode:
                # Note: run_command() takes the command itself, not the module object.
                (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = module.run_command(install_cmd, use_unsafe_shell=True)
else:
(pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (install_cmd, name), module)
# The behaviour of pkg_add is a bit different depending on if a
# specific version is supplied or not.
#
# When a specific version is supplied the return code will be 0 when
# a package is found and 1 when it is not. If a version is not
# supplied the tool will exit 0 in both cases.
#
# It is important to note that "version" relates to the
# packages-specs(7) notion of a version. If using the branch syntax
# (like "python%3.5") the version number is considered part of the
# stem, and the pkg_add behavior behaves the same as if the name did
# not contain a version (which it strictly speaking does not).
if pkg_spec[name]['version'] or build is True:
# Depend on the return code.
module.debug("package_present(): depending on return code for name '%s'" % name)
if pkg_spec[name]['rc']:
pkg_spec[name]['changed'] = False
else:
# Depend on stderr instead.
module.debug("package_present(): depending on stderr for name '%s'" % name)
if pkg_spec[name]['stderr']:
# There is a corner case where having an empty directory in
# installpath prior to the right location will result in a
# "file:/local/package/directory/ is empty" message on stderr
# while still installing the package, so we need to look for
# for a message like "packagename-1.0: ok" just in case.
if pkg_spec[name]['style'] == 'branch':
match = re.search("\W%s-[^:]+: ok\W" % pkg_spec[name]['pkgname'], pkg_spec[name]['stdout'])
else:
match = re.search("\W%s-[^:]+: ok\W" % name, pkg_spec[name]['stdout'])
if match:
# It turns out we were able to install the package.
module.debug("package_present(): we were able to install package for name '%s'" % name)
else:
# We really did fail, fake the return code.
module.debug("package_present(): we really did fail for name '%s'" % name)
pkg_spec[name]['rc'] = 1
pkg_spec[name]['changed'] = False
else:
module.debug("package_present(): stderr was not set for name '%s'" % name)
if pkg_spec[name]['rc'] == 0:
pkg_spec[name]['changed'] = True
else:
pkg_spec[name]['rc'] = 0
pkg_spec[name]['stdout'] = ''
pkg_spec[name]['stderr'] = ''
pkg_spec[name]['changed'] = False
# Function used to make sure a package is the latest available version.
def package_latest(names, pkg_spec, module):
if module.params['build'] is True:
module.fail_json(msg="the combination of build=%s and state=latest is not supported" % module.params['build'])
upgrade_cmd = 'pkg_add -um'
if module.check_mode:
upgrade_cmd += 'n'
if module.params['clean']:
upgrade_cmd += 'c'
if module.params['quick']:
upgrade_cmd += 'q'
for name in names:
if pkg_spec[name]['installed_state'] is True:
# Attempt to upgrade the package.
(pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (upgrade_cmd, name), module)
# Look for output looking something like "nmap-6.01->6.25: ok" to see if
# something changed (or would have changed). Use \W to delimit the match
# from progress meter output.
pkg_spec[name]['changed'] = False
for installed_name in pkg_spec[name]['installed_names']:
module.debug("package_latest(): checking for pre-upgrade package name: %s" % installed_name)
match = re.search("\W%s->.+: ok\W" % installed_name, pkg_spec[name]['stdout'])
if match:
module.debug("package_latest(): pre-upgrade package name match: %s" % installed_name)
pkg_spec[name]['changed'] = True
break
# FIXME: This part is problematic. Based on the issues mentioned (and
# handled) in package_present() it is not safe to blindly trust stderr
# as an indicator that the command failed, and in the case with
# empty installpath directories this will break.
#
# For now keep this safeguard here, but ignore it if we managed to
# parse out a successful update above. This way we will report a
# successful run when we actually modify something but fail
# otherwise.
if pkg_spec[name]['changed'] is not True:
if pkg_spec[name]['stderr']:
pkg_spec[name]['rc'] = 1
else:
# Note packages that need to be handled by package_present
module.debug("package_latest(): package '%s' is not installed, will be handled by package_present()" % name)
pkg_spec['package_latest_leftovers'].append(name)
# If there were any packages that were not installed we call
# package_present() which will handle those.
if pkg_spec['package_latest_leftovers']:
module.debug("package_latest(): calling package_present() to handle leftovers")
package_present(names, pkg_spec, module)
# Function used to make sure a package is not installed.
def package_absent(names, pkg_spec, module):
remove_cmd = 'pkg_delete -I'
if module.check_mode:
remove_cmd += 'n'
if module.params['clean']:
remove_cmd += 'c'
if module.params['quick']:
remove_cmd += 'q'
for name in names:
if pkg_spec[name]['installed_state'] is True:
# Attempt to remove the package.
(pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (remove_cmd, name), module)
if pkg_spec[name]['rc'] == 0:
pkg_spec[name]['changed'] = True
else:
pkg_spec[name]['changed'] = False
else:
pkg_spec[name]['rc'] = 0
pkg_spec[name]['stdout'] = ''
pkg_spec[name]['stderr'] = ''
pkg_spec[name]['changed'] = False
# Function used to parse the package name based on packages-specs(7).
# The general name structure is "stem-version[-flavors]".
#
# Names containing "%" are a special variation not part of the
# packages-specs(7) syntax. See pkg_add(1) on OpenBSD 6.0 or later for a
# description.
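#
# Illustrative examples (assumed, not taken from pkg_add output):
#   "nmap-6.25"    -> stem "nmap", version "6.25"
#   "vim--no_x11"  -> version-less stem "vim" with flavor "no_x11"
#   "python%3.5"   -> 'branch' syntax, pkgname "python", branch "3.5"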
def parse_package_name(names, pkg_spec, module):
# Initialize empty list of package_latest() leftovers.
pkg_spec['package_latest_leftovers'] = []
for name in names:
module.debug("parse_package_name(): parsing name: %s" % name)
# Do some initial matches so we can base the more advanced regex on that.
version_match = re.search("-[0-9]", name)
versionless_match = re.search("--", name)
# Stop if someone is giving us a name that both has a version and is
# version-less at the same time.
if version_match and versionless_match:
module.fail_json(msg="package name both has a version and is version-less: " + name)
# All information for a given name is kept in the pkg_spec keyed by that name.
pkg_spec[name] = {}
# If name includes a version.
if version_match:
match = re.search("^(?P<stem>.*)-(?P<version>[0-9][^-]*)(?P<flavor_separator>-)?(?P<flavor>[a-z].*)?$", name)
if match:
pkg_spec[name]['stem'] = match.group('stem')
pkg_spec[name]['version_separator'] = '-'
pkg_spec[name]['version'] = match.group('version')
pkg_spec[name]['flavor_separator'] = match.group('flavor_separator')
pkg_spec[name]['flavor'] = match.group('flavor')
pkg_spec[name]['style'] = 'version'
else:
module.fail_json(msg="unable to parse package name at version_match: " + name)
# If name includes no version but is version-less ("--").
elif versionless_match:
match = re.search("^(?P<stem>.*)--(?P<flavor>[a-z].*)?$", name)
if match:
pkg_spec[name]['stem'] = match.group('stem')
pkg_spec[name]['version_separator'] = '-'
pkg_spec[name]['version'] = None
pkg_spec[name]['flavor_separator'] = '-'
pkg_spec[name]['flavor'] = match.group('flavor')
pkg_spec[name]['style'] = 'versionless'
else:
module.fail_json(msg="unable to parse package name at versionless_match: " + name)
# If name includes no version, and is not version-less, it is all a stem.
else:
match = re.search("^(?P<stem>.*)$", name)
if match:
pkg_spec[name]['stem'] = match.group('stem')
pkg_spec[name]['version_separator'] = None
pkg_spec[name]['version'] = None
pkg_spec[name]['flavor_separator'] = None
pkg_spec[name]['flavor'] = None
pkg_spec[name]['style'] = 'stem'
else:
module.fail_json(msg="unable to parse package name at else: " + name)
# If the stem contains an "%" then it needs special treatment.
branch_match = re.search("%", pkg_spec[name]['stem'])
if branch_match:
branch_release = "6.0"
if version_match or versionless_match:
module.fail_json(msg="package name using 'branch' syntax also has a version or is version-less: " + name)
if StrictVersion(platform.release()) < StrictVersion(branch_release):
module.fail_json(msg="package name using 'branch' syntax requires at least OpenBSD %s: %s" % (branch_release, name))
pkg_spec[name]['style'] = 'branch'
# Key names from description in pkg_add(1).
pkg_spec[name]['pkgname'] = pkg_spec[name]['stem'].split('%')[0]
pkg_spec[name]['branch'] = pkg_spec[name]['stem'].split('%')[1]
# Sanity check that there are no trailing dashes in flavor.
# Try to stop strange stuff early so we can be strict later.
if pkg_spec[name]['flavor']:
match = re.search("-$", pkg_spec[name]['flavor'])
if match:
module.fail_json(msg="trailing dash in flavor: " + pkg_spec[name]['flavor'])
# Function used for figuring out the port path.
def get_package_source_path(name, pkg_spec, module):
pkg_spec[name]['subpackage'] = None
if pkg_spec[name]['stem'] == 'sqlports':
return 'databases/sqlports'
else:
# try for an exact match first
sqlports_db_file = '/usr/local/share/sqlports'
if not os.path.isfile(sqlports_db_file):
module.fail_json(msg="sqlports file '%s' is missing" % sqlports_db_file)
conn = sqlite3.connect(sqlports_db_file)
first_part_of_query = 'SELECT fullpkgpath, fullpkgname FROM ports WHERE fullpkgname'
query = first_part_of_query + ' = ?'
module.debug("package_package_source_path(): exact query: %s" % query)
cursor = conn.execute(query, (name,))
results = cursor.fetchall()
# next, try for a fuzzier match
if len(results) < 1:
looking_for = pkg_spec[name]['stem'] + (pkg_spec[name]['version_separator'] or '-') + (pkg_spec[name]['version'] or '%')
query = first_part_of_query + ' LIKE ?'
if pkg_spec[name]['flavor']:
looking_for += pkg_spec[name]['flavor_separator'] + pkg_spec[name]['flavor']
module.debug("package_package_source_path(): fuzzy flavor query: %s" % query)
cursor = conn.execute(query, (looking_for,))
elif pkg_spec[name]['style'] == 'versionless':
query += ' AND fullpkgname NOT LIKE ?'
module.debug("package_package_source_path(): fuzzy versionless query: %s" % query)
cursor = conn.execute(query, (looking_for, "%s-%%" % looking_for,))
else:
module.debug("package_package_source_path(): fuzzy query: %s" % query)
cursor = conn.execute(query, (looking_for,))
results = cursor.fetchall()
# error if we don't find exactly 1 match
conn.close()
if len(results) < 1:
module.fail_json(msg="could not find a port by the name '%s'" % name)
if len(results) > 1:
matches = map(lambda x:x[1], results)
module.fail_json(msg="too many matches, unsure which to build: %s" % ' OR '.join(matches))
# there's exactly 1 match, so figure out the subpackage, if any, then return
fullpkgpath = results[0][0]
parts = fullpkgpath.split(',')
if len(parts) > 1 and parts[1][0] == '-':
pkg_spec[name]['subpackage'] = parts[1]
return parts[0]
# Function used for upgrading all installed packages.
def upgrade_packages(pkg_spec, module):
if module.check_mode:
upgrade_cmd = 'pkg_add -Imnu'
else:
upgrade_cmd = 'pkg_add -Imu'
# Create a minimal pkg_spec entry for '*' to store return values.
pkg_spec['*'] = {}
# Attempt to upgrade all packages.
pkg_spec['*']['rc'], pkg_spec['*']['stdout'], pkg_spec['*']['stderr'] = execute_command("%s" % upgrade_cmd, module)
# Try to find any occurrence of a package changing version like:
# "bzip2-1.0.6->1.0.6p0: ok".
match = re.search("\W\w.+->.+: ok\W", pkg_spec['*']['stdout'])
if match:
pkg_spec['*']['changed'] = True
else:
pkg_spec['*']['changed'] = False
# It seems we can not trust the return value, so depend on the presence of
# stderr to know if something failed.
if pkg_spec['*']['stderr']:
pkg_spec['*']['rc'] = 1
else:
pkg_spec['*']['rc'] = 0
# ===========================================
# Main control flow.
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True, type='list'),
state = dict(required=True, choices=['absent', 'installed', 'latest', 'present', 'removed']),
build = dict(default='no', type='bool'),
ports_dir = dict(default='/usr/ports'),
quick = dict(default='no', type='bool'),
clean = dict(default='no', type='bool')
),
supports_check_mode = True
)
name = module.params['name']
state = module.params['state']
build = module.params['build']
ports_dir = module.params['ports_dir']
rc = 0
stdout = ''
stderr = ''
result = {}
result['name'] = name
result['state'] = state
result['build'] = build
# The data structure used to keep track of package information.
pkg_spec = {}
if build is True:
if not os.path.isdir(ports_dir):
module.fail_json(msg="the ports source directory %s does not exist" % (ports_dir))
# build sqlports if its not installed yet
parse_package_name(['sqlports'], pkg_spec, module)
get_package_state(['sqlports'], pkg_spec, module)
if not pkg_spec['sqlports']['installed_state']:
module.debug("main(): installing 'sqlports' because build=%s" % module.params['build'])
package_present(['sqlports'], pkg_spec, module)
asterisk_name = False
for n in name:
if n == '*':
if len(name) != 1:
module.fail_json(msg="the package name '*' can not be mixed with other names")
asterisk_name = True
if asterisk_name:
if state != 'latest':
module.fail_json(msg="the package name '*' is only valid when using state=latest")
else:
# Perform an upgrade of all installed packages.
upgrade_packages(pkg_spec, module)
else:
# Parse package names and put results in the pkg_spec dictionary.
parse_package_name(name, pkg_spec, module)
# Not sure how the branch syntax is supposed to play together
# with build mode. Disable it for now.
for n in name:
if pkg_spec[n]['style'] == 'branch' and module.params['build'] is True:
module.fail_json(msg="the combination of 'branch' syntax and build=%s is not supported: %s" % (module.params['build'], n))
# Get state for all package names.
get_package_state(name, pkg_spec, module)
# Perform requested action.
if state in ['installed', 'present']:
package_present(name, pkg_spec, module)
elif state in ['absent', 'removed']:
package_absent(name, pkg_spec, module)
elif state == 'latest':
package_latest(name, pkg_spec, module)
# The combined changed status for all requested packages. If anything
# is changed this is set to True.
combined_changed = False
# We combine all error messages in this comma separated string, for example:
# "msg": "Can't find nmapp\n, Can't find nmappp\n"
combined_error_message = ''
# Loop over all requested package names and check if anything failed or
# changed.
for n in name:
if pkg_spec[n]['rc'] != 0:
if pkg_spec[n]['stderr']:
if combined_error_message:
combined_error_message += ", %s" % pkg_spec[n]['stderr']
else:
combined_error_message = pkg_spec[n]['stderr']
else:
if combined_error_message:
combined_error_message += ", %s" % pkg_spec[n]['stdout']
else:
combined_error_message = pkg_spec[n]['stdout']
if pkg_spec[n]['changed'] is True:
combined_changed = True
# If combined_error_message contains anything at least some part of the
# list of requested package names failed.
if combined_error_message:
module.fail_json(msg=combined_error_message)
result['changed'] = combined_changed
module.exit_json(**result)
# Import module snippets.
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 | -8,888,128,555,258,696,000 | 40.231496 | 156 | 0.573524 | false |
12mo2525/angularMail | node_modules/node-gyp/gyp/gyptest.py | 1752 | 8019 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__doc__ = """
gyptest.py -- test runner for GYP tests.
"""
import os
import optparse
import shlex
import subprocess
import sys
class CommandRunner(object):
"""
Executor class for commands, including "commands" implemented by
Python functions.
"""
verbose = True
active = True
def __init__(self, dictionary={}):
self.subst_dictionary(dictionary)
def subst_dictionary(self, dictionary):
self._subst_dictionary = dictionary
def subst(self, string, dictionary=None):
"""
Substitutes (via the format operator) the values in the specified
dictionary into the specified command.
The command can be an (action, string) tuple. In all cases, we
perform substitution on strings and don't worry if something isn't
a string. (It's probably a Python function to be executed.)
"""
if dictionary is None:
dictionary = self._subst_dictionary
if dictionary:
try:
string = string % dictionary
except TypeError:
pass
return string
def display(self, command, stdout=None, stderr=None):
if not self.verbose:
return
if type(command) == type(()):
func = command[0]
args = command[1:]
s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args)))
    elif type(command) == type([]):
# TODO: quote arguments containing spaces
# TODO: handle meta characters?
s = ' '.join(command)
else:
s = self.subst(command)
if not s.endswith('\n'):
s += '\n'
sys.stdout.write(s)
sys.stdout.flush()
def execute(self, command, stdout=None, stderr=None):
"""
Executes a single command.
"""
if not self.active:
return 0
if type(command) == type(''):
command = self.subst(command)
cmdargs = shlex.split(command)
if cmdargs[0] == 'cd':
command = (os.chdir,) + tuple(cmdargs[1:])
if type(command) == type(()):
func = command[0]
args = command[1:]
return func(*args)
else:
if stdout is sys.stdout:
# Same as passing sys.stdout, except python2.4 doesn't fail on it.
subout = None
else:
# Open pipe for anything else so Popen works on python2.4.
subout = subprocess.PIPE
if stderr is sys.stderr:
# Same as passing sys.stderr, except python2.4 doesn't fail on it.
suberr = None
elif stderr is None:
# Merge with stdout if stderr isn't specified.
suberr = subprocess.STDOUT
else:
# Open pipe for anything else so Popen works on python2.4.
suberr = subprocess.PIPE
p = subprocess.Popen(command,
shell=(sys.platform == 'win32'),
stdout=subout,
stderr=suberr)
p.wait()
if stdout is None:
self.stdout = p.stdout.read()
elif stdout is not sys.stdout:
stdout.write(p.stdout.read())
if stderr not in (None, sys.stderr):
stderr.write(p.stderr.read())
return p.returncode
def run(self, command, display=None, stdout=None, stderr=None):
"""
Runs a single command, displaying it first.
"""
if display is None:
display = command
self.display(display)
return self.execute(command, stdout, stderr)
class Unbuffered(object):
def __init__(self, fp):
self.fp = fp
def write(self, arg):
self.fp.write(arg)
self.fp.flush()
def __getattr__(self, attr):
return getattr(self.fp, attr)
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
def is_test_name(f):
return f.startswith('gyptest') and f.endswith('.py')
def find_all_gyptest_files(directory):
result = []
for root, dirs, files in os.walk(directory):
if '.svn' in dirs:
dirs.remove('.svn')
result.extend([ os.path.join(root, f) for f in files if is_test_name(f) ])
result.sort()
return result
def main(argv=None):
if argv is None:
argv = sys.argv
usage = "gyptest.py [-ahlnq] [-f formats] [test ...]"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-a", "--all", action="store_true",
help="run all tests")
parser.add_option("-C", "--chdir", action="store", default=None,
help="chdir to the specified directory")
parser.add_option("-f", "--format", action="store", default='',
help="run tests with the specified formats")
parser.add_option("-G", '--gyp_option', action="append", default=[],
help="Add -G options to the gyp command line")
parser.add_option("-l", "--list", action="store_true",
help="list available tests and exit")
parser.add_option("-n", "--no-exec", action="store_true",
help="no execute, just print the command line")
parser.add_option("--passed", action="store_true",
help="report passed tests")
parser.add_option("--path", action="append", default=[],
help="additional $PATH directory")
parser.add_option("-q", "--quiet", action="store_true",
help="quiet, don't print test command lines")
opts, args = parser.parse_args(argv[1:])
if opts.chdir:
os.chdir(opts.chdir)
if opts.path:
extra_path = [os.path.abspath(p) for p in opts.path]
extra_path = os.pathsep.join(extra_path)
os.environ['PATH'] = extra_path + os.pathsep + os.environ['PATH']
if not args:
if not opts.all:
sys.stderr.write('Specify -a to get all tests.\n')
return 1
args = ['test']
tests = []
for arg in args:
if os.path.isdir(arg):
tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
else:
if not is_test_name(os.path.basename(arg)):
print >>sys.stderr, arg, 'is not a valid gyp test name.'
sys.exit(1)
tests.append(arg)
if opts.list:
for test in tests:
print test
sys.exit(0)
CommandRunner.verbose = not opts.quiet
CommandRunner.active = not opts.no_exec
cr = CommandRunner()
os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
if not opts.quiet:
sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH'])
passed = []
failed = []
no_result = []
if opts.format:
format_list = opts.format.split(',')
else:
# TODO: not duplicate this mapping from pylib/gyp/__init__.py
format_list = {
'aix5': ['make'],
'freebsd7': ['make'],
'freebsd8': ['make'],
'openbsd5': ['make'],
'cygwin': ['msvs'],
'win32': ['msvs', 'ninja'],
'linux2': ['make', 'ninja'],
'linux3': ['make', 'ninja'],
'darwin': ['make', 'ninja', 'xcode', 'xcode-ninja'],
}[sys.platform]
for format in format_list:
os.environ['TESTGYP_FORMAT'] = format
if not opts.quiet:
sys.stdout.write('TESTGYP_FORMAT=%s\n' % format)
gyp_options = []
for option in opts.gyp_option:
gyp_options += ['-G', option]
if gyp_options and not opts.quiet:
sys.stdout.write('Extra Gyp options: %s\n' % gyp_options)
for test in tests:
status = cr.run([sys.executable, test] + gyp_options,
stdout=sys.stdout,
stderr=sys.stderr)
if status == 2:
no_result.append(test)
elif status:
failed.append(test)
else:
passed.append(test)
if not opts.quiet:
def report(description, tests):
if tests:
if len(tests) == 1:
sys.stdout.write("\n%s the following test:\n" % description)
else:
fmt = "\n%s the following %d tests:\n"
sys.stdout.write(fmt % (description, len(tests)))
sys.stdout.write("\t" + "\n\t".join(tests) + "\n")
if opts.passed:
report("Passed", passed)
report("Failed", failed)
report("No result from", no_result)
if failed:
return 1
else:
return 0
if __name__ == "__main__":
sys.exit(main())
| mit | 3,921,452,036,828,571,000 | 28.266423 | 78 | 0.59808 | false |
CadishShank/Ardour-5.0.2.0-Cad | tools/cstyle.py | 12 | 9136 | #!/usr/bin/python -tt
#
# Copyright (C) 2005-2012 Erik de Castro Lopo <[email protected]>
#
# Released under the 2 clause BSD license.
"""
This program checks C code for compliance to coding standards used in
libsndfile and other projects I run.
"""
import re
import sys
class Preprocessor:
"""
Preprocess lines of C code to make it easier for the CStyleChecker class to
test for correctness. Preprocessing works on a single line at a time but
maintains state between consecutive lines so it can preprocessess multi-line
comments.
Preprocessing involves:
- Strip C++ style comments from a line.
- Strip C comments from a series of lines. When a C comment starts and
ends on the same line it will be replaced with 'comment'.
- Replace arbitrary C strings with the zero length string.
- Replace '#define f(x)' with '#define f (c)' (The C #define requires that
there be no space between defined macro name and the open paren of the
argument list).
Used by the CStyleChecker class.
"""
def __init__ (self):
self.comment_nest = 0
self.leading_space_re = re.compile ('^(\t+| )')
self.trailing_space_re = re.compile ('(\t+| )$')
self.define_hack_re = re.compile ("(#\s*define\s+[a-zA-Z0-9_]+)\(")
def comment_nesting (self):
"""
		Return the current comment nesting. At the start and end of the file,
this value should be zero. Inside C comments it should be 1 or
(possibly) more.
"""
return self.comment_nest
def __call__ (self, line):
"""
Strip the provided line of C and C++ comments. Stripping of multi-line
C comments works as expected.
"""
line = self.define_hack_re.sub (r'\1 (', line)
line = self.process_strings (line)
# Strip C++ style comments.
if self.comment_nest == 0:
line = re.sub ("( |\t*)//.*", '', line)
# Strip C style comments.
open_comment = line.find ('/*')
close_comment = line.find ('*/')
if self.comment_nest > 0 and close_comment < 0:
# Inside a comment block that does not close on this line.
return ""
if open_comment >= 0 and close_comment < 0:
# A comment begins on this line but doesn't close on this line.
self.comment_nest += 1
return self.trailing_space_re.sub ('', line [:open_comment])
if open_comment < 0 and close_comment >= 0:
# Currently open comment ends on this line.
self.comment_nest -= 1
return self.trailing_space_re.sub ('', line [close_comment + 2:])
if open_comment >= 0 and close_comment > 0 and self.comment_nest == 0:
# Comment begins and ends on this line. Replace it with 'comment'
# so we don't need to check whitespace before and after the comment
# we're removing.
newline = line [:open_comment] + "comment" + line [close_comment + 2:]
return self.__call__ (newline)
return line
def process_strings (self, line):
"""
Given a line of C code, return a string where all literal C strings have
been replaced with the empty string literal "".
"""
for k in range (0, len (line)):
if line [k] == '"':
start = k
for k in range (start + 1, len (line)):
if line [k] == '"' and line [k - 1] != '\\':
return line [:start + 1] + '"' + self.process_strings (line [k + 1:])
return line
class CStyleChecker:
"""
A class for checking the whitespace and layout of a C code.
"""
def __init__ (self, debug):
self.debug = debug
self.filename = None
self.error_count = 0
self.line_num = 1
self.orig_line = ''
self.trailing_newline_re = re.compile ('[\r\n]+$')
self.indent_re = re.compile ("^\s*")
self.last_line_indent = ""
self.last_line_indent_curly = False
self.error_checks = \
[ ( re.compile ("^ "), "leading space as indentation instead of tab - use tabs to indent, spaces to align" )
]
self.warning_checks = \
[ ( re.compile ("{[^\s]"), "missing space after open brace" )
, ( re.compile ("[^\s]}"), "missing space before close brace" )
, ( re.compile ("^[ \t]+$"), "empty line contains whitespace" )
, ( re.compile ("[^\s][ \t]+$"), "contains trailing whitespace" )
, ( re.compile (",[^\s\n]"), "missing space after comma" )
, ( re.compile (";[a-zA-Z0-9]"), "missing space after semi-colon" )
, ( re.compile ("=[^\s\"'=]"), "missing space after assignment" )
# Open and close parenthesis.
, ( re.compile ("[^_\s\(\[\*&']\("), "missing space before open parenthesis" )
, ( re.compile ("\)(-[^>]|[^;,'\s\n\)\]-])"), "missing space after close parenthesis" )
, ( re.compile ("\( [^;]"), "space after open parenthesis" )
, ( re.compile ("[^;] \)"), "space before close parenthesis" )
# Open and close square brace.
, ( re.compile ("\[ "), "space after open square brace" )
, ( re.compile (" \]"), "space before close square brace" )
# Space around operators.
, ( re.compile ("[^\s][\*/%+-][=][^\s]"), "missing space around opassign" )
, ( re.compile ("[^\s][<>!=^/][=]{1,2}[^\s]"), "missing space around comparison" )
# Parens around single argument to return.
, ( re.compile ("\s+return\s+\([a-zA-Z0-9_]+\)\s+;"), "parens around return value" )
]
def get_error_count (self):
"""
Return the current error count for this CStyleChecker object.
"""
return self.error_count
def check_files (self, files):
"""
Run the style checker on all the specified files.
"""
for filename in files:
self.check_file (filename)
def check_file (self, filename):
"""
Run the style checker on the specified file.
"""
self.filename = filename
try:
cfile = open (filename, "r")
except IOError as e:
return
self.line_num = 1
preprocess = Preprocessor ()
while 1:
line = cfile.readline ()
if not line:
break
line = self.trailing_newline_re.sub ('', line)
self.orig_line = line
self.line_checks (preprocess (line))
self.line_num += 1
cfile.close ()
self.filename = None
# Check for errors finding comments.
if preprocess.comment_nesting () != 0:
print ("Weird, comments nested incorrectly.")
sys.exit (1)
return
def line_checks (self, line):
"""
		Run the style checker on the provided line of text, but within the context
of how the line fits within the file.
"""
indent = len (self.indent_re.search (line).group ())
if re.search ("^\s+}", line):
if not self.last_line_indent_curly and indent != self.last_line_indent:
				pass # self.error ("bad indent on close curly brace")
self.last_line_indent_curly = True
else:
self.last_line_indent_curly = False
# Now all the stylistic warnings regex checks.
for (check_re, msg) in self.warning_checks:
if check_re.search (line):
self.warning (msg)
# Now all the stylistic error regex checks.
for (check_re, msg) in self.error_checks:
if check_re.search (line):
self.error (msg)
if re.search ("[a-zA-Z0-9_][<>!=^/&\|]{1,2}[a-zA-Z0-9_]", line):
# ignore #include <foo.h> and C++ templates with indirection/pointer/reference operators
if not re.search (".*#include.*[a-zA-Z0-9]/[a-zA-Z]", line) and not re.search ("[a-zA-Z0-9_]>[&\*]*\s", line):
self.error ("missing space around operator")
self.last_line_indent = indent
return
def error (self, msg):
"""
Print an error message and increment the error count.
"""
print ("%s (%d) : STYLE ERROR %s" % (self.filename, self.line_num, msg))
if self.debug:
print ("'" + self.orig_line + "'")
self.error_count += 1
def warning (self, msg):
"""
Print a warning message and increment the error count.
"""
print ("%s (%d) : STYLE WARNING %s" % (self.filename, self.line_num, msg))
if self.debug:
print ("'" + self.orig_line + "'")
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
if len (sys.argv) < 2:
	print ("Usage : %s [-d | --debug] <C files to check>" % sys.argv [0])
	sys.exit (1)
# Create a new CStyleChecker object
if sys.argv [1] == '-d' or sys.argv [1] == '--debug':
cstyle = CStyleChecker (True)
cstyle.check_files (sys.argv [2:])
else:
cstyle = CStyleChecker (False)
cstyle.check_files (sys.argv [1:])
if cstyle.get_error_count ():
sys.exit (1)
sys.exit (0)
| gpl-2.0 | 4,316,457,760,109,447,700 | 33.606061 | 160 | 0.551226 | false |
bwsblake/lettercounter | django-norel-env/lib/python2.7/site-packages/django/db/models/sql/datastructures.py | 118 | 1115 | """
Useful auxiliary data structures for query construction. Not useful outside
the SQL domain.
"""
class EmptyResultSet(Exception):
pass
class MultiJoin(Exception):
"""
Used by join construction code to indicate the point at which a
multi-valued join was attempted (if the caller wants to treat that
exceptionally).
"""
def __init__(self, level):
self.level = level
class Empty(object):
pass
class RawValue(object):
def __init__(self, value):
self.value = value
class Date(object):
"""
Add a date selection column.
"""
def __init__(self, col, lookup_type):
self.col = col
self.lookup_type = lookup_type
def relabel_aliases(self, change_map):
c = self.col
if isinstance(c, (list, tuple)):
self.col = (change_map.get(c[0], c[0]), c[1])
def as_sql(self, qn, connection):
if isinstance(self.col, (list, tuple)):
col = '%s.%s' % tuple([qn(c) for c in self.col])
else:
col = self.col
return connection.ops.date_trunc_sql(self.lookup_type, col)
| mit | -4,795,290,361,349,960,000 | 24.930233 | 76 | 0.602691 | false |
rahimnathwani/django-countries | django_countries/ioc_data.py | 6 | 3977 | IOC_TO_ISO = {
'AFG': 'AF',
'ALB': 'AL',
'ALG': 'DZ',
'AND': 'AD',
'ANG': 'AO',
'ANT': 'AG',
'ARG': 'AR',
'ARM': 'AM',
'ARU': 'AW',
'ASA': 'AS',
'AUS': 'AU',
'AUT': 'AT',
'AZE': 'AZ',
'BAH': 'BS',
'BAN': 'BD',
'BAR': 'BB',
'BDI': 'BI',
'BEL': 'BE',
'BEN': 'BJ',
'BER': 'BM',
'BHU': 'BT',
'BIH': 'BA',
'BIZ': 'BZ',
'BLR': 'BY',
'BOL': 'BO',
'BOT': 'BW',
'BRA': 'BR',
'BRN': 'BH',
'BRU': 'BN',
'BUL': 'BG',
'BUR': 'BF',
'CAF': 'CF',
'CAM': 'KH',
'CAN': 'CA',
'CAY': 'KY',
'CGO': 'CG',
'CHA': 'TD',
'CHI': 'CL',
'CHN': 'CN',
'CIV': 'CI',
'CMR': 'CM',
'COD': 'CD',
'COK': 'CK',
'COL': 'CO',
'COM': 'KM',
'CPV': 'CV',
'CRC': 'CR',
'CRO': 'HR',
'CUB': 'CU',
'CYP': 'CY',
'CZE': 'CZ',
'DEN': 'DK',
'DJI': 'DJ',
'DMA': 'DM',
'DOM': 'DO',
'ECU': 'EC',
'EGY': 'EG',
'ERI': 'ER',
'ESA': 'SV',
'ESP': 'ES',
'EST': 'EE',
'ETH': 'ET',
'FIJ': 'FJ',
'FIN': 'FI',
'FRA': 'FR',
'FSM': 'FM',
'GAB': 'GA',
'GAM': 'GM',
'GBR': 'GB',
'GBS': 'GW',
'GEO': 'GE',
'GEQ': 'GQ',
'GER': 'DE',
'GHA': 'GH',
'GRE': 'GR',
'GRN': 'GD',
'GUA': 'GT',
'GUI': 'GN',
'GUM': 'GU',
'GUY': 'GY',
'HAI': 'HT',
'HKG': 'HK',
'HON': 'HN',
'HUN': 'HU',
'INA': 'ID',
'IND': 'IN',
'IRI': 'IR',
'IRL': 'IE',
'IRQ': 'IQ',
'ISL': 'IS',
'ISR': 'IL',
'ISV': 'VI',
'ITA': 'IT',
'IVB': 'VG',
'JAM': 'JM',
'JOR': 'JO',
'JPN': 'JP',
'KAZ': 'KZ',
'KEN': 'KE',
'KGZ': 'KG',
'KIR': 'KI',
'KOR': 'KR',
'KSA': 'SA',
'KUW': 'KW',
'LAO': 'LA',
'LAT': 'LV',
'LBA': 'LY',
'LBR': 'LR',
'LCA': 'LC',
'LES': 'LS',
'LIB': 'LB',
'LIE': 'LI',
'LTU': 'LT',
'LUX': 'LU',
'MAD': 'MG',
'MAR': 'MA',
'MAS': 'MY',
'MAW': 'MW',
'MDA': 'MD',
'MDV': 'MV',
'MEX': 'MX',
'MGL': 'MN',
'MHL': 'MH',
'MKD': 'MK',
'MLI': 'ML',
'MLT': 'MT',
'MNE': 'ME',
'MON': 'MC',
'MOZ': 'MZ',
'MRI': 'MU',
'MTN': 'MR',
'MYA': 'MM',
'NAM': 'NA',
'NCA': 'NI',
'NED': 'NL',
'NEP': 'NP',
'NGR': 'NG',
'NIG': 'NE',
'NOR': 'NO',
'NRU': 'NR',
'NZL': 'NZ',
'OMA': 'OM',
'PAK': 'PK',
'PAN': 'PA',
'PAR': 'PY',
'PER': 'PE',
'PHI': 'PH',
'PLE': 'PS',
'PLW': 'PW',
'PNG': 'PG',
'POL': 'PL',
'POR': 'PT',
'PRK': 'KP',
'PUR': 'PR',
'QAT': 'QA',
'ROU': 'RO',
'RSA': 'ZA',
'RUS': 'RU',
'RWA': 'RW',
'SAM': 'WS',
'SEN': 'SN',
'SEY': 'SC',
'SIN': 'SG',
'SKN': 'KN',
'SLE': 'SL',
'SLO': 'SI',
'SMR': 'SM',
'SOL': 'SB',
'SOM': 'SO',
'SRB': 'RS',
'SRI': 'LK',
'STP': 'ST',
'SUD': 'SD',
'SUI': 'CH',
'SUR': 'SR',
'SVK': 'SK',
'SWE': 'SE',
'SWZ': 'SZ',
'SYR': 'SY',
'TAN': 'TZ',
'TGA': 'TO',
'THA': 'TH',
'TJK': 'TJ',
'TKM': 'TM',
'TLS': 'TL',
'TOG': 'TG',
'TPE': 'TW',
'TTO': 'TT',
'TUN': 'TN',
'TUR': 'TR',
'TUV': 'TV',
'UAE': 'AE',
'UGA': 'UG',
'UKR': 'UA',
'URU': 'UY',
'USA': 'US',
'UZB': 'UZ',
'VAN': 'VU',
'VEN': 'VE',
'VIE': 'VN',
'VIN': 'VC',
'YEM': 'YE',
'ZAM': 'ZM',
'ZIM': 'ZW',
}
ISO_TO_IOC = dict((iso, ioc) for ioc, iso in IOC_TO_ISO.items())
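# e.g. IOC_TO_ISO['GER'] == 'DE', and therefore ISO_TO_IOC['DE'] == 'GER'.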
def check_ioc_countries(verbosity=1):
"""
Check if all IOC codes map to ISO codes correctly
"""
from django_countries.data import COUNTRIES
if verbosity: # pragma: no cover
print("Checking if all IOC codes map correctly")
for key in ISO_TO_IOC:
assert COUNTRIES.get(key), 'No ISO code for %s' % key
if verbosity: # pragma: no cover
print("Finished checking IOC codes")
| mit | -9,033,245,457,678,440,000 | 16.914414 | 64 | 0.338446 | false |
pytest-dev/pytest | doc/en/conf.py | 2 | 13237 | #
# pytest documentation build configuration file, created by
# sphinx-quickstart on Fri Oct 8 17:54:28 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
# The short X.Y version.
import ast
import os
import sys
from typing import List
from typing import TYPE_CHECKING
from _pytest import __version__ as version
if TYPE_CHECKING:
import sphinx.application
release = ".".join(version.split(".")[:2])
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
autodoc_member_order = "bysource"
autodoc_typehints = "description"
todo_include_todos = 1
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"pallets_sphinx_themes",
"pygments_pytest",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx_removed_in",
"sphinxcontrib_trio",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "contents"
# General information about the project.
project = "pytest"
copyright = "2015–2021, holger krekel and pytest-dev team"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
"_build",
"naming20.rst",
"test/*",
"old_*",
"*attic*",
"*/attic*",
"funcargs.rst",
"setup.rst",
"example/remoteinterp.rst",
]
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "literal"
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# A list of regular expressions that match URIs that should not be checked when
# doing a linkcheck.
linkcheck_ignore = [
"https://github.com/numpy/numpy/blob/master/doc/release/1.16.0-notes.rst#new-deprecations",
"https://blogs.msdn.microsoft.com/bharry/2017/06/28/testing-in-a-cloud-delivery-cadence/",
"http://pythontesting.net/framework/pytest-introduction/",
r"https://github.com/pytest-dev/pytest/issues/\d+",
r"https://github.com/pytest-dev/pytest/pull/\d+",
]
# The number of worker threads to use when checking links (default=5).
linkcheck_workers = 5
# -- Options for HTML output ---------------------------------------------------
sys.path.append(os.path.abspath("_themes"))
html_theme_path = ["_themes"]
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "flask"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {"index_logo": None}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "pytest documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "pytest-%s" % release
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "img/pytest_logo_curves.svg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "img/favicon.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# html_sidebars = {'index': 'indexsidebar.html'}
html_sidebars = {
"index": [
"slim_searchbox.html",
"sidebarintro.html",
"globaltoc.html",
"links.html",
"sourcelink.html",
],
"**": [
"slim_searchbox.html",
"globaltoc.html",
"relations.html",
"links.html",
"sourcelink.html",
],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# html_additional_pages = {'index': 'index.html'}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "pytestdoc"
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
"contents",
"pytest.tex",
"pytest Documentation",
"holger krekel, trainer and consultant, https://merlinux.eu/",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "img/pytest1.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("usage", "pytest", "pytest usage", ["holger krekel at merlinux eu"], 1)]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = "pytest"
epub_author = "holger krekel at merlinux eu"
epub_publisher = "holger krekel at merlinux eu"
epub_copyright = "2013-2021, holger krekel et alii"
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# -- Options for texinfo output ------------------------------------------------
texinfo_documents = [
(
master_doc,
"pytest",
"pytest Documentation",
(
"Holger Krekel@*Benjamin Peterson@*Ronny Pfannschmidt@*"
"Floris Bruynooghe@*others"
),
"pytest",
"simple powerful testing with Python",
"Programming",
1,
)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"pluggy": ("https://pluggy.readthedocs.io/en/latest", None),
"python": ("https://docs.python.org/3", None),
}
def configure_logging(app: "sphinx.application.Sphinx") -> None:
"""Configure Sphinx's WarningHandler to handle (expected) missing include."""
import sphinx.util.logging
import logging
class WarnLogFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
"""Ignore warnings about missing include with "only" directive.
Ref: https://github.com/sphinx-doc/sphinx/issues/2150."""
if (
record.msg.startswith('Problems with "include" directive path:')
and "_changelog_towncrier_draft.rst" in record.msg
):
return False
return True
logger = logging.getLogger(sphinx.util.logging.NAMESPACE)
warn_handler = [x for x in logger.handlers if x.level == logging.WARNING]
assert len(warn_handler) == 1, warn_handler
warn_handler[0].filters.insert(0, WarnLogFilter())
def setup(app: "sphinx.application.Sphinx") -> None:
# from sphinx.ext.autodoc import cut_lines
# app.connect('autodoc-process-docstring', cut_lines(4, what=['module']))
app.add_crossref_type(
"fixture",
"fixture",
objname="built-in fixture",
indextemplate="pair: %s; fixture",
)
app.add_object_type(
"confval",
"confval",
objname="configuration value",
indextemplate="pair: %s; configuration value",
)
app.add_object_type(
"globalvar",
"globalvar",
objname="global variable interpreted by pytest",
indextemplate="pair: %s; global variable interpreted by pytest",
)
configure_logging(app)
# Make Sphinx mark classes with "final" when decorated with @final.
# We need this because we import final from pytest._compat, not from
# typing (for Python < 3.8 compat), so Sphinx doesn't detect it.
# To keep things simple we accept any `@final` decorator.
# Ref: https://github.com/pytest-dev/pytest/pull/7780
import sphinx.pycode.ast
import sphinx.pycode.parser
original_is_final = sphinx.pycode.parser.VariableCommentPicker.is_final
def patched_is_final(self, decorators: List[ast.expr]) -> bool:
if original_is_final(self, decorators):
return True
return any(
sphinx.pycode.ast.unparse(decorator) == "final" for decorator in decorators
)
sphinx.pycode.parser.VariableCommentPicker.is_final = patched_is_final
| mit | -6,366,589,322,623,497,000 | 30.362559 | 95 | 0.675179 | false |
testbetta/git-repo-pub | subcmds/rebase.py | 16 | 4796 | #
# Copyright (C) 2010 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
from command import Command
from git_command import GitCommand
class Rebase(Command):
common = True
helpSummary = "Rebase local branches on upstream branch"
helpUsage = """
%prog {[<project>...] | -i <project>...}
"""
helpDescription = """
'%prog' uses git rebase to move local changes in the current topic branch to
the HEAD of the upstream history, useful when you have made commits in a topic
branch but need to incorporate new upstream changes "underneath" them.
"""
def _Options(self, p):
p.add_option('-i', '--interactive',
dest="interactive", action="store_true",
help="interactive rebase (single project only)")
p.add_option('-f', '--force-rebase',
dest='force_rebase', action='store_true',
help='Pass --force-rebase to git rebase')
p.add_option('--no-ff',
dest='no_ff', action='store_true',
help='Pass --no-ff to git rebase')
p.add_option('-q', '--quiet',
dest='quiet', action='store_true',
help='Pass --quiet to git rebase')
p.add_option('--autosquash',
dest='autosquash', action='store_true',
help='Pass --autosquash to git rebase')
p.add_option('--whitespace',
dest='whitespace', action='store', metavar='WS',
help='Pass --whitespace to git rebase')
p.add_option('--auto-stash',
dest='auto_stash', action='store_true',
help='Stash local modifications before starting')
p.add_option('-m', '--onto-manifest',
dest='onto_manifest', action='store_true',
help='Rebase onto the manifest version instead of upstream '
'HEAD. This helps to make sure the local tree stays '
'consistent if you previously synced to a manifest.')
def Execute(self, opt, args):
all_projects = self.GetProjects(args)
one_project = len(all_projects) == 1
if opt.interactive and not one_project:
print('error: interactive rebase not supported with multiple projects',
file=sys.stderr)
if len(args) == 1:
print('note: project %s is mapped to more than one path' % (args[0],),
file=sys.stderr)
return -1
for project in all_projects:
cb = project.CurrentBranch
if not cb:
if one_project:
print("error: project %s has a detached HEAD" % project.relpath,
file=sys.stderr)
return -1
        # ignore branches with detached HEADs
continue
upbranch = project.GetBranch(cb)
if not upbranch.LocalMerge:
if one_project:
print("error: project %s does not track any remote branches"
% project.relpath, file=sys.stderr)
return -1
# ignore branches without remotes
continue
args = ["rebase"]
if opt.whitespace:
args.append('--whitespace=%s' % opt.whitespace)
if opt.quiet:
args.append('--quiet')
if opt.force_rebase:
args.append('--force-rebase')
if opt.no_ff:
args.append('--no-ff')
if opt.autosquash:
args.append('--autosquash')
if opt.interactive:
args.append("-i")
if opt.onto_manifest:
args.append('--onto')
args.append(project.revisionExpr)
args.append(upbranch.LocalMerge)
print('# %s: rebasing %s -> %s'
% (project.relpath, cb, upbranch.LocalMerge), file=sys.stderr)
needs_stash = False
if opt.auto_stash:
stash_args = ["update-index", "--refresh", "-q"]
if GitCommand(project, stash_args).Wait() != 0:
needs_stash = True
# Dirty index, requires stash...
stash_args = ["stash"]
if GitCommand(project, stash_args).Wait() != 0:
return -1
if GitCommand(project, args).Wait() != 0:
return -1
if needs_stash:
stash_args.append('pop')
stash_args.append('--quiet')
if GitCommand(project, stash_args).Wait() != 0:
return -1
| apache-2.0 | 3,237,998,515,413,097,500 | 32.774648 | 78 | 0.596747 | false |
TheBoegl/letsencrypt | tests/letstest/multitester.py | 1 | 20830 | """
Letsencrypt Integration Test Tool
- Configures (canned) boulder server
- Launches EC2 instances with a given list of AMIs for different distros
- Copies letsencrypt repo and puts it on the instances
- Runs letsencrypt tests (bash scripts) on all of these
- Logs execution and success/fail for debugging
Notes:
- Some AWS images, e.g. official CentOS and FreeBSD images
require acceptance of user terms on the AWS marketplace
website. This can't be automated.
- AWS EC2 has a default limit of 20 t2/t1 instances, if more
are needed, they need to be requested via online webform.
Usage:
- Requires AWS IAM secrets to be set up with aws cli
- Requires an AWS associated keyfile <keyname>.pem
>aws configure --profile HappyHacker
[interactive: enter secrets for IAM role]
>aws ec2 create-key-pair --profile HappyHacker --key-name MyKeyPair \
--query 'KeyMaterial' --output text > MyKeyPair.pem
then:
>python multitester.py targets.yaml MyKeyPair.pem HappyHacker scripts/test_letsencrypt_auto_venv_only.sh
see:
https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html
https://docs.aws.amazon.com/cli/latest/userguide/cli-ec2-keypairs.html
"""
from __future__ import print_function
from __future__ import with_statement
import sys, os, time, argparse, socket
import multiprocessing as mp
from multiprocessing import Manager
import urllib2
import yaml
import boto3
import botocore.exceptions
import fabric
from fabric.api import run, execute, local, env, sudo, cd, lcd
from fabric.operations import get, put
from fabric.context_managers import shell_env
# Command line parser
#-------------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Builds EC2 cluster for testing.')
parser.add_argument('config_file',
help='yaml configuration file for AWS server cluster')
parser.add_argument('key_file',
help='key file (<keyname>.pem) for AWS')
parser.add_argument('aws_profile',
help='profile for AWS (i.e. as in ~/.aws/certificates)')
parser.add_argument('test_script',
default='test_letsencrypt_auto_certonly_standalone.sh',
help='path of bash script in to deploy and run')
#parser.add_argument('--script_args',
# nargs='+',
# help='space-delimited list of arguments to pass to the bash test script',
# required=False)
parser.add_argument('--repo',
default='https://github.com/letsencrypt/letsencrypt.git',
help='letsencrypt git repo to use')
parser.add_argument('--branch',
default='~',
help='letsencrypt git branch to trial')
parser.add_argument('--pull_request',
default='~',
help='letsencrypt/letsencrypt pull request to trial')
parser.add_argument('--merge_master',
action='store_true',
help="if set merges PR into master branch of letsencrypt/letsencrypt")
parser.add_argument('--saveinstances',
action='store_true',
help="don't kill EC2 instances after run, useful for debugging")
parser.add_argument('--alt_pip',
default='',
help="server from which to pull candidate release packages")
parser.add_argument('--killboulder',
action='store_true',
help="do not leave a persistent boulder server running")
parser.add_argument('--boulderonly',
action='store_true',
help="only make a boulder server")
parser.add_argument('--fast',
action='store_true',
help="use larger instance types to run faster (saves about a minute, probably not worth it)")
cl_args = parser.parse_args()
# Credential Variables
#-------------------------------------------------------------------------------
# assumes naming: <key_filename> = <keyname>.pem
KEYFILE = cl_args.key_file
KEYNAME = os.path.split(cl_args.key_file)[1].split('.pem')[0]
PROFILE = cl_args.aws_profile
# Globals
#-------------------------------------------------------------------------------
BOULDER_AMI = 'ami-5f490b35' # premade shared boulder AMI 14.04LTS us-east-1
LOGDIR = "" #points to logging / working directory
# boto3/AWS api globals
AWS_SESSION = None
EC2 = None
# Boto3/AWS automation functions
#-------------------------------------------------------------------------------
def make_security_group():
# will fail if security group of GroupName already exists
# cannot have duplicate SGs of the same name
mysg = EC2.create_security_group(GroupName="letsencrypt_test",
Description='security group for automated testing')
mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=22, ToPort=22)
mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=80, ToPort=80)
mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=443, ToPort=443)
# for boulder wfe (http) server
mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=4000, ToPort=4000)
# for mosh
mysg.authorize_ingress(IpProtocol="udp", CidrIp="0.0.0.0/0", FromPort=60000, ToPort=61000)
return mysg
def make_instance(instance_name,
ami_id,
keyname,
machine_type='t2.micro',
security_groups=['letsencrypt_test'],
userdata=""): #userdata contains bash or cloud-init script
new_instance = EC2.create_instances(
ImageId=ami_id,
SecurityGroups=security_groups,
KeyName=keyname,
MinCount=1,
MaxCount=1,
UserData=userdata,
InstanceType=machine_type)[0]
# brief pause to prevent rare error on EC2 delay, should block until ready instead
time.sleep(1.0)
# give instance a name
try:
new_instance.create_tags(Tags=[{'Key': 'Name', 'Value': instance_name}])
except botocore.exceptions.ClientError as e:
if "InvalidInstanceID.NotFound" in str(e):
# This seems to be ephemeral... retry
time.sleep(1)
new_instance.create_tags(Tags=[{'Key': 'Name', 'Value': instance_name}])
else:
raise
return new_instance
def terminate_and_clean(instances):
"""
Some AMIs specify EBS stores that won't delete on instance termination.
These must be manually deleted after shutdown.
"""
volumes_to_delete = []
for instance in instances:
for bdmap in instance.block_device_mappings:
if 'Ebs' in bdmap.keys():
if not bdmap['Ebs']['DeleteOnTermination']:
volumes_to_delete.append(bdmap['Ebs']['VolumeId'])
for instance in instances:
instance.terminate()
# can't delete volumes until all attaching instances are terminated
_ids = [instance.id for instance in instances]
all_terminated = False
while not all_terminated:
all_terminated = True
for _id in _ids:
# necessary to reinit object for boto3 to get true state
inst = EC2.Instance(id=_id)
if inst.state['Name'] != 'terminated':
all_terminated = False
time.sleep(5)
for vol_id in volumes_to_delete:
volume = EC2.Volume(id=vol_id)
volume.delete()
return volumes_to_delete
# Helper Routines
#-------------------------------------------------------------------------------
def block_until_http_ready(urlstring, wait_time=10, timeout=240):
"Blocks until server at urlstring can respond to http requests"
server_ready = False
t_elapsed = 0
while not server_ready and t_elapsed < timeout:
try:
sys.stdout.write('.')
sys.stdout.flush()
req = urllib2.Request(urlstring)
response = urllib2.urlopen(req)
#if response.code == 200:
server_ready = True
except urllib2.URLError:
pass
time.sleep(wait_time)
t_elapsed += wait_time
def block_until_ssh_open(ipstring, wait_time=10, timeout=120):
"Blocks until server at ipstring has an open port 22"
reached = False
t_elapsed = 0
while not reached and t_elapsed < timeout:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ipstring, 22))
reached = True
        except socket.error:
time.sleep(wait_time)
t_elapsed += wait_time
sock.close()
def block_until_instance_ready(booting_instance, wait_time=5, extra_wait_time=20):
"Blocks booting_instance until AWS EC2 instance is ready to accept SSH connections"
# the reinstantiation from id is necessary to force boto3
# to correctly update the 'state' variable during init
_id = booting_instance.id
_instance = EC2.Instance(id=_id)
_state = _instance.state['Name']
_ip = _instance.public_ip_address
while _state != 'running' or _ip is None:
time.sleep(wait_time)
_instance = EC2.Instance(id=_id)
_state = _instance.state['Name']
_ip = _instance.public_ip_address
block_until_ssh_open(_ip)
time.sleep(extra_wait_time)
return _instance
# Fabric Routines
#-------------------------------------------------------------------------------
def local_git_clone(repo_url):
"clones master of repo_url"
with lcd(LOGDIR):
local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi')
local('git clone %s letsencrypt'% repo_url)
local('tar czf le.tar.gz letsencrypt')
def local_git_branch(repo_url, branch_name):
"clones branch <branch_name> of repo_url"
with lcd(LOGDIR):
local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi')
local('git clone %s letsencrypt --branch %s --single-branch'%(repo_url, branch_name))
local('tar czf le.tar.gz letsencrypt')
def local_git_PR(repo_url, PRnumstr, merge_master=True):
"clones specified pull request from repo_url and optionally merges into master"
with lcd(LOGDIR):
local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi')
local('git clone %s letsencrypt'% repo_url)
local('cd letsencrypt && git fetch origin pull/%s/head:lePRtest'%PRnumstr)
local('cd letsencrypt && git co lePRtest')
if merge_master:
local('cd letsencrypt && git remote update origin')
local('cd letsencrypt && git merge origin/master -m "testmerge"')
local('tar czf le.tar.gz letsencrypt')
def local_repo_to_remote():
"copies local tarball of repo to remote"
with lcd(LOGDIR):
put(local_path='le.tar.gz', remote_path='')
run('tar xzf le.tar.gz')
def local_repo_clean():
"delete tarball"
with lcd(LOGDIR):
local('rm le.tar.gz')
def deploy_script(scriptpath, *args):
"copies to remote and executes local script"
#with lcd('scripts'):
put(local_path=scriptpath, remote_path='', mirror_local_mode=True)
scriptfile = os.path.split(scriptpath)[1]
args_str = ' '.join(args)
run('./'+scriptfile+' '+args_str)
def run_boulder():
with cd('$GOPATH/src/github.com/letsencrypt/boulder'):
run('go run cmd/rabbitmq-setup/main.go -server amqp://localhost')
run('nohup ./start.py >& /dev/null < /dev/null &')
def config_and_launch_boulder(instance):
execute(deploy_script, 'scripts/boulder_config.sh')
execute(run_boulder)
def install_and_launch_letsencrypt(instance, boulder_url, target):
execute(local_repo_to_remote)
with shell_env(BOULDER_URL=boulder_url,
PUBLIC_IP=instance.public_ip_address,
PRIVATE_IP=instance.private_ip_address,
PUBLIC_HOSTNAME=instance.public_dns_name,
PIP_EXTRA_INDEX_URL=cl_args.alt_pip,
OS_TYPE=target['type']):
execute(deploy_script, cl_args.test_script)
def grab_letsencrypt_log():
"grabs letsencrypt.log via cat into logged stdout"
sudo('if [ -f /var/log/letsencrypt/letsencrypt.log ]; then \
cat /var/log/letsencrypt/letsencrypt.log; else echo "[novarlog]"; fi')
    # fall back to a local letsencrypt.log in case /var/log was unwritable
sudo('if [ -f ./letsencrypt.log ]; then \
cat ./letsencrypt.log; else echo "[nolocallog]"; fi')
def create_client_instances(targetlist):
"Create a fleet of client instances"
instances = []
print("Creating instances: ", end="")
for target in targetlist:
if target['virt'] == 'hvm':
machine_type = 't2.medium' if cl_args.fast else 't2.micro'
else:
# 32 bit systems
machine_type = 'c1.medium' if cl_args.fast else 't1.micro'
if 'userdata' in target.keys():
userdata = target['userdata']
else:
userdata = ''
name = 'le-%s'%target['name']
print(name, end=" ")
instances.append(make_instance(name,
target['ami'],
KEYNAME,
machine_type=machine_type,
userdata=userdata))
print()
return instances
#-------------------------------------------------------------------------------
# SCRIPT BEGINS
#-------------------------------------------------------------------------------
# Fabric library controlled through global env parameters
env.key_filename = KEYFILE
env.shell = '/bin/bash -l -i -c'
env.connection_attempts = 5
env.timeout = 10
# replace default SystemExit thrown by fabric during trouble
class FabricException(Exception):
pass
env['abort_exception'] = FabricException
# Set up local copy of git repo
#-------------------------------------------------------------------------------
LOGDIR = "letest-%d"%int(time.time())
print("Making local dir for test repo and logs: %s"%LOGDIR)
local('mkdir %s'%LOGDIR)
# figure out what git object to test and locally create it in LOGDIR
print("Making local git repo")
try:
if cl_args.pull_request != '~':
print('Testing PR %s '%cl_args.pull_request,
"MERGING into master" if cl_args.merge_master else "")
execute(local_git_PR, cl_args.repo, cl_args.pull_request, cl_args.merge_master)
elif cl_args.branch != '~':
print('Testing branch %s of %s'%(cl_args.branch, cl_args.repo))
execute(local_git_branch, cl_args.repo, cl_args.branch)
else:
print('Testing master of %s'%cl_args.repo)
execute(local_git_clone, cl_args.repo)
except FabricException:
print("FAIL: trouble with git repo")
exit()
# Set up EC2 instances
#-------------------------------------------------------------------------------
configdata = yaml.safe_load(open(cl_args.config_file, 'r'))
targetlist = configdata['targets']
print('Testing against these images: [%d total]'%len(targetlist))
for target in targetlist:
print(target['ami'], target['name'])
print("Connecting to EC2 using\n profile %s\n keyname %s\n keyfile %s"%(PROFILE, KEYNAME, KEYFILE))
AWS_SESSION = boto3.session.Session(profile_name=PROFILE)
EC2 = AWS_SESSION.resource('ec2')
print("Making Security Group")
sg_exists = False
for sg in EC2.security_groups.all():
if sg.group_name == 'letsencrypt_test':
sg_exists = True
print(" %s already exists"%'letsencrypt_test')
if not sg_exists:
make_security_group()
time.sleep(30)
boulder_preexists = False
boulder_servers = EC2.instances.filter(Filters=[
{'Name': 'tag:Name', 'Values': ['le-boulderserver']},
{'Name': 'instance-state-name', 'Values': ['running']}])
boulder_server = next(iter(boulder_servers), None)
print("Requesting Instances...")
if boulder_server:
print("Found existing boulder server:", boulder_server)
boulder_preexists = True
else:
print("Can't find a boulder server, starting one...")
boulder_server = make_instance('le-boulderserver',
BOULDER_AMI,
KEYNAME,
machine_type='t2.micro',
#machine_type='t2.medium',
security_groups=['letsencrypt_test'])
if not cl_args.boulderonly:
instances = create_client_instances(targetlist)
# Configure and launch boulder server
#-------------------------------------------------------------------------------
print("Waiting on Boulder Server")
boulder_server = block_until_instance_ready(boulder_server)
print(" server %s"%boulder_server)
# env.host_string defines the ssh user and host for connection
env.host_string = "ubuntu@%s"%boulder_server.public_ip_address
print("Boulder Server at (SSH):", env.host_string)
if not boulder_preexists:
print("Configuring and Launching Boulder")
config_and_launch_boulder(boulder_server)
# blocking often unnecessary, but cheap EC2 VMs can get very slow
block_until_http_ready('http://%s:4000'%boulder_server.public_ip_address,
wait_time=10, timeout=500)
boulder_url = "http://%s:4000/directory"%boulder_server.private_ip_address
print("Boulder Server at (public ip): http://%s:4000/directory"%boulder_server.public_ip_address)
print("Boulder Server at (EC2 private ip): %s"%boulder_url)
if cl_args.boulderonly:
sys.exit(0)
# Install and launch client scripts in parallel
#-------------------------------------------------------------------------------
print("Uploading and running test script in parallel: %s"%cl_args.test_script)
print("Output routed to log files in %s"%LOGDIR)
# (Advice: always use Manager.Queue, never regular multiprocessing.Queue
# the latter has implementation flaws that deadlock it in some circumstances)
manager = Manager()
outqueue = manager.Queue()
inqueue = manager.Queue()
SENTINEL = None #queue kill signal
# launch as many processes as clients to test
num_processes = len(targetlist)
jobs = [] #keep a reference to current procs
def test_client_process(inqueue, outqueue):
cur_proc = mp.current_process()
for inreq in iter(inqueue.get, SENTINEL):
ii, target = inreq
#save all stdout to log file
sys.stdout = open(LOGDIR+'/'+'%d_%s.log'%(ii,target['name']), 'w')
print("[%s : client %d %s %s]" % (cur_proc.name, ii, target['ami'], target['name']))
instances[ii] = block_until_instance_ready(instances[ii])
print("server %s at %s"%(instances[ii], instances[ii].public_ip_address))
env.host_string = "%s@%s"%(target['user'], instances[ii].public_ip_address)
print(env.host_string)
try:
install_and_launch_letsencrypt(instances[ii], boulder_url, target)
outqueue.put((ii, target, 'pass'))
print("%s - %s SUCCESS"%(target['ami'], target['name']))
except:
outqueue.put((ii, target, 'fail'))
print("%s - %s FAIL"%(target['ami'], target['name']))
pass
# append server letsencrypt.log to each per-machine output log
print("\n\nletsencrypt.log\n" + "-"*80 + "\n")
try:
execute(grab_letsencrypt_log)
except:
print("log fail\n")
pass
# initiate process execution
for i in range(num_processes):
p = mp.Process(target=test_client_process, args=(inqueue, outqueue))
jobs.append(p)
p.daemon = True # kills subprocesses if parent is killed
p.start()
# fill up work queue
for ii, target in enumerate(targetlist):
inqueue.put((ii, target))
# add SENTINELs to end client processes
for i in range(num_processes):
inqueue.put(SENTINEL)
# wait on termination of client processes
for p in jobs:
p.join()
# add SENTINEL to output queue
outqueue.put(SENTINEL)
# clean up
execute(local_repo_clean)
# print and save summary results
results_file = open(LOGDIR+'/results', 'w')
outputs = [outq for outq in iter(outqueue.get, SENTINEL)]
outputs.sort(key=lambda x: x[0])
for outq in outputs:
ii, target, status = outq
print('%d %s %s'%(ii, target['name'], status))
results_file.write('%d %s %s\n'%(ii, target['name'], status))
results_file.close()
if not cl_args.saveinstances:
print('Logs in ', LOGDIR)
print('Terminating EC2 Instances and Cleaning Dangling EBS Volumes')
if cl_args.killboulder:
boulder_server.terminate()
terminate_and_clean(instances)
else:
# print login information for the boxes for debugging
for ii, target in enumerate(targetlist):
print(target['name'],
target['ami'],
"%s@%s"%(target['user'], instances[ii].public_ip_address))
# kill any connections
fabric.network.disconnect_all()
| apache-2.0 | 8,239,710,991,521,104,000 | 37.86194 | 113 | 0.610226 | false |
jjbrophy47/sn_spam | independent/scripts/independent.py | 1 | 6452 | """
Module containing the Independent class to handle all operations pertaining
to the independent model.
"""
import os
import pandas as pd
class Independent:
"""Returns an Independent object that reads in the data, splits into sets,
trains and classifies, and writes the results."""
def __init__(self, config_obj, classification_obj, util_obj):
"""Initializes object dependencies for this class."""
self.config_obj = config_obj
"""Configuration object with user settings."""
self.classification_obj = classification_obj
"""Object that handles classification of the data."""
self.util_obj = util_obj
"""Class containing general utility methods."""
# public
def main(self):
"""Main method that reads in the comments, splits them into train and
test, writes them to files, and prints out stats.
Returns the train and test comment dataframes."""
modified = self.config_obj.modified
self.util_obj.start()
data_f, fold_f, status_f = self.file_folders()
sw = self.open_status_writer(status_f)
coms_filename = self.util_obj.get_comments_filename(modified)
coms_df = self.read_file(data_f + coms_filename, sw)
train_df, val_df, test_df = self.split_coms(coms_df)
if self.config_obj.alter_user_ids:
self.alter_user_ids(coms_df, test_df)
self.write_folds(val_df, test_df, fold_f)
self.print_subsets(train_df, val_df, test_df, fw=sw)
self.util_obj.start('\nvalidation set:\n', fw=sw)
self.classification_obj.main(train_df, val_df, dset='val', fw=sw)
self.util_obj.end('time: ', fw=sw)
self.util_obj.start('\ntest set:\n', fw=sw)
all_train_df = train_df.copy()
if self.config_obj.super_train:
all_train_df = pd.concat([train_df, val_df])
self.classification_obj.main(all_train_df, test_df, dset='test', fw=sw)
self.util_obj.end('time: ', fw=sw)
self.util_obj.end('total independent model time: ', fw=sw)
self.util_obj.close_writer(sw)
return val_df, test_df
# private
def file_folders(self):
"""Returns absolute paths for various directories."""
ind_dir = self.config_obj.ind_dir
domain = self.config_obj.domain
data_f = ind_dir + 'data/' + domain + '/'
fold_f = ind_dir + 'data/' + domain + '/folds/'
status_f = ind_dir + 'output/' + domain + '/status/'
if not os.path.exists(fold_f):
os.makedirs(fold_f)
if not os.path.exists(status_f):
os.makedirs(status_f)
return data_f, fold_f, status_f
def open_status_writer(self, status_f):
"""Opens a file to write updates of the independent model.
status_f: status folder.
Returns file object to write to."""
fold = self.config_obj.fold
fname = status_f + 'ind_' + fold + '.txt'
f = self.util_obj.open_writer(fname)
return f
def read_file(self, filename, fw=None):
"""Reads the appropriate comments file of the domain.
filename: csv comments file.
Returns comments dataframe up to the end marker in the config."""
self.util_obj.start('loading data...', fw=fw)
coms_df = pd.read_csv(filename, lineterminator='\n',
nrows=self.config_obj.end)
self.util_obj.end(fw=fw)
return coms_df
def split_coms(self, coms_df):
"""Splits the comments into training, validation, and test sets.
coms_df: comments dataframe.
Returns train, val, and test dataframes."""
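        # e.g. with 100 comments after the start offset, train_size=0.7 and
        # val_size=0.15 give train=[0:70], val=[70:85], test=[85:100].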
start = self.config_obj.start
train_size = self.config_obj.train_size
val_size = self.config_obj.val_size
coms_df = coms_df[start:]
num_coms = len(coms_df)
split_ndx1 = int(num_coms * train_size)
split_ndx2 = split_ndx1 + int(num_coms * val_size)
train_df = coms_df[:split_ndx1]
val_df = coms_df[split_ndx1:split_ndx2]
test_df = coms_df[split_ndx2:]
return train_df, val_df, test_df
def alter_user_ids(self, coms_df, test_df):
"""Alters the user ids in the test set so that all spam messages
        are posted by distinct new users.
        coms_df: dataframe with all comments.
        test_df: test set dataframe.
        Alters test_df in place, giving each spam comment a fresh user id."""
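        # e.g. if the highest existing user_id is 500, spam comments in the
        # test set are reassigned fresh, unused ids above 500.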
max_user_id = coms_df['user_id'].max() + 1
user_ids = list(zip(test_df['label'], test_df['user_id']))
new_user_ids = []
for label, user_id in user_ids:
new_user_ids.append(max_user_id if label == 1 else user_id)
max_user_id += 1
test_df['user_id'] = new_user_ids
def write_folds(self, val_df, test_df, fold_f):
"""Writes validation and test set dataframes to csv files.
val_df: dataframe with validation set comments.
test_df: dataframe with test set comments.
fold_f: folder to save the data to."""
fold = self.config_obj.fold
val_fname = fold_f + 'val_' + fold + '.csv'
test_fname = fold_f + 'test_' + fold + '.csv'
val_df.to_csv(val_fname, line_terminator='\n', index=None)
test_df.to_csv(test_fname, line_terminator='\n', index=None)
def print_subsets(self, train_df, val_df, test_df, fw=None):
"""Writes basic statistics about the training and test sets.
train_df: training set comments.
test_df: test set comments."""
spam, total = len(train_df[train_df['label'] == 1]), len(train_df)
percentage = round(self.util_obj.div0(spam, total) * 100, 1)
s = '\ttraining set size: ' + str(len(train_df)) + ', '
s += 'spam: ' + str(spam) + ' (' + str(percentage) + '%)'
self.util_obj.write(s, fw=fw)
spam, total = len(val_df[val_df['label'] == 1]), len(val_df)
percentage = round(self.util_obj.div0(spam, total) * 100, 1)
s = '\tvalidation set size: ' + str(len(val_df)) + ', '
s += 'spam: ' + str(spam) + ' (' + str(percentage) + '%)'
self.util_obj.write(s, fw=fw)
spam, total = len(test_df[test_df['label'] == 1]), len(test_df)
percentage = round(self.util_obj.div0(spam, total) * 100, 1)
s = '\ttest set size: ' + str(len(test_df)) + ', '
s += 'spam: ' + str(spam) + ' (' + str(percentage) + '%)'
self.util_obj.write(s, fw=fw)
| mit | 5,480,564,389,528,302,000 | 39.578616 | 79 | 0.591445 | false |
josesanch/django-oscar | sites/demo/apps/order/south_migrations/0003_auto__del_field_ordernote_date__add_field_ordernote_date_created__add_.py | 16 | 32376 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.utils.timezone import now
class Migration(SchemaMigration):
def forwards(self, orm):
db.rename_column('order_ordernote', 'date', 'date_created')
# Adding field 'OrderNote.date_updated'
db.add_column('order_ordernote', 'date_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=now(), blank=True), keep_default=False)
def backwards(self, orm):
db.rename_column('order_ordernote', 'date_created', 'date')
# Deleting field 'OrderNote.date_updated'
db.delete_column('order_ordernote', 'date_updated')
models = {
'address.country': {
'Meta': {'ordering': "('-is_highlighted', 'name')", 'object_name': 'Country'},
'is_highlighted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_shipping_country': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'iso_3166_1_a2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'iso_3166_1_a3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'db_index': 'True'}),
'iso_3166_1_numeric': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'printable_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'catalogue.attributeentity': {
'Meta': {'object_name': 'AttributeEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': "orm['catalogue.AttributeEntityType']"})
},
'catalogue.attributeentitytype': {
'Meta': {'object_name': 'AttributeEntityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'})
},
'catalogue.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['catalogue.AttributeOptionGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'catalogue.attributeoptiongroup': {
'Meta': {'object_name': 'AttributeOptionGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'catalogue.category': {
'Meta': {'ordering': "['name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '1024', 'db_index': 'True'})
},
'catalogue.option': {
'Meta': {'object_name': 'Option'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
},
'catalogue.product': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.ProductAttribute']", 'through': "orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Category']", 'through': "orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': "orm['catalogue.Product']"}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductClass']", 'null': 'True'}),
'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Product']", 'symmetrical': 'False', 'through': "orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'catalogue.productattribute': {
'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': "orm['catalogue.ProductClass']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
'catalogue.productattributevalue': {
'Meta': {'object_name': 'ProductAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductAttribute']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': "orm['catalogue.Product']"}),
'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'catalogue.productcategory': {
'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'catalogue.productclass': {
'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'catalogue.productrecommendation': {
'Meta': {'object_name': 'ProductRecommendation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': "orm['catalogue.Product']"}),
'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'customer.communicationeventtype': {
'Meta': {'object_name': 'CommunicationEventType'},
'category': ('django.db.models.fields.CharField', [], {'default': "'Order related'", 'max_length': '255'}),
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'email_body_html_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email_body_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email_subject_template': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sms_template': ('django.db.models.fields.CharField', [], {'max_length': '170', 'blank': 'True'})
},
'order.billingaddress': {
'Meta': {'object_name': 'BillingAddress'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['address.Country']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'line1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'line2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line4': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'search_text': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'order.communicationevent': {
'Meta': {'object_name': 'CommunicationEvent'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['customer.CommunicationEventType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'communication_events'", 'to': "orm['order.Order']"})
},
'order.line': {
'Meta': {'object_name': 'Line'},
'est_dispatch_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_price_before_discounts_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'line_price_before_discounts_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'line_price_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'line_price_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'lines'", 'to': "orm['order.Order']"}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'order_lines'", 'null': 'True', 'to': "orm['partner.Partner']"}),
'partner_line_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'partner_line_reference': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'partner_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'partner_sku': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']", 'null': 'True', 'blank': 'True'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'unit_cost_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'unit_price_excl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'unit_price_incl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'unit_retail_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'})
},
'order.lineattribute': {
'Meta': {'object_name': 'LineAttribute'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attributes'", 'to': "orm['order.Line']"}),
'option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_attributes'", 'null': 'True', 'to': "orm['catalogue.Option']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'order.lineprice': {
'Meta': {'object_name': 'LinePrice'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'prices'", 'to': "orm['order.Line']"}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_prices'", 'to': "orm['order.Order']"}),
'price_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'price_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'shipping_excl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'shipping_incl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'})
},
'order.order': {
'Meta': {'ordering': "['-date_placed']", 'object_name': 'Order'},
'basket_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'billing_address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.BillingAddress']", 'null': 'True', 'blank': 'True'}),
'date_placed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'guest_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'shipping_address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.ShippingAddress']", 'null': 'True', 'blank': 'True'}),
'shipping_excl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'shipping_incl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'shipping_method': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'total_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'total_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'orders'", 'null': 'True', 'to': "orm['auth.User']"})
},
'order.orderdiscount': {
'Meta': {'object_name': 'OrderDiscount'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offer_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'discounts'", 'to': "orm['order.Order']"}),
'voucher_code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'voucher_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'order.ordernote': {
'Meta': {'object_name': 'OrderNote'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'note_type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notes'", 'to': "orm['order.Order']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'order.paymentevent': {
'Meta': {'object_name': 'PaymentEvent'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.PaymentEventType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lines': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['order.Line']", 'through': "orm['order.PaymentEventQuantity']", 'symmetrical': 'False'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payment_events'", 'to': "orm['order.Order']"})
},
'order.paymenteventquantity': {
'Meta': {'object_name': 'PaymentEventQuantity'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_quantities'", 'to': "orm['order.PaymentEvent']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.Line']"}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'order.paymenteventtype': {
'Meta': {'ordering': "('sequence_number',)", 'object_name': 'PaymentEventType'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'sequence_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'order.shippingaddress': {
'Meta': {'object_name': 'ShippingAddress'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['address.Country']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'line1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'line2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line4': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'search_text': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'order.shippingevent': {
'Meta': {'ordering': "['-date']", 'object_name': 'ShippingEvent'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.ShippingEventType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lines': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['order.Line']", 'through': "orm['order.ShippingEventQuantity']", 'symmetrical': 'False'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shipping_events'", 'to': "orm['order.Order']"})
},
'order.shippingeventquantity': {
'Meta': {'object_name': 'ShippingEventQuantity'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_quantities'", 'to': "orm['order.ShippingEvent']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.Line']"}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'order.shippingeventtype': {
'Meta': {'ordering': "('sequence_number',)", 'object_name': 'ShippingEventType'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'sequence_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'partner.partner': {
'Meta': {'object_name': 'Partner'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'partners'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['order']
| bsd-3-clause | 8,531,701,499,073,443,000 | 85.566845 | 222 | 0.554299 | false |
kashif/chainer | tests/chainer_tests/links_tests/activation_tests/test_simplified_dropconnect.py | 1 | 9315 | import os
import tempfile
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import gradient_check
from chainer import links
from chainer.serializers import npz
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.utils import type_check
def gen_mask(ratio, shape):
return numpy.random.rand(*shape) >= ratio
@testing.parameterize(*testing.product({
'in_shape': [(3,), (3, 2, 2)],
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
'use_batchwise_mask': [True, False],
}))
class TestSimplifiedDropconnect(unittest.TestCase):
out_size = 2
ratio = 0.5
def setUp(self):
in_size = numpy.prod(self.in_shape)
self.link = links.SimplifiedDropconnect(
in_size, self.out_size,
initialW=chainer.initializers.Normal(1, self.W_dtype),
initial_bias=chainer.initializers.Normal(1, self.x_dtype))
self.link.cleargrads()
x_shape = (4,) + self.in_shape
self.x = numpy.random.uniform(-1, 1, x_shape).astype(self.x_dtype)
self.gy = numpy.random.uniform(
-1, 1, (4, self.out_size)).astype(self.x_dtype)
W = self.link.W.data
b = self.link.b.data
if self.use_batchwise_mask:
mask_shape = (4,) + self.link.W.shape
else:
mask_shape = self.link.W.shape
self.mask = gen_mask(self.ratio, mask_shape)
W = (W * self.mask) * (1. / (1 - self.ratio))
x = self.x.reshape(4, -1)
# numpy 1.9 does not support matmul.
# So we use numpy.einsum instead of numpy.matmul.
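        # 'ijk,ikl->ijl' below is a batched matmul (one mask per sample),
        # while 'jk,ikl->ijl' broadcasts a single shared mask over the batch.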
if self.use_batchwise_mask:
self.y_expect = numpy.einsum('ijk,ikl->ijl',
W, x[:, :, None]).reshape(4, -1) + b
else:
self.y_expect = numpy.einsum('jk,ikl->ijl',
W, x[:, :, None]).reshape(4, -1) + b
self.check_forward_options = {}
self.check_backward_options = {'atol': 1e-4, 'rtol': 1e-3}
if self.x_dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.check_backward_options = {'atol': 1e-2, 'rtol': 5e-2}
elif self.W_dtype == numpy.float16:
self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
def check_forward(self, x_data, mask):
x = chainer.Variable(x_data)
y = self.link(x, train=True, mask=mask,
use_batchwise_mask=self.use_batchwise_mask)
self.assertEqual(y.data.dtype, self.x_dtype)
testing.assert_allclose(self.y_expect, y.data,
**self.check_forward_options)
def test_forward_cpu(self):
self.check_forward(self.x, self.mask)
@attr.gpu
def test_forward_gpu(self):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.mask))
def link_wrapper(self, *data):
return self.link(x=data[0], train=True, mask=data[1],
use_batchwise_mask=self.use_batchwise_mask)
def check_backward(self, x_data, y_grad, mask):
gradient_check.check_backward(
self.link_wrapper, (x_data, mask), y_grad,
(self.link.W, self.link.b),
no_grads=(False, True), dtype='d', **self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy, self.mask)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy),
cuda.to_gpu(self.mask))
class TestSimplifiedDropconnectParameterShapePlaceholder(unittest.TestCase):
in_size = 3
in_shape = (in_size,)
out_size = 2
in_size_or_none = None
ratio = 0.5
def setUp(self):
self.link = links.SimplifiedDropconnect(self.in_size_or_none,
self.out_size)
temp_x = numpy.random.uniform(-1, 1,
(4, self.in_size)).astype(numpy.float32)
self.link(chainer.Variable(temp_x))
W = self.link.W.data
W[...] = numpy.random.uniform(-1, 1, W.shape)
b = self.link.b.data
b[...] = numpy.random.uniform(-1, 1, b.shape)
self.link.cleargrads()
mask_shape = (4, self.out_size, self.in_size)
self.mask = gen_mask(self.ratio, mask_shape)
x_shape = (4,) + self.in_shape
self.x = numpy.random.uniform(-1, 1, x_shape).astype(numpy.float32)
self.gy = numpy.random.uniform(
-1, 1, (4, self.out_size)).astype(numpy.float32)
W = (W * self.mask) * (1. / (1 - self.ratio))
# numpy 1.9 does not support matmul.
# So we use numpy.einsum instead of numpy.matmul.
self.y_expect = numpy.einsum('ijk,ikl->ijl',
W, self.x[:, :, None]).reshape(4, -1) + b
def check_forward(self, x_data, mask):
x = chainer.Variable(x_data)
y = self.link(x, train=True, mask=mask, use_batchwise_mask=True)
self.assertEqual(y.data.dtype, numpy.float32)
testing.assert_allclose(self.y_expect, y.data)
def test_forward_cpu(self):
self.check_forward(self.x, self.mask)
@attr.gpu
def test_forward_gpu(self):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.mask))
def link_wrapper(self, *data):
return self.link(x=data[0], train=True, mask=data[1],
use_batchwise_mask=True)
def check_backward(self, x_data, y_grad, mask):
gradient_check.check_backward(
self.link_wrapper, (x_data, mask), y_grad,
(self.link.W, self.link.b), dtype='d', no_grads=(False, True),
atol=1e-4, rtol=1e-3)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy, self.mask)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy),
cuda.to_gpu(self.mask))
def test_serialization(self):
lin1 = links.SimplifiedDropconnect(None, self.out_size)
x = chainer.Variable(self.x)
# Must call the link to initialize weights.
lin1(x)
w1 = lin1.W.data
fd, temp_file_path = tempfile.mkstemp()
os.close(fd)
npz.save_npz(temp_file_path, lin1)
lin2 = links.SimplifiedDropconnect(None, self.out_size)
npz.load_npz(temp_file_path, lin2)
w2 = lin2.W.data
self.assertEqual((w1 == w2).all(), True)
class TestSimplifiedDropconnectNotBatchwiseMask(unittest.TestCase):
in_shape = (3,)
out_size = 2
ratio = 0.5
def setUp(self):
in_size = numpy.prod(self.in_shape)
self.link = links.SimplifiedDropconnect(
in_size, self.out_size,
initialW=chainer.initializers.Normal(1, numpy.float32),
initial_bias=chainer.initializers.Normal(1, numpy.float32))
self.link.cleargrads()
x_shape = (4,) + self.in_shape
self.x = numpy.ones(x_shape).astype(numpy.float32)
self.W = self.link.W.data
self.b = self.link.b.data
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = self.link(x, train=True, use_batchwise_mask=False)
# check mask equality here.
testing.assert_allclose(y.data[0], y.data[1])
testing.assert_allclose(y.data[0], y.data[2])
testing.assert_allclose(y.data[0], y.data[3])
mask = y.creator.mask
mask = cuda.to_cpu(mask)
y_expect = self.x.dot(self.W.T * mask.T) * (1. / (1 - self.ratio))
y_expect += self.b
testing.assert_allclose(y_expect, y.data)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
class TestInvalidSimplifiedDropconnect(unittest.TestCase):
def test_invalid_input_size(self):
link = links.SimplifiedDropconnect(3, 2)
x = numpy.random.uniform(-1, 1, (4, 1, 2)).astype(numpy.float32)
with self.assertRaises(type_check.InvalidType):
link(chainer.Variable(x))
def test_invalid_mask_size(self):
link = links.SimplifiedDropconnect(3, 2)
x = numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32)
mask = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
with self.assertRaises(type_check.InvalidType):
link(chainer.Variable(x), use_batchwise_mask=True, mask=mask)
def test_invalid_mask_size2(self):
link = links.SimplifiedDropconnect(3, 2)
x = numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32)
mask = numpy.random.uniform(-1, 1, (4, 3, 2)).astype(numpy.float32)
with self.assertRaises(type_check.InvalidType):
link(chainer.Variable(x), use_batchwise_mask=False, mask=mask)
testing.run_module(__name__, __file__)
| mit | 4,405,001,306,083,681,000 | 34.284091 | 78 | 0.588191 | false |
PanDAWMS/panda-jedi | pandajedi/jediddm/DDMInterface.py | 1 | 2256 | from pandajedi.jediconfig import jedi_config
from pandajedi.jedicore import Interaction
# interface to DDM
class DDMInterface:
# constructor
def __init__(self):
self.interfaceMap = {}
# setup interface
def setupInterface(self):
# parse config
for configStr in jedi_config.ddm.modConfig.split(','):
configStr = configStr.strip()
items = configStr.split(':')
# check format
try:
vo = items[0]
maxSize = int(items[1])
moduleName = items[2]
className = items[3]
if len(items) >= 5:
group = items[4]
if not group:
group = None
else:
group = None
except Exception:
# TODO add config error message
continue
# add VO interface
voIF = Interaction.CommandSendInterface(vo,maxSize,moduleName,className)
voIF.initialize()
key = self.get_dict_key(vo, group)
self.interfaceMap[key] = voIF
# get interface with VO
def getInterface(self, vo, group=None):
# vo + group
key = self.get_dict_key(vo, group)
if key in self.interfaceMap:
return self.interfaceMap[key]
# only vo
key = self.get_dict_key(vo, None)
if key in self.interfaceMap:
return self.interfaceMap[key]
# catchall
        catchAll = self.get_dict_key('any', None)
        if catchAll in self.interfaceMap:
            return self.interfaceMap[catchAll]
# not found
return None
# get dict key
def get_dict_key(self, vo, group):
return vo, group
if __name__ == '__main__':
def dummyClient(dif):
print("client test")
dif.getInterface('atlas').test()
print('client done')
dif = DDMInterface()
dif.setupInterface()
print("master test")
atlasIF = dif.getInterface('atlas')
atlasIF.test()
print("master done")
import multiprocessing
p = multiprocessing.Process(target=dummyClient,
args=(dif,))
p.start()
p.join()
| apache-2.0 | -5,697,655,182,095,574,000 | 27.556962 | 84 | 0.535461 | false |
pwoodworth/intellij-community | python/lib/Lib/email/Parser.py | 392 | 3300 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw, Thomas Wouters, Anthony Baxter
# Contact: [email protected]
"""A parser of RFC 2822 and MIME email messages."""
__all__ = ['Parser', 'HeaderParser']
import warnings
from cStringIO import StringIO
from email.feedparser import FeedParser
from email.message import Message
class Parser:
def __init__(self, *args, **kws):
"""Parser of RFC 2822 and MIME email messages.
Creates an in-memory object tree representing the email message, which
can then be manipulated and turned over to a Generator to return the
textual representation of the message.
The string must be formatted as a block of RFC 2822 headers and header
        continuation lines, optionally preceded by a `Unix-from' header.  The
header block is terminated either by the end of the string or by a
blank line.
_class is the class to instantiate for new message objects when they
must be created. This class must have a constructor that can take
zero arguments. Default is Message.Message.
"""
if len(args) >= 1:
if '_class' in kws:
raise TypeError("Multiple values for keyword arg '_class'")
kws['_class'] = args[0]
if len(args) == 2:
if 'strict' in kws:
raise TypeError("Multiple values for keyword arg 'strict'")
kws['strict'] = args[1]
if len(args) > 2:
raise TypeError('Too many arguments')
if '_class' in kws:
self._class = kws['_class']
del kws['_class']
else:
self._class = Message
if 'strict' in kws:
warnings.warn("'strict' argument is deprecated (and ignored)",
DeprecationWarning, 2)
del kws['strict']
if kws:
raise TypeError('Unexpected keyword arguments')
def parse(self, fp, headersonly=False):
"""Create a message structure from the data in a file.
Reads all the data from the file and returns the root of the message
structure. Optional headersonly is a flag specifying whether to stop
parsing after reading the headers or not. The default is False,
meaning it parses the entire contents of the file.
"""
feedparser = FeedParser(self._class)
if headersonly:
feedparser._set_headersonly()
while True:
data = fp.read(8192)
if not data:
break
feedparser.feed(data)
return feedparser.close()
def parsestr(self, text, headersonly=False):
"""Create a message structure from a string.
Returns the root of the message structure. Optional headersonly is a
flag specifying whether to stop parsing after reading the headers or
not. The default is False, meaning it parses the entire contents of
the file.
"""
return self.parse(StringIO(text), headersonly=headersonly)
class HeaderParser(Parser):
def parse(self, fp, headersonly=True):
return Parser.parse(self, fp, True)
def parsestr(self, text, headersonly=True):
return Parser.parsestr(self, text, True)
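# Editor's sketch (not part of the original module): minimal use of the two
# parsers above on an illustrative message.  Parser builds the full message
# structure; HeaderParser stops after the header block and keeps the body as
# raw payload.
def _demo_usage():
    raw = "From: [email protected]\nSubject: hi\n\nbody text\n"
    msg = Parser().parsestr(raw)
    headers_only = HeaderParser().parsestr(raw)
    return msg['subject'], headers_only.get_payload()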
| apache-2.0 | -5,318,890,526,377,604,000 | 34.483871 | 78 | 0.624848 | false |
lanen/youtube-dl | youtube_dl/extractor/kickstarter.py | 111 | 2654 | # encoding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class KickStarterIE(InfoExtractor):
_VALID_URL = r'https?://www\.kickstarter\.com/projects/(?P<id>[^/]*)/.*'
_TESTS = [{
'url': 'https://www.kickstarter.com/projects/1404461844/intersection-the-story-of-josh-grant?ref=home_location',
'md5': 'c81addca81327ffa66c642b5d8b08cab',
'info_dict': {
'id': '1404461844',
'ext': 'mp4',
'title': 'Intersection: The Story of Josh Grant by Kyle Cowling',
'description': (
'A unique motocross documentary that examines the '
'life and mind of one of sports most elite athletes: Josh Grant.'
),
},
}, {
'note': 'Embedded video (not using the native kickstarter video service)',
'url': 'https://www.kickstarter.com/projects/597507018/pebble-e-paper-watch-for-iphone-and-android/posts/659178',
'info_dict': {
'id': '78704821',
'ext': 'mp4',
'uploader_id': 'pebble',
'uploader': 'Pebble Technology',
'title': 'Pebble iOS Notifications',
}
}, {
'url': 'https://www.kickstarter.com/projects/1420158244/power-drive-2000/widget/video.html',
'info_dict': {
'id': '1420158244',
'ext': 'mp4',
'title': 'Power Drive 2000',
},
'expected_warnings': ['OpenGraph description'],
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<title>\s*(.*?)(?:\s*— Kickstarter)?\s*</title>',
webpage, 'title')
video_url = self._search_regex(
r'data-video-url="(.*?)"',
webpage, 'video URL', default=None)
if video_url is None: # No native kickstarter, look for embedded videos
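            # '_type': 'url_transparent' delegates extraction to the Generic
            # extractor while keeping the metadata (the title) resolved here.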
return {
'_type': 'url_transparent',
'ie_key': 'Generic',
'url': url,
'title': title,
}
thumbnail = self._og_search_thumbnail(webpage, default=None)
if thumbnail is None:
thumbnail = self._html_search_regex(
r'<img[^>]+class="[^"]+\s*poster\s*[^"]+"[^>]+src="([^"]+)"',
webpage, 'thumbnail image', fatal=False)
return {
'id': video_id,
'url': video_url,
'title': title,
'description': self._og_search_description(webpage),
'thumbnail': thumbnail,
}
| unlicense | -7,067,527,939,780,323,000 | 36.914286 | 121 | 0.525622 | false |
chromium/gyp | pylib/gyp/generator/gypsh.py | 2779 | 1665 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypsh output module
gypsh is a GYP shell. It's not really a generator per se. All it does is
fire up an interactive Python session with a few local variables set to the
variables passed to the generator. Like gypd, it's intended as a debugging
aid, to facilitate the exploration of .gyp structures after being processed
by the input module.
The expected usage is "gyp -f gypsh -D OS=desired_os".
"""
import code
import sys
# All of this stuff about generator variables was lovingly ripped from gypd.py.
# That module has a much better description of what's going on and why.
_generator_identity_variables = [
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
]
generator_default_variables = {
}
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
locals = {
'target_list': target_list,
'target_dicts': target_dicts,
'data': data,
}
# Use a banner that looks like the stock Python one and like what
# code.interact uses by default, but tack on something to indicate what
# locals are available, and identify gypsh.
banner='Python %s on %s\nlocals.keys() = %s\ngypsh' % \
(sys.version, sys.platform, repr(sorted(locals.keys())))
code.interact(banner, local=locals)
| bsd-3-clause | 7,743,603,149,825,352,000 | 28.732143 | 79 | 0.703904 | false |
jarrahwu/tornado | tornado/log.py | 82 | 9819 | #!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Logging support for Tornado.
Tornado uses three logger streams:
* ``tornado.access``: Per-request logging for Tornado's HTTP servers (and
potentially other servers in the future)
* ``tornado.application``: Logging of errors from application code (i.e.
uncaught exceptions from callbacks)
* ``tornado.general``: General-purpose logging, including any errors
or warnings from Tornado itself.
These streams may be configured independently using the standard library's
`logging` module. For example, you may wish to send ``tornado.access`` logs
to a separate file for analysis.
"""
from __future__ import absolute_import, division, print_function, with_statement
import logging
import logging.handlers
import sys
from tornado.escape import _unicode
from tornado.util import unicode_type, basestring_type
try:
import curses
except ImportError:
curses = None
# Logger objects for internal tornado use
access_log = logging.getLogger("tornado.access")
app_log = logging.getLogger("tornado.application")
gen_log = logging.getLogger("tornado.general")
def _stderr_supports_color():
color = False
if curses and hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():
try:
curses.setupterm()
if curses.tigetnum("colors") > 0:
color = True
except Exception:
pass
return color
def _safe_unicode(s):
try:
return _unicode(s)
except UnicodeDecodeError:
return repr(s)
class LogFormatter(logging.Formatter):
"""Log formatter used in Tornado.
Key features of this formatter are:
* Color support when logging to a terminal that supports it.
* Timestamps on every log line.
* Robust against str/bytes encoding problems.
This formatter is enabled automatically by
`tornado.options.parse_command_line` (unless ``--logging=none`` is
used).
"""
DEFAULT_FORMAT = '%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s'
DEFAULT_DATE_FORMAT = '%y%m%d %H:%M:%S'
DEFAULT_COLORS = {
logging.DEBUG: 4, # Blue
logging.INFO: 2, # Green
logging.WARNING: 3, # Yellow
logging.ERROR: 1, # Red
}
def __init__(self, color=True, fmt=DEFAULT_FORMAT,
datefmt=DEFAULT_DATE_FORMAT, colors=DEFAULT_COLORS):
r"""
:arg bool color: Enables color support.
:arg string fmt: Log message format.
It will be applied to the attributes dict of log records. The
text between ``%(color)s`` and ``%(end_color)s`` will be colored
depending on the level if color support is on.
:arg dict colors: color mappings from logging level to terminal color
code
:arg string datefmt: Datetime format.
          Used for formatting the ``%(asctime)s`` placeholder in ``fmt``.
.. versionchanged:: 3.2
Added ``fmt`` and ``datefmt`` arguments.
"""
logging.Formatter.__init__(self, datefmt=datefmt)
self._fmt = fmt
self._colors = {}
if color and _stderr_supports_color():
# The curses module has some str/bytes confusion in
# python3. Until version 3.2.3, most methods return
# bytes, but only accept strings. In addition, we want to
# output these strings with the logging module, which
# works with unicode strings. The explicit calls to
# unicode() below are harmless in python2 but will do the
# right conversion in python 3.
fg_color = (curses.tigetstr("setaf") or
curses.tigetstr("setf") or "")
if (3, 0) < sys.version_info < (3, 2, 3):
fg_color = unicode_type(fg_color, "ascii")
for levelno, code in colors.items():
self._colors[levelno] = unicode_type(curses.tparm(fg_color, code), "ascii")
self._normal = unicode_type(curses.tigetstr("sgr0"), "ascii")
else:
self._normal = ''
def format(self, record):
try:
message = record.getMessage()
assert isinstance(message, basestring_type) # guaranteed by logging
# Encoding notes: The logging module prefers to work with character
# strings, but only enforces that log messages are instances of
# basestring. In python 2, non-ascii bytestrings will make
# their way through the logging framework until they blow up with
# an unhelpful decoding error (with this formatter it happens
# when we attach the prefix, but there are other opportunities for
# exceptions further along in the framework).
#
# If a byte string makes it this far, convert it to unicode to
# ensure it will make it out to the logs. Use repr() as a fallback
# to ensure that all byte strings can be converted successfully,
# but don't do it by default so we don't add extra quotes to ascii
# bytestrings. This is a bit of a hacky place to do this, but
# it's worth it since the encoding errors that would otherwise
# result are so useless (and tornado is fond of using utf8-encoded
            # byte strings wherever possible).
record.message = _safe_unicode(message)
except Exception as e:
record.message = "Bad message (%r): %r" % (e, record.__dict__)
record.asctime = self.formatTime(record, self.datefmt)
if record.levelno in self._colors:
record.color = self._colors[record.levelno]
record.end_color = self._normal
else:
record.color = record.end_color = ''
formatted = self._fmt % record.__dict__
if record.exc_info:
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
# exc_text contains multiple lines. We need to _safe_unicode
# each line separately so that non-utf8 bytes don't cause
# all the newlines to turn into '\n'.
lines = [formatted.rstrip()]
lines.extend(_safe_unicode(ln) for ln in record.exc_text.split('\n'))
formatted = '\n'.join(lines)
return formatted.replace("\n", "\n ")
def enable_pretty_logging(options=None, logger=None):
"""Turns on formatted logging output as configured.
This is called automatically by `tornado.options.parse_command_line`
and `tornado.options.parse_config_file`.
"""
if options is None:
from tornado.options import options
if options.logging is None or options.logging.lower() == 'none':
return
if logger is None:
logger = logging.getLogger()
logger.setLevel(getattr(logging, options.logging.upper()))
if options.log_file_prefix:
channel = logging.handlers.RotatingFileHandler(
filename=options.log_file_prefix,
maxBytes=options.log_file_max_size,
backupCount=options.log_file_num_backups)
channel.setFormatter(LogFormatter(color=False))
logger.addHandler(channel)
if (options.log_to_stderr or
(options.log_to_stderr is None and not logger.handlers)):
# Set up color if we are in a tty and curses is installed
channel = logging.StreamHandler()
channel.setFormatter(LogFormatter())
logger.addHandler(channel)
def define_logging_options(options=None):
"""Add logging-related flags to ``options``.
These options are present automatically on the default options instance;
this method is only necessary if you have created your own `.OptionParser`.
.. versionadded:: 4.2
This function existed in prior versions but was broken and undocumented until 4.2.
"""
if options is None:
# late import to prevent cycle
from tornado.options import options
options.define("logging", default="info",
help=("Set the Python log level. If 'none', tornado won't touch the "
"logging configuration."),
metavar="debug|info|warning|error|none")
options.define("log_to_stderr", type=bool, default=None,
help=("Send log output to stderr (colorized if possible). "
"By default use stderr if --log_file_prefix is not set and "
"no other logging is configured."))
options.define("log_file_prefix", type=str, default=None, metavar="PATH",
help=("Path prefix for log files. "
"Note that if you are running multiple tornado processes, "
"log_file_prefix must be different for each of them (e.g. "
"include the port number)"))
options.define("log_file_max_size", type=int, default=100 * 1000 * 1000,
help="max size of log files before rollover")
options.define("log_file_num_backups", type=int, default=10,
help="number of log files to keep")
options.add_parse_callback(lambda: enable_pretty_logging(options))
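# Editor's sketch (not part of Tornado): the per-stream routing mentioned in
# the module docstring -- send ``tornado.access`` to its own file while the
# other streams keep their handlers.  The file name is illustrative.
def _route_access_log_to_file(path="access.log"):
    handler = logging.FileHandler(path)
    handler.setFormatter(LogFormatter(color=False))
    access_log.addHandler(handler)
    access_log.propagate = False  # keep access lines out of the root logger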
| apache-2.0 | -7,631,701,674,883,525,000 | 40.256303 | 109 | 0.633771 | false |
yodalee/servo | tests/wpt/web-platform-tests/eventsource/resources/cors-cookie.py | 248 | 1220 | from datetime import datetime
def main(request, response):
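    # Fixture flow over three requests from the same EventSource:
    #   no Last-Event-Id -> set the cookie, send event id 1 (retry: 200ms);
    #   Last-Event-Id: 1 -> expire the cookie, send event id 2;
    #   anything else    -> answer with Content-Type "stop" to end the test.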
last_event_id = request.headers.get("Last-Event-Id", "")
ident = request.GET.first('ident', "test")
cookie = "COOKIE" if ident in request.cookies else "NO_COOKIE"
origin = request.GET.first('origin', request.headers["origin"])
credentials = request.GET.first('credentials', 'true')
headers = []
if origin != 'none':
headers.append(("Access-Control-Allow-Origin", origin));
if credentials != 'none':
headers.append(("Access-Control-Allow-Credentials", credentials));
if last_event_id == '':
headers.append(("Content-Type", "text/event-stream"))
response.set_cookie(ident, "COOKIE")
data = "id: 1\nretry: 200\ndata: first %s\n\n" % cookie
elif last_event_id == '1':
headers.append(("Content-Type", "text/event-stream"))
long_long_time_ago = datetime.now().replace(year=2001, month=7, day=27)
response.set_cookie(ident, "COOKIE", expires=long_long_time_ago)
data = "id: 2\ndata: second %s\n\n" % cookie
else:
headers.append(("Content-Type", "stop"))
data = "data: " + last_event_id + cookie + "\n\n";
return headers, data
| mpl-2.0 | 7,978,842,802,615,347,000 | 38.354839 | 79 | 0.621311 | false |
jhonatajh/mtasa-blue | vendor/google-breakpad/src/tools/gyp/test/configurations/inheritance/gyptest-inheritance.py | 430 | 1047 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies build of an executable in two different configurations.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('configurations.gyp')
test.set_configuration('Release')
test.build('configurations.gyp')
test.run_built_executable('configurations',
stdout=('Base configuration\n'
'Common configuration\n'
'Common2 configuration\n'
'Release configuration\n'))
test.set_configuration('Debug')
test.build('configurations.gyp')
test.run_built_executable('configurations',
stdout=('Base configuration\n'
'Common configuration\n'
'Common2 configuration\n'
'Debug configuration\n'))
test.pass_test()
| gpl-3.0 | 8,164,308,533,853,478,000 | 30.727273 | 72 | 0.577841 | false |
danieljaouen/ansible | lib/ansible/module_utils/scaleway.py | 6 | 4276 | import json
import sys
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.urls import fetch_url
def scaleway_argument_spec():
return dict(
api_token=dict(required=True, fallback=(env_fallback, ['SCW_TOKEN', 'SCW_API_KEY', 'SCW_OAUTH_TOKEN', 'SCW_API_TOKEN']),
no_log=True, aliases=['oauth_token']),
api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://api.scaleway.com', aliases=['base_url']),
api_timeout=dict(type='int', default=30, aliases=['timeout']),
validate_certs=dict(default=True, type='bool'),
)
def payload_from_object(scw_object):
return dict(
(k, v)
for k, v in scw_object.items()
if k != 'id' and v is not None
)
class ScalewayException(Exception):
    def __init__(self, message):
        super(ScalewayException, self).__init__(message)
        self.message = message
class Response(object):
def __init__(self, resp, info):
self.body = None
if resp:
self.body = resp.read()
self.info = info
@property
def json(self):
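        # On HTTP errors fetch_url leaves the payload in info['body'] rather
        # than in the response object, so both places must be checked.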
if not self.body:
if "body" in self.info:
return json.loads(self.info["body"])
return None
try:
return json.loads(self.body)
except ValueError:
return None
@property
def status_code(self):
return self.info["status"]
@property
def ok(self):
return self.status_code in (200, 201, 202, 204)
class Scaleway(object):
def __init__(self, module):
self.module = module
self.headers = {
'X-Auth-Token': self.module.params.get('api_token'),
'User-Agent': self.get_user_agent_string(module),
'Content-type': 'application/json',
}
self.name = None
def get_resources(self):
results = self.get('/%s' % self.name)
if not results.ok:
raise ScalewayException('Error fetching {0} ({1}) [{2}: {3}]'.format(
self.name, '%s/%s' % (self.module.params.get('api_url'), self.name),
results.status_code, results.json['message']
))
return results.json.get(self.name)
def _url_builder(self, path):
if path[0] == '/':
path = path[1:]
return '%s/%s' % (self.module.params.get('api_url'), path)
def send(self, method, path, data=None, headers=None):
url = self._url_builder(path)
data = self.module.jsonify(data)
if headers is not None:
self.headers.update(headers)
resp, info = fetch_url(
self.module, url, data=data, headers=self.headers, method=method,
timeout=self.module.params.get('api_timeout')
)
        # Exceptions in fetch_url may result in a status of -1; this ensures
        # a proper error reaches the user in all cases.
if info['status'] == -1:
self.module.fail_json(msg=info['msg'])
return Response(resp, info)
@staticmethod
def get_user_agent_string(module):
return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ')[0])
def get(self, path, data=None, headers=None):
return self.send('GET', path, data, headers)
def put(self, path, data=None, headers=None):
return self.send('PUT', path, data, headers)
def post(self, path, data=None, headers=None):
return self.send('POST', path, data, headers)
def delete(self, path, data=None, headers=None):
return self.send('DELETE', path, data, headers)
def patch(self, path, data=None, headers=None):
return self.send("PATCH", path, data, headers)
def update(self, path, data=None, headers=None):
return self.send("UPDATE", path, data, headers)
def warn(self, x):
self.module.warn(str(x))
SCALEWAY_LOCATION = {
'par1': {'name': 'Paris 1', 'country': 'FR', "api_endpoint": 'https://cp-par1.scaleway.com'},
'EMEA-FR-PAR1': {'name': 'Paris 1', 'country': 'FR', "api_endpoint": 'https://cp-par1.scaleway.com'},
'ams1': {'name': 'Amsterdam 1', 'country': 'NL', "api_endpoint": 'https://cp-ams1.scaleway.com'},
'EMEA-NL-EVS': {'name': 'Amsterdam 1', 'country': 'NL', "api_endpoint": 'https://cp-ams1.scaleway.com'}
}
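# Editor's sketch (not part of this module_utils): how a module might drive
# the wrapper above.  'servers' is an illustrative resource name; `module` is
# an AnsibleModule built with scaleway_argument_spec().
def _list_servers_example(module):
    api = Scaleway(module)
    api.name = 'servers'        # collection consulted by get_resources()
    return api.get_resources()  # GET <api_url>/servers, raises on failure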
| gpl-3.0 | 1,573,773,098,723,297,800 | 30.211679 | 128 | 0.582554 | false |
jfpla/odoo | addons/purchase/stock.py | 111 | 18604 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools.translate import _
class stock_move(osv.osv):
_inherit = 'stock.move'
_columns = {
'purchase_line_id': fields.many2one('purchase.order.line',
'Purchase Order Line', ondelete='set null', select=True,
readonly=True),
}
def get_price_unit(self, cr, uid, move, context=None):
""" Returns the unit price to store on the quant """
if move.purchase_line_id:
return move.price_unit
return super(stock_move, self).get_price_unit(cr, uid, move, context=context)
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
res = super(stock_move, self).write(cr, uid, ids, vals, context=context)
from openerp import workflow
if vals.get('state') in ['done', 'cancel']:
po_to_check = []
for move in self.browse(cr, uid, ids, context=context):
if move.purchase_line_id and move.purchase_line_id.order_id:
order = move.purchase_line_id.order_id
order_id = order.id
# update linked purchase order as superuser as the warehouse
# user may not have rights to access purchase.order
if self.pool.get('purchase.order').test_moves_done(cr, uid, [order_id], context=context):
workflow.trg_validate(SUPERUSER_ID, 'purchase.order', order_id, 'picking_done', cr)
if self.pool.get('purchase.order').test_moves_except(cr, uid, [order_id], context=context):
workflow.trg_validate(SUPERUSER_ID, 'purchase.order', order_id, 'picking_cancel', cr)
if order_id not in po_to_check and vals['state'] == 'cancel' and order.invoice_method == 'picking':
po_to_check.append(order_id)
# Some moves which are cancelled might be part of a PO line which is partially
# invoiced, so we check if some PO line can be set on "invoiced = True".
if po_to_check:
self.pool.get('purchase.order')._set_po_lines_invoiced(cr, uid, po_to_check, context=context)
return res
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
context = context or {}
if not default.get('split_from'):
#we don't want to propagate the link to the purchase order line except in case of move split
default['purchase_line_id'] = False
return super(stock_move, self).copy(cr, uid, id, default, context)
def _create_invoice_line_from_vals(self, cr, uid, move, invoice_line_vals, context=None):
if move.purchase_line_id:
invoice_line_vals['purchase_line_id'] = move.purchase_line_id.id
invoice_line_vals['account_analytic_id'] = move.purchase_line_id.account_analytic_id.id or False
invoice_line_id = super(stock_move, self)._create_invoice_line_from_vals(cr, uid, move, invoice_line_vals, context=context)
if context.get('inv_type') in ('in_invoice', 'in_refund') and move.purchase_line_id:
purchase_line = move.purchase_line_id
self.pool.get('purchase.order.line').write(cr, uid, [purchase_line.id], {
'invoice_lines': [(4, invoice_line_id)]
}, context=context)
self.pool.get('purchase.order').write(cr, uid, [purchase_line.order_id.id], {
'invoice_ids': [(4, invoice_line_vals['invoice_id'])],
})
purchase_line_obj = self.pool.get('purchase.order.line')
purchase_obj = self.pool.get('purchase.order')
invoice_line_obj = self.pool.get('account.invoice.line')
purchase_id = move.purchase_line_id.order_id.id
purchase_line_ids = purchase_line_obj.search(cr, uid, [('order_id', '=', purchase_id), ('invoice_lines', '=', False), '|', ('product_id', '=', False), ('product_id.type', '=', 'service')], context=context)
if purchase_line_ids:
inv_lines = []
for po_line in purchase_line_obj.browse(cr, uid, purchase_line_ids, context=context):
acc_id = purchase_obj._choose_account_from_po_line(cr, uid, po_line, context=context)
inv_line_data = purchase_obj._prepare_inv_line(cr, uid, acc_id, po_line, context=context)
inv_line_id = invoice_line_obj.create(cr, uid, inv_line_data, context=context)
inv_lines.append(inv_line_id)
po_line.write({'invoice_lines': [(4, inv_line_id)]})
invoice_line_obj.write(cr, uid, inv_lines, {'invoice_id': invoice_line_vals['invoice_id']}, context=context)
return invoice_line_id
def _get_master_data(self, cr, uid, move, company, context=None):
if context.get('inv_type') == 'in_invoice' and move.purchase_line_id:
purchase_order = move.purchase_line_id.order_id
return purchase_order.partner_id, purchase_order.create_uid.id, purchase_order.currency_id.id
if context.get('inv_type') == 'in_refund' and move.origin_returned_move_id.purchase_line_id:
purchase_order = move.origin_returned_move_id.purchase_line_id.order_id
return purchase_order.partner_id, purchase_order.create_uid.id, purchase_order.currency_id.id
elif context.get('inv_type') in ('in_invoice', 'in_refund') and move.picking_id:
# In case of an extra move, it is better to use the data from the original moves
for purchase_move in move.picking_id.move_lines:
if purchase_move.purchase_line_id:
purchase_order = purchase_move.purchase_line_id.order_id
return purchase_order.partner_id, purchase_order.create_uid.id, purchase_order.currency_id.id
partner = move.picking_id and move.picking_id.partner_id or False
code = self.get_code_from_locs(cr, uid, move, context=context)
if partner and partner.property_product_pricelist_purchase and code == 'incoming':
currency = partner.property_product_pricelist_purchase.currency_id.id
return partner, uid, currency
return super(stock_move, self)._get_master_data(cr, uid, move, company, context=context)
def _get_invoice_line_vals(self, cr, uid, move, partner, inv_type, context=None):
res = super(stock_move, self)._get_invoice_line_vals(cr, uid, move, partner, inv_type, context=context)
if inv_type == 'in_invoice' and move.purchase_line_id:
purchase_line = move.purchase_line_id
res['invoice_line_tax_id'] = [(6, 0, [x.id for x in purchase_line.taxes_id])]
res['price_unit'] = purchase_line.price_unit
elif inv_type == 'in_refund' and move.origin_returned_move_id.purchase_line_id:
purchase_line = move.origin_returned_move_id.purchase_line_id
res['invoice_line_tax_id'] = [(6, 0, [x.id for x in purchase_line.taxes_id])]
res['price_unit'] = purchase_line.price_unit
return res
def _get_moves_taxes(self, cr, uid, moves, inv_type, context=None):
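        # Supplier-invoice tax logic: moves tied to a PO line keep that
        # line's taxes; extra moves fall back to the product's supplier
        # taxes, mapped through the originating order's fiscal position.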
is_extra_move, extra_move_tax = super(stock_move, self)._get_moves_taxes(cr, uid, moves, inv_type, context=context)
if inv_type == 'in_invoice':
for move in moves:
if move.purchase_line_id:
is_extra_move[move.id] = False
extra_move_tax[move.picking_id, move.product_id] = [(6, 0, [x.id for x in move.purchase_line_id.taxes_id])]
elif move.product_id.product_tmpl_id.supplier_taxes_id:
mov_id = self.search(cr, uid, [('purchase_line_id', '!=', False), ('picking_id', '=', move.picking_id.id)], limit=1, context=context)
if mov_id:
mov = self.browse(cr, uid, mov_id[0], context=context)
fp = mov.purchase_line_id.order_id.fiscal_position
res = self.pool.get("account.invoice.line").product_id_change(cr, uid, [], move.product_id.id, None, partner_id=move.picking_id.partner_id.id, fposition_id=(fp and fp.id), type='in_invoice', context=context)
extra_move_tax[0, move.product_id] = [(6, 0, res['value']['invoice_line_tax_id'])]
return (is_extra_move, extra_move_tax)
def attribute_price(self, cr, uid, move, context=None):
"""
Attribute price to move, important in inter-company moves or receipts with only one partner
"""
# The method attribute_price of the parent class sets the price to the standard product
# price if move.price_unit is zero. We don't want this behavior in the case of a purchase
# order since we can purchase goods which are free of charge (e.g. 5 units offered if 100
# are purchased).
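        # Worked example (illustrative): a PO line of "100 units + 5 offered"
        # produces a move of 5 units with price_unit = 0.0; that zero price
        # must be preserved instead of falling back to the standard price.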
if move.purchase_line_id:
return
code = self.get_code_from_locs(cr, uid, move, context=context)
if not move.purchase_line_id and code == 'incoming' and not move.price_unit:
partner = move.picking_id and move.picking_id.partner_id or False
price = False
# If partner given, search price in its purchase pricelist
if partner and partner.property_product_pricelist_purchase:
pricelist_obj = self.pool.get("product.pricelist")
pricelist = partner.property_product_pricelist_purchase.id
price = pricelist_obj.price_get(cr, uid, [pricelist],
move.product_id.id, move.product_uom_qty, partner.id, {
'uom': move.product_uom.id,
'date': move.date,
})[pricelist]
if price:
return self.write(cr, uid, [move.id], {'price_unit': price}, context=context)
super(stock_move, self).attribute_price(cr, uid, move, context=context)
def _get_taxes(self, cr, uid, move, context=None):
if move.origin_returned_move_id.purchase_line_id.taxes_id:
return [tax.id for tax in move.origin_returned_move_id.purchase_line_id.taxes_id]
return super(stock_move, self)._get_taxes(cr, uid, move, context=context)
class stock_picking(osv.osv):
_inherit = 'stock.picking'
def _get_to_invoice(self, cr, uid, ids, name, args, context=None):
res = {}
for picking in self.browse(cr, uid, ids, context=context):
res[picking.id] = False
for move in picking.move_lines:
if move.purchase_line_id and move.purchase_line_id.order_id.invoice_method == 'picking':
if not move.move_orig_ids:
res[picking.id] = True
return res
def _get_picking_to_recompute(self, cr, uid, ids, context=None):
picking_ids = set()
for move in self.pool.get('stock.move').browse(cr, uid, ids, context=context):
if move.picking_id and move.purchase_line_id:
picking_ids.add(move.picking_id.id)
return list(picking_ids)
_columns = {
'reception_to_invoice': fields.function(_get_to_invoice, type='boolean', string='Invoiceable on incoming shipment?',
help='Does the picking contains some moves related to a purchase order invoiceable on the receipt?',
store={
'stock.move': (_get_picking_to_recompute, ['purchase_line_id', 'picking_id'], 10),
}),
}
    def _create_invoice_from_picking(self, cr, uid, picking, vals, context=None):
        invoice_id = super(stock_picking, self)._create_invoice_from_picking(cr, uid, picking, vals, context=context)
        return invoice_id
def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, move, context=None):
inv_vals = super(stock_picking, self)._get_invoice_vals(cr, uid, key, inv_type, journal_id, move, context=context)
if move.purchase_line_id and move.purchase_line_id.order_id:
purchase = move.purchase_line_id.order_id
inv_vals.update({
'fiscal_position': purchase.fiscal_position.id,
'payment_term': purchase.payment_term_id.id,
})
return inv_vals
class stock_warehouse(osv.osv):
_inherit = 'stock.warehouse'
_columns = {
'buy_to_resupply': fields.boolean('Purchase to resupply this warehouse',
help="When products are bought, they can be delivered to this warehouse"),
'buy_pull_id': fields.many2one('procurement.rule', 'Buy rule'),
}
_defaults = {
'buy_to_resupply': True,
}
def _get_buy_pull_rule(self, cr, uid, warehouse, context=None):
route_obj = self.pool.get('stock.location.route')
data_obj = self.pool.get('ir.model.data')
try:
buy_route_id = data_obj.get_object_reference(cr, uid, 'purchase', 'route_warehouse0_buy')[1]
        except Exception:
buy_route_id = route_obj.search(cr, uid, [('name', 'like', _('Buy'))], context=context)
buy_route_id = buy_route_id and buy_route_id[0] or False
if not buy_route_id:
raise osv.except_osv(_('Error!'), _('Can\'t find any generic Buy route.'))
return {
'name': self._format_routename(cr, uid, warehouse, _(' Buy'), context=context),
'location_id': warehouse.in_type_id.default_location_dest_id.id,
'route_id': buy_route_id,
'action': 'buy',
'picking_type_id': warehouse.in_type_id.id,
'warehouse_id': warehouse.id,
}
def create_routes(self, cr, uid, ids, warehouse, context=None):
pull_obj = self.pool.get('procurement.rule')
res = super(stock_warehouse, self).create_routes(cr, uid, ids, warehouse, context=context)
if warehouse.buy_to_resupply:
buy_pull_vals = self._get_buy_pull_rule(cr, uid, warehouse, context=context)
buy_pull_id = pull_obj.create(cr, uid, buy_pull_vals, context=context)
res['buy_pull_id'] = buy_pull_id
return res
def write(self, cr, uid, ids, vals, context=None):
pull_obj = self.pool.get('procurement.rule')
if isinstance(ids, (int, long)):
ids = [ids]
if 'buy_to_resupply' in vals:
if vals.get("buy_to_resupply"):
for warehouse in self.browse(cr, uid, ids, context=context):
if not warehouse.buy_pull_id:
buy_pull_vals = self._get_buy_pull_rule(cr, uid, warehouse, context=context)
buy_pull_id = pull_obj.create(cr, uid, buy_pull_vals, context=context)
vals['buy_pull_id'] = buy_pull_id
else:
for warehouse in self.browse(cr, uid, ids, context=context):
if warehouse.buy_pull_id:
buy_pull_id = pull_obj.unlink(cr, uid, warehouse.buy_pull_id.id, context=context)
        return super(stock_warehouse, self).write(cr, uid, ids, vals, context=context)
def get_all_routes_for_wh(self, cr, uid, warehouse, context=None):
all_routes = super(stock_warehouse, self).get_all_routes_for_wh(cr, uid, warehouse, context=context)
if warehouse.buy_to_resupply and warehouse.buy_pull_id and warehouse.buy_pull_id.route_id:
all_routes += [warehouse.buy_pull_id.route_id.id]
return all_routes
def _get_all_products_to_resupply(self, cr, uid, warehouse, context=None):
res = super(stock_warehouse, self)._get_all_products_to_resupply(cr, uid, warehouse, context=context)
if warehouse.buy_pull_id and warehouse.buy_pull_id.route_id:
            # Iterate over a copy, as products are removed from res inside the loop
            for product_id in list(res):
for route in self.pool.get('product.product').browse(cr, uid, product_id, context=context).route_ids:
if route.id == warehouse.buy_pull_id.route_id.id:
res.remove(product_id)
break
return res
def _handle_renaming(self, cr, uid, warehouse, name, code, context=None):
res = super(stock_warehouse, self)._handle_renaming(cr, uid, warehouse, name, code, context=context)
pull_obj = self.pool.get('procurement.rule')
#change the buy pull rule name
if warehouse.buy_pull_id:
pull_obj.write(cr, uid, warehouse.buy_pull_id.id, {'name': warehouse.buy_pull_id.name.replace(warehouse.name, name, 1)}, context=context)
return res
def change_route(self, cr, uid, ids, warehouse, new_reception_step=False, new_delivery_step=False, context=None):
res = super(stock_warehouse, self).change_route(cr, uid, ids, warehouse, new_reception_step=new_reception_step, new_delivery_step=new_delivery_step, context=context)
if warehouse.in_type_id.default_location_dest_id != warehouse.buy_pull_id.location_id:
self.pool.get('procurement.rule').write(cr, uid, warehouse.buy_pull_id.id, {'location_id': warehouse.in_type_id.default_location_dest_id.id}, context=context)
return res
| agpl-3.0 | -2,372,116,081,887,773,000 | 57.319749 | 231 | 0.599065 | false |
ojengwa/grr | lib/flows/general/timelines.py | 5 | 2754 | #!/usr/bin/env python
# Copyright 2011 Google Inc. All Rights Reserved.
"""Calculates timelines from the client."""
from grr.lib import aff4
from grr.lib import data_store
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import utils
from grr.proto import flows_pb2
class MACTimesArgs(rdfvalue.RDFProtoStruct):
protobuf = flows_pb2.MACTimesArgs
class MACTimes(flow.GRRFlow):
"""Calculate the MAC times from objects in the VFS."""
category = "/Timeline/"
behaviours = flow.GRRFlow.behaviours + "BASIC"
args_type = MACTimesArgs
@flow.StateHandler(next_state="CreateTimeline")
def Start(self):
"""This could take a while so we just schedule for the worker."""
self.state.Register("urn", self.client_id.Add(self.args.path))
if self.runner.output is not None:
self.runner.output = aff4.FACTORY.Create(
self.runner.output.urn, "GRRTimeSeries", token=self.token)
self.runner.output.Set(
self.runner.output.Schema.DESCRIPTION(
"Timeline {0}".format(self.args.path)))
# Main work done in another process.
self.CallState(next_state="CreateTimeline")
def _ListVFSChildren(self, fds):
"""Recursively iterate over all children of the AFF4Objects in fds."""
child_urns = []
    while True:
direct_children = []
for _, children in aff4.FACTORY.MultiListChildren(
fds, token=self.token):
direct_children.extend(children)
# Break if there are no children at this level.
if not direct_children:
break
child_urns.extend(direct_children)
# Now get the next lower level of children.
fds = direct_children
return child_urns
@flow.StateHandler()
def CreateTimeline(self):
"""Populate the timeline with the MAC data."""
child_urns = self._ListVFSChildren([self.state.urn])
attribute = aff4.Attribute.GetAttributeByName("stat")
for subject, values in data_store.DB.MultiResolveRegex(
child_urns, attribute.predicate, token=self.token, limit=10000000):
for _, serialized, _ in values:
stat = rdfvalue.StatEntry(serialized)
event = rdfvalue.Event(source=utils.SmartUnicode(subject),
stat=stat)
# Add a new event for each MAC time if it exists.
for c in "mac":
timestamp = getattr(stat, "st_%stime" % c)
if timestamp is not None:
event.timestamp = timestamp * 1000000
event.type = "file.%stime" % c
          # We are talking about the file which is a direct child of the
          # source.
event.subject = utils.SmartUnicode(subject)
if self.runner.output is not None:
self.runner.output.AddEvent(event)
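# Example of the events produced above: a stat entry with st_mtime set to
# 1357016400 yields an Event of type "file.mtime" whose timestamp is
# 1357016400000000 (seconds are converted to microseconds).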
| apache-2.0 | 8,376,900,936,104,855,000 | 32.585366 | 75 | 0.659041 | false |
rocky/python-xdis | test_unit/test_marsh.py | 1 | 1343 | #!/usr/bin/env python
import os
import unittest
from xdis.load import load_module
def get_srcdir():
filename = os.path.normcase(os.path.dirname(os.path.abspath(__file__)))
return os.path.realpath(filename)
srcdir = get_srcdir()
class TestMarshal(unittest.TestCase):
def test_basic(self):
"""Tests xdis.load.load_module"""
# We deliberately pick a bytecode that we aren't likely to be running against
mod_file = os.path.join(get_srcdir(), '..', 'test', 'bytecode_2.5',
'02_complex.pyc')
(version, timestamp, magic_int, co, is_pypy,
source_size) = load_module(mod_file)
self.assertEqual(version, 2.5,
"Should have picked up Python version properly")
assert co.co_consts == (5j, None), "Code should have a complex constant"
mod_file = os.path.join(get_srcdir(), '..', 'test', 'bytecode_3.3',
'06_frozenset.pyc')
(version, timestamp, magic_int, co, is_pypy,
source_size) = load_module(mod_file)
expect = (0, None, 'attlist', 'linktype', 'link', 'element', 'Yep',
frozenset(['linktype', 'attlist', 'element', 'link']))
self.assertEqual(co.co_consts, expect, "Should handle frozenset")
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 632,181,724,801,595,300 | 36.305556 | 85 | 0.586001 | false |
robwarm/gpaw-symm | doc/devel/bigpicture.py | 1 | 9152 | """creates: bigpicture.svg bigpicture.png"""
import os
from math import pi, cos, sin
import numpy as np
import matplotlib
#matplotlib.use('Agg')
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
class Box:
def __init__(self, name, description=(), attributes=(), color='grey'):
self.name = name
if isinstance(description, str):
description = [description]
self.description = description
self.attributes = attributes
self.color = color
self.owns = []
self.position = None
def set_position(self, position):
self.position = np.asarray(position)
def has(self, other, name, angle=None, distance=None, x=0.4, style='<-'):
self.owns.append((other, name, x, style))
if angle is not None:
angle *= pi / 180
other.set_position(self.position +
[cos(angle) * distance, sin(angle) * distance])
def cut(size, dir):
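    """Clip the vector dir so it stays inside a box of dimensions size
    centred at the origin; used to start and end arrows at box edges
    rather than at box centres."""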
if abs(size[0] * dir[1]) < abs(size[1] * dir[0]):
x = min(max(-size[0] / 2, dir[0]), size[0] / 2)
y = x * dir[1] / dir[0]
else:
y = min(max(-size[1] / 2, dir[1]), size[1] / 2)
x = y * dir[0] / dir[1]
return x, y
class MPL:
def __init__(self, boxes):
self.boxes = boxes
def plot(self):
a4 = 100 * np.array([2**-1.75, 2**-2.25])
inch = 2.54
self.fig = plt.figure(1, a4 / inch)
self.ax = ax = self.fig.add_axes([0, 0, 1, 1], frameon=False)
ax.set_xlim(0, a4[0])
ax.set_ylim(0, a4[1])
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.add_patch(mpatches.Rectangle((22.5, 16), 6, 4, fc='orange'))
ax.text(22.7, 19.5, 'ASE package')
for b in boxes:
x, y = b.position
text = b.name
for txt in b.description:
text += '\n' + txt
for txt in b.attributes:
text += '\n' + txt
b.text = ax.text(x, y,
text,
fontsize=9,
ha='center',
va='center',
bbox=dict(boxstyle='round',
facecolor=b.color,
alpha=0.75))
self.fig.canvas.mpl_connect('draw_event', self.on_draw)
plt.savefig('bigpicture.png', dpi=50)
plt.savefig('bigpicture.svg')
os.system('cp bigpicture.svg ../_build')
def on_draw(self, event):
for b in self.boxes:
bbox = b.text.get_window_extent()
t = b.text.get_transform()
b.size = t.inverted().transform(bbox.size)
for b in self.boxes:
for other, name, s, style in b.owns:
d = other.position - b.position
p1 = b.position + cut(b.size, d)
p2 = other.position + cut(other.size, -d)
if style == '-|>':
arrowprops = dict(arrowstyle=style, fc='white')
else:
arrowprops = dict(arrowstyle=style)
self.ax.annotate('', p1, p2,
arrowprops=arrowprops)
if name:
p = (1 - s) * p1 + s * p2
self.ax.text(p[0], p[1], name, fontsize=7,
ha='center', va='center',
bbox=dict(facecolor='white', ec='white'))
self.fig.canvas.callbacks.callbacks[event.name] = {}
self.fig.canvas.draw()
return False
boxes = []
def box(*args, **kwargs):
b = Box(*args, **kwargs)
boxes.append(b)
return b
atoms = box('Atoms', [''], ['positions, numbers, cell, pbc'],
color='white')
paw = box('PAW', [], [], 'green')
scf = box('SCFLoop', [])
density = box('Density',
[r'$\tilde{n}_\sigma = \sum_{\mathbf{k}n}' +
r'|\tilde{\psi}_{\sigma\mathbf{k}n}|^2' +
r'+\frac{1}{2}\sum_a \tilde{n}_c^a$',
r'$\tilde{\rho}(\mathbf{r}) = ' +
r'\sum_\sigma\tilde{n}_\sigma + \sum_{aL}Q_L^a \hat{g}_L^a$'],
['nspins, nt_sG, nt_sg,', 'rhot_g, Q_aL, D_asp'])
mixer = box('Mixer')  # color='blue' (disabled)
hamiltonian = box('Hamiltonian',
[r'$-\frac{1}{2}\nabla^2 + \tilde{v} + ' +
r'\sum_a \sum_{i_1i_2} |\tilde{p}_{i_1}^a \rangle ' +
r'\Delta H_{i_1i_2} \langle \tilde{p}_{i_2}^a|$'],
['nspins, vt_sG, vt_sg, vHt_g, dH_asp',
'Etot, Ekin, Exc, Epot, Ebar'])
wfs = box('WaveFunctions',
[r'$\tilde{\psi}_{\sigma\mathbf{k}n}(\mathbf{r})$'],
['nspins, ibzk_qc, mynbands',
'kpt_comm, band_comm'], color='magenta')
gd = box('GridDescriptor', ['(coarse grid)'],
['cell_cv, N_c,', 'pbc_c, dv, comm'], 'orange')
finegd = box('GridDescriptor', '(fine grid)',
['cell_cv, N_c, pbc_c, dv, comm'], 'orange')
rgd = box('RadialGridDescriptor', [], ['r_g, dr_g, rcut'], color='orange')
setups = box('Setups', ['', '', '', ''], ['nvalence, nao, Eref, corecharge'])
xccorrection = box('XCCorrection')
nct = box('LFC', r'$\tilde{n}_c^a(r)$', [], 'red')
vbar = box('LFC', r'$\bar{v}^a(r)$', [], 'red')
ghat = box('LFC', r'$\hat{g}_{\ell m}^a(\mathbf{r})$', [], 'red')
fd = box('FDWaveFunctions',
r"""$\tilde{\psi}_{\sigma\mathbf{k}n}(ih,jh,kh)$""",
[], 'magenta')
pt = box('LFC', r'$\tilde{p}_i^a(\mathbf{r})$', [], 'red')
lcao = box('LCAOWaveFunctions',
r"$\tilde{\psi}_{\sigma\mathbf{k}n}(\mathbf{r})=\sum_{\mu\mathbf{R}} C_{\sigma\mathbf{k}n\mu} \Phi_\mu(\mathbf{r} - \mathbf{R}) \exp(i\mathbf{k}\cdot\mathbf{R})$",
['S_qMM, T_qMM, P_aqMi'], 'magenta')
atoms0 = box('Atoms', '(copy)', ['positions, numbers, cell, pbc'],
color='grey')
parameters = box('InputParameters', [], ['xc, nbands, ...'])
forces = box('ForceCalculator')
occupations = box(
'OccupationNumbers',
r'$\epsilon_{\sigma\mathbf{k}n} \rightarrow f_{\sigma\mathbf{k}n}$')
poisson = box('PoissonSolver',
r'$\nabla^2 \tilde{v}_H(\mathbf{r}) = -4\pi \tilde{\rho}(\mathbf{r})$')
eigensolver = box('EigenSolver')
symmetry = box('Symmetry')
restrictor = box('Transformer', '(fine -> coarse)',
color='yellow')
interpolator = box('Transformer', '(coarse -> fine)',
color='yellow')
xc = box('XCFunctional')
kin = box('FDOperator', r'$-\frac{1}{2}\nabla^2$')
hsoperator = box('HSOperator',
[r"$\langle \psi_n | A | \psi_{n'} \rangle$",
r"$\sum_{n'}U_{nn'}|\tilde{\psi}_{n'}\rangle$"])
overlap = box('Overlap')
basisfunctions = box('BasisFunctions', r'$\Phi_\mu(\mathbf{r})$',
color='red')
tci = box('TwoCenterIntegrals',
r'$\langle\Phi_\mu|\Phi_\nu\rangle,'
r'\langle\Phi_\mu|\hat{T}|\Phi_\nu\rangle,'
r'\langle\tilde{p}^a_i|\Phi_\mu\rangle$')
atoms.set_position((25, 18.3))
atoms.has(paw, 'calculator', -160, 7.5)
paw.has(scf, 'scf', 160, 4, x=0.48)
paw.has(density, 'density', -150, 14, 0.23)
paw.has(hamiltonian, 'hamiltonian', 180, 10, 0.3)
paw.has(wfs, 'wfs', -65, 5.5, x=0.48)
paw.has(atoms0, 'atoms', 9, 7.5)
paw.has(parameters, 'input_parameters', 90, 4)
paw.has(forces, 'forces', 50, 4)
paw.has(occupations, 'occupations', 136, 4)
density.has(mixer, 'mixer', 130, 3.3)
density.has(gd, 'gd', x=0.33)
density.has(finegd, 'finegd', 76, 3.5)
density.has(setups, 'setups', 0, 7, 0.45)
density.has(nct, 'nct', -90, 3)
density.has(ghat, 'ghat', -130, 3.4)
density.has(interpolator, 'interpolator', -45, 4)
hamiltonian.has(restrictor, 'restrictor', 40, 4)
hamiltonian.has(xc, 'xc', 160, 6, x=0.6)
hamiltonian.has(vbar, 'vbar', 80, 4)
hamiltonian.has(setups, 'setups', x=0.3)
hamiltonian.has(gd, 'gd', x=0.45)
hamiltonian.has(finegd, 'finegd')
hamiltonian.has(poisson, 'poissonsolver', 130, 4)
wfs.has(gd, 'gd', 160, 4.8, x=0.48)
wfs.has(setups, 'setups', x=0.4)
wfs.has(lcao, None, -55, 5.9, style='-|>')
wfs.has(fd, None, -112, 5.0, style='-|>')
wfs.has(eigensolver, 'eigensolver', 30, 5, x=0.6)
wfs.has(symmetry, 'symmetry', 80, 3)
fd.has(pt, 'pt', -45, 3.6)
fd.has(kin, 'kin', -90, 3)
fd.has(overlap, 'overlap', -135, 3.5)
lcao.has(basisfunctions, 'basis_functions', -50, 3.5)
lcao.has(tci, 'tci', -90, 4.2)
overlap.has(setups, 'setups', x=0.4)
overlap.has(hsoperator, 'operator', -115, 2.5, x=0.41)
for i in range(3):
setup = box('Setup', [],
['Z, Nv, Nc, pt_j, nct,', 'vbar, ghat_l, Delta_pl'],
'blue')
setup.set_position(setups.position +
(0.9 - i * 0.14, 0.3 - i * 0.14))
setup.has(xccorrection, 'xc_correction', -110, 3.7)
xccorrection.has(rgd, 'rgd', -105, 2.4, 0.4)
kpts = [box('KPoint', [], ['psit_nG, C_nM,', 'eps_n, f_n, P_ani'],
color='cyan') for i in range(3)]
wfs.has(kpts[1], 'kpt_u', 0, 5.4, 0.48)
kpts[0].set_position(kpts[1].position - 0.14)
kpts[2].set_position(kpts[1].position + 0.14)
MPL(boxes).plot()
| gpl-3.0 | -7,501,790,693,817,104,000 | 36.508197 | 174 | 0.51792 | false |
mtp1376/youtube-dl | youtube_dl/extractor/liveleak.py | 14 | 4214 | from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import int_or_none
class LiveLeakIE(InfoExtractor):
_VALID_URL = r'https?://(?:\w+\.)?liveleak\.com/view\?(?:.*?)i=(?P<id>[\w_]+)(?:.*)'
_TESTS = [{
'url': 'http://www.liveleak.com/view?i=757_1364311680',
'md5': '50f79e05ba149149c1b4ea961223d5b3',
'info_dict': {
'id': '757_1364311680',
'ext': 'flv',
'description': 'extremely bad day for this guy..!',
'uploader': 'ljfriel2',
'title': 'Most unlucky car accident'
}
}, {
'url': 'http://www.liveleak.com/view?i=f93_1390833151',
'md5': 'b13a29626183c9d33944e6a04f41aafc',
'info_dict': {
'id': 'f93_1390833151',
'ext': 'mp4',
'description': 'German Television Channel NDR does an exclusive interview with Edward Snowden.\r\nUploaded on LiveLeak cause German Television thinks the rest of the world isn\'t intereseted in Edward Snowden.',
'uploader': 'ARD_Stinkt',
'title': 'German Television does first Edward Snowden Interview (ENGLISH)',
}
}, {
'url': 'http://www.liveleak.com/view?i=4f7_1392687779',
'md5': '42c6d97d54f1db107958760788c5f48f',
'info_dict': {
'id': '4f7_1392687779',
'ext': 'mp4',
'description': "The guy with the cigarette seems amazingly nonchalant about the whole thing... I really hope my friends' reactions would be a bit stronger.\r\n\r\nAction-go to 0:55.",
'uploader': 'CapObveus',
'title': 'Man is Fatally Struck by Reckless Car While Packing up a Moving Truck',
'age_limit': 18,
}
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_title = self._og_search_title(webpage).replace('LiveLeak.com -', '').strip()
video_description = self._og_search_description(webpage)
video_uploader = self._html_search_regex(
r'By:.*?(\w+)</a>', webpage, 'uploader', fatal=False)
age_limit = int_or_none(self._search_regex(
r'you confirm that you are ([0-9]+) years and over.',
webpage, 'age limit', default=None))
sources_raw = self._search_regex(
r'(?s)sources:\s*(\[.*?\]),', webpage, 'video URLs', default=None)
if sources_raw is None:
alt_source = self._search_regex(
r'(file: ".*?"),', webpage, 'video URL', default=None)
if alt_source:
sources_raw = '[{ %s}]' % alt_source
else:
# Maybe an embed?
embed_url = self._search_regex(
r'<iframe[^>]+src="(http://www.prochan.com/embed\?[^"]+)"',
webpage, 'embed URL')
return {
'_type': 'url_transparent',
'url': embed_url,
'id': video_id,
'title': video_title,
'description': video_description,
'uploader': video_uploader,
'age_limit': age_limit,
}
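        # Illustrative example of the transformation below: the page embeds
        #   sources: [{ file: "http://...", label: "720p"}],
        # and the substitution quotes the bare keys so json.loads accepts it:
        #   [{ "file": "http://...", "label": "720p"}]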
sources_json = re.sub(r'\s([a-z]+):\s', r'"\1": ', sources_raw)
sources = json.loads(sources_json)
formats = [{
'format_id': '%s' % i,
'format_note': s.get('label'),
'url': s['file'],
} for i, s in enumerate(sources)]
for i, s in enumerate(sources):
orig_url = s['file'].replace('.h264_base.mp4', '')
if s['file'] != orig_url:
formats.append({
'format_id': 'original-%s' % i,
'format_note': s.get('label'),
'url': orig_url,
'preference': 1,
})
self._sort_formats(formats)
return {
'id': video_id,
'title': video_title,
'description': video_description,
'uploader': video_uploader,
'formats': formats,
'age_limit': age_limit,
}
| unlicense | -277,313,390,138,315,800 | 39.133333 | 223 | 0.508068 | false |
yekeqiang/luigi | test/instance_wrap_test.py | 4 | 3011 | # Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import luigi
from luigi.mock import MockFile
import unittest
import decimal
import datetime
import luigi.notifications
luigi.notifications.DEBUG = True
File = MockFile
class Report(luigi.Task):
date = luigi.DateParameter()
def run(self):
f = self.output().open('w')
f.write('10.0 USD\n')
f.write('4.0 EUR\n')
f.write('3.0 USD\n')
f.close()
def output(self):
return File(self.date.strftime('/tmp/report-%Y-%m-%d'))
class ReportReader(luigi.Task):
date = luigi.DateParameter()
def requires(self):
return Report(self.date)
def run(self):
self.lines = list(self.input().open('r').readlines())
def get_line(self, line):
amount, currency = self.lines[line].strip().split()
return decimal.Decimal(amount), currency
def complete(self):
return False
class CurrencyExchanger(luigi.Task):
task = luigi.Parameter()
currency_to = luigi.Parameter()
exchange_rates = {('USD', 'USD'): decimal.Decimal(1),
('EUR', 'USD'): decimal.Decimal('1.25')}
def requires(self):
return self.task # Note that you still need to state this explicitly
def get_line(self, line):
amount, currency_from = self.task.get_line(line)
return amount * self.exchange_rates[(currency_from, self.currency_to)], self.currency_to
def complete(self):
return False
class InstanceWrapperTest(unittest.TestCase):
''' This test illustrates that tasks can have tasks as parameters
This is a more complicated variant of factorial_test.py which is an example of
tasks communicating directly with other tasks. In this case, a task takes another
task as a parameter and wraps it.
Also see wrap_test.py for an example of a task class wrapping another task class.
    Not the most useful pattern, but there have actually been a few cases where it was
    pretty handy to be able to do that. I'm adding it as a unit test to make sure that
new code doesn't break the expected behavior.
'''
def test(self):
d = datetime.date(2012, 1, 1)
r = ReportReader(d)
ex = CurrencyExchanger(r, 'USD')
w = luigi.worker.Worker()
w.add(ex)
w.run()
w.stop()
self.assertEqual(ex.get_line(0), (decimal.Decimal('10.0'), 'USD'))
self.assertEqual(ex.get_line(1), (decimal.Decimal('5.0'), 'USD'))
| apache-2.0 | -8,421,584,842,355,547,000 | 30.041237 | 96 | 0.664895 | false |
DESHRAJ/fjord | vendor/packages/html5lib/html5lib/html5parser.py | 95 | 115775 | try:
frozenset
except NameError:
# Import from the sets module for python 2.3
from sets import Set as set
from sets import ImmutableSet as frozenset
try:
any
except NameError:
# Implement 'any' for python 2.4 and previous
def any(iterable):
for element in iterable:
if element:
return True
return False
try:
"abc".startswith(("a", "b"))
def startswithany(str, prefixes):
return str.startswith(prefixes)
except TypeError:
# Python 2.4 doesn't accept a tuple as argument to string startswith
def startswithany(str, prefixes):
for prefix in prefixes:
if str.startswith(prefix):
return True
return False
import sys
import types
import inputstream
import tokenizer
import treebuilders
from treebuilders._base import Marker
from treebuilders import simpletree
import utils
import constants
from constants import spaceCharacters, asciiUpper2Lower
from constants import formattingElements, specialElements
from constants import headingElements, tableInsertModeElements
from constants import cdataElements, rcdataElements, voidElements
from constants import tokenTypes, ReparseException, namespaces, spaceCharacters
from constants import htmlIntegrationPointElements, mathmlTextIntegrationPointElements
def parse(doc, treebuilder="simpletree", encoding=None,
namespaceHTMLElements=True):
"""Parse a string or file-like object into a tree"""
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parse(doc, encoding=encoding)
def parseFragment(doc, container="div", treebuilder="simpletree", encoding=None,
namespaceHTMLElements=True):
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parseFragment(doc, container=container, encoding=encoding)
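# Minimal usage sketch (the file name here is hypothetical):
#
#   with open("page.html") as f:
#       document = parse(f, treebuilder="simpletree")
#   fragment = parseFragment("<td>cell</td>", container="tr")
#
# Both helpers return nodes from the chosen treebuilder backend.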
def method_decorator_metaclass(function):
class Decorated(type):
def __new__(meta, classname, bases, classDict):
for attributeName, attribute in classDict.iteritems():
if type(attribute) == types.FunctionType:
attribute = function(attribute)
classDict[attributeName] = attribute
return type.__new__(meta, classname, bases, classDict)
return Decorated
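# In effect, a class declaring ``__metaclass__ = method_decorator_metaclass(f)``
# gets every plain function in its class body replaced by ``f(function)`` at
# class-creation time; getPhases() below uses this (via getMetaclass) to attach
# the debug logger to every Phase method.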
class HTMLParser(object):
"""HTML parser. Generates a tree structure from a stream of (possibly
malformed) HTML"""
def __init__(self, tree = simpletree.TreeBuilder,
tokenizer = tokenizer.HTMLTokenizer, strict = False,
namespaceHTMLElements = True, debug=False):
"""
strict - raise an exception when a parse error is encountered
tree - a treebuilder class controlling the type of tree that will be
returned. Built in treebuilders can be accessed through
html5lib.treebuilders.getTreeBuilder(treeType)
tokenizer - a class that provides a stream of tokens to the treebuilder.
This may be replaced for e.g. a sanitizer which converts some tags to
text
"""
# Raise an exception on the first error encountered
self.strict = strict
self.tree = tree(namespaceHTMLElements)
self.tokenizer_class = tokenizer
self.errors = []
self.phases = dict([(name, cls(self, self.tree)) for name, cls in
getPhases(debug).iteritems()])
def _parse(self, stream, innerHTML=False, container="div",
encoding=None, parseMeta=True, useChardet=True, **kwargs):
self.innerHTMLMode = innerHTML
self.container = container
self.tokenizer = self.tokenizer_class(stream, encoding=encoding,
parseMeta=parseMeta,
useChardet=useChardet,
parser=self, **kwargs)
self.reset()
while True:
try:
self.mainLoop()
break
            except ReparseException:
self.reset()
def reset(self):
self.tree.reset()
self.firstStartTag = False
self.errors = []
self.log = [] #only used with debug mode
# "quirks" / "limited quirks" / "no quirks"
self.compatMode = "no quirks"
if self.innerHTMLMode:
self.innerHTML = self.container.lower()
if self.innerHTML in cdataElements:
self.tokenizer.state = self.tokenizer.rcdataState
elif self.innerHTML in rcdataElements:
self.tokenizer.state = self.tokenizer.rawtextState
elif self.innerHTML == 'plaintext':
self.tokenizer.state = self.tokenizer.plaintextState
else:
# state already is data state
# self.tokenizer.state = self.tokenizer.dataState
pass
self.phase = self.phases["beforeHtml"]
self.phase.insertHtmlElement()
self.resetInsertionMode()
else:
self.innerHTML = False
self.phase = self.phases["initial"]
self.lastPhase = None
self.beforeRCDataPhase = None
self.framesetOK = True
def isHTMLIntegrationPoint(self, element):
if (element.name == "annotation-xml" and
element.namespace == namespaces["mathml"]):
return ("encoding" in element.attributes and
element.attributes["encoding"].translate(
asciiUpper2Lower) in
("text/html", "application/xhtml+xml"))
else:
return (element.namespace, element.name) in htmlIntegrationPointElements
def isMathMLTextIntegrationPoint(self, element):
return (element.namespace, element.name) in mathmlTextIntegrationPointElements
def mainLoop(self):
CharactersToken = tokenTypes["Characters"]
SpaceCharactersToken = tokenTypes["SpaceCharacters"]
StartTagToken = tokenTypes["StartTag"]
EndTagToken = tokenTypes["EndTag"]
CommentToken = tokenTypes["Comment"]
DoctypeToken = tokenTypes["Doctype"]
ParseErrorToken = tokenTypes["ParseError"]
for token in self.normalizedTokens():
new_token = token
while new_token is not None:
currentNode = self.tree.openElements[-1] if self.tree.openElements else None
currentNodeNamespace = currentNode.namespace if currentNode else None
currentNodeName = currentNode.name if currentNode else None
type = new_token["type"]
if type == ParseErrorToken:
self.parseError(new_token["data"], new_token.get("datavars", {}))
new_token = None
else:
if (len(self.tree.openElements) == 0 or
currentNodeNamespace == self.tree.defaultNamespace or
(self.isMathMLTextIntegrationPoint(currentNode) and
((type == StartTagToken and
token["name"] not in frozenset(["mglyph", "malignmark"])) or
type in (CharactersToken, SpaceCharactersToken))) or
(currentNodeNamespace == namespaces["mathml"] and
currentNodeName == "annotation-xml" and
token["name"] == "svg") or
(self.isHTMLIntegrationPoint(currentNode) and
type in (StartTagToken, CharactersToken, SpaceCharactersToken))):
phase = self.phase
else:
phase = self.phases["inForeignContent"]
if type == CharactersToken:
new_token = phase.processCharacters(new_token)
elif type == SpaceCharactersToken:
new_token= phase.processSpaceCharacters(new_token)
elif type == StartTagToken:
new_token = phase.processStartTag(new_token)
elif type == EndTagToken:
new_token = phase.processEndTag(new_token)
elif type == CommentToken:
new_token = phase.processComment(new_token)
elif type == DoctypeToken:
new_token = phase.processDoctype(new_token)
if (type == StartTagToken and token["selfClosing"]
and not token["selfClosingAcknowledged"]):
self.parseError("non-void-element-with-trailing-solidus",
{"name":token["name"]})
# When the loop finishes it's EOF
reprocess = True
phases = []
while reprocess:
phases.append(self.phase)
reprocess = self.phase.processEOF()
if reprocess:
assert self.phase not in phases
def normalizedTokens(self):
for token in self.tokenizer:
yield self.normalizeToken(token)
def parse(self, stream, encoding=None, parseMeta=True, useChardet=True):
"""Parse a HTML document into a well-formed tree
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
self._parse(stream, innerHTML=False, encoding=encoding,
parseMeta=parseMeta, useChardet=useChardet)
return self.tree.getDocument()
def parseFragment(self, stream, container="div", encoding=None,
parseMeta=False, useChardet=True):
"""Parse a HTML fragment into a well-formed tree fragment
container - name of the element we're setting the innerHTML property
if set to None, default to 'div'
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
self._parse(stream, True, container=container, encoding=encoding)
return self.tree.getFragment()
def parseError(self, errorcode="XXX-undefined-error", datavars={}):
# XXX The idea is to make errorcode mandatory.
self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
if self.strict:
raise ParseError
def normalizeToken(self, token):
""" HTML5 specific normalizations to the token stream """
if token["type"] == tokenTypes["StartTag"]:
token["data"] = dict(token["data"][::-1])
return token
def adjustMathMLAttributes(self, token):
replacements = {"definitionurl":u"definitionURL"}
for k,v in replacements.iteritems():
if k in token["data"]:
token["data"][v] = token["data"][k]
del token["data"][k]
def adjustSVGAttributes(self, token):
replacements = {
"attributename":u"attributeName",
"attributetype":u"attributeType",
"basefrequency":u"baseFrequency",
"baseprofile":u"baseProfile",
"calcmode":u"calcMode",
"clippathunits":u"clipPathUnits",
"contentscripttype":u"contentScriptType",
"contentstyletype":u"contentStyleType",
"diffuseconstant":u"diffuseConstant",
"edgemode":u"edgeMode",
"externalresourcesrequired":u"externalResourcesRequired",
"filterres":u"filterRes",
"filterunits":u"filterUnits",
"glyphref":u"glyphRef",
"gradienttransform":u"gradientTransform",
"gradientunits":u"gradientUnits",
"kernelmatrix":u"kernelMatrix",
"kernelunitlength":u"kernelUnitLength",
"keypoints":u"keyPoints",
"keysplines":u"keySplines",
"keytimes":u"keyTimes",
"lengthadjust":u"lengthAdjust",
"limitingconeangle":u"limitingConeAngle",
"markerheight":u"markerHeight",
"markerunits":u"markerUnits",
"markerwidth":u"markerWidth",
"maskcontentunits":u"maskContentUnits",
"maskunits":u"maskUnits",
"numoctaves":u"numOctaves",
"pathlength":u"pathLength",
"patterncontentunits":u"patternContentUnits",
"patterntransform":u"patternTransform",
"patternunits":u"patternUnits",
"pointsatx":u"pointsAtX",
"pointsaty":u"pointsAtY",
"pointsatz":u"pointsAtZ",
"preservealpha":u"preserveAlpha",
"preserveaspectratio":u"preserveAspectRatio",
"primitiveunits":u"primitiveUnits",
"refx":u"refX",
"refy":u"refY",
"repeatcount":u"repeatCount",
"repeatdur":u"repeatDur",
"requiredextensions":u"requiredExtensions",
"requiredfeatures":u"requiredFeatures",
"specularconstant":u"specularConstant",
"specularexponent":u"specularExponent",
"spreadmethod":u"spreadMethod",
"startoffset":u"startOffset",
"stddeviation":u"stdDeviation",
"stitchtiles":u"stitchTiles",
"surfacescale":u"surfaceScale",
"systemlanguage":u"systemLanguage",
"tablevalues":u"tableValues",
"targetx":u"targetX",
"targety":u"targetY",
"textlength":u"textLength",
"viewbox":u"viewBox",
"viewtarget":u"viewTarget",
"xchannelselector":u"xChannelSelector",
"ychannelselector":u"yChannelSelector",
"zoomandpan":u"zoomAndPan"
}
for originalName in token["data"].keys():
if originalName in replacements:
svgName = replacements[originalName]
token["data"][svgName] = token["data"][originalName]
del token["data"][originalName]
def adjustForeignAttributes(self, token):
replacements = {
"xlink:actuate":("xlink", "actuate", namespaces["xlink"]),
"xlink:arcrole":("xlink", "arcrole", namespaces["xlink"]),
"xlink:href":("xlink", "href", namespaces["xlink"]),
"xlink:role":("xlink", "role", namespaces["xlink"]),
"xlink:show":("xlink", "show", namespaces["xlink"]),
"xlink:title":("xlink", "title", namespaces["xlink"]),
"xlink:type":("xlink", "type", namespaces["xlink"]),
"xml:base":("xml", "base", namespaces["xml"]),
"xml:lang":("xml", "lang", namespaces["xml"]),
"xml:space":("xml", "space", namespaces["xml"]),
"xmlns":(None, "xmlns", namespaces["xmlns"]),
"xmlns:xlink":("xmlns", "xlink", namespaces["xmlns"])
}
for originalName in token["data"].iterkeys():
if originalName in replacements:
foreignName = replacements[originalName]
token["data"][foreignName] = token["data"][originalName]
del token["data"][originalName]
def reparseTokenNormal(self, token):
self.parser.phase()
def resetInsertionMode(self):
# The name of this method is mostly historical. (It's also used in the
# specification.)
last = False
newModes = {
"select":"inSelect",
"td":"inCell",
"th":"inCell",
"tr":"inRow",
"tbody":"inTableBody",
"thead":"inTableBody",
"tfoot":"inTableBody",
"caption":"inCaption",
"colgroup":"inColumnGroup",
"table":"inTable",
"head":"inBody",
"body":"inBody",
"frameset":"inFrameset",
"html":"beforeHead"
}
for node in self.tree.openElements[::-1]:
nodeName = node.name
new_phase = None
if node == self.tree.openElements[0]:
assert self.innerHTML
last = True
nodeName = self.innerHTML
# Check for conditions that should only happen in the innerHTML
# case
if nodeName in ("select", "colgroup", "head", "html"):
assert self.innerHTML
if not last and node.namespace != self.tree.defaultNamespace:
continue
if nodeName in newModes:
new_phase = self.phases[newModes[nodeName]]
break
elif last:
new_phase = self.phases["inBody"]
break
self.phase = new_phase
def parseRCDataRawtext(self, token, contentType):
"""Generic RCDATA/RAWTEXT Parsing algorithm
contentType - RCDATA or RAWTEXT
"""
assert contentType in ("RAWTEXT", "RCDATA")
element = self.tree.insertElement(token)
if contentType == "RAWTEXT":
self.tokenizer.state = self.tokenizer.rawtextState
else:
self.tokenizer.state = self.tokenizer.rcdataState
self.originalPhase = self.phase
self.phase = self.phases["text"]
def getPhases(debug):
def log(function):
"""Logger that records which phase processes each token"""
type_names = dict((value, key) for key, value in
constants.tokenTypes.iteritems())
def wrapped(self, *args, **kwargs):
if function.__name__.startswith("process") and len(args) > 0:
token = args[0]
                info = {"type": type_names[token['type']]}
if token['type'] in constants.tagTokenTypes:
info["name"] = token['name']
self.parser.log.append((self.parser.tokenizer.state.__name__,
self.parser.phase.__class__.__name__,
self.__class__.__name__,
function.__name__,
info))
return function(self, *args, **kwargs)
else:
return function(self, *args, **kwargs)
return wrapped
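    # Each entry appended to parser.log is a 5-tuple, for example:
    #   ("dataState", "InBodyPhase", "InBodyPhase", "processStartTag",
    #    {"type": "StartTag", "name": "div"})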
def getMetaclass(use_metaclass, metaclass_func):
if use_metaclass:
return method_decorator_metaclass(metaclass_func)
else:
return type
class Phase(object):
"""Base class for helper object that implements each phase of processing
"""
# Order should be (they can be omitted):
# * EOF
# * Comment
# * Doctype
# * SpaceCharacters
# * Characters
# * StartTag
# - startTag* methods
# * EndTag
# - endTag* methods
__metaclass__ = getMetaclass(debug, log)
def __init__(self, parser, tree):
self.parser = parser
self.tree = tree
def processEOF(self):
raise NotImplementedError
def processComment(self, token):
# For most phases the following is correct. Where it's not it will be
# overridden.
self.tree.insertComment(token, self.tree.openElements[-1])
def processDoctype(self, token):
self.parser.parseError("unexpected-doctype")
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processSpaceCharacters(self, token):
self.tree.insertText(token["data"])
def processStartTag(self, token):
return self.startTagHandler[token["name"]](token)
def startTagHtml(self, token):
if self.parser.firstStartTag == False and token["name"] == "html":
self.parser.parseError("non-html-root")
# XXX Need a check here to see if the first start tag token emitted is
# this token... If it's not, invoke self.parser.parseError().
for attr, value in token["data"].iteritems():
if attr not in self.tree.openElements[0].attributes:
self.tree.openElements[0].attributes[attr] = value
self.parser.firstStartTag = False
def processEndTag(self, token):
return self.endTagHandler[token["name"]](token)
class InitialPhase(Phase):
def processSpaceCharacters(self, token):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
correct = token["correct"]
if (name != "html" or publicId != None or
systemId != None and systemId != "about:legacy-compat"):
self.parser.parseError("unknown-doctype")
if publicId is None:
publicId = ""
self.tree.insertDoctype(token)
if publicId != "":
publicId = publicId.translate(asciiUpper2Lower)
if (not correct or token["name"] != "html"
or startswithany(publicId,
("+//silmaril//dtd html pro v0r11 19970101//",
"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
"-//as//dtd html 3.0 aswedit + extensions//",
"-//ietf//dtd html 2.0 level 1//",
"-//ietf//dtd html 2.0 level 2//",
"-//ietf//dtd html 2.0 strict level 1//",
"-//ietf//dtd html 2.0 strict level 2//",
"-//ietf//dtd html 2.0 strict//",
"-//ietf//dtd html 2.0//",
"-//ietf//dtd html 2.1e//",
"-//ietf//dtd html 3.0//",
"-//ietf//dtd html 3.2 final//",
"-//ietf//dtd html 3.2//",
"-//ietf//dtd html 3//",
"-//ietf//dtd html level 0//",
"-//ietf//dtd html level 1//",
"-//ietf//dtd html level 2//",
"-//ietf//dtd html level 3//",
"-//ietf//dtd html strict level 0//",
"-//ietf//dtd html strict level 1//",
"-//ietf//dtd html strict level 2//",
"-//ietf//dtd html strict level 3//",
"-//ietf//dtd html strict//",
"-//ietf//dtd html//",
"-//metrius//dtd metrius presentational//",
"-//microsoft//dtd internet explorer 2.0 html strict//",
"-//microsoft//dtd internet explorer 2.0 html//",
"-//microsoft//dtd internet explorer 2.0 tables//",
"-//microsoft//dtd internet explorer 3.0 html strict//",
"-//microsoft//dtd internet explorer 3.0 html//",
"-//microsoft//dtd internet explorer 3.0 tables//",
"-//netscape comm. corp.//dtd html//",
"-//netscape comm. corp.//dtd strict html//",
"-//o'reilly and associates//dtd html 2.0//",
"-//o'reilly and associates//dtd html extended 1.0//",
"-//o'reilly and associates//dtd html extended relaxed 1.0//",
"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
"-//spyglass//dtd html 2.0 extended//",
"-//sq//dtd html 2.0 hotmetal + extensions//",
"-//sun microsystems corp.//dtd hotjava html//",
"-//sun microsystems corp.//dtd hotjava strict html//",
"-//w3c//dtd html 3 1995-03-24//",
"-//w3c//dtd html 3.2 draft//",
"-//w3c//dtd html 3.2 final//",
"-//w3c//dtd html 3.2//",
"-//w3c//dtd html 3.2s draft//",
"-//w3c//dtd html 4.0 frameset//",
"-//w3c//dtd html 4.0 transitional//",
"-//w3c//dtd html experimental 19960712//",
"-//w3c//dtd html experimental 970421//",
"-//w3c//dtd w3 html//",
"-//w3o//dtd w3 html 3.0//",
"-//webtechs//dtd mozilla html 2.0//",
"-//webtechs//dtd mozilla html//"))
or publicId in
("-//w3o//dtd w3 html strict 3.0//en//",
"-/w3c/dtd html 4.0 transitional/en",
"html")
or startswithany(publicId,
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
                  systemId is None
or systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
self.parser.compatMode = "quirks"
elif (startswithany(publicId,
("-//w3c//dtd xhtml 1.0 frameset//",
"-//w3c//dtd xhtml 1.0 transitional//"))
or startswithany(publicId,
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
                  systemId is not None):
self.parser.compatMode = "limited quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def anythingElse(self):
self.parser.compatMode = "quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def processCharacters(self, token):
self.parser.parseError("expected-doctype-but-got-chars")
self.anythingElse()
return token
def processStartTag(self, token):
self.parser.parseError("expected-doctype-but-got-start-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEndTag(self, token):
self.parser.parseError("expected-doctype-but-got-end-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEOF(self):
self.parser.parseError("expected-doctype-but-got-eof")
self.anythingElse()
return True
class BeforeHtmlPhase(Phase):
# helper methods
def insertHtmlElement(self):
self.tree.insertRoot(impliedTagToken("html", "StartTag"))
self.parser.phase = self.parser.phases["beforeHead"]
# other
def processEOF(self):
self.insertHtmlElement()
return True
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.insertHtmlElement()
return token
def processStartTag(self, token):
if token["name"] == "html":
self.parser.firstStartTag = True
self.insertHtmlElement()
return token
def processEndTag(self, token):
if token["name"] not in ("head", "body", "html", "br"):
self.parser.parseError("unexpected-end-tag-before-html",
{"name": token["name"]})
else:
self.insertHtmlElement()
return token
class BeforeHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("head", "body", "html", "br"), self.endTagImplyHead)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.startTagHead(impliedTagToken("head", "StartTag"))
return True
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.tree.insertElement(token)
self.tree.headPointer = self.tree.openElements[-1]
self.parser.phase = self.parser.phases["inHead"]
def startTagOther(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagImplyHead(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagOther(self, token):
self.parser.parseError("end-tag-after-implied-root",
{"name": token["name"]})
class InHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("title", self.startTagTitle),
(("noscript", "noframes", "style"), self.startTagNoScriptNoFramesStyle),
("script", self.startTagScript),
(("base", "basefont", "bgsound", "command", "link"),
self.startTagBaseLinkCommand),
("meta", self.startTagMeta),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
("head", self.endTagHead),
(("br", "html", "body"), self.endTagHtmlBodyBr)
])
self.endTagHandler.default = self.endTagOther
# the real thing
        def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.parser.parseError("two-heads-are-not-better-than-one")
def startTagBaseLinkCommand(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMeta(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
attributes = token["data"]
if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
if "charset" in attributes:
self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
elif "content" in attributes:
# Encoding it as UTF-8 here is a hack, as really we should pass
# the abstract Unicode string, and just use the
# ContentAttrParser on that, but using UTF-8 allows all chars
# to be encoded and as a ASCII-superset works.
data = inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
parser = inputstream.ContentAttrParser(data)
codec = parser.parse()
self.parser.tokenizer.stream.changeEncoding(codec)
def startTagTitle(self, token):
self.parser.parseRCDataRawtext(token, "RCDATA")
def startTagNoScriptNoFramesStyle(self, token):
#Need to decide whether to implement the scripting-disabled case
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagScript(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
self.parser.originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["text"]
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHead(self, token):
node = self.parser.tree.openElements.pop()
assert node.name == "head", "Expected head got %s"%node.name
self.parser.phase = self.parser.phases["afterHead"]
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.endTagHead(impliedTagToken("head"))
# XXX If we implement a parser for which scripting is disabled we need to
# implement this phase.
#
# class InHeadNoScriptPhase(Phase):
class AfterHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("base", "basefont", "bgsound", "link", "meta", "noframes", "script",
"style", "title"),
self.startTagFromHead),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([(("body", "html", "br"),
self.endTagHtmlBodyBr)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagBody(self, token):
self.parser.framesetOK = False
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inBody"]
def startTagFrameset(self, token):
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagFromHead(self, token):
self.parser.parseError("unexpected-start-tag-out-of-my-head",
{"name": token["name"]})
self.tree.openElements.append(self.tree.headPointer)
self.parser.phases["inHead"].processStartTag(token)
for node in self.tree.openElements[::-1]:
if node.name == "head":
self.tree.openElements.remove(node)
break
def startTagHead(self, token):
self.parser.parseError("unexpected-start-tag", {"name":token["name"]})
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name":token["name"]})
def anythingElse(self):
self.tree.insertElement(impliedTagToken("body", "StartTag"))
self.parser.phase = self.parser.phases["inBody"]
self.parser.framesetOK = True
class InBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
# the really-really-really-very crazy mode
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
#Keep a ref to this for special handling of whitespace in <pre>
self.processSpaceCharactersNonPre = self.processSpaceCharacters
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("base", "basefont", "bgsound", "command", "link", "meta",
"noframes", "script", "style", "title"),
self.startTagProcessInHead),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("address", "article", "aside", "blockquote", "center", "details",
"details", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "menu", "nav", "ol", "p",
"section", "summary", "ul"),
self.startTagCloseP),
(headingElements, self.startTagHeading),
(("pre", "listing"), self.startTagPreListing),
("form", self.startTagForm),
(("li", "dd", "dt"), self.startTagListItem),
("plaintext",self.startTagPlaintext),
("a", self.startTagA),
(("b", "big", "code", "em", "font", "i", "s", "small", "strike",
"strong", "tt", "u"),self.startTagFormatting),
("nobr", self.startTagNobr),
("button", self.startTagButton),
(("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
("xmp", self.startTagXmp),
("table", self.startTagTable),
(("area", "br", "embed", "img", "keygen", "wbr"),
self.startTagVoidFormatting),
(("param", "source", "track"), self.startTagParamSource),
("input", self.startTagInput),
("hr", self.startTagHr),
("image", self.startTagImage),
("isindex", self.startTagIsIndex),
("textarea", self.startTagTextarea),
("iframe", self.startTagIFrame),
(("noembed", "noframes", "noscript"), self.startTagRawtext),
("select", self.startTagSelect),
(("rp", "rt"), self.startTagRpRt),
(("option", "optgroup"), self.startTagOpt),
(("math"), self.startTagMath),
(("svg"), self.startTagSvg),
(("caption", "col", "colgroup", "frame", "head",
"tbody", "td", "tfoot", "th", "thead",
"tr"), self.startTagMisplaced)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("body",self.endTagBody),
("html",self.endTagHtml),
(("address", "article", "aside", "blockquote", "center",
"details", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "listing", "menu", "nav", "ol", "pre",
"section", "summary", "ul"), self.endTagBlock),
("form", self.endTagForm),
("p",self.endTagP),
(("dd", "dt", "li"), self.endTagListItem),
(headingElements, self.endTagHeading),
(("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
"strike", "strong", "tt", "u"), self.endTagFormatting),
(("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
("br", self.endTagBr),
])
self.endTagHandler.default = self.endTagOther
def isMatchingFormattingElement(self, node1, node2):
if node1.name != node2.name or node1.namespace != node2.namespace:
return False
elif len(node1.attributes) != len(node2.attributes):
return False
else:
attributes1 = sorted(node1.attributes.items())
attributes2 = sorted(node2.attributes.items())
for attr1, attr2 in zip(attributes1, attributes2):
if attr1 != attr2:
return False
return True
# helper
def addFormattingElement(self, token):
self.tree.insertElement(token)
element = self.tree.openElements[-1]
matchingElements = []
for node in self.tree.activeFormattingElements[::-1]:
if node is Marker:
break
elif self.isMatchingFormattingElement(node, element):
matchingElements.append(node)
assert len(matchingElements) <= 3
if len(matchingElements) == 3:
self.tree.activeFormattingElements.remove(matchingElements[-1])
self.tree.activeFormattingElements.append(element)
# the real deal
def processEOF(self):
allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
"tfoot", "th", "thead", "tr", "body",
"html"))
for node in self.tree.openElements[::-1]:
if node.name not in allowed_elements:
self.parser.parseError("expected-closing-tag-but-got-eof")
break
#Stop parsing
def processSpaceCharactersDropNewline(self, token):
# Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
# want to drop leading newlines
data = token["data"]
self.processSpaceCharacters = self.processSpaceCharactersNonPre
if (data.startswith("\n") and
self.tree.openElements[-1].name in ("pre", "listing", "textarea")
and not self.tree.openElements[-1].hasContent()):
data = data[1:]
if data:
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(data)
def processCharacters(self, token):
if token["data"] == u"\u0000":
#The tokenizer should always emit null on its own
return
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
#This must be bad for performance
if (self.parser.framesetOK and
any([char not in spaceCharacters
for char in token["data"]])):
self.parser.framesetOK = False
def processSpaceCharacters(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
def startTagProcessInHead(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagBody(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "body"})
if (len(self.tree.openElements) == 1
or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
else:
self.parser.framesetOK = False
for attr, value in token["data"].iteritems():
if attr not in self.tree.openElements[1].attributes:
self.tree.openElements[1].attributes[attr] = value
def startTagFrameset(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
elif not self.parser.framesetOK:
pass
else:
if self.tree.openElements[1].parent:
self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
while self.tree.openElements[-1].name != "html":
self.tree.openElements.pop()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagCloseP(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
def startTagPreListing(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
def startTagForm(self, token):
if self.tree.formPointer:
self.parser.parseError(u"unexpected-start-tag", {"name": "form"})
else:
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
def startTagListItem(self, token):
self.parser.framesetOK = False
stopNamesMap = {"li":["li"],
"dt":["dt", "dd"],
"dd":["dt", "dd"]}
stopNames = stopNamesMap[token["name"]]
for node in reversed(self.tree.openElements):
if node.name in stopNames:
self.parser.phase.processEndTag(
impliedTagToken(node.name, "EndTag"))
break
if (node.nameTuple in specialElements and
node.name not in ("address", "div", "p")):
break
if self.tree.elementInScope("p", variant="button"):
self.parser.phase.processEndTag(
impliedTagToken("p", "EndTag"))
self.tree.insertElement(token)
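        # Illustrative example (assumption): for "<ul><li>a<li>b" the loop
        # above finds the open <li> and emits an implied </li>, so the result
        # is equivalent to "<ul><li>a</li><li>b</li></ul>".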
def startTagPlaintext(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.plaintextState
def startTagHeading(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
if self.tree.openElements[-1].name in headingElements:
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagA(self, token):
afeAElement = self.tree.elementInActiveFormattingElements("a")
if afeAElement:
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "a", "endName": "a"})
self.endTagFormatting(impliedTagToken("a"))
if afeAElement in self.tree.openElements:
self.tree.openElements.remove(afeAElement)
if afeAElement in self.tree.activeFormattingElements:
self.tree.activeFormattingElements.remove(afeAElement)
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
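        # Illustrative example (assumption): "<a href=1>x<a href=2>y" triggers
        # the branch above, closing the first <a> before the second is added,
        # as if "</a>" had appeared between them.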
def startTagFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagNobr(self, token):
self.tree.reconstructActiveFormattingElements()
if self.tree.elementInScope("nobr"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "nobr", "endName": "nobr"})
self.processEndTag(impliedTagToken("nobr"))
# XXX Need tests that trigger the following
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagButton(self, token):
if self.tree.elementInScope("button"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "button", "endName": "button"})
self.processEndTag(impliedTagToken("button"))
return token
else:
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
def startTagAppletMarqueeObject(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.activeFormattingElements.append(Marker)
self.parser.framesetOK = False
def startTagXmp(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.reconstructActiveFormattingElements()
self.parser.framesetOK = False
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagTable(self, token):
if self.parser.compatMode != "quirks":
if self.tree.elementInScope("p", variant="button"):
self.processEndTag(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.parser.phase = self.parser.phases["inTable"]
def startTagVoidFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagInput(self, token):
framesetOK = self.parser.framesetOK
self.startTagVoidFormatting(token)
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
#input type=hidden doesn't change framesetOK
self.parser.framesetOK = framesetOK
def startTagParamSource(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagHr(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagImage(self, token):
            # Spec quirk: <image> must be treated as a misspelling of <img>.
self.parser.parseError("unexpected-start-tag-treated-as",
{"originalName": "image", "newName": "img"})
self.processStartTag(impliedTagToken("img", "StartTag",
attributes=token["data"],
selfClosing=token["selfClosing"]))
def startTagIsIndex(self, token):
self.parser.parseError("deprecated-tag", {"name": "isindex"})
if self.tree.formPointer:
return
form_attrs = {}
if "action" in token["data"]:
form_attrs["action"] = token["data"]["action"]
self.processStartTag(impliedTagToken("form", "StartTag",
attributes=form_attrs))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processStartTag(impliedTagToken("label", "StartTag"))
# XXX Localization ...
if "prompt" in token["data"]:
prompt = token["data"]["prompt"]
else:
prompt = u"This is a searchable index. Enter search keywords: "
self.processCharacters(
{"type":tokenTypes["Characters"], "data":prompt})
attributes = token["data"].copy()
if "action" in attributes:
del attributes["action"]
if "prompt" in attributes:
del attributes["prompt"]
attributes["name"] = "isindex"
self.processStartTag(impliedTagToken("input", "StartTag",
attributes = attributes,
selfClosing =
token["selfClosing"]))
self.processEndTag(impliedTagToken("label"))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processEndTag(impliedTagToken("form"))
def startTagTextarea(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.rcdataState
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
self.parser.framesetOK = False
def startTagIFrame(self, token):
self.parser.framesetOK = False
self.startTagRawtext(token)
def startTagRawtext(self, token):
"""iframe, noembed noframes, noscript(if scripting enabled)"""
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagOpt(self, token):
if self.tree.openElements[-1].name == "option":
self.parser.phase.processEndTag(impliedTagToken("option"))
self.tree.reconstructActiveFormattingElements()
self.parser.tree.insertElement(token)
def startTagSelect(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
if self.parser.phase in (self.parser.phases["inTable"],
self.parser.phases["inCaption"],
self.parser.phases["inColumnGroup"],
self.parser.phases["inTableBody"],
self.parser.phases["inRow"],
self.parser.phases["inCell"]):
self.parser.phase = self.parser.phases["inSelectInTable"]
else:
self.parser.phase = self.parser.phases["inSelect"]
def startTagRpRt(self, token):
if self.tree.elementInScope("ruby"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "ruby":
self.parser.parseError()
self.tree.insertElement(token)
def startTagMath(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustMathMLAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["mathml"]
self.tree.insertElement(token)
#Need to get the parse error right for the case where the token
#has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagSvg(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["svg"]
self.tree.insertElement(token)
#Need to get the parse error right for the case where the token
#has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMisplaced(self, token):
""" Elements that should be children of other elements that have a
different insertion mode; here they are ignored
"caption", "col", "colgroup", "frame", "frameset", "head",
"option", "optgroup", "tbody", "td", "tfoot", "th", "thead",
"tr", "noscript"
"""
self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]})
def startTagOther(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
def endTagP(self, token):
if not self.tree.elementInScope("p", variant="button"):
self.startTagCloseP(impliedTagToken("p", "StartTag"))
self.parser.parseError("unexpected-end-tag", {"name": "p"})
self.endTagP(impliedTagToken("p", "EndTag"))
else:
self.tree.generateImpliedEndTags("p")
if self.tree.openElements[-1].name != "p":
self.parser.parseError("unexpected-end-tag", {"name": "p"})
node = self.tree.openElements.pop()
while node.name != "p":
node = self.tree.openElements.pop()
def endTagBody(self, token):
if not self.tree.elementInScope("body"):
self.parser.parseError()
return
elif self.tree.openElements[-1].name != "body":
for node in self.tree.openElements[2:]:
if node.name not in frozenset(("dd", "dt", "li", "optgroup",
"option", "p", "rp", "rt",
"tbody", "td", "tfoot",
"th", "thead", "tr", "body",
"html")):
#Not sure this is the correct name for the parse error
self.parser.parseError(
"expected-one-end-tag-but-got-another",
{"expectedName": "body", "gotName": node.name})
break
self.parser.phase = self.parser.phases["afterBody"]
def endTagHtml(self, token):
#We repeat the test for the body end tag token being ignored here
if self.tree.elementInScope("body"):
self.endTagBody(impliedTagToken("body"))
return token
def endTagBlock(self, token):
#Put us back in the right whitespace handling mode
if token["name"] == "pre":
self.processSpaceCharacters = self.processSpaceCharactersNonPre
inScope = self.tree.elementInScope(token["name"])
if inScope:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if inScope:
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagForm(self, token):
node = self.tree.formPointer
self.tree.formPointer = None
if node is None or not self.tree.elementInScope(node):
self.parser.parseError("unexpected-end-tag",
{"name":"form"})
else:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1] != node:
self.parser.parseError("end-tag-too-early-ignored",
{"name": "form"})
self.tree.openElements.remove(node)
def endTagListItem(self, token):
if token["name"] == "li":
variant = "list"
else:
variant = None
if not self.tree.elementInScope(token["name"], variant=variant):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
else:
            self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError(
"end-tag-too-early",
{"name": token["name"]})
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagHeading(self, token):
for item in headingElements:
if self.tree.elementInScope(item):
self.tree.generateImpliedEndTags()
break
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
for item in headingElements:
if self.tree.elementInScope(item):
item = self.tree.openElements.pop()
while item.name not in headingElements:
item = self.tree.openElements.pop()
break
def endTagFormatting(self, token):
"""The much-feared adoption agency algorithm"""
# http://www.whatwg.org/specs/web-apps/current-work/#adoptionAgency
# XXX Better parseError messages appreciated.
name = token["name"]
outerLoopCounter = 0
while outerLoopCounter < 8:
outerLoopCounter += 1
# Step 1 paragraph 1
formattingElement = self.tree.elementInActiveFormattingElements(
token["name"])
if (not formattingElement or
(formattingElement in self.tree.openElements and
not self.tree.elementInScope(formattingElement.name))):
self.parser.parseError("adoption-agency-1.1", {"name": token["name"]})
return
# Step 1 paragraph 2
elif formattingElement not in self.tree.openElements:
self.parser.parseError("adoption-agency-1.2", {"name": token["name"]})
self.tree.activeFormattingElements.remove(formattingElement)
return
# Step 1 paragraph 3
if formattingElement != self.tree.openElements[-1]:
self.parser.parseError("adoption-agency-1.3", {"name": token["name"]})
# Step 2
# Start of the adoption agency algorithm proper
afeIndex = self.tree.openElements.index(formattingElement)
furthestBlock = None
for element in self.tree.openElements[afeIndex:]:
if element.nameTuple in specialElements:
furthestBlock = element
break
# Step 3
if furthestBlock is None:
element = self.tree.openElements.pop()
while element != formattingElement:
element = self.tree.openElements.pop()
self.tree.activeFormattingElements.remove(element)
return
commonAncestor = self.tree.openElements[afeIndex-1]
                #if furthestBlock.parent:
                #    furthestBlock.parent.removeChild(furthestBlock)
                # Step 5
# The bookmark is supposed to help us identify where to reinsert
# nodes in step 12. We have to ensure that we reinsert nodes after
# the node before the active formatting element. Note the bookmark
# can move in step 7.4
bookmark = self.tree.activeFormattingElements.index(formattingElement)
# Step 6
lastNode = node = furthestBlock
innerLoopCounter = 0
index = self.tree.openElements.index(node)
while innerLoopCounter < 3:
innerLoopCounter += 1
# Node is element before node in open elements
index -= 1
node = self.tree.openElements[index]
if node not in self.tree.activeFormattingElements:
self.tree.openElements.remove(node)
continue
# Step 6.3
if node == formattingElement:
break
# Step 6.4
if lastNode == furthestBlock:
bookmark = (self.tree.activeFormattingElements.index(node)
+ 1)
# Step 6.5
#cite = node.parent
clone = node.cloneNode()
# Replace node with clone
self.tree.activeFormattingElements[
self.tree.activeFormattingElements.index(node)] = clone
self.tree.openElements[
self.tree.openElements.index(node)] = clone
node = clone
# Step 6.6
# Remove lastNode from its parents, if any
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
node.appendChild(lastNode)
# Step 7.7
lastNode = node
# End of inner loop
# Step 7
# Foster parent lastNode if commonAncestor is a
# table, tbody, tfoot, thead, or tr we need to foster parent the
# lastNode
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")):
parent, insertBefore = self.tree.getTableMisnestedNodePosition()
parent.insertBefore(lastNode, insertBefore)
else:
commonAncestor.appendChild(lastNode)
# Step 8
clone = formattingElement.cloneNode()
# Step 9
furthestBlock.reparentChildren(clone)
# Step 10
furthestBlock.appendChild(clone)
# Step 11
self.tree.activeFormattingElements.remove(formattingElement)
self.tree.activeFormattingElements.insert(bookmark, clone)
# Step 12
self.tree.openElements.remove(formattingElement)
self.tree.openElements.insert(
self.tree.openElements.index(furthestBlock) + 1, clone)
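        # Hedged illustration (not part of the original source): on misnested
        # input such as "<p>1<b>2<i>3</b>4</i>5" the algorithm clones the
        # formatting elements, producing approximately
        # "<p>1<b>2<i>3</i></b><i>4</i>5</p>".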
def endTagAppletMarqueeObject(self, token):
if self.tree.elementInScope(token["name"]):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if self.tree.elementInScope(token["name"]):
element = self.tree.openElements.pop()
while element.name != token["name"]:
element = self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
def endTagBr(self, token):
self.parser.parseError("unexpected-end-tag-treated-as",
{"originalName": "br", "newName": "br element"})
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(impliedTagToken("br", "StartTag"))
self.tree.openElements.pop()
def endTagOther(self, token):
for node in self.tree.openElements[::-1]:
if node.name == token["name"]:
self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
while self.tree.openElements.pop() != node:
pass
break
else:
if node.nameTuple in specialElements:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
break
class TextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("script", self.endTagScript)])
self.endTagHandler.default = self.endTagOther
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processEOF(self):
self.parser.parseError("expected-named-closing-tag-but-got-eof",
self.tree.openElements[-1].name)
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
return True
def startTagOther(self, token):
            assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name']
def endTagScript(self, token):
node = self.tree.openElements.pop()
assert node.name == "script"
self.parser.phase = self.parser.originalPhase
#The rest of this method is all stuff that only happens if
#document.write works
def endTagOther(self, token):
node = self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
class InTablePhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("caption", self.startTagCaption),
("colgroup", self.startTagColgroup),
("col", self.startTagCol),
(("tbody", "tfoot", "thead"), self.startTagRowGroup),
(("td", "th", "tr"), self.startTagImplyTbody),
("table", self.startTagTable),
(("style", "script"), self.startTagStyleScript),
("input", self.startTagInput),
("form", self.startTagForm)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "tbody", "td",
"tfoot", "th", "thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableContext(self):
# "clear the stack back to a table context"
while self.tree.openElements[-1].name not in ("table", "html"):
#self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
# When the current node is <html> it's an innerHTML case
# processing methods
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-table")
else:
assert self.parser.innerHTML
#Stop parsing
def processSpaceCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processSpaceCharacters(token)
def processCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processCharacters(token)
def insertText(self, token):
#If we get here there must be at least one non-whitespace character
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processCharacters(token)
self.tree.insertFromTable = False
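        # Hedged note (assumption): with insertFromTable set, the tree builder
        # "foster parents" the text, so "<table>x</table>" ends up roughly as
        # "x<table></table>" in the resulting tree.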
def startTagCaption(self, token):
self.clearStackToTableContext()
self.tree.activeFormattingElements.append(Marker)
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCaption"]
def startTagColgroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inColumnGroup"]
def startTagCol(self, token):
self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
return token
def startTagRowGroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inTableBody"]
def startTagImplyTbody(self, token):
self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
return token
def startTagTable(self, token):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "table", "endName": "table"})
self.parser.phase.processEndTag(impliedTagToken("table"))
if not self.parser.innerHTML:
return token
def startTagStyleScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagInput(self, token):
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
self.parser.parseError("unexpected-hidden-input-in-table")
self.tree.insertElement(token)
# XXX associate with form
self.tree.openElements.pop()
else:
self.startTagOther(token)
def startTagForm(self, token):
self.parser.parseError("unexpected-form-in-table")
if self.tree.formPointer is None:
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
self.tree.openElements.pop()
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processStartTag(token)
self.tree.insertFromTable = False
def endTagTable(self, token):
if self.tree.elementInScope("table", variant="table"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "table":
self.parser.parseError("end-tag-too-early-named",
{"gotName": "table",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "table":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processEndTag(token)
self.tree.insertFromTable = False
class InTableTextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.originalPhase = None
self.characterTokens = []
def flushCharacters(self):
data = "".join([item["data"] for item in self.characterTokens])
            if any(char not in spaceCharacters for char in data):
token = {"type":tokenTypes["Characters"], "data":data}
self.parser.phases["inTable"].insertText(token)
elif data:
self.tree.insertText(data)
self.characterTokens = []
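        # Illustrative example (not in the original source): for "<table> <td>"
        # the buffered run is pure whitespace and is inserted as table text,
        # whereas "<table>x<td>" contains a non-space character and is rerouted
        # through the foster-parenting path above.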
def processComment(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEOF(self):
self.flushCharacters()
self.parser.phase = self.originalPhase
return True
def processCharacters(self, token):
if token["data"] == u"\u0000":
return
self.characterTokens.append(token)
def processSpaceCharacters(self, token):
#pretty sure we should never reach here
self.characterTokens.append(token)
# assert False
def processStartTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEndTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
class InCaptionPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-caption
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableElement)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("caption", self.endTagCaption),
("table", self.endTagTable),
(("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagCaption(self):
return not self.tree.elementInScope("caption", variant="table")
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableElement(self, token):
self.parser.parseError()
#XXX Have to duplicate logic here to find out if the tag is ignored
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagCaption(self, token):
if not self.ignoreEndTagCaption():
# AT this code is quite similar to endTagTable in "InTable"
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "caption":
self.parser.parseError("expected-one-end-tag-but-got-another",
{"gotName": "caption",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "caption":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inTable"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
self.parser.parseError()
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InColumnGroupPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-column
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("col", self.startTagCol)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("colgroup", self.endTagColgroup),
("col", self.endTagCol)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagColgroup(self):
return self.tree.openElements[-1].name == "html"
def processEOF(self):
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
return
else:
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return True
def processCharacters(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def startTagCol(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def endTagColgroup(self, token):
if self.ignoreEndTagColgroup():
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
else:
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
def endTagCol(self, token):
self.parser.parseError("no-end-tag", {"name": "col"})
def endTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
class InTableBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table0
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("tr", self.startTagTr),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "td", "th",
"tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableBodyContext(self):
while self.tree.openElements[-1].name not in ("tbody", "tfoot",
"thead", "html"):
#self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTr(self, token):
self.clearStackToTableBodyContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inRow"]
def startTagTableCell(self, token):
self.parser.parseError("unexpected-cell-in-table-body",
{"name": token["name"]})
self.startTagTr(impliedTagToken("tr", "StartTag"))
return token
def startTagTableOther(self, token):
# XXX AT Any ideas on how to share this with endTagTable?
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.clearStackToTableBodyContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
else:
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagTable(self, token):
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InRowPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-row
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead",
"tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("tr", self.endTagTr),
("table", self.endTagTable),
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
(("body", "caption", "col", "colgroup", "html", "td", "th"),
self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods (XXX unify this with other table helper methods)
def clearStackToTableRowContext(self):
while self.tree.openElements[-1].name not in ("tr", "html"):
self.parser.parseError("unexpected-implied-end-tag-in-table-row",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
def ignoreEndTagTr(self):
return not self.tree.elementInScope("tr", variant="table")
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTableCell(self, token):
self.clearStackToTableRowContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCell"]
self.tree.activeFormattingElements.append(Marker)
def startTagTableOther(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTr(self, token):
if not self.ignoreEndTagTr():
self.clearStackToTableRowContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTableBody"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# Reprocess the current tag if the tr end tag was not ignored
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagTr(impliedTagToken("tr"))
return token
else:
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-row",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InCellPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-cell
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("td", "th"), self.endTagTableCell),
(("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
(("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
])
self.endTagHandler.default = self.endTagOther
# helper
def closeCell(self):
if self.tree.elementInScope("td", variant="table"):
self.endTagTableCell(impliedTagToken("td"))
elif self.tree.elementInScope("th", variant="table"):
self.endTagTableCell(impliedTagToken("th"))
# the rest
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableOther(self, token):
if (self.tree.elementInScope("td", variant="table") or
self.tree.elementInScope("th", variant="table")):
self.closeCell()
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagTableCell(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.tree.generateImpliedEndTags(token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-cell-end-tag",
{"name": token["name"]})
while True:
node = self.tree.openElements.pop()
if node.name == token["name"]:
break
else:
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inRow"]
else:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagImply(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.closeCell()
return token
else:
# sometimes innerHTML case
self.parser.parseError()
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InSelectPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("option", self.startTagOption),
("optgroup", self.startTagOptgroup),
("select", self.startTagSelect),
(("input", "keygen", "textarea"), self.startTagInput),
("script", self.startTagScript)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("option", self.endTagOption),
("optgroup", self.endTagOptgroup),
("select", self.endTagSelect)
])
self.endTagHandler.default = self.endTagOther
# http://www.whatwg.org/specs/web-apps/current-work/#in-select
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-select")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
if token["data"] == u"\u0000":
return
self.tree.insertText(token["data"])
def startTagOption(self, token):
# We need to imply </option> if <option> is the current node.
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagOptgroup(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagSelect(self, token):
self.parser.parseError("unexpected-select-in-select")
self.endTagSelect(impliedTagToken("select"))
def startTagInput(self, token):
self.parser.parseError("unexpected-input-in-select")
if self.tree.elementInScope("select", variant="select"):
self.endTagSelect(impliedTagToken("select"))
return token
else:
assert self.parser.innerHTML
def startTagScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-select",
{"name": token["name"]})
def endTagOption(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "option"})
def endTagOptgroup(self, token):
# </optgroup> implicitly closes <option>
if (self.tree.openElements[-1].name == "option" and
self.tree.openElements[-2].name == "optgroup"):
self.tree.openElements.pop()
# It also closes </optgroup>
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
# But nothing else
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "optgroup"})
def endTagSelect(self, token):
if self.tree.elementInScope("select", variant="select"):
node = self.tree.openElements.pop()
while node.name != "select":
node = self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-select",
{"name": token["name"]})
class InSelectInTablePhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.startTagTable)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.endTagTable)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.parser.phases["inSelect"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inSelect"].processCharacters(token)
def startTagTable(self, token):
self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]})
self.endTagOther(impliedTagToken("select"))
return token
def startTagOther(self, token):
return self.parser.phases["inSelect"].processStartTag(token)
def endTagTable(self, token):
self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]})
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagOther(impliedTagToken("select"))
return token
def endTagOther(self, token):
return self.parser.phases["inSelect"].processEndTag(token)
class InForeignContentPhase(Phase):
breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
"center", "code", "dd", "div", "dl", "dt",
"em", "embed", "h1", "h2", "h3",
"h4", "h5", "h6", "head", "hr", "i", "img",
"li", "listing", "menu", "meta", "nobr",
"ol", "p", "pre", "ruby", "s", "small",
"span", "strong", "strike", "sub", "sup",
"table", "tt", "u", "ul", "var"])
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
def adjustSVGTagNames(self, token):
replacements = {u"altglyph":u"altGlyph",
u"altglyphdef":u"altGlyphDef",
u"altglyphitem":u"altGlyphItem",
u"animatecolor":u"animateColor",
u"animatemotion":u"animateMotion",
u"animatetransform":u"animateTransform",
u"clippath":u"clipPath",
u"feblend":u"feBlend",
u"fecolormatrix":u"feColorMatrix",
u"fecomponenttransfer":u"feComponentTransfer",
u"fecomposite":u"feComposite",
u"feconvolvematrix":u"feConvolveMatrix",
u"fediffuselighting":u"feDiffuseLighting",
u"fedisplacementmap":u"feDisplacementMap",
u"fedistantlight":u"feDistantLight",
u"feflood":u"feFlood",
u"fefunca":u"feFuncA",
u"fefuncb":u"feFuncB",
u"fefuncg":u"feFuncG",
u"fefuncr":u"feFuncR",
u"fegaussianblur":u"feGaussianBlur",
u"feimage":u"feImage",
u"femerge":u"feMerge",
u"femergenode":u"feMergeNode",
u"femorphology":u"feMorphology",
u"feoffset":u"feOffset",
u"fepointlight":u"fePointLight",
u"fespecularlighting":u"feSpecularLighting",
u"fespotlight":u"feSpotLight",
u"fetile":u"feTile",
u"feturbulence":u"feTurbulence",
u"foreignobject":u"foreignObject",
u"glyphref":u"glyphRef",
u"lineargradient":u"linearGradient",
u"radialgradient":u"radialGradient",
u"textpath":u"textPath"}
if token["name"] in replacements:
token["name"] = replacements[token["name"]]
def processCharacters(self, token):
if token["data"] == u"\u0000":
token["data"] = u"\uFFFD"
elif (self.parser.framesetOK and
any(char not in spaceCharacters for char in token["data"])):
self.parser.framesetOK = False
Phase.processCharacters(self, token)
def processStartTag(self, token):
currentNode = self.tree.openElements[-1]
if (token["name"] in self.breakoutElements or
(token["name"] == "font" and
set(token["data"].keys()) & set(["color", "face", "size"]))):
self.parser.parseError("unexpected-html-element-in-foreign-content",
token["name"])
while (self.tree.openElements[-1].namespace !=
self.tree.defaultNamespace and
not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and
not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])):
self.tree.openElements.pop()
return token
else:
if currentNode.namespace == namespaces["mathml"]:
self.parser.adjustMathMLAttributes(token)
elif currentNode.namespace == namespaces["svg"]:
self.adjustSVGTagNames(token)
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = currentNode.namespace
self.tree.insertElement(token)
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def processEndTag(self, token):
nodeIndex = len(self.tree.openElements) - 1
node = self.tree.openElements[-1]
if node.name != token["name"]:
self.parser.parseError("unexpected-end-tag", token["name"])
while True:
if node.name.translate(asciiUpper2Lower) == token["name"]:
#XXX this isn't in the spec but it seems necessary
if self.parser.phase == self.parser.phases["inTableText"]:
self.parser.phase.flushCharacters()
self.parser.phase = self.parser.phase.originalPhase
while self.tree.openElements.pop() != node:
assert self.tree.openElements
new_token = None
break
nodeIndex -= 1
node = self.tree.openElements[nodeIndex]
if node.namespace != self.tree.defaultNamespace:
continue
else:
new_token = self.parser.phase.processEndTag(token)
break
return new_token
class AfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([("html", self.endTagHtml)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
#Stop parsing
pass
def processComment(self, token):
# This is needed because data is to be appended to the <html> element
# here and not to whatever is currently open.
self.tree.insertComment(token, self.tree.openElements[0])
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-body")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
        def endTagHtml(self, token):
if self.parser.innerHTML:
self.parser.parseError("unexpected-end-tag-after-body-innerhtml")
else:
self.parser.phase = self.parser.phases["afterAfterBody"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class InFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-frameset
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("frameset", self.startTagFrameset),
("frame", self.startTagFrame),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("frameset", self.endTagFrameset)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-frameset")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
self.parser.parseError("unexpected-char-in-frameset")
def startTagFrameset(self, token):
self.tree.insertElement(token)
def startTagFrame(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagNoframes(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-frameset",
{"name": token["name"]})
def endTagFrameset(self, token):
if self.tree.openElements[-1].name == "html":
# innerHTML case
self.parser.parseError("unexpected-frameset-in-frameset-innerhtml")
else:
self.tree.openElements.pop()
if (not self.parser.innerHTML and
self.tree.openElements[-1].name != "frameset"):
                # If we're not in innerHTML mode and the current node is not a
                # "frameset" element (anymore) then switch.
self.parser.phase = self.parser.phases["afterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-frameset",
{"name": token["name"]})
class AfterFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#after3
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("html", self.endTagHtml)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
#Stop parsing
pass
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-frameset")
def startTagNoframes(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-frameset",
{"name": token["name"]})
def endTagHtml(self, token):
self.parser.phase = self.parser.phases["afterAfterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-frameset",
{"name": token["name"]})
class AfterAfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class AfterAfterFramesetPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoFrames)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagNoFrames(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
return {
"initial": InitialPhase,
"beforeHtml": BeforeHtmlPhase,
"beforeHead": BeforeHeadPhase,
"inHead": InHeadPhase,
# XXX "inHeadNoscript": InHeadNoScriptPhase,
"afterHead": AfterHeadPhase,
"inBody": InBodyPhase,
"text": TextPhase,
"inTable": InTablePhase,
"inTableText": InTableTextPhase,
"inCaption": InCaptionPhase,
"inColumnGroup": InColumnGroupPhase,
"inTableBody": InTableBodyPhase,
"inRow": InRowPhase,
"inCell": InCellPhase,
"inSelect": InSelectPhase,
"inSelectInTable": InSelectInTablePhase,
"inForeignContent": InForeignContentPhase,
"afterBody": AfterBodyPhase,
"inFrameset": InFramesetPhase,
"afterFrameset": AfterFramesetPhase,
"afterAfterBody": AfterAfterBodyPhase,
"afterAfterFrameset": AfterAfterFramesetPhase,
}
def impliedTagToken(name, type="EndTag", attributes=None,
                    selfClosing=False):
    if attributes is None:
        attributes = {}
    return {"type": tokenTypes[type], "name": unicode(name), "data": attributes,
            "selfClosing": selfClosing}
class ParseError(Exception):
"""Error in parsed document"""
pass
| bsd-3-clause | 6,437,826,723,308,856,000 | 41.361873 | 116 | 0.548849 | false |
UrusTeam/android_ndk_toolchain_cross | lib/python2.7/test/test_with.py | 88 | 26440 | #!/usr/bin/env python
"""Unit tests for the with statement specified in PEP 343."""
__author__ = "Mike Bland"
__email__ = "mbland at acm dot org"
import sys
import unittest
from collections import deque
from contextlib import GeneratorContextManager, contextmanager
from test.test_support import run_unittest
class MockContextManager(GeneratorContextManager):
def __init__(self, gen):
GeneratorContextManager.__init__(self, gen)
self.enter_called = False
self.exit_called = False
self.exit_args = None
def __enter__(self):
self.enter_called = True
return GeneratorContextManager.__enter__(self)
def __exit__(self, type, value, traceback):
self.exit_called = True
self.exit_args = (type, value, traceback)
return GeneratorContextManager.__exit__(self, type,
value, traceback)
def mock_contextmanager(func):
def helper(*args, **kwds):
return MockContextManager(func(*args, **kwds))
return helper
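# Illustrative usage (not part of the original tests) of the decorator above,
# which mirrors contextlib.contextmanager while recording protocol calls:
#
#   @mock_contextmanager
#   def managed():
#       yield 42
#
#   ctx = managed()
#   with ctx as value:
#       assert value == 42 and ctx.enter_called
#   assert ctx.exit_called and ctx.exit_args == (None, None, None)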
class MockResource(object):
def __init__(self):
self.yielded = False
self.stopped = False
@mock_contextmanager
def mock_contextmanager_generator():
mock = MockResource()
try:
mock.yielded = True
yield mock
finally:
mock.stopped = True
class Nested(object):
def __init__(self, *managers):
self.managers = managers
self.entered = None
def __enter__(self):
if self.entered is not None:
raise RuntimeError("Context is not reentrant")
self.entered = deque()
vars = []
try:
for mgr in self.managers:
vars.append(mgr.__enter__())
self.entered.appendleft(mgr)
except:
if not self.__exit__(*sys.exc_info()):
raise
return vars
def __exit__(self, *exc_info):
# Behave like nested with statements
# first in, last out
# New exceptions override old ones
ex = exc_info
for mgr in self.entered:
try:
if mgr.__exit__(*ex):
ex = (None, None, None)
except:
ex = sys.exc_info()
self.entered = None
if ex is not exc_info:
raise ex[0], ex[1], ex[2]
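# Hedged sketch (assumption): Nested emulates contextlib.nested, so
#
#   with Nested(a, b) as (x, y):
#       ...
#
# behaves like the lexically nested form
#
#   with a as x:
#       with b as y:
#           ...
#
# including last-in-first-out __exit__ calls and exception replacement.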
class MockNested(Nested):
def __init__(self, *managers):
Nested.__init__(self, *managers)
self.enter_called = False
self.exit_called = False
self.exit_args = None
def __enter__(self):
self.enter_called = True
return Nested.__enter__(self)
def __exit__(self, *exc_info):
self.exit_called = True
self.exit_args = exc_info
return Nested.__exit__(self, *exc_info)
class FailureTestCase(unittest.TestCase):
def testNameError(self):
def fooNotDeclared():
with foo: pass
self.assertRaises(NameError, fooNotDeclared)
def testEnterAttributeError(self):
class LacksEnter(object):
def __exit__(self, type, value, traceback):
pass
def fooLacksEnter():
foo = LacksEnter()
with foo: pass
self.assertRaises(AttributeError, fooLacksEnter)
def testExitAttributeError(self):
class LacksExit(object):
def __enter__(self):
pass
def fooLacksExit():
foo = LacksExit()
with foo: pass
self.assertRaises(AttributeError, fooLacksExit)
def assertRaisesSyntaxError(self, codestr):
def shouldRaiseSyntaxError(s):
compile(s, '', 'single')
self.assertRaises(SyntaxError, shouldRaiseSyntaxError, codestr)
def testAssignmentToNoneError(self):
self.assertRaisesSyntaxError('with mock as None:\n pass')
self.assertRaisesSyntaxError(
'with mock as (None):\n'
' pass')
def testAssignmentToEmptyTupleError(self):
self.assertRaisesSyntaxError(
'with mock as ():\n'
' pass')
def testAssignmentToTupleOnlyContainingNoneError(self):
self.assertRaisesSyntaxError('with mock as None,:\n pass')
self.assertRaisesSyntaxError(
'with mock as (None,):\n'
' pass')
def testAssignmentToTupleContainingNoneError(self):
self.assertRaisesSyntaxError(
'with mock as (foo, None, bar):\n'
' pass')
def testEnterThrows(self):
class EnterThrows(object):
def __enter__(self):
raise RuntimeError("Enter threw")
def __exit__(self, *args):
pass
def shouldThrow():
ct = EnterThrows()
self.foo = None
with ct as self.foo:
pass
self.assertRaises(RuntimeError, shouldThrow)
self.assertEqual(self.foo, None)
def testExitThrows(self):
class ExitThrows(object):
def __enter__(self):
return
def __exit__(self, *args):
raise RuntimeError(42)
def shouldThrow():
with ExitThrows():
pass
self.assertRaises(RuntimeError, shouldThrow)
class ContextmanagerAssertionMixin(object):
TEST_EXCEPTION = RuntimeError("test exception")
def assertInWithManagerInvariants(self, mock_manager):
self.assertTrue(mock_manager.enter_called)
self.assertFalse(mock_manager.exit_called)
self.assertEqual(mock_manager.exit_args, None)
def assertAfterWithManagerInvariants(self, mock_manager, exit_args):
self.assertTrue(mock_manager.enter_called)
self.assertTrue(mock_manager.exit_called)
self.assertEqual(mock_manager.exit_args, exit_args)
def assertAfterWithManagerInvariantsNoError(self, mock_manager):
self.assertAfterWithManagerInvariants(mock_manager,
(None, None, None))
def assertInWithGeneratorInvariants(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertFalse(mock_generator.stopped)
def assertAfterWithGeneratorInvariantsNoError(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertTrue(mock_generator.stopped)
def raiseTestException(self):
raise self.TEST_EXCEPTION
def assertAfterWithManagerInvariantsWithError(self, mock_manager,
exc_type=None):
self.assertTrue(mock_manager.enter_called)
self.assertTrue(mock_manager.exit_called)
if exc_type is None:
self.assertEqual(mock_manager.exit_args[1], self.TEST_EXCEPTION)
exc_type = type(self.TEST_EXCEPTION)
self.assertEqual(mock_manager.exit_args[0], exc_type)
# Test the __exit__ arguments. Issue #7853
self.assertIsInstance(mock_manager.exit_args[1], exc_type)
self.assertIsNot(mock_manager.exit_args[2], None)
def assertAfterWithGeneratorInvariantsWithError(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertTrue(mock_generator.stopped)
class NonexceptionalTestCase(unittest.TestCase, ContextmanagerAssertionMixin):
def testInlineGeneratorSyntax(self):
with mock_contextmanager_generator():
pass
def testUnboundGenerator(self):
mock = mock_contextmanager_generator()
with mock:
pass
self.assertAfterWithManagerInvariantsNoError(mock)
def testInlineGeneratorBoundSyntax(self):
with mock_contextmanager_generator() as foo:
self.assertInWithGeneratorInvariants(foo)
# FIXME: In the future, we'll try to keep the bound names from leaking
self.assertAfterWithGeneratorInvariantsNoError(foo)
def testInlineGeneratorBoundToExistingVariable(self):
foo = None
with mock_contextmanager_generator() as foo:
self.assertInWithGeneratorInvariants(foo)
self.assertAfterWithGeneratorInvariantsNoError(foo)
def testInlineGeneratorBoundToDottedVariable(self):
with mock_contextmanager_generator() as self.foo:
self.assertInWithGeneratorInvariants(self.foo)
self.assertAfterWithGeneratorInvariantsNoError(self.foo)
def testBoundGenerator(self):
mock = mock_contextmanager_generator()
with mock as foo:
self.assertInWithGeneratorInvariants(foo)
self.assertInWithManagerInvariants(mock)
self.assertAfterWithGeneratorInvariantsNoError(foo)
self.assertAfterWithManagerInvariantsNoError(mock)
def testNestedSingleStatements(self):
mock_a = mock_contextmanager_generator()
with mock_a as foo:
mock_b = mock_contextmanager_generator()
with mock_b as bar:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithManagerInvariants(mock_b)
self.assertInWithGeneratorInvariants(foo)
self.assertInWithGeneratorInvariants(bar)
self.assertAfterWithManagerInvariantsNoError(mock_b)
self.assertAfterWithGeneratorInvariantsNoError(bar)
self.assertInWithManagerInvariants(mock_a)
self.assertInWithGeneratorInvariants(foo)
self.assertAfterWithManagerInvariantsNoError(mock_a)
self.assertAfterWithGeneratorInvariantsNoError(foo)
class NestedNonexceptionalTestCase(unittest.TestCase,
ContextmanagerAssertionMixin):
def testSingleArgInlineGeneratorSyntax(self):
with Nested(mock_contextmanager_generator()):
pass
def testSingleArgBoundToNonTuple(self):
m = mock_contextmanager_generator()
        # This will bind all the arguments to Nested() into a single list
        # assigned to foo.
with Nested(m) as foo:
self.assertInWithManagerInvariants(m)
self.assertAfterWithManagerInvariantsNoError(m)
def testSingleArgBoundToSingleElementParenthesizedList(self):
m = mock_contextmanager_generator()
        # This will bind all the arguments to Nested() into a single list
        # assigned to foo.
with Nested(m) as (foo):
self.assertInWithManagerInvariants(m)
self.assertAfterWithManagerInvariantsNoError(m)
def testSingleArgBoundToMultipleElementTupleError(self):
def shouldThrowValueError():
with Nested(mock_contextmanager_generator()) as (foo, bar):
pass
self.assertRaises(ValueError, shouldThrowValueError)
def testSingleArgUnbound(self):
mock_contextmanager = mock_contextmanager_generator()
mock_nested = MockNested(mock_contextmanager)
with mock_nested:
self.assertInWithManagerInvariants(mock_contextmanager)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithManagerInvariantsNoError(mock_contextmanager)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
def testMultipleArgUnbound(self):
m = mock_contextmanager_generator()
n = mock_contextmanager_generator()
o = mock_contextmanager_generator()
mock_nested = MockNested(m, n, o)
with mock_nested:
self.assertInWithManagerInvariants(m)
self.assertInWithManagerInvariants(n)
self.assertInWithManagerInvariants(o)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithManagerInvariantsNoError(m)
self.assertAfterWithManagerInvariantsNoError(n)
self.assertAfterWithManagerInvariantsNoError(o)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
def testMultipleArgBound(self):
mock_nested = MockNested(mock_contextmanager_generator(),
mock_contextmanager_generator(), mock_contextmanager_generator())
with mock_nested as (m, n, o):
self.assertInWithGeneratorInvariants(m)
self.assertInWithGeneratorInvariants(n)
self.assertInWithGeneratorInvariants(o)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithGeneratorInvariantsNoError(m)
self.assertAfterWithGeneratorInvariantsNoError(n)
self.assertAfterWithGeneratorInvariantsNoError(o)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
class ExceptionalTestCase(unittest.TestCase, ContextmanagerAssertionMixin):
def testSingleResource(self):
cm = mock_contextmanager_generator()
def shouldThrow():
with cm as self.resource:
self.assertInWithManagerInvariants(cm)
self.assertInWithGeneratorInvariants(self.resource)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(cm)
self.assertAfterWithGeneratorInvariantsWithError(self.resource)
def testExceptionNormalized(self):
cm = mock_contextmanager_generator()
def shouldThrow():
with cm as self.resource:
# Note this relies on the fact that 1 // 0 produces an exception
# that is not normalized immediately.
1 // 0
self.assertRaises(ZeroDivisionError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(cm, ZeroDivisionError)
def testNestedSingleStatements(self):
mock_a = mock_contextmanager_generator()
mock_b = mock_contextmanager_generator()
def shouldThrow():
with mock_a as self.foo:
with mock_b as self.bar:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithManagerInvariants(mock_b)
self.assertInWithGeneratorInvariants(self.foo)
self.assertInWithGeneratorInvariants(self.bar)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(mock_a)
self.assertAfterWithManagerInvariantsWithError(mock_b)
self.assertAfterWithGeneratorInvariantsWithError(self.foo)
self.assertAfterWithGeneratorInvariantsWithError(self.bar)
def testMultipleResourcesInSingleStatement(self):
cm_a = mock_contextmanager_generator()
cm_b = mock_contextmanager_generator()
mock_nested = MockNested(cm_a, cm_b)
def shouldThrow():
with mock_nested as (self.resource_a, self.resource_b):
self.assertInWithManagerInvariants(cm_a)
self.assertInWithManagerInvariants(cm_b)
self.assertInWithManagerInvariants(mock_nested)
self.assertInWithGeneratorInvariants(self.resource_a)
self.assertInWithGeneratorInvariants(self.resource_b)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(cm_a)
self.assertAfterWithManagerInvariantsWithError(cm_b)
self.assertAfterWithManagerInvariantsWithError(mock_nested)
self.assertAfterWithGeneratorInvariantsWithError(self.resource_a)
self.assertAfterWithGeneratorInvariantsWithError(self.resource_b)
def testNestedExceptionBeforeInnerStatement(self):
mock_a = mock_contextmanager_generator()
mock_b = mock_contextmanager_generator()
self.bar = None
def shouldThrow():
with mock_a as self.foo:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithGeneratorInvariants(self.foo)
self.raiseTestException()
with mock_b as self.bar:
pass
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(mock_a)
self.assertAfterWithGeneratorInvariantsWithError(self.foo)
# The inner statement stuff should never have been touched
self.assertEqual(self.bar, None)
self.assertFalse(mock_b.enter_called)
self.assertFalse(mock_b.exit_called)
self.assertEqual(mock_b.exit_args, None)
def testNestedExceptionAfterInnerStatement(self):
mock_a = mock_contextmanager_generator()
mock_b = mock_contextmanager_generator()
def shouldThrow():
with mock_a as self.foo:
with mock_b as self.bar:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithManagerInvariants(mock_b)
self.assertInWithGeneratorInvariants(self.foo)
self.assertInWithGeneratorInvariants(self.bar)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(mock_a)
self.assertAfterWithManagerInvariantsNoError(mock_b)
self.assertAfterWithGeneratorInvariantsWithError(self.foo)
self.assertAfterWithGeneratorInvariantsNoError(self.bar)
def testRaisedStopIteration1(self):
# From bug 1462485
@contextmanager
def cm():
yield
def shouldThrow():
with cm():
raise StopIteration("from with")
self.assertRaises(StopIteration, shouldThrow)
def testRaisedStopIteration2(self):
# From bug 1462485
class cm(object):
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
def shouldThrow():
with cm():
raise StopIteration("from with")
self.assertRaises(StopIteration, shouldThrow)
def testRaisedStopIteration3(self):
# Another variant where the exception hasn't been instantiated
# From bug 1705170
@contextmanager
def cm():
yield
def shouldThrow():
with cm():
raise iter([]).next()
self.assertRaises(StopIteration, shouldThrow)
def testRaisedGeneratorExit1(self):
# From bug 1462485
@contextmanager
def cm():
yield
def shouldThrow():
with cm():
raise GeneratorExit("from with")
self.assertRaises(GeneratorExit, shouldThrow)
def testRaisedGeneratorExit2(self):
# From bug 1462485
        class cm(object):
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
def shouldThrow():
with cm():
raise GeneratorExit("from with")
self.assertRaises(GeneratorExit, shouldThrow)
def testErrorsInBool(self):
# issue4589: __exit__ return code may raise an exception
# when looking at its truth value.
class cm(object):
def __init__(self, bool_conversion):
class Bool:
def __nonzero__(self):
return bool_conversion()
self.exit_result = Bool()
def __enter__(self):
return 3
def __exit__(self, a, b, c):
return self.exit_result
def trueAsBool():
with cm(lambda: True):
self.fail("Should NOT see this")
trueAsBool()
def falseAsBool():
with cm(lambda: False):
self.fail("Should raise")
self.assertRaises(AssertionError, falseAsBool)
def failAsBool():
with cm(lambda: 1 // 0):
self.fail("Should NOT see this")
self.assertRaises(ZeroDivisionError, failAsBool)
class NonLocalFlowControlTestCase(unittest.TestCase):
def testWithBreak(self):
counter = 0
while True:
counter += 1
with mock_contextmanager_generator():
counter += 10
break
counter += 100 # Not reached
self.assertEqual(counter, 11)
def testWithContinue(self):
counter = 0
while True:
counter += 1
if counter > 2:
break
with mock_contextmanager_generator():
counter += 10
continue
counter += 100 # Not reached
self.assertEqual(counter, 12)
def testWithReturn(self):
def foo():
counter = 0
while True:
counter += 1
with mock_contextmanager_generator():
counter += 10
return counter
counter += 100 # Not reached
self.assertEqual(foo(), 11)
def testWithYield(self):
def gen():
with mock_contextmanager_generator():
yield 12
yield 13
x = list(gen())
self.assertEqual(x, [12, 13])
def testWithRaise(self):
counter = 0
try:
counter += 1
with mock_contextmanager_generator():
counter += 10
raise RuntimeError
counter += 100 # Not reached
except RuntimeError:
self.assertEqual(counter, 11)
else:
self.fail("Didn't raise RuntimeError")
class AssignmentTargetTestCase(unittest.TestCase):
def testSingleComplexTarget(self):
targets = {1: [0, 1, 2]}
with mock_contextmanager_generator() as targets[1][0]:
self.assertEqual(targets.keys(), [1])
self.assertEqual(targets[1][0].__class__, MockResource)
with mock_contextmanager_generator() as targets.values()[0][1]:
self.assertEqual(targets.keys(), [1])
self.assertEqual(targets[1][1].__class__, MockResource)
with mock_contextmanager_generator() as targets[2]:
keys = targets.keys()
keys.sort()
self.assertEqual(keys, [1, 2])
class C: pass
blah = C()
with mock_contextmanager_generator() as blah.foo:
self.assertEqual(hasattr(blah, "foo"), True)
def testMultipleComplexTargets(self):
class C:
def __enter__(self): return 1, 2, 3
def __exit__(self, t, v, tb): pass
targets = {1: [0, 1, 2]}
with C() as (targets[1][0], targets[1][1], targets[1][2]):
self.assertEqual(targets, {1: [1, 2, 3]})
with C() as (targets.values()[0][2], targets.values()[0][1], targets.values()[0][0]):
self.assertEqual(targets, {1: [3, 2, 1]})
with C() as (targets[1], targets[2], targets[3]):
self.assertEqual(targets, {1: 1, 2: 2, 3: 3})
class B: pass
blah = B()
with C() as (blah.one, blah.two, blah.three):
self.assertEqual(blah.one, 1)
self.assertEqual(blah.two, 2)
self.assertEqual(blah.three, 3)
class ExitSwallowsExceptionTestCase(unittest.TestCase):
def testExitTrueSwallowsException(self):
class AfricanSwallow:
def __enter__(self): pass
def __exit__(self, t, v, tb): return True
try:
with AfricanSwallow():
1 // 0
except ZeroDivisionError:
self.fail("ZeroDivisionError should have been swallowed")
def testExitFalseDoesntSwallowException(self):
class EuropeanSwallow:
def __enter__(self): pass
def __exit__(self, t, v, tb): return False
try:
with EuropeanSwallow():
1 // 0
except ZeroDivisionError:
pass
else:
self.fail("ZeroDivisionError should have been raised")
class NestedWith(unittest.TestCase):
class Dummy(object):
def __init__(self, value=None, gobble=False):
if value is None:
value = self
self.value = value
self.gobble = gobble
self.enter_called = False
self.exit_called = False
def __enter__(self):
self.enter_called = True
return self.value
def __exit__(self, *exc_info):
self.exit_called = True
self.exc_info = exc_info
if self.gobble:
return True
class InitRaises(object):
def __init__(self): raise RuntimeError()
class EnterRaises(object):
def __enter__(self): raise RuntimeError()
def __exit__(self, *exc_info): pass
class ExitRaises(object):
def __enter__(self): pass
def __exit__(self, *exc_info): raise RuntimeError()
def testNoExceptions(self):
with self.Dummy() as a, self.Dummy() as b:
self.assertTrue(a.enter_called)
self.assertTrue(b.enter_called)
self.assertTrue(a.exit_called)
self.assertTrue(b.exit_called)
def testExceptionInExprList(self):
try:
with self.Dummy() as a, self.InitRaises():
pass
except:
pass
self.assertTrue(a.enter_called)
self.assertTrue(a.exit_called)
def testExceptionInEnter(self):
try:
with self.Dummy() as a, self.EnterRaises():
self.fail('body of bad with executed')
except RuntimeError:
pass
else:
self.fail('RuntimeError not reraised')
self.assertTrue(a.enter_called)
self.assertTrue(a.exit_called)
def testExceptionInExit(self):
body_executed = False
with self.Dummy(gobble=True) as a, self.ExitRaises():
body_executed = True
self.assertTrue(a.enter_called)
self.assertTrue(a.exit_called)
self.assertTrue(body_executed)
self.assertNotEqual(a.exc_info[0], None)
def testEnterReturnsTuple(self):
with self.Dummy(value=(1,2)) as (a1, a2), \
self.Dummy(value=(10, 20)) as (b1, b2):
self.assertEqual(1, a1)
self.assertEqual(2, a2)
self.assertEqual(10, b1)
self.assertEqual(20, b2)
def test_main():
run_unittest(FailureTestCase, NonexceptionalTestCase,
NestedNonexceptionalTestCase, ExceptionalTestCase,
NonLocalFlowControlTestCase,
AssignmentTargetTestCase,
ExitSwallowsExceptionTestCase,
NestedWith)
if __name__ == '__main__':
test_main()
| gpl-2.0 | -3,088,992,426,235,267,000 | 34.253333 | 93 | 0.613616 | false |
preete-dixit-ck/incubator-airflow | tests/contrib/hooks/test_ssh_hook.py | 14 | 2342 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from airflow import configuration
HELLO_SERVER_CMD = """
import socket, sys
listener = socket.socket()
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listener.bind(('localhost', 2134))
listener.listen(1)
sys.stdout.write('ready')
sys.stdout.flush()
conn = listener.accept()[0]
conn.sendall(b'hello')
"""
class SSHHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
from airflow.contrib.hooks.ssh_hook import SSHHook
self.hook = SSHHook(ssh_conn_id='ssh_default')
self.hook.no_host_key_check = True
def test_ssh_connection(self):
ssh_hook = self.hook.get_conn()
self.assertIsNotNone(ssh_hook)
def test_tunnel(self):
print("Setting up remote listener")
import subprocess
import socket
self.server_handle = subprocess.Popen(["python", "-c", HELLO_SERVER_CMD],
stdout=subprocess.PIPE)
print("Setting up tunnel")
with self.hook.create_tunnel(2135, 2134):
print("Tunnel up")
server_output = self.server_handle.stdout.read(5)
self.assertEqual(server_output, b"ready")
print("Connecting to server via tunnel")
s = socket.socket()
s.connect(("localhost", 2135))
print("Receiving...", )
response = s.recv(5)
self.assertEqual(response, b"hello")
print("Closing connection")
s.close()
print("Waiting for listener...")
output, _ = self.server_handle.communicate()
self.assertEqual(self.server_handle.returncode, 0)
print("Closing tunnel")
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 7,162,591,811,685,305,000 | 32.457143 | 81 | 0.63877 | false |
photoninger/ansible | lib/ansible/modules/crypto/openssl_csr.py | 9 | 20902 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2017, Yanis Guenane <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: openssl_csr
author: "Yanis Guenane (@Spredzy)"
version_added: "2.4"
short_description: Generate OpenSSL Certificate Signing Request (CSR)
description:
- "This module allows one to (re)generate OpenSSL certificate signing requests.
It uses the pyOpenSSL python library to interact with openssl. This module supports
the subjectAltName as well as the keyUsage and extendedKeyUsage extensions."
requirements:
- "python-pyOpenSSL >= 0.15"
options:
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the certificate signing request should exist or not, taking action if the state is different from what is stated.
digest:
required: false
default: "sha256"
description:
- Digest used when signing the certificate signing request with the private key
privatekey_path:
required: true
description:
- Path to the privatekey to use when signing the certificate signing request
privatekey_passphrase:
required: false
description:
- The passphrase for the privatekey.
version:
required: false
default: 1
description:
- Version of the certificate signing request
force:
required: false
default: False
choices: [ True, False ]
description:
- Should the certificate signing request be forced regenerated by this ansible module
path:
required: true
description:
- Name of the file into which the generated OpenSSL certificate signing request will be written
subject:
required: false
description:
- Key/value pairs that will be present in the subject name field of the certificate signing request.
- If you need to specify more than one value with the same key, use a list as value.
version_added: '2.5'
country_name:
required: false
aliases: [ 'C', 'countryName' ]
description:
- countryName field of the certificate signing request subject
state_or_province_name:
required: false
aliases: [ 'ST', 'stateOrProvinceName' ]
description:
- stateOrProvinceName field of the certificate signing request subject
locality_name:
required: false
aliases: [ 'L', 'localityName' ]
description:
- localityName field of the certificate signing request subject
organization_name:
required: false
aliases: [ 'O', 'organizationName' ]
description:
- organizationName field of the certificate signing request subject
organizational_unit_name:
required: false
aliases: [ 'OU', 'organizationalUnitName' ]
description:
- organizationalUnitName field of the certificate signing request subject
common_name:
required: false
aliases: [ 'CN', 'commonName' ]
description:
- commonName field of the certificate signing request subject
email_address:
required: false
aliases: [ 'E', 'emailAddress' ]
description:
- emailAddress field of the certificate signing request subject
subject_alt_name:
required: false
aliases: [ 'subjectAltName' ]
description:
- SAN extension to attach to the certificate signing request
- This can either be a 'comma separated string' or a YAML list.
subject_alt_name_critical:
required: false
aliases: [ 'subjectAltName_critical' ]
description:
- Should the subjectAltName extension be considered as critical
key_usage:
required: false
aliases: [ 'keyUsage' ]
description:
- This defines the purpose (e.g. encipherment, signature, certificate signing)
of the key contained in the certificate.
- This can either be a 'comma separated string' or a YAML list.
key_usage_critical:
required: false
aliases: [ 'keyUsage_critical' ]
description:
- Should the keyUsage extension be considered as critical
extended_key_usage:
required: false
aliases: [ 'extKeyUsage', 'extendedKeyUsage' ]
description:
- Additional restrictions (e.g. client authentication, server authentication)
on the allowed purposes for which the public key may be used.
- This can either be a 'comma separated string' or a YAML list.
extended_key_usage_critical:
required: false
aliases: [ 'extKeyUsage_critical', 'extendedKeyUsage_critical' ]
description:
- Should the extkeyUsage extension be considered as critical
basic_constraints:
required: false
aliases: ['basicConstraints']
description:
            - Indicates basic constraints, such as whether the certificate is a CA.
version_added: 2.5
basic_constraints_critical:
required: false
aliases: [ 'basicConstraints_critical' ]
description:
- Should the basicConstraints extension be considered as critical
version_added: 2.5
extends_documentation_fragment: files
notes:
- "If the certificate signing request already exists it will be checked whether subjectAltName,
keyUsage and extendedKeyUsage only contain the requested values and if the request was signed
by the given private key"
'''
EXAMPLES = '''
# Generate an OpenSSL Certificate Signing Request
- openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
common_name: www.ansible.com
# Generate an OpenSSL Certificate Signing Request with a
# passphrase protected private key
- openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
privatekey_passphrase: ansible
common_name: www.ansible.com
# Generate an OpenSSL Certificate Signing Request with Subject information
- openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
country_name: FR
organization_name: Ansible
email_address: [email protected]
common_name: www.ansible.com
# Generate an OpenSSL Certificate Signing Request with subjectAltName extension
- openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
subject_alt_name: 'DNS:www.ansible.com,DNS:m.ansible.com'
# Force re-generate an OpenSSL Certificate Signing Request
- openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
force: True
common_name: www.ansible.com
# Generate an OpenSSL Certificate Signing Request with special key usages
- openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
common_name: www.ansible.com
key_usage:
- digitalSignature
- keyAgreement
extended_key_usage:
- clientAuth
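# Generate an OpenSSL Certificate Signing Request using the subject dictionary
# (illustrative; the keys follow the subject option documented above)
- openssl_csr:
    path: /etc/ssl/csr/www.ansible.com.csr
    privatekey_path: /etc/ssl/private/ansible.com.pem
    subject:
      commonName: www.ansible.com
      emailAddress: [email protected]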
'''
RETURN = '''
privatekey:
description: Path to the TLS/SSL private key the CSR was generated for
returned: changed or success
type: string
sample: /etc/ssl/private/ansible.com.pem
filename:
description: Path to the generated Certificate Signing Request
returned: changed or success
type: string
sample: /etc/ssl/csr/www.ansible.com.csr
subject:
description: A list of the subject tuples attached to the CSR
returned: changed or success
type: list
sample: "[('CN', 'www.ansible.com'), ('O', 'Ansible')]"
subjectAltName:
description: The alternative names this CSR is valid for
returned: changed or success
type: list
sample: [ 'DNS:www.ansible.com', 'DNS:m.ansible.com' ]
keyUsage:
description: Purpose for which the public key may be used
returned: changed or success
type: list
sample: [ 'digitalSignature', 'keyAgreement' ]
extendedKeyUsage:
description: Additional restriction on the public key purposes
returned: changed or success
type: list
sample: [ 'clientAuth' ]
basicConstraints:
description: Indicates if the certificate belongs to a CA
returned: changed or success
type: list
sample: ['CA:TRUE', 'pathLenConstraint:0']
'''
import os
from ansible.module_utils import crypto as crypto_utils
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native, to_bytes
try:
import OpenSSL
from OpenSSL import crypto
except ImportError:
pyopenssl_found = False
else:
pyopenssl_found = True
class CertificateSigningRequestError(crypto_utils.OpenSSLObjectError):
pass
class CertificateSigningRequest(crypto_utils.OpenSSLObject):
def __init__(self, module):
super(CertificateSigningRequest, self).__init__(
module.params['path'],
module.params['state'],
module.params['force'],
module.check_mode
)
self.digest = module.params['digest']
self.privatekey_path = module.params['privatekey_path']
self.privatekey_passphrase = module.params['privatekey_passphrase']
self.version = module.params['version']
self.subjectAltName = module.params['subjectAltName']
self.subjectAltName_critical = module.params['subjectAltName_critical']
self.keyUsage = module.params['keyUsage']
self.keyUsage_critical = module.params['keyUsage_critical']
self.extendedKeyUsage = module.params['extendedKeyUsage']
self.extendedKeyUsage_critical = module.params['extendedKeyUsage_critical']
self.basicConstraints = module.params['basicConstraints']
self.basicConstraints_critical = module.params['basicConstraints_critical']
self.request = None
self.privatekey = None
self.subject = [
('C', module.params['countryName']),
('ST', module.params['stateOrProvinceName']),
('L', module.params['localityName']),
('O', module.params['organizationName']),
('OU', module.params['organizationalUnitName']),
('CN', module.params['commonName']),
('emailAddress', module.params['emailAddress']),
]
if module.params['subject']:
self.subject = self.subject + crypto_utils.parse_name_field(module.params['subject'])
self.subject = [(entry[0], entry[1]) for entry in self.subject if entry[1]]
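        # If no subjectAltName was requested, default it to a DNS entry for
        # the commonName (NID 13) so the CSR still carries a usable SAN.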
if not self.subjectAltName:
for sub in self.subject:
if OpenSSL._util.lib.OBJ_txt2nid(to_bytes(sub[0])) == 13: # 13 is the NID for "commonName"
self.subjectAltName = ['DNS:%s' % sub[1]]
break
def generate(self, module):
'''Generate the certificate signing request.'''
if not self.check(module, perms_required=False) or self.force:
req = crypto.X509Req()
req.set_version(self.version - 1)
subject = req.get_subject()
for entry in self.subject:
if entry[1] is not None:
# Workaround for https://github.com/pyca/pyopenssl/issues/165
nid = OpenSSL._util.lib.OBJ_txt2nid(to_bytes(entry[0]))
OpenSSL._util.lib.X509_NAME_add_entry_by_NID(subject._name, nid, OpenSSL._util.lib.MBSTRING_UTF8, to_bytes(entry[1]), -1, -1, 0)
extensions = []
if self.subjectAltName:
altnames = ', '.join(self.subjectAltName)
extensions.append(crypto.X509Extension(b"subjectAltName", self.subjectAltName_critical, altnames.encode('ascii')))
if self.keyUsage:
usages = ', '.join(self.keyUsage)
extensions.append(crypto.X509Extension(b"keyUsage", self.keyUsage_critical, usages.encode('ascii')))
if self.extendedKeyUsage:
usages = ', '.join(self.extendedKeyUsage)
extensions.append(crypto.X509Extension(b"extendedKeyUsage", self.extendedKeyUsage_critical, usages.encode('ascii')))
if self.basicConstraints:
usages = ', '.join(self.basicConstraints)
extensions.append(crypto.X509Extension(b"basicConstraints", self.basicConstraints_critical, usages.encode('ascii')))
if extensions:
req.add_extensions(extensions)
req.set_pubkey(self.privatekey)
req.sign(self.privatekey, self.digest)
self.request = req
try:
csr_file = open(self.path, 'wb')
csr_file.write(crypto.dump_certificate_request(crypto.FILETYPE_PEM, self.request))
csr_file.close()
except (IOError, OSError) as exc:
raise CertificateSigningRequestError(exc)
self.changed = True
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
self.changed = True
def check(self, module, perms_required=True):
"""Ensure the resource is in its desired state."""
state_and_perms = super(CertificateSigningRequest, self).check(module, perms_required)
self.privatekey = crypto_utils.load_privatekey(self.privatekey_path, self.privatekey_passphrase)
def _check_subject(csr):
subject = [(OpenSSL._util.lib.OBJ_txt2nid(to_bytes(sub[0])), to_bytes(sub[1])) for sub in self.subject]
current_subject = [(OpenSSL._util.lib.OBJ_txt2nid(to_bytes(sub[0])), to_bytes(sub[1])) for sub in csr.get_subject().get_components()]
if not set(subject) == set(current_subject):
return False
return True
def _check_subjectAltName(extensions):
altnames_ext = next((ext for ext in extensions if ext.get_short_name() == b'subjectAltName'), '')
altnames = [altname.strip() for altname in str(altnames_ext).split(',')]
            # Apparently OpenSSL returns 'IP Address' rather than 'IP' as the specifier when converting the subjectAltName
            # to a string, although it will not accept that specifier when generating the CSR (https://github.com/openssl/openssl/issues/4004).
altnames = [name if not name.startswith('IP Address:') else "IP:" + name.split(':', 1)[1] for name in altnames]
if self.subjectAltName:
if set(altnames) != set(self.subjectAltName) or altnames_ext.get_critical() != self.subjectAltName_critical:
return False
else:
if altnames:
return False
return True
def _check_keyUsage_(extensions, extName, expected, critical):
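            # Shared checker for keyUsage/extendedKeyUsage/basicConstraints:
            # compares the requested usages (resolved to NIDs) and the
            # critical flag against what the existing CSR already carries.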
usages_ext = [ext for ext in extensions if ext.get_short_name() == extName]
if (not usages_ext and expected) or (usages_ext and not expected):
return False
elif not usages_ext and not expected:
return True
else:
current = [OpenSSL._util.lib.OBJ_txt2nid(to_bytes(usage.strip())) for usage in str(usages_ext[0]).split(',')]
expected = [OpenSSL._util.lib.OBJ_txt2nid(to_bytes(usage)) for usage in expected]
return set(current) == set(expected) and usages_ext[0].get_critical() == critical
def _check_keyUsage(extensions):
return _check_keyUsage_(extensions, b'keyUsage', self.keyUsage, self.keyUsage_critical)
        def _check_extendedKeyUsage(extensions):
            return _check_keyUsage_(extensions, b'extendedKeyUsage', self.extendedKeyUsage, self.extendedKeyUsage_critical)
def _check_basicConstraints(extensions):
return _check_keyUsage_(extensions, b'basicConstraints', self.basicConstraints, self.basicConstraints_critical)
def _check_extensions(csr):
extensions = csr.get_extensions()
return (_check_subjectAltName(extensions) and _check_keyUsage(extensions) and
                    _check_extendedKeyUsage(extensions) and _check_basicConstraints(extensions))
def _check_signature(csr):
try:
return csr.verify(self.privatekey)
except crypto.Error:
return False
if not state_and_perms:
return False
csr = crypto_utils.load_certificate_request(self.path)
return _check_subject(csr) and _check_extensions(csr) and _check_signature(csr)
def dump(self):
'''Serialize the object into a dictionary.'''
result = {
'privatekey': self.privatekey_path,
'filename': self.path,
'subject': self.subject,
'subjectAltName': self.subjectAltName,
'keyUsage': self.keyUsage,
'extendedKeyUsage': self.extendedKeyUsage,
'basicConstraints': self.basicConstraints,
'changed': self.changed
}
return result
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
digest=dict(default='sha256', type='str'),
            privatekey_path=dict(required=True, type='path'),
privatekey_passphrase=dict(type='str', no_log=True),
version=dict(default='1', type='int'),
force=dict(default=False, type='bool'),
path=dict(required=True, type='path'),
subject=dict(type='dict'),
countryName=dict(aliases=['C', 'country_name'], type='str'),
stateOrProvinceName=dict(aliases=['ST', 'state_or_province_name'], type='str'),
localityName=dict(aliases=['L', 'locality_name'], type='str'),
organizationName=dict(aliases=['O', 'organization_name'], type='str'),
organizationalUnitName=dict(aliases=['OU', 'organizational_unit_name'], type='str'),
commonName=dict(aliases=['CN', 'common_name'], type='str'),
emailAddress=dict(aliases=['E', 'email_address'], type='str'),
subjectAltName=dict(aliases=['subject_alt_name'], type='list'),
subjectAltName_critical=dict(aliases=['subject_alt_name_critical'], default=False, type='bool'),
keyUsage=dict(aliases=['key_usage'], type='list'),
keyUsage_critical=dict(aliases=['key_usage_critical'], default=False, type='bool'),
extendedKeyUsage=dict(aliases=['extKeyUsage', 'extended_key_usage'], type='list'),
extendedKeyUsage_critical=dict(aliases=['extKeyUsage_critical', 'extended_key_usage_critical'], default=False, type='bool'),
basicConstraints=dict(aliases=['basic_constraints'], type='list'),
basicConstraints_critical=dict(aliases=['basic_constraints_critical'], default=False, type='bool'),
),
add_file_common_args=True,
supports_check_mode=True,
)
if not pyopenssl_found:
module.fail_json(msg='the python pyOpenSSL module is required')
try:
getattr(crypto.X509Req, 'get_extensions')
except AttributeError:
module.fail_json(msg='You need to have PyOpenSSL>=0.15 to generate CSRs')
base_dir = os.path.dirname(module.params['path'])
if not os.path.isdir(base_dir):
        module.fail_json(name=base_dir, msg='The directory %s does not exist or is not a directory' % base_dir)
csr = CertificateSigningRequest(module)
if module.params['state'] == 'present':
if module.check_mode:
result = csr.dump()
result['changed'] = module.params['force'] or not csr.check(module)
module.exit_json(**result)
try:
csr.generate(module)
except (CertificateSigningRequestError, crypto_utils.OpenSSLObjectError) as exc:
module.fail_json(msg=to_native(exc))
else:
if module.check_mode:
result = csr.dump()
result['changed'] = os.path.exists(module.params['path'])
module.exit_json(**result)
try:
csr.remove()
except (CertificateSigningRequestError, crypto_utils.OpenSSLObjectError) as exc:
module.fail_json(msg=to_native(exc))
result = csr.dump()
module.exit_json(**result)
if __name__ == "__main__":
main()
| gpl-3.0 | 1,751,537,763,652,358,700 | 39.196154 | 148 | 0.63726 | false |
LumPenPacK/NetworkExtractionFromImages | osx_build/nefi2_osx_amd64_xcode_2015/site-packages/networkx/tests/test_convert_pandas.py | 43 | 2177 | from nose import SkipTest
from nose.tools import assert_true
import networkx as nx
class TestConvertPandas(object):
numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
@classmethod
def setupClass(cls):
try:
import pandas as pd
except ImportError:
raise SkipTest('Pandas not available.')
    def __init__(self):
global pd
import pandas as pd
self.r = pd.np.random.RandomState(seed=5)
ints = self.r.random_integers(1, 10, size=(3,2))
a = ['A', 'B', 'C']
b = ['D', 'A', 'E']
df = pd.DataFrame(ints, columns=['weight', 'cost'])
df[0] = a # Column label 0 (int)
df['b'] = b # Column label 'b' (str)
self.df = df
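        # df now has columns ['weight', 'cost', 0, 'b']: the int columns hold
        # edge attributes, while columns 0 and 'b' hold the edge endpoints.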
def assert_equal(self, G1, G2):
        assert_true(nx.is_isomorphic(G1, G2, edge_match=lambda x, y: x == y))
    def test_from_dataframe_all_attr(self):
        Gtrue = nx.Graph([('E', 'C', {'cost': 9, 'weight': 10}),
                          ('B', 'A', {'cost': 1, 'weight': 7}),
                          ('A', 'D', {'cost': 7, 'weight': 4})])
        G = nx.from_pandas_dataframe(self.df, 0, 'b', True)
        self.assert_equal(G, Gtrue)
    def test_from_dataframe_multi_attr(self):
        Gtrue = nx.Graph([('E', 'C', {'cost': 9, 'weight': 10}),
                          ('B', 'A', {'cost': 1, 'weight': 7}),
                          ('A', 'D', {'cost': 7, 'weight': 4})])
        G = nx.from_pandas_dataframe(self.df, 0, 'b', ['weight', 'cost'])
        self.assert_equal(G, Gtrue)
    def test_from_dataframe_one_attr(self):
        Gtrue = nx.Graph([('E', 'C', {'weight': 10}),
                          ('B', 'A', {'weight': 7}),
                          ('A', 'D', {'weight': 4})])
        G = nx.from_pandas_dataframe(self.df, 0, 'b', 'weight')
        self.assert_equal(G, Gtrue)
    def test_from_dataframe_no_attr(self):
        Gtrue = nx.Graph([('E', 'C', {}),
                          ('B', 'A', {}),
                          ('A', 'D', {})])
        G = nx.from_pandas_dataframe(self.df, 0, 'b')
        self.assert_equal(G, Gtrue)
| bsd-2-clause | -3,473,546,875,328,870,400 | 37.192982 | 79 | 0.467616 | false |
fxfitz/ansible | lib/ansible/modules/network/radware/vdirect_file.py | 14 | 9277 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 Radware LTD.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
module: vdirect_file
author: Evgeny Fedoruk @ Radware LTD (@evgenyfedoruk)
short_description: Uploads a new or updates an existing runnable file into Radware vDirect server
description:
- Uploads a new or updates an existing configuration template or workflow template into the Radware vDirect server.
All parameters may be set as environment variables.
notes:
- Requires the Radware vdirect-client Python package on the host. This is as easy as
C(pip install vdirect-client)
version_added: "2.4"
options:
vdirect_ip:
description:
- Primary vDirect server IP address, may be set as VDIRECT_IP environment variable.
required: true
vdirect_user:
description:
- vDirect server username, may be set as VDIRECT_USER environment variable.
required: true
vdirect_password:
description:
- vDirect server password, may be set as VDIRECT_PASSWORD environment variable.
required: true
vdirect_secondary_ip:
description:
- Secondary vDirect server IP address, may be set as VDIRECT_SECONDARY_IP environment variable.
vdirect_wait:
description:
- Wait for async operation to complete, may be set as VDIRECT_WAIT environment variable.
type: bool
default: 'yes'
vdirect_https_port:
description:
- vDirect server HTTPS port number, may be set as VDIRECT_HTTPS_PORT environment variable.
default: 2189
vdirect_http_port:
description:
- vDirect server HTTP port number, may be set as VDIRECT_HTTP_PORT environment variable.
default: 2188
vdirect_timeout:
description:
- Amount of time to wait for async operation completion [seconds],
- may be set as VDIRECT_TIMEOUT environment variable.
default: 60
vdirect_use_ssl:
description:
- If C(no), an HTTP connection will be used instead of the default HTTPS connection,
- may be set as VDIRECT_HTTPS or VDIRECT_USE_SSL environment variable.
type: bool
default: 'yes'
vdirect_validate_certs:
description:
- If C(no), SSL certificates will not be validated,
- may be set as VDIRECT_VALIDATE_CERTS or VDIRECT_VERIFY environment variable.
- This should only set to C(no) used on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
file_name:
description:
- vDirect runnable file name to be uploaded.
- May be velocity configuration template (.vm) or workflow template zip file (.zip).
required: true
requirements:
- "vdirect-client >= 4.1.1"
'''
EXAMPLES = '''
- name: vdirect_file
vdirect_file:
vdirect_ip: 10.10.10.10
vdirect_user: vDirect
vdirect_password: radware
file_name: /tmp/get_vlans.vm
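# Upload (or update) a workflow template from a ZIP archive (illustrative;
# same connection parameters, only the source file extension differs)
- name: vdirect_file
  vdirect_file:
    vdirect_ip: 10.10.10.10
    vdirect_user: vDirect
    vdirect_password: radware
    file_name: /tmp/get_vlans.zip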
'''
RETURN = '''
result:
description: Message detailing upload result
returned: success
type: string
sample: "Workflow template created"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
import os
import os.path
try:
from vdirect_client import rest_client
HAS_REST_CLIENT = True
except ImportError:
HAS_REST_CLIENT = False
TEMPLATE_EXTENSION = '.vm'
WORKFLOW_EXTENSION = '.zip'
WRONG_EXTENSION_ERROR = 'The file_name parameter must have ' \
                        'a velocity script (.vm) extension or a ZIP archive (.zip) extension'
CONFIGURATION_TEMPLATE_CREATED_SUCCESS = 'Configuration template created'
CONFIGURATION_TEMPLATE_UPDATED_SUCCESS = 'Configuration template updated'
WORKFLOW_TEMPLATE_CREATED_SUCCESS = 'Workflow template created'
WORKFLOW_TEMPLATE_UPDATED_SUCCESS = 'Workflow template updated'
meta_args = dict(
vdirect_ip=dict(required=True, fallback=(env_fallback, ['VDIRECT_IP'])),
vdirect_user=dict(required=True, fallback=(env_fallback, ['VDIRECT_USER'])),
vdirect_password=dict(
required=True, fallback=(env_fallback, ['VDIRECT_PASSWORD']),
no_log=True, type='str'),
vdirect_secondary_ip=dict(
required=False, fallback=(env_fallback, ['VDIRECT_SECONDARY_IP']),
default=None),
vdirect_use_ssl=dict(
required=False, fallback=(env_fallback, ['VDIRECT_HTTPS', 'VDIRECT_USE_SSL']),
default=True, type='bool'),
vdirect_wait=dict(
required=False, fallback=(env_fallback, ['VDIRECT_WAIT']),
default=True, type='bool'),
vdirect_timeout=dict(
required=False, fallback=(env_fallback, ['VDIRECT_TIMEOUT']),
default=60, type='int'),
vdirect_validate_certs=dict(
required=False, fallback=(env_fallback, ['VDIRECT_VERIFY', 'VDIRECT_VALIDATE_CERTS']),
default=True, type='bool'),
vdirect_https_port=dict(
required=False, fallback=(env_fallback, ['VDIRECT_HTTPS_PORT']),
default=2189, type='int'),
vdirect_http_port=dict(
required=False, fallback=(env_fallback, ['VDIRECT_HTTP_PORT']),
default=2188, type='int'),
file_name=dict(required=True)
)
class FileException(Exception):
def __init__(self, reason, details):
self.reason = reason
self.details = details
def __str__(self):
return 'Reason: {0}. Details:{1}.'.format(self.reason, self.details)
class InvalidSourceException(FileException):
def __init__(self, message):
super(InvalidSourceException, self).__init__(
'Error parsing file', repr(message))
class VdirectFile(object):
def __init__(self, params):
self.client = rest_client.RestClient(params['vdirect_ip'],
params['vdirect_user'],
params['vdirect_password'],
wait=params['vdirect_wait'],
secondary_vdirect_ip=params['vdirect_secondary_ip'],
https_port=params['vdirect_https_port'],
http_port=params['vdirect_http_port'],
timeout=params['vdirect_timeout'],
https=params['vdirect_use_ssl'],
verify=params['vdirect_validate_certs'])
def upload(self, fqn):
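        # Velocity scripts (.vm) become configuration templates and ZIP
        # archives become workflow templates. A 409 from the create call
        # means the object already exists, so fall back to updating it.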
if fqn.endswith(TEMPLATE_EXTENSION):
template_name = os.path.basename(fqn)
template = rest_client.Template(self.client)
            with open(fqn, 'r') as runnable_file:
                file_content = runnable_file.read()
result_to_return = CONFIGURATION_TEMPLATE_CREATED_SUCCESS
result = template.create_from_source(file_content, template_name, fail_if_invalid=True)
if result[rest_client.RESP_STATUS] == 409:
result_to_return = CONFIGURATION_TEMPLATE_UPDATED_SUCCESS
result = template.upload_source(file_content, template_name, fail_if_invalid=True)
if result[rest_client.RESP_STATUS] == 400:
raise InvalidSourceException(str(result[rest_client.RESP_STR]))
elif fqn.endswith(WORKFLOW_EXTENSION):
workflow = rest_client.WorkflowTemplate(self.client)
            with open(fqn, 'rb') as runnable_file:
                file_content = runnable_file.read()
result_to_return = WORKFLOW_TEMPLATE_CREATED_SUCCESS
result = workflow.create_template_from_archive(file_content, fail_if_invalid=True)
if result[rest_client.RESP_STATUS] == 409:
result_to_return = WORKFLOW_TEMPLATE_UPDATED_SUCCESS
result = workflow.update_archive(file_content, os.path.splitext(os.path.basename(fqn))[0])
if result[rest_client.RESP_STATUS] == 400:
raise InvalidSourceException(str(result[rest_client.RESP_STR]))
else:
result_to_return = WRONG_EXTENSION_ERROR
return result_to_return
def main():
module = AnsibleModule(argument_spec=meta_args)
if not HAS_REST_CLIENT:
module.fail_json(msg="The python vdirect-client module is required")
try:
vdirect_file = VdirectFile(module.params)
result = vdirect_file.upload(module.params['file_name'])
result = dict(result=result)
module.exit_json(**result)
except Exception as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 | -7,459,700,842,623,794,000 | 37.334711 | 119 | 0.651073 | false |