repo_name (string, length 5-92) | path (string, length 4-232) | copies (string, 19 classes) | size (string, length 4-7) | content (string, length 721-1.04M) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
tiwillia/openshift-tools | openshift/installer/vendored/openshift-ansible-3.5.5/roles/lib_openshift/library/oc_secret.py | 2 | 52800 |
#!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import json
import os
import re
import shutil
import subprocess
# pylint: disable=import-error
import ruamel.yaml as yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/secret -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_secret
short_description: Module to manage openshift secrets
description:
- Manage openshift secrets programmatically.
options:
state:
description:
- If present, the secret will be created if it doesn't exist or updated if different. If absent, the secret will be removed if present. If list, information about the secret will be gathered and returned as part of the Ansible call results.
required: false
default: present
choices: ["present", "absent", "list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- Name of the object that is being queried.
required: false
default: None
aliases: []
namespace:
description:
- The namespace where the object lives.
required: false
default: default
aliases: []
files:
description:
- A list of files provided for secrets
required: false
default: None
aliases: []
delete_after:
description:
- Whether or not to delete the files after processing them.
required: false
default: false
aliases: []
contents:
description:
- Content of the secrets
required: false
default: None
aliases: []
force:
description:
- Whether or not to force the operation
required: false
default: false
aliases: []
decode:
description:
- base64 decode the object
required: false
default: false
aliases: []
author:
- "Kenny Woodson <[email protected]>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: create secret
oc_secret:
state: present
namespace: openshift-infra
name: metrics-deployer
files:
- name: nothing
path: /dev/null
register: secretout
run_once: true
- name: get ca from hawkular
oc_secret:
state: list
namespace: openshift-infra
name: hawkular-metrics-certificate
decode: True
register: hawkout
run_once: true
- name: Create secrets
oc_secret:
namespace: mynamespace
name: mysecrets
contents:
- path: data.yml
data: "{{ data_content }}"
- path: auth-keys
data: "{{ auth_keys_content }}"
- path: configdata.yml
data: "{{ configdata_content }}"
- path: cert.crt
data: "{{ cert_content }}"
- path: key.pem
data: "{{ osso_site_key_content }}"
- path: ca.cert.pem
data: "{{ ca_cert_content }}"
register: secretout
'''
# -*- -*- -*- End included fragment: doc/secret -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# noqa: E301,E302
class YeditException(Exception):
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
        ''' getter method for separator '''
        return self._separator
    @separator.setter
    def separator(self, value):
        ''' setter method for separator '''
        self._separator = value
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key % ''.join(common_separators), key)
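    # For illustration: with the default separator '.', a key such as
    # 'a.b[0].c' should parse into [('', 'a'), ('', 'b'), ('0', ''), ('', 'c')];
    # dict keys land in the second tuple slot, list indexes in the first.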
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
        ''' Add an item to a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a#b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
return None
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. %s' % err)
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# pylint: disable=no-member,maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in ' +
'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
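    # Illustrative sketch of put() on an empty document (hypothetical values):
    #   yed = Yedit(content={})
    #   yed.put('a.b.c', 1)   # expected to leave yaml_dict as {'a': {'b': {'c': 1}}}
    # put() edits a round-trip copy and only swaps it in when add_entry
    # succeeds, so a failed edit leaves the original yaml_dict untouched.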
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
% (inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# If vtype is not str then go ahead and attempt to yaml load it.
if isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming ' +
'value. value=[%s] vtype=[%s]'
% (type(inc_value), vtype))
return inc_value
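    # Rough examples of the coercion above (illustrative, not exhaustive):
    #   Yedit.parse_value('abc', 'bool')  should raise YeditException
    #   Yedit.parse_value('true', 'bool') is accepted, then yaml-loaded to a bool
    #   Yedit.parse_value(True, 'str')    should come back as the string 'True'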
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(module):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=module.params['src'],
backup=module.params['backup'],
separator=module.params['separator'])
if module.params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and \
module.params['state'] != 'present':
                return {'failed': True,
                        'msg': ('Error opening file [%s]. Verify that the '
                                'file exists, that it has correct '
                                'permissions, and is valid yaml.'
                                % module.params['src'])}
if module.params['state'] == 'list':
if module.params['content']:
content = Yedit.parse_value(module.params['content'],
module.params['content_type'])
yamlfile.yaml_dict = content
if module.params['key']:
rval = yamlfile.get(module.params['key']) or {}
return {'changed': False, 'result': rval, 'state': "list"}
elif module.params['state'] == 'absent':
if module.params['content']:
content = Yedit.parse_value(module.params['content'],
module.params['content_type'])
yamlfile.yaml_dict = content
if module.params['update']:
rval = yamlfile.pop(module.params['key'],
module.params['value'])
else:
rval = yamlfile.delete(module.params['key'])
if rval[0] and module.params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
elif module.params['state'] == 'present':
# check if content is different than what is in the file
if module.params['content']:
content = Yedit.parse_value(module.params['content'],
module.params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
module.params['value'] is None:
return {'changed': False,
'result': yamlfile.yaml_dict,
'state': "present"}
yamlfile.yaml_dict = content
# we were passed a value; parse it
if module.params['value']:
value = Yedit.parse_value(module.params['value'],
module.params['value_type'])
key = module.params['key']
if module.params['update']:
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
module.params['curr_value_format']) # noqa: E501
rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
elif module.params['append']:
rval = yamlfile.append(key, value)
else:
rval = yamlfile.put(key, value)
if rval[0] and module.params['src']:
yamlfile.write()
return {'changed': rval[0],
'result': rval[1], 'state': "present"}
# no edits to make
if module.params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': "present"}
        return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = kubeconfig
self.all_namespaces = all_namespaces
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = '/tmp/%s' % rname
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = '/tmp/%s' % rname
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, rname, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource, rname]
if selector:
cmd.append('--selector=%s' % selector)
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["%s=%s" % (key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = '/tmp/%s' % template_name
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, rname=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector:
cmd.append('--selector=%s' % selector)
elif rname:
cmd.append(rname)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
cmd.append('--schedulable=%s' % schedulable)
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
if grace_period:
cmd.append('--grace-period=%s' % int(grace_period))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout, stderr
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = []
if oadm:
cmds = ['oadm']
else:
cmds = ['oc']
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
cmds.extend(['-n', self.namespace])
cmds.extend(cmd)
rval = {}
results = ''
err = None
if self.verbose:
print(' '.join(cmds))
returncode, stdout, stderr = self._run(cmds, input_data)
rval = {"returncode": returncode,
"results": results,
"cmd": ' '.join(cmds)}
if returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as err:
if "No JSON object could be decoded" in err.args:
err = err.args
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {}})
return rval
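    # For illustration: on an instance bound to namespace 'default',
    # openshift_cmd(['get', 'pods']) should execute roughly
    #   oc -n default get pods
    # with KUBECONFIG pointing at self.kubeconfig, while oadm=True swaps the
    # binary for 'oadm'.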
class Utils(object):
''' utilities for openshiftcli modules '''
@staticmethod
def create_file(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
path = os.path.join('/tmp', rname)
with open(path, 'w') as fds:
if ftype == 'yaml':
fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
elif ftype == 'json':
fds.write(json.dumps(data))
else:
fds.write(data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [path])
return path
@staticmethod
def create_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_file(item['path'], item['data'], ftype=content_type)
files.append({'name': os.path.basename(path), 'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
contents = yaml.load(contents, yaml.RoundTripLoader)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in Openshift 3.2
# By default "oc version in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
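    # For illustration: given {'oc': 'v3.3.0.33-1'}, the method above should
    # return {'oc_numeric': '3.3.0.33', 'oc_short': '3.3'}; the '-1' release
    # suffix is dropped and the leading 'v' stripped.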
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import yum
yum_base = yum.YumBase()
if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
return True
return False
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print('list compare returned false')
return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(value)
print(user_def[key])
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(api_values)
print(user_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self):
        '''return all options as a list of cli params'''
        return self.stringify()
    def stringify(self):
        ''' return the options hash as a list of cli params '''
rval = []
for key, data in self.config_options.items():
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
return rval
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/secret.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class SecretConfig(object):
''' Handle secret options '''
# pylint: disable=too-many-arguments
def __init__(self,
sname,
namespace,
kubeconfig,
secrets=None):
''' constructor for handling secret options '''
self.kubeconfig = kubeconfig
self.name = sname
self.namespace = namespace
self.secrets = secrets
self.data = {}
self.create_dict()
def create_dict(self):
''' return a secret as a dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Secret'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
self.data['data'] = {}
if self.secrets:
for key, value in self.secrets.items():
self.data['data'][key] = value
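# For illustration, a SecretConfig built with name 'mysecret', namespace
# 'default' and secrets {'key': 'dmFsdWU='} (hypothetical values) should end up
# with self.data roughly equal to:
#   {'apiVersion': 'v1', 'kind': 'Secret',
#    'metadata': {'name': 'mysecret', 'namespace': 'default'},
#    'data': {'key': 'dmFsdWU='}}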
# pylint: disable=too-many-instance-attributes
class Secret(Yedit):
''' Class to wrap the oc command line tools '''
secret_path = "data"
kind = 'secret'
def __init__(self, content):
'''secret constructor'''
super(Secret, self).__init__(content=content)
self._secrets = None
@property
def secrets(self):
'''secret property getter'''
if self._secrets is None:
self._secrets = self.get_secrets()
return self._secrets
    @secrets.setter
    def secrets(self, value):
        '''secret property setter'''
        self._secrets = value
def get_secrets(self):
''' returns all of the defined secrets '''
return self.get(Secret.secret_path) or {}
def add_secret(self, key, value):
''' add a secret '''
if self.secrets:
self.secrets[key] = value
else:
self.put(Secret.secret_path, {key: value})
return True
def delete_secret(self, key):
''' delete secret'''
try:
del self.secrets[key]
except KeyError as _:
return False
return True
def find_secret(self, key):
''' find secret'''
rval = None
try:
rval = self.secrets[key]
except KeyError as _:
return None
return {'key': key, 'value': rval}
def update_secret(self, key, value):
''' update a secret'''
# pylint: disable=no-member
        if key in self.secrets:
self.secrets[key] = value
else:
self.add_secret(key, value)
return True
# -*- -*- -*- End included fragment: lib/secret.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_secret.py -*- -*- -*-
# pylint: disable=wrong-import-position,wrong-import-order
import base64
# pylint: disable=too-many-arguments
class OCSecret(OpenShiftCLI):
''' Class to wrap the oc command line tools
'''
def __init__(self,
namespace,
secret_name=None,
decode=False,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OpenshiftOC '''
super(OCSecret, self).__init__(namespace, kubeconfig)
self.namespace = namespace
self.name = secret_name
self.kubeconfig = kubeconfig
self.decode = decode
self.verbose = verbose
def get(self):
'''return a secret by name '''
results = self._get('secrets', self.name)
results['decoded'] = {}
results['exists'] = False
if results['returncode'] == 0 and results['results'][0]:
results['exists'] = True
if self.decode:
            if 'data' in results['results'][0]:
for sname, value in results['results'][0]['data'].items():
results['decoded'][sname] = base64.b64decode(value)
if results['returncode'] != 0 and '"%s" not found' % self.name in results['stderr']:
results['returncode'] = 0
return results
def delete(self):
'''delete a secret by name'''
return self._delete('secrets', self.name)
def create(self, files=None, contents=None):
'''Create a secret '''
if not files:
files = Utils.create_files_from_contents(contents)
secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
cmd = ['secrets', 'new', self.name]
cmd.extend(secrets)
results = self.openshift_cmd(cmd)
return results
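    # For illustration: a call with files=[{'name': 'ca.crt', 'path': '/tmp/ca.crt'}]
    # (hypothetical values) should shell out to roughly
    #   oc -n <namespace> secrets new <name> ca.crt=/tmp/ca.crt
    # using the legacy "oc secrets new" syntax.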
def update(self, files, force=False):
'''run update secret
This receives a list of file names and converts it into a secret.
The secret is then written to disk and passed into the `oc replace` command.
'''
secret = self.prep_secret(files)
if secret['returncode'] != 0:
return secret
sfile_path = '/tmp/%s' % self.name
with open(sfile_path, 'w') as sfd:
sfd.write(json.dumps(secret['results']))
atexit.register(Utils.cleanup, [sfile_path])
return self._replace(sfile_path, force=force)
def prep_secret(self, files=None, contents=None):
''' return what the secret would look like if created
This is accomplished by passing -ojson. This will most likely change in the future
'''
if not files:
files = Utils.create_files_from_contents(contents)
secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
cmd = ['-ojson', 'secrets', 'new', self.name]
cmd.extend(secrets)
return self.openshift_cmd(cmd, output=True)
@staticmethod
# pylint: disable=too-many-return-statements,too-many-branches
# TODO: This function should be refactored into its individual parts.
def run_ansible(params, check_mode):
'''run the ansible idempotent code'''
ocsecret = OCSecret(params['namespace'],
params['name'],
params['decode'],
kubeconfig=params['kubeconfig'],
verbose=params['debug'])
state = params['state']
api_rval = ocsecret.get()
#####
# Get
#####
if state == 'list':
            return {'changed': False, 'results': api_rval, 'state': 'list'}
if not params['name']:
return {'failed': True,
'msg': 'Please specify a name when state is absent|present.'}
########
# Delete
########
if state == 'absent':
if not Utils.exists(api_rval['results'], params['name']):
return {'changed': False, 'state': 'absent'}
if check_mode:
return {'changed': True, 'msg': 'Would have performed a delete.'}
api_rval = ocsecret.delete()
return {'changed': True, 'results': api_rval, 'state': 'absent'}
if state == 'present':
if params['files']:
files = params['files']
elif params['contents']:
files = Utils.create_files_from_contents(params['contents'])
else:
return {'failed': True,
'msg': 'Either specify files or contents.'}
########
# Create
########
if not Utils.exists(api_rval['results'], params['name']):
if check_mode:
return {'changed': True,
'msg': 'Would have performed a create.'}
api_rval = ocsecret.create(params['files'], params['contents'])
# Remove files
if files and params['delete_after']:
Utils.cleanup([ftmp['path'] for ftmp in files])
if api_rval['returncode'] != 0:
return {'failed': True,
'msg': api_rval}
return {'changed': True,
'results': api_rval,
'state': 'present'}
########
# Update
########
secret = ocsecret.prep_secret(params['files'], params['contents'])
if secret['returncode'] != 0:
return {'failed': True, 'msg': secret}
if Utils.check_def_equal(secret['results'], api_rval['results'][0]):
# Remove files
if files and params['delete_after']:
Utils.cleanup([ftmp['path'] for ftmp in files])
return {'changed': False,
'results': secret['results'],
'state': 'present'}
if check_mode:
return {'changed': True,
'msg': 'Would have performed an update.'}
api_rval = ocsecret.update(files, force=params['force'])
# Remove files
if secret and params['delete_after']:
Utils.cleanup([ftmp['path'] for ftmp in files])
if api_rval['returncode'] != 0:
return {'failed': True,
'msg': api_rval}
return {'changed': True,
'results': api_rval,
'state': 'present'}
return {'failed': True,
'changed': False,
'msg': 'Unknown state passed. %s' % state,
'state': 'unknown'}
# -*- -*- -*- End included fragment: class/oc_secret.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_secret.py -*- -*- -*-
def main():
'''
ansible oc module for managing OpenShift Secrets
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, type='str'),
files=dict(default=None, type='list'),
delete_after=dict(default=False, type='bool'),
contents=dict(default=None, type='list'),
force=dict(default=False, type='bool'),
decode=dict(default=False, type='bool'),
),
mutually_exclusive=[["contents", "files"]],
supports_check_mode=True,
)
rval = OCSecret.run_ansible(module.params, module.check_mode)
if 'failed' in rval:
module.fail_json(**rval)
module.exit_json(**rval)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_secret.py -*- -*- -*-
| apache-2.0 | -5,882,913,405,720,118,000 | 31.734036 | 244 | 0.520909 | false |
Onager/plaso | tests/output/tln.py | 1 | 8070 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the TLN output class."""
import io
import os
import unittest
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
from plaso.lib import definitions
from plaso.output import tln
from tests.containers import test_lib as containers_test_lib
from tests.output import test_lib
class TLNFieldFormattingHelperTest(test_lib.OutputModuleTestCase):
"""Test the TLN output module field formatting helper."""
# pylint: disable=protected-access
_OS_PATH_SPEC = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location='{0:s}{1:s}'.format(
os.path.sep, os.path.join('cases', 'image.dd')))
_TEST_EVENTS = [
{'data_type': 'test:event',
'display_name': 'OS: /var/log/syslog.1',
'hostname': 'ubuntu',
'inode': 12345678,
'pathspec': path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TSK, inode=15,
location='/var/log/syslog.1', parent=_OS_PATH_SPEC),
'text': (
'Reporter <CRON> PID: |8442| (pam_unix(cron:session): session\n '
'closed for user root)'),
'timestamp': '2012-06-27 18:17:01',
'timestamp_desc': definitions.TIME_DESCRIPTION_UNKNOWN,
'username': 'root'}]
def testFormatDescription(self):
"""Tests the _FormatDescription function."""
output_mediator = self._CreateOutputMediator()
formatters_directory_path = self._GetTestFilePath(['formatters'])
output_mediator.ReadMessageFormattersFromDirectory(
formatters_directory_path)
formatting_helper = tln.TLNFieldFormattingHelper(output_mediator)
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
description_string = formatting_helper._FormatDescription(
event, event_data, event_data_stream)
expected_description_string = (
'2012-06-27T18:17:01+00:00; '
'Unknown Time; '
'Reporter <CRON> PID: |8442| (pam_unix(cron:session): session closed '
'for user root)')
self.assertEqual(description_string, expected_description_string)
def testFormatNotes(self):
"""Tests the _FormatNotes function."""
output_mediator = self._CreateOutputMediator()
formatting_helper = tln.TLNFieldFormattingHelper(output_mediator)
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
notes_string = formatting_helper._FormatNotes(
event, event_data, event_data_stream)
self.assertEqual(
notes_string, 'File: OS: /var/log/syslog.1 inode: 12345678')
  def testFormatTimestamp(self):
    """Tests the _FormatTimestamp function."""
output_mediator = self._CreateOutputMediator()
formatting_helper = tln.TLNFieldFormattingHelper(output_mediator)
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
timestamp_string = formatting_helper._FormatTimestamp(
event, event_data, event_data_stream)
self.assertEqual(timestamp_string, '1340821021')
class TLNOutputModuleTest(test_lib.OutputModuleTestCase):
"""Tests for the TLN output module."""
# pylint: disable=protected-access
_OS_PATH_SPEC = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location='{0:s}{1:s}'.format(
os.path.sep, os.path.join('cases', 'image.dd')))
_TEST_EVENTS = [
{'data_type': 'test:event',
'display_name': 'OS: /var/log/syslog.1',
'hostname': 'ubuntu',
'inode': 12345678,
'path_spec': path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TSK, inode=15,
location='/var/log/syslog.1', parent=_OS_PATH_SPEC),
'text': (
'Reporter <CRON> PID: |8442| (pam_unix(cron:session): session\n '
'closed for user root)'),
'timestamp': '2012-06-27 18:17:01',
'timestamp_desc': definitions.TIME_DESCRIPTION_UNKNOWN,
'username': 'root'}]
def testWriteHeader(self):
"""Tests the WriteHeader function."""
test_file_object = io.StringIO()
output_mediator = self._CreateOutputMediator()
output_module = tln.TLNOutputModule(output_mediator)
output_module._file_object = test_file_object
output_module.WriteHeader()
header = test_file_object.getvalue()
self.assertEqual(header, 'Time|Source|Host|User|Description\n')
def testWriteEventBody(self):
"""Tests the WriteEventBody function."""
test_file_object = io.StringIO()
output_mediator = self._CreateOutputMediator()
formatters_directory_path = self._GetTestFilePath(['formatters'])
output_mediator.ReadMessageFormattersFromDirectory(
formatters_directory_path)
output_module = tln.TLNOutputModule(output_mediator)
output_module._file_object = test_file_object
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
output_module.WriteEventBody(
event, event_data, event_data_stream, event_data_stream)
expected_event_body = (
'1340821021|FILE|ubuntu|root|2012-06-27T18:17:01+00:00; Unknown Time; '
'Reporter <CRON> PID: 8442 (pam_unix(cron:session): '
'session closed for user root)\n')
event_body = test_file_object.getvalue()
self.assertEqual(event_body, expected_event_body)
self.assertEqual(event_body.count('|'), 4)
class L2TTLNOutputModuleTest(test_lib.OutputModuleTestCase):
"""Tests for the log2timeline TLN output module."""
# pylint: disable=protected-access
_OS_PATH_SPEC = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location='{0:s}{1:s}'.format(
os.path.sep, os.path.join('cases', 'image.dd')))
_TEST_EVENTS = [
{'data_type': 'test:event',
'display_name': 'OS: /var/log/syslog.1',
'hostname': 'ubuntu',
'inode': 12345678,
'path_spec': path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TSK, inode=15,
location='/var/log/syslog.1', parent=_OS_PATH_SPEC),
'text': (
'Reporter <CRON> PID: |8442| (pam_unix(cron:session): session\n '
'closed for user root)'),
'timestamp': '2012-06-27 18:17:01',
'timestamp_desc': definitions.TIME_DESCRIPTION_UNKNOWN,
'username': 'root'}]
def testWriteHeader(self):
"""Tests the WriteHeader function."""
test_file_object = io.StringIO()
output_mediator = self._CreateOutputMediator()
output_module = tln.L2TTLNOutputModule(output_mediator)
output_module._file_object = test_file_object
output_module.WriteHeader()
header = test_file_object.getvalue()
self.assertEqual(header, 'Time|Source|Host|User|Description|TZ|Notes\n')
def testWriteEventBody(self):
"""Tests the WriteEventBody function."""
test_file_object = io.StringIO()
output_mediator = self._CreateOutputMediator()
formatters_directory_path = self._GetTestFilePath(['formatters'])
output_mediator.ReadMessageFormattersFromDirectory(
formatters_directory_path)
output_module = tln.L2TTLNOutputModule(output_mediator)
output_module._file_object = test_file_object
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
output_module.WriteEventBody(
event, event_data, event_data_stream, event_data_stream)
expected_event_body = (
'1340821021|FILE|ubuntu|root|2012-06-27T18:17:01+00:00; Unknown Time; '
'Reporter <CRON> PID: 8442 (pam_unix(cron:session): '
'session closed for user root)'
'|UTC|File: OS: /var/log/syslog.1 inode: 12345678\n')
event_body = test_file_object.getvalue()
self.assertEqual(event_body, expected_event_body)
self.assertEqual(event_body.count('|'), 6)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -3,306,304,721,288,736,300 | 34.707965 | 79 | 0.674473 | false |
jptomo/rpython-lang-scheme | rpython/rlib/parsing/makepackrat.py | 1 | 24419 |
from __future__ import with_statement
import py
import sys
from rpython.rlib.parsing.tree import Nonterminal, Symbol, RPythonVisitor
from rpython.rlib.parsing.codebuilder import Codebuilder
from rpython.rlib.objectmodel import we_are_translated
class BacktrackException(Exception):
def __init__(self, error=None):
self.error = error
if not we_are_translated():
Exception.__init__(self, error)
class TreeOptimizer(RPythonVisitor):
def visit_or(self, t):
if len(t.children) == 1:
return self.dispatch(t.children[0])
return self.general_nonterminal_visit(t)
visit_commands = visit_or
def visit_negation(self, t):
child = self.dispatch(t.children[0])
if child.symbol == "negation":
child.symbol = "lookahead"
return child
t.children[0] = child
return t
def general_nonterminal_visit(self, t):
for i in range(len(t.children)):
t.children[i] = self.dispatch(t.children[i])
return t
def general_visit(self, t):
return t
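# TreeOptimizer flattens the grammar tree before code generation: an 'or' or
# 'commands' node with a single child collapses into that child, and a negation
# whose child is itself a negation is rewritten into a 'lookahead' node, so a
# double negation acts as positive lookahead.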
syntax = r"""
NAME:
`[a-zA-Z_][a-zA-Z0-9_]*`;
SPACE:
' ';
COMMENT:
`( *#[^\n]*\n)+`;
IGNORE:
`(#[^\n]*\n)|\n|\t| `;
newline:
COMMENT
| `( *\n *)*`;
REGEX:
r = `\`[^\\\`]*(\\.[^\\\`]*)*\``
return {Symbol('REGEX', r, None)};
QUOTE:
r = `'[^\']*'`
return {Symbol('QUOTE', r, None)};
PYTHONCODE:
r = `\{[^\n\}]*\}`
return {Symbol('PYTHONCODE', r, None)};
EOF:
!__any__;
file:
IGNORE*
list
[EOF];
list:
content = production+
return {Nonterminal('list', content)};
production:
name = NAME
SPACE*
args = productionargs
':'
IGNORE*
what = or_
IGNORE*
';'
IGNORE*
return {Nonterminal('production', [name, args, what])};
productionargs:
'('
IGNORE*
args = (
NAME
[
IGNORE*
','
IGNORE*
]
)*
arg = NAME
IGNORE*
')'
IGNORE*
return {Nonterminal('productionargs', args + [arg])}
| return {Nonterminal('productionargs', [])};
or_:
l = (commands ['|' IGNORE*])+
last = commands
return {Nonterminal('or', l + [last])}
| commands;
commands:
cmd = command
newline
cmds = (command [newline])+
return {Nonterminal('commands', [cmd] + cmds)}
| command;
command:
simplecommand;
simplecommand:
return_
| if_
| named_command
| repetition
| choose
| negation;
return_:
'return'
SPACE*
code = PYTHONCODE
IGNORE*
return {Nonterminal('return', [code])};
if_:
'do'
newline
cmd = command
SPACE*
'if'
SPACE*
condition = PYTHONCODE
IGNORE*
return {Nonterminal('if', [cmd, condition])}
| 'if'
SPACE*
condition = PYTHONCODE
IGNORE*
return {Nonterminal('if', [condition])};
choose:
'choose'
SPACE*
name = NAME
SPACE*
'in'
SPACE*
expr = PYTHONCODE
IGNORE*
cmds = commands
return {Nonterminal('choose', [name, expr, cmds])};
commandchain:
result = simplecommand+
return {Nonterminal('commands', result)};
named_command:
name = NAME
SPACE*
'='
SPACE*
cmd = command
return {Nonterminal('named_command', [name, cmd])};
repetition:
what = enclosed
SPACE* '?' IGNORE*
return {Nonterminal('maybe', [what])}
| what = enclosed
SPACE*
repetition = ('*' | '+')
IGNORE*
return {Nonterminal('repetition', [repetition, what])};
negation:
'!'
SPACE*
what = negation
IGNORE*
return {Nonterminal('negation', [what])}
| enclosed;
enclosed:
'<'
IGNORE*
what = primary
IGNORE*
'>'
IGNORE*
return {Nonterminal('exclusive', [what])}
| '['
IGNORE*
what = or_
IGNORE*
']'
IGNORE*
return {Nonterminal('ignore', [what])}
| ['(' IGNORE*] or_ [')' IGNORE*]
| primary;
primary:
call | REGEX [IGNORE*] | QUOTE [IGNORE*];
call:
x = NAME
args = arguments
IGNORE*
return {Nonterminal("call", [x, args])};
arguments:
'('
IGNORE*
args = (
PYTHONCODE
[IGNORE* ',' IGNORE*]
)*
last = PYTHONCODE
')'
IGNORE*
return {Nonterminal("args", args + [last])}
| return {Nonterminal("args", [])};
"""
class ErrorInformation(object):
def __init__(self, pos, expected=None):
if expected is None:
expected = []
self.expected = expected
self.pos = pos
def __str__(self):
return "ErrorInformation(%s, %s)" % (self.pos, self.expected)
def get_line_column(self, source):
pos = self.pos
assert pos >= 0
uptoerror = source[:pos]
lineno = uptoerror.count("\n")
columnno = pos - uptoerror.rfind("\n")
return lineno, columnno
def nice_error_message(self, filename='<filename>', source=""):
if source:
lineno, columnno = self.get_line_column(source)
result = [" File %s, line %s" % (filename, lineno + 1)]
result.append(source.split("\n")[lineno])
result.append(" " * columnno + "^")
        else:
            result = ["<couldn't get source>"]
if self.expected:
failure_reasons = self.expected
if len(failure_reasons) > 1:
all_but_one = failure_reasons[:-1]
last = failure_reasons[-1]
expected = "%s or '%s'" % (
", ".join(["'%s'" % e for e in all_but_one]), last)
else:
expected = failure_reasons[0]
result.append("ParseError: expected %s" % (expected, ))
else:
result.append("ParseError")
return "\n".join(result)
class Status(object):
# status codes:
NORMAL = 0
ERROR = 1
INPROGRESS = 2
LEFTRECURSION = 3
SOMESOLUTIONS = 4
_annspecialcase_ = 'specialize:ctr_location' # polymorphic
def __repr__(self):
return "Status(%s, %s, %s, %s)" % (self.pos, self.result, self.error,
self.status)
def __init__(self):
self.pos = 0
self.error = None
self.status = self.INPROGRESS
self.result = None
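# The generated packrat parsers memoize one Status per (position, arguments)
# key: a rule starts as INPROGRESS, is flipped to LEFTRECURSION when it is
# re-entered at the same position, grows seed results under SOMESOLUTIONS, and
# finally settles on NORMAL or ERROR. The memoize_header/memoize_footer methods
# below emit this machinery.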
class ParserBuilder(RPythonVisitor, Codebuilder):
def __init__(self):
Codebuilder.__init__(self)
self.initcode = []
self.names = {}
self.matchers = {}
def make_parser(self):
m = {'Status': Status,
'Nonterminal': Nonterminal,
'Symbol': Symbol,}
exec py.code.Source(self.get_code()).compile() in m
return m['Parser']
def memoize_header(self, name, args):
dictname = "_dict_%s" % (name, )
self.emit_initcode("self.%s = {}" % (dictname, ))
if args:
self.emit("_key = (self._pos, %s)" % (", ".join(args)))
else:
self.emit("_key = self._pos")
self.emit("_status = self.%s.get(_key, None)" % (dictname, ))
with self.block("if _status is None:"):
self.emit("_status = self.%s[_key] = Status()" % (
dictname, ))
with self.block("else:"):
self.emit("_statusstatus = _status.status")
with self.block("if _statusstatus == _status.NORMAL:"):
self.emit("self._pos = _status.pos")
self.emit("return _status")
with self.block("elif _statusstatus == _status.ERROR:"):
self.emit("raise BacktrackException(_status.error)")
if self.have_call:
with self.block(
"elif (_statusstatus == _status.INPROGRESS or\n"
" _statusstatus == _status.LEFTRECURSION):"):
self.emit("_status.status = _status.LEFTRECURSION")
with self.block("if _status.result is not None:"):
self.emit("self._pos = _status.pos")
self.emit("return _status")
with self.block("else:"):
self.emit("raise BacktrackException(None)")
with self.block(
"elif _statusstatus == _status.SOMESOLUTIONS:"):
self.emit("_status.status = _status.INPROGRESS")
self.emit("_startingpos = self._pos")
self.start_block("try:")
self.emit("_result = None")
self.emit("_error = None")
def memoize_footer(self, name, args):
dictname = "_dict_%s" % (name, )
if self.have_call:
with self.block(
"if _status.status == _status.LEFTRECURSION:"):
with self.block("if _status.result is not None:"):
with self.block("if _status.pos >= self._pos:"):
self.emit("_status.status = _status.NORMAL")
self.emit("self._pos = _status.pos")
self.emit("return _status")
self.emit("_status.pos = self._pos")
self.emit("_status.status = _status.SOMESOLUTIONS")
self.emit("_status.result = %s" % (self.resultname, ))
self.emit("_status.error = _error")
self.emit("self._pos = _startingpos")
self.emit("return self._%s(%s)" % (name, ', '.join(args)))
else:
self.emit("assert _status.status != _status.LEFTRECURSION")
self.emit("_status.status = _status.NORMAL")
self.emit("_status.pos = self._pos")
self.emit("_status.result = %s" % (self.resultname, ))
self.emit("_status.error = _error")
self.emit("return _status")
self.end_block("try")
with self.block("except BacktrackException, _exc:"):
self.emit("_status.pos = -1")
self.emit("_status.result = None")
self.combine_error('_exc.error')
self.emit("_status.error = _error")
self.emit("_status.status = _status.ERROR")
self.emit("raise BacktrackException(_error)")
def choice_point(self, name=None):
var = "_choice%s" % (self.namecount, )
self.namecount += 1
self.emit("%s = self._pos" % (var, ))
return var
def revert(self, var):
self.emit("self._pos = %s" % (var, ))
def visit_list(self, t):
self.start_block("class Parser(object):")
for elt in t.children:
self.dispatch(elt)
with self.block("def __init__(self, inputstream):"):
for line in self.initcode:
self.emit(line)
self.emit("self._pos = 0")
self.emit("self._inputstream = inputstream")
if self.matchers:
self.emit_regex_code()
self.end_block("class")
def emit_regex_code(self):
for regex, matcher in self.matchers.iteritems():
with self.block(
"def _regex%s(self):" % (abs(hash(regex)), )):
c = self.choice_point()
self.emit("_runner = self._Runner(self._inputstream, self._pos)")
self.emit("_i = _runner.recognize_%s(self._pos)" % (
abs(hash(regex)), ))
self.start_block("if _runner.last_matched_state == -1:")
self.revert(c)
self.emit("raise BacktrackException")
self.end_block("if")
self.emit("_upto = _runner.last_matched_index + 1")
self.emit("_pos = self._pos")
self.emit("assert _pos >= 0")
self.emit("assert _upto >= 0")
self.emit("_result = self._inputstream[_pos: _upto]")
self.emit("self._pos = _upto")
self.emit("return _result")
with self.block("class _Runner(object):"):
with self.block("def __init__(self, text, pos):"):
self.emit("self.text = text")
self.emit("self.pos = pos")
self.emit("self.last_matched_state = -1")
self.emit("self.last_matched_index = -1")
self.emit("self.state = -1")
for regex, matcher in self.matchers.iteritems():
matcher = str(matcher).replace(
"def recognize(runner, i)",
"def recognize_%s(runner, i)" % (abs(hash(regex)), ))
self.emit(str(matcher))
def visit_production(self, t):
name = t.children[0]
if name in self.names:
raise Exception("name %s appears twice" % (name, ))
self.names[name] = True
otherargs = t.children[1].children
argswithself = ", ".join(["self"] + otherargs)
argswithoutself = ", ".join(otherargs)
with self.block("def %s(%s):" % (name, argswithself)):
self.emit("return self._%s(%s).result" % (name, argswithoutself))
self.start_block("def _%s(%s):" % (name, argswithself, ))
self.namecount = 0
self.resultname = "_result"
self.have_call = False
self.created_error = False
allother = self.store_code_away()
self.dispatch(t.children[-1])
subsequent = self.restore_code(allother)
self.memoize_header(name, otherargs)
self.add_code(subsequent)
self.memoize_footer(name, otherargs)
self.end_block("def")
def visit_or(self, t, first=False):
possibilities = t.children
if len(possibilities) > 1:
self.start_block("while 1:")
for i, p in enumerate(possibilities):
c = self.choice_point()
with self.block("try:"):
self.dispatch(p)
self.emit("break")
with self.block("except BacktrackException, _exc:"):
self.combine_error('_exc.error')
self.revert(c)
if i == len(possibilities) - 1:
self.emit("raise BacktrackException(_error)")
self.dispatch(possibilities[-1])
if len(possibilities) > 1:
self.emit("break")
self.end_block("while")
def visit_commands(self, t):
for elt in t.children:
self.dispatch(elt)
def visit_maybe(self, t):
c = self.choice_point()
with self.block("try:"):
self.dispatch(t.children[0])
with self.block("except BacktrackException:"):
self.revert(c)
def visit_repetition(self, t):
name = "_all%s" % (self.namecount, )
self.namecount += 1
self.emit("%s = []" % (name, ))
if t.children[0] == '+':
self.dispatch(t.children[1])
self.emit("%s.append(_result)" % (name, ))
with self.block("while 1:"):
c = self.choice_point()
with self.block("try:"):
self.dispatch(t.children[1])
self.emit("%s.append(_result)" % (name, ))
with self.block("except BacktrackException, _exc:"):
self.combine_error('_exc.error')
self.revert(c)
self.emit("break")
self.emit("_result = %s" % (name, ))
def visit_exclusive(self, t):
self.resultname = "_enclosed"
self.dispatch(t.children[0])
self.emit("_enclosed = _result")
def visit_ignore(self, t):
resultname = "_before_discard%i" % (self.namecount, )
self.namecount += 1
self.emit("%s = _result" % (resultname, ))
self.dispatch(t.children[0])
self.emit("_result = %s" % (resultname, ))
def visit_negation(self, t):
c = self.choice_point()
resultname = "_stored_result%i" % (self.namecount, )
self.namecount += 1
child = t.children[0]
self.emit("%s = _result" % (resultname, ))
with self.block("try:"):
self.dispatch(child)
with self.block("except BacktrackException:"):
self.revert(c)
self.emit("_result = %s" % (resultname, ))
with self.block("else:"):
# heuristic to get nice error messages sometimes
if isinstance(child, Symbol) and child.symbol == "QUOTE":
error = "self._ErrorInformation(%s, ['NOT %s'])" % (
c, child.additional_info[1:-1], )
else:
error = "None"
self.emit("raise BacktrackException(%s)" % (error, ))
def visit_lookahead(self, t):
resultname = "_stored_result%i" % (self.namecount, )
self.emit("%s = _result" % (resultname, ))
c = self.choice_point()
self.dispatch(t.children[0])
self.revert(c)
self.emit("_result = %s" % (resultname, ))
def visit_named_command(self, t):
name = t.children[0]
self.dispatch(t.children[1])
self.emit("%s = _result" % (name, ))
def visit_return(self, t):
self.emit("_result = (%s)" % (t.children[0].additional_info[1:-1], ))
def visit_if(self, t):
if len(t.children) == 2:
self.dispatch(t.children[0])
with self.block("if not (%s):" % (
t.children[-1].additional_info[1:-1], )):
self.emit("raise BacktrackException(")
self.emit(" self._ErrorInformation(")
self.emit(" _startingpos, ['condition not met']))")
def visit_choose(self, t):
with self.block("for %s in (%s):" % (
t.children[0], t.children[1].additional_info[1:-1], )):
with self.block("try:"):
self.dispatch(t.children[2])
self.emit("break")
with self.block("except BacktrackException, _exc:"):
self.combine_error('_exc.error')
with self.block("else:"):
self.emit("raise BacktrackException(_error)")
def visit_call(self, t):
self.have_call = True
args = ", ".join(['(%s)' % (arg.additional_info[1:-1], )
for arg in t.children[1].children])
if t.children[0].startswith("_"):
callname = t.children[0]
self.emit("_result = self.%s(%s)" % (callname, args))
else:
callname = "_" + t.children[0]
self.emit("_call_status = self.%s(%s)" % (callname, args))
self.emit("_result = _call_status.result")
self.combine_error('_call_status.error')
def visit_REGEX(self, t):
r = t.additional_info[1:-1].replace('\\`', '`')
matcher = self.get_regex(r)
self.emit("_result = self._regex%s()" % (abs(hash(r)), ))
def visit_QUOTE(self, t):
self.emit("_result = self.__chars__(%r)" % (
str(t.additional_info[1:-1]), ))
def get_regex(self, r):
from rpython.rlib.parsing.regexparse import parse_regex
if r in self.matchers:
return self.matchers[r]
regex = parse_regex(r)
if regex is None:
raise ValueError(
"%s is not a valid regular expression" % regextext)
automaton = regex.make_automaton().make_deterministic()
automaton.optimize()
matcher = automaton.make_lexing_code()
self.matchers[r] = py.code.Source(matcher)
return matcher
def combine_error(self, newerror):
if self.created_error:
self.emit(
"_error = self._combine_errors(_error, %s)" % (newerror, ))
else:
self.emit("_error = %s" % (newerror, ))
self.created_error = True
class MetaPackratParser(type):
def __new__(cls, name_, bases, dct):
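        # the class docstring is treated as a packrat grammar: it is parsed,
        # optimized and compiled into parser methods, which are then copied
        # onto the class being created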
if '__doc__' not in dct or dct['__doc__'] is None:
return type.__new__(cls, name_, bases, dct)
from pypackrat import PyPackratSyntaxParser
import sys, new, inspect
frame = sys._getframe(1)
source = dct['__doc__']
p = PyPackratSyntaxParser(source)
try:
t = p.file()
except BacktrackException, exc:
print exc.error.nice_error_message("<docstring>", source)
lineno, _ = exc.error.get_line_column(source)
errorline = source.split("\n")[lineno]
try:
code = frame.f_code
source = inspect.getsource(code)
lineno_in_orig = source.split("\n").index(errorline)
if lineno_in_orig >= 0:
print "probable error position:"
print "file:", code.co_filename
print "line:", lineno_in_orig + code.co_firstlineno + 1
except (IOError, ValueError):
pass
raise exc
t = t.visit(TreeOptimizer())
visitor = ParserBuilder()
t.visit(visitor)
pcls = visitor.make_parser()
forbidden = dict.fromkeys(("__weakref__ __doc__ "
"__dict__ __module__").split())
initthere = "__init__" in dct
#XXX XXX XXX
if 'BacktrackException' not in frame.f_globals:
raise Exception("must import BacktrackException")
if 'Status' not in frame.f_globals:
raise Exception("must import Status")
result = type.__new__(cls, name_, bases, dct)
for key, value in pcls.__dict__.iteritems():
if isinstance(value, type):
value.__module__ = result.__module__ #XXX help the annotator
if isinstance(value, type(lambda: None)):
value = new.function(value.func_code, frame.f_globals)
if not hasattr(result, key) and key not in forbidden:
setattr(result, key, value)
if result.__init__ == object.__init__:
result.__init__ = pcls.__dict__['__init__']
result.init_parser = pcls.__dict__['__init__']
result._code = visitor.get_code()
return result
class PackratParser(object):
__metaclass__ = MetaPackratParser
_ErrorInformation = ErrorInformation
_BacktrackException = BacktrackException
def __chars__(self, chars):
#print '__chars__(%s)' % (chars, ), self._pos
try:
for i in range(len(chars)):
if self._inputstream[self._pos + i] != chars[i]:
raise BacktrackException(
self._ErrorInformation(self._pos, [chars]))
self._pos += len(chars)
return chars
except IndexError:
raise BacktrackException(
self._ErrorInformation(self._pos, [chars]))
def __any__(self):
try:
result = self._inputstream[self._pos]
self._pos += 1
return result
except IndexError:
raise BacktrackException(
self._ErrorInformation(self._pos, ['anything']))
def _combine_errors(self, error1, error2):
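        # keep the error that got furthest into the input; when both stop at
        # the same position, merge their lists of expected tokens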
if error1 is None:
return error2
if (error2 is None or error1.pos > error2.pos or
len(error2.expected) == 0):
return error1
elif error2.pos > error1.pos or len(error1.expected) == 0:
return error2
expected = []
already_there = {}
for ep in [error1.expected, error2.expected]:
for reason in ep:
if reason not in already_there:
already_there[reason] = True
expected.append(reason)
return ErrorInformation(error1.pos, expected)
def test_generate():
f = py.path.local(__file__).dirpath().join("pypackrat.py")
from pypackrat import PyPackratSyntaxParser
p = PyPackratSyntaxParser(syntax)
t = p.file()
t = t.visit(TreeOptimizer())
visitor = ParserBuilder()
t.visit(visitor)
code = visitor.get_code()
content = """
from rpython.rlib.parsing.tree import Nonterminal, Symbol
from makepackrat import PackratParser, BacktrackException, Status
%s
class PyPackratSyntaxParser(PackratParser):
def __init__(self, stream):
self.init_parser(stream)
forbidden = dict.fromkeys(("__weakref__ __doc__ "
"__dict__ __module__").split())
initthere = "__init__" in PyPackratSyntaxParser.__dict__
for key, value in Parser.__dict__.iteritems():
if key not in PyPackratSyntaxParser.__dict__ and key not in forbidden:
setattr(PyPackratSyntaxParser, key, value)
PyPackratSyntaxParser.init_parser = Parser.__init__.im_func
""" % (code, )
print content
f.write(content)
| mit | -3,646,075,986,021,694,500 | 31.645722 | 81 | 0.528113 | false |
mancoast/CPythonPyc_test | fail/311_test_builtin.py | 1 | 44506 | # Python test set -- built-in functions
import test.support, unittest
from test.support import fcmp, TESTFN, unlink, run_unittest, \
run_with_locale
from operator import neg
import sys, warnings, random, collections, io, fractions
warnings.filterwarnings("ignore", "hex../oct.. of negative int",
FutureWarning, __name__)
warnings.filterwarnings("ignore", "integer argument expected",
DeprecationWarning, "unittest")
import builtins
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self): return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
class StrSquares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self):
return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max:
raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(str(n*n))
n += 1
return self.sofar[i]
class BitBucket:
def write(self, line):
pass
test_conv_no_sign = [
('0', 0),
('1', 1),
('9', 9),
('10', 10),
('99', 99),
('100', 100),
('314', 314),
(' 314', 314),
('314 ', 314),
(' \t\t 314 \t\t ', 314),
(repr(sys.maxsize), sys.maxsize),
(' 1x', ValueError),
(' 1 ', 1),
(' 1\02 ', ValueError),
('', ValueError),
(' ', ValueError),
(' \t\t ', ValueError),
(str(b'\u0663\u0661\u0664 ','raw-unicode-escape'), 314),
(chr(0x200), ValueError),
]
test_conv_sign = [
('0', 0),
('1', 1),
('9', 9),
('10', 10),
('99', 99),
('100', 100),
('314', 314),
(' 314', ValueError),
('314 ', 314),
(' \t\t 314 \t\t ', ValueError),
(repr(sys.maxsize), sys.maxsize),
(' 1x', ValueError),
(' 1 ', ValueError),
(' 1\02 ', ValueError),
('', ValueError),
(' ', ValueError),
(' \t\t ', ValueError),
(str(b'\u0663\u0661\u0664 ','raw-unicode-escape'), 314),
(chr(0x200), ValueError),
]
class TestFailingBool:
def __bool__(self):
raise RuntimeError
class TestFailingIter:
def __iter__(self):
raise RuntimeError
class BuiltinTest(unittest.TestCase):
def test_import(self):
__import__('sys')
__import__('time')
__import__('string')
__import__(name='sys')
__import__(name='time', level=0)
self.assertRaises(ImportError, __import__, 'spamspam')
self.assertRaises(TypeError, __import__, 1, 2, 3, 4)
self.assertRaises(ValueError, __import__, '')
self.assertRaises(TypeError, __import__, 'sys', name='sys')
def test_abs(self):
# int
self.assertEqual(abs(0), 0)
self.assertEqual(abs(1234), 1234)
self.assertEqual(abs(-1234), 1234)
self.assertTrue(abs(-sys.maxsize-1) > 0)
# float
self.assertEqual(abs(0.0), 0.0)
self.assertEqual(abs(3.14), 3.14)
self.assertEqual(abs(-3.14), 3.14)
# long
self.assertEqual(abs(0), 0)
self.assertEqual(abs(1234), 1234)
self.assertEqual(abs(-1234), 1234)
# str
self.assertRaises(TypeError, abs, 'a')
def test_all(self):
self.assertEqual(all([2, 4, 6]), True)
self.assertEqual(all([2, None, 6]), False)
self.assertRaises(RuntimeError, all, [2, TestFailingBool(), 6])
self.assertRaises(RuntimeError, all, TestFailingIter())
self.assertRaises(TypeError, all, 10) # Non-iterable
self.assertRaises(TypeError, all) # No args
self.assertRaises(TypeError, all, [2, 4, 6], []) # Too many args
self.assertEqual(all([]), True) # Empty iterator
S = [50, 60]
self.assertEqual(all(x > 42 for x in S), True)
S = [50, 40, 60]
self.assertEqual(all(x > 42 for x in S), False)
def test_any(self):
self.assertEqual(any([None, None, None]), False)
self.assertEqual(any([None, 4, None]), True)
self.assertRaises(RuntimeError, any, [None, TestFailingBool(), 6])
self.assertRaises(RuntimeError, all, TestFailingIter())
self.assertRaises(TypeError, any, 10) # Non-iterable
self.assertRaises(TypeError, any) # No args
self.assertRaises(TypeError, any, [2, 4, 6], []) # Too many args
self.assertEqual(any([]), False) # Empty iterator
S = [40, 60, 30]
self.assertEqual(any(x > 42 for x in S), True)
S = [10, 20, 30]
self.assertEqual(any(x > 42 for x in S), False)
def test_ascii(self):
self.assertEqual(ascii(''), '\'\'')
self.assertEqual(ascii(0), '0')
self.assertEqual(ascii(0), '0')
self.assertEqual(ascii(()), '()')
self.assertEqual(ascii([]), '[]')
self.assertEqual(ascii({}), '{}')
a = []
a.append(a)
self.assertEqual(ascii(a), '[[...]]')
a = {}
a[0] = a
self.assertEqual(ascii(a), '{0: {...}}')
def test_neg(self):
x = -sys.maxsize-1
self.assertTrue(isinstance(x, int))
self.assertEqual(-x, sys.maxsize+1)
# XXX(nnorwitz): This test case for callable should probably be removed.
def test_callable(self):
self.assertTrue(hasattr(len, '__call__'))
def f(): pass
self.assertTrue(hasattr(f, '__call__'))
class C:
def meth(self): pass
self.assertTrue(hasattr(C, '__call__'))
x = C()
self.assertTrue(hasattr(x.meth, '__call__'))
self.assertTrue(not hasattr(x, '__call__'))
class D(C):
def __call__(self): pass
y = D()
self.assertTrue(hasattr(y, '__call__'))
y()
def test_chr(self):
self.assertEqual(chr(32), ' ')
self.assertEqual(chr(65), 'A')
self.assertEqual(chr(97), 'a')
self.assertEqual(chr(0xff), '\xff')
self.assertRaises(ValueError, chr, 1<<24)
self.assertEqual(chr(sys.maxunicode),
str(('\\U%08x' % (sys.maxunicode)).encode("ascii"),
'unicode-escape'))
self.assertRaises(TypeError, chr)
self.assertEqual(chr(0x0000FFFF), "\U0000FFFF")
self.assertEqual(chr(0x00010000), "\U00010000")
self.assertEqual(chr(0x00010001), "\U00010001")
self.assertEqual(chr(0x000FFFFE), "\U000FFFFE")
self.assertEqual(chr(0x000FFFFF), "\U000FFFFF")
self.assertEqual(chr(0x00100000), "\U00100000")
self.assertEqual(chr(0x00100001), "\U00100001")
self.assertEqual(chr(0x0010FFFE), "\U0010FFFE")
self.assertEqual(chr(0x0010FFFF), "\U0010FFFF")
self.assertRaises(ValueError, chr, -1)
self.assertRaises(ValueError, chr, 0x00110000)
self.assertRaises((OverflowError, ValueError), chr, 2**32)
def test_cmp(self):
self.assertTrue(not hasattr(builtins, "cmp"))
def test_compile(self):
compile('print(1)\n', '', 'exec')
bom = b'\xef\xbb\xbf'
compile(bom + b'print(1)\n', '', 'exec')
compile(source='pass', filename='?', mode='exec')
compile(dont_inherit=0, filename='tmp', source='0', mode='eval')
compile('pass', '?', dont_inherit=1, mode='exec')
compile(memoryview(b"text"), "name", "exec")
self.assertRaises(TypeError, compile)
self.assertRaises(ValueError, compile, 'print(42)\n', '<string>', 'badmode')
self.assertRaises(ValueError, compile, 'print(42)\n', '<string>', 'single', 0xff)
self.assertRaises(TypeError, compile, chr(0), 'f', 'exec')
self.assertRaises(TypeError, compile, 'pass', '?', 'exec',
mode='eval', source='0', filename='tmp')
compile('print("\xe5")\n', '', 'exec')
self.assertRaises(TypeError, compile, chr(0), 'f', 'exec')
self.assertRaises(ValueError, compile, str('a = 1'), 'f', 'bad')
def test_delattr(self):
import sys
sys.spam = 1
delattr(sys, 'spam')
self.assertRaises(TypeError, delattr)
def test_dir(self):
# dir(wrong number of arguments)
self.assertRaises(TypeError, dir, 42, 42)
# dir() - local scope
local_var = 1
self.assertTrue('local_var' in dir())
# dir(module)
import sys
self.assertTrue('exit' in dir(sys))
# dir(module_with_invalid__dict__)
import types
class Foo(types.ModuleType):
__dict__ = 8
f = Foo("foo")
self.assertRaises(TypeError, dir, f)
# dir(type)
self.assertTrue("strip" in dir(str))
self.assertTrue("__mro__" not in dir(str))
# dir(obj)
class Foo(object):
def __init__(self):
self.x = 7
self.y = 8
self.z = 9
f = Foo()
self.assertTrue("y" in dir(f))
# dir(obj_no__dict__)
class Foo(object):
__slots__ = []
f = Foo()
self.assertTrue("__repr__" in dir(f))
# dir(obj_no__class__with__dict__)
# (an ugly trick to cause getattr(f, "__class__") to fail)
class Foo(object):
__slots__ = ["__class__", "__dict__"]
def __init__(self):
self.bar = "wow"
f = Foo()
self.assertTrue("__repr__" not in dir(f))
self.assertTrue("bar" in dir(f))
# dir(obj_using __dir__)
class Foo(object):
def __dir__(self):
return ["kan", "ga", "roo"]
f = Foo()
self.assertTrue(dir(f) == ["ga", "kan", "roo"])
# dir(obj__dir__not_list)
class Foo(object):
def __dir__(self):
return 7
f = Foo()
self.assertRaises(TypeError, dir, f)
# dir(traceback)
try:
raise IndexError
except:
self.assertEqual(len(dir(sys.exc_info()[2])), 4)
def test_divmod(self):
self.assertEqual(divmod(12, 7), (1, 5))
self.assertEqual(divmod(-12, 7), (-2, 2))
self.assertEqual(divmod(12, -7), (-2, -2))
self.assertEqual(divmod(-12, -7), (1, -5))
self.assertEqual(divmod(12, 7), (1, 5))
self.assertEqual(divmod(-12, 7), (-2, 2))
self.assertEqual(divmod(12, -7), (-2, -2))
self.assertEqual(divmod(-12, -7), (1, -5))
self.assertEqual(divmod(12, 7), (1, 5))
self.assertEqual(divmod(-12, 7), (-2, 2))
self.assertEqual(divmod(12, -7), (-2, -2))
self.assertEqual(divmod(-12, -7), (1, -5))
self.assertEqual(divmod(-sys.maxsize-1, -1),
(sys.maxsize+1, 0))
self.assertTrue(not fcmp(divmod(3.25, 1.0), (3.0, 0.25)))
self.assertTrue(not fcmp(divmod(-3.25, 1.0), (-4.0, 0.75)))
self.assertTrue(not fcmp(divmod(3.25, -1.0), (-4.0, -0.75)))
self.assertTrue(not fcmp(divmod(-3.25, -1.0), (3.0, -0.25)))
self.assertRaises(TypeError, divmod)
def test_eval(self):
self.assertEqual(eval('1+1'), 2)
self.assertEqual(eval(' 1+1\n'), 2)
globals = {'a': 1, 'b': 2}
locals = {'b': 200, 'c': 300}
self.assertEqual(eval('a', globals) , 1)
self.assertEqual(eval('a', globals, locals), 1)
self.assertEqual(eval('b', globals, locals), 200)
self.assertEqual(eval('c', globals, locals), 300)
globals = {'a': 1, 'b': 2}
locals = {'b': 200, 'c': 300}
bom = b'\xef\xbb\xbf'
self.assertEqual(eval(bom + b'a', globals, locals), 1)
self.assertEqual(eval('"\xe5"', globals), "\xe5")
self.assertRaises(TypeError, eval)
self.assertRaises(TypeError, eval, ())
self.assertRaises(SyntaxError, eval, bom[:2] + b'a')
def test_general_eval(self):
# Tests that general mappings can be used for the locals argument
class M:
"Test mapping interface versus possible calls from eval()."
def __getitem__(self, key):
if key == 'a':
return 12
raise KeyError
def keys(self):
return list('xyz')
m = M()
g = globals()
self.assertEqual(eval('a', g, m), 12)
self.assertRaises(NameError, eval, 'b', g, m)
self.assertEqual(eval('dir()', g, m), list('xyz'))
self.assertEqual(eval('globals()', g, m), g)
self.assertEqual(eval('locals()', g, m), m)
self.assertRaises(TypeError, eval, 'a', m)
class A:
"Non-mapping"
pass
m = A()
self.assertRaises(TypeError, eval, 'a', g, m)
# Verify that dict subclasses work as well
class D(dict):
def __getitem__(self, key):
if key == 'a':
return 12
return dict.__getitem__(self, key)
def keys(self):
return list('xyz')
d = D()
self.assertEqual(eval('a', g, d), 12)
self.assertRaises(NameError, eval, 'b', g, d)
self.assertEqual(eval('dir()', g, d), list('xyz'))
self.assertEqual(eval('globals()', g, d), g)
self.assertEqual(eval('locals()', g, d), d)
# Verify locals stores (used by list comps)
eval('[locals() for i in (2,3)]', g, d)
eval('[locals() for i in (2,3)]', g, collections.UserDict())
class SpreadSheet:
"Sample application showing nested, calculated lookups."
_cells = {}
def __setitem__(self, key, formula):
self._cells[key] = formula
def __getitem__(self, key):
return eval(self._cells[key], globals(), self)
ss = SpreadSheet()
ss['a1'] = '5'
ss['a2'] = 'a1*6'
ss['a3'] = 'a2*7'
self.assertEqual(ss['a3'], 210)
# Verify that dir() catches a non-list returned by eval
# SF bug #1004669
class C:
def __getitem__(self, item):
raise KeyError(item)
def keys(self):
return 1 # used to be 'a' but that's no longer an error
self.assertRaises(TypeError, eval, 'dir()', globals(), C())
def test_exec(self):
g = {}
exec('z = 1', g)
if '__builtins__' in g:
del g['__builtins__']
self.assertEqual(g, {'z': 1})
exec('z = 1+1', g)
if '__builtins__' in g:
del g['__builtins__']
self.assertEqual(g, {'z': 2})
g = {}
l = {}
import warnings
warnings.filterwarnings("ignore", "global statement", module="<string>")
exec('global a; a = 1; b = 2', g, l)
if '__builtins__' in g:
del g['__builtins__']
if '__builtins__' in l:
del l['__builtins__']
self.assertEqual((g, l), ({'a': 1}, {'b': 2}))
def test_exec_redirected(self):
savestdout = sys.stdout
sys.stdout = None # Whatever that cannot flush()
try:
# Used to raise SystemError('error return without exception set')
exec('a')
except NameError:
pass
finally:
sys.stdout = savestdout
def test_filter(self):
self.assertEqual(list(filter(lambda c: 'a' <= c <= 'z', 'Hello World')), list('elloorld'))
self.assertEqual(list(filter(None, [1, 'hello', [], [3], '', None, 9, 0])), [1, 'hello', [3], 9])
self.assertEqual(list(filter(lambda x: x > 0, [1, -3, 9, 0, 2])), [1, 9, 2])
self.assertEqual(list(filter(None, Squares(10))), [1, 4, 9, 16, 25, 36, 49, 64, 81])
self.assertEqual(list(filter(lambda x: x%2, Squares(10))), [1, 9, 25, 49, 81])
def identity(item):
return 1
filter(identity, Squares(5))
self.assertRaises(TypeError, filter)
class BadSeq(object):
def __getitem__(self, index):
if index<4:
return 42
raise ValueError
self.assertRaises(ValueError, list, filter(lambda x: x, BadSeq()))
def badfunc():
pass
self.assertRaises(TypeError, list, filter(badfunc, range(5)))
# test bltinmodule.c::filtertuple()
self.assertEqual(list(filter(None, (1, 2))), [1, 2])
self.assertEqual(list(filter(lambda x: x>=3, (1, 2, 3, 4))), [3, 4])
self.assertRaises(TypeError, list, filter(42, (1, 2)))
def test_getattr(self):
import sys
self.assertTrue(getattr(sys, 'stdout') is sys.stdout)
self.assertRaises(TypeError, getattr, sys, 1)
self.assertRaises(TypeError, getattr, sys, 1, "foo")
self.assertRaises(TypeError, getattr)
self.assertRaises(AttributeError, getattr, sys, chr(sys.maxunicode))
def test_hasattr(self):
import sys
self.assertTrue(hasattr(sys, 'stdout'))
self.assertRaises(TypeError, hasattr, sys, 1)
self.assertRaises(TypeError, hasattr)
self.assertEqual(False, hasattr(sys, chr(sys.maxunicode)))
        # Check that hasattr allows SystemExit and KeyboardInterrupt to propagate
class A:
def __getattr__(self, what):
raise KeyboardInterrupt
self.assertRaises(KeyboardInterrupt, hasattr, A(), "b")
class B:
def __getattr__(self, what):
raise SystemExit
self.assertRaises(SystemExit, hasattr, B(), "b")
def test_hash(self):
hash(None)
self.assertEqual(hash(1), hash(1))
self.assertEqual(hash(1), hash(1.0))
hash('spam')
self.assertEqual(hash('spam'), hash(b'spam'))
hash((0,1,2,3))
def f(): pass
self.assertRaises(TypeError, hash, [])
self.assertRaises(TypeError, hash, {})
# Bug 1536021: Allow hash to return long objects
class X:
def __hash__(self):
return 2**100
self.assertEquals(type(hash(X())), int)
class Y(object):
def __hash__(self):
return 2**100
self.assertEquals(type(hash(Y())), int)
class Z(int):
def __hash__(self):
return self
self.assertEquals(hash(Z(42)), hash(42))
def test_hex(self):
self.assertEqual(hex(16), '0x10')
self.assertEqual(hex(16), '0x10')
self.assertEqual(hex(-16), '-0x10')
self.assertEqual(hex(-16), '-0x10')
self.assertRaises(TypeError, hex, {})
def test_id(self):
id(None)
id(1)
id(1)
id(1.0)
id('spam')
id((0,1,2,3))
id([0,1,2,3])
id({'spam': 1, 'eggs': 2, 'ham': 3})
# Test input() later, alphabetized as if it were raw_input
def test_iter(self):
self.assertRaises(TypeError, iter)
self.assertRaises(TypeError, iter, 42, 42)
lists = [("1", "2"), ["1", "2"], "12"]
for l in lists:
i = iter(l)
self.assertEqual(next(i), '1')
self.assertEqual(next(i), '2')
self.assertRaises(StopIteration, next, i)
def test_isinstance(self):
class C:
pass
class D(C):
pass
class E:
pass
c = C()
d = D()
e = E()
self.assertTrue(isinstance(c, C))
self.assertTrue(isinstance(d, C))
self.assertTrue(not isinstance(e, C))
self.assertTrue(not isinstance(c, D))
self.assertTrue(not isinstance('foo', E))
self.assertRaises(TypeError, isinstance, E, 'foo')
self.assertRaises(TypeError, isinstance)
def test_issubclass(self):
class C:
pass
class D(C):
pass
class E:
pass
c = C()
d = D()
e = E()
self.assertTrue(issubclass(D, C))
self.assertTrue(issubclass(C, C))
self.assertTrue(not issubclass(C, D))
self.assertRaises(TypeError, issubclass, 'foo', E)
self.assertRaises(TypeError, issubclass, E, 'foo')
self.assertRaises(TypeError, issubclass)
def test_len(self):
self.assertEqual(len('123'), 3)
self.assertEqual(len(()), 0)
self.assertEqual(len((1, 2, 3, 4)), 4)
self.assertEqual(len([1, 2, 3, 4]), 4)
self.assertEqual(len({}), 0)
self.assertEqual(len({'a':1, 'b': 2}), 2)
class BadSeq:
def __len__(self):
raise ValueError
self.assertRaises(ValueError, len, BadSeq())
class InvalidLen:
def __len__(self):
return None
self.assertRaises(TypeError, len, InvalidLen())
class FloatLen:
def __len__(self):
return 4.5
self.assertRaises(TypeError, len, FloatLen())
class HugeLen:
def __len__(self):
return sys.maxsize + 1
self.assertRaises(OverflowError, len, HugeLen())
def test_map(self):
self.assertEqual(
list(map(lambda x: x*x, range(1,4))),
[1, 4, 9]
)
try:
from math import sqrt
except ImportError:
def sqrt(x):
return pow(x, 0.5)
self.assertEqual(
list(map(lambda x: list(map(sqrt, x)), [[16, 4], [81, 9]])),
[[4.0, 2.0], [9.0, 3.0]]
)
self.assertEqual(
list(map(lambda x, y: x+y, [1,3,2], [9,1,4])),
[10, 4, 6]
)
def plus(*v):
accu = 0
for i in v: accu = accu + i
return accu
self.assertEqual(
list(map(plus, [1, 3, 7])),
[1, 3, 7]
)
self.assertEqual(
list(map(plus, [1, 3, 7], [4, 9, 2])),
[1+4, 3+9, 7+2]
)
self.assertEqual(
list(map(plus, [1, 3, 7], [4, 9, 2], [1, 1, 0])),
[1+4+1, 3+9+1, 7+2+0]
)
self.assertEqual(
list(map(int, Squares(10))),
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
)
def Max(a, b):
if a is None:
return b
if b is None:
return a
return max(a, b)
self.assertEqual(
list(map(Max, Squares(3), Squares(2))),
[0, 1]
)
self.assertRaises(TypeError, map)
self.assertRaises(TypeError, map, lambda x: x, 42)
class BadSeq:
def __iter__(self):
raise ValueError
yield None
self.assertRaises(ValueError, list, map(lambda x: x, BadSeq()))
def badfunc(x):
raise RuntimeError
self.assertRaises(RuntimeError, list, map(badfunc, range(5)))
def test_max(self):
self.assertEqual(max('123123'), '3')
self.assertEqual(max(1, 2, 3), 3)
self.assertEqual(max((1, 2, 3, 1, 2, 3)), 3)
self.assertEqual(max([1, 2, 3, 1, 2, 3]), 3)
self.assertEqual(max(1, 2, 3.0), 3.0)
self.assertEqual(max(1, 2.0, 3), 3)
self.assertEqual(max(1.0, 2, 3), 3)
for stmt in (
"max(key=int)", # no args
"max(1, key=int)", # single arg not iterable
"max(1, 2, keystone=int)", # wrong keyword
"max(1, 2, key=int, abc=int)", # two many keywords
"max(1, 2, key=1)", # keyfunc is not callable
):
try:
exec(stmt, globals())
except TypeError:
pass
else:
self.fail(stmt)
self.assertEqual(max((1,), key=neg), 1) # one elem iterable
self.assertEqual(max((1,2), key=neg), 1) # two elem iterable
self.assertEqual(max(1, 2, key=neg), 1) # two elems
data = [random.randrange(200) for i in range(100)]
keys = dict((elem, random.randrange(50)) for elem in data)
f = keys.__getitem__
self.assertEqual(max(data, key=f),
sorted(reversed(data), key=f)[-1])
def test_min(self):
self.assertEqual(min('123123'), '1')
self.assertEqual(min(1, 2, 3), 1)
self.assertEqual(min((1, 2, 3, 1, 2, 3)), 1)
self.assertEqual(min([1, 2, 3, 1, 2, 3]), 1)
self.assertEqual(min(1, 2, 3.0), 1)
self.assertEqual(min(1, 2.0, 3), 1)
self.assertEqual(min(1.0, 2, 3), 1.0)
self.assertRaises(TypeError, min)
self.assertRaises(TypeError, min, 42)
self.assertRaises(ValueError, min, ())
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, min, BadSeq())
for stmt in (
"min(key=int)", # no args
"min(1, key=int)", # single arg not iterable
"min(1, 2, keystone=int)", # wrong keyword
"min(1, 2, key=int, abc=int)", # two many keywords
"min(1, 2, key=1)", # keyfunc is not callable
):
try:
exec(stmt, globals())
except TypeError:
pass
else:
self.fail(stmt)
self.assertEqual(min((1,), key=neg), 1) # one elem iterable
self.assertEqual(min((1,2), key=neg), 2) # two elem iterable
self.assertEqual(min(1, 2, key=neg), 2) # two elems
data = [random.randrange(200) for i in range(100)]
keys = dict((elem, random.randrange(50)) for elem in data)
f = keys.__getitem__
self.assertEqual(min(data, key=f),
sorted(data, key=f)[0])
def test_next(self):
it = iter(range(2))
self.assertEqual(next(it), 0)
self.assertEqual(next(it), 1)
self.assertRaises(StopIteration, next, it)
self.assertRaises(StopIteration, next, it)
self.assertEquals(next(it, 42), 42)
class Iter(object):
def __iter__(self):
return self
def __next__(self):
raise StopIteration
it = iter(Iter())
self.assertEquals(next(it, 42), 42)
self.assertRaises(StopIteration, next, it)
def gen():
yield 1
return
it = gen()
self.assertEquals(next(it), 1)
self.assertRaises(StopIteration, next, it)
self.assertEquals(next(it, 42), 42)
def test_oct(self):
self.assertEqual(oct(100), '0o144')
self.assertEqual(oct(100), '0o144')
self.assertEqual(oct(-100), '-0o144')
self.assertEqual(oct(-100), '-0o144')
self.assertRaises(TypeError, oct, ())
def write_testfile(self):
# NB the first 4 lines are also used to test input, below
fp = open(TESTFN, 'w')
try:
fp.write('1+1\n')
fp.write('1+1\n')
fp.write('The quick brown fox jumps over the lazy dog')
fp.write('.\n')
fp.write('Dear John\n')
fp.write('XXX'*100)
fp.write('YYY'*100)
finally:
fp.close()
def test_open(self):
self.write_testfile()
fp = open(TESTFN, 'r')
try:
self.assertEqual(fp.readline(4), '1+1\n')
self.assertEqual(fp.readline(4), '1+1\n')
self.assertEqual(fp.readline(), 'The quick brown fox jumps over the lazy dog.\n')
self.assertEqual(fp.readline(4), 'Dear')
self.assertEqual(fp.readline(100), ' John\n')
self.assertEqual(fp.read(300), 'XXX'*100)
self.assertEqual(fp.read(1000), 'YYY'*100)
finally:
fp.close()
unlink(TESTFN)
def test_ord(self):
self.assertEqual(ord(' '), 32)
self.assertEqual(ord('A'), 65)
self.assertEqual(ord('a'), 97)
self.assertEqual(ord('\x80'), 128)
self.assertEqual(ord('\xff'), 255)
self.assertEqual(ord(b' '), 32)
self.assertEqual(ord(b'A'), 65)
self.assertEqual(ord(b'a'), 97)
self.assertEqual(ord(b'\x80'), 128)
self.assertEqual(ord(b'\xff'), 255)
self.assertEqual(ord(chr(sys.maxunicode)), sys.maxunicode)
self.assertRaises(TypeError, ord, 42)
self.assertEqual(ord(chr(0x10FFFF)), 0x10FFFF)
self.assertEqual(ord("\U0000FFFF"), 0x0000FFFF)
self.assertEqual(ord("\U00010000"), 0x00010000)
self.assertEqual(ord("\U00010001"), 0x00010001)
self.assertEqual(ord("\U000FFFFE"), 0x000FFFFE)
self.assertEqual(ord("\U000FFFFF"), 0x000FFFFF)
self.assertEqual(ord("\U00100000"), 0x00100000)
self.assertEqual(ord("\U00100001"), 0x00100001)
self.assertEqual(ord("\U0010FFFE"), 0x0010FFFE)
self.assertEqual(ord("\U0010FFFF"), 0x0010FFFF)
def test_pow(self):
self.assertEqual(pow(0,0), 1)
self.assertEqual(pow(0,1), 0)
self.assertEqual(pow(1,0), 1)
self.assertEqual(pow(1,1), 1)
self.assertEqual(pow(2,0), 1)
self.assertEqual(pow(2,10), 1024)
self.assertEqual(pow(2,20), 1024*1024)
self.assertEqual(pow(2,30), 1024*1024*1024)
self.assertEqual(pow(-2,0), 1)
self.assertEqual(pow(-2,1), -2)
self.assertEqual(pow(-2,2), 4)
self.assertEqual(pow(-2,3), -8)
self.assertEqual(pow(0,0), 1)
self.assertEqual(pow(0,1), 0)
self.assertEqual(pow(1,0), 1)
self.assertEqual(pow(1,1), 1)
self.assertEqual(pow(2,0), 1)
self.assertEqual(pow(2,10), 1024)
self.assertEqual(pow(2,20), 1024*1024)
self.assertEqual(pow(2,30), 1024*1024*1024)
self.assertEqual(pow(-2,0), 1)
self.assertEqual(pow(-2,1), -2)
self.assertEqual(pow(-2,2), 4)
self.assertEqual(pow(-2,3), -8)
self.assertAlmostEqual(pow(0.,0), 1.)
self.assertAlmostEqual(pow(0.,1), 0.)
self.assertAlmostEqual(pow(1.,0), 1.)
self.assertAlmostEqual(pow(1.,1), 1.)
self.assertAlmostEqual(pow(2.,0), 1.)
self.assertAlmostEqual(pow(2.,10), 1024.)
self.assertAlmostEqual(pow(2.,20), 1024.*1024.)
self.assertAlmostEqual(pow(2.,30), 1024.*1024.*1024.)
self.assertAlmostEqual(pow(-2.,0), 1.)
self.assertAlmostEqual(pow(-2.,1), -2.)
self.assertAlmostEqual(pow(-2.,2), 4.)
self.assertAlmostEqual(pow(-2.,3), -8.)
for x in 2, 2, 2.0:
for y in 10, 10, 10.0:
for z in 1000, 1000, 1000.0:
if isinstance(x, float) or \
isinstance(y, float) or \
isinstance(z, float):
self.assertRaises(TypeError, pow, x, y, z)
else:
self.assertAlmostEqual(pow(x, y, z), 24.0)
self.assertAlmostEqual(pow(-1, 0.5), 1j)
self.assertAlmostEqual(pow(-1, 1/3), 0.5 + 0.8660254037844386j)
self.assertRaises(TypeError, pow, -1, -2, 3)
self.assertRaises(ValueError, pow, 1, 2, 0)
self.assertRaises(TypeError, pow, -1, -2, 3)
self.assertRaises(ValueError, pow, 1, 2, 0)
self.assertRaises(TypeError, pow)
def test_range(self):
self.assertEqual(list(range(3)), [0, 1, 2])
self.assertEqual(list(range(1, 5)), [1, 2, 3, 4])
self.assertEqual(list(range(0)), [])
self.assertEqual(list(range(-3)), [])
self.assertEqual(list(range(1, 10, 3)), [1, 4, 7])
#self.assertEqual(list(range(5, -5, -3)), [5, 2, -1, -4])
#issue 6334: the internal stored range length was being
#computed incorrectly in some cases involving large arguments.
x = range(10**20, 10**20+10, 3)
self.assertEqual(len(x), 4)
self.assertEqual(len(list(x)), 4)
x = range(10**20+10, 10**20, 3)
self.assertEqual(len(x), 0)
self.assertEqual(len(list(x)), 0)
x = range(10**20, 10**20+10, -3)
self.assertEqual(len(x), 0)
self.assertEqual(len(list(x)), 0)
x = range(10**20+10, 10**20, -3)
self.assertEqual(len(x), 4)
self.assertEqual(len(list(x)), 4)
""" XXX(nnorwitz):
# Now test range() with longs
self.assertEqual(list(range(-2**100)), [])
self.assertEqual(list(range(0, -2**100)), [])
self.assertEqual(list(range(0, 2**100, -1)), [])
self.assertEqual(list(range(0, 2**100, -1)), [])
a = int(10 * sys.maxsize)
b = int(100 * sys.maxsize)
c = int(50 * sys.maxsize)
self.assertEqual(list(range(a, a+2)), [a, a+1])
self.assertEqual(list(range(a+2, a, -1)), [a+2, a+1])
self.assertEqual(list(range(a+4, a, -2)), [a+4, a+2])
seq = list(range(a, b, c))
self.assertTrue(a in seq)
self.assertTrue(b not in seq)
self.assertEqual(len(seq), 2)
seq = list(range(b, a, -c))
self.assertTrue(b in seq)
self.assertTrue(a not in seq)
self.assertEqual(len(seq), 2)
seq = list(range(-a, -b, -c))
self.assertTrue(-a in seq)
self.assertTrue(-b not in seq)
self.assertEqual(len(seq), 2)
self.assertRaises(TypeError, range)
self.assertRaises(TypeError, range, 1, 2, 3, 4)
self.assertRaises(ValueError, range, 1, 2, 0)
self.assertRaises(ValueError, range, a, a + 1, int(0))
class badzero(int):
def __eq__(self, other):
raise RuntimeError
__ne__ = __lt__ = __gt__ = __le__ = __ge__ = __eq__
# XXX This won't (but should!) raise RuntimeError if a is an int...
self.assertRaises(RuntimeError, range, a, a + 1, badzero(1))
"""
# Reject floats when it would require PyLongs to represent.
# (smaller floats still accepted, but deprecated)
self.assertRaises(TypeError, range, 1e100, 1e101, 1e101)
self.assertRaises(TypeError, range, 0, "spam")
self.assertRaises(TypeError, range, 0, 42, "spam")
#NEAL self.assertRaises(OverflowError, range, -sys.maxsize, sys.maxsize)
#NEAL self.assertRaises(OverflowError, range, 0, 2*sys.maxsize)
self.assertRaises(OverflowError, len, range(0, sys.maxsize**10))
def test_input(self):
self.write_testfile()
fp = open(TESTFN, 'r')
savestdin = sys.stdin
savestdout = sys.stdout # Eats the echo
try:
sys.stdin = fp
sys.stdout = BitBucket()
self.assertEqual(input(), "1+1")
self.assertEqual(input('testing\n'), "1+1")
self.assertEqual(input(), 'The quick brown fox jumps over the lazy dog.')
self.assertEqual(input('testing\n'), 'Dear John')
# SF 1535165: don't segfault on closed stdin
# sys.stdout must be a regular file for triggering
sys.stdout = savestdout
sys.stdin.close()
self.assertRaises(ValueError, input)
sys.stdout = BitBucket()
sys.stdin = io.StringIO("NULL\0")
self.assertRaises(TypeError, input, 42, 42)
sys.stdin = io.StringIO(" 'whitespace'")
self.assertEqual(input(), " 'whitespace'")
sys.stdin = io.StringIO()
self.assertRaises(EOFError, input)
del sys.stdout
self.assertRaises(RuntimeError, input, 'prompt')
del sys.stdin
self.assertRaises(RuntimeError, input, 'prompt')
finally:
sys.stdin = savestdin
sys.stdout = savestdout
fp.close()
unlink(TESTFN)
def test_repr(self):
self.assertEqual(repr(''), '\'\'')
self.assertEqual(repr(0), '0')
self.assertEqual(repr(0), '0')
self.assertEqual(repr(()), '()')
self.assertEqual(repr([]), '[]')
self.assertEqual(repr({}), '{}')
a = []
a.append(a)
self.assertEqual(repr(a), '[[...]]')
a = {}
a[0] = a
self.assertEqual(repr(a), '{0: {...}}')
def test_round(self):
self.assertEqual(round(0.0), 0.0)
self.assertEqual(type(round(0.0)), int)
self.assertEqual(round(1.0), 1.0)
self.assertEqual(round(10.0), 10.0)
self.assertEqual(round(1000000000.0), 1000000000.0)
self.assertEqual(round(1e20), 1e20)
self.assertEqual(round(-1.0), -1.0)
self.assertEqual(round(-10.0), -10.0)
self.assertEqual(round(-1000000000.0), -1000000000.0)
self.assertEqual(round(-1e20), -1e20)
self.assertEqual(round(0.1), 0.0)
self.assertEqual(round(1.1), 1.0)
self.assertEqual(round(10.1), 10.0)
self.assertEqual(round(1000000000.1), 1000000000.0)
self.assertEqual(round(-1.1), -1.0)
self.assertEqual(round(-10.1), -10.0)
self.assertEqual(round(-1000000000.1), -1000000000.0)
self.assertEqual(round(0.9), 1.0)
self.assertEqual(round(9.9), 10.0)
self.assertEqual(round(999999999.9), 1000000000.0)
self.assertEqual(round(-0.9), -1.0)
self.assertEqual(round(-9.9), -10.0)
self.assertEqual(round(-999999999.9), -1000000000.0)
self.assertEqual(round(-8.0, -1), -10.0)
self.assertEqual(type(round(-8.0, -1)), float)
self.assertEqual(type(round(-8.0, 0)), float)
self.assertEqual(type(round(-8.0, 1)), float)
# Check even / odd rounding behaviour
self.assertEqual(round(5.5), 6)
self.assertEqual(round(6.5), 6)
self.assertEqual(round(-5.5), -6)
self.assertEqual(round(-6.5), -6)
# Check behavior on ints
self.assertEqual(round(0), 0)
self.assertEqual(round(8), 8)
self.assertEqual(round(-8), -8)
self.assertEqual(type(round(0)), int)
self.assertEqual(type(round(-8, -1)), int)
self.assertEqual(type(round(-8, 0)), int)
self.assertEqual(type(round(-8, 1)), int)
# test new kwargs
self.assertEqual(round(number=-8.0, ndigits=-1), -10.0)
self.assertRaises(TypeError, round)
# test generic rounding delegation for reals
class TestRound:
def __round__(self):
return 23
class TestNoRound:
pass
self.assertEqual(round(TestRound()), 23)
self.assertRaises(TypeError, round, 1, 2, 3)
self.assertRaises(TypeError, round, TestNoRound())
t = TestNoRound()
t.__round__ = lambda *args: args
self.assertRaises(TypeError, round, t)
self.assertRaises(TypeError, round, t, 0)
def test_setattr(self):
setattr(sys, 'spam', 1)
self.assertEqual(sys.spam, 1)
self.assertRaises(TypeError, setattr, sys, 1, 'spam')
self.assertRaises(TypeError, setattr)
def test_sum(self):
self.assertEqual(sum([]), 0)
self.assertEqual(sum(list(range(2,8))), 27)
self.assertEqual(sum(iter(list(range(2,8)))), 27)
self.assertEqual(sum(Squares(10)), 285)
self.assertEqual(sum(iter(Squares(10))), 285)
self.assertEqual(sum([[1], [2], [3]], []), [1, 2, 3])
self.assertRaises(TypeError, sum)
self.assertRaises(TypeError, sum, 42)
self.assertRaises(TypeError, sum, ['a', 'b', 'c'])
self.assertRaises(TypeError, sum, ['a', 'b', 'c'], '')
self.assertRaises(TypeError, sum, [[1], [2], [3]])
self.assertRaises(TypeError, sum, [{2:3}])
self.assertRaises(TypeError, sum, [{2:3}]*2, {2:3})
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, sum, BadSeq())
def test_type(self):
self.assertEqual(type(''), type('123'))
self.assertNotEqual(type(''), type(()))
# We don't want self in vars(), so these are static methods
@staticmethod
def get_vars_f0():
return vars()
@staticmethod
def get_vars_f2():
BuiltinTest.get_vars_f0()
a = 1
b = 2
return vars()
def test_vars(self):
self.assertEqual(set(vars()), set(dir()))
import sys
self.assertEqual(set(vars(sys)), set(dir(sys)))
self.assertEqual(self.get_vars_f0(), {})
self.assertEqual(self.get_vars_f2(), {'a': 1, 'b': 2})
self.assertRaises(TypeError, vars, 42, 42)
self.assertRaises(TypeError, vars, 42)
def test_zip(self):
a = (1, 2, 3)
b = (4, 5, 6)
t = [(1, 4), (2, 5), (3, 6)]
self.assertEqual(list(zip(a, b)), t)
b = [4, 5, 6]
self.assertEqual(list(zip(a, b)), t)
b = (4, 5, 6, 7)
self.assertEqual(list(zip(a, b)), t)
class I:
def __getitem__(self, i):
if i < 0 or i > 2: raise IndexError
return i + 4
self.assertEqual(list(zip(a, I())), t)
self.assertEqual(list(zip()), [])
self.assertEqual(list(zip(*[])), [])
self.assertRaises(TypeError, zip, None)
class G:
pass
self.assertRaises(TypeError, zip, a, G())
# Make sure zip doesn't try to allocate a billion elements for the
# result list when one of its arguments doesn't say how long it is.
# A MemoryError is the most likely failure mode.
class SequenceWithoutALength:
def __getitem__(self, i):
if i == 5:
raise IndexError
else:
return i
self.assertEqual(
list(zip(SequenceWithoutALength(), range(2**30))),
list(enumerate(range(5)))
)
class BadSeq:
def __getitem__(self, i):
if i == 5:
raise ValueError
else:
return i
self.assertRaises(ValueError, list, zip(BadSeq(), BadSeq()))
def test_bin(self):
self.assertEqual(bin(0), '0b0')
self.assertEqual(bin(1), '0b1')
self.assertEqual(bin(-1), '-0b1')
self.assertEqual(bin(2**65), '0b1' + '0' * 65)
self.assertEqual(bin(2**65-1), '0b' + '1' * 65)
self.assertEqual(bin(-(2**65)), '-0b1' + '0' * 65)
self.assertEqual(bin(-(2**65-1)), '-0b' + '1' * 65)
def test_bytearray_translate(self):
x = bytearray(b"abc")
self.assertRaises(ValueError, x.translate, b"1", 1)
self.assertRaises(TypeError, x.translate, b"1"*256, 1)
class TestSorted(unittest.TestCase):
def test_basic(self):
data = list(range(100))
copy = data[:]
random.shuffle(copy)
self.assertEqual(data, sorted(copy))
self.assertNotEqual(data, copy)
data.reverse()
random.shuffle(copy)
self.assertEqual(data, sorted(copy, key=lambda x: -x))
self.assertNotEqual(data, copy)
random.shuffle(copy)
self.assertEqual(data, sorted(copy, reverse=1))
self.assertNotEqual(data, copy)
def test_inputtypes(self):
s = 'abracadabra'
types = [list, tuple, str]
for T in types:
self.assertEqual(sorted(s), sorted(T(s)))
s = ''.join(set(s)) # unique letters only
types = [str, set, frozenset, list, tuple, dict.fromkeys]
for T in types:
self.assertEqual(sorted(s), sorted(T(s)))
def test_baddecorator(self):
data = 'The quick Brown fox Jumped over The lazy Dog'.split()
self.assertRaises(TypeError, sorted, data, None, lambda x,y: 0)
def test_main(verbose=None):
test_classes = (BuiltinTest, TestSorted)
run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in range(len(counts)):
run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print(counts)
if __name__ == "__main__":
test_main(verbose=True)
| gpl-3.0 | 7,221,118,868,111,259,000 | 33.608087 | 105 | 0.526199 | false |
skirpichev/omg | diofant/combinatorics/perm_groups.py | 1 | 111562 | from itertools import islice
from math import log
from random import choice, randrange
from ..core import Basic
from ..functions import factorial
from ..ntheory import sieve
from ..utilities import has_variety
from ..utilities.iterables import is_sequence, uniq
from ..utilities.randtest import _randrange
from .permutations import (Cycle, Permutation, _af_commutes_with, _af_invert,
_af_pow, _af_rmul, _af_rmuln)
from .util import (_base_ordering, _check_cycles_alt_sym,
_distribute_gens_by_base, _handle_precomputed_bsgs,
_orbits_transversals_from_bsgs, _strip, _strip_af,
_strong_gens_from_distr)
rmul = Permutation.rmul_with_af
_af_new = Permutation._af_new
class PermutationGroup(Basic):
"""The class defining a Permutation group.
PermutationGroup([p1, p2, ..., pn]) returns the permutation group
generated by the list of permutations. This group can be supplied
to Polyhedron if one desires to decorate the elements to which the
indices of the permutation refer.
Examples
========
>>> Permutation.print_cyclic = True
The permutations corresponding to motion of the front, right and
bottom face of a 2x2 Rubik's cube are defined:
>>> F = Permutation(2, 19, 21, 8)(3, 17, 20, 10)(4, 6, 7, 5)
>>> R = Permutation(1, 5, 21, 14)(3, 7, 23, 12)(8, 10, 11, 9)
>>> D = Permutation(6, 18, 14, 10)(7, 19, 15, 11)(20, 22, 23, 21)
These are passed as permutations to PermutationGroup:
>>> G = PermutationGroup(F, R, D)
>>> G.order()
3674160
The group can be supplied to a Polyhedron in order to track the
objects being moved. An example involving the 2x2 Rubik's cube is
given there, but here is a simple demonstration:
>>> a = Permutation(2, 1)
>>> b = Permutation(1, 0)
>>> G = PermutationGroup(a, b)
>>> P = Polyhedron(list('ABC'), pgroup=G)
>>> P.corners
(A, B, C)
>>> P.rotate(0) # apply permutation 0
>>> P.corners
(A, C, B)
>>> P.reset()
>>> P.corners
(A, B, C)
Or one can make a permutation as a product of selected permutations
and apply them to an iterable directly:
>>> P10 = G.make_perm([0, 1])
>>> P10('ABC')
['C', 'A', 'B']
See Also
========
diofant.combinatorics.polyhedron.Polyhedron,
diofant.combinatorics.permutations.Permutation
References
==========
[1] Holt, D., Eick, B., O'Brien, E.
"Handbook of Computational Group Theory"
[2] Seress, A.
"Permutation Group Algorithms"
[3] https://en.wikipedia.org/wiki/Schreier_vector
[4] https://en.wikipedia.org/wiki/Nielsen_transformation
#Product_replacement_algorithm
[5] Frank Celler, Charles R.Leedham-Green, Scott H.Murray,
Alice C.Niemeyer, and E.A.O'Brien. "Generating Random
Elements of a Finite Group"
[6] https://en.wikipedia.org/wiki/Block_%28permutation_group_theory%29
[7] https://web.archive.org/web/20170105021515/http://www.algorithmist.com:80/index.php/Union_Find
[8] https://en.wikipedia.org/wiki/Multiply_transitive_group#Multiply_transitive_groups
[9] https://en.wikipedia.org/wiki/Center_%28group_theory%29
[10] https://en.wikipedia.org/wiki/Centralizer_and_normalizer
[11] https://groupprops.subwiki.org/wiki/Derived_subgroup
[12] https://en.wikipedia.org/wiki/Nilpotent_group
[13] https://www.math.colostate.edu/~hulpke/CGT/cgtnotes.pdf
"""
is_group = True
def __new__(cls, *args, **kwargs):
"""The default constructor. Accepts Cycle and Permutation forms.
Removes duplicates unless ``dups`` keyword is False.
"""
if not args:
args = [Permutation()]
else:
args = list(args[0] if is_sequence(args[0]) else args)
if any(isinstance(a, Cycle) for a in args):
args = [Permutation(a) for a in args]
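        # if the generators act on different numbers of points, resize them
        # all to a common degree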
if has_variety(a.size for a in args):
degree = kwargs.pop('degree', None)
if degree is None:
degree = max(a.size for a in args)
for i in range(len(args)):
if args[i].size != degree:
args[i] = Permutation(args[i], size=degree)
if kwargs.pop('dups', True):
args = list(uniq([_af_new(list(a)) for a in args]))
obj = Basic.__new__(cls, *args, **kwargs)
obj._generators = args
obj._order = None
obj._center = []
obj._is_abelian = None
obj._is_transitive = None
obj._is_sym = None
obj._is_alt = None
obj._is_primitive = None
obj._is_nilpotent = None
obj._is_solvable = None
obj._is_trivial = None
obj._transitivity_degree = None
obj._max_div = None
obj._r = len(obj._generators)
obj._degree = obj._generators[0].size
# these attributes are assigned after running schreier_sims
obj._base = []
obj._strong_gens = []
obj._basic_orbits = []
obj._transversals = []
# these attributes are assigned after running _random_pr_init
obj._random_gens = []
return obj
def __getitem__(self, i):
return self._generators[i]
def __contains__(self, i):
"""Return True if `i` is contained in PermutationGroup.
Examples
========
>>> p = Permutation(1, 2, 3)
>>> Permutation(3) in PermutationGroup(p)
True
"""
if not isinstance(i, Permutation):
raise TypeError('A PermutationGroup contains only Permutations as '
'elements, not elements of type %s' % type(i))
return self.contains(i)
def __len__(self):
return len(self._generators)
def __eq__(self, other):
"""Return True if PermutationGroup generated by elements in the
group are same i.e they represent the same PermutationGroup.
Examples
========
>>> p = Permutation(0, 1, 2, 3, 4, 5)
>>> G = PermutationGroup([p, p**2])
>>> H = PermutationGroup([p**2, p])
>>> G.generators == H.generators
False
>>> G == H
True
"""
if not isinstance(other, PermutationGroup):
return False
set_self_gens = set(self.generators)
set_other_gens = set(other.generators)
# before reaching the general case there are also certain
# optimisation and obvious cases requiring less or no actual
# computation.
if set_self_gens == set_other_gens:
return True
# in the most general case it will check that each generator of
# one group belongs to the other PermutationGroup and vice-versa
for gen1 in set_self_gens:
if not other.contains(gen1):
return False
for gen2 in set_other_gens:
if not self.contains(gen2):
return False
return True
def __hash__(self):
return super().__hash__()
def __mul__(self, other):
"""Return the direct product of two permutation groups as a permutation
group.
This implementation realizes the direct product by shifting
the index set for the generators of the second group: so if we have
G acting on n1 points and H acting on n2 points, G*H acts on n1 + n2
points.
Examples
========
>>> G = CyclicGroup(5)
>>> H = G*G
>>> H
PermutationGroup([
Permutation(9)(0, 1, 2, 3, 4),
Permutation(5, 6, 7, 8, 9)])
>>> H.order()
25
"""
gens1 = [perm._array_form for perm in self.generators]
gens2 = [perm._array_form for perm in other.generators]
n1 = self._degree
n2 = other._degree
start = list(range(n1))
end = list(range(n1, n1 + n2))
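        # pad the first group's generators so they fix the last n2 points and
        # shift the second group's generators by n1 so they fix the first n1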
for i in range(len(gens2)):
gens2[i] = [x + n1 for x in gens2[i]]
gens2 = [start + gen for gen in gens2]
gens1 = [gen + end for gen in gens1]
together = gens1 + gens2
gens = [_af_new(x) for x in together]
return PermutationGroup(gens)
def _random_pr_init(self, r, n, _random_prec_n=None):
r"""Initialize random generators for the product replacement algorithm.
The implementation uses a modification of the original product
replacement algorithm due to Leedham-Green, as described in [1],
pp. 69-71; also, see [2], pp. 27-29 for a detailed theoretical
analysis of the original product replacement algorithm, and [4].
The product replacement algorithm is used for producing random,
uniformly distributed elements of a group ``G`` with a set of generators
``S``. For the initialization ``_random_pr_init``, a list ``R`` of
``\max\{r, |S|\}`` group generators is created as the attribute
``G._random_gens``, repeating elements of ``S`` if necessary, and the
identity element of ``G`` is appended to ``R`` - we shall refer to this
last element as the accumulator. Then the function ``random_pr()``
is called ``n`` times, randomizing the list ``R`` while preserving
the generation of ``G`` by ``R``. The function ``random_pr()`` itself
takes two random elements ``g, h`` among all elements of ``R`` but
the accumulator and replaces ``g`` with a randomly chosen element
from ``\{gh, g(~h), hg, (~h)g\}``. Then the accumulator is multiplied
by whatever ``g`` was replaced by. The new value of the accumulator is
then returned by ``random_pr()``.
The elements returned will eventually (for ``n`` large enough) become
uniformly distributed across ``G`` ([5]). For practical purposes however,
the values ``n = 50, r = 11`` are suggested in [1].
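        Examples
        ========
        A sketch of the side effect (only the length of the attribute is
        deterministic; its entries are randomized):
        >>> G = SymmetricGroup(4)
        >>> G._random_pr_init(r=11, n=50)
        >>> len(G._random_gens) # max(r, |S|) generators plus the accumulator
        12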
Notes
=====
THIS FUNCTION HAS SIDE EFFECTS: it changes the attribute
self._random_gens
See Also
========
random_pr
"""
deg = self.degree
random_gens = [x._array_form for x in self.generators]
k = len(random_gens)
if k < r:
for i in range(k, r):
random_gens.append(random_gens[i - k])
acc = list(range(deg))
random_gens.append(acc)
self._random_gens = random_gens
# handle randomized input for testing purposes
if _random_prec_n is None:
for i in range(n):
self.random_pr()
else:
for i in range(n):
self.random_pr(_random_prec=_random_prec_n[i])
def _union_find_merge(self, first, second, ranks, parents, not_rep):
"""Merges two classes in a union-find data structure.
Used in the implementation of Atkinson's algorithm as suggested in [1],
pp. 83-87. The class merging process uses union by rank as an
optimization. ([7])
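        Returns
        =======
        1 if the two classes were merged, 0 if they already coincided, and
        -1 if the merged class would be larger than ``max_div`` (which
        signals failure to ``minimal_block``).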
Notes
=====
THIS FUNCTION HAS SIDE EFFECTS: the list of class representatives,
``parents``, the list of class sizes, ``ranks``, and the list of
elements that are not representatives, ``not_rep``, are changed due to
class merging.
See Also
========
minimal_block, _union_find_rep
References
==========
[1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
[7] https://web.archive.org/web/20170105021515/http://www.algorithmist.com:80/index.php/Union_Find
"""
rep_first = self._union_find_rep(first, parents)
rep_second = self._union_find_rep(second, parents)
if rep_first != rep_second:
# union by rank
if ranks[rep_first] >= ranks[rep_second]:
new_1, new_2 = rep_first, rep_second
else:
new_1, new_2 = rep_second, rep_first
total_rank = ranks[new_1] + ranks[new_2]
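            # a merged class larger than the largest proper divisor of the
            # degree cannot be a block of a nontrivial block system, so
            # signal failure with -1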
if total_rank > self.max_div:
return -1
parents[new_2] = new_1
ranks[new_1] = total_rank
not_rep.append(new_2)
return 1
return 0
def _union_find_rep(self, num, parents):
"""Find representative of a class in a union-find data structure.
Used in the implementation of Atkinson's algorithm as suggested in [1],
pp. 83-87. After the representative of the class to which ``num``
belongs is found, path compression is performed as an optimization
([7]).
Notes
=====
THIS FUNCTION HAS SIDE EFFECTS: the list of class representatives,
``parents``, is altered due to path compression.
See Also
========
minimal_block, _union_find_merge
References
==========
[1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
[7] https://web.archive.org/web/20170105021515/http://www.algorithmist.com:80/index.php/Union_Find
"""
rep, parent = num, parents[num]
while parent != rep:
rep = parent
parent = parents[rep]
# path compression
temp, parent = num, parents[num]
while parent != rep:
parents[temp] = rep
temp = parent
parent = parents[temp]
return rep
@property
def base(self):
"""Return a base from the Schreier-Sims algorithm.
For a permutation group ``G``, a base is a sequence of points
``B = (b_1, b_2, ..., b_k)`` such that no element of ``G`` apart
from the identity fixes all the points in ``B``. The concepts of
a base and strong generating set and their applications are
discussed in depth in [1], pp. 87-89 and [2], pp. 55-57.
An alternative way to think of ``B`` is that it gives the
indices of the stabilizer cosets that contain more than the
identity permutation.
Examples
========
>>> G = PermutationGroup([Permutation(0, 1, 3)(2, 4)])
>>> G.base
[0, 2]
See Also
========
strong_gens, basic_transversals, basic_orbits, basic_stabilizers
"""
if self._base == []:
self.schreier_sims()
return self._base
def baseswap(self, base, strong_gens, pos, randomized=False,
transversals=None, basic_orbits=None, strong_gens_distr=None):
r"""Swap two consecutive base points in base and strong generating set.
If a base for a group ``G`` is given by ``(b_1, b_2, ..., b_k)``, this
function returns a base ``(b_1, b_2, ..., b_{i+1}, b_i, ..., b_k)``,
where ``i`` is given by ``pos``, and a strong generating set relative
to that base. The original base and strong generating set are not
modified.
The randomized version (default) is of Las Vegas type.
Parameters
==========
base, strong_gens
The base and strong generating set.
pos
The position at which swapping is performed.
randomized
A switch between randomized and deterministic version.
transversals
The transversals for the basic orbits, if known.
basic_orbits
The basic orbits, if known.
strong_gens_distr
The strong generators distributed by basic stabilizers, if known.
Returns
=======
(base, strong_gens)
``base`` is the new base, and ``strong_gens`` is a generating set
relative to it.
Examples
========
>>> from diofant.combinatorics.testutil import _verify_bsgs
>>> S = SymmetricGroup(4)
>>> S.schreier_sims()
>>> S.base
[0, 1, 2]
>>> base, gens = S.baseswap(S.base, S.strong_gens, 1, randomized=False)
>>> base, gens
([0, 2, 1],
[Permutation(0, 1, 2, 3), Permutation(3)(0, 1), Permutation(1, 3, 2),
Permutation(2, 3), Permutation(1, 3)])
check that base, gens is a BSGS
>>> S1 = PermutationGroup(gens)
>>> _verify_bsgs(S1, base, gens)
True
See Also
========
schreier_sims
Notes
=====
The deterministic version of the algorithm is discussed in
[1], pp. 102-103; the randomized version is discussed in [1], p.103, and
[2], p.98. It is of Las Vegas type.
Notice that [1] contains a mistake in the pseudocode and
discussion of BASESWAP: on line 3 of the pseudocode,
``|\beta_{i+1}^{\left\langle T\right\rangle}|`` should be replaced by
``|\beta_{i}^{\left\langle T\right\rangle}|``, and the same for the
discussion of the algorithm.
"""
# construct the basic orbits, generators for the stabilizer chain
# and transversal elements from whatever was provided
transversals, basic_orbits, strong_gens_distr = \
_handle_precomputed_bsgs(base, strong_gens, transversals,
basic_orbits, strong_gens_distr)
base_len = len(base)
degree = self.degree
# size of orbit of base[pos] under the stabilizer we seek to insert
# in the stabilizer chain at position pos + 1
size = len(basic_orbits[pos])*len(basic_orbits[pos + 1]) \
// len(_orbit(degree, strong_gens_distr[pos], base[pos + 1]))
# initialize the wanted stabilizer by a subgroup
if pos + 2 > base_len - 1:
T = []
else:
T = strong_gens_distr[pos + 2][:]
# randomized version
if randomized is True:
stab_pos = PermutationGroup(strong_gens_distr[pos])
schreier_vector = stab_pos.schreier_vector(base[pos + 1])
# add random elements of the stabilizer until they generate it
while len(_orbit(degree, T, base[pos])) != size:
new = stab_pos.random_stab(base[pos + 1],
schreier_vector=schreier_vector)
T.append(new)
# deterministic version
else:
Gamma = set(basic_orbits[pos])
Gamma.remove(base[pos])
if base[pos + 1] in Gamma:
Gamma.remove(base[pos + 1])
# add elements of the stabilizer until they generate it by
# ruling out member of the basic orbit of base[pos] along the way
while len(_orbit(degree, T, base[pos])) != size:
gamma = next(iter(Gamma))
x = transversals[pos][gamma]
temp = x._array_form.index(base[pos + 1]) # (~x)(base[pos + 1])
if temp not in basic_orbits[pos + 1]:
Gamma = Gamma - _orbit(degree, T, gamma)
else:
y = transversals[pos + 1][temp]
el = rmul(x, y)
if el(base[pos]) not in _orbit(degree, T, base[pos]):
T.append(el)
Gamma = Gamma - _orbit(degree, T, base[pos])
# build the new base and strong generating set
strong_gens_new_distr = strong_gens_distr[:]
strong_gens_new_distr[pos + 1] = T
base_new = base[:]
base_new[pos], base_new[pos + 1] = base_new[pos + 1], base_new[pos]
strong_gens_new = _strong_gens_from_distr(strong_gens_new_distr)
for gen in T:
if gen not in strong_gens_new:
strong_gens_new.append(gen)
return base_new, strong_gens_new
@property
def basic_orbits(self):
"""
Return the basic orbits relative to a base and strong generating set.
If ``(b_1, b_2, ..., b_k)`` is a base for a group ``G``, and
``G^{(i)} = G_{b_1, b_2, ..., b_{i-1}}`` is the ``i``-th basic stabilizer
(so that ``G^{(1)} = G``), the ``i``-th basic orbit relative to this base
is the orbit of ``b_i`` under ``G^{(i)}``. See [1], pp. 87-89 for more
information.
Examples
========
>>> S = SymmetricGroup(4)
>>> S.basic_orbits
[[0, 1, 2, 3], [1, 2, 3], [2, 3]]
See Also
========
base, strong_gens, basic_transversals, basic_stabilizers
"""
if self._basic_orbits == []:
self.schreier_sims()
return self._basic_orbits
@property
def basic_stabilizers(self):
"""
Return a chain of stabilizers relative to a base and strong generating
set.
The ``i``-th basic stabilizer ``G^{(i)}`` relative to a base
``(b_1, b_2, ..., b_k)`` is ``G_{b_1, b_2, ..., b_{i-1}}``. For more
information, see [1], pp. 87-89.
Examples
========
>>> A = AlternatingGroup(4)
>>> A.schreier_sims()
>>> A.base
[0, 1]
>>> for g in A.basic_stabilizers:
... print(g)
...
PermutationGroup([
Permutation(3)(0, 1, 2),
Permutation(1, 2, 3)])
PermutationGroup([
Permutation(1, 2, 3)])
See Also
========
base, strong_gens, basic_orbits, basic_transversals
"""
if self._transversals == []:
self.schreier_sims()
strong_gens = self._strong_gens
base = self._base
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
basic_stabilizers = []
for gens in strong_gens_distr:
basic_stabilizers.append(PermutationGroup(gens))
return basic_stabilizers
@property
def basic_transversals(self):
"""
Return basic transversals relative to a base and strong generating set.
The basic transversals are transversals of the basic orbits. They
are provided as a list of dictionaries, each dictionary having
keys - the elements of one of the basic orbits, and values - the
corresponding transversal elements. See [1], pp. 87-89 for more
information.
Examples
========
>>> A = AlternatingGroup(4)
>>> A.basic_transversals
[{0: Permutation(3),
1: Permutation(3)(0, 1, 2),
2: Permutation(3)(0, 2, 1),
3: Permutation(0, 3, 1)},
{1: Permutation(3),
2: Permutation(1, 2, 3),
3: Permutation(1, 3, 2)}]
See Also
========
strong_gens, base, basic_orbits, basic_stabilizers
"""
if self._transversals == []:
self.schreier_sims()
return self._transversals
def center(self):
r"""
Return the center of a permutation group.
The center for a group ``G`` is defined as
``Z(G) = \{z\in G | \forall g\in G, zg = gz \}``,
the set of elements of ``G`` that commute with all elements of ``G``.
It is equal to the centralizer of ``G`` inside ``G``, and is naturally a
subgroup of ``G`` ([9]).
Examples
========
>>> D = DihedralGroup(4)
>>> G = D.center()
>>> G.order()
2
See Also
========
centralizer
Notes
=====
This is a naive implementation that is a straightforward application
of ``.centralizer()``
"""
return self.centralizer(self)
def centralizer(self, other):
r"""
Return the centralizer of a group/set/element.
The centralizer of a set of permutations ``S`` inside
a group ``G`` is the set of elements of ``G`` that commute with all
elements of ``S``::
``C_G(S) = \{ g \in G | gs = sg \forall s \in S\}`` ([10])
Usually, ``S`` is a subset of ``G``, but if ``G`` is a proper subgroup of
the full symmetric group, we allow for ``S`` to have elements outside
``G``.
It is naturally a subgroup of ``G``; the centralizer of a permutation
group is equal to the centralizer of any set of generators for that
group, since any element commuting with the generators commutes with
any product of the generators.
Parameters
==========
other
a permutation group/list of permutations/single permutation
Examples
========
>>> S = SymmetricGroup(6)
>>> C = CyclicGroup(6)
>>> H = S.centralizer(C)
>>> H.is_subgroup(C)
True
See Also
========
subgroup_search
Notes
=====
The implementation is an application of ``.subgroup_search()`` with
tests using a specific base for the group ``G``.
"""
if hasattr(other, 'generators'):
if other.is_trivial or self.is_trivial:
return self
degree = self.degree
identity = _af_new(list(range(degree)))
orbits = other.orbits()
num_orbits = len(orbits)
orbits.sort(key=lambda x: -len(x))
long_base = []
orbit_reps = [None]*num_orbits
orbit_reps_indices = [None]*num_orbits
orbit_descr = [None]*degree
for i in range(num_orbits):
orbit = list(orbits[i])
orbit_reps[i] = orbit[0]
orbit_reps_indices[i] = len(long_base)
for point in orbit:
orbit_descr[point] = i
long_base = long_base + orbit
base, strong_gens = self.schreier_sims_incremental(base=long_base)
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
i = 0
for i in range(len(base)):
if strong_gens_distr[i] == [identity]:
break
base = base[:i]
base_len = i
for j in range(num_orbits):
if base[base_len - 1] in orbits[j]:
break
rel_orbits = orbits[: j + 1]
num_rel_orbits = len(rel_orbits)
transversals = [None]*num_rel_orbits
for j in range(num_rel_orbits):
rep = orbit_reps[j]
transversals[j] = dict(
other.orbit_transversal(rep, pairs=True))
def trivial_test(x):
return True
tests = [None]*base_len
for l in range(base_len):
if base[l] in orbit_reps:
tests[l] = trivial_test
else:
def test(computed_words, l=l):
g = computed_words[l]
rep_orb_index = orbit_descr[base[l]]
rep = orbit_reps[rep_orb_index]
im = g._array_form[base[l]]
im_rep = g._array_form[rep]
tr_el = transversals[rep_orb_index][base[l]]
# using the definition of transversal,
# base[l]^g = rep^(tr_el*g);
# if g belongs to the centralizer, then
# base[l]^g = (rep^g)^tr_el
return im == tr_el._array_form[im_rep]
tests[l] = test
def prop(g):
return [rmul(g, gen) for gen in other.generators] == \
[rmul(gen, g) for gen in other.generators]
return self.subgroup_search(prop, base=base,
strong_gens=strong_gens, tests=tests)
elif hasattr(other, '__getitem__'):
gens = list(other)
return self.centralizer(PermutationGroup(gens))
elif hasattr(other, 'array_form'):
return self.centralizer(PermutationGroup([other]))
def commutator(self, G, H):
"""
Return the commutator of two subgroups.
For a permutation group ``K`` and subgroups ``G``, ``H``, the
commutator of ``G`` and ``H`` is defined as the group generated
by all the commutators ``[g, h] = hgh^{-1}g^{-1}`` for ``g`` in ``G`` and
``h`` in ``H``. It is naturally a subgroup of ``K`` ([1], p.27).
Examples
========
>>> S = SymmetricGroup(5)
>>> A = AlternatingGroup(5)
>>> G = S.commutator(S, A)
>>> G.is_subgroup(A)
True
See Also
========
derived_subgroup
Notes
=====
The commutator of two subgroups ``H, G`` is equal to the normal closure
of the commutators of all the generators, i.e. ``hgh^{-1}g^{-1}`` for ``h``
a generator of ``H`` and ``g`` a generator of ``G`` ([1], p.28)
"""
ggens = G.generators
hgens = H.generators
commutators = []
for ggen in ggens:
for hgen in hgens:
commutator = rmul(hgen, ggen, ~hgen, ~ggen)
if commutator not in commutators:
commutators.append(commutator)
res = self.normal_closure(commutators)
return res
def coset_factor(self, g, factor_index=False):
"""Return ``G``'s (self's) coset factorization of ``g``
If ``g`` is an element of ``G`` then it can be written as the product
        of permutations drawn from the Schreier-Sims coset decomposition.
        The permutations returned in ``f`` are those for which
        the product gives ``g``: ``g = f[n-1]*...*f[1]*f[0]`` where ``n = len(B)``
        and ``B = G.base``. ``f[i]`` is one of the transversal permutations in
        ``self.basic_transversals[i]``.
        If ``factor_index == True``,
        returns a list ``[b[0], ..., b[n-1]]``, where ``b[i]``
        belongs to ``self._basic_orbits[i]``.
Examples
========
>>> Permutation.print_cyclic = True
>>> a = Permutation(0, 1, 3, 7, 6, 4)(2, 5)
>>> b = Permutation(0, 1, 3, 2)(4, 5, 7, 6)
>>> G = PermutationGroup([a, b])
Define g:
>>> g = Permutation(7)(1, 2, 4)(3, 6, 5)
Confirm that it is an element of G:
>>> G.contains(g)
True
Thus, it can be written as a product of factors (up to
3) drawn from u. See below that a factor from u1 and u2
and the Identity permutation have been used:
>>> f = G.coset_factor(g)
>>> f[2]*f[1]*f[0] == g
True
>>> f1 = G.coset_factor(g, True)
>>> f1
[0, 4, 4]
>>> tr = G.basic_transversals
>>> f[0] == tr[0][f1[0]]
True
If g is not an element of G then [] is returned:
>>> c = Permutation(5, 6, 7)
>>> G.coset_factor(c)
[]
see util._strip
"""
if isinstance(g, (Cycle, Permutation)):
g = g.list()
if len(g) != self._degree:
# this could either adjust the size or return [] immediately
# but we don't choose between the two and just signal a possible
# error
raise ValueError('g should be the same size as permutations of G')
I = list(range(self._degree))
basic_orbits = self.basic_orbits
transversals = self._transversals
factors = []
base = self.base
h = g
for i in range(len(base)):
beta = h[base[i]]
if beta == base[i]:
factors.append(beta)
continue
if beta not in basic_orbits[i]:
return []
u = transversals[i][beta]._array_form
h = _af_rmul(_af_invert(u), h)
factors.append(beta)
if h != I:
return []
if factor_index:
return factors
tr = self.basic_transversals
factors = [tr[i][factors[i]] for i in range(len(base))]
return factors
def coset_rank(self, g):
"""Rank using Schreier-Sims representation.
The coset rank of ``g`` is the ordering number in which
it appears in the lexicographic listing according to the
coset decomposition
The ordering is the same as in G.generate(method='coset').
If ``g`` does not belong to the group it returns None.
Examples
========
>>> Permutation.print_cyclic = True
>>> a = Permutation(0, 1, 3, 7, 6, 4)(2, 5)
>>> b = Permutation(0, 1, 3, 2)(4, 5, 7, 6)
>>> G = PermutationGroup([a, b])
>>> c = Permutation(7)(2, 4)(3, 5)
>>> G.coset_rank(c)
16
>>> G.coset_unrank(16)
Permutation(7)(2, 4)(3, 5)
See Also
========
coset_factor
"""
factors = self.coset_factor(g, True)
if not factors:
return
rank = 0
b = 1
transversals = self._transversals
base = self._base
basic_orbits = self._basic_orbits
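        # mixed-radix ranking: the digit found at level i is weighted by the
        # product of the sizes of all previous transversals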
for i in range(len(base)):
k = factors[i]
j = basic_orbits[i].index(k)
rank += b*j
b = b*len(transversals[i])
return rank
def coset_unrank(self, rank, af=False):
"""Unrank using Schreier-Sims representation.
coset_unrank is the inverse operation of coset_rank
if 0 <= rank < order; otherwise it returns None.
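        Examples
        ========
        A round-trip sketch, reusing the group from the ``coset_rank`` docstring
        above; unranking the rank of an element recovers the element:
        >>> a = Permutation(0, 1, 3, 7, 6, 4)(2, 5)
        >>> b = Permutation(0, 1, 3, 2)(4, 5, 7, 6)
        >>> G = PermutationGroup([a, b])
        >>> c = Permutation(7)(2, 4)(3, 5)
        >>> G.coset_unrank(G.coset_rank(c)) == c
        True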
"""
if rank < 0 or rank >= self.order():
return
base = self._base
transversals = self._transversals
basic_orbits = self._basic_orbits
m = len(base)
v = [0]*m
for i in range(m):
rank, c = divmod(rank, len(transversals[i]))
v[i] = basic_orbits[i][c]
a = [transversals[i][v[i]]._array_form for i in range(m)]
h = _af_rmuln(*a)
if af:
return h
else:
return _af_new(h)
@property
def degree(self):
"""Returns the size of the permutations in the group.
The number of permutations comprising the group is given by
len(group); the number of permutations that can be generated
by the group is given by group.order().
Examples
========
>>> Permutation.print_cyclic = True
>>> a = Permutation([1, 0, 2])
>>> G = PermutationGroup([a])
>>> G.degree
3
>>> len(G)
1
>>> G.order()
2
>>> list(G.generate())
[Permutation(2), Permutation(2)(0, 1)]
See Also
========
order
"""
return self._degree
@property
def elements(self):
"""Returns all the elements of the permutation group in
a list
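        Examples
        ========
        A small illustrative check (only the number of elements is shown, since
        the printed order of a set is not fixed):
        >>> G = PermutationGroup([Permutation(1, 2)])
        >>> len(G.elements)
        2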
"""
return set(islice(self.generate(), None))
def derived_series(self):
r"""Return the derived series for the group.
The derived series for a group ``G`` is defined as
``G = G_0 > G_1 > G_2 > \ldots`` where ``G_i = [G_{i-1}, G_{i-1}]``,
i.e. ``G_i`` is the derived subgroup of ``G_{i-1}``, for
``i\in\mathbb{N}``. When we have ``G_k = G_{k-1}`` for some
``k\in\mathbb{N}``, the series terminates.
Returns
=======
A list of permutation groups containing the members of the derived
series in the order ``G = G_0, G_1, G_2, \ldots``.
Examples
========
>>> A = AlternatingGroup(5)
>>> len(A.derived_series())
1
>>> S = SymmetricGroup(4)
>>> len(S.derived_series())
4
>>> S.derived_series()[1].is_subgroup(AlternatingGroup(4))
True
>>> S.derived_series()[2].is_subgroup(DihedralGroup(2))
True
See Also
========
derived_subgroup
"""
res = [self]
current = self
next = self.derived_subgroup()
while not current.is_subgroup(next):
res.append(next)
current = next
next = next.derived_subgroup()
return res
def derived_subgroup(self):
r"""Compute the derived subgroup.
The derived subgroup, or commutator subgroup is the subgroup generated
by all commutators ``[g, h] = hgh^{-1}g^{-1}`` for ``g, h\in G`` ; it is
equal to the normal closure of the set of commutators of the generators
([1], p.28, [11]).
Examples
========
>>> Permutation.print_cyclic = True
>>> a = Permutation([1, 0, 2, 4, 3])
>>> b = Permutation([0, 1, 3, 2, 4])
>>> G = PermutationGroup([a, b])
>>> C = G.derived_subgroup()
>>> list(C.generate(af=True))
[[0, 1, 2, 3, 4], [0, 1, 3, 4, 2], [0, 1, 4, 2, 3]]
See Also
========
derived_series
"""
r = self._r
gens = [p._array_form for p in self.generators]
set_commutators = set()
degree = self._degree
rng = list(range(degree))
for i in range(r):
for j in range(r):
p1 = gens[i]
p2 = gens[j]
c = list(range(degree))
for k in rng:
c[p2[p1[k]]] = p1[p2[k]]
ct = tuple(c)
if ct not in set_commutators:
set_commutators.add(ct)
cms = [_af_new(p) for p in set_commutators]
G2 = self.normal_closure(cms)
return G2
def generate(self, method='coset', af=False):
"""Return iterator to generate the elements of the group
Iteration is done with one of these methods::
method='coset' using the Schreier-Sims coset representation
method='dimino' using the Dimino method
If af = True it yields the array form of the permutations
Examples
========
>>> Permutation.print_cyclic = True
        The permutation group given in the tetrahedron object is also
        a true group:
>>> G = tetrahedron.pgroup
>>> G.is_group
True
Also the group generated by the permutations in the tetrahedron
pgroup -- even the first two -- is a proper group:
>>> H = PermutationGroup(G[0], G[1])
>>> J = PermutationGroup(list(H.generate()))
>>> J
PermutationGroup([
Permutation(0, 1)(2, 3),
Permutation(3),
Permutation(1, 2, 3),
Permutation(1, 3, 2),
Permutation(0, 3, 1),
Permutation(0, 2, 3),
Permutation(0, 3)(1, 2),
Permutation(0, 1, 3),
Permutation(3)(0, 2, 1),
Permutation(0, 3, 2),
Permutation(3)(0, 1, 2),
Permutation(0, 2)(1, 3)])
>>> _.is_group
True
"""
if method == 'coset':
return self.generate_schreier_sims(af)
elif method == 'dimino':
return self.generate_dimino(af)
else:
raise NotImplementedError(f'No generation defined for {method}')
def generate_dimino(self, af=False):
"""Yield group elements using Dimino's algorithm
If af == True it yields the array form of the permutations
References
==========
[1] The Implementation of Various Algorithms for Permutation Groups in
the Computer Algebra System: AXIOM, N.J. Doye, M.Sc. Thesis
Examples
========
>>> Permutation.print_cyclic = True
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([0, 2, 3, 1])
>>> g = PermutationGroup([a, b])
>>> list(g.generate_dimino(af=True))
[[0, 1, 2, 3], [0, 2, 1, 3], [0, 2, 3, 1],
[0, 1, 3, 2], [0, 3, 2, 1], [0, 3, 1, 2]]
"""
idn = list(range(self.degree))
order = 0
element_list = [idn]
set_element_list = {tuple(idn)}
if af:
yield idn
else:
yield _af_new(idn)
gens = [p._array_form for p in self.generators]
for i in range(len(gens)):
# D elements of the subgroup G_i generated by gens[:i]
D = element_list[:]
N = [idn]
while N:
A = N
N = []
for a in A:
for g in gens[:i + 1]:
ag = _af_rmul(a, g)
if tuple(ag) not in set_element_list:
# produce G_i*g
for d in D:
order += 1
ap = _af_rmul(d, ag)
if af:
yield ap
else:
p = _af_new(ap)
yield p
element_list.append(ap)
set_element_list.add(tuple(ap))
N.append(ap)
self._order = len(element_list)
def generate_schreier_sims(self, af=False):
"""Yield group elements using the Schreier-Sims representation
in coset_rank order
If af = True it yields the array form of the permutations
Examples
========
>>> Permutation.print_cyclic = True
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([0, 2, 3, 1])
>>> g = PermutationGroup([a, b])
>>> list(g.generate_schreier_sims(af=True))
[[0, 1, 2, 3], [0, 2, 1, 3], [0, 3, 2, 1],
[0, 1, 3, 2], [0, 2, 3, 1], [0, 3, 1, 2]]
"""
n = self._degree
u = self.basic_transversals
basic_orbits = self._basic_orbits
if len(u) == 0:
for x in self.generators:
if af:
yield x._array_form
else:
yield x
return
if len(u) == 1:
for i in basic_orbits[0]:
if af:
yield u[0][i]._array_form
else:
yield u[0][i]
return
u = list(reversed(u))
basic_orbits = basic_orbits[::-1]
# stg stack of group elements
stg = [list(range(n))]
posmax = [len(x) for x in u]
n1 = len(posmax) - 1
pos = [0]*n1
h = 0
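        # depth-first walk over the stabilizer chain: stg[-1] holds the partial
        # product of the transversal elements chosen so far, and pos[h] is the
        # index of the next coset representative to try at level h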
while 1:
# backtrack when finished iterating over coset
if pos[h] >= posmax[h]:
if h == 0:
return
pos[h] = 0
h -= 1
stg.pop()
continue
p = _af_rmul(u[h][basic_orbits[h][pos[h]]]._array_form, stg[-1])
pos[h] += 1
stg.append(p)
h += 1
if h == n1:
if af:
for i in basic_orbits[-1]:
p = _af_rmul(u[-1][i]._array_form, stg[-1])
yield p
else:
for i in basic_orbits[-1]:
p = _af_rmul(u[-1][i]._array_form, stg[-1])
p1 = _af_new(p)
yield p1
stg.pop()
h -= 1
@property
def generators(self):
"""Returns the generators of the group.
Examples
========
>>> Permutation.print_cyclic = True
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.generators
[Permutation(1, 2), Permutation(2)(0, 1)]
"""
return self._generators
def contains(self, g, strict=True):
"""Test if permutation ``g`` belong to self, ``G``.
If ``g`` is an element of ``G`` it can be written as a product
of factors drawn from the cosets of ``G``'s stabilizers. To see
if ``g`` is one of the actual generators defining the group use
``G.has(g)``.
If ``strict`` is not True, ``g`` will be resized, if necessary,
to match the size of permutations in ``self``.
Examples
========
>>> Permutation.print_cyclic = True
>>> a = Permutation(1, 2)
>>> b = Permutation(2, 3, 1)
>>> G = PermutationGroup(a, b, degree=5)
>>> G.contains(G[0]) # trivial check
True
>>> elem = Permutation([[2, 3]], size=5)
>>> G.contains(elem)
True
>>> G.contains(Permutation(4)(0, 1, 2, 3))
False
If strict is False, a permutation will be resized, if
necessary:
>>> H = PermutationGroup(Permutation(5))
>>> H.contains(Permutation(3))
False
>>> H.contains(Permutation(3), strict=False)
True
To test if a given permutation is present in the group:
>>> elem in G.generators
False
>>> G.has(elem)
False
See Also
========
coset_factor, diofant.core.basic.Basic.has
"""
if not isinstance(g, Permutation):
return False
if g.size != self.degree:
if strict:
return False
g = Permutation(g, size=self.degree)
if g in self.generators:
return True
return bool(self.coset_factor(g.array_form, True))
@property
def is_abelian(self):
"""Test if the group is Abelian.
Examples
========
>>> Permutation.print_cyclic = True
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.is_abelian
False
>>> a = Permutation([0, 2, 1])
>>> G = PermutationGroup([a])
>>> G.is_abelian
True
"""
if self._is_abelian is not None:
return self._is_abelian
self._is_abelian = True
gens = [p._array_form for p in self.generators]
for x in gens:
for y in gens:
if y <= x:
continue
if not _af_commutes_with(x, y):
self._is_abelian = False
return False
return True
def is_alt_sym(self, eps=0.05, _random_prec=None):
r"""Monte Carlo test for the symmetric/alternating group for degrees
>= 8.
More specifically, it is one-sided Monte Carlo with the
answer True (i.e., G is symmetric/alternating) guaranteed to be
correct, and the answer False being incorrect with probability eps.
Notes
=====
The algorithm itself uses some nontrivial results from group theory and
number theory:
1) If a transitive group ``G`` of degree ``n`` contains an element
with a cycle of length ``n/2 < p < n-2`` for ``p`` a prime, ``G`` is the
symmetric or alternating group ([1], pp. 81-82)
2) The proportion of elements in the symmetric/alternating group having
the property described in 1) is approximately ``\log(2)/\log(n)``
([1], p.82; [2], pp. 226-227).
The helper function ``_check_cycles_alt_sym`` is used to
go over the cycles in a permutation and look for ones satisfying 1).
Examples
========
>>> D = DihedralGroup(10)
>>> D.is_alt_sym()
False
See Also
========
diofant.combinatorics.util._check_cycles_alt_sym
"""
if _random_prec is None:
n = self.degree
if n < 8:
return False
if not self.is_transitive():
return False
if n < 17:
c_n = 0.34
else:
c_n = 0.57
d_n = (c_n*log(2))/log(n)
N_eps = int(-log(eps)/d_n)
for i in range(N_eps):
perm = self.random_pr()
if _check_cycles_alt_sym(perm):
return True
return False
else:
for i in range(_random_prec['N_eps']):
perm = _random_prec[i]
if _check_cycles_alt_sym(perm):
return True
return False
@property
def is_nilpotent(self):
"""Test if the group is nilpotent.
A group ``G`` is nilpotent if it has a central series of finite length.
Alternatively, ``G`` is nilpotent if its lower central series terminates
with the trivial group. Every nilpotent group is also solvable
([1], p.29, [12]).
Examples
========
>>> C = CyclicGroup(6)
>>> C.is_nilpotent
True
>>> S = SymmetricGroup(5)
>>> S.is_nilpotent
False
See Also
========
lower_central_series, is_solvable
"""
if self._is_nilpotent is None:
lcs = self.lower_central_series()
terminator = lcs[len(lcs) - 1]
gens = terminator.generators
degree = self.degree
identity = _af_new(list(range(degree)))
if all(g == identity for g in gens):
self._is_solvable = True
self._is_nilpotent = True
return True
else:
self._is_nilpotent = False
return False
else:
return self._is_nilpotent
def is_normal(self, gr, strict=True):
"""Test if G=self is a normal subgroup of gr.
G is normal in gr if
for each g2 in G, g1 in gr, g = g1*g2*g1**-1 belongs to G
        It is sufficient to check this for each g1 in gr.generators and
        g2 in G.generators.
Examples
========
>>> Permutation.print_cyclic = True
>>> a = Permutation([1, 2, 0])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G1 = PermutationGroup([a, Permutation([2, 0, 1])])
>>> G1.is_normal(G)
True
"""
d_self = self.degree
d_gr = gr.degree
new_self = self.copy()
if not strict and d_self != d_gr:
if d_self < d_gr:
new_self = PermGroup(new_self.generators + [Permutation(d_gr - 1)])
else:
gr = PermGroup(gr.generators + [Permutation(d_self - 1)])
gens2 = [p._array_form for p in new_self.generators]
gens1 = [p._array_form for p in gr.generators]
for g1 in gens1:
for g2 in gens2:
p = _af_rmuln(g1, g2, _af_invert(g1))
if not new_self.coset_factor(p, True):
return False
return True
def is_primitive(self, randomized=True):
r"""Test if a group is primitive.
A permutation group ``G`` acting on a set ``S`` is called primitive if
``S`` contains no nontrivial block under the action of ``G``
(a block is nontrivial if its cardinality is more than ``1``).
Notes
=====
The algorithm is described in [1], p.83, and uses the function
minimal_block to search for blocks of the form ``\{0, k\}`` for ``k``
ranging over representatives for the orbits of ``G_0``, the stabilizer of
``0``. This algorithm has complexity ``O(n^2)`` where ``n`` is the degree
of the group, and will perform badly if ``G_0`` is small.
There are two implementations offered: one finds ``G_0``
deterministically using the function ``stabilizer``, and the other
(default) produces random elements of ``G_0`` using ``random_stab``,
hoping that they generate a subgroup of ``G_0`` with not too many more
orbits than G_0 (this is suggested in [1], p.83). Behavior is changed
by the ``randomized`` flag.
Examples
========
>>> D = DihedralGroup(10)
>>> D.is_primitive()
False
See Also
========
minimal_block, random_stab
"""
if self._is_primitive is not None:
return self._is_primitive
n = self.degree
if randomized:
random_stab_gens = []
v = self.schreier_vector(0)
for i in range(len(self)):
random_stab_gens.append(self.random_stab(0, v))
stab = PermutationGroup(random_stab_gens)
else:
stab = self.stabilizer(0)
orbits = stab.orbits()
for orb in orbits:
x = orb.pop()
if x != 0 and self.minimal_block([0, x]) != [0]*n:
self._is_primitive = False
return False
self._is_primitive = True
return True
@property
def is_solvable(self):
"""Test if the group is solvable.
``G`` is solvable if its derived series terminates with the trivial
group ([1], p.29).
Examples
========
>>> S = SymmetricGroup(3)
>>> S.is_solvable
True
See Also
========
is_nilpotent, derived_series
"""
if self._is_solvable is None:
ds = self.derived_series()
terminator = ds[len(ds) - 1]
gens = terminator.generators
degree = self.degree
identity = _af_new(list(range(degree)))
if all(g == identity for g in gens):
self._is_solvable = True
return True
else:
self._is_solvable = False
return False
else:
return self._is_solvable
def is_subgroup(self, G, strict=True):
"""Return True if all elements of self belong to G.
If ``strict`` is False then if ``self``'s degree is smaller
than ``G``'s, the elements will be resized to have the same degree.
Examples
========
Testing is strict by default: the degree of each group must be the
same:
>>> p = Permutation(0, 1, 2, 3, 4, 5)
>>> G1 = PermutationGroup([Permutation(0, 1, 2), Permutation(0, 1)])
>>> G2 = PermutationGroup([Permutation(0, 2), Permutation(0, 1, 2)])
>>> G3 = PermutationGroup([p, p**2])
>>> assert G1.order() == G2.order() == G3.order() == 6
>>> G1.is_subgroup(G2)
True
>>> G1.is_subgroup(G3)
False
>>> G3.is_subgroup(PermutationGroup(G3[1]))
False
>>> G3.is_subgroup(PermutationGroup(G3[0]))
True
To ignore the size, set ``strict`` to False:
>>> S3 = SymmetricGroup(3)
>>> S5 = SymmetricGroup(5)
>>> S3.is_subgroup(S5, strict=False)
True
>>> C7 = CyclicGroup(7)
>>> G = S5*C7
>>> S5.is_subgroup(G, False)
True
>>> C7.is_subgroup(G, 0)
False
"""
if not isinstance(G, PermutationGroup):
return False
if self == G:
return True
if G.order() % self.order() != 0:
return False
if self.degree == G.degree or \
(self.degree < G.degree and not strict):
gens = self.generators
else:
return False
return all(G.contains(g, strict=strict) for g in gens)
def is_transitive(self, strict=True):
"""Test if the group is transitive.
A group is transitive if it has a single orbit.
If ``strict`` is False the group is transitive if it has
a single orbit of length different from 1.
Examples
========
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([2, 0, 1, 3])
>>> G1 = PermutationGroup([a, b])
>>> G1.is_transitive()
False
>>> G1.is_transitive(strict=False)
True
>>> c = Permutation([2, 3, 0, 1])
>>> G2 = PermutationGroup([a, c])
>>> G2.is_transitive()
True
>>> d = Permutation([1, 0, 2, 3])
>>> e = Permutation([0, 1, 3, 2])
>>> G3 = PermutationGroup([d, e])
>>> G3.is_transitive() or G3.is_transitive(strict=False)
False
"""
if self._is_transitive: # strict or not, if True then True
return self._is_transitive
if strict:
if self._is_transitive is not None: # we only store strict=True
return self._is_transitive
ans = len(self.orbit(0)) == self.degree
self._is_transitive = ans
return ans
got_orb = False
for x in self.orbits():
if len(x) > 1:
if got_orb:
return False
got_orb = True
return got_orb
@property
def is_trivial(self):
"""Test if the group is the trivial group.
This is true if the group contains only the identity permutation.
Examples
========
>>> G = PermutationGroup([Permutation([0, 1, 2])])
>>> G.is_trivial
True
"""
if self._is_trivial is None:
self._is_trivial = len(self) == 1 and self[0].is_Identity
return self._is_trivial
def lower_central_series(self):
r"""Return the lower central series for the group.
The lower central series for a group ``G`` is the series
``G = G_0 > G_1 > G_2 > \ldots`` where
``G_k = [G, G_{k-1}]``, i.e. every term after the first is equal to the
        commutator of ``G`` and the previous term in the series ([1], p.29).
Returns
=======
A list of permutation groups in the order
``G = G_0, G_1, G_2, \ldots``
Examples
========
>>> A = AlternatingGroup(4)
>>> len(A.lower_central_series())
2
>>> A.lower_central_series()[1].is_subgroup(DihedralGroup(2))
True
See Also
========
commutator, derived_series
"""
res = [self]
current = self
next = self.commutator(self, current)
while not current.is_subgroup(next):
res.append(next)
current = next
next = self.commutator(self, current)
return res
@property
def max_div(self):
"""Maximum proper divisor of the degree of a permutation group.
Notes
=====
Obviously, this is the degree divided by its minimal proper divisor
(larger than ``1``, if one exists). As it is guaranteed to be prime,
the ``sieve`` from ``diofant.ntheory`` is used.
This function is also used as an optimization tool for the functions
``minimal_block`` and ``_union_find_merge``.
Examples
========
>>> G = PermutationGroup([Permutation([0, 2, 1, 3])])
>>> G.max_div
2
See Also
========
minimal_block, _union_find_merge
"""
if self._max_div is not None:
return self._max_div
n = self.degree
if n == 1:
return 1
for x in sieve:
if n % x == 0:
d = n//x
self._max_div = d
return d
def minimal_block(self, points):
r"""For a transitive group, finds the block system generated by
``points``.
If a group ``G`` acts on a set ``S``, a nonempty subset ``B`` of ``S``
is called a block under the action of ``G`` if for all ``g`` in ``G``
we have ``gB = B`` (``g`` fixes ``B``) or ``gB`` and ``B`` have no
common points (``g`` moves ``B`` entirely). ([1], p.23; [6]).
The distinct translates ``gB`` of a block ``B`` for ``g`` in ``G``
partition the set ``S`` and this set of translates is known as a block
system. Moreover, we obviously have that all blocks in the partition
have the same size, hence the block size divides ``|S|`` ([1], p.23).
A ``G``-congruence is an equivalence relation ``~`` on the set ``S``
such that ``a ~ b`` implies ``g(a) ~ g(b)`` for all ``g`` in ``G``.
For a transitive group, the equivalence classes of a ``G``-congruence
and the blocks of a block system are the same thing ([1], p.23).
The algorithm below checks the group for transitivity, and then finds
the ``G``-congruence generated by the pairs ``(p_0, p_1), (p_0, p_2),
..., (p_0,p_{k-1})`` which is the same as finding the maximal block
system (i.e., the one with minimum block size) such that
``p_0, ..., p_{k-1}`` are in the same block ([1], p.83).
It is an implementation of Atkinson's algorithm, as suggested in [1],
and manipulates an equivalence relation on the set ``S`` using a
union-find data structure. The running time is just above
``O(|points||S|)``. ([1], pp. 83-87; [7]).
Examples
========
>>> D = DihedralGroup(10)
>>> D.minimal_block([0, 5])
[0, 6, 2, 8, 4, 0, 6, 2, 8, 4]
>>> D.minimal_block([0, 1])
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
See Also
========
_union_find_rep, _union_find_merge, is_transitive, is_primitive
"""
if not self.is_transitive():
return False
n = self.degree
gens = self.generators
# initialize the list of equivalence class representatives
parents = list(range(n))
ranks = [1]*n
not_rep = []
k = len(points)
# the block size must divide the degree of the group
if k > self.max_div:
return [0]*n
for i in range(k - 1):
parents[points[i + 1]] = points[0]
not_rep.append(points[i + 1])
ranks[points[0]] = k
i = 0
len_not_rep = k - 1
while i < len_not_rep:
temp = not_rep[i]
i += 1
for gen in gens:
# find has side effects: performs path compression on the list
# of representatives
delta = self._union_find_rep(temp, parents)
# union has side effects: performs union by rank on the list
# of representatives
temp = self._union_find_merge(gen(temp), gen(delta), ranks,
parents, not_rep)
if temp == -1:
return [0]*n
len_not_rep += temp
for i in range(n):
# force path compression to get the final state of the equivalence
# relation
self._union_find_rep(i, parents)
return parents
def normal_closure(self, other, k=10):
r"""Return the normal closure of a subgroup/set of permutations.
        If ``S`` is a subset of a group ``G``, the normal closure of ``S`` in ``G``
        is defined as the intersection of all normal subgroups of ``G`` that
        contain ``S`` ([1], p.14). Alternatively, it is the group generated by
the conjugates ``x^{-1}yx`` for ``x`` a generator of ``G`` and ``y`` a
generator of the subgroup ``\left\langle S\right\rangle`` generated by
``S`` (for some chosen generating set for ``\left\langle S\right\rangle``)
([1], p.73).
Parameters
==========
other
a subgroup/list of permutations/single permutation
k
an implementation-specific parameter that determines the number
of conjugates that are adjoined to ``other`` at once
Examples
========
>>> S = SymmetricGroup(5)
>>> C = CyclicGroup(5)
>>> G = S.normal_closure(C)
>>> G.order()
60
>>> G.is_subgroup(AlternatingGroup(5))
True
See Also
========
commutator, derived_subgroup, random_pr
Notes
=====
The algorithm is described in [1], pp. 73-74; it makes use of the
generation of random elements for permutation groups by the product
replacement algorithm.
"""
if hasattr(other, 'generators'):
degree = self.degree
identity = _af_new(list(range(degree)))
if all(g == identity for g in other.generators):
return other
Z = PermutationGroup(other.generators[:])
base, strong_gens = Z.schreier_sims_incremental()
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
basic_orbits, basic_transversals = \
_orbits_transversals_from_bsgs(base, strong_gens_distr)
self._random_pr_init(r=10, n=20)
_loop = True
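            # keep adjoining conjugates h ^ g that fail to sift through the
            # current BSGS of Z; once every conjugate of Z's generators by the
            # generators of self sifts completely, Z is closed under conjugation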
while _loop:
Z._random_pr_init(r=10, n=10)
for i in range(k):
g = self.random_pr()
h = Z.random_pr()
conj = h ^ g
res = _strip(conj, base, basic_orbits, basic_transversals)
if res[0] != identity or res[1] != len(base) + 1:
gens = Z.generators
gens.append(conj)
Z = PermutationGroup(gens)
strong_gens.append(conj)
temp_base, temp_strong_gens = \
Z.schreier_sims_incremental(base, strong_gens)
base, strong_gens = temp_base, temp_strong_gens
strong_gens_distr = \
_distribute_gens_by_base(base, strong_gens)
basic_orbits, basic_transversals = \
_orbits_transversals_from_bsgs(base,
strong_gens_distr)
_loop = False
for g in self.generators:
for h in Z.generators:
conj = h ^ g
res = _strip(conj, base, basic_orbits,
basic_transversals)
if res[0] != identity or res[1] != len(base) + 1:
_loop = True
break
if _loop:
break
return Z
elif hasattr(other, '__getitem__'):
return self.normal_closure(PermutationGroup(other))
elif hasattr(other, 'array_form'):
return self.normal_closure(PermutationGroup([other]))
def orbit(self, alpha, action='tuples'):
r"""Compute the orbit of alpha ``\{g(\alpha) | g \in G\}`` as a set.
The time complexity of the algorithm used here is ``O(|Orb|*r)`` where
``|Orb|`` is the size of the orbit and ``r`` is the number of generators of
the group. For a more detailed analysis, see [1], p.78, [2], pp. 19-21.
Here alpha can be a single point, or a list of points.
If alpha is a single point, the ordinary orbit is computed.
        If alpha is a list of points, there are three available options:
        'union' - computes the union of the orbits of the points in the list
        'tuples' - computes the orbit of the list interpreted as an ordered
        tuple under the group action (i.e., g((1, 2, 3)) = (g(1), g(2), g(3)))
        'sets' - computes the orbit of the list interpreted as a set
Examples
========
>>> a = Permutation([1, 2, 0, 4, 5, 6, 3])
>>> G = PermutationGroup([a])
>>> G.orbit(0)
{0, 1, 2}
>>> G.orbit([0, 4], 'union')
{0, 1, 2, 3, 4, 5, 6}
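        An additional sketch for the 'tuples' and 'sets' actions (only the orbit
        sizes are shown, to keep the output short):
        >>> S = SymmetricGroup(3)
        >>> len(S.orbit([0, 1], 'tuples'))
        6
        >>> len(S.orbit([0, 1], 'sets'))
        3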
See Also
========
orbit_transversal
"""
return _orbit(self.degree, self.generators, alpha, action)
def orbit_rep(self, alpha, beta, schreier_vector=None):
"""Return a group element which sends ``alpha`` to ``beta``.
If ``beta`` is not in the orbit of ``alpha``, the function returns
``False``. This implementation makes use of the schreier vector.
For a proof of correctness, see [1], p.80
Examples
========
>>> Permutation.print_cyclic = True
>>> G = AlternatingGroup(5)
>>> G.orbit_rep(0, 4)
Permutation(0, 4, 1, 2, 3)
See Also
========
schreier_vector
"""
if schreier_vector is None:
schreier_vector = self.schreier_vector(alpha)
if schreier_vector[beta] is None:
return False
k = schreier_vector[beta]
gens = [x._array_form for x in self.generators]
a = []
while k != -1:
a.append(gens[k])
beta = gens[k].index(beta) # beta = (~gens[k])(beta)
k = schreier_vector[beta]
if a:
return _af_new(_af_rmuln(*a))
else:
return _af_new(list(range(self._degree)))
def orbit_transversal(self, alpha, pairs=False):
r"""Computes a transversal for the orbit of ``alpha`` as a set.
For a permutation group ``G``, a transversal for the orbit
``Orb = \{g(\alpha) | g \in G\}`` is a set
``\{g_\beta | g_\beta(\alpha) = \beta\}`` for ``\beta \in Orb``.
Note that there may be more than one possible transversal.
If ``pairs`` is set to ``True``, it returns the list of pairs
``(\beta, g_\beta)``. For a proof of correctness, see [1], p.79
Examples
========
>>> Permutation.print_cyclic = True
>>> G = DihedralGroup(6)
>>> G.orbit_transversal(0)
[Permutation(5),
Permutation(0, 1, 2, 3, 4, 5),
Permutation(0, 5)(1, 4)(2, 3),
Permutation(0, 2, 4)(1, 3, 5),
Permutation(5)(0, 4)(1, 3),
Permutation(0, 3)(1, 4)(2, 5)]
See Also
========
orbit
"""
return _orbit_transversal(self._degree, self.generators, alpha, pairs)
def orbits(self, rep=False):
"""Return the orbits of self, ordered according to lowest element
in each orbit.
Examples
========
>>> a = Permutation(1, 5)(2, 3)(4, 0, 6)
>>> b = Permutation(1, 5)(3, 4)(2, 6, 0)
>>> G = PermutationGroup([a, b])
>>> G.orbits()
[{0, 2, 3, 4, 6}, {1, 5}]
"""
return _orbits(self._degree, self._generators)
def order(self):
"""Return the order of the group: the number of permutations that
can be generated from elements of the group.
The number of permutations comprising the group is given by
len(group); the length of each permutation in the group is
        given by group.degree.
Examples
========
>>> a = Permutation([1, 0, 2])
>>> G = PermutationGroup([a])
>>> G.degree
3
>>> len(G)
1
>>> G.order()
2
>>> list(G.generate())
[Permutation(2), Permutation(2)(0, 1)]
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.order()
6
See Also
========
degree
"""
if self._order is not None:
return self._order
if self._is_sym:
n = self._degree
self._order = factorial(n)
return self._order
if self._is_alt:
n = self._degree
self._order = factorial(n)/2
return self._order
basic_transversals = self.basic_transversals
m = 1
for x in basic_transversals:
m *= len(x)
self._order = m
return m
def pointwise_stabilizer(self, points, incremental=True):
r"""Return the pointwise stabilizer for a set of points.
For a permutation group ``G`` and a set of points
``\{p_1, p_2,\ldots, p_k\}``, the pointwise stabilizer of
``p_1, p_2, \ldots, p_k`` is defined as
        ``G_{p_1,\ldots, p_k} =
        \{g\in G | g(p_i) = p_i \forall i\in\{1, 2,\ldots,k\}\}`` ([1], p.20).
It is a subgroup of ``G``.
Examples
========
>>> S = SymmetricGroup(7)
>>> Stab = S.pointwise_stabilizer([2, 3, 5])
>>> Stab.is_subgroup(S.stabilizer(2).stabilizer(3).stabilizer(5))
True
See Also
========
stabilizer, schreier_sims_incremental
Notes
=====
When incremental == True,
rather than the obvious implementation using successive calls to
.stabilizer(), this uses the incremental Schreier-Sims algorithm
        to obtain a base whose starting segment is the given points.
"""
if incremental:
base, strong_gens = self.schreier_sims_incremental(base=points)
stab_gens = []
degree = self.degree
for gen in strong_gens:
if [gen(point) for point in points] == points:
stab_gens.append(gen)
if not stab_gens:
stab_gens = _af_new(list(range(degree)))
return PermutationGroup(stab_gens)
else:
gens = self._generators
degree = self.degree
for x in points:
gens = _stabilizer(degree, gens, x)
return PermutationGroup(gens)
def make_perm(self, n, seed=None):
"""
Multiply ``n`` randomly selected permutations from
pgroup together, starting with the identity
permutation. If ``n`` is a list of integers, those
integers will be used to select the permutations and they
will be applied in L to R order: make_perm((A, B, C)) will
give CBA(I) where I is the identity permutation.
``seed`` is used to set the seed for the random selection
of permutations from pgroup. If this is a list of integers,
the corresponding permutations from pgroup will be selected
        in the order given. This is mainly used for testing purposes.
Examples
========
>>> Permutation.print_cyclic = True
>>> a, b = [Permutation([1, 0, 3, 2]), Permutation([1, 3, 0, 2])]
>>> G = PermutationGroup([a, b])
>>> G.make_perm(1, [0])
Permutation(0, 1)(2, 3)
>>> G.make_perm(3, [0, 1, 0])
Permutation(0, 2, 3, 1)
>>> G.make_perm([0, 1, 0])
Permutation(0, 2, 3, 1)
See Also
========
random
"""
if is_sequence(n):
if seed is not None:
raise ValueError('If n is a sequence, seed should be None')
n, seed = len(n), n
else:
try:
n = int(n)
except TypeError:
raise ValueError('n must be an integer or a sequence.')
randrange = _randrange(seed)
# start with the identity permutation
result = Permutation(list(range(self.degree)))
m = len(self)
for i in range(n):
p = self[randrange(m)]
result = rmul(result, p)
return result
def random(self, af=False):
"""Return a random group element."""
rank = randrange(self.order())
return self.coset_unrank(rank, af)
def random_pr(self, gen_count=11, iterations=50, _random_prec=None):
"""Return a random group element using product replacement.
For the details of the product replacement algorithm, see
``_random_pr_init`` In ``random_pr`` the actual 'product replacement'
is performed. Notice that if the attribute ``_random_gens``
is empty, it needs to be initialized by ``_random_pr_init``.
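        Examples
        ========
        A quick sanity check (any element produced by product replacement lies
        in the group, so the membership test below is deterministic):
        >>> D = DihedralGroup(6)
        >>> D.contains(D.random_pr())
        True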
See Also
========
_random_pr_init
"""
if self._random_gens == []:
self._random_pr_init(gen_count, iterations)
random_gens = self._random_gens
r = len(random_gens) - 1
# handle randomized input for testing purposes
if _random_prec is None:
s = randrange(r)
t = randrange(r - 1)
if t == s:
t = r - 1
x = choice([1, 2])
e = choice([-1, 1])
else:
s = _random_prec['s']
t = _random_prec['t']
if t == s:
t = r - 1
x = _random_prec['x']
e = _random_prec['e']
if x == 1:
random_gens[s] = _af_rmul(random_gens[s], _af_pow(random_gens[t], e))
random_gens[r] = _af_rmul(random_gens[r], random_gens[s])
else:
random_gens[s] = _af_rmul(_af_pow(random_gens[t], e), random_gens[s])
random_gens[r] = _af_rmul(random_gens[s], random_gens[r])
return _af_new(random_gens[r])
def random_stab(self, alpha, schreier_vector=None, _random_prec=None):
"""Random element from the stabilizer of ``alpha``.
The schreier vector for ``alpha`` is an optional argument used
for speeding up repeated calls. The algorithm is described in [1], p.81
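        Examples
        ========
        A quick check (the returned element always fixes ``alpha``, so the test
        below is deterministic):
        >>> A = AlternatingGroup(5)
        >>> g = A.random_stab(2)
        >>> g(2)
        2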
See Also
========
random_pr, orbit_rep
"""
if schreier_vector is None:
schreier_vector = self.schreier_vector(alpha)
if _random_prec is None:
rand = self.random_pr()
else:
rand = _random_prec['rand']
beta = rand(alpha)
h = self.orbit_rep(alpha, beta, schreier_vector)
return rmul(~h, rand)
def schreier_sims(self):
"""Schreier-Sims algorithm.
It computes the generators of the chain of stabilizers
G > G_{b_1} > .. > G_{b1,..,b_r} > 1
in which G_{b_1,..,b_i} stabilizes b_1,..,b_i,
and the corresponding ``s`` cosets.
An element of the group can be written as the product
h_1*..*h_s.
We use the incremental Schreier-Sims algorithm.
Examples
========
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.schreier_sims()
>>> G.basic_transversals
[{0: Permutation(2)(0, 1), 1: Permutation(2), 2: Permutation(1, 2)},
{0: Permutation(2), 2: Permutation(0, 2)}]
"""
if self._transversals:
return
base, strong_gens = self.schreier_sims_incremental()
self._base = base
self._strong_gens = strong_gens
if not base:
self._transversals = []
self._basic_orbits = []
return
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
basic_orbits, transversals = _orbits_transversals_from_bsgs(base,
strong_gens_distr)
self._transversals = transversals
self._basic_orbits = [sorted(x) for x in basic_orbits]
def schreier_sims_incremental(self, base=None, gens=None):
"""Extend a sequence of points and generating set to a base and strong
generating set.
Parameters
==========
base
The sequence of points to be extended to a base. Optional
parameter with default value ``[]``.
gens
The generating set to be extended to a strong generating set
relative to the base obtained. Optional parameter with default
value ``self.generators``.
Returns
=======
(base, strong_gens)
``base`` is the base obtained, and ``strong_gens`` is the strong
generating set relative to it. The original parameters ``base``,
``gens`` remain unchanged.
Examples
========
>>> from diofant.combinatorics.testutil import _verify_bsgs
>>> A = AlternatingGroup(7)
>>> base = [2, 3]
>>> seq = [2, 3]
>>> base, strong_gens = A.schreier_sims_incremental(base=seq)
>>> _verify_bsgs(A, base, strong_gens)
True
>>> base[:2]
[2, 3]
Notes
=====
This version of the Schreier-Sims algorithm runs in polynomial time.
There are certain assumptions in the implementation - if the trivial
group is provided, ``base`` and ``gens`` are returned immediately,
as any sequence of points is a base for the trivial group. If the
identity is present in the generators ``gens``, it is removed as
it is a redundant generator.
The implementation is described in [1], pp. 90-93.
See Also
========
schreier_sims, schreier_sims_random
"""
if base is None:
base = []
if gens is None:
gens = self.generators[:]
degree = self.degree
id_af = list(range(degree))
# handle the trivial group
if len(gens) == 1 and gens[0].is_Identity:
return base, gens
# prevent side effects
_base, _gens = base[:], gens[:]
# remove the identity as a generator
_gens = [x for x in _gens if not x.is_Identity]
# make sure no generator fixes all base points
for gen in _gens:
if all(x == gen._array_form[x] for x in _base):
for new in id_af:
if gen._array_form[new] != new:
break
else:
assert None # can this ever happen?
_base.append(new)
# distribute generators according to basic stabilizers
strong_gens_distr = _distribute_gens_by_base(_base, _gens)
# initialize the basic stabilizers, basic orbits and basic transversals
orbs = {}
transversals = {}
base_len = len(_base)
for i in range(base_len):
transversals[i] = dict(_orbit_transversal(degree, strong_gens_distr[i],
_base[i], pairs=True, af=True))
orbs[i] = list(transversals[i])
# main loop: amend the stabilizer chain until we have generators
# for all stabilizers
i = base_len - 1
while i >= 0:
# this flag is used to continue with the main loop from inside
# a nested loop
continue_i = False
# test the generators for being a strong generating set
db = {}
for beta, u_beta in list(transversals[i].items()):
for gen in strong_gens_distr[i]:
gb = gen._array_form[beta]
u1 = transversals[i][gb]
g1 = _af_rmul(gen._array_form, u_beta)
if g1 != u1:
# test if the schreier generator is in the i+1-th
# would-be basic stabilizer
y = True
try:
u1_inv = db[gb]
except KeyError:
u1_inv = db[gb] = _af_invert(u1)
schreier_gen = _af_rmul(u1_inv, g1)
h, j = _strip_af(schreier_gen, _base, orbs, transversals, i)
if j <= base_len:
# new strong generator h at level j
y = False
elif h:
# h fixes all base points
y = False
moved = 0
while h[moved] == moved:
moved += 1
_base.append(moved)
base_len += 1
strong_gens_distr.append([])
if y is False:
# if a new strong generator is found, update the
# data structures and start over
h = _af_new(h)
for l in range(i + 1, j):
strong_gens_distr[l].append(h)
transversals[l] =\
dict(_orbit_transversal(degree, strong_gens_distr[l],
_base[l], pairs=True, af=True))
orbs[l] = list(transversals[l])
i = j - 1
# continue main loop using the flag
continue_i = True
if continue_i is True:
break
if continue_i is True:
break
if continue_i is True:
continue
i -= 1
# build the strong generating set
strong_gens = list(uniq(i for gens in strong_gens_distr for i in gens))
return _base, strong_gens
def schreier_sims_random(self, base=None, gens=None, consec_succ=10,
_random_prec=None):
r"""Randomized Schreier-Sims algorithm.
The randomized Schreier-Sims algorithm takes the sequence ``base``
and the generating set ``gens``, and extends ``base`` to a base, and
``gens`` to a strong generating set relative to that base with
probability of a wrong answer at most ``2^{-consec\_succ}``,
provided the random generators are sufficiently random.
Parameters
==========
base
The sequence to be extended to a base.
gens
The generating set to be extended to a strong generating set.
consec_succ
The parameter defining the probability of a wrong answer.
_random_prec
An internal parameter used for testing purposes.
Returns
=======
(base, strong_gens)
``base`` is the base and ``strong_gens`` is the strong generating
set relative to it.
Examples
========
>>> from diofant.combinatorics.testutil import _verify_bsgs
>>> S = SymmetricGroup(5)
>>> base, strong_gens = S.schreier_sims_random(consec_succ=5)
>>> _verify_bsgs(S, base, strong_gens) # doctest: +SKIP
True
Notes
=====
The algorithm is described in detail in [1], pp. 97-98. It extends
the orbits ``orbs`` and the permutation groups ``stabs`` to
basic orbits and basic stabilizers for the base and strong generating
set produced in the end.
The idea of the extension process
is to "sift" random group elements through the stabilizer chain
and amend the stabilizers/orbits along the way when a sift
is not successful.
The helper function ``_strip`` is used to attempt
to decompose a random group element according to the current
state of the stabilizer chain and report whether the element was
fully decomposed (successful sift) or not (unsuccessful sift). In
the latter case, the level at which the sift failed is reported and
used to amend ``stabs``, ``base``, ``gens`` and ``orbs`` accordingly.
The halting condition is for ``consec_succ`` consecutive successful
sifts to pass. This makes sure that the current ``base`` and ``gens``
        form a BSGS with probability at least ``1 - 2^{-consec\_succ}``.
See Also
========
schreier_sims
"""
if base is None:
base = []
if gens is None:
gens = self.generators
base_len = len(base)
n = self.degree
# make sure no generator fixes all base points
for gen in gens:
if all(gen(x) == x for x in base):
new = 0
while gen._array_form[new] == new:
new += 1
base.append(new)
base_len += 1
# distribute generators according to basic stabilizers
strong_gens_distr = _distribute_gens_by_base(base, gens)
# initialize the basic stabilizers, basic transversals and basic orbits
transversals = {}
orbs = {}
for i in range(base_len):
transversals[i] = dict(_orbit_transversal(n, strong_gens_distr[i],
base[i], pairs=True))
orbs[i] = list(transversals[i])
# initialize the number of consecutive elements sifted
c = 0
# start sifting random elements while the number of consecutive sifts
# is less than consec_succ
while c < consec_succ:
if _random_prec is None:
g = self.random_pr()
else:
g = _random_prec['g'].pop()
h, j = _strip(g, base, orbs, transversals)
y = True
# determine whether a new base point is needed
if j <= base_len:
y = False
elif not h.is_Identity:
y = False
moved = 0
while h(moved) == moved:
moved += 1
base.append(moved)
base_len += 1
strong_gens_distr.append([])
# if the element doesn't sift, amend the strong generators and
# associated stabilizers and orbits
if y is False:
for l in range(1, j):
strong_gens_distr[l].append(h)
transversals[l] = dict(_orbit_transversal(n,
strong_gens_distr[l], base[l], pairs=True))
orbs[l] = list(transversals[l])
c = 0
else:
c += 1
# build the strong generating set
strong_gens = strong_gens_distr[0][:]
for gen in strong_gens_distr[1]:
if gen not in strong_gens:
strong_gens.append(gen)
return base, strong_gens
def schreier_vector(self, alpha):
"""Computes the schreier vector for ``alpha``.
The Schreier vector efficiently stores information
about the orbit of ``alpha``. It can later be used to quickly obtain
elements of the group that send ``alpha`` to a particular element
in the orbit. Notice that the Schreier vector depends on the order
in which the group generators are listed. For a definition, see [3].
Since list indices start from zero, we adopt the convention to use
"None" instead of 0 to signify that an element doesn't belong
to the orbit.
For the algorithm and its correctness, see [2], pp.78-80.
Examples
========
>>> a = Permutation([2, 4, 6, 3, 1, 5, 0])
>>> b = Permutation([0, 1, 3, 5, 4, 6, 2])
>>> G = PermutationGroup([a, b])
>>> G.schreier_vector(0)
[-1, None, 0, 1, None, 1, 0]
See Also
========
orbit
"""
n = self.degree
v = [None]*n
v[alpha] = -1
orb = [alpha]
used = [False]*n
used[alpha] = True
gens = self.generators
r = len(gens)
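        # breadth-first search over the orbit of alpha: v[point] records the
        # index of the generator used to reach that point, and -1 marks alpha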
for b in orb:
for i in range(r):
temp = gens[i]._array_form[b]
if used[temp] is False:
orb.append(temp)
used[temp] = True
v[temp] = i
return v
def stabilizer(self, alpha):
r"""Return the stabilizer subgroup of ``alpha``.
The stabilizer of ``\alpha`` is the group ``G_\alpha =
\{g \in G | g(\alpha) = \alpha\}``.
For a proof of correctness, see [1], p.79.
Examples
========
>>> Permutation.print_cyclic = True
>>> G = DihedralGroup(6)
>>> G.stabilizer(5)
PermutationGroup([
Permutation(5)(0, 4)(1, 3),
Permutation(5)])
See Also
========
orbit
"""
return PermGroup(_stabilizer(self._degree, self._generators, alpha))
@property
def strong_gens(self):
r"""Return a strong generating set from the Schreier-Sims algorithm.
A generating set ``S = \{g_1, g_2, ..., g_t\}`` for a permutation group
``G`` is a strong generating set relative to the sequence of points
(referred to as a "base") ``(b_1, b_2, ..., b_k)`` if, for
``1 \leq i \leq k`` we have that the intersection of the pointwise
stabilizer ``G^{(i+1)} := G_{b_1, b_2, ..., b_i}`` with ``S`` generates
the pointwise stabilizer ``G^{(i+1)}``. The concepts of a base and
strong generating set and their applications are discussed in depth
in [1], pp. 87-89 and [2], pp. 55-57.
Examples
========
>>> D = DihedralGroup(4)
>>> D.strong_gens
[Permutation(0, 1, 2, 3), Permutation(0, 3)(1, 2), Permutation(1, 3)]
>>> D.base
[0, 1]
See Also
========
base, basic_transversals, basic_orbits, basic_stabilizers
"""
if self._strong_gens == []:
self.schreier_sims()
return self._strong_gens
def subgroup_search(self, prop, base=None, strong_gens=None, tests=None,
init_subgroup=None):
"""Find the subgroup of all elements satisfying the property ``prop``.
This is done by a depth-first search with respect to base images that
uses several tests to prune the search tree.
Parameters
==========
prop
The property to be used. Has to be callable on group elements
and always return ``True`` or ``False``. It is assumed that
all group elements satisfying ``prop`` indeed form a subgroup.
base
A base for the supergroup.
strong_gens
A strong generating set for the supergroup.
tests
A list of callables of length equal to the length of ``base``.
These are used to rule out group elements by partial base images,
so that ``tests[l](g)`` returns False if the element ``g`` is known
        not to satisfy prop based on where g sends the first ``l + 1`` base
points.
init_subgroup
if a subgroup of the sought group is
known in advance, it can be passed to the function as this
parameter.
Returns
=======
res
The subgroup of all elements satisfying ``prop``. The generating
set for this group is guaranteed to be a strong generating set
relative to the base ``base``.
Examples
========
>>> from diofant.combinatorics.testutil import _verify_bsgs
>>> S = SymmetricGroup(7)
>>> def prop_even(x):
... return x.is_even
>>> base, strong_gens = S.schreier_sims_incremental()
>>> G = S.subgroup_search(prop_even, base=base, strong_gens=strong_gens)
>>> G.is_subgroup(AlternatingGroup(7))
True
>>> _verify_bsgs(G, base, G.generators)
True
Notes
=====
        This function is extremely lengthy and complicated and will require
some careful attention. The implementation is described in
[1], pp. 114-117, and the comments for the code here follow the lines
of the pseudocode in the book for clarity.
The complexity is exponential in general, since the search process by
itself visits all members of the supergroup. However, there are a lot
of tests which are used to prune the search tree, and users can define
their own tests via the ``tests`` parameter, so in practice, and for
some computations, it's not terrible.
A crucial part in the procedure is the frequent base change performed
(this is line 11 in the pseudocode) in order to obtain a new basic
        stabilizer. The book mentions that this can be done by using
``.baseswap(...)``, however the current implementation uses a more
straightforward way to find the next basic stabilizer - calling the
function ``.stabilizer(...)`` on the previous basic stabilizer.
"""
# initialize BSGS and basic group properties
def get_reps(orbits):
# get the minimal element in the base ordering
return [min(orbit, key=lambda x: base_ordering[x])
for orbit in orbits]
def update_nu(l):
temp_index = len(basic_orbits[l]) + 1 -\
len(res_basic_orbits_init_base[l])
# this corresponds to the element larger than all points
if temp_index >= len(sorted_orbits[l]):
nu[l] = base_ordering[degree]
else:
nu[l] = sorted_orbits[l][temp_index]
if base is None:
base, strong_gens = self.schreier_sims_incremental()
base_len = len(base)
degree = self.degree
identity = _af_new(list(range(degree)))
base_ordering = _base_ordering(base, degree)
# add an element larger than all points
base_ordering.append(degree)
# add an element smaller than all points
base_ordering.append(-1)
# compute BSGS-related structures
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
basic_orbits, transversals = _orbits_transversals_from_bsgs(base,
strong_gens_distr)
# handle subgroup initialization and tests
if init_subgroup is None:
init_subgroup = PermutationGroup([identity])
if tests is None:
def trivial_test(x):
return True
tests = []
for i in range(base_len):
tests.append(trivial_test)
# line 1: more initializations.
res = init_subgroup
f = base_len - 1
l = base_len - 1
# line 2: set the base for K to the base for G
res_base = base[:]
# line 3: compute BSGS and related structures for K
res_base, res_strong_gens = res.schreier_sims_incremental(
base=res_base)
res_strong_gens_distr = _distribute_gens_by_base(res_base,
res_strong_gens)
res_generators = res.generators
res_basic_orbits_init_base = \
[_orbit(degree, res_strong_gens_distr[i], res_base[i])
for i in range(base_len)]
# initialize orbit representatives
orbit_reps = [None]*base_len
# line 4: orbit representatives for f-th basic stabilizer of K
orbits = _orbits(degree, res_strong_gens_distr[f])
orbit_reps[f] = get_reps(orbits)
# line 5: remove the base point from the representatives to avoid
# getting the identity element as a generator for K
orbit_reps[f].remove(base[f])
# line 6: more initializations
c = [0]*base_len
u = [identity]*base_len
sorted_orbits = [None]*base_len
for i in range(base_len):
sorted_orbits[i] = basic_orbits[i][:]
sorted_orbits[i].sort(key=lambda point: base_ordering[point])
# line 7: initializations
mu = [None]*base_len
nu = [None]*base_len
# this corresponds to the element smaller than all points
mu[l] = degree + 1
update_nu(l)
# initialize computed words
computed_words = [identity]*base_len
# line 8: main loop
while True:
# apply all the tests
while l < base_len - 1 and \
computed_words[l](base[l]) in orbit_reps[l] and \
base_ordering[mu[l]] < \
base_ordering[computed_words[l](base[l])] < \
base_ordering[nu[l]] and \
tests[l](computed_words):
# line 11: change the (partial) base of K
new_point = computed_words[l](base[l])
res_base[l] = new_point
new_stab_gens = _stabilizer(degree, res_strong_gens_distr[l],
new_point)
res_strong_gens_distr[l + 1] = new_stab_gens
# line 12: calculate minimal orbit representatives for the
# l+1-th basic stabilizer
orbits = _orbits(degree, new_stab_gens)
orbit_reps[l + 1] = get_reps(orbits)
# line 13: amend sorted orbits
l += 1
temp_orbit = [computed_words[l - 1](point) for point
in basic_orbits[l]]
temp_orbit.sort(key=lambda point: base_ordering[point])
sorted_orbits[l] = temp_orbit
                # lines 14 and 15: update the variables used in the minimality tests
new_mu = degree + 1
for i in range(l):
if base[l] in res_basic_orbits_init_base[i]:
candidate = computed_words[i](base[i])
if base_ordering[candidate] > base_ordering[new_mu]:
new_mu = candidate
mu[l] = new_mu
update_nu(l)
# line 16: determine the new transversal element
c[l] = 0
temp_point = sorted_orbits[l][c[l]]
gamma = computed_words[l - 1]._array_form.index(temp_point)
u[l] = transversals[l][gamma]
# update computed words
computed_words[l] = rmul(computed_words[l - 1], u[l])
# lines 17 & 18: apply the tests to the group element found
g = computed_words[l]
temp_point = g(base[l])
if l == base_len - 1 and \
base_ordering[mu[l]] < \
base_ordering[temp_point] < base_ordering[nu[l]] and \
temp_point in orbit_reps[l] and \
tests[l](computed_words) and \
prop(g):
# line 19: reset the base of K
res_generators.append(g)
res_base = base[:]
# line 20: recalculate basic orbits (and transversals)
res_strong_gens.append(g)
res_strong_gens_distr = _distribute_gens_by_base(res_base,
res_strong_gens)
res_basic_orbits_init_base = \
[_orbit(degree, res_strong_gens_distr[i], res_base[i])
for i in range(base_len)]
# line 21: recalculate orbit representatives
# line 22: reset the search depth
orbit_reps[f] = get_reps(orbits)
l = f
# line 23: go up the tree until in the first branch not fully
# searched
while l >= 0 and c[l] == len(basic_orbits[l]) - 1:
l = l - 1
# line 24: if the entire tree is traversed, return K
if l == -1:
return PermutationGroup(res_generators)
# lines 25-27: update orbit representatives
if l < f:
# line 26
f = l
c[l] = 0
# line 27
temp_orbits = _orbits(degree, res_strong_gens_distr[f])
orbit_reps[f] = get_reps(temp_orbits)
# line 28: update variables used for minimality testing
mu[l] = degree + 1
temp_index = len(basic_orbits[l]) + 1 - \
len(res_basic_orbits_init_base[l])
if temp_index >= len(sorted_orbits[l]):
nu[l] = base_ordering[degree]
else:
nu[l] = sorted_orbits[l][temp_index]
# line 29: set the next element from the current branch and update
            # accordingly
c[l] += 1
if l == 0:
gamma = sorted_orbits[l][c[l]]
else:
gamma = computed_words[l - 1]._array_form.index(sorted_orbits[l][c[l]])
u[l] = transversals[l][gamma]
if l == 0:
computed_words[l] = u[l]
else:
computed_words[l] = rmul(computed_words[l - 1], u[l])
@property
def transitivity_degree(self):
r"""Compute the degree of transitivity of the group.
A permutation group ``G`` acting on ``\Omega = \{0, 1, ..., n-1\}`` is
``k``-fold transitive, if, for any k points
``(a_1, a_2, ..., a_k)\in\Omega`` and any k points
``(b_1, b_2, ..., b_k)\in\Omega`` there exists ``g\in G`` such that
``g(a_1)=b_1, g(a_2)=b_2, ..., g(a_k)=b_k``
The degree of transitivity of ``G`` is the maximum ``k`` such that
``G`` is ``k``-fold transitive. ([8])
Examples
========
>>> a = Permutation([1, 2, 0])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.transitivity_degree
3
See Also
========
is_transitive, orbit
"""
if self._transitivity_degree is None:
n = self.degree
G = self
# if G is k-transitive, a tuple (a_0,..,a_k)
# can be brought to (b_0,...,b_(k-1), b_k)
# where b_0,...,b_(k-1) are fixed points;
# consider the group G_k which stabilizes b_0,...,b_(k-1)
# if G_k is transitive on the subset excluding b_0,...,b_(k-1)
# then G is (k+1)-transitive
for i in range(n):
orb = G.orbit(i)
if len(orb) != n - i:
self._transitivity_degree = i
return i
G = G.stabilizer(i)
self._transitivity_degree = n
return n
else:
return self._transitivity_degree
def _orbit(degree, generators, alpha, action='tuples'):
r"""Compute the orbit of alpha ``\{g(\alpha) | g \in G\}`` as a set.
The time complexity of the algorithm used here is ``O(|Orb|*r)`` where
``|Orb|`` is the size of the orbit and ``r`` is the number of generators of
the group. For a more detailed analysis, see [1], p.78, [2], pp. 19-21.
Here alpha can be a single point, or a list of points.
If alpha is a single point, the ordinary orbit is computed.
if alpha is a list of points, there are three available options:
'union' - computes the union of the orbits of the points in the list
'tuples' - computes the orbit of the list interpreted as an ordered
tuple under the group action ( i.e., g((1, 2, 3)) = (g(1), g(2), g(3)) )
    'sets' - computes the orbit of the list interpreted as a set
Examples
========
>>> a = Permutation([1, 2, 0, 4, 5, 6, 3])
>>> G = PermutationGroup([a])
>>> _orbit(G.degree, G.generators, 0)
{0, 1, 2}
>>> _orbit(G.degree, G.generators, [0, 4], 'union')
{0, 1, 2, 3, 4, 5, 6}
See Also
========
orbit, orbit_transversal
"""
if not hasattr(alpha, '__getitem__'):
alpha = [alpha]
gens = [x._array_form for x in generators]
if len(alpha) == 1 or action == 'union':
orb = alpha
used = [False]*degree
for el in alpha:
used[el] = True
for b in orb:
for gen in gens:
temp = gen[b]
if used[temp] is False:
orb.append(temp)
used[temp] = True
return set(orb)
elif action == 'tuples':
alpha = tuple(alpha)
orb = [alpha]
used = {alpha}
for b in orb:
for gen in gens:
temp = tuple(gen[x] for x in b)
if temp not in used:
orb.append(temp)
used.add(temp)
return set(orb)
elif action == 'sets':
alpha = frozenset(alpha)
orb = [alpha]
used = {alpha}
for b in orb:
for gen in gens:
temp = frozenset(gen[x] for x in b)
if temp not in used:
orb.append(temp)
used.add(temp)
return {tuple(x) for x in orb}
def _orbits(degree, generators):
"""Compute the orbits of G.
    Returns a list of sets, one set of points per orbit.
Examples
========
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> _orbits(a.size, [a, b])
[{0, 1, 2}]
"""
orbs = []
sorted_I = list(range(degree))
I = set(sorted_I)
while I:
i = sorted_I[0]
orb = _orbit(degree, generators, i)
orbs.append(orb)
# remove all indices that are in this orbit
I -= orb
sorted_I = [i for i in sorted_I if i not in orb]
return orbs
def _orbit_transversal(degree, generators, alpha, pairs, af=False):
r"""Computes a transversal for the orbit of ``alpha`` as a set.
    generators : generators of the group ``G``
For a permutation group ``G``, a transversal for the orbit
``Orb = \{g(\alpha) | g \in G\}`` is a set
``\{g_\beta | g_\beta(\alpha) = \beta\}`` for ``\beta \in Orb``.
Note that there may be more than one possible transversal.
If ``pairs`` is set to ``True``, it returns the list of pairs
    ``(\beta, g_\beta)``. For a proof of correctness, see [1], p.79.
    If ``af`` is True, the transversal elements are given in array form.
Examples
========
>>> Permutation.print_cyclic = True
>>> G = DihedralGroup(6)
>>> _orbit_transversal(G.degree, G.generators, 0, False)
[Permutation(5),
Permutation(0, 1, 2, 3, 4, 5),
Permutation(0, 5)(1, 4)(2, 3),
Permutation(0, 2, 4)(1, 3, 5),
Permutation(5)(0, 4)(1, 3),
Permutation(0, 3)(1, 4)(2, 5)]
"""
tr = [(alpha, list(range(degree)))]
used = [False]*degree
used[alpha] = True
gens = [x._array_form for x in generators]
for x, px in tr:
for gen in gens:
temp = gen[x]
if used[temp] is False:
tr.append((temp, _af_rmul(gen, px)))
used[temp] = True
if pairs:
if not af:
tr = [(x, _af_new(y)) for x, y in tr]
return tr
if af:
return [y for _, y in tr]
return [_af_new(y) for _, y in tr]
def _stabilizer(degree, generators, alpha):
r"""Return the stabilizer subgroup of ``alpha``.
The stabilizer of ``\alpha`` is the group ``G_\alpha =
\{g \in G | g(\alpha) = \alpha\}``.
For a proof of correctness, see [1], p.79.
    degree : degree of G
    generators : generators of G
Examples
========
>>> Permutation.print_cyclic = True
>>> G = DihedralGroup(6)
>>> _stabilizer(G.degree, G.generators, 5)
[Permutation(5)(0, 4)(1, 3), Permutation(5)]
See Also
========
orbit
"""
orb = [alpha]
table = {alpha: list(range(degree))}
table_inv = {alpha: list(range(degree))}
used = [False]*degree
used[alpha] = True
gens = [x._array_form for x in generators]
stab_gens = []
for b in orb:
for gen in gens:
temp = gen[b]
if used[temp] is False:
gen_temp = _af_rmul(gen, table[b])
orb.append(temp)
table[temp] = gen_temp
table_inv[temp] = _af_invert(gen_temp)
used[temp] = True
else:
schreier_gen = _af_rmuln(table_inv[temp], gen, table[b])
if schreier_gen not in stab_gens:
stab_gens.append(schreier_gen)
return [_af_new(x) for x in stab_gens]
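# Note added for clarity: the construction above is Schreier's lemma in action.
# For every orbit point ``b`` and generator ``gen``, composing the transversal
# element for b (alpha -> b), the generator (b -> gen(b)) and the inverse
# transversal element for gen(b) (gen(b) -> alpha) gives an element that fixes
# ``alpha``; these "Schreier generators" together generate the stabilizer.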
PermGroup = PermutationGroup
| bsd-3-clause | 75,722,258,632,304,220 | 33.023178 | 106 | 0.514781 | false |
lkmnds/dickord | console.py | 1 | 1195 | import logging
import traceback
import asyncio
import requests
import dickord
route = dickord.route
import config
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('userbot')
benis = dickord.Dicker(user_pass=('luna@localhost', 'fuck'))
@benis.sensor('READY')
async def ready_for_work(payload):
u = benis.user
logger.info(f"We are ready! name = {u.username}#{u.discriminator}, id = {u.id}")
logger.info("Requesting channel")
req = requests.get(route('channels/150501171201'), headers=benis.http.headers)
print(req)
print(req.json())
await asyncio.sleep(1)
logger.info('aaaa')
await benis.select_ass('portal 2 pinball')
await asyncio.sleep(0.5)
logger.info('sending typing')
req = requests.post(route('channels/150501171201/typing'), headers=benis.http.headers)
print(req)
print(req.text)
await asyncio.sleep(1)
logger.info('meme')
req = await benis.http.insert_benis('channels/150501171201/messages', \
{'content': 'meme'})
print(req)
print(req.text)
res, err_msg = benis.infinite_insert()
if not res:
print(f"Errored somewhere: {err_msg}")
else:
print("Exited with success")
| mit | -1,654,092,541,386,538,200 | 24.425532 | 90 | 0.68954 | false |
samuelefiorini/cgm-tools | scripts/run_kf.py | 1 | 2960 | """KF experiments development."""
from cgmtools import utils
from cgmtools import plotting
from cgmtools.forecast import kf
import datetime
import numpy as np
import pickle as pkl
###############################################################################
# Load full data set from pickle file (see data_wrangler.py)
dfs_full = pkl.load(open('../../data/dfs_py3.pkl', 'rb'))
# Keep only patients with more than `THRESHOLD` days of CGM acquisition
_threshold = datetime.timedelta(days=3.5) # default
dfs = utils.filter_patients(dfs_full, _threshold)
burn_in = 300 # burn-in samples used to learn the best order via cv
n_splits = 15
ph = 18 # prediction horizon
# State-space model:
# transition matrix (double integration model)
F = np.array([[2, -1], [1, 0]])
# measures matrix
H = np.array([1, 0])
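# Note (added): with state [level_t, level_{t-1}], the "double integration"
# transition F = [[2, -1], [1, 0]] encodes level_{t+1} = 2*level_t - level_{t-1},
# i.e. a locally linear trend, and H = [1, 0] means only the current level is
# observed.  The process variance lambda2 (in Q) and measurement variance
# sigma2 (R) are chosen below by the per-patient grid search.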
# Get patients list
patients = list(dfs.keys())
for idx in patients:
df = utils.gluco_extract(dfs[idx], return_df=True)
# Learn the best order via cv
# lambda2_range = np.logspace(-12, -4, 10)
lambda2_range = np.logspace(-12, -4, 3)
sigma2_range = np.linspace(1, 40, 3)
# sigma2_range = np.linspace(1, 40, 10)
out = kf.grid_search(df, lambda2_range, sigma2_range, burn_in=burn_in,
n_splits=15, F=F, H=H,
return_mean_vld_error=True,
return_initial_state_mean=True,
return_initial_state_covariance=True,
verbose=False)
lambda2, sigma2, mse, X0, P0 = out
print("[{}]:\tBest lambda {:2.12f}, sigma {:2.0f}".format(idx, lambda2,
sigma2))
Q = np.array([[lambda2, 0], [0, 0]]) # transition_covariance
R = sigma2 # observation (co)variance
df = df.iloc[burn_in:] # don't mix-up training/test
_kf = kf.cgmkalmanfilter(F=F, Q=Q, R=R, X0=X0, P0=P0)
errs, forecast = kf.online_forecast(df, _kf, H, ph=18, lambda2=lambda2,
sigma2=sigma2, verbose=True)
# Save results reports
error_summary = utils.forecast_report(errs)
print(error_summary)
# import matplotlib.pyplot as plt
# plotting.cgm(df, forecast['ts'], title='Patient '+idx,
# savefig=False)
# plotting.residuals(df, forecast['ts'], skip_first=burn_in,
# skip_last=ph, title='Patient '+idx,
# savefig=False)
# plt.show()
# break
# # dump it into a pkl
pkl.dump(error_summary, open(idx+'.pkl', 'wb'))
try:
# Plot signal and its fit
plotting.cgm(df, forecast['ts'], title='Patient '+idx,
savefig=True)
# Plot residuals
plotting.residuals(df, forecast['ts'], skip_first=burn_in,
skip_last=ph, title='Patient '+idx,
savefig=True)
except:
print("Plotting failed for patient {}".format(idx))
| gpl-3.0 | 4,648,840,799,209,392,000 | 35.54321 | 79 | 0.565203 | false |
ftomassetti/plaid | app/views/decorators.py | 1 | 1552 | from functools import wraps
from flask import render_template, request, url_for
from app.models import PatchState
def filterable(f):
"""Filter a query"""
@wraps(f)
def wrapped(*args, **kwargs):
d = f(*args, **kwargs)
q = d['query']
state = request.args.get('state', None, type=str)
if state:
q = q.filter_by(state=PatchState.from_string(state))
# add more filters later
d['query'] = q
return d
return wrapped
def paginable(pagename, max_per_page=50):
"""Paginate a query"""
def decorator(f):
@wraps(f)
def wrapped(*args, **kwargs):
d = f(*args, **kwargs)
q = d['query']
page = request.args.get('page', 1, type=int)
per_page = request.args.get('per_page', max_per_page, type=int)
p = q.paginate(page, per_page, False)
if not p.items:
d['page'] = None
d[pagename] = q.paginate(1, per_page, False)
else:
d[pagename] = p
return d
return wrapped
return decorator
def render(template):
"""render a query"""
def decorator(f):
@wraps(f)
def wrapped(*args, **kwargs):
d = f(*args, **kwargs)
def endpoint(**up):
kwargs.update(up)
return url_for(request.endpoint, **kwargs)
d['endpoint'] = endpoint
return render_template(template, **d)
return wrapped
return decorator
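# Illustrative usage sketch (added; the view, model and template names are
# assumptions, not part of this module).  The three decorators are meant to be
# stacked so that the innermost one receives the dict holding the raw query:
#
#     @render('patches.html')
#     @paginable('patches')
#     @filterable
#     def patch_list():
#         return {'query': Patch.query}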
| gpl-2.0 | -1,112,586,424,140,818,000 | 24.442623 | 75 | 0.52384 | false |
citizenlabsgr/voter-engagement | api/core/management/commands/gendata.py | 1 | 4551 | import random
from contextlib import suppress
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.core.management.base import BaseCommand
from django.db.utils import IntegrityError
from faker import Faker
from api.elections.models import Election
from api.voters.models import Status, Voter
User = get_user_model()
fake = Faker()
def p(value):
    """Return True with probability ``value`` (for 0 <= value <= 1)."""
    return value > random.random()
class Command(BaseCommand):
help = "Generate data for automated testing and manual review"
def add_arguments(self, parser):
parser.add_argument(
'emails',
nargs='?',
type=lambda value: value.split(','),
default=[],
)
def handle(self, *, emails, **_options): # pylint: disable=arguments-differ
self.update_site()
admin = self.get_or_create_superuser()
users = [self.get_or_create_user(email) for email in emails]
self.generate_review_data(admin, *users)
def update_site(self):
site = Site.objects.get(id=1)
site.name = f"Voter Engagement {settings.BASE_NAME}"
site.domain = settings.BASE_DOMAIN
site.save()
self.stdout.write(f"Updated site: {site}")
def get_or_create_superuser(self, username="admin", password="password"):
try:
user = User.objects.create_superuser(
username=username,
email=f"{username}@{settings.BASE_DOMAIN}",
password=password,
)
self.stdout.write(f"Created new superuser: {user}")
except IntegrityError:
user = User.objects.get(username=username)
self.stdout.write(f"Found existing superuser: {user}")
return user
def get_or_create_user(self, base_email, password="password"):
username, email_domain = base_email.split('@')
user, created = User.objects.get_or_create(username=username)
user.email = f"{username}+{settings.BASE_NAME}@{email_domain}"
user.set_password(password)
user.save()
if created:
self.stdout.write(f"Created new user: {user}")
else:
self.stdout.write(f"Update user: {user}")
return user
def generate_review_data(self, *_users):
while User.objects.count() < 10:
with suppress(IntegrityError):
username = fake.name().replace(' ', '')
user = User.objects.create(
username=username.lower() if p(0.30) else username,
email=fake.email(),
first_name=fake.first_name(),
last_name=fake.last_name(),
)
self.stdout.write(f"Created user: {user}")
while Election.objects.count() < 5:
with suppress(IntegrityError):
name, date = self.fake_election()
election = Election.objects.create(
name=name,
date=date,
)
self.stdout.write(f"Created election: {election}")
while Voter.objects.count() < 50:
with suppress(IntegrityError):
voter = Voter.objects.create(
first_name=fake.first_name(),
last_name=fake.last_name(),
birth_date=fake.date(),
zip_code=fake.zipcode(),
email=fake.email(),
)
self.stdout.write(f"Created voter: {voter}")
while Status.objects.count() < 50:
with suppress(IntegrityError):
status = Status.objects.create(
voter=self.random_voter(),
election=self.random_election(),
registered=True if p(0.90) else None,
read_sample_ballot=True if p(0.80) else None,
located_polling_location=True if p(0.70) else None,
voted=True if p(0.60) else None,
)
self.stdout.write(f"Created status: {status}")
@staticmethod
def fake_election():
date = fake.future_date(end_date="+2y")
kind = random.choice(["General", "Midterm", "Special"])
return f"{date.year} {kind} Election", date
@staticmethod
def random_voter():
return random.choice(Voter.objects.all())
@staticmethod
def random_election():
return random.choice(Election.objects.all())
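    # Example invocation (illustrative; the email list is optional and
    # comma-separated):
    #
    #     python manage.py gendata alice@example.com,bob@example.com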
| mit | -1,028,694,120,705,746,800 | 32.711111 | 80 | 0.566249 | false |
RCPRG-ros-pkg/control_subsystem | common/set_big_stiffness.py | 1 | 5158 | #!/usr/bin/env python
# Copyright (c) 2014, Robot Control and Pattern Recognition Group, Warsaw University of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Warsaw University of Technology nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYright HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import roslib
roslib.load_manifest('velma_controller')
import rospy
from geometry_msgs.msg import *
from cartesian_trajectory_msgs.msg import *
import actionlib
from actionlib_msgs.msg import *
import tf
import tf_conversions.posemath as pm
import PyKDL
def moveImpedance(k, t):
global action_impedance_client
action_impedance_goal = CartesianImpedanceGoal()
action_impedance_goal.trajectory.header.stamp = rospy.Time.now() + rospy.Duration(0.2)
action_impedance_goal.trajectory.points.append(CartesianImpedanceTrajectoryPoint(
rospy.Duration(t),
CartesianImpedance(k,Wrench(Vector3(0.7, 0.7, 0.7),Vector3(0.7, 0.7, 0.7)))))
action_impedance_client.send_goal(action_impedance_goal)
def moveWrist( wrist_frame, tool_frame, t, max_wrench):
global action_trajectory_client
# we are moving the tool, so: T_B_Wd*T_W_T
wrist_pose = pm.toMsg(wrist_frame*tool_frame)
action_trajectory_goal = CartesianTrajectoryGoal()
action_trajectory_goal.trajectory.header.stamp = rospy.Time.now() + rospy.Duration(0.01)
action_trajectory_goal.trajectory.points.append(CartesianTrajectoryPoint(
rospy.Duration(t),
wrist_pose,
Twist()))
action_trajectory_goal.wrench_constraint = max_wrench
action_trajectory_client.send_goal(action_trajectory_goal)
def moveTool(tool_frame, t):
global action_tool_client
tool_pose = pm.toMsg(tool_frame)
action_tool_goal = CartesianTrajectoryGoal()
action_tool_goal.trajectory.header.stamp = rospy.Time.now()
action_tool_goal.trajectory.points.append(CartesianTrajectoryPoint(
rospy.Duration(t),
tool_pose,
Twist()))
action_tool_client.send_goal(action_tool_goal)
if __name__ == '__main__':
a = []
for arg in sys.argv:
a.append(arg)
    if (len(a) > 1) and ((a[1] == "left") or (a[1] == "right")):
prefix = a[1]
else:
print "Usage: %s prefix"%a[0]
exit(0)
rospy.init_node('impedance_riser')
listener = tf.TransformListener();
action_impedance_client = actionlib.SimpleActionClient("/" + prefix + "_arm/cartesian_impedance", CartesianImpedanceAction)
action_impedance_client.wait_for_server()
action_trajectory_client = actionlib.SimpleActionClient("/" + prefix + "_arm/cartesian_trajectory", CartesianTrajectoryAction)
action_trajectory_client.wait_for_server()
action_tool_client = actionlib.SimpleActionClient("/" + prefix + "_arm/tool_trajectory", CartesianTrajectoryAction)
action_tool_client.wait_for_server()
rospy.sleep(1.0)
# save current wrist position
time_now = rospy.Time.now() - rospy.Duration(1.0)
listener.waitForTransform('torso_base', prefix+'_arm_7_link', time_now, rospy.Duration(4.0))
pose = listener.lookupTransform('torso_base', prefix+'_arm_7_link', time_now)
T_B_W = pm.fromTf(pose)
T_W_T = PyKDL.Frame() # tool transformation
print "setting the tool to %s relative to wrist frame"%(T_W_T)
# move both tool position and wrist position - the gripper holds its position
print "moving wrist"
# we assume that during the initialization there are no contact forces, so we limit the wrench
moveWrist( T_B_W, T_W_T, 2.0, Wrench(Vector3(20, 20, 20), Vector3(4, 4, 4)) )
print "moving tool"
moveTool( T_W_T, 2.0 )
rospy.sleep(2.0)
# change the stiffness
print "changing stiffness for door approach"
moveImpedance(Wrench(Vector3(1200.0, 1200.0, 1200.0), Vector3(300.0, 300.0, 300.0)), 4.0)
rospy.sleep(4.0)
| bsd-3-clause | 521,373,026,289,866,900 | 40.596774 | 130 | 0.722373 | false |
boada/planckClusters | MOSAICpipe/bpz-1.99.3/priors/prior_hdfn_gen.py | 1 | 2464 | from __future__ import division
from past.utils import old_div
from bpz_tools import *
def function(z, m, nt):
"""HDFN prior from Benitez 2000
for Ellipticals, Spirals, and Irregular/Starbursts
Returns an array pi[z[:],:nt]
The input magnitude is F814W AB
"""
global zt_at_a
nz = len(z)
momin_hdf = 20.
if m > 32.: m = 32.
if m < 20.: m = 20.
# nt Templates = nell Elliptical + nsp Spiral + nSB starburst
try: # nt is a list of 3 values
nell, nsp, nsb = nt
except: # nt is a single value
nell = 1 # 1 Elliptical in default template set
nsp = 2 # 2 Spirals in default template set
nsb = nt - nell - nsp # rest Irr/SB
nn = nell, nsp, nsb
nt = sum(nn)
# See Table 1 of Benitez00
a = 2.465, 1.806, 0.906
zo = 0.431, 0.390, 0.0626
km = 0.0913, 0.0636, 0.123
k_t = 0.450, 0.147
a = repeat(a, nn)
zo = repeat(zo, nn)
km = repeat(km, nn)
k_t = repeat(k_t, nn[:2])
# Fractions expected at m = 20:
# 35% E/S0
# 50% Spiral
# 15% Irr
fo_t = 0.35, 0.5
fo_t = old_div(fo_t, array(nn[:2]))
fo_t = repeat(fo_t, nn[:2])
#fo_t = [0.35, 0.5]
#fo_t.append(1 - sum(fo_t))
#fo_t = array(fo_t) / array(nn)
#fo_t = repeat(fo_t, nn)
#print 'a', a
#print 'zo', zo
#print 'km', km
#print 'fo_t', fo_t
#print 'k_t', k_t
dm = m - momin_hdf
zmt = clip(zo + km * dm, 0.01, 15.)
zmt_at_a = zmt**(a)
#We define z**a as global to keep it
#between function calls. That way it is
# estimated only once
try:
xxx[9] = 3
zt_at_a.shape
except NameError:
zt_at_a = power.outer(z, a)
#Morphological fractions
nellsp = nell + nsp
f_t = zeros((len(a), ), float)
f_t[:nellsp] = fo_t * exp(-k_t * dm)
f_t[nellsp:] = old_div((1. - add.reduce(f_t[:nellsp])), float(nsb))
#Formula:
#zm=zo+km*(m_m_min)
#p(z|T,m)=(z**a)*exp(-(z/zm)**a)
p_i = zt_at_a[:nz, :nt] * exp(-clip(
old_div(zt_at_a[:nz, :nt], zmt_at_a[:nt]), 0., 700.))
#This eliminates the very low level tails of the priors
norm = add.reduce(p_i[:nz, :nt], 0)
p_i[:nz, :nt] = where(
less(
old_div(p_i[:nz, :nt], norm[:nt]), old_div(1e-2, float(nz))), 0.,
old_div(p_i[:nz, :nt], norm[:nt]))
norm = add.reduce(p_i[:nz, :nt], 0)
p_i[:nz, :nt] = p_i[:nz, :nt] / norm[:nt] * f_t[:nt]
return p_i
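# Illustrative call (added; the grids are only an example): evaluating the
# prior for the default 6-template set (1 E/S0 + 2 Spiral + 3 Irr/SB) at
# F814W AB magnitude m = 24 over a redshift grid gives one column per template:
#
#     z = arange(0.01, 6.01, 0.01)
#     p = function(z, 24.0, (1, 2, 3))    # shape (len(z), 6)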
| mit | -1,657,587,623,260,758,800 | 27 | 77 | 0.520292 | false |
pjiangtw/HOPE | WishCplex/WISHCPLEX.py | 1 | 2949 | #----------------------------------------------------------------------------------------
# Copyright, 2013:
#
# Stefano Ermon - Cornell University , [email protected]
# Ashish Sabharwal - IBM Watson Research Center , [email protected]
#----------------------------------------------------------------------------------------
import sys
import math
import random
import os
import argparse
from WISHLogProcess import process_logs
from WISHLogProcess import process_logs_cplex_LB
from WISHLogProcess import process_logs_cplex_UB
# version number
__version__ = '1.0'
#########################################
# Usage Information:
# run "python WISH.py -h" for help
#########################################
parser = argparse.ArgumentParser(description='Estimate the partition function using the WISH algorithm and CPLEX for the optimization.')
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
parser.add_argument("infile", help="Graphical model (in UAI format)")
parser.add_argument("outfolder", help="Folder where logs are stored")
parser.add_argument('-alpha', '--alpha', type=float, help="Accuracy alpha", default=1.0)
parser.add_argument('-delta', '--delta', type=float, help="Failure probability delta", default=0.1)
parser.add_argument('-timeout', '--timeout', type=int, help="Timeout for each optimization instance (seconds)", default=10)
args = parser.parse_args()
print "Reading factor graph from " + args.infile
inputfile = open(args.infile, "r")
fileName, fileExtension = os.path.splitext(args.infile)
ind = 0
origNbrFactor = 0
origNbrVar = 0
for l in inputfile:
if not l.strip()=='':
ind = ind +1
if ind==2:
origNbrVar=int(l)
elif ind==3:
l = l.rstrip("\n")
elif ind==4: ## add xor cpt tabe
origNbrFactor = int(l)
elif ind>5:
break
print "Model with " + str(origNbrVar) + "variables and "+str(origNbrFactor) +" factors"
depth = origNbrVar
T = 7 #int(math.ceil(math.log(origNbrVar)*math.log(1.0/args.delta)/args.alpha))
print "Using " + str(T) +" samples per level"
os.system("mkdir "+args.outfolder)
for i in range(0,depth+1): ## main for loop
if i==0:
sampnum=1
else:
sampnum=T
for t in range(1,sampnum+1): ## main for loop
outfilenamelog = "%s.xor%d.loglen%d.%d.ILOGLUE.uai.LOG" % (os.path.basename(fileName) , i , 0 , t)
cmdline = ("timeout %d ./WH_cplex -paritylevel 1 -number %d -seed 10 %s > %s") % (args.timeout , i , args.infile , args.outfolder +"/"+ outfilenamelog)
os.system(cmdline)
## Parallel execution:
##
## assign this job to a separate core (a system dependent script is needed here)
## we provide an example based on Torque/PBS:
##
## os.system("qsub -v basedir="+basedir+",file="+infile+",level="+str(i)+",len="+str(0)+",outdir="+outdir+",sample="+str(t)+",timeout=900s"+" LaunchIloglue.sh")
process_logs_cplex_LB(args.outfolder)
process_logs_cplex_UB(args.outfolder)
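# Example invocation (illustrative; file names are placeholders):
#
#     python WISHCPLEX.py model.uai logs_model/ -alpha 1.0 -delta 0.1 -timeout 60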
| apache-2.0 | 4,981,883,023,722,491,000 | 32.511364 | 162 | 0.631061 | false |
dsedivec/ansible-plugins | filter_plugins/hash.py | 1 | 1283 | # Copyright 2013 Dale Sedivec
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ansible import errors
try:
import passlib.hash as passlib_hash
except ImportError:
passlib_hash = None
def hash(data, algorithm_name, **kwargs):
if not passlib_hash:
raise errors.AnsibleError(
"passlib must be installed to use the hash filter")
try:
algorithm = getattr(passlib_hash, algorithm_name)
except AttributeError:
raise errors.AnsibleError(
"passlib doesn't contain algorithm %r" % (algorithm_name,))
return algorithm.encrypt(data, **kwargs)
class FilterModule (object):
def filters(self):
return {"hash": hash}
| gpl-3.0 | 2,962,180,769,414,720,000 | 31.897436 | 71 | 0.71629 | false |
abigailStev/lag_spectra | simple_cross_spectra.py | 1 | 30460 | #!/usr/bin/env
"""
Read in two extracted light curves (interest band and reference band), split
them into segments, compute the power spectra per band and the cross spectrum
of each segment, average the cross spectrum over all segments, and compute the
frequency lags between the two bands.
Example call:
python simple_cross_spectra.py ./cygx1_i.lc ./cygx1_ref.lc -o "./cygx1"
Enter python simple_cross_spectra.py -h at the command line for help.
"""
from __future__ import print_function
from astropy.table import Table, Column
from astropy.io import fits
import numpy as np
from scipy import fftpack
import argparse
import subprocess
from datetime import datetime
__author__ = "Abigail Stevens <A.L.Stevens at uva.nl>"
__year__ = "2016"
class Band(object):
    """Hold one band's averaged quantities: raw power spectrum, mean count
    rate, rms, and Fourier frequencies."""
    def __init__(self, n_bins=8192, dt=0.0078125):
self.power = np.zeros(n_bins, dtype=np.float64)
self.mean_rate = 0.0
self.rms = 0.0
self.freq = fftpack.fftfreq(n_bins, d=dt)
################################################################################
def type_power_of_two(num):
"""
Check if an input is a power of 2 (1 <= num < 2147483648), as an argparse
type.
Parameters
----------
num : int
The number in question.
Returns
-------
n : int
The number in question, if it's a power of two
Raises
------
ArgumentTypeError if n isn't a power of two.
"""
n = int(num)
x = 2
assert n > 0
if n == 1:
return n
else:
while x <= n and x < 2147483648:
if n == x:
return n
x *= 2
message = "%d is not a power of two." % n
raise argparse.ArgumentTypeError(message)
################################################################################
def get_key_val(fits_file, ext, keyword):
"""
Get the value of a keyword from a FITS header. Keyword does not seem to be
case-sensitive.
Parameters
----------
fits_file : str
File name of the FITS file.
ext : int
The FITS extension in which to search for the given keyword.
keyword : str
The keyword for which you want the associated value.
Returns
-------
any type
Value of the given keyword.
Raises
------
IOError if the input file isn't actually a FITS file.
"""
ext = np.int8(ext)
assert (ext >= 0 and ext <= 2)
keyword = str(keyword)
try:
hdulist = fits.open(fits_file)
except IOError:
print("\tERROR: File does not exist: %s" % fits_file)
exit()
key_value = hdulist[ext].header[keyword]
hdulist.close()
return key_value
################################################################################
def raw_to_absrms(power, mean_rate, n_bins, dt, noisy=True):
"""
Normalize the power spectrum to absolute rms^2 normalization.
TODO: cite paper.
Parameters
----------
power : np.array of floats
The raw power at each Fourier frequency, as a 1-D or 2-D array.
Size = (n_bins) or (n_bins, detchans).
mean_rate : float
The mean count rate for the light curve, in cts/s.
n_bins : int
Number of bins per segment of light curve.
dt : float
Timestep between bins in n_bins, in seconds.
noisy : boolean
True if there is Poisson noise in the power spectrum (i.e., from real
data), False if there is no noise in the power spectrum (i.e.,
simulations without Poisson noise). Default is True.
Returns
-------
np.array of floats
The noise-subtracted power spectrum in absolute rms^2 units, in the
same size array as the input power.
"""
if noisy:
noise = 2.0 * mean_rate
else:
noise = 0.0
return power * (2.0 * dt / np.float(n_bins)) - noise
################################################################################
def var_and_rms(power, df):
"""
Computes the variance and rms (root mean square) of a power spectrum.
Assumes the negative-frequency powers have been removed. DOES NOT WORK ON
2-D POWER ARRAYS! Not sure why.
TODO: cite textbook or paper.
Parameters
----------
power : np.array of floats
1-D array (size = n_bins/2+1) of the raw power at each of the *positive*
Fourier frequencies.
df : float
The step size between Fourier frequencies.
Returns
-------
variance : float
The variance of the power spectrum.
rms : float
The rms of the power spectrum.
"""
variance = np.sum(power * df, axis=0)
rms = np.where(variance >= 0, np.sqrt(variance), np.nan)
return variance, rms
################################################################################
def cs_out(out_base, meta_dict, cs_avg, ci, ref):
"""
Saving header data, the cross spectrum, CoI power spectrum, and reference
band power spectrum to a FITS file to use in the program make_lags.py to get
cross-spectral lags. Cross spectra and power spectra are raw, as in un-
normalized.
Parameters
----------
out_base : str
The name the FITS file to write the cross spectrum and power spectra to,
for computing the lags.
meta_dict : dict
Dictionary of necessary meta-parameters for data analysis.
    cs_avg : np.array of complex numbers
        1-D array of the averaged cross spectrum (positive Fourier frequencies).
    ci : Band object
        The channel of interest light curve. Must already have freq, mean_rate,
        and power assigned.
    ref : Band object
        The reference band light curve. Must already have mean_rate, rms, and
        power assigned.
Returns
-------
nothing, but writes to the file "*_cs.fits"
"""
out_file = out_base + "_cs.fits"
out_dir = out_file[0:out_file.rfind("/")+1]
if len(out_dir) >= 2:
subprocess.call(['mkdir', '-p', out_dir])
print("Output sent to: %s" % out_file)
out_table = Table()
out_table.add_column(Column(data=ci.freq, name='FREQUENCY', unit='Hz'))
out_table.add_column(Column(data=cs_avg, name='CROSS'))
out_table.add_column(Column(data=ci.power, name='POWER_CI'))
out_table.add_column(Column(data=ref.power, name='POWER_REF'))
out_table.meta['TYPE'] = "Cross spectrum and power spectra, saved for lags."
out_table.meta['DATE'] = str(datetime.now())
out_table.meta['EVTLIST'] = " "
out_table.meta['DT'] = meta_dict['dt']
out_table.meta['DF'] = meta_dict['df']
out_table.meta['N_BINS'] = meta_dict['n_bins']
out_table.meta['SEGMENTS'] = meta_dict['n_seg']
out_table.meta['SEC_SEG'] = meta_dict['n_seconds']
out_table.meta['EXPOSURE'] = meta_dict['exposure']
out_table.meta['DETCHANS'] = meta_dict['detchans']
out_table.meta['RATE_CI'] = ci.mean_rate
out_table.meta['RATE_REF'] = ref.mean_rate
out_table.meta['RMS_REF'] = float(ref.rms)
out_table.meta['NYQUIST'] = meta_dict['nyquist']
out_table.write(out_file, overwrite=True)
################################################################################
def make_cs(rate_ci, rate_ref, meta_dict):
"""
Generate the power spectra for each band and the cross spectrum for one
segment of the light curve.
Parameters
----------
    rate_ci : np.array of floats
        1-D array of the channel of interest light curve. Size = (n_bins, ).
    rate_ref : np.array of floats
        1-D array of the reference band light curve. Size = (n_bins, ).
meta_dict : dict
Dictionary of necessary meta-parameters for data analysis.
Returns
-------
cs_seg : np.array of complex numbers
        1-D array of the cross spectrum of the interest band with the
        reference band. Size = (n_bins, ).
ci_seg : Band object
The channel of interest light curve.
ref_seg : Band object
The reference band light curve.
"""
assert np.shape(rate_ci) == (meta_dict['n_bins'], ),\
"ERROR: CoI light curve has wrong dimensions. Must have size (n_bins, "\
")."
assert np.shape(rate_ref) == (meta_dict['n_bins'], ), "ERROR: Reference "\
"light curve has wrong dimensions. Must have size (n_bins, )."
ci_seg = Band(n_bins=meta_dict['n_bins'], dt=meta_dict['dt'])
ref_seg = Band(n_bins=meta_dict['n_bins'], dt=meta_dict['dt'])
## Computing the mean count rate of the segment
ci_seg.mean_rate = np.mean(rate_ci)
ref_seg.mean_rate = np.mean(rate_ref)
## Subtracting the mean off each value of 'rate'
rate_sub_mean_ci = np.subtract(rate_ci, ci_seg.mean_rate)
rate_sub_mean_ref = np.subtract(rate_ref, ref_seg.mean_rate)
## Taking the FFT of the time-domain photon count rate
## SciPy is faster than NumPy or pyFFTW for my array sizes
fft_data_ci = fftpack.fft(rate_sub_mean_ci)
fft_data_ref = fftpack.fft(rate_sub_mean_ref)
## Computing the power from the fourier transform
ci_seg.power = np.absolute(fft_data_ci) ** 2
ref_seg.power = np.absolute(fft_data_ref) ** 2
## Computing the cross spectrum from the fourier transform
cs_seg = np.multiply(fft_data_ci, np.conj(fft_data_ref))
return cs_seg, ci_seg, ref_seg
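# Note (added): the complex phase of cs_seg at each Fourier frequency is the
# phase of the interest band relative to the reference band.  main() averages
# these per-segment cross spectra over all segments before converting phase to
# a time lag; with the sign flip applied there, a positive lag means the
# interest (harder) band lags the reference band.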
################################################################################
def lc_in(interest_file, ref_file, meta_dict):
n_seg = 0
interest_band = Band(n_bins=meta_dict['n_bins'], dt=meta_dict['dt'])
ref_band = Band(n_bins=meta_dict['n_bins'], dt=meta_dict['dt'])
cross_spec = np.zeros(meta_dict['n_bins'], dtype=np.complex128)
## Open the light curve files and load the data as astropy tables
try:
interest_table = Table.read(interest_file)
except IOError:
print("\tERROR: File does not exist: %s" % interest_file)
exit()
try:
ref_table = Table.read(ref_file)
except IOError:
print("\tERROR: File does not exist: %s" % ref_file)
exit()
start_time_i = interest_table['TIME'][0]
end_time_i = interest_table['TIME'][-1]
start_time_r = ref_table['TIME'][0]
end_time_r = ref_table['TIME'][-1]
len_i = len(interest_table['TIME'])
len_r = len(ref_table['TIME'])
# print("i: %.15f \t %.15f" % (start_time_i, end_time_i))
# print("r: %.15f \t %.15f" % (start_time_r, end_time_r))
# print("len i:", len_i)
# print("len r:", len_r)
# assert len_i == len_r
# assert start_time_i == start_time_r
# assert end_time_i == end_time_r
## The following is in case the two files aren't the exact same length.
a = 0 ## start of bin index to make segment of data
c = 0
b = meta_dict['n_bins'] ## end of bin index to make segment of data for
## inner for-loop
d = meta_dict['n_bins']
if start_time_i > start_time_r:
bin_diff = int((start_time_i - start_time_r) / meta_dict['dt'])
assert bin_diff < len_r
c += bin_diff
d += bin_diff
elif start_time_r > start_time_i:
bin_diff = int((start_time_r - start_time_i) / meta_dict['dt'])
assert bin_diff < len_i
a += bin_diff
b += bin_diff
## Loop through segments of the light curves
while b <= len_i and d <= len_r:
n_seg += 1
## Extract the count rates for each segment
rate_ci = interest_table["RATE"][a:b]
rate_ref = ref_table["RATE"][c:d]
## Compute the power spectra and cross spectrum for that segment
cs_seg, ci_seg, ref_seg = make_cs(rate_ci, rate_ref, meta_dict)
assert int(len(cs_seg)) == meta_dict['n_bins'], "ERROR: "\
"Something went wrong in make_cs. Length of cross spectrum"\
" segment != n_bins."
## Keep running total (to be made into averages)
cross_spec += cs_seg
interest_band.power += ci_seg.power
ref_band.power += ref_seg.power
interest_band.mean_rate += ci_seg.mean_rate
ref_band.mean_rate += ref_seg.mean_rate
if (test is True) and (n_seg == 1): ## For testing
break
## Clear loop variables for the next round
rate_ci = None
rate_ref = None
cs_seg = None
ci_seg = None
ref_seg = None
## Increment the counters and indices
a = b
c = d
b += meta_dict['n_bins']
d += meta_dict['n_bins']
        ## Starting the next segment where the previous one ended keeps the
        ## segments contiguous, so no rows are double-counted or skipped.
return cross_spec, interest_band, ref_band, n_seg
################################################################################
def freq_lag_out(out_base, meta_dict, freq, phase, err_phase, tlag, err_tlag,
ci_mean_rate, ref_mean_rate):
"""
    Save the lag-frequency spectrum (phase and time lags vs. Fourier frequency,
    with their errors) and header data to a FITS file.
Parameters
----------
out_base : str
The name the FITS file to write the cross spectrum and power spectra to,
for computing the lags.
meta_dict : dict
Dictionary of necessary meta-parameters for data analysis.
freq : np.array of floats
1-D array of the Fourier frequencies against which the lag-frequency
spectrum is plotted.
phase, err_phase : np.array of floats
The phase and error in phase of the frequency lags, in radians.
tlag, err_tlag : np.array of floats
The time and error in time of the frequency lags, in seconds.
    ci_mean_rate : float
        The mean photon count rate of the interest band, in cts/s.
ref_mean_rate : float
The mean photon count rate of the reference band, in cts/s.
Returns
-------
nothing, but writes to the file "*_lag-freq.fits"
"""
out_file = out_base + "_lag-freq.fits"
out_dir = out_file[0:out_file.rfind("/")+1]
if len(out_dir) >= 2:
subprocess.call(['mkdir', '-p', out_dir])
print("Output sent to: %s" % out_file)
out_table = Table()
out_table.add_column(Column(data=freq, name='FREQUENCY', unit='Hz'))
out_table.add_column(Column(data=phase, name='PHASE_LAG', unit='radian'))
out_table.add_column(Column(data=err_phase, name='PHASE_LAG_ERR',
unit='radian'))
out_table.add_column(Column(data=tlag, name='TIME_LAG', unit='s'))
out_table.add_column(Column(data=err_tlag, name='TIME_LAG_ERR', unit='s'))
out_table.meta['TYPE'] = "Lag-frequency spectrum"
out_table.meta['DATE'] = str(datetime.now())
out_table.meta['CS_DATA'] = out_base + "_cs.fits"
out_table.meta['DT'] = meta_dict['dt']
out_table.meta['DF'] = meta_dict['df']
out_table.meta['N_BINS'] = meta_dict['n_bins']
out_table.meta['SEGMENTS'] = meta_dict['n_seg']
out_table.meta['SEC_SEG'] = meta_dict['n_seconds']
out_table.meta['EXPOSURE'] = meta_dict['exposure']
out_table.meta['DETCHANS'] = meta_dict['detchans']
out_table.meta['RATE_CI'] = ci_mean_rate
out_table.meta['RATE_REF'] = ref_mean_rate
out_table.meta['NYQUIST'] = meta_dict['nyquist']
out_table.write(out_file, overwrite=True)
################################################################################
def bias_term(ci, ref, meta_dict, n_range):
"""
Compute the bias term to be subtracted off the cross spectrum to compute
    the covariance spectrum. Equation in footnote 4 (section 2.1.3,
page 12) of Uttley et al. 2014.
Assumes power spectra are raw (not at all normalized, and not Poisson-noise-
subtracted).
Parameters
----------
ci : Band object
The channel of interest or interest band. ci.power is raw (not
normalized and not Poisson-noise-subtracted), of the frequencies to be
averaged over, with size = 1 (if avg over freq) or n_bins/2+1 (if avg
over energy). ci.mean_rate is in units of cts/s (size = 1 if avg over
energy, size = detchans if avg over freq). Power and frequency
have only the positive (tot en met Nyquist) frequencies.
ref : Band object
The reference band. ref.power is raw (not normalized and not Poisson-
noise-subtracted), of the frequencies to be averaged over, with
size = 1 (if avg over freq) or n_bins/2+1 (if avg over energy).
ref.mean_rate is in units of cts/s. Power and frequency have only the
positive (tot en met Nyquist) frequencies.
meta_dict : dict
Dictionary of meta-parameters needed for analysis.
n_range : int
Number of bins that will be averaged together for the lags. Energy bins
for frequency lags, frequency bins for energy lags.
Returns
-------
n_squared : float
The bias term to be subtracted off the cross spectrum for computing the
covariance spectrum. Equation in footnote 4 (section 2.1.3, page 12) of
Uttley et al. 2014. If you get an undefined error for the bias term,
just set it equal to zero.
"""
## Compute the Poisson noise level in absolute rms units
Pnoise_ref = ref.mean_rate * 2.0
Pnoise_ci = ci.mean_rate * 2.0
## Normalizing power spectra to absolute rms normalization
## Not subtracting the noise (yet)!
abs_ci = ci.power * (2.0 * meta_dict['dt'] / float(n_range))
abs_ref = ref.power * (2.0 * meta_dict['dt'] / float(n_range))
temp_a = (abs_ref - Pnoise_ref) * Pnoise_ci
temp_b = (abs_ci - Pnoise_ci) * Pnoise_ref
temp_c = Pnoise_ref * Pnoise_ci
n_squared = (temp_a + temp_b + temp_c) / (n_range * meta_dict['n_seg'])
return n_squared
################################################################################
def compute_coherence(cross_spec, ci, ref, meta_dict, n_range):
"""
Compute the raw coherence of the cross spectrum. Coherence equation from
Uttley et al 2014 eqn 11, bias term equation from footnote 4 on same page.
Note that if the bias term gets way too wonky or undefined, it's usually
tiny so you can just set it to zero.
Parameters
----------
cross_spec : np.array of complex numbers
1-D array of the cross spectrum, averaged over the desired energy
range or frequency range. Size = detchans (if avg over freq) or
n_bins/2+1 (if avg over energy). Should be raw, not normalized or
noise-subtracted. Eqn 9 of Uttley et al 2014.
ci : Band object
The channel of interest or interest band. ci.power is raw (not
normalized and not Poisson-noise-subtracted), of the frequencies to be
averaged over, with size = 1 (if avg over freq) or n_bins/2+1 (if avg
over energy). ci.mean_rate is in units of cts/s (size = 1 if avg over
energy, size = detchans if avg over freq). Power and frequency
have only the positive (tot en met Nyquist) frequencies.
ref : Band object
The reference band. ref.power is raw (not normalized and not Poisson-
noise-subtracted), of the frequencies to be averaged over, with
size = 1 (if avg over freq) or n_bins/2+1 (if avg over energy).
ref.mean_rate is in units of cts/s. Power and frequency have only the
positive (tot en met Nyquist) frequencies.
meta_dict : dict
Dictionary of meta-parameters needed for analysis.
n_range : int
Number of frequency bins averaged over per new frequency bin for lags.
For energy lags, this is the number of frequency bins averaged over. For
frequency lags not re-binned in frequency, this is 1. Same as K in
equations in Section 2 of Uttley et al. 2014.
Returns
-------
coherence : np.array of floats
The raw coherence of the cross spectrum. (Uttley et al 2014, eqn 11)
Size = n_bins/2+1 (if avg over energy) or detchans (if avg over freq).
"""
cs_bias = bias_term(ci, ref, meta_dict, n_range)
powers = ci.power * ref.power
crosses = cross_spec * np.conj(cross_spec) - cs_bias
with np.errstate(all='ignore'):
coherence = np.where(powers != 0, crosses / powers, 0)
# print("Coherence shape:", np.shape(coherence))
# print(coherence)
return np.real(coherence)
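# In symbols (added for reference): the function above computes the raw
# coherence gamma^2 = (|<C>|^2 - n^2) / (<P_ci> <P_ref>), where <C> is the
# segment-averaged cross spectrum, <P> are the averaged raw powers and n^2 is
# the bias term -- Uttley et al. 2014, eq. 11 with the footnote-4 bias.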
################################################################################
def get_phase_err(cs_avg, ci, ref, meta_dict, n_range):
"""
Compute the error on the complex phase (in radians) via the coherence.
Power should NOT be Poisson-noise-subtracted or normalized.
Parameters
----------
cs_avg : np.array of complex numbers
1-D array of the raw cross spectrum, averaged over Fourier segments and
energy channels or frequency bins.
Size = detchans (if avg over freq) or n_bins/2+1 (if avg over energy).
Eqn 9 of Uttley et al 2014.
ci : Band object
The channel of interest or interest band. ci.power is raw (not
normalized and not Poisson-noise-subtracted), of the frequencies to be
averaged over, with size = 1 (if avg over freq) or n_bins/2+1 (if avg
over energy). ci.mean_rate is in units of cts/s (size = 1 if avg over
energy, size = detchans if avg over freq). Power and frequency
have only the positive (tot en met Nyquist) frequencies.
ref : Band object
The reference band. ref.power is raw (not normalized and not Poisson-
noise-subtracted), of the frequencies to be averaged over, with
size = 1 (if avg over freq) or n_bins/2+1 (if avg over energy).
ref.mean_rate is in units of cts/s. Power and frequency have only the
positive (tot en met Nyquist) frequencies.
    meta_dict : dict
        Dictionary of meta-parameters needed for analysis.
n_range : int
Number of frequency bins averaged over per new frequency bin for lags.
For energy lags, this is the number of frequency bins averaged over. For
frequency lags not re-binned in frequency, this is 1. Same as K in
equations in Section 2 of Uttley et al. 2014.
Returns
-------
phase_err : np.array of floats
1-D array of the error on the phase of the lag.
"""
# print("Pow ci:", np.shape(power_ci))
# print("Pow ref:", np.shape(power_ref))
# print("Pow cs:", np.shape(cs_avg))
coherence = compute_coherence(cs_avg, ci, ref, meta_dict, n_range)
with np.errstate(all='ignore'):
phase_err = np.sqrt(np.where(coherence != 0, (1 - coherence) /
(2 * coherence * n_range * meta_dict['n_seg']), 0))
return phase_err
################################################################################
def phase_to_tlags(phase, f):
"""
Convert a complex-plane cross-spectrum phase (in radians) to a time lag
(in seconds).
Parameters
----------
phase : float or np.array of floats
1-D array of the phase of the lag, in radians.
f : float or np.array of floats
1-D array of the Fourier frequency of the cross-spectrum, in Hz.
Returns
-------
tlags : float or np.array of floats
1-D array of the time of the lag, in seconds.
"""
if np.shape(phase) != np.shape(f):
## Reshaping (broadcasting) f to have same size as phase
f = np.resize(np.repeat(f, np.shape(phase)[1]), np.shape(phase))
assert np.shape(phase) == np.shape(f), "ERROR: Phase array must have same "\
"dimensions as frequency array."
with np.errstate(all='ignore'):
tlags = np.where(f != 0, phase / (2.0 * np.pi * f), 0)
return tlags
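# Worked example (added): a phase lag of pi/2 rad (a quarter of a cycle) at
# f = 1 Hz maps to a time lag of (pi/2) / (2*pi*1.0) = 0.25 s, i.e. a quarter
# of the 1 s period; the same phase at 10 Hz corresponds to only 0.025 s.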
################################################################################
def main(interest_file, ref_file, out_base="./out", n_seconds=64, test=False):
"""
    Read in two extracted light curves (interest band and reference band),
    split them into segments, compute the power spectra per band and the cross
    spectrum of each segment, average the cross spectrum over all segments, and
    compute the frequency lags between the two bands.
Parameters
----------
interest_file : str
The name of the .lc extracted light curve for the interest band.
ref_file : str
The name of the .lc extracted light curve for the reference band.
out_base : str, default = "out"
The base name to save output to. The extension will be appended to the
end.
n_seconds : int, default = 64
Number of seconds in each Fourier segment. Must be a power of 2,
positive.
test : bool, default = False
If true, only computes one segment of data. If false, runs like normal.
"""
assert n_seconds > 0, "ERROR: Number of seconds per segment must be a "\
"positive integer."
try:
t_res = float(get_key_val(interest_file, 0, 'TIMEDEL'))
except KeyError:
t_res = float(get_key_val(interest_file, 1, 'TIMEDEL'))
try:
t_res_ref = float(get_key_val(ref_file, 0, 'TIMEDEL'))
except KeyError:
t_res_ref = float(get_key_val(ref_file, 1, 'TIMEDEL'))
assert t_res == t_res_ref, "ERROR: Interest band and reference band have "\
"different time binnings. Code cannot currently cope with that."
meta_dict = {'dt': t_res, ## assumes light curves are binned to desired
## resolution already
't_res': t_res,
'n_seconds': n_seconds,
'df': 1.0 / np.float(n_seconds),
'nyquist': 1.0 / (2.0 * t_res),
'n_bins': n_seconds * int(1.0 / t_res),
'detchans': 1, ## only using 1 interest band
'exposure': 0, ## will be computed later
'n_seg': 0} ## will be computed later
## Read in from the light curve files, compute power spectra and cross
## spectrum
total_cross, total_ci, total_ref, total_seg = lc_in(interest_file, ref_file,
meta_dict)
## Assign n_seg and exposure in meta_dict
meta_dict['n_seg'] = total_seg
meta_dict['exposure'] = meta_dict['dt'] * meta_dict['n_bins'] * \
meta_dict['n_seg']
## Turn running totals into averages
total_cross /= np.float(meta_dict['n_seg'])
total_ci.power /= np.float(meta_dict['n_seg'])
total_ci.mean_rate /= np.float(meta_dict['n_seg'])
total_ref.power /= np.float(meta_dict['n_seg'])
total_ref.mean_rate /= np.float(meta_dict['n_seg'])
## Only keeping the parts associated with positive Fourier frequencies
## numpy arrays slice at end-1, and we want to include 'nyq_index';
## for frequency, abs is because the nyquist freq is both pos and neg, and
## we want it pos here.
nyq_index = meta_dict['n_bins'] / 2
total_cross = total_cross[0:nyq_index + 1]
total_ci.power = total_ci.power[0:nyq_index + 1]
total_ci.freq = np.abs(total_ci.freq[0:nyq_index + 1])
total_ref.power = total_ref.power[0:nyq_index + 1]
total_ref.freq = np.abs(total_ref.freq[0:nyq_index + 1])
## Compute the variance and rms of the absolute-rms-normalized reference
## band power spectrum
absrms_ref_pow = raw_to_absrms(total_ref.power, total_ref.mean_rate,
meta_dict['n_bins'], meta_dict['dt'],
noisy=True)
total_ref.var, total_ref.rms = var_and_rms(absrms_ref_pow, meta_dict['df'])
## Save cross spectrum and power spectra to "*_cs.fits"
cs_out(out_base, meta_dict, total_cross, total_ci, total_ref)
## Computing frequency lags
## Negative sign is so that a positive lag is a hard energy lag
phase = -np.arctan2(total_cross.imag, total_cross.real)
# print(np.shape(phase))
err_phase = get_phase_err(total_cross, total_ci, total_ref, meta_dict, 1)
# print(np.shape(err_phase))
tlag = phase_to_tlags(phase, total_ci.freq)
err_tlag = phase_to_tlags(err_phase, total_ci.freq)
## Save lag-frequency spectrum to "*_lag-freq.fits"
freq_lag_out(out_base, meta_dict, total_ci.freq, phase, err_phase, tlag,
err_tlag, total_ci.mean_rate, total_ref.mean_rate)
################################################################################
if __name__ == "__main__":
#########################################
## Parse input arguments and call 'main'
#########################################
parser = argparse.ArgumentParser(usage="python simple_cross_spectra.py "
"interest_band_file reference_band_file [OPTIONAL ARGUMENTS]",
description=__doc__,
epilog="For optional arguments, default values are given in "\
"brackets at end of description.")
parser.add_argument('interest_band_file', help="The .lc background-"\
"subtracted extracted light curve for the interest band.")
parser.add_argument('reference_band_file', help="The .lc background-"\
"subtracted extracted light curve for the reference band. Assumes "\
"it has the same time binning as the interest band.")
parser.add_argument('-o', '--out', default="./out", dest='outbase',
help="The base name for output files. Extension will be"
" appended. [./out]")
parser.add_argument('-n', '--n_sec', type=type_power_of_two, default=64,
dest='n_seconds', help="Number of seconds in each "
"Fourier segment. Must be a "
"power of 2, positive, integer. "
"[64]")
parser.add_argument('--test', type=int, default=0, choices={0,1},
dest='test', help="Int flag: 0 if computing all "
"segments, 1 if only computing one "
"segment for testing. [0]")
args = parser.parse_args()
test = False
if args.test == 1:
test = True
main(args.interest_band_file, args.reference_band_file,
out_base=args.outbase, n_seconds=args.n_seconds, test=test)
| mit | -7,282,821,186,421,364,000 | 35.090047 | 80 | 0.588969 | false |
mahyarap/httpclient | httpclient/httpclient.py | 1 | 1653 | #!/usr/bin/env python3
import sys
import argparse
from httpclient.http import HttpRequest
__version__ = '0.1.0'
def parse_cmd_options(args):
"""Parse the command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('url', help='URL to send the request to')
parser.add_argument('-m', '--method',
default='GET',
help='HTTP request method')
parser.add_argument('-H', '--header',
action='append',
default=[],
help='HTTP headers')
parser.add_argument('-v', '--verbose',
action='store_true',
help='be verbose')
parser.add_argument('-V', '--version',
action='version',
version='%(prog)s {}'.format(__version__),
help='show version and exit')
return parser.parse_args(args)
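# Minimal usage sketch, assuming the package is importable as 'httpclient'
# (the URL below is purely illustrative):
#
# python -m httpclient.httpclient -m GET -H 'Accept: application/json' -v http://example.com/
#
# With those arguments parse_cmd_options() returns a namespace with
# args.method == 'GET' and args.header == ['Accept: application/json'].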
def main():
args = parse_cmd_options(sys.argv[1:])
if args.url:
headers = {}
for header in args.header:
key, val = header.split(':', maxsplit=1)
headers[key.strip()] = val.strip()
request = HttpRequest(args.url, method=args.method,
headers=headers)
response = request.send()
if args.verbose:
print(str(request))
if request.body is not None:
print(str(request.body))
print(str(response))
if response.body is not None:
print(response.body, end='')
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 | 8,312,854,801,826,824,000 | 28 | 66 | 0.503932 | false |
mackal/faction.py | faction3.py | 1 | 35996 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2014 Michael Cook <[email protected]>
#
# GPLv3
"""
Processes an eqlog file and generates SQL to update factions
Should work with a full log, but cleaning up the log will be quicker
The file needs at least the zone enter messages, faction messages,
and slain messages in their full form to work
IMPORTANT: faction messages from non-kills should be filtered out ...
File prep:
I just did a $ grep 'faction\\|slain\\|entered' on the log file
to clean up the log for processing
"""
import re, sys, os
import collections
# str to str so we don't have to worry about string cat
factiontable = {
"Agents of Dreadspire": "396",
"Agents of Mistmoore": "1",
"Agnarr": "2",
"Ak'Anon Strike Force V": "497",
"Akheva": "3",
"Allize Taeew": "4",
"Allize Volew": "5",
"Ancestors of the Crypt": "499",
"Ancestors of Valdeholm": "498",
"Anchorites of Brell Serilis": "6",
"Ancient Cyclops": "481",
"Ankhefenmut": "397",
"Anti-mage": "8",
"Antonius Bayle": "9",
"Arboreans of Faydark": "10",
"Arcane Scientists": "11",
"Army of Light": "494",
"Ashen Order": "12",
"Askr the Lost": "13",
"Aviak": "14",
"Banker": "15",
"Battalion of Marr": "16",
"Beetle": "457",
"Befallen Inhabitants": "17",
"Bertoxxulous": "382",
"Beta Neutral": "18",
"Betrayers of Di`Zok": "19",
"Bloodgills": "20",
"Bloodsabers": "21",
"Broken Skull Clan": "22",
"Brood of Di`Zok": "23",
"Brood of Kotiz": "24",
"Brood of Ssraeshza": "25",
"Brownie": "26",
"Burning Dead": "27",
"Burynai Legion": "28",
"Butcherblock Bandits": "29",
"Cabilis Residents": "30",
"Carson McCabe": "31",
"Cazic Thule": "368",
"Chetari": "32",
"Children of Dranik": "398",
"Circle Of Unseen Hands": "33",
"Citizens of Froststone": "399",
"Citizens of Gukta": "35",
"Citizens of Qeynos": "36",
"Citizens of Seru": "37",
"Citizens of Sharvahl": "483",
"Citizens of Takish-Hiz": "38",
"Clan Grikbar": "39",
"Clan Kolbok": "40",
"Clan Runnyeye": "41",
"Class 41": "377",
"Claws of Veeshan": "42",
"Cleaving Tooth Clan": "383",
"Clerics of Tunare": "43",
"Clerics of Underfoot": "44",
"Clockwork Gnome": "45",
"Clurg": "46",
"Coalition of Tradefolk": "47",
"Coalition of TradeFolk III": "369",
"Coalition of Tradefolk Underground": "48",
"Coldain": "49",
"Combine Empire": "50",
"Commons Residents": "51",
"Concillium Universus": "52",
"Corrupt Qeynos Guards": "53",
"Coterie Elite": "54",
"Coterie of the Eternal Night": "55",
"Craftkeepers": "56",
"Craknek Warriors": "57",
"Creatures of Darkhollow": "400",
"Creatures of Gloomingdeep": "401",
"Creatures of Justice": "58",
"Creatures of Taelosia": "59",
"Creep Reapers": "402",
"Crescent Guards": "493",
"Crimson Hands": "60",
"Critters of Jaggedpine": "61",
"Crusaders of Greenmist": "62",
"Crushbone Orcs": "63",
"Crystal Caverns Terrors/Spiders/Crawlers": "395",
"Cult of the Arisen": "64",
"Cult of the Great Saprophyte": "65",
"Cursed Drakes": "403",
"DaBashers": "66",
"Dain Frostreaver IV": "67",
"Dar Khura": "68",
"Dark Bargainers": "69",
"Dark Ones": "70",
"Dark Reflection": "71",
"Dark Reign": "404",
"Dark Sendings": "72",
"Darkpaws of Jaggedpine": "73",
"Dawnhoppers": "74",
"Death Fist Orcs": "405",
"Deathfist Orcs": "75",
"Deep Muses": "76",
"Deep Sporali": "406",
"Deeppockets": "77",
"Deepshade Collective": "78",
"Deepwater Knights": "79",
"Defective Clockwork": "80",
"Defenders of the Broodlands": "407",
"Defenders of the Haven": "81",
"Deklean Korgad": "408",
"Denizens of Discord": "409",
"Denizens of Fear": "82",
"Denizens of Mischief": "391",
"Dervish Cutthroats": "83",
"Disciples of Kerafyrm": "84",
"Disciples of Rhag`Zadune": "85",
"Dismal Rage": "86",
"Dranik Loyalists": "410",
"Dreadguard Inner": "87",
"Dreadguard Outer": "88",
"Drusella Sathir": "89",
"Dulaks Clan": "459",
"Ebon Mask": "90",
"Eldritch Collective": "91",
"Elementals": "374",
"Emerald Warriors": "92",
"Emperor Ssraeshza": "93",
"Erudite Citizen": "380",
"EvilEye": "94",
"Exiled Frogloks": "95",
"Expedition 328": "411",
"Eye of Seru": "96",
"Faerie": "97",
"Fallen Guard of Illsalin": "412",
"Fallen of Bloody Kithicor": "98",
"Faydarks Champions": "99",
"FelGuard": "100",
"Firiona Vie": "101",
"Fizzlethorp": "414",
"Fizzlethorpe": "102",
"Followers of Korucust": "103",
"Forgotten Guktan Spirits": "104",
"Free Traders of Malgrinnor": "415",
"The Freeport Militia": "105",
"Frogloks of Guk": "106",
"Frogloks of Krup": "107",
"Frogloks of Kunark": "108",
"Frogloks of Sebilis": "109",
"Frostfoot Goblins": "110",
"FungusMan": "111",
"Gate Callers": "112",
"Gate Keepers": "113",
"Gelistial": "114",
"Gem Choppers": "115",
"Geonid Collective": "116",
"Ghouls of Neriak": "117",
"Giant Spider": "386",
"Gladiators of Mata Muram": "416",
"Goblin": "118",
"Goblins of Cleaving Tooth": "119",
"Goblins of Fire Peak": "120",
"Goblins of Mountain Death": "121",
"Gor Taku": "122",
"Gralloks": "123",
"Greater Brann Giants": "124",
"Greater Jord Giants": "125",
"Greater Vann Giants": "126",
"Greater Vind Giants": "127",
"Green Blood Knights": "128",
"Greenfoot Goblins": "417",
"Grieg": "129",
"Grimlings of the Forest": "392",
"Grimlings of the Moor": "130",
"Grobb Merchants": "131",
"Guardians of Shar Vahl": "132",
"Guardians of the Vale": "133",
"Guardians of Veeshan": "134",
"Guards of Gloomingdeep": "475",
"Guards of Qeynos": "135",
"Guktan Elders": "136",
"Guktan Suppliers": "484",
"Gunthaks Clan": "458",
"Hall of the Ebon Mask": "137",
"Hand Legionnaries": "138",
"Hand of Seru": "139",
"Harbingers Clan": "373",
"Haven Defenders": "140",
"Haven Smugglers": "141",
"Heart of Seru": "142",
"Heretics": "143",
"Hexxt": "144",
"High Council of Erudin": "145",
"High Council of Gukta": "146",
"High Guard of Erudin": "147",
"HighHold Citizens": "148",
"Highpass Guards": "149",
"HoHMaiden": "471",
"Holgresh": "150",
"Horde of Xalgoz": "151",
"House of Fordel": "152",
"House of Midst": "153",
"House of Stout": "154",
"Iksar": "371",
"Indifferent": "463",
"Indigo Brotherhood": "155",
"Inhabitants of Air": "464",
"Inhabitants of Firiona Vie": "418",
"Inhabitants of Hate": "156",
"Inhabitants of Tanaan": "157",
"Innoruuk's Curse of the Cauldron": "158",
"Invaders of the Moor": "503",
"Jaggedpine Treefolk": "159",
"Jaled-Dar": "160",
"Johanius Barleou": "161",
"Kaladim Citizens": "162",
"Kaladim Merchants": "419",
"Kane Bayle": "164",
"Karana": "165",
"Karana Bandits": "166",
"Karana Residents": "167",
"Katta Castellum Citizens": "168",
"Kazon Stormhammer": "169",
"Kedge": "420",
"Keepers of the Art": "170",
"Keepers of the Claw": "171",
"Kejek Village": "172",
"Kejekan": "173",
"Kelethin Merchants": "174",
"Kerra": "421",
"Kerra Isle": "175",
"Kessdona": "422",
"Khati Sha": "423",
"King Ak'Anon": "176",
"King Aythox Thex": "379",
"King Naythox Thex": "177",
"King Tearis Thex": "178",
"King Tormax": "179",
"King Xorbb": "180",
"Kingdom of Above and Below": "181",
"Kithicor Residents": "182",
"Knights of Thunder": "183",
"Knights of Truth": "184",
"Kobold": "185",
"Kobolds of Fire Pit": "186",
"Kobolds of Gloomingdeep": "424",
"Koka'Vor Tribe": "501",
"KOS": "366",
"KOS Inhabitants of Air": "465",
"KOS Plane of Disease": "466",
"KOS Plane of Innovation": "468",
"KOS Plane of Nightmare": "467",
"KOS Plane of Storms": "489",
"KOS Plane of Time": "469",
"KOS_animal": "367",
"Krag": "187",
"Kromrif": "188",
"Kromzek": "189",
"Kunark Fire Giants": "190",
"Lake Recondite Bandits": "191",
"Lanys T`Vyl": "425",
"League of Antonican Bards": "192",
"Legion of Cabilis": "193",
"Legion of Mata Muram": "194",
"Lesser Brann Giants": "195",
"Lesser Jord Giants": "196",
"Lesser Vann Giants": "197",
"Lesser Vind Giants": "198",
"Lithiniath": "199",
"Lizard Man": "200",
"Lodikai": "201",
"Lorekeepers of Gukta": "202",
"Lost Kingdom of Lok": "203",
"Lost Minions of Miragul": "204",
"Loyals": "454",
"Luclin": "205",
"Madmen": "480",
"Magus Conlegium": "206",
"Mayong Mistmoore": "207",
"Mayor Gubbin": "208",
"Meldrath": "209",
"Merchants of Ak'Anon": "210",
"Merchants of Erudin": "211",
"Merchants of Felwithe": "212",
"Merchants of Halas": "213",
"Merchants of Highpass": "214",
"Merchants of Kaladim": "215",
"Merchants of Ogguk": "216",
"Merchants of Qeynos": "217",
"Merchants of Rivervale": "218",
"Mermaid": "426",
"Mermaids": "375",
"Miners Guild 249": "219",
"Miners Guild 628": "220",
"Minions of Scale": "221",
"Minions of the Sunlord": "222",
"Minions of Tirranun": "427",
"Minions of Underfoot": "223",
"Mountain Death Clan": "384",
"Mucktail Gnolls": "224",
"Murrissa Sandwhisper": "372",
"Nadox Clan": "472",
"Nadox Initiate": "225",
"Nagafen": "226",
"Najena": "227",
"Nathyn Illuminious": "228",
"Needlite": "460",
"Neriak Merchants": "486",
"Neriak Ogre": "378",
"Neriak Trolls": "229",
"Nest Guardians": "428",
"New Alliance of Stone": "230",
"Nihil": "231",
"Nitram": "474",
"Noobie Monsters KOS to Guards": "394",
"Norrath's Keepers": "429",
"Oggok Citizens": "233",
"Oggok Guards": "232",
"Ogguk Residents": "430",
"Ogre": "431",
"Ogre Warriors": "234",
"OmensBatRat": "485",
"OmensMurks": "487",
"Opal Dark Briar": "235",
"Oracle of Karnon": "236",
"Oracle of Marud": "237",
"Orc": "238",
"Order of Autarkic Umbrage": "239",
"Order of Three": "240",
"Orphans": "452",
"Othmir": "241",
"Outcasts and Mutants": "242",
"Overlord Mata Muram": "432",
"Owlbears of the Moor": "505",
"Pack of Tomar": "243",
"Paebala": "244",
"Paladins of Gukta": "245",
"Paladins of Underfoot": "246",
"Paludal_Mushrooms": "490",
"Paludal_Underbulk": "491",
"Peace Keepers": "247",
"Phingel Autropos": "433",
"Phinigel Autropos": "248",
"Pickclaw Goblins": "249",
"Pirates of Gunthak": "250",
"Pirates of Iceclad": "251",
"Pirates of the Pine": "252",
"Pixie": "253",
"Pixtt": "254",
"Planar Collective": "455",
"Planes_Neutral": "488",
"Prexuz": "255",
"Priests of Innoruuk": "256",
"Priests of Life": "257",
"Priests of Marr": "258",
"Priests of Mischief": "259",
"Primordial Malice": "260",
"Prisoners of Justice": "261",
"Progeny": "262",
"Protectors of Growth": "263",
"Protectors of Gukta": "264",
"Protectors of Pine": "265",
"Qeynos Citizens": "434",
"QRG Protected Animals": "267",
"Queen Cristanos Thex": "268",
"Rallos Zek": "269",
"Rav": "270",
"Residents of Gloomingdeep": "476",
"Residents of Jaggedpine": "271",
"Residents of Karanas": "272",
"Riftseekers": "435",
"Rikkukin": "436",
"Ring of Scale": "273",
"Riptide Goblins": "274",
"Rogues of the White Rose": "275",
"Root of Innuruuk": "276",
"Rujarkian Slavers": "277",
"Rygorr Clan Snow Orcs": "278",
"Sabertooths of Blackburrow": "279",
"Sandworkers": "280",
"Sarnak Collective": "281",
"Scaled Mystics": "282",
"Scions of Dreadspire": "437",
"Scorchclaw Goblins": "438",
"Seru": "284",
"Servants of Aero": "285",
"Servants of Hydro": "286",
"Servants of Inferno": "287",
"Servants of Saryrn": "288",
"Servants of Terra": "289",
"Servants of Tunare": "290",
"Shadowed Men": "291",
"Shadowknights of Night Keep": "292",
"Shak Dratha": "293",
"Shamen of Justice": "294",
"Shamen of War": "295",
"Shei Vinitras": "296",
"Shik Nar": "297",
"Shoulders of Seru": "298",
"Shralok Orcs": "299",
"Silent Fist Clan": "300",
"Silla Herald": "496",
"Sirens of the Grotto": "301",
"Sky Talons": "439",
"Skytalons": "302",
"Snowfang Gnolls": "303",
"Soldiers of Tunare": "304",
"Solusek Mining Co": "305",
"Song Weavers": "306",
"Spider": "500",
"Spire Spirits": "388",
"Spirits of Katta Castellum": "307",
"Spirocs of Timorous": "308",
"Splitpaw Clan": "309",
"Sporali": "310",
"Sporali Collective": "440",
"Steel Warriors": "311",
"Steelslaves": "312",
"Stillmoon Acolytes": "441",
"Stone Hive Bixies": "313",
"Storm Guard": "314",
"Storm Guardians": "315",
"Storm Reapers": "316",
"Sustainers": "453",
"Swamp Giants of Kunark": "370",
"Swift Tails": "317",
"Syrik Iceblood": "318",
"Tarmok Tribe": "390",
"Taruun": "319",
"Temple Of Sol Ro": "442",
"Temple of Solusek Ro": "320",
"The Bloodtribe": "389",
"The Cral Ligi Clan": "321",
"The Dark Alliance": "443",
"The Dead": "322",
"The Forsaken": "323",
"The Grol Baku Clan": "324",
"The Guardians": "444",
"The HotWingz": "325",
"The Kromdek": "326",
"The Kromdul": "327",
"The Rainkeeper": "328",
"The Recuso": "329",
"The Sambata Tribe": "330",
"The Spurned": "331",
"The Tro Jeg Clan": "332",
"The Truth": "333",
"The Vas Ren Clan": "334",
"The_Angry_Sambata": "492",
"Thought Leeches": "335",
"Thrall of Kly": "336",
"Thunder Guardians": "445",
"Tirranun": "446",
"TizmakClan": "337",
"Traders of the Haven": "338",
"Trakanon": "339",
"Treants of Jaggedpine": "340",
"Tribe Vrodak": "341",
"True Spirit": "342",
"Trusik Tribe": "447",
"Tserrina Syl'Tor": "343",
"Tunare's Scouts": "283",
"Tunarean Court": "344",
"Ulthork": "345",
"Undead Frogloks of Guk": "346",
"Undead Residents of Kithicor": "381",
"Underbulks": "461",
"Unkempt Druids": "347",
"Unrest Inhabitants": "376",
"VahShir Crusaders": "348",
"Valdanov Zevfeer": "349",
"Validus Custodus": "350",
"Veeshan": "351",
"Velketor": "352",
"Venril Sathir": "353",
"Verish Mal": "456",
"VillagerRoom": "482",
"Vishimtar": "448",
"Volkara": "449",
"Volkara's Brood": "450",
"Vornol Transon": "354",
"Vox": "355",
"Warlord Ngrub": "473",
"Wayfarers Brotherhood": "356",
"WehateThelin": "470",
"Werewolf": "357",
"Whisperling": "358",
"Whistling Fist Brotherhood": "359",
"Wisps": "462",
"Witnesses of Hate": "393",
"Wizards of Gukta": "360",
"Wolves of the Moor": "504",
"Wolves of the North": "361",
"Yar`lir": "451",
"Yelinak": "362",
"Yunjo Slave Resistance": "363",
"Zazamoukh": "364",
"Zlandicar": "365",
"Zordakalicus Ragefire": "385",
"Zun'Muram": "502",
"Human": "506",
"Donovon":"507",
}
# There are some duplicate keys here, too lazy for now ..
zonetable = {
"The Abysmal Sea": 279,
"The Acrylia Caverns": 154,
"The Plane of Sky": 71,
"Ak'Anon": 55,
"The Akheva Ruins": 179,
"Anguish, the Fallen Palace": 317,
"Designer Apprentice": 999,
"Arcstone, Isle of Spirits": 369,
"The Arena": 77,
"The Arena Two": 180,
"Art Testing Domain": 996,
"Ashengate, Reliquary of the Scale": 406,
"Jewel of Atiiki": 418,
"Aviak Village": 53,
"Barindu, Hanging Gardens": 283,
"Barren Coast": 422,
"The Barter Hall": 346,
"The Bazaar": 151,
"Befallen": 36,
"Befallen": 411,
"The Gorge of King Xorbb": 16,
"Temple of Bertoxxulous": 469,
"Blackburrow": 17,
"Blacksail Folly": 428,
"The Bloodfields": 301,
"Bloodmoon Keep": 445,
"Bastion of Thunder": 209,
"The Broodlands": 337,
"The Buried Sea": 423,
"The Burning Wood": 87,
"Butcherblock Mountains": 68,
"Cabilis East": 106,
"Cabilis West": 82,
"Dagnor's Cauldron": 70,
"Nobles' Causeway": 303,
"Accursed Temple of CazicThule": 48,
"Muramite Proving Grounds": 304,
"Muramite Proving Grounds": 305,
"Muramite Proving Grounds": 306,
"Muramite Proving Grounds": 307,
"Muramite Proving Grounds": 308,
"Muramite Proving Grounds": 309,
"The Howling Stones": 105,
"Chardok": 103,
"Chardok: The Halls of Betrayal": 277,
"The City of Mist": 90,
"Loading": 190,
"Cobaltscar": 117,
"The Crypt of Decay": 200,
"The Commonlands": 408,
"West Commonlands": 21,
"Corathus Creep": 365,
"Sporali Caverns": 366,
"The Corathus Mines": 367,
"Crescent Reach": 394,
"Crushbone": 58,
"Crypt of Shade": 449,
"The Crystal Caverns": 121,
"Crystallos, Lair of the Awakened": 446,
"Sunset Home": 26,
"The Crypt of Dalnir": 104,
"The Dawnshroud Peaks": 174,
"Deadbone Reef": 427,
"Lavaspinner's Lair": 341,
"Tirranun's Delve": 342,
"The Seething Wall": 373,
"The Devastation": 372,
"Direwind Cliffs": 405,
"Korafax, Home of the Riders": 470,
"Citadel of the Worldslayer": 471,
"The Hive": 354,
"The Hatchery": 355,
"The Cocoons": 356,
"Queen Sendaii`s Lair": 357,
"Dragonscale Hills": 442,
"Deepscar's Den": 451,
"The Ruined City of Dranik": 336,
"Catacombs of Dranik": 328,
"Catacombs of Dranik": 329,
"Catacombs of Dranik": 330,
"Dranik's Hollows": 318,
"Dranik's Hollows": 319,
"Dranik's Hollows": 320,
"Sewers of Dranik": 331,
"Sewers of Dranik": 332,
"Sewers of Dranik": 333,
"Dranik's Scar": 302,
"The Dreadlands": 86,
"Dreadspire Keep": 351,
"The Temple of Droga": 81,
"Dulak's Harbor": 225,
"Eastern Plains of Karana": 15,
"The Undershore": 362,
"Snarlstone Dens": 363,
"Eastern Wastes": 116,
"The Echo Caverns": 153,
"East Commonlands": 22,
"The Elddar Forest": 378,
"Tunare's Shrine": 379,
"The Emerald Jungle": 94,
"Erudin": 24,
"The Erudin Palace": 23,
"Erud's Crossing": 98,
"Marauders Mire": 130,
"Everfrost Peaks": 30,
"The Plane of Fear": 72,
"The Feerrott": 47,
"Northern Felwithe": 61,
"Southern Felwithe": 62,
"Ferubi, Forgotten Temple of Taelosia": 284,
"The Forgotten Halls": 998,
"The Field of Bone": 78,
"Firiona Vie": 84,
"Academy of Arcane Sciences": 385,
"Arena": 388,
"City Hall": 389,
"East Freeport": 382,
"Hall of Truth: Bounty": 391,
"Freeport Militia House: My Precious": 387,
"Freeport Sewers": 384,
"Temple of Marr": 386,
"Theater of the Tranquil": 390,
"West Freeport": 383,
"East Freeport": 10,
"North Freeport": 8,
"West Freeport": 9,
"Frontier Mountains": 92,
"Frostcrypt, Throne of the Shade King": 402,
"The Tower of Frozen Shadow": 111,
"The Fungus Grove": 157,
"The Greater Faydark": 54,
"The Great Divide": 118,
"Grieg's End": 163,
"Grimling Forest": 167,
"Grobb": 52,
"The Plane of Growth": 127,
"The Mechamatic Guardian": 447,
"Guild Hall": 345,
"Guild Lobby": 344,
"Deepest Guk: Cauldron of Lost Souls": 229,
"The Drowning Crypt": 234,
"The Ruins of Old Guk": 66,
"Deepest Guk: Ancient Aqueducts": 239,
"The Mushroom Grove": 244,
"Deepest Guk: The Curse Reborn": 249,
"Deepest Guk: Chapel of the Witnesses": 254,
"The Root Garden": 259,
"Deepest Guk: Accursed Sanctuary": 264,
"The City of Guk": 65,
"The Gulf of Gunthak": 224,
"Gyrospire Beza": 440,
"Gyrospire Zeka": 441,
"Halas": 29,
"Harbinger's Spire": 335,
"Plane of Hate": 76,
"The Plane of Hate": 186,
"Hate's Fury": 228,
"High Keep": 6,
"Highpass Hold": 5,
"Highpass Hold": 407,
"HighKeep": 412,
"Hills of Shade": 444,
"The Halls of Honor": 211,
"The Temple of Marr": 220,
"The Hole": 39,
"Hollowshade Moor": 166,
"The Iceclad Ocean": 110,
"Icefall Glacier": 400,
"Ikkinz, Chambers of Transcendence": 294,
"Ruins of Illsalin": 347,
"Illsalin Marketplace": 348,
"Temple of Korlach": 349,
"The Nargil Pits": 350,
"Inktu'Ta, the Unmasked Chapel": 296,
"Innothule Swamp": 46,
"The Innothule Swamp": 413,
"The Jaggedpine Forest": 181,
"Jardel's Hook": 424,
"Kael Drakkel": 113,
"Kaesora": 88,
"South Kaladim": 60,
"North Kaladim": 67,
"Karnor's Castle": 102,
"Katta Castellum": 160,
"Katta Castrum": 416,
"Kedge Keep": 64,
"Kerra Isle": 74,
"Kithicor Forest": 410,
"Kithicor Forest": 20,
"Kod'Taz, Broken Trial Grounds": 293,
"Korascian Warrens": 476,
"Kurn's Tower": 97,
"Lake of Ill Omen": 85,
"Lake Rathetear": 51,
"The Lavastorm Mountains": 27,
"Mons Letalis": 169,
"The Lesser Faydark": 57,
"Loading Zone": 184,
"New Loading Zone": 185,
"Loping Plains": 443,
"The Maiden's Eye": 173,
"Maiden's Grave": 429,
"Meldrath's Majestic Mansion": 437,
"Fortress Mechanotus": 436,
"Goru`kar Mesa": 397,
"Miragul's Menagerie: Silent Gallery": 232,
"Miragul's Menagerie: Frozen Nightmare": 237,
"The Spider Den": 242,
"Miragul's Menagerie: Hushed Banquet": 247,
"The Frosted Halls": 252,
"The Forgotten Wastes": 257,
"Miragul's Menagerie: Heart of the Menagerie": 262,
"The Morbid Laboratory": 267,
"The Theater of Imprisoned Horror": 271,
"Miragul's Menagerie: Grand Library": 275,
"The Plane of Mischief": 126,
"The Castle of Mistmoore": 59,
"Misty Thicket": 33,
"The Misty Thicket": 415,
"Mistmoore's Catacombs: Forlorn Caverns": 233,
"Mistmoore's Catacombs: Dreary Grotto": 238,
"Mistmoore's Catacombs: Struggles within the Progeny": 243,
"Mistmoore's Catacombs: Chambers of Eternal Affliction": 248,
"Mistmoore's Catacombs: Sepulcher of the Damned": 253,
"Mistmoore's Catacombs: Scion Lair of Fury": 258,
"Mistmoore's Catacombs: Cesspits of Putrescence": 263,
"Mistmoore's Catacombs: Aisles of Blood": 268,
"Mistmoore's Catacombs: Halls of Sanguinary Rites": 272,
"Mistmoore's Catacombs: Infernal Sanctuary": 276,
"Monkey Rock": 425,
"Blightfire Moors": 395,
"Marus Seru": 168,
"The Crypt of Nadox": 227,
"Najena": 44,
"Natimbi, the Broken Shores": 280,
"Dragon Necropolis": 123,
"Nedaria's Landing": 182,
"Nektropos": 28,
"The Nektulos Forest": 25,
"Shadowed Grove": 368,
"Neriak - Foreign Quarter": 40,
"Neriak - Commons": 41,
"Neriak - 3rd Gate": 42,
"Neriak Palace": 43,
"Netherbian Lair": 161,
"Nexus": 152,
"The Lair of Terris Thule": 221,
"The Northern Plains of Karana": 13,
"North Desert of Ro": 392,
"Northern Desert of Ro": 34,
"The Mines of Nurga": 107,
"Oasis of Marr": 37,
"Oceangreen Hills": 466,
"Oceangreen Village": 467,
"The Ocean of Tears": 409,
"Oggok": 49,
"BlackBurrow": 468,
"Old Bloodfields": 472,
"Old Commonlands": 457,
"City of Dranik": 474,
"Field of Scale": 452,
"Highpass Hold": 458,
"Kaesora Library": 453,
"Kaesora Hatchery": 454,
"Bloody Kithicor": 456,
"Kurn's Tower": 455,
"Ocean of Tears": 69,
"The Overthere": 93,
"Paineel": 75,
"The Paludal Caverns": 156,
"The Lair of the Splitpaw": 18,
"The Permafrost Caverns": 73,
"The Plane of Air": 215,
"The Plane of Disease": 205,
"The Plane of Earth": 218,
"The Plane of Earth": 222,
"The Plane of Fire": 217,
"The Plane of Innovation": 206,
"The Plane of Justice": 201,
"The Plane of Knowledge": 202,
"The Plane of Nightmares": 204,
"The Plane of Storms": 210,
"Drunder, the Fortress of Zek": 214,
"The Plane of Time": 219,
"The Plane of Time": 223,
"Torment, the Plane of Pain": 207,
"The Plane of Tranquility": 203,
"The Plane of Valor": 208,
"Plane of War": 213,
"The Plane of Water": 216,
"The Precipice of War": 473,
"Muramite Provinggrounds": 316,
"The Qeynos Aqueduct System": 45,
"The Western Plains of Karana": 12,
"South Qeynos": 1,
"North Qeynos": 2,
"The Qeynos Hills": 4,
"Qinimi, Court of Nihilia": 281,
"The Surefall Glade": 3,
"Qvic, Prayer Grounds of Calling": 295,
"Qvic, the Hidden Vault": 299,
"Sverag, Stronghold of Rage": 374,
"Razorthorn, Tower of Sullon Zek": 375,
"Rathe Council Chamber": 477,
"The Rathe Mountains": 50,
"Redfeather Isle": 430,
"Relic, the Artifact City": 370,
"Riftseekers' Sanctum": 334,
"Rivervale": 19,
"Riwwi, Coliseum of Games": 282,
"Blackfeather Roost": 398,
"The Rujarkian Hills: Bloodied Quarries": 230,
"The Rujarkian Hills: Halls of War": 235,
"The Rujarkian Hills: Wind Bridges": 240,
"The Rujarkian Hills: Prison Break": 245,
"The Rujarkian Hills: Drudge Hollows": 250,
"The Rujarkian Hills: Fortified Lair of the Taskmasters": 255,
"The Rujarkian Hills: Hidden Vale of Deceit": 260,
"The Rujarkian Hills: Blazing Forge ": 265,
"The Rujarkian Hills: Arena of Chance": 269,
"The Rujarkian Hills: Barracks of War": 273,
"The Liberated Citadel of Runnyeye": 11,
"The Scarlet Desert": 175,
"The Ruins of Sebilis": 89,
"Shadeweaver's Thicket": 165,
"Shadow Haven": 150,
"Shadowrest": 187,
"Shadow Spine": 364,
"The City of Shar Vahl": 155,
"The Open Sea": 435,
"The Open Sea": 431,
"The Open Sea": 432,
"The Open Sea": 433,
"The Open Sea": 434,
"S.H.I.P. Workshop": 439,
"Silyssar, New Chelsith": 420,
"Siren's Grotto": 125,
"The Skyfire Mountains": 91,
"Skylance": 371,
"Skyshrine": 114,
"The Sleeper's Tomb": 128,
"Sewers of Nihilia, Emanating Cre": 288,
"Sewers of Nihilia, Lair of Trapp": 286,
"Sewers of Nihilia, Purifying Pla": 287,
"Sewers of Nihilia, Pool of Sludg": 285,
"Solusek's Eye": 31,
"Nagafen's Lair": 32,
"The Caverns of Exile": 278,
"The Tower of Solusek Ro": 212,
"The Temple of Solusek Ro": 80,
"Solteris, the Throne of Ro": 421,
"The Southern Plains of Karana": 14,
"South Desert of Ro": 393,
"Southern Desert of Ro": 35,
"Sanctus Seru": 159,
"Ssraeshza Temple": 162,
"The Steam Factory": 438,
"Steamfont Mountains": 56,
"The Steamfont Mountains": 448,
"The Steppes": 399,
"Stillmoon Temple": 338,
"The Ascent": 339,
"The Stonebrunt Mountains": 100,
"Stone Hive": 396,
"Suncrest Isle": 426,
"Sunderock Springs": 403,
"The Swamp of No Hope": 83,
"Tacvi, The Broken Temple": 298,
"Takish-Hiz: Sunken Library": 231,
"Takish-Hiz: Shifting Tower": 236,
"Takish-Hiz: Fading Temple": 241,
"Takish-Hiz: Royal Observatory": 246,
"Takish-Hiz: River of Recollection": 251,
"Takish-Hiz: Sandfall Corridors": 256,
"Takish-Hiz: Balancing Chamber": 261,
"Takish-Hiz: Sweeping Tides": 266,
"Takish-Hiz: Antiquated Palace": 270,
"Ruins of Takish-Hiz": 376,
"The Root of Ro": 377,
"Takish-Hiz: Prismatic Corridors": 274,
"The Temple of Veeshan": 124,
"The Tenebrous Mountains": 172,
"Thalassius, the Coral Keep": 417,
"Theater of Blood": 380,
"Deathknell, Tower of Dissonance": 381,
"The Deep": 164,
"The Grey": 171,
"The Nest": 343,
"The Void": 459,
"The Void": 460,
"The Void": 461,
"The Void": 462,
"The Void": 463,
"The Void": 464,
"The Void": 465,
"Thundercrest Isles": 340,
"The City of Thurgadin": 115,
"Icewell Keep": 129,
"Timorous Deep": 96,
"Tipt, Treacherous Crags": 289,
"The Torgiran Mines": 226,
"Toskirakk": 475,
"Toxxulia Forest": 38,
"Toxxulia Forest": 414,
"Trakanon's Teeth": 95,
"EverQuest Tutorial": 183,
"The Mines of Gloomingdeep": 188,
"The Mines of Gloomingdeep": 189,
"The Twilight Sea": 170,
"Txevu, Lair of the Elite": 297,
"The Umbral Plains": 176,
"The Estate of Unrest": 63,
"Uqua, the Ocean God Chantry": 292,
"Valdeholm": 401,
"Veeshan's Peak": 108,
"Veksar": 109,
"Velketor's Labyrinth": 112,
"Vergalid Mines": 404,
"Vex Thal": 158,
"Vxed, the Crumbling Caverns": 290,
"The Wakening Land": 119,
"Wall of Slaughter": 300,
"The Warrens": 101,
"The Warsliks Woods": 79,
"Stoneroot Falls": 358,
"Prince's Manor": 359,
"Caverns of the Lost": 360,
"Lair of the Korlach": 361,
"The Western Wastes": 120,
"Yxtta, Pulpit of Exiles ": 291,
"Zhisza, the Shissar Sanctuary": 419,
"The Nektulos Forest": 25,
"Brell's Rest": 480,
"The Cooling Chamber": 483,
"Pellucid Grotto": 488,
"Arthicrex": 485,
"The Foundation": 486,
"The Underquarry": 482,
"Brell's Arena": 492,
"Volska's Husk": 489,
"The Convorteum": 491,
"The Library": 704,
"Morell's Castle": 707,
"Al'Kabor's Nightmare": 709,
"Erudin Burning": 706,
"The Feerrott": 700,
"The Grounds": 703,
"Miragul's Nightmare": 710,
"Sanctum Somnium": 708,
"Fear Itself": 711,
"House of Thule": 701,
"House of Thule, Upper Floors": 702,
"The Well": 705,
"Sunrise Hills": 712,
"Argath, Bastion of Illdaera": 724,
"Valley of Lunanyn": 725,
"Sarith, City of Tides": 726,
"Rubak Oseka, Temple of the Sea": 727,
"Beasts' Domain": 728,
"The Resplendent Temple": 729,
"Pillars of Alra": 730,
"Windsong Sanctuary": 731,
"Erillion, City of Bronze": 732,
"Sepulcher of Order": 733,
"Sepulcher East": 734,
"Sepulcher West": 735,
"Wedding Chapel": 493,
"Wedding Chapel": 494,
"Lair of the Risen": 495,
"The Bazaar": 151,
"Brell's Temple": 490,
"Fungal Forest": 481,
"Lichen Creep": 487,
"Kernagir, the Shining City": 484,
"The Breeding Grounds": 757,
"Chapterhouse of the Fallen": 760,
"The Crystal Caverns: Fragment of Fear": 756,
"East Wastes: Zeixshi-Kar's Awakening": 755,
"Evantil, the Vile Oak": 758,
"Grelleth's Palace, the Chateau of Filth": 759,
"Kael Drakkel: The King's Madness": 754,
"Shard's Landing": 752,
"Valley of King Xorbb": 753,
}
def factionsetname(item):
"Generates faction set name"
return re.sub(' ', '', item[0]) + re.sub('-', '', item[1])
def cleanmobname(name):
"Cleans mob name for DB look up"
return re.sub(' ', '_', name)
class FactionSet(object):
"""
FactionSet class
name: name of the faction set
primary: primary faction ID
hits: faction hits assumes a dict like object
faction ID: hit value
"""
def __init__(self, name, primid, hits):
self.name = name
self.primary = primid
self.hits = hits.copy()
def __repr__(self):
return str((self.name, self.primary, self.hits))
# factionsets[name].hits[key] == factionsets[name][key]
def __getitem__(self, key):
return self.hits[key]
# names need to be unique to the set to work
def __eq__(self, other):
return self.name == other.name
def __contains__(self, key):
"Wrapper to key in hits"
return key in self.hits
def generate_sql(self):
"Generates SQL statements"
statement = ('INSERT INTO npc_faction (name, primaryfaction) VALUES '
'(\'{}\', \'{}\');\n'.format(self.name, self.primary) +
'SELECT id INTO @setid FROM npc_faction WHERE name = '
'\'{}\' LIMIT 1;\n'.format(self.name))
for hit in self.hits:
statement += ('INSERT INTO npc_faction_entries '
'(npc_faction_id, faction_id, value, npc_value) '
'VALUES (@setid, \'{}\', \'{}\', \'{}\');\n'
.format(hit, self.hits[hit],
1 if int(self.hits[hit]) < 0 else 0))
return statement
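# Worked example (faction IDs and hit values are invented for illustration):
# FactionSet('Bloodsabers5', '21', {'21': '-5'}).generate_sql() yields roughly:
# INSERT INTO npc_faction (name, primaryfaction) VALUES ('Bloodsabers5', '21');
# SELECT id INTO @setid FROM npc_faction WHERE name = 'Bloodsabers5' LIMIT 1;
# INSERT INTO npc_faction_entries (npc_faction_id, faction_id, value, npc_value) VALUES (@setid, '21', '-5', '1');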
class Mob(object):
"""
Mob class
name: name of mob
zone: zone ID for mob
faction: faction set name
"""
def __init__(self, name, zone, faction):
self.name = name
self.zone = zone
self.faction = faction
def __repr__(self):
return str((self.name, self.zone, self.faction))
def __eq__(self, other):
return self.name == other.name and self.zone == other.zone
def generate_sql(self):
"Generates SQL statements"
return ('UPDATE npc_types SET npc_faction_id = @{} WHERE '
'name RLIKE \'{}\' AND id >= {} AND id <= {};'
.format(self.faction, cleanmobname(self.name), self.zone * 1000,
self.zone * 1000 + 999))
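# Likewise, for illustration only (zone and faction set name are invented):
# Mob('a gnoll pup', 17, 'Bloodsabers5').generate_sql() yields:
# UPDATE npc_types SET npc_faction_id = @Bloodsabers5 WHERE name RLIKE 'a_gnoll_pup' AND id >= 17000 AND id <= 17999;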
def main(filename):
"Processes eqlog and generates SQL to update mob factions"
if not os.path.exists(filename):
print(filename + ' not found')
exit(-1)
pfaction = re.compile(r'\[.*\] Your faction standing with (.*) has been '
r'adjusted by (.*)\.')
pslain1 = re.compile(r'\[.*\] You have slain (.*)!')
pslain2 = re.compile(r'\[.*\] (.*) has been slain by .*!')
penter = re.compile(r'\[.*\] You have entered (.*)\.')
factions = {} # mob: mob object
factionsets = {} # set name: set object
hits = collections.OrderedDict() # faction ID: value
nohits = [] # mobs with no faction hits
setname = None
primary = None
zone = None
eqlog = open(filename, 'r')
for line in eqlog:
m = penter.match(line)
if m:
if not re.search('PvP|levitation', line):
zone = zonetable[m.group(1)] if \
m.group(1) in zonetable else m.group(1)
continue
m = pfaction.match(line)
if m:
if not setname and not hits.items():
setname = factionsetname(m.groups())
primary = factiontable[m.group(1)]
hits[factiontable[m.group(1)]] = m.group(2)
continue
m = pslain1.match(line)
if not m:
m = pslain2.match(line)
if m:
# hits will be empty if no faction hits, so we skip it
if m.group(1) not in factions and hits.items():
factions[m.group(1)] = Mob(m.group(1), zone, setname)
if setname not in factionsets:
factionsets[setname] = FactionSet(setname, primary, hits)
elif not hits.items():
nohits.append(m.group(1))
hits.clear()
setname = None
primary = None
continue
eqlog.close()
print('-- Faction set entries')
for fset in factionsets.values():
print(fset.generate_sql())
print('-- Mob entries')
for setname in factionsets:
print('SELECT id INTO @{0} FROM npc_faction WHERE name = \'{0}\' '
'LIMIT 1;'.format(setname))
print()
# The zone limiting assumes the mob ids follow PEQ's scheme
for mob in factions.values():
print(mob.generate_sql())
# This might output some pets
if len(nohits):
print('-- some of these might be pets')
for mob in nohits:
print('-- no faction hit {}'.format(mob))
return 0
if __name__ == '__main__':
if len(sys.argv) != 2:
print('Incorrect arguments. python ' + sys.argv[0] + ' filename')
exit(-1)
main(sys.argv[1])
| gpl-3.0 | -6,577,107,049,170,106,000 | 29.844045 | 80 | 0.576969 | false |
Tilapiatsu/modo-tila_batchexporter | lxserv/Tila_BatchTransform.py | 1 | 2520 | #!/usr/bin/env python
import modo
import lx
import lxu.command
import lxu.select
import traceback
import Tila_BatchExportModule as t
from Tila_BatchExportModule import user_value
from Tila_BatchExportModule import batch_export
class CmdBatchExport(lxu.command.BasicCommand):
def __init__(self):
lxu.command.BasicCommand.__init__(self)
reload(user_value)
reload(t)
user_value.add_User_Values(self, t.userValues)
def cmd_Flags(self):
return lx.symbol.fCMD_MODEL | lx.symbol.fCMD_UNDO
def basic_Enable(self, msg):
return True
def cmd_Interact(self):
pass
def basic_Execute(self, msg, flags):
reload(t)
reload(batch_export)
try:
scn = modo.Scene()
currScn = modo.scene.current()
userSelection = scn.selected
userSelectionCount = len(userSelection)
olderSelection = []
currPath = currScn.filename
if currPath is None:
currPath = ""
scnIndex = lx.eval('query sceneservice scene.index ? current')
userValues = user_value.query_User_Values(self, t.kit_prefix)
tbe = batch_export.TilaBacthExport
userValues[1] = False
userValues[2] = False
if bool(userValues[0]):
olderSelection = userSelection
userSelection = tbe.select_visible_items(tbe(userSelection,
userSelectionCount,
scn,
currScn,
currPath,
scnIndex,
userValues))
userSelectionCount = len(userSelection)
tbe.batch_transform(tbe(userSelection,
userSelectionCount,
scn,
currScn,
currPath,
scnIndex,
userValues))
if bool(userValues[0]):
scn.select(olderSelection)
except:
lx.out(traceback.format_exc())
def cmd_Query(self, index, vaQuery):
lx.notimpl()
lx.bless(CmdBatchExport, t.TILA_BATCH_TRANSFORM) | mit | -7,176,125,561,679,100,000 | 28.658824 | 76 | 0.483333 | false |
no13bus/btcproject | btc/tasks.py | 1 | 13215 | #encoding=utf-8
from __future__ import absolute_import
from celery import shared_task
# from celery.task import task
from btcproject import celery_app
from btc.lib.okcoin import *
from btc.lib.btceapi import *
from btc.lib.bitfinex import *
from btc.lib.huobi import *
from btc.lib.btcchina import *
from celery import Celery,platforms,group
import time
import pprint
import datetime
from btc.models import *
from datetime import timedelta
from django.utils.timezone import utc
from django.conf import settings
import logging
import logging.handlers
from mailer import Mailer
from mailer import Message
LOG_FILE = 'btc_celery.log'
handler = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes = 1024*1024*20, backupCount = 10)
fmt = '%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(message)s'
formatter = logging.Formatter(fmt)
handler.setFormatter(formatter)
logger = logging.getLogger('btc_celery')
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
@celery_app.task
def send_btc_mail(subjectstring, messagestring):
message = Message(From=settings.MAILSENDER,To=settings.TOLIST, charset="utf-8")
message.Subject = subjectstring
message.Html = messagestring
mysender = Mailer(host=settings.MAILHOST, pwd=settings.MAILPWD, usr=settings.MAILSENDER)
mysender.send(message)
@celery_app.task
def set_bool_task():
if settings.SENDBOOL == False:
logger.debug('set_bool is seted to true')
settings.SENDBOOL = True
@celery_app.task
def bitfinex_task_btc(bfx):
payload = {}
book = bfx.book(payload)
bitfinex_seller_price = float(book['asks'][1]['price'])
bitfinex_buyer_price = float(book['bids'][1]['price'])
bitfinex_buyer_price_done = float(book['asks'][5]['price'])
bitfinex_seller_price_done = float(book['bids'][5]['price'])
return [bitfinex_seller_price, bitfinex_buyer_price, bitfinex_seller_price_done, bitfinex_buyer_price_done]
@celery_app.task
def bitfinex_task_ltc(bfx):
payload = {}
ltc_book = bfx.book(payload,'ltcusd')
bitfinex_seller_price_ltc = float(ltc_book['asks'][1]['price'])
bitfinex_buyer_price_ltc = float(ltc_book['bids'][1]['price'])
bitfinex_buyer_price_ltc_done = float(ltc_book['bids'][5]['price'])
bitfinex_seller_price_ltc_done = float(ltc_book['asks'][5]['price'])
return [bitfinex_seller_price_ltc, bitfinex_buyer_price_ltc, bitfinex_seller_price_ltc_done, bitfinex_buyer_price_ltc_done]
@celery_app.task
def bitfinex_task_info(bfx):
user_info = bfx.balances()
if [i['amount'] for i in user_info if i['type']=='exchange' and i['currency']=='usd']:
usd = float([i['amount'] for i in user_info if i['type']=='exchange' and i['currency']=='usd'][0])
else:
usd = 0.0
if [i['amount'] for i in user_info if i['type']=='exchange' and i['currency']=='ltc']:
ltc = float([i['amount'] for i in user_info if i['type']=='exchange' and i['currency']=='ltc'][0])
else:
ltc = 0.0
if [i['amount'] for i in user_info if i['type']=='exchange' and i['currency']=='btc']:
btc = float([i['amount'] for i in user_info if i['type']=='exchange' and i['currency']=='btc'][0])
else:
btc = 0.0
return [usd,btc,ltc]
@celery_app.task
def btce_task_btc(btce):
de = btce.get_Depth()
btce_seller_price = de['bids'][1][0]
btce_buyer_price = de['asks'][1][0]
btce_seller_price_done = de['bids'][5][0]
btce_buyer_price_done = de['asks'][5][0]
return [btce_seller_price, btce_buyer_price, btce_seller_price_done, btce_buyer_price_done]
@celery_app.task
def btce_task_ltc(btce):
ltc_de = btce.get_Depth('ltc_usd')
btce_seller_price_ltc = ltc_de['bids'][1][0]
btce_buyer_price_ltc = ltc_de['asks'][1][0]
btce_seller_price_ltc_done = ltc_de['bids'][5][0]
btce_buyer_price_ltc_done = ltc_de['asks'][5][0]
return [btce_seller_price_ltc, btce_buyer_price_ltc, btce_seller_price_ltc_done, btce_buyer_price_ltc_done]
@celery_app.task
def btce_task_info(btce):
user_info = btce.getInfo()
usd = user_info['return']['funds']['usd']
btc = user_info['return']['funds']['btc']
ltc = user_info['return']['funds']['ltc']
return [usd,btc,ltc]
@celery_app.task
def okcoin_task_btc(okcoin,rate):
de = okcoin.get_depth()
okcoin_seller_price = de['asks'][-1][0]
okcoin_buyer_price = de['bids'][1][0]
okcoin_seller_price_done = de['asks'][-5][0]
okcoin_buyer_price_done = de['bids'][5][0]
return [okcoin_seller_price, okcoin_buyer_price, okcoin_seller_price_done, okcoin_buyer_price_done]
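# Indexing note (inferred from how the depth data is used in this file, not from
# the exchange API docs): each 'asks'/'bids' entry is treated as a [price, amount]
# pair, so [...][0] picks the price; the OKCoin/Huobi helpers take the best ask from
# the end of the list (asks[-1]) while the Bitfinex/BTC-e helpers index from the front.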
@celery_app.task
def okcoin_task_ltc(okcoin,rate):
ltc_de = okcoin.get_depth_ltc()
okcoin_seller_price_ltc = ltc_de['asks'][-1][0]
okcoin_buyer_price_ltc = ltc_de['bids'][1][0]
okcoin_seller_price_ltc_done = ltc_de['asks'][-5][0]
okcoin_buyer_price_ltc_done = ltc_de['bids'][5][0]
return [okcoin_seller_price_ltc, okcoin_buyer_price_ltc, okcoin_seller_price_ltc_done, okcoin_buyer_price_ltc_done]
@celery_app.task
def okcoin_task_info(okcoin,rate):
user_info = okcoin.get_account()
cny = float(user_info['info']['funds']['free']['cny'])
ltc = float(user_info['info']['funds']['free']['ltc'])
btc = float(user_info['info']['funds']['free']['btc'])
return [cny,btc,ltc]
@celery_app.task
def huobi_task_btc(huobi,rate):
de = huobi.get_depth('btc')
huobi_seller_price = float(de['asks'][-1][0])
huobi_buyer_price = float(de['bids'][1][0])
huobi_buyer_price_done = float(de['bids'][5][0])
huobi_seller_price_done = float(de['asks'][-5][0])
return [huobi_seller_price, huobi_buyer_price, huobi_seller_price_done, huobi_buyer_price_done]
@celery_app.task
def huobi_task_ltc(huobi,rate):
ltc_de = huobi.get_depth('ltc')
huobi_seller_price_ltc = float(ltc_de['asks'][-1][0])
huobi_buyer_price_ltc = float(ltc_de['bids'][1][0])
huobi_buyer_price_ltc_done = float(ltc_de['bids'][5][0])
huobi_seller_price_ltc_done = float(ltc_de['asks'][-5][0])
return [huobi_seller_price_ltc, huobi_buyer_price_ltc, huobi_seller_price_ltc_done, huobi_buyer_price_ltc_done]
@celery_app.task
def huobi_task_info(huobi,rate):
user_info = huobi.get_account_info()
cny = float(user_info['available_cny_display']) if 'available_cny_display' in user_info else 0.0
ltc = float(user_info['available_ltc_display']) if 'available_ltc_display' in user_info else 0.0
btc = float(user_info['available_btc_display']) if 'available_btc_display' in user_info else 0.0
return [cny,btc,ltc]
### http.cannot requests
@celery_app.task
def btcchina_task_btc(btcchina,rate):
de = btcchina.get_depth()
btcchina_seller_price = de['asks'][-1][0]
btcchina_buyer_price = de['bids'][1][0]
btcchina_buyer_price_done = de['bids'][3][0]
btcchina_seller_price_done = de['asks'][-3][0]
return [btcchina_seller_price, btcchina_buyer_price, btcchina_seller_price_done, btcchina_buyer_price_done]
@celery_app.task
def btcchina_task_ltc(btcchina,rate):
ltc_de = btcchina.get_depth('ltccny')
btcchina_seller_price_ltc = ltc_de['asks'][-1][0]
btcchina_buyer_price_ltc = ltc_de['bids'][1][0]
btcchina_buyer_price_ltc_done = ltc_de['bids'][3][0]
btcchina_seller_price_ltc_done = ltc_de['asks'][-3][0]
return [btcchina_seller_price_ltc, btcchina_buyer_price_ltc, btcchina_seller_price_ltc_done, btcchina_buyer_price_ltc_done]
@celery_app.task
def btcchina_task_info(bc,rate):
user_info = bc.get_account_info()
cny = user_info['balance']['cny']['amount']
ltc = user_info['balance']['ltc']['amount']
btc = user_info['balance']['btc']['amount']
cny = float(cny)
ltc = float(ltc)
btc = float(btc)
return [cny,btc,ltc]
@celery_app.task
def insert_buy_info(okcoin_buyprice,huobi_buyprice,btcchina_buyprice,bitfinex_buyprice,okcoin_buyprice_ltc,huobi_buyprice_ltc,btcchina_buyprice_ltc,bitfinex_buyprice_ltc,created):
now = datetime.datetime.utcnow().replace(tzinfo=utc)
p = Pricebuysell(okcoin_buyprice=okcoin_buyprice,huobi_buyprice=huobi_buyprice,
btcchina_buyprice=btcchina_buyprice, bitfinex_buyprice=bitfinex_buyprice,
okcoin_buyprice_ltc=okcoin_buyprice_ltc,huobi_buyprice_ltc=huobi_buyprice_ltc,
btcchina_buyprice_ltc=btcchina_buyprice_ltc, bitfinex_buyprice_ltc=bitfinex_buyprice_ltc,created=now)
p.save()
#### Fetch the trade prices in real time and store them in the MySQL DB so they can be shown on the front end
@celery_app.task
def user_trade():
#### admin's settings
user = Userprofile.objects.filter(id=1)
user = user[0]
rate = user.rate
amount = user.amount
ltcamount = user.ltcamount
auto_trade = user.auto_trade
user_id = user.user.id
okcoin2bitfinex = user.okCoin2bitfinex
bitfinex2okcoin = user.bitfinex2okCoin
okcoin2huobi = user.okCoin2huobi
huobi2okcoin = user.huobi2okCoin
okcoin2btcchina = user.okCoin2btcchina
btcchina2okcoin = user.btcchina2okCoin
huobi2btcchina = user.huobi2btcchina
btcchina2huobi = user.btcchina2huobi
huobi2bitfinex = user.huobi2bitfinex
bitfinex2huobi = user.bitfinex2huobi
bitfinex2btcchina = user.bitfinex2btcchina
btcchina2bitfinex = user.btcchina2bitfinex
okcoin2bitfinex_ltc = user.okCoin2bitfinex_ltc
bitfinex2okcoin_ltc = user.bitfinex2okCoin_ltc
okcoin2huobi_ltc = user.okCoin2huobi_ltc
huobi2okcoin_ltc = user.huobi2okCoin_ltc
okcoin2btcchina_ltc = user.okCoin2btcchina_ltc
btcchina2okcoin_ltc = user.btcchina2okCoin_ltc
huobi2btcchina_ltc = user.huobi2btcchina_ltc
btcchina2huobi_ltc = user.btcchina2huobi_ltc
huobi2bitfinex_ltc = user.huobi2bitfinex_ltc
bitfinex2huobi_ltc = user.bitfinex2huobi_ltc
bitfinex2btcchina_ltc = user.bitfinex2btcchina_ltc
btcchina2bitfinex_ltc = user.btcchina2bitfinex_ltc
##
okcoin = OkCoin(user.okcoin_key.__str__(),user.okcoin_secret.__str__())
bfx = Bitfinex()
bfx.key = user.bitfinex_key.__str__()
bfx.secret = user.bitfinex_secret.__str__()
huobi = HuoBi(user.huobi_key.__str__(), user.huobi_secret.__str__())
btcchina = BTCChina(user.btcchina_key.__str__(), user.btcchina_secret.__str__())
g=group(bitfinex_task_btc.s(bfx), huobi_task_btc.s(huobi, rate),
btcchina_task_btc.s(btcchina, rate), okcoin_task_btc.s(okcoin, rate),
bitfinex_task_ltc.s(bfx), huobi_task_ltc.s(huobi, rate),
btcchina_task_ltc.s(btcchina, rate), okcoin_task_ltc.s(okcoin, rate),
bitfinex_task_info.s(bfx),huobi_task_info.s(huobi, rate),btcchina_task_info.s(btcchina, rate),okcoin_task_info.s(okcoin, rate))
result = g().get()
okcoin_buyprice_btc = result[3][1]
huobi_buyprice_btc = result[1][1]
btcchina_buyprice_btc = result[2][1]
bitfinex_buyprice_btc = result[0][1]
okcoin_sellprice_btc = result[3][0]
huobi_sellprice_btc = result[1][0]
btcchina_sellprice_btc = result[2][0]
bitfinex_sellprice_btc = result[0][0]
okcoin_buyprice_ltc = result[7][1]
huobi_buyprice_ltc = result[5][1]
btcchina_buyprice_ltc = result[6][1]
bitfinex_buyprice_ltc = result[4][1]
okcoin_sellprice_ltc = result[7][0]
huobi_sellprice_ltc = result[5][0]
btcchina_sellprice_ltc = result[6][0]
bitfinex_sellprice_ltc = result[4][0]
created = datetime.datetime.utcnow().replace(tzinfo=utc)
insert_buy_info.delay(okcoin_buyprice_btc,huobi_buyprice_btc,btcchina_buyprice_btc,bitfinex_buyprice_btc,
okcoin_buyprice_ltc,huobi_buyprice_ltc,btcchina_buyprice_ltc,bitfinex_buyprice_ltc,created)
@celery_app.task
def tradedate():
user = Userprofile.objects.filter(id=1)
user = user[0]
rate = user.rate
amount = user.amount
ltcamount = user.ltcamount
auto_trade = user.auto_trade
user_id = user.user.id
huobi = HuoBi(user.huobi_key.__str__(), user.huobi_secret.__str__())
huobi_j = huobi.get_trades_history('btc')
trade_dates = huobi_j['trades']
for data in trade_dates:
price = data['price']
amount = data['amount']
mtype = data['type']
created_day = datetime.datetime.now().strftime("%Y-%m-%d")
mtime = '%s %s' % (created_day, data['time'])
now = datetime.datetime.utcnow().replace(tzinfo=utc)
td = Tradedate.objects.filter(mtime=mtime,price=price,amount=amount,mtype=mtype)
if not td:
trade_item = Tradedate(mtime=mtime,price=price,amount=amount,mtype=mtype,created=now)
trade_item.save()
@celery_app.task
def tradedate_analysis():
t_delta = datetime.timedelta(seconds=60)
nowdate = datetime.datetime.now()
start_time = nowdate.strftime("%Y-%m-%d %H:%M:%S")
end_time = (nowdate - t_delta).strftime("%Y-%m-%d %H:%M:%S")
td = Tradedate.objects.filter(mtime__gte=end_time, mtime__lte=start_time).order_by('-mtime')
if not td:
return
avg_price = sum([item.price for item in td]) / len(td)
avg_price = round(avg_price,4)
buy_data = td.filter(mtype=u'买入')  # u'买入' == "buy" in the Huobi trade feed
buy_amount = sum([item.amount for item in buy_data])
buy_amount = round(buy_amount,4)
sell_data = td.filter(mtype=u'卖出')  # u'卖出' == "sell" in the Huobi trade feed
sell_amount = sum([item.amount for item in sell_data])
sell_amount = round(sell_amount,4)
if buy_amount > sell_amount:
buyorsell = 'buy'
else:
buyorsell = 'sell'
if not Tradedate_analysis.objects.filter(start_time=start_time,end_time=end_time):
now = datetime.datetime.utcnow().replace(tzinfo=utc)
ta = Tradedate_analysis(buyorsell=buyorsell,avg_price=avg_price,start_time=start_time,end_time=end_time,
buy_amount=buy_amount,sell_amount=sell_amount,created=now)
ta.save()
| mit | -85,900,734,797,334,750 | 36.043353 | 179 | 0.697713 | false |
valuesandvalue/valuesandvalue | vavs_project/fbdata/fields.py | 1 | 2071 | # fbdata.fields
# DJANGO
from django.db import models
from django.utils import six
# SOUTH
from south.modelsinspector import add_introspection_rules
class IntegerListField(models.Field):
description = "Integer List"
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 120
super(IntegerListField, self).__init__(*args, **kwargs)
def db_type(self, connection):
return 'char(%s)' % self.max_length
def get_internal_type(self):
return 'CharField'
def to_python(self, value):
if isinstance(value, basestring):
return [int(s) for s in value.split(',') if s.isdigit()]
elif isinstance(value, list):
return value
def get_prep_value(self, value):
return ','.join([str(v) for v in value])
def pre_save(self, model_instance, add):
value = getattr(model_instance, self.attname)
if not value and self.default:
value = list(self.default)
setattr(model_instance, self.attname, value)
return value
def get_prep_lookup(self, lookup_type, value):
# We only handle 'exact' and 'in'. All others are errors.
if lookup_type == 'exact':
return self.get_prep_value(value)
elif lookup_type == 'in':
return [self.get_prep_value(v) for v in value]
else:
raise TypeError('Lookup type %r not supported.' % lookup_type)
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return self.get_db_prep_value(value)
def formfield(self, **kwargs):
defaults = {'max_length': self.max_length}
defaults.update(kwargs)
return super(IntegerListField, self).formfield(**defaults)
add_introspection_rules([
(
[IntegerListField], # Class(es) these apply to
[], # Positional arguments (not used)
{}, # Keyword argument
),
], ["^fbdata\.fields\.IntegerListField"])
| mit | 5,486,886,093,068,114,000 | 30.378788 | 74 | 0.593916 | false |
jcurry/ZenPacks.ZenSystems.Juniper | ZenPacks/ZenSystems/Juniper/modeler/plugins/JuniperFPCMap.py | 1 | 6265 | ##########################################################################
# Author: Jane Curry, [email protected]
# Date: February 28th, 2011
# Revised: Extra debugging added Aug 23, 2011
#
# JuniperFPC modeler plugin
#
# This program can be used under the GNU General Public License version 2
# You can find full information here: http://www.zenoss.com/oss
#
##########################################################################
__doc__ = """JuniperFPCMap
Gather table information from Juniper Contents tables
"""
import re
from Products.DataCollector.plugins.CollectorPlugin import SnmpPlugin, GetMap, GetTableMap
class JuniperFPCMap(SnmpPlugin):
"""Map Juniper FPC table to model."""
maptype = "JuniperFPCMap"
modname = "ZenPacks.ZenSystems.Juniper.JuniperFPC"
relname = "JuniperFP"
compname = ""
snmpGetTableMaps = (
GetTableMap('jnxContentsTable',
'.1.3.6.1.4.1.2636.3.1.8.1',
{
'.1': 'containerIndex',
'.5': 'FPCType',
'.6': 'FPCDescr',
'.7': 'FPCSerialNo',
'.8': 'FPCRevision',
'.10': 'FPCPartNo',
'.11': 'FPCChassisId',
'.12': 'FPCChassisDescr',
'.13': 'FPCChassisCLEI',
}
),
GetTableMap('jnxOperatingTable',
'.1.3.6.1.4.1.2636.3.1.13.1',
{
'.6': 'FPCState',
'.7': 'FPCTemp',
'.8': 'FPCCPU',
'.13': 'FPCUpTime',
'.15': 'FPCMemory',
}
),
GetTableMap('jnxContainersTable',
'.1.3.6.1.4.1.2636.3.1.6.1',
{
'.1': 'containerIndex',
'.3': 'containerLevel',
'.4': 'containerNextLevel',
'.5': 'containerType',
'.6': 'containerDescr',
}
),
)
def process(self, device, results, log):
"""collect snmp information from this device"""
log.info('processing %s for device %s', self.name(), device.id)
getdata, tabledata = results
rm = self.relMap()
contentsTable = tabledata.get('jnxContentsTable')
operatingTable = tabledata.get('jnxOperatingTable')
containersTable = tabledata.get('jnxContainersTable')
# If no data supplied then simply return
if not contentsTable:
log.warn( 'No SNMP response from %s for the %s plugin for contents', device.id, self.name() )
log.warn( "Data= %s", tabledata )
return
if not operatingTable:
log.warn( 'No SNMP response from %s for the %s plugin for operating system', device.id, self.name() )
log.warn( "Data= %s", tabledata )
return
if not containersTable:
log.warn( 'No SNMP response from %s for the %s plugin for containers', device.id, self.name() )
log.warn( "Data= %s", tabledata )
return
for oid, data in contentsTable.items():
try:
om = self.objectMap(data)
FPCDescr = om.FPCDescr
# log.info(' FPCDescr is %s ' % (om.FPCDescr))
isaFPC = re.match(r'(.*FPC.*)', FPCDescr.upper())
if not isaFPC:
continue
else:
for oid1, data1 in operatingTable.items():
if oid1 == oid:
om.FPCState = data1['FPCState']
om.FPCTemp = data1['FPCTemp']
om.FPCCPU = data1['FPCCPU']
om.FPCUpTime = data1['FPCUpTime']
om.FPCMemory = data1['FPCMemory']
for oid2, data2 in containersTable.items():
# log.info( ' oid is %s - oid2 is %s - data is %s' % (oid, oid2 , data2))
if oid.startswith(oid2):
om.containerDescr = data2['containerDescr']
if data2['containerLevel'] == 1:
om.containerDescr = '....' + om.containerDescr
elif data2['containerLevel'] == 2:
om.containerDescr = '........' + om.containerDescr
om.containerParentIndex = data2['containerNextLevel']
if om.containerParentIndex != 0:
for oid3, data3 in containersTable.items():
if oid3.endswith(str(om.containerParentIndex)):
om.containerParentDescr = data3['containerDescr']
om.snmpindex = oid1.strip('.')
# Convert FPCUpTime from milliseconds to days (ms -> s -> min -> hours -> days)
om.FPCUpTime = om.FPCUpTime / 1000 / 60 / 60 /24
# Transform numeric FPCState into a status string via operatingStateLookup
if (om.FPCState < 1 or om.FPCState > 7):
om.FPCState = 1
om.FPCState = self.operatingStateLookup[om.FPCState]
om.id = self.prepId( om.FPCDescr.replace(' ','_') + '_' + str( om.snmpindex.replace('.','_') ) )
except (KeyError, IndexError, AttributeError, TypeError), errorInfo:
log.warn( ' Error in %s modeler plugin %s' % ( self.name(), errorInfo))
continue
rm.append(om)
# log.info('rm %s' % (rm) )
return rm
operatingStateLookup = { 1: 'Unknown',
2: 'Running',
3: 'Ready',
4: 'Reset',
5: 'RunningAtFullSpeed (Fan)',
6: 'Down',
7: 'Standby'
}
| gpl-2.0 | 707,825,517,397,618,400 | 43.75 | 116 | 0.444054 | false |
kussj/mesosbeat | scripts/generate_field_docs.py | 1 | 2634 | #!/usr/bin/env python
"""
This script generates asciidoc documentation from the fields yml file.
Usage: python generate_field_docs.py file.yml file.asciidoc
"""
import sys
import yaml
SECTIONS = [
("env", "Common"),
("cluster_health", "Contains elasticsearch cluster health statistics"),
("cluster_stats", "Contains elasticsearch cluster stats statistics"),
("cluster_node", "Contains elasticsearch node stats statistics")]
def document_fields(output, section):
if "anchor" in section:
output.write("[[exported-fields-{}]]\n".format(section["anchor"]))
output.write("=== {} Fields\n\n".format(section["name"]))
if "description" in section:
output.write("{}\n\n".format(section["description"]))
output.write("\n")
for field in section["fields"]:
if "type" in field and field["type"] == "group":
for sec, name in SECTIONS:
if sec == field["name"]:
field["anchor"] = field["name"]
field["name"] = name
break
document_fields(output, field)
else:
document_field(output, field)
def document_field(output, field):
if "path" not in field:
field["path"] = field["name"]
output.write("==== {}\n\n".format(field["path"]))
if "type" in field:
output.write("type: {}\n\n".format(field["type"]))
if "example" in field:
output.write("example: {}\n\n".format(field["example"]))
if "format" in field:
output.write("format: {}\n\n".format(field["format"]))
if "required" in field:
output.write("required: {}\n\n".format(field["required"]))
if "description" in field:
output.write("{}\n\n".format(field["description"]))
def fields_to_asciidoc(input, output):
output.write("""
////
This file is generated! See etc/fields.yml and scripts/generate_field_docs.py
////
[[exported-fields]]
== Exported Fields
This document describes the fields that are exported by
ApacheBeat. They are grouped in the
following categories:
""")
for doc, _ in SECTIONS:
output.write("* <<exported-fields-{}>>\n".format(doc))
output.write("\n")
docs = yaml.load(input)
for doc, name in SECTIONS:
if doc in docs:
section = docs[doc]
if "type" in section:
if section["type"] == "group":
section["name"] = name
section["anchor"] = doc
document_fields(output, section)
if __name__ == "__main__":
if len(sys.argv) != 3:
print ("Usage: %s file.yml file.asciidoc" % (sys.argv[0]))
sys.exit(1)
input = open(sys.argv[1], 'r')
output = open(sys.argv[2], 'w')
try:
fields_to_asciidoc(input, output)
finally:
input.close()
output.close()
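# Hedged example (added for illustration; the YAML below is hypothetical and
# not taken from this repository's etc/fields.yml).  Given an input such as
#
#   env:
#     type: group
#     fields:
#       - name: hostname
#         type: string
#         description: Host name of the machine the beat is running on.
#
# fields_to_asciidoc() would emit roughly
#
#   [[exported-fields-env]]
#   === Common Fields
#
#   ==== hostname
#
#   type: string
#
#   Host name of the machine the beat is running on.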
| apache-2.0 | -3,599,554,905,244,448,000 | 24.572816 | 77 | 0.629461 | false |
fragaria/BorIS | post_migration_restart.py | 1 | 2733 | """
This is script should bring existing installations in line with the state
in repository. It is supposed to be run after:
1. The migration_restart branch has been merged to master and deployed.
2. south_migrationhistory has been truncated.
3. The initial migrations for clients and services have been faked.
"""
from django.contrib.auth.management import create_permissions
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.core.management import call_command
from django.db.models import get_models, get_app
from boris.services.management import proxy_permissions_fix
# First, create the missing permissions.
create_permissions(get_app('services'), get_models(), 2)
# Then remove the obsolete permissions.
# Obsolete models
contenttypes = (
('services', 'crisisintervention'),
('clients', 'riskybehavior'),
)
for app_label, model in contenttypes:
try:
ct = ContentType.objects.get(app_label=app_label, model=model)
except ContentType.DoesNotExist:
print 'ContentType for %s not found!' % model
else:
qset = Permission.objects.filter(content_type=ct)
print "Deleting %i permissions for %s" % (qset.count(), model)
qset.delete()
# Remove services proxy permissions.
services_ct = ContentType.objects.get(app_label='services', model='service')
codenames = [
'add_utilitywork',
'change_utilitywork',
'delete_utilitywork',
'add_incomeexamination',
'change_incomeexamination',
'delete_incomeexamination',
'add_individualcounselling',
'change_individualcounselling',
'delete_individualcounselling',
'add_phoneusage',
'change_phoneusage',
'delete_phoneusage',
]
print "Deleting the proxy permissions: %s" % ', '.join(codenames)
for codename in codenames:
qset = Permission.objects.filter(codename=codename, content_type=services_ct)
if qset.count() != 1:
print "Something's wrong with the %s permission." % codename
else:
qset.delete()
# Run the proxy permissions fix hook.
services = get_app('services')
proxy_permissions_fix.delete_proxy_permissions(services, get_models(services), 2)
# Delete the obsolete contenttypes.
contenttypes = (
('clients', 'riskybehavior'),
('services', 'practitionerencounter'),
)
for app_label, model in contenttypes:
try:
ct = ContentType.objects.get(app_label=app_label, model=model)
except ContentType.DoesNotExist:
print 'ContentType for %s not found!' % model
else:
print "Deleting contenttype: %s, %s" % (app_label, model)
ct.delete()
# Finally, reload the group permissions fixture.
call_command('loaddata', 'groups.json')
| mit | -3,734,604,905,836,807,700 | 32.329268 | 81 | 0.716429 | false |
ars599/mom5 | test/test_bit_reproducibility.py | 1 | 1741 |
from __future__ import print_function
import os
import sys
import re
from model_test_setup import ModelTestSetup
from test_run import tests as test_specs
class TestBitReproducibility(ModelTestSetup):
def __init__(self):
super(TestBitReproducibility, self).__init__()
def checksums_to_dict(self, filename):
"""
Look at each line an make a dictionary entry.
"""
regex = re.compile(r'\[chksum\]\s+(.*)\s+(-?[0-9]+)$')
dict = {}
with open(filename) as f:
for line in f:
m = regex.match(line)
if m is not None:
dict[m.group(1).rstrip()] = int(m.group(2))
return dict
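    # Added illustration (not part of the original test): checksums_to_dict()
    # expects MOM output lines of the form
    #
    #   [chksum] temp               -1786232671
    #
    # and would return {'temp': -1786232671} for that line; lines that do not
    # match the regex are skipped silently.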
def expected_checksums(self, test_name):
filename = os.path.join(self.my_dir, 'checksums',
'{}.txt'.format(test_name))
return self.checksums_to_dict(filename)
def produced_checksums(self, test_name):
"""
Extract checksums from model run output.
"""
filename = os.path.join(self.work_dir, test_name, 'fms.out')
return self.checksums_to_dict(filename)
def check_run(self, key):
# Compare expected to produced.
expected = self.expected_checksums(key)
produced = self.produced_checksums(key)
for k in expected:
assert(produced.has_key(k))
if expected[k] != produced[k]:
print('{}: expected {}, produced {}'.format(key, expected[k],
produced[k]))
assert(expected[k] == produced[k])
def test_checksums(self):
for k in test_specs.keys():
yield self.check_run, k
| gpl-2.0 | 3,605,980,634,809,117,700 | 26.203125 | 77 | 0.546238 | false |
ATenderholt/cclib | test/bridge/testpyquante.py | 1 | 2276 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2020, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
import unittest
import numpy
from cclib.bridge import cclib2pyquante
from ..test_data import getdatafile
from cclib.parser.utils import find_package
from numpy.testing import assert_array_almost_equal
class PyquanteTest(unittest.TestCase):
"""Tests for the cclib2pyquante bridge in cclib."""
def setUp(self):
super(PyquanteTest, self).setUp()
self._found_pyquante = find_package("PyQuante")
self.data, self.logfile = getdatafile("Gaussian", "basicGaussian16", ["water_ccsd.log"])
def test_makepyquante(self):
# Test older PyQuante bridge
from PyQuante.hartree_fock import hf
from PyQuante.Molecule import Molecule
reference = Molecule(
"h2o",
[(8, (0, 0, 0.119159)), (1, (0, 0.790649, -0.476637)), (1, (0, -0.790649, -0.476637)),],
units="Angstroms",
)
refen, reforbe, reforbs = hf(reference)
pyqmol = cclib2pyquante.makepyquante(self.data)
en, orbe, orbs = hf(pyqmol)
self.assertAlmostEqual(en, refen, delta=1.0e-6)
class pyquante2Test(unittest.TestCase):
"""Tests for the cclib2pyquante bridge in cclib."""
def setUp(self):
super(pyquante2Test, self).setUp()
self._found_pyquante2 = find_package("pyquante2")
self.data, self.logfile = getdatafile("Gaussian", "basicGaussian16", ["water_ccsd.log"])
def test_makepyquante(self):
# Test pyquante2 bridge
from pyquante2 import molecule, rhf, h2o, basisset
bfs = basisset(h2o)
# Copied from water_ccsd.log
refmol = molecule(
[(8, 0.0, 0.0, 0.119159), (1, 0, 0.790649, -0.476637), (1, 0, -0.790649, -0.476637)],
units="Angstroms",
)
refsolver = rhf(refmol, bfs)
refsolver.converge()
pyqmol = cclib2pyquante.makepyquante(self.data)
pyqsolver = rhf(pyqmol, bfs)
pyqsolver.converge()
assert_array_almost_equal(refsolver.energies[-1], pyqsolver.energies[-1], decimal=6)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -8,485,751,219,183,631,000 | 29.756757 | 100 | 0.631371 | false |
Zlash65/erpnext | erpnext/controllers/item_variant.py | 1 | 11842 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import cstr, flt
import json, copy
from six import string_types
class ItemVariantExistsError(frappe.ValidationError): pass
class InvalidItemAttributeValueError(frappe.ValidationError): pass
class ItemTemplateCannotHaveStock(frappe.ValidationError): pass
@frappe.whitelist()
def get_variant(template, args=None, variant=None, manufacturer=None,
manufacturer_part_no=None):
"""Validates Attributes and their Values, then looks for an exactly
matching Item Variant
:param item: Template Item
:param args: A dictionary with "Attribute" as key and "Attribute Value" as value
"""
item_template = frappe.get_doc('Item', template)
if item_template.variant_based_on=='Manufacturer' and manufacturer:
return make_variant_based_on_manufacturer(item_template, manufacturer,
manufacturer_part_no)
else:
if isinstance(args, string_types):
args = json.loads(args)
if not args:
frappe.throw(_("Please specify at least one attribute in the Attributes table"))
return find_variant(template, args, variant)
def make_variant_based_on_manufacturer(template, manufacturer, manufacturer_part_no):
'''Make and return a new variant based on manufacturer and
manufacturer part no'''
from frappe.model.naming import append_number_if_name_exists
variant = frappe.new_doc('Item')
copy_attributes_to_variant(template, variant)
variant.manufacturer = manufacturer
variant.manufacturer_part_no = manufacturer_part_no
variant.item_code = append_number_if_name_exists('Item', template.name)
return variant
def validate_item_variant_attributes(item, args=None):
if isinstance(item, string_types):
item = frappe.get_doc('Item', item)
if not args:
args = {d.attribute.lower():d.attribute_value for d in item.attributes}
attribute_values, numeric_values = get_attribute_values(item)
for attribute, value in args.items():
if not value:
continue
if attribute.lower() in numeric_values:
numeric_attribute = numeric_values[attribute.lower()]
validate_is_incremental(numeric_attribute, attribute, value, item.name)
else:
attributes_list = attribute_values.get(attribute.lower(), [])
validate_item_attribute_value(attributes_list, attribute, value, item.name)
def validate_is_incremental(numeric_attribute, attribute, value, item):
from_range = numeric_attribute.from_range
to_range = numeric_attribute.to_range
increment = numeric_attribute.increment
if increment == 0:
# defensive validation to prevent ZeroDivisionError
frappe.throw(_("Increment for Attribute {0} cannot be 0").format(attribute))
is_in_range = from_range <= flt(value) <= to_range
precision = max(len(cstr(v).split(".")[-1].rstrip("0")) for v in (value, increment))
#avoid precision error by rounding the remainder
remainder = flt((flt(value) - from_range) % increment, precision)
is_incremental = remainder==0 or remainder==increment
if not (is_in_range and is_incremental):
frappe.throw(_("Value for Attribute {0} must be within the range of {1} to {2} in the increments of {3} for Item {4}")\
.format(attribute, from_range, to_range, increment, item),
InvalidItemAttributeValueError, title=_('Invalid Attribute'))
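# Worked example (added commentary; the numbers below are assumed, not taken
# from any real Item Attribute): with from_range=1, to_range=10 and
# increment=0.5, a value of 7.5 gives precision=1 and
# remainder = flt((7.5 - 1) % 0.5, 1) = 0.0, so the check above passes,
# while 7.3 leaves a remainder of 0.3, which is neither 0 nor the increment,
# and validate_is_incremental() raises InvalidItemAttributeValueError.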
def validate_item_attribute_value(attributes_list, attribute, attribute_value, item):
allow_rename_attribute_value = frappe.db.get_single_value('Item Variant Settings', 'allow_rename_attribute_value')
if allow_rename_attribute_value:
pass
elif attribute_value not in attributes_list:
frappe.throw(_("The value {0} is already assigned to an exisiting Item {2}.").format(
attribute_value, attribute, item), InvalidItemAttributeValueError, title=_('Rename Not Allowed'))
def get_attribute_values(item):
if not frappe.flags.attribute_values:
attribute_values = {}
numeric_values = {}
for t in frappe.get_all("Item Attribute Value", fields=["parent", "attribute_value"]):
attribute_values.setdefault(t.parent.lower(), []).append(t.attribute_value)
for t in frappe.get_all('Item Variant Attribute',
fields=["attribute", "from_range", "to_range", "increment"],
filters={'numeric_values': 1, 'parent': item.variant_of}):
numeric_values[t.attribute.lower()] = t
frappe.flags.attribute_values = attribute_values
frappe.flags.numeric_values = numeric_values
return frappe.flags.attribute_values, frappe.flags.numeric_values
def find_variant(template, args, variant_item_code=None):
conditions = ["""(iv_attribute.attribute={0} and iv_attribute.attribute_value={1})"""\
.format(frappe.db.escape(key), frappe.db.escape(cstr(value))) for key, value in args.items()]
conditions = " or ".join(conditions)
from erpnext.portal.product_configurator.utils import get_item_codes_by_attributes
possible_variants = [i for i in get_item_codes_by_attributes(args, template) if i != variant_item_code]
for variant in possible_variants:
variant = frappe.get_doc("Item", variant)
if len(args.keys()) == len(variant.get("attributes")):
# has the same number of attributes and values
# assuming no duplication as per the validation in Item
match_count = 0
for attribute, value in args.items():
for row in variant.attributes:
if row.attribute==attribute and row.attribute_value== cstr(value):
# this row matches
match_count += 1
break
if match_count == len(args.keys()):
return variant.name
@frappe.whitelist()
def create_variant(item, args):
if isinstance(args, string_types):
args = json.loads(args)
template = frappe.get_doc("Item", item)
variant = frappe.new_doc("Item")
variant.variant_based_on = 'Item Attribute'
variant_attributes = []
for d in template.attributes:
variant_attributes.append({
"attribute": d.attribute,
"attribute_value": args.get(d.attribute)
})
variant.set("attributes", variant_attributes)
copy_attributes_to_variant(template, variant)
make_variant_item_code(template.item_code, template.item_name, variant)
return variant
@frappe.whitelist()
def enqueue_multiple_variant_creation(item, args):
# There can be innumerable attribute combinations, enqueue
if isinstance(args, string_types):
variants = json.loads(args)
total_variants = 1
for key in variants:
total_variants *= len(variants[key])
if total_variants >= 600:
frappe.throw(_("Please do not create more than 500 items at a time"))
return
if total_variants < 10:
return create_multiple_variants(item, args)
else:
frappe.enqueue("erpnext.controllers.item_variant.create_multiple_variants",
item=item, args=args, now=frappe.flags.in_test);
return 'queued'
def create_multiple_variants(item, args):
count = 0
if isinstance(args, string_types):
args = json.loads(args)
args_set = generate_keyed_value_combinations(args)
for attribute_values in args_set:
if not get_variant(item, args=attribute_values):
variant = create_variant(item, attribute_values)
variant.save()
count +=1
return count
def generate_keyed_value_combinations(args):
"""
From this:
args = {"attr1": ["a", "b", "c"], "attr2": ["1", "2"], "attr3": ["A"]}
To this:
[
{u'attr1': u'a', u'attr2': u'1', u'attr3': u'A'},
{u'attr1': u'b', u'attr2': u'1', u'attr3': u'A'},
{u'attr1': u'c', u'attr2': u'1', u'attr3': u'A'},
{u'attr1': u'a', u'attr2': u'2', u'attr3': u'A'},
{u'attr1': u'b', u'attr2': u'2', u'attr3': u'A'},
{u'attr1': u'c', u'attr2': u'2', u'attr3': u'A'}
]
"""
# Return empty list if empty
if not args:
return []
# Turn `args` into a list of lists of key-value tuples:
# [
# [(u'attr2', u'1'), (u'attr2', u'2')],
# [(u'attr3', u'A')],
# [(u'attr1', u'a'), (u'attr1', u'b'), (u'attr1', u'c')]
# ]
key_value_lists = [[(key, val) for val in args[key]] for key in args.keys()]
# Store the first, but as objects
# [{u'attr2': u'1'}, {u'attr2': u'2'}]
results = key_value_lists.pop(0)
results = [{d[0]: d[1]} for d in results]
# Iterate the remaining
# Take the next list to fuse with existing results
for l in key_value_lists:
new_results = []
for res in results:
for key_val in l:
# create a new clone of object in result
obj = copy.deepcopy(res)
# to be used with every incoming new value
obj[key_val[0]] = key_val[1]
# and pushed into new_results
new_results.append(obj)
results = new_results
return results
def copy_attributes_to_variant(item, variant):
# copy non no-copy fields
exclude_fields = ["naming_series", "item_code", "item_name", "show_in_website",
"show_variant_in_website", "opening_stock", "variant_of", "valuation_rate"]
if item.variant_based_on=='Manufacturer':
# don't copy manufacturer values if based on part no
exclude_fields += ['manufacturer', 'manufacturer_part_no']
allow_fields = [d.field_name for d in frappe.get_all("Variant Field", fields = ['field_name'])]
if "variant_based_on" not in allow_fields:
allow_fields.append("variant_based_on")
for field in item.meta.fields:
# "Table" is part of `no_value_field` but we shouldn't ignore tables
if (field.reqd or field.fieldname in allow_fields) and field.fieldname not in exclude_fields:
if variant.get(field.fieldname) != item.get(field.fieldname):
if field.fieldtype == "Table":
variant.set(field.fieldname, [])
for d in item.get(field.fieldname):
row = copy.deepcopy(d)
if row.get("name"):
row.name = None
variant.append(field.fieldname, row)
else:
variant.set(field.fieldname, item.get(field.fieldname))
variant.variant_of = item.name
if 'description' not in allow_fields:
if not variant.description:
variant.description = ""
if item.variant_based_on=='Item Attribute':
if variant.attributes:
attributes_description = item.description + " "
for d in variant.attributes:
attributes_description += "<div>" + d.attribute + ": " + cstr(d.attribute_value) + "</div>"
if attributes_description not in variant.description:
variant.description += attributes_description
def make_variant_item_code(template_item_code, template_item_name, variant):
"""Uses template's item code and abbreviations to make variant's item code"""
if variant.item_code:
return
abbreviations = []
for attr in variant.attributes:
item_attribute = frappe.db.sql("""select i.numeric_values, v.abbr
from `tabItem Attribute` i left join `tabItem Attribute Value` v
on (i.name=v.parent)
where i.name=%(attribute)s and (v.attribute_value=%(attribute_value)s or i.numeric_values = 1)""", {
"attribute": attr.attribute,
"attribute_value": attr.attribute_value
}, as_dict=True)
if not item_attribute:
continue
# frappe.throw(_('Invalid attribute {0} {1}').format(frappe.bold(attr.attribute),
# frappe.bold(attr.attribute_value)), title=_('Invalid Attribute'),
# exc=InvalidItemAttributeValueError)
abbr_or_value = cstr(attr.attribute_value) if item_attribute[0].numeric_values else item_attribute[0].abbr
abbreviations.append(abbr_or_value)
if abbreviations:
variant.item_code = "{0}-{1}".format(template_item_code, "-".join(abbreviations))
variant.item_name = "{0}-{1}".format(template_item_name, "-".join(abbreviations))
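# Hedged example (added; the item and abbreviations are invented): for a
# template with item_code "TSHIRT" whose selected attribute values abbreviate
# to "RED" and "XL", make_variant_item_code() sets the variant's item_code to
# "TSHIRT-RED-XL" and builds item_name the same way; numeric attributes
# contribute their literal value instead of an abbreviation.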
@frappe.whitelist()
def create_variant_doc_for_quick_entry(template, args):
variant_based_on = frappe.db.get_value("Item", template, "variant_based_on")
args = json.loads(args)
if variant_based_on == "Manufacturer":
variant = get_variant(template, **args)
else:
existing_variant = get_variant(template, args)
if existing_variant:
return existing_variant
else:
variant = create_variant(template, args=args)
variant.name = variant.item_code
validate_item_variant_attributes(variant, args)
return variant.as_dict()
| gpl-3.0 | 1,849,677,033,109,478,400 | 33.932153 | 121 | 0.708326 | false |
FlorianLudwig/scope | setup.py | 1 | 2287 | # -*- coding: utf-8 -*-
import os
import sys
from distutils.command.sdist import sdist
from setuptools import setup, find_packages
import setuptools.command.test
# Base directory of this setup.py; used by TestCommand.run_tests below.
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
class TestCommand(setuptools.command.test.test):
def finalize_options(self):
setuptools.command.test.test.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
fails = []
from tox._config import parseconfig
from tox._cmdline import Session
config = parseconfig(self.test_args, 'tox')
retcode = Session(config).runcommand()
if retcode != 0:
fails.append('tox returned errors')
import pep8
style_guide = pep8.StyleGuide(config_file=BASE_PATH + '/.pep8')
style_guide.input_dir(BASE_PATH + '/rw')
if style_guide.options.report.get_count() != 0:
            fails.append('pep8 returned errors for rw/')
style_guide = pep8.StyleGuide(config_file=BASE_PATH + '/.pep8')
style_guide.input_dir(BASE_PATH + '/test')
if style_guide.options.report.get_count() != 0:
            fails.append('pep8 returned errors for test/')
if fails:
print('\n'.join(fails))
sys.exit(1)
setup(
name="scope",
version="0.0.1",
    url='https://github.com/FlorianLudwig/scope',
    description='call-stack based, nested dependency injection',
author='Florian Ludwig',
install_requires=['tornado>=4.0.0,<5.0'],
    extras_require={
'test': ['tox', 'pytest', 'pep8'],
'docs': ['sphinx_rtd_theme']
},
packages=['scope'],
include_package_data=True,
package_data={
'rw': ['*.html', '*.css', 'templates/html5', 'templates/form', 'templates/nginx']
},
cmdclass={
'test': TestCommand
},
license="http://www.apache.org/licenses/LICENSE-2.0",
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
)
| apache-2.0 | -363,262,786,118,607,400 | 31.211268 | 89 | 0.601224 | false |
lewisodriscoll/sasview | src/sas/sascalc/simulation/pointsmodelpy/tests/testlores2d.py | 3 | 3235 | from __future__ import print_function
def test_lores2d(phi):
from sasModeling.pointsmodelpy import pointsmodelpy
from sasModeling.iqPy import iqPy
from sasModeling.geoshapespy import geoshapespy
#lores model is to hold several geometric objects
lm = pointsmodelpy.new_loresmodel(0.1)
#generate single geometry shape
c = geoshapespy.new_cylinder(10,40)
geoshapespy.set_center(c,1,1,1)
geoshapespy.set_orientation(c,0,0,0)
#add single geometry shape to lores model
pointsmodelpy.lores_add(lm,c,3.0)
#retrieve the points from lores model for sas calculation
vp = pointsmodelpy.new_point3dvec()
pointsmodelpy.get_lorespoints(lm,vp)
#Calculate I(Q) and P(r) 2D
pointsmodelpy.distdistribution_xy(lm,vp)
pointsmodelpy.outputPR_xy(lm,"out_xy.pr")
iq = iqPy.new_iq(100,0.001, 0.3)
pointsmodelpy.calculateIQ_2D(lm,iq,phi)
iqPy.OutputIQ(iq, "out_xy.iq")
def get2d():
from math import pi
from Numeric import arange,zeros
from enthought.util.numerix import Float,zeros
from sasModeling.file2array import readfile2array
from sasModeling.pointsmodelpy import pointsmodelpy
from sasModeling.geoshapespy import geoshapespy
lm = pointsmodelpy.new_loresmodel(0.1)
sph = geoshapespy.new_sphere(20)
pointsmodelpy.lores_add(lm,sph,1.0)
vp = pointsmodelpy.new_point3dvec()
pointsmodelpy.get_lorespoints(lm,vp)
pointsmodelpy.distdistribution_xy(lm,vp)
value_grid = zeros((100,100),Float)
width, height = value_grid.shape
print(width,height)
I = pointsmodelpy.calculateI_Qxy(lm,0.00001,0.000002)
print(I)
Imax = 0
for i in range(width):
for j in range(height):
qx = float(i-50)/200.0
qy = float(j-50)/200.0
value_grid[i,j] = pointsmodelpy.calculateI_Qxy(lm,qx,qy)
if value_grid[i][j] > Imax:
Imax = value_grid[i][j]
for i in range(width):
for j in range(height):
value_grid[i][j] = value_grid[i][j]/Imax
value_grid[50,50] = 1
return value_grid
def get2d_2():
from math import pi
from Numeric import arange,zeros
from enthought.util.numerix import Float,zeros
from sasModeling.file2array import readfile2array
from sasModeling.pointsmodelpy import pointsmodelpy
from sasModeling.geoshapespy import geoshapespy
lm = pointsmodelpy.new_loresmodel(0.1)
cyn = geoshapespy.new_cylinder(5,20)
geoshapespy.set_orientation(cyn,0,0,90)
pointsmodelpy.lores_add(lm,cyn,1.0)
vp = pointsmodelpy.new_point3dvec()
pointsmodelpy.get_lorespoints(lm,vp)
pointsmodelpy.distdistribution_xy(lm,vp)
value_grid = zeros((100,100),Float)
width, height = value_grid.shape
print(width,height)
I = pointsmodelpy.calculateI_Qxy(lm,0.00001,0.000002)
print(I)
Imax = 0
for i in range(width):
for j in range(height):
qx = float(i-50)/200.0
qy = float(j-50)/200.0
value_grid[i,j] = pointsmodelpy.calculateI_Qxy(lm,qx,qy)
if value_grid[i][j] > Imax:
Imax = value_grid[i][j]
for i in range(width):
for j in range(height):
value_grid[i][j] = value_grid[i][j]/Imax
value_grid[50,50] = 1
return value_grid
if __name__ == "__main__":
print("start to test lores 2D")
# test_lores2d(10)
value_grid = get2d_2()
print(value_grid)
print("pass")
| bsd-3-clause | 863,925,136,733,527,400 | 26.415254 | 62 | 0.705719 | false |
zhaochao/fuel-web | nailgun/nailgun/utils/zabbix.py | 1 | 3931 | # -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import simplejson as json
import urllib2
from nailgun.errors import errors
from nailgun.logger import logger
class ZabbixManager(object):
@classmethod
def _make_zabbix_request(cls, url, method, params, auth=None):
header = {'Content-type': 'application/json'}
data = {'jsonrpc': '2.0',
'id': '1',
'method': method,
'params': params}
if auth:
data['auth'] = auth
logger.debug("Zabbix request: %s", data)
request = urllib2.Request(url, json.dumps(data), header)
try:
response = urllib2.urlopen(request)
except urllib2.URLError as e:
raise errors.CannotMakeZabbixRequest(
"Can't make a request to Zabbix: {0}".format(e)
)
result = json.loads(response.read())
logger.debug("Zabbix response: %s", result)
if 'error' in result:
code = result['error']['code']
msg = result['error']['message']
data = result['error'].get('data', '')
raise errors.ZabbixRequestError(
"Zabbix returned error code {0}, {1}: {2}".format(
code, msg, data
)
)
return result['result']
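    # Illustrative note (added; the host name and id are made up): the
    # JSON-RPC body sent on behalf of _get_zabbix_hostid() looks roughly like
    #
    #   {"jsonrpc": "2.0", "id": "1", "method": "host.get",
    #    "params": {"filter": {"host": "node-1"}}, "auth": "<auth hash>"}
    #
    # and a successful reply carries the matching hosts in result['result'],
    # e.g. [{"hostid": "10105", ...}].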
@classmethod
def _zabbix_auth(cls, url, user, password):
method = 'user.authenticate'
params = {'user': user,
'password': password}
auth_hash = cls._make_zabbix_request(url, method, params)
return auth_hash
@classmethod
def _get_zabbix_hostid(cls, url, auth, name):
method = 'host.get'
params = {'filter': {'host': name}}
result = cls._make_zabbix_request(url, method, params, auth=auth)
if len(result) == 0:
logger.info("Host %s does not exist in zabbix, skipping", name)
return None
return result[0]['hostid']
@classmethod
def remove_from_zabbix(cls, zabbix, nodes):
url = zabbix['url']
user = zabbix['user']
password = zabbix['password']
auth = cls._zabbix_auth(url, user, password)
hostids = []
method = "host.delete"
for node in nodes:
name = node['slave_name']
hostid = cls._get_zabbix_hostid(url, auth, name)
if hostid:
hostids.append(hostid)
if hostids:
cls._make_zabbix_request(url, method, hostids, auth=auth)
@classmethod
def get_zabbix_node(cls, cluster):
zabbix_nodes = filter(
lambda node: filter(
lambda role: role.name == 'zabbix-server',
node.role_list
),
cluster.nodes
)
if not zabbix_nodes:
return None
return zabbix_nodes[0]
@classmethod
def get_zabbix_credentials(cls, cluster):
creds = {}
zabbix_node = cls.get_zabbix_node(cluster)
attributes = cluster.attributes
zabbix_attrs = attributes.editable['zabbix']
creds['user'] = zabbix_attrs['username']['value']
creds['password'] = zabbix_attrs['password']['value']
creds['url'] = "http://{0}/zabbix/api_jsonrpc.php".format(
zabbix_node.ip
)
return creds
| apache-2.0 | -6,961,527,353,915,873,000 | 30.198413 | 78 | 0.568303 | false |
lixiangning888/whole_project | modules/signatures_orginal_20151110/multiple_ua.py | 1 | 2186 | # Copyright (C) 2015 KillerInstinct
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class Multiple_UA(Signature):
name = "multiple_useragents"
description = "Network activity contains more than one unique useragent."
severity = 3
categories = ["network"]
authors = ["KillerInstinct"]
minimum = "1.2"
evented = True
def __init__(self, *args, **kwargs):
Signature.__init__(self, *args, **kwargs)
self.useragents = list()
self.procs = list()
filter_apinames = set(["InternetOpenA", "InternetOpenW"])
def on_call(self, call, process):
# Dict whitelist with process name as key, and useragents as values
whitelist = {
"acrord32.exe": ["Mozilla/3.0 (compatible; Acrobat 5.0; Windows)"],
"iexplore.exe": ["VCSoapClient", "Shockwave Flash"],
}
ua = self.get_argument(call, "Agent")
proc = process["process_name"].lower()
if proc in whitelist.keys() and ua in whitelist[proc]:
return None
else:
if ua not in self.useragents:
if self.results["target"]["category"] == "file" or proc != "iexplore.exe":
self.useragents.append(ua)
self.procs.append((process["process_name"], ua))
def on_complete(self):
if len(self.useragents) < 2:
return False
for item in self.procs:
self.data.append({"Process" : item[0]})
self.data.append({"User-Agent" : item[1]})
return True
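    # Added note (illustrative, not part of the original signature): the
    # signature only fires when at least two distinct user agents are seen,
    # for example one process opening WinINet with "Mozilla/4.0 (compatible)"
    # and another with a custom string; whitelisted pairs such as acrord32.exe
    # with its stock Acrobat agent are already filtered out in on_call().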
| lgpl-3.0 | 7,584,770,034,356,479,000 | 36.050847 | 90 | 0.63495 | false |
maclogan/VirtualPenPal | chatterbot/conversation/statement.py | 1 | 4801 | # -*- coding: utf-8 -*-
from .response import Response
from datetime import datetime
class Statement(object):
"""
A statement represents a single spoken entity, sentence or
phrase that someone can say.
"""
def __init__(self, text, **kwargs):
self.text = text
self.in_response_to = kwargs.pop('in_response_to', [])
# The date and time that this statement was created at
self.created_at = kwargs.pop('created_at', datetime.now())
self.extra_data = kwargs.pop('extra_data', {})
# This is the confidence with which the chat bot believes
# this is an accurate response. This value is set when the
# statement is returned by the chat bot.
self.confidence = 0
self.storage = None
def __str__(self):
return self.text
def __repr__(self):
return '<Statement text:%s>' % (self.text)
def __hash__(self):
return hash(self.text)
def __eq__(self, other):
if not other:
return False
if isinstance(other, Statement):
return self.text == other.text
return self.text == other
def save(self):
"""
Save the statement in the database.
"""
self.storage.update(self)
def add_extra_data(self, key, value):
"""
This method allows additional data to be stored on the statement object.
Typically this data is something that pertains just to this statement.
For example, a value stored here might be the tagged parts of speech for
each word in the statement text.
- key = 'pos_tags'
- value = [('Now', 'RB'), ('for', 'IN'), ('something', 'NN'), ('different', 'JJ')]
:param key: The key to use in the dictionary of extra data.
:type key: str
:param value: The value to set for the specified key.
"""
self.extra_data[key] = value
def add_response(self, response):
"""
Add the response to the list of statements that this statement is in response to.
If the response is already in the list, increment the occurrence count of that response.
:param response: The response to add.
:type response: `Response`
"""
if not isinstance(response, Response):
raise Statement.InvalidTypeException(
                'A {} was received when a {} instance was expected'.format(
type(response),
type(Response(''))
)
)
updated = False
for index in range(0, len(self.in_response_to)):
if response.text == self.in_response_to[index].text:
self.in_response_to[index].occurrence += 1
updated = True
if not updated:
self.in_response_to.append(response)
def remove_response(self, response_text):
"""
Removes a response from the statement's response list based
on the value of the response text.
:param response_text: The text of the response to be removed.
:type response_text: str
"""
for response in self.in_response_to:
if response_text == response.text:
self.in_response_to.remove(response)
return True
return False
def get_response_count(self, statement):
"""
Find the number of times that the statement has been used
as a response to the current statement.
:param statement: The statement object to get the count for.
:type statement: `Statement`
:returns: Return the number of times the statement has been used as a response.
:rtype: int
"""
for response in self.in_response_to:
if statement.text == response.text:
return response.occurrence
return 0
def serialize(self):
"""
:returns: A dictionary representation of the statement object.
:rtype: dict
"""
data = {}
data['text'] = self.text
data['in_response_to'] = []
data['created_at'] = self.created_at
data['extra_data'] = self.extra_data
for response in self.in_response_to:
data['in_response_to'].append(response.serialize())
return data
@property
def response_statement_cache(self):
"""
This property is to allow ChatterBot Statement objects to
be swappable with Django Statement models.
"""
return self.in_response_to
class InvalidTypeException(Exception):
        def __init__(self, value='Received an unexpected value type.'):
self.value = value
def __str__(self):
return repr(self.value)
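# Minimal usage sketch (added for illustration; assumes only this module's
# Response class and that a new Response starts with occurrence == 1):
#
#   >>> s = Statement('Hi there')
#   >>> s.add_response(Response('Hello'))
#   >>> s.add_response(Response('Hello'))
#   >>> s.get_response_count(Statement('Hello'))
#   2
#   >>> sorted(s.serialize().keys())
#   ['created_at', 'extra_data', 'in_response_to', 'text']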
| bsd-3-clause | 6,238,078,572,523,169,000 | 29.579618 | 96 | 0.580712 | false |
mozman/ezdxf | examples/render/render_ellipse.py | 1 | 1255 | # Copyright (c) 2018-2019, Manfred Moitzi
# License: MIT License
from math import radians
import ezdxf
from ezdxf.render.forms import ellipse
from ezdxf.math import Matrix44
NAME = 'ellipse.dxf'
doc = ezdxf.new('R12', setup=True)
msp = doc.modelspace()
def render(points):
msp.add_polyline2d(list(points))
def tmatrix(x, y, angle):
return Matrix44.chain(
Matrix44.z_rotate(radians(angle)),
Matrix44.translate(x, y, 0),
)
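# Note added for illustration: Matrix44.chain applies the rotation first and
# the translation second, so tmatrix(20, 0, 90) maps a point on the x-axis
# such as (5, 0, 0) to approximately (20, 5, 0).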
for axis in [0.5, 0.75, 1., 1.5, 2., 3.]:
render(ellipse(200, rx=5., ry=axis))
attribs = {
'color': 1,
'linetype': 'DASHDOT',
}
msp.add_line((-7, 0), (+7, 0), dxfattribs=attribs)
msp.add_line((0, -5), (0, +5), dxfattribs=attribs)
for rotation in [0, 30, 45, 60, 90]:
m = tmatrix(20, 0, rotation)
render(m.transform_vertices(ellipse(100, rx=5., ry=2.)))
for startangle in [0, 30, 45, 60, 90]:
m = tmatrix(40, 0, startangle)
render(m.transform_vertices(
ellipse(90, rx=5., ry=2., start_param=radians(startangle), end_param= radians(startangle+90)))
)
render(m.transform_vertices(
ellipse(90, rx=5., ry=2., start_param=radians(startangle+180), end_param= radians(startangle+270)))
)
doc.saveas(NAME)
print("drawing '%s' created.\n" % NAME)
| mit | -3,324,645,544,781,303,000 | 24.612245 | 107 | 0.641434 | false |
tochev/obshtestvo.bg | projects/admin.py | 1 | 20632 | from django.contrib import admin
from django.contrib.contenttypes.generic import GenericTabularInline
from django.forms import ModelForm
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from suit.widgets import *
from pagedown.widgets import AdminPagedownWidget
from .models import *
from django import forms
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
import reversion
from suit.admin import SortableTabularInline, SortableModelAdmin
from django.db import models
from django.templatetags.static import static
from django.utils.html import urlize, format_html
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_text
from django.core.exceptions import ValidationError
from django.contrib.admin.options import IncorrectLookupParameters
from guardian.models import UserObjectPermission, GroupObjectPermission
from guardian.shortcuts import assign_perm  # explicit import for the post_save handler below
from django.db.models.signals import post_save, pre_delete
from django.dispatch import receiver
@receiver(post_save, sender=User)
def user_perobject_permissions(sender, instance, created, **kwargs):
if created:
assign_perm('change_user', instance, instance)
@receiver(pre_delete, sender=User)
def remove_user_perobject_permissions(sender, instance, **kwargs):
UserObjectPermission.objects.filter(user_id=instance.pk).delete()
def prepare_lookup_value(key, value):
    if key.endswith('__in') and isinstance(value, basestring):
value = value.split(',')
if key.endswith('__isnull'):
value = not (value.lower() in ('', 'false', '0'))
return value
class MultipleFilter(admin.RelatedFieldListFilter):
# title = _('skills')
# parameter_name = 'skills'
template = 'admin/filter_multiple.html'
def __init__(self, field, request, params, model, model_admin, field_path):
super(MultipleFilter, self).__init__(
field, request, params, model, model_admin, field_path)
self.lookup_val = request.GET.getlist(self.lookup_kwarg, None)
self.used_parameters = {}
for p in self.expected_parameters():
if p in request.GET:
value = request.GET.getlist(p) if self.lookup_kwarg == p else request.GET.get(p)
self.used_parameters[p] = prepare_lookup_value(p, value)
def queryset(self, request, queryset):
try:
if self.lookup_kwarg in self.used_parameters:
for lookup in self.used_parameters[self.lookup_kwarg]:
value = {self.lookup_kwarg: lookup}
queryset = queryset.filter(**value)
else:
queryset.filter(**self.used_parameters)
return queryset
except ValidationError as e:
raise IncorrectLookupParameters(e)
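    # Added illustration (hypothetical query string): filtering members on two
    # skills, e.g. ?skills__id__exact=3&skills__id__exact=7, makes
    # request.GET.getlist() return ['3', '7'], and the loop above chains one
    # .filter() call per value, so only members holding *both* skills remain.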
def choices(self, cl):
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
yield {
'selected': self.lookup_val is None and not self.lookup_val_isnull,
'query_string': cl.get_query_string({},
[self.lookup_kwarg, self.lookup_kwarg_isnull]),
'display': _('All'),
}
for pk_val, val in self.lookup_choices:
yield {
'selected': smart_text(pk_val) in self.lookup_val,
'query_string': cl.get_query_string({
self.lookup_kwarg: pk_val,
}, [self.lookup_kwarg_isnull]),
'display': val,
}
if (isinstance(self.field, models.related.RelatedObject)
and self.field.field.null or hasattr(self.field, 'rel')
and self.field.null):
yield {
'selected': bool(self.lookup_val_isnull),
'query_string': cl.get_query_string({
self.lookup_kwarg_isnull: 'True',
}, [self.lookup_kwarg]),
'display': EMPTY_CHANGELIST_VALUE,
}
def close_link(instance):
if not instance.id:
return ''
url = reverse('admin:%s_%s_change' % (
instance._meta.app_label, instance._meta.module_name), args=[instance.id] ) + 'tools/' + 'toolfunc'
return mark_safe(u'<a href="{u}">Close</a>'.format(u=url))
def avatar(obj):
if (obj.facebook):
url = u'http://graph.facebook.com/%s/picture?width=40&height=40' % obj.facebook.split('=' if 'profile.php' in obj.facebook else '/')[-1]
else:
url = static('img/user-silhouette.png')
return mark_safe(u'<img width="40" height="40" src="%s" />' % url)
# from guardian.admin import GuardedModelAdmin
class UserActivityInline(admin.TabularInline):
model = UserActivity
suit_classes = 'suit-tab suit-tab-activities'
extra = 1
class MyUserChangeForm(UserChangeForm):
class Meta(UserChangeForm.Meta):
model = User
class MyUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = User
class MyUserAdmin(UserAdmin):
form = MyUserChangeForm
inlines = (UserActivityInline,)
add_form = MyUserCreationForm
suit_form_tabs = (
('system', 'System'),
('common', 'Common'),
('activities', 'Activities'),
)
fieldsets = (
(None, {
'classes': ('suit-tab suit-tab-system',),
'fields': ('username', 'password')}
),
(_('Personal info'), {
'classes': ('suit-tab suit-tab-system',),
'fields': ('first_name', 'last_name', 'email')}
),
(_('Permissions'), {
'classes': ('suit-tab suit-tab-system',),
'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')
}),
(_('Important dates'), {
'classes': ('suit-tab suit-tab-system',),
'fields': ('last_login', 'date_joined')}
),
(_('Custom'), {
'classes': ('suit-tab suit-tab-common',),
'fields': ('profession','is_available','available_after','bio', 'avatar')}
),
)
class ProjectActivityFrom(ModelForm):
class Meta:
widgets = {
'can_accomodate': EnclosedInput(append='icon-user', attrs={'class': 'input-mini'}),
}
class ProjectActivityInline(SortableTabularInline):
model = ProjectActivity
suit_classes = 'suit-tab suit-tab-activities'
sortable = 'order'
extra = 0
form = ProjectActivityFrom
def advanced(self, instance):
if not instance.id:
return ''
url = reverse('admin:%s_%s_change' % (
instance._meta.app_label, instance._meta.module_name), args=[instance.id] )
return mark_safe(u'<a href="{u}">Edit</a>'.format(u=url) + ' ' + close_link(instance))
readonly_fields = ('advanced',)
class TaskInline(SortableTabularInline):
model = Task
suit_classes = 'suit-tab suit-tab-tasks'
sortable = 'order'
extra = 0
class ProjectMotiveInline(SortableTabularInline):
model = ProjectMotive
suit_classes = 'suit-tab suit-tab-motives'
sortable = 'order'
extra = 0
class ProjectMilestoneFrom(ModelForm):
class Meta:
widgets = {
# 'percent': RangeInput(append='%', attrs={"min":1, "max":100}),
'percent': EnclosedInput(append='%', attrs={'class': 'input-mini'}),
'target_date': forms.TextInput(attrs={'class': 'input-mini'}),
}
class ProjectMilestoneInline(SortableTabularInline):
form = ProjectMilestoneFrom
model = ProjectMilestone
suit_classes = 'suit-tab suit-tab-milestones'
sortable = 'order'
extra = 0
class ProjectUsageExampleStepForm(ModelForm):
class Meta:
widgets = {
# 'percent': RangeInput(append='%', attrs={"min":1, "max":100}),
'example_number': EnclosedInput(attrs={'class': 'input-mini'}),
'icon':EnclosedInput(append='icon-heart', attrs={'class': 'input-mini'}),
}
class ProjectUsageExampleStepInline(SortableTabularInline):
model = ProjectUsageExampleStep
suit_classes = 'suit-tab suit-tab-usage-examples'
sortable = 'order'
extra = 0
form = ProjectUsageExampleStepForm
class RangeInput(EnclosedInput):
"""HTML5 Range Input."""
input_type = 'range'
class ProjectAdminForm(ModelForm):
class Meta:
widgets = {
'url': EnclosedInput(prepend='icon-globe'),
'pm_url': EnclosedInput(prepend='icon-globe'),
'facebook_group': EnclosedInput(prepend='icon-globe'),
'github_repo': EnclosedInput(prepend='icon-globe'),
'strategy': AdminPagedownWidget(),
'description': AdminPagedownWidget()
}
class ProjectAdmin(reversion.VersionAdmin, SortableModelAdmin):
list_display = ('name',)
sortable = 'order'
form = ProjectAdminForm
search_fields = ['name']
list_filter = ['is_featured']
prepopulated_fields = {"slug": ("name",)}
inlines = [
ProjectActivityInline,
ProjectMotiveInline,
ProjectUsageExampleStepInline,
ProjectMilestoneInline,
]
suit_form_tabs = (
('general', 'General'),
('strategy', 'Strategy'),
('description', 'Description'),
# ('advanced', 'Advanced Settings'),
('activities', 'Activities'),
('milestones', 'Milestones'),
('motives', 'Motives'),
('usage-examples', 'Usage examples steps'),
)
formfield_overrides = {
models.TextField: {'widget': AutosizedTextarea(attrs={'rows':2, 'cols':50})},
}
fieldsets = (
(None, {
'classes': ('suit-tab suit-tab-general',),
'fields': ('name', 'slug', 'url', 'short_description', 'is_forced_active','is_public','has_static_page',)
}),
('Management', {
'classes': ('suit-tab suit-tab-general',),
'fields': ('pm_url', 'facebook_group', 'github_repo',)
}),
('Media', {
'classes': ('suit-tab suit-tab-general',),
'fields': (
'logo',
'logo_styled',
'logo_thumb',
'cover_image',
'complimenting_color',
)
}),
('Homepage', {
'classes': ('suit-tab suit-tab-general',),
'fields': ('is_featured',)
}),
(None, {
'classes': ('suit-tab suit-tab-strategy',),
'fields': ('strategy',)
}),
(None, {
'classes': ('suit-tab suit-tab-description',),
'fields': (
'description',
)}
),
)
class SkillGroupAdmin(SortableModelAdmin):
list_display = ('name',)
sortable = 'order'
class SkillAdmin(admin.ModelAdmin):
list_display = ('name',)
search_fields = ['name']
class ProjectActivityAdminBase(admin.ModelAdmin):
inlines = (UserActivityInline, TaskInline)
def tools(self, instance):
return close_link(instance)
list_display = ('name', 'project', 'tools')
def toolfunc(self, request, obj):
pass
toolfunc.label = "Close" # optional
toolfunc.short_description = "This will be the tooltip of the button" # optional
hobjectactions = ('toolfunc', )
class ProjectActivityAdmin(ProjectActivityAdminBase, reversion.VersionAdmin):
suit_form_tabs = (
('general', 'General'),
('tasks', 'Tasks'),
('activities', 'User activities'),
)
fieldsets = (
(None, {
'classes': ('suit-tab suit-tab-general',),
'fields': ('name', 'project',)
}),
('Settings', {
'classes': ('suit-tab suit-tab-general',),
'fields': ('is_organisational', 'is_template', 'can_accomodate', )
}),
)
# template, prepopulated forms:
# http://stackoverflow.com/questions/2223375/multiple-modeladmins-views-for-same-model-in-django-admin
# http://stackoverflow.com/questions/936376/prepopulate-django-non-model-form
class ProjectActivityTemplate(ProjectActivity):
class Meta:
proxy = True
class ProjectActivityTemplateForm(forms.ModelForm):
class Meta:
model = ProjectActivityTemplate
is_template = forms.BooleanField(widget=forms.HiddenInput(), initial=1)
order = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
class ProjectActivityTemplateAdmin(ProjectActivityAdminBase):
form = ProjectActivityTemplateForm
inlines = []
fields = ('name', 'is_organisational', 'can_accomodate', 'order', 'is_template',)
def queryset(self, request):
return self.model.objects.filter(is_template=True)
class MemberAdminFrom(forms.ModelForm):
class Meta:
widgets = {
'facebook':EnclosedInput(prepend='icon-share'),
'email':EnclosedInput(prepend='icon-envelope'),
# 'types': autocomplete_light.MultipleChoiceWidget(autocomplete='MemberTypeAutocomplete'),
# 'skills': autocomplete_light.MultipleChoiceWidget(autocomplete='SkillAutocomplete'),
# 'projects_interests': autocomplete_light.MultipleChoiceWidget(autocomplete='ProjectAutocomplete'),
}
class MemberAdmin(admin.ModelAdmin):
model = Member
form = MemberAdminFrom
ordering = ('name',)
search_fields = ['name']
list_filter = ('projects_interests', ('skills', MultipleFilter),'types', 'last_contacted_at')
list_display = (avatar, 'name', 'facebook_as_link', 'email', 'skills_display')
suit_form_tabs = (
('general', _('General')),
('specifics', _('Specifics')),
# ('integration', _('System')),
)
formfield_overrides = {
models.TextField: {'widget': AutosizedTextarea(attrs={'rows':2, 'cols':50})},
models.DateTimeField: {'widget': SuitSplitDateTimeWidget},
models.DateField: {'widget': SuitDateWidget},
}
def skills_display(self, member):
return ', '.join([obj.name for obj in member.skills.all()])
skills_display.short_description = _('skills')
def facebook_as_link(self, obj):
return format_html(urlize(obj.facebook))
facebook_as_link.short_description = 'Facebook'
fieldsets = (
(None, {
'classes': ('suit-tab suit-tab-general',),
'fields': ('name', 'facebook', 'email', 'date_joined',)
}),
(_('Expectations'), {
'classes': ('suit-tab suit-tab-general',),
'fields': ( 'availability', 'available_after', )
}),
(_("Member's preferences"), {
'classes': ('suit-tab suit-tab-general',),
'fields': ('skills', 'types', 'projects_interests','offered_help', )
}),
(_('Self-description & Comments'), {
'classes': ('suit-tab suit-tab-general',),
'fields': ('intro', 'comment')
}),
(_('Communication'), {
'classes': ('suit-tab suit-tab-specifics',),
'fields': ('last_contacted_at', 'latest_answer', 'contact_frequency', )
}),
# ('User', {
# 'classes': ('suit-tab suit-tab-integration',),
# 'fields': ('user', 'update_from_user')
# }),
)
# from guardian.admin import GuardedModelAdmin
class UpdateInline(GenericTabularInline):
model = Update
suit_classes = 'suit-tab suit-tab-updates'
extra = 0
formfield_overrides = {
models.TextField: {'widget': AutosizedTextarea(attrs={'rows':1, 'cols':100})},
models.DateTimeField: {'widget': SuitSplitDateTimeWidget},
models.DateField: {'widget': SuitDateWidget},
}
class OrganisationAdmin(admin.ModelAdmin):
model = Organisation
inlines = (UpdateInline,)
list_filter = ('middlemen',('types', MultipleFilter))
list_display = ('name','representatives', 'types_display')
search_fields = ['name']
suit_form_tabs = (
('general', _('General')),
('updates', _('Updates')),
)
formfield_overrides = {
models.TextField: {'widget': AutosizedTextarea(attrs={'rows':3, 'cols':70})},
models.DateTimeField: {'widget': SuitSplitDateTimeWidget},
models.DateField: {'widget': SuitDateWidget},
}
def types_display(self, org):
return ', '.join([obj.name for obj in org.types.all()])
types_display.short_description = _('relation type')
fieldsets = (
(None, {
'classes': ('suit-tab suit-tab-general',),
'fields': ('name', 'types','strategy')
}),
(_('Contact'), {
'classes': ('suit-tab suit-tab-general',),
'fields': ('middlemen', 'representatives', 'contact', )
}),
(_('About'), {
'classes': ('suit-tab suit-tab-general',),
'fields': ('comment', 'found_via', 'working_with', )
}),
(_('Partner'), {
'classes': ('suit-tab suit-tab-general',),
'fields': ('partnered_project', 'provided_help',)
}),
)
class SponsorOrg(Organisation):
class Meta:
proxy = True
verbose_name = Organisation._meta.verbose_name
verbose_name_plural = Organisation._meta.verbose_name
class SponsorOrgAdmin(OrganisationAdmin):
def has_add_permission(self, request):
return False
def queryset(self, request):
return self.model.objects.filter(types__id=2)
class PartnerOrg(Organisation):
class Meta:
proxy = True
verbose_name = Organisation._meta.verbose_name
verbose_name_plural = Organisation._meta.verbose_name
class PartnerOrgAdmin(OrganisationAdmin):
def has_add_permission(self, request):
return False
def queryset(self, request):
return self.model.objects.exclude(partnered_project=None)
class AvailableMember(Member):
class Meta:
proxy = True
verbose_name = Member._meta.verbose_name
verbose_name_plural = Member._meta.verbose_name
class AvailableMemberAdmin(MemberAdmin):
def has_add_permission(self, request):
return False
def queryset(self, request):
return self.model.objects.filter(availability=Member.AVAILABLE)
class PaidMember(Member):
class Meta:
proxy = True
verbose_name = Member._meta.verbose_name
verbose_name_plural = Member._meta.verbose_name
class PaidMemberAdmin(MemberAdmin):
def has_add_permission(self, request):
return False
def queryset(self, request):
return self.model.objects.filter(availability=Member.ONLY_PAID)
class ReaderMember(Member):
class Meta:
proxy = True
verbose_name = Member._meta.verbose_name
verbose_name_plural = Member._meta.verbose_name
class ReaderMemberAdmin(MemberAdmin):
def has_add_permission(self, request):
return False
def queryset(self, request):
return self.model.objects.filter(availability=Member.ONLY_READER)
class EventAdmin(admin.ModelAdmin):
model = Event
ordering = ('name',)
search_fields = ['name']
list_filter = ('date', ('organizers', MultipleFilter))
list_display = ('name', 'date')
suit_form_tabs = (
('general', _('General')),
# ('integration', _('System')),
)
formfield_overrides = {
models.TextField: {'widget': AutosizedTextarea(attrs={'rows':2, 'cols':60})},
models.DateTimeField: {'widget': SuitSplitDateTimeWidget},
models.DateField: {'widget': SuitDateWidget},
}
fieldsets = (
(None, {
'classes': ('suit-tab suit-tab-general',),
'fields': ('name', 'date', 'contact')
}),
(_('details'), {
'classes': ('suit-tab suit-tab-general',),
'fields': ( 'strategy', 'organizers', 'comment')
}),
)
admin.site.register(Organisation, OrganisationAdmin)
admin.site.register(SponsorOrg, SponsorOrgAdmin)
admin.site.register(PartnerOrg, PartnerOrgAdmin)
admin.site.register(Event, EventAdmin)
admin.site.register(Member, MemberAdmin)
admin.site.register(ReaderMember, ReaderMemberAdmin)
admin.site.register(AvailableMember, AvailableMemberAdmin)
admin.site.register(PaidMember, PaidMemberAdmin)
admin.site.register(OrganisationType)
admin.site.register(MemberType)
admin.site.register(Skill, SkillAdmin)
admin.site.register(SkillGroup, SkillGroupAdmin)
admin.site.register(UserProjectPause)
admin.site.register(ProjectActivity, ProjectActivityAdmin)
admin.site.register(ProjectActivityTemplate, ProjectActivityTemplateAdmin)
admin.site.register(Project, ProjectAdmin)
admin.site.register(User, MyUserAdmin)
admin.site.register(UserActivity)
| unlicense | -5,973,505,790,476,279,000 | 33.61745 | 148 | 0.612253 | false |
MarkWh1te/xueqiu_predict | crawler/stock.py | 1 | 2487 | from flask import Flask
from flask import render_template, request
from utils import Base,engine
from sqlalchemy.orm import scoped_session, sessionmaker
from models import Stock, StockDetail
from flask_bootstrap import Bootstrap
from flask import Blueprint
from flask_paginate import Pagination,get_page_args
from sqlalchemy import desc
def create_app():
app = Flask(__name__)
Bootstrap(app)
return app
app = create_app()
# @app.route('/')
# def hello_world():
# return 'Hello, World!'
@app.route('/detail/<stock_id>')
def detail(stock_id):
print(stock_id)
page = request.args.get('page', type=int, default=1)
per_page = 15
if per_page:
stocks = StockDetail.query.filter(StockDetail.stock_id == stock_id).\
order_by(desc(StockDetail.create_time)).limit(per_page)
if page:
stocks = stocks.offset((page-1)*per_page)
pagination = Pagination(page=page,
per_page=per_page,
# total=stocks.count(),
total = StockDetail.query.filter(StockDetail.stock_id == stock_id).count(),
record_name='record',
format_total=True,
format_number=True,
css_framework="bootstrap3"
)
return render_template('detail.html',
stocks=stocks,
page=page,
per_page=per_page,
pagination=pagination)
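# Added note (illustrative numbers): with per_page=15, a request for
# /detail/<stock_id>?page=3 keeps the newest records first and skips
# (3 - 1) * 15 = 30 rows via offset(), so rows 31-45 are rendered while
# flask-paginate builds the page links from the total count.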
@app.route('/')
def index():
# stocks = Stock.query.all()
page = request.args.get('page', type=int, default=1)
per_page = 15
if per_page:
stocks = Stock.query.limit(per_page)
if page:
stocks = stocks.offset((page-1)*per_page)
pagination = Pagination(page=page,
per_page=per_page,
total=Stock.query.count(),
record_name='stocks',
format_total=True,
format_number=True,
css_framework="bootstrap3"
)
return render_template('index.html',
stocks=stocks,
page=page,
per_page=per_page,
pagination=pagination)
if __name__ == "__main__":
app.run(host='0.0.0.0')
| mit | -7,122,884,169,626,361,000 | 32.608108 | 103 | 0.51347 | false |
jsilter/scipy | scipy/linalg/special_matrices.py | 1 | 27627 | from __future__ import division, print_function, absolute_import
import math
import numpy as np
from scipy.lib.six import xrange
from scipy.lib.six import string_types
__all__ = ['tri', 'tril', 'triu', 'toeplitz', 'circulant', 'hankel',
'hadamard', 'leslie', 'all_mat', 'kron', 'block_diag', 'companion',
'hilbert', 'invhilbert', 'pascal', 'invpascal', 'dft']
#-----------------------------------------------------------------------------
# matrix construction functions
#-----------------------------------------------------------------------------
#
# *Note*: tri{,u,l} is implemented in numpy, but an important bug was fixed in
# 2.0.0.dev-1af2f3, the following tri{,u,l} definitions are here for backwards
# compatibility.
def tri(N, M=None, k=0, dtype=None):
"""
Construct (N, M) matrix filled with ones at and below the k-th diagonal.
The matrix has A[i,j] == 1 for i <= j + k
Parameters
----------
N : integer
The size of the first dimension of the matrix.
M : integer or None
The size of the second dimension of the matrix. If `M` is None,
`M = N` is assumed.
k : integer
Number of subdiagonal below which matrix is filled with ones.
`k` = 0 is the main diagonal, `k` < 0 subdiagonal and `k` > 0
superdiagonal.
dtype : dtype
Data type of the matrix.
Returns
-------
tri : (N, M) ndarray
Tri matrix.
Examples
--------
>>> from scipy.linalg import tri
>>> tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> tri(3, 5, -1, dtype=int)
array([[0, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 1, 0, 0, 0]])
"""
if M is None:
M = N
if isinstance(M, string_types):
#pearu: any objections to remove this feature?
# As tri(N,'d') is equivalent to tri(N,dtype='d')
dtype = M
M = N
m = np.greater_equal(np.subtract.outer(np.arange(N), np.arange(M)), -k)
if dtype is None:
return m
else:
return m.astype(dtype)
def tril(m, k=0):
"""
Make a copy of a matrix with elements above the k-th diagonal zeroed.
Parameters
----------
m : array_like
Matrix whose elements to return
k : integer
Diagonal above which to zero elements.
`k` == 0 is the main diagonal, `k` < 0 subdiagonal and
`k` > 0 superdiagonal.
Returns
-------
tril : ndarray
Return is the same shape and type as `m`.
Examples
--------
>>> from scipy.linalg import tril
>>> tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = np.asarray(m)
out = tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype.char) * m
return out
def triu(m, k=0):
"""
Make a copy of a matrix with elements below the k-th diagonal zeroed.
Parameters
----------
m : array_like
Matrix whose elements to return
k : int, optional
Diagonal below which to zero elements.
`k` == 0 is the main diagonal, `k` < 0 subdiagonal and
`k` > 0 superdiagonal.
Returns
-------
triu : ndarray
Return matrix with zeroed elements below the k-th diagonal and has
same shape and type as `m`.
Examples
--------
>>> from scipy.linalg import triu
>>> triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = np.asarray(m)
out = (1 - tri(m.shape[0], m.shape[1], k - 1, m.dtype.char)) * m
return out
def toeplitz(c, r=None):
"""
Construct a Toeplitz matrix.
The Toeplitz matrix has constant diagonals, with c as its first column
and r as its first row. If r is not given, ``r == conjugate(c)`` is
assumed.
Parameters
----------
c : array_like
First column of the matrix. Whatever the actual shape of `c`, it
will be converted to a 1-D array.
r : array_like
First row of the matrix. If None, ``r = conjugate(c)`` is assumed;
in this case, if c[0] is real, the result is a Hermitian matrix.
r[0] is ignored; the first row of the returned matrix is
``[c[0], r[1:]]``. Whatever the actual shape of `r`, it will be
converted to a 1-D array.
Returns
-------
A : (len(c), len(r)) ndarray
The Toeplitz matrix. Dtype is the same as ``(c[0] + r[0]).dtype``.
See also
--------
circulant : circulant matrix
hankel : Hankel matrix
Notes
-----
The behavior when `c` or `r` is a scalar, or when `c` is complex and
`r` is None, was changed in version 0.8.0. The behavior in previous
versions was undocumented and is no longer supported.
Examples
--------
>>> from scipy.linalg import toeplitz
>>> toeplitz([1,2,3], [1,4,5,6])
array([[1, 4, 5, 6],
[2, 1, 4, 5],
[3, 2, 1, 4]])
>>> toeplitz([1.0, 2+3j, 4-1j])
array([[ 1.+0.j, 2.-3.j, 4.+1.j],
[ 2.+3.j, 1.+0.j, 2.-3.j],
[ 4.-1.j, 2.+3.j, 1.+0.j]])
"""
c = np.asarray(c).ravel()
if r is None:
r = c.conjugate()
else:
r = np.asarray(r).ravel()
# Form a 1D array of values to be used in the matrix, containing a reversed
# copy of r[1:], followed by c.
vals = np.concatenate((r[-1:0:-1], c))
a, b = np.ogrid[0:len(c), len(r) - 1:-1:-1]
indx = a + b
# `indx` is a 2D array of indices into the 1D array `vals`, arranged so
# that `vals[indx]` is the Toeplitz matrix.
return vals[indx]
def circulant(c):
"""
Construct a circulant matrix.
Parameters
----------
c : (N,) array_like
1-D array, the first column of the matrix.
Returns
-------
A : (N, N) ndarray
A circulant matrix whose first column is `c`.
See also
--------
toeplitz : Toeplitz matrix
hankel : Hankel matrix
Notes
-----
.. versionadded:: 0.8.0
Examples
--------
>>> from scipy.linalg import circulant
>>> circulant([1, 2, 3])
array([[1, 3, 2],
[2, 1, 3],
[3, 2, 1]])
"""
c = np.asarray(c).ravel()
a, b = np.ogrid[0:len(c), 0:-len(c):-1]
indx = a + b
# `indx` is a 2D array of indices into `c`, arranged so that `c[indx]` is
# the circulant matrix.
return c[indx]
def hankel(c, r=None):
"""
Construct a Hankel matrix.
The Hankel matrix has constant anti-diagonals, with `c` as its
first column and `r` as its last row. If `r` is not given, then
`r = zeros_like(c)` is assumed.
Parameters
----------
c : array_like
First column of the matrix. Whatever the actual shape of `c`, it
will be converted to a 1-D array.
r : array_like
Last row of the matrix. If None, ``r = zeros_like(c)`` is assumed.
r[0] is ignored; the last row of the returned matrix is
``[c[-1], r[1:]]``. Whatever the actual shape of `r`, it will be
converted to a 1-D array.
Returns
-------
A : (len(c), len(r)) ndarray
The Hankel matrix. Dtype is the same as ``(c[0] + r[0]).dtype``.
See also
--------
toeplitz : Toeplitz matrix
circulant : circulant matrix
Examples
--------
>>> from scipy.linalg import hankel
>>> hankel([1, 17, 99])
array([[ 1, 17, 99],
[17, 99, 0],
[99, 0, 0]])
>>> hankel([1,2,3,4], [4,7,7,8,9])
array([[1, 2, 3, 4, 7],
[2, 3, 4, 7, 7],
[3, 4, 7, 7, 8],
[4, 7, 7, 8, 9]])
"""
c = np.asarray(c).ravel()
if r is None:
r = np.zeros_like(c)
else:
r = np.asarray(r).ravel()
# Form a 1D array of values to be used in the matrix, containing `c`
# followed by r[1:].
vals = np.concatenate((c, r[1:]))
a, b = np.ogrid[0:len(c), 0:len(r)]
indx = a + b
# `indx` is a 2D array of indices into the 1D array `vals`, arranged so
# that `vals[indx]` is the Hankel matrix.
return vals[indx]
def hadamard(n, dtype=int):
"""
Construct a Hadamard matrix.
Constructs an n-by-n Hadamard matrix, using Sylvester's
construction. `n` must be a power of 2.
Parameters
----------
n : int
The order of the matrix. `n` must be a power of 2.
dtype : numpy dtype
The data type of the array to be constructed.
Returns
-------
H : (n, n) ndarray
The Hadamard matrix.
Notes
-----
.. versionadded:: 0.8.0
Examples
--------
>>> from scipy.linalg import hadamard
>>> hadamard(2, dtype=complex)
array([[ 1.+0.j, 1.+0.j],
[ 1.+0.j, -1.-0.j]])
>>> hadamard(4)
array([[ 1, 1, 1, 1],
[ 1, -1, 1, -1],
[ 1, 1, -1, -1],
[ 1, -1, -1, 1]])
"""
# This function is a slightly modified version of the
# function contributed by Ivo in ticket #675.
if n < 1:
lg2 = 0
else:
lg2 = int(math.log(n, 2))
if 2 ** lg2 != n:
raise ValueError("n must be an positive integer, and n must be "
"a power of 2")
H = np.array([[1]], dtype=dtype)
# Sylvester's construction
for i in range(0, lg2):
H = np.vstack((np.hstack((H, H)), np.hstack((H, -H))))
return H
def leslie(f, s):
"""
Create a Leslie matrix.
Given the length n array of fecundity coefficients `f` and the length
    n-1 array of survival coefficients `s`, return the associated Leslie matrix.
Parameters
----------
f : (N,) array_like
The "fecundity" coefficients.
s : (N-1,) array_like
The "survival" coefficients, has to be 1-D. The length of `s`
must be one less than the length of `f`, and it must be at least 1.
Returns
-------
L : (N, N) ndarray
The array is zero except for the first row,
which is `f`, and the first sub-diagonal, which is `s`.
The data-type of the array will be the data-type of ``f[0]+s[0]``.
Notes
-----
.. versionadded:: 0.8.0
The Leslie matrix is used to model discrete-time, age-structured
population growth [1]_ [2]_. In a population with `n` age classes, two sets
of parameters define a Leslie matrix: the `n` "fecundity coefficients",
which give the number of offspring per-capita produced by each age
class, and the `n` - 1 "survival coefficients", which give the
per-capita survival rate of each age class.
References
----------
.. [1] P. H. Leslie, On the use of matrices in certain population
mathematics, Biometrika, Vol. 33, No. 3, 183--212 (Nov. 1945)
.. [2] P. H. Leslie, Some further notes on the use of matrices in
population mathematics, Biometrika, Vol. 35, No. 3/4, 213--245
(Dec. 1948)
Examples
--------
>>> from scipy.linalg import leslie
>>> leslie([0.1, 2.0, 1.0, 0.1], [0.2, 0.8, 0.7])
array([[ 0.1, 2. , 1. , 0.1],
[ 0.2, 0. , 0. , 0. ],
[ 0. , 0.8, 0. , 0. ],
[ 0. , 0. , 0.7, 0. ]])
"""
f = np.atleast_1d(f)
s = np.atleast_1d(s)
if f.ndim != 1:
raise ValueError("Incorrect shape for f. f must be one-dimensional")
if s.ndim != 1:
raise ValueError("Incorrect shape for s. s must be one-dimensional")
if f.size != s.size + 1:
raise ValueError("Incorrect lengths for f and s. The length"
" of s must be one less than the length of f.")
if s.size == 0:
raise ValueError("The length of s must be at least 1.")
tmp = f[0] + s[0]
n = f.size
a = np.zeros((n, n), dtype=tmp.dtype)
a[0] = f
a[list(range(1, n)), list(range(0, n - 1))] = s
return a
@np.deprecate
def all_mat(*args):
return list(map(np.matrix, args))
def kron(a, b):
"""
Kronecker product.
The result is the block matrix::
a[0,0]*b a[0,1]*b ... a[0,-1]*b
a[1,0]*b a[1,1]*b ... a[1,-1]*b
...
a[-1,0]*b a[-1,1]*b ... a[-1,-1]*b
Parameters
----------
a : (M, N) ndarray
Input array
b : (P, Q) ndarray
Input array
Returns
-------
A : (M*P, N*Q) ndarray
Kronecker product of `a` and `b`.
Examples
--------
>>> from numpy import array
>>> from scipy.linalg import kron
>>> kron(array([[1,2],[3,4]]), array([[1,1,1]]))
array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
"""
if not a.flags['CONTIGUOUS']:
a = np.reshape(a, a.shape)
if not b.flags['CONTIGUOUS']:
b = np.reshape(b, b.shape)
o = np.outer(a, b)
o = o.reshape(a.shape + b.shape)
return np.concatenate(np.concatenate(o, axis=1), axis=1)
def block_diag(*arrs):
"""
Create a block diagonal matrix from provided arrays.
Given the inputs `A`, `B` and `C`, the output will have these
arrays arranged on the diagonal::
[[A, 0, 0],
[0, B, 0],
[0, 0, C]]
Parameters
----------
A, B, C, ... : array_like, up to 2-D
        Input arrays. A 1-D array or array_like sequence of length `n` is
treated as a 2-D array with shape ``(1,n)``.
Returns
-------
D : ndarray
Array with `A`, `B`, `C`, ... on the diagonal. `D` has the
same dtype as `A`.
Notes
-----
If all the input arrays are square, the output is known as a
block diagonal matrix.
Examples
--------
>>> from scipy.linalg import block_diag
>>> A = [[1, 0],
... [0, 1]]
>>> B = [[3, 4, 5],
... [6, 7, 8]]
>>> C = [[7]]
>>> block_diag(A, B, C)
[[1 0 0 0 0 0]
[0 1 0 0 0 0]
[0 0 3 4 5 0]
[0 0 6 7 8 0]
[0 0 0 0 0 7]]
>>> block_diag(1.0, [2, 3], [[4, 5], [6, 7]])
array([[ 1., 0., 0., 0., 0.],
[ 0., 2., 3., 0., 0.],
[ 0., 0., 0., 4., 5.],
[ 0., 0., 0., 6., 7.]])
"""
if arrs == ():
arrs = ([],)
arrs = [np.atleast_2d(a) for a in arrs]
bad_args = [k for k in range(len(arrs)) if arrs[k].ndim > 2]
if bad_args:
raise ValueError("arguments in the following positions have dimension "
"greater than 2: %s" % bad_args)
shapes = np.array([a.shape for a in arrs])
out = np.zeros(np.sum(shapes, axis=0), dtype=arrs[0].dtype)
r, c = 0, 0
for i, (rr, cc) in enumerate(shapes):
out[r:r + rr, c:c + cc] = arrs[i]
r += rr
c += cc
return out
def companion(a):
"""
Create a companion matrix.
Create the companion matrix [1]_ associated with the polynomial whose
coefficients are given in `a`.
Parameters
----------
a : (N,) array_like
1-D array of polynomial coefficients. The length of `a` must be
at least two, and ``a[0]`` must not be zero.
Returns
-------
c : (N-1, N-1) ndarray
The first row of `c` is ``-a[1:]/a[0]``, and the first
sub-diagonal is all ones. The data-type of the array is the same
as the data-type of ``1.0*a[0]``.
Raises
------
ValueError
If any of the following are true: a) ``a.ndim != 1``;
b) ``a.size < 2``; c) ``a[0] == 0``.
Notes
-----
.. versionadded:: 0.8.0
References
----------
.. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
Cambridge University Press, 1999, pp. 146-7.
Examples
--------
>>> from scipy.linalg import companion
>>> companion([1, -10, 31, -30])
array([[ 10., -31., 30.],
[ 1., 0., 0.],
[ 0., 1., 0.]])
"""
a = np.atleast_1d(a)
if a.ndim != 1:
raise ValueError("Incorrect shape for `a`. `a` must be "
"one-dimensional.")
if a.size < 2:
raise ValueError("The length of `a` must be at least 2.")
if a[0] == 0:
raise ValueError("The first coefficient in `a` must not be zero.")
first_row = -a[1:] / (1.0 * a[0])
n = a.size
c = np.zeros((n - 1, n - 1), dtype=first_row.dtype)
c[0] = first_row
c[list(range(1, n - 1)), list(range(0, n - 2))] = 1
return c
def hilbert(n):
"""
Create a Hilbert matrix of order `n`.
Returns the `n` by `n` array with entries `h[i,j] = 1 / (i + j + 1)`.
Parameters
----------
n : int
The size of the array to create.
Returns
-------
h : (n, n) ndarray
The Hilbert matrix.
See Also
--------
invhilbert : Compute the inverse of a Hilbert matrix.
Notes
-----
.. versionadded:: 0.10.0
Examples
--------
>>> from scipy.linalg import hilbert
>>> hilbert(3)
array([[ 1. , 0.5 , 0.33333333],
[ 0.5 , 0.33333333, 0.25 ],
[ 0.33333333, 0.25 , 0.2 ]])
"""
values = 1.0 / (1.0 + np.arange(2 * n - 1))
h = hankel(values[:n], r=values[n - 1:])
return h
def invhilbert(n, exact=False):
"""
Compute the inverse of the Hilbert matrix of order `n`.
The entries in the inverse of a Hilbert matrix are integers. When `n`
is greater than 14, some entries in the inverse exceed the upper limit
of 64 bit integers. The `exact` argument provides two options for
dealing with these large integers.
Parameters
----------
n : int
The order of the Hilbert matrix.
exact : bool
If False, the data type of the array that is returned is np.float64,
and the array is an approximation of the inverse.
If True, the array is the exact integer inverse array. To represent
the exact inverse when n > 14, the returned array is an object array
of long integers. For n <= 14, the exact inverse is returned as an
array with data type np.int64.
Returns
-------
invh : (n, n) ndarray
The data type of the array is np.float64 if `exact` is False.
If `exact` is True, the data type is either np.int64 (for n <= 14)
or object (for n > 14). In the latter case, the objects in the
array will be long integers.
See Also
--------
hilbert : Create a Hilbert matrix.
Notes
-----
.. versionadded:: 0.10.0
Examples
--------
>>> from scipy.linalg import invhilbert
>>> invhilbert(4)
array([[ 16., -120., 240., -140.],
[ -120., 1200., -2700., 1680.],
[ 240., -2700., 6480., -4200.],
[ -140., 1680., -4200., 2800.]])
>>> invhilbert(4, exact=True)
array([[ 16, -120, 240, -140],
[ -120, 1200, -2700, 1680],
[ 240, -2700, 6480, -4200],
[ -140, 1680, -4200, 2800]], dtype=int64)
>>> invhilbert(16)[7,7]
4.2475099528537506e+19
>>> invhilbert(16, exact=True)[7,7]
42475099528537378560L
"""
from scipy.special import comb
if exact:
if n > 14:
dtype = object
else:
dtype = np.int64
else:
dtype = np.float64
invh = np.empty((n, n), dtype=dtype)
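    # Closed-form entries (0-based indices):
    #   invh[i, j] = (-1)**(i + j) * (i + j + 1) * C(n + i, n - j - 1)
    #                * C(n + j, n - i - 1) * C(i + j, i)**2
    # Only the lower triangle is computed; symmetry fills in the rest.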
for i in xrange(n):
for j in xrange(0, i + 1):
s = i + j
invh[i, j] = ((-1) ** s * (s + 1) *
comb(n + i, n - j - 1, exact) *
comb(n + j, n - i - 1, exact) *
comb(s, i, exact) ** 2)
if i != j:
invh[j, i] = invh[i, j]
return invh
def pascal(n, kind='symmetric', exact=True):
"""
Returns the n x n Pascal matrix.
The Pascal matrix is a matrix containing the binomial coefficients as
its elements.
Parameters
----------
n : int
The size of the matrix to create; that is, the result is an n x n
matrix.
kind : str, optional
Must be one of 'symmetric', 'lower', or 'upper'.
Default is 'symmetric'.
exact : bool, optional
If `exact` is True, the result is either an array of type
numpy.uint64 (if n < 35) or an object array of Python long integers.
If `exact` is False, the coefficients in the matrix are computed using
`scipy.special.comb` with `exact=False`. The result will be a floating
point array, and the values in the array will not be the exact
coefficients, but this version is much faster than `exact=True`.
Returns
-------
p : (n, n) ndarray
The Pascal matrix.
See Also
--------
invpascal
Notes
-----
See http://en.wikipedia.org/wiki/Pascal_matrix for more information
about Pascal matrices.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy.linalg import pascal
>>> pascal(4)
array([[ 1, 1, 1, 1],
[ 1, 2, 3, 4],
[ 1, 3, 6, 10],
[ 1, 4, 10, 20]], dtype=uint64)
>>> pascal(4, kind='lower')
array([[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 2, 1, 0],
[1, 3, 3, 1]], dtype=uint64)
>>> pascal(50)[-1, -1]
25477612258980856902730428600L
>>> from scipy.special import comb
>>> comb(98, 49, exact=True)
25477612258980856902730428600L
"""
from scipy.special import comb
if kind not in ['symmetric', 'lower', 'upper']:
raise ValueError("kind must be 'symmetric', 'lower', or 'upper'")
if exact:
if n >= 35:
L_n = np.empty((n, n), dtype=object)
L_n.fill(0)
else:
L_n = np.zeros((n, n), dtype=np.uint64)
for i in range(n):
for j in range(i + 1):
L_n[i, j] = comb(i, j, exact=True)
else:
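        # Non-exact path: one vectorized comb() call over the open index
        # grids evaluates C(i, j), giving the lower-triangular Pascal matrix
        # as a floating point array.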
L_n = comb(*np.ogrid[:n, :n])
    if kind == 'lower':
p = L_n
    elif kind == 'upper':
p = L_n.T
else:
p = np.dot(L_n, L_n.T)
return p
def invpascal(n, kind='symmetric', exact=True):
"""
Returns the inverse of the n x n Pascal matrix.
The Pascal matrix is a matrix containing the binomial coefficients as
its elements.
Parameters
----------
n : int
The size of the matrix to create; that is, the result is an n x n
matrix.
kind : str, optional
Must be one of 'symmetric', 'lower', or 'upper'.
Default is 'symmetric'.
exact : bool, optional
If `exact` is True, the result is either an array of type
`numpy.int64` (if `n` <= 35) or an object array of Python integers.
If `exact` is False, the coefficients in the matrix are computed using
`scipy.special.comb` with `exact=False`. The result will be a floating
point array, and for large `n`, the values in the array will not be the
exact coefficients.
Returns
-------
invp : (n, n) ndarray
The inverse of the Pascal matrix.
See Also
--------
pascal
Notes
-----
.. versionadded:: 0.16.0
References
----------
.. [1] "Pascal matrix", http://en.wikipedia.org/wiki/Pascal_matrix
.. [2] Cohen, A. M., "The inverse of a Pascal matrix", Mathematical
Gazette, 59(408), pp. 111-112, 1975.
Examples
--------
>>> from scipy.linalg import invpascal, pascal
>>> invp = invpascal(5)
>>> invp
array([[ 5, -10, 10, -5, 1],
[-10, 30, -35, 19, -4],
[ 10, -35, 46, -27, 6],
[ -5, 19, -27, 17, -4],
[ 1, -4, 6, -4, 1]])
>>> p = pascal(5)
>>> p.dot(invp)
array([[ 1., 0., 0., 0., 0.],
[ 0., 1., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 1., 0.],
[ 0., 0., 0., 0., 1.]])
An example of the use of `kind` and `exact`:
>>> invpascal(5, kind='lower', exact=False)
array([[ 1., -0., 0., -0., 0.],
[-1., 1., -0., 0., -0.],
[ 1., -2., 1., -0., 0.],
[-1., 3., -3., 1., -0.],
[ 1., -4., 6., -4., 1.]])
"""
from scipy.special import comb
if kind not in ['symmetric', 'lower', 'upper']:
raise ValueError("'kind' must be 'symmetric', 'lower' or 'upper'.")
if kind == 'symmetric':
if exact:
if n > 34:
dt = object
else:
dt = np.int64
else:
dt = np.float64
invp = np.empty((n, n), dtype=dt)
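        # Entries of the symmetric case follow ref. [2] in the docstring:
        #   invp[i, j] = (-1)**(i - j) * sum_{k=0}^{n-i-1} C(i+k, k) * C(i+k, i+k-j)
        # Only the lower triangle is computed; symmetry fills in the rest.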
for i in range(n):
for j in range(0, i + 1):
v = 0
for k in range(n - i):
v += comb(i + k, k, exact=exact) * comb(i + k, i + k - j,
exact=exact)
invp[i, j] = (-1)**(i - j) * v
if i != j:
invp[j, i] = invp[i, j]
else:
        # For the 'lower' and 'upper' cases, we compute the inverse by
        # changing the sign of every other diagonal of the Pascal matrix.
invp = pascal(n, kind=kind, exact=exact)
if invp.dtype == np.uint64:
            # This cast from np.uint64 to np.int64 is OK, because if `kind` is not
# "symmetric", the values in invp are all much less than 2**63.
invp = invp.view(np.int64)
# The toeplitz matrix has alternating bands of 1 and -1.
invp *= toeplitz((-1)**np.arange(n)).astype(invp.dtype)
return invp
def dft(n, scale=None):
"""
Discrete Fourier transform matrix.
Create the matrix that computes the discrete Fourier transform of a
sequence [1]_. The n-th primitive root of unity used to generate the
matrix is exp(-2*pi*i/n), where i = sqrt(-1).
Parameters
----------
n : int
        Size of the matrix to create.
scale : str, optional
Must be None, 'sqrtn', or 'n'.
If `scale` is 'sqrtn', the matrix is divided by `sqrt(n)`.
If `scale` is 'n', the matrix is divided by `n`.
If `scale` is None (the default), the matrix is not normalized, and the
return value is simply the Vandermonde matrix of the roots of unity.
Returns
-------
m : (n, n) ndarray
The DFT matrix.
Notes
-----
When `scale` is None, multiplying a vector by the matrix returned by
`dft` is mathematically equivalent to (but much less efficient than)
the calculation performed by `scipy.fftpack.fft`.
.. versionadded:: 0.14.0
References
----------
.. [1] "DFT matrix", http://en.wikipedia.org/wiki/DFT_matrix
Examples
--------
>>> np.set_printoptions(precision=5, suppress=True)
>>> x = np.array([1, 2, 3, 0, 3, 2, 1, 0])
>>> m = dft(8)
    >>> m.dot(x)   # Compute the DFT of x
array([ 12.+0.j, -2.-2.j, 0.-4.j, -2.+2.j, 4.+0.j, -2.-2.j,
-0.+4.j, -2.+2.j])
Verify that ``m.dot(x)`` is the same as ``fft(x)``.
>>> from scipy.fftpack import fft
>>> fft(x) # Same result as m.dot(x)
array([ 12.+0.j, -2.-2.j, 0.-4.j, -2.+2.j, 4.+0.j, -2.-2.j,
0.+4.j, -2.+2.j])
"""
if scale not in [None, 'sqrtn', 'n']:
raise ValueError("scale must be None, 'sqrtn', or 'n'; "
"%r is not valid." % (scale,))
omegas = np.exp(-2j * np.pi * np.arange(n) / n).reshape(-1, 1)
m = omegas ** np.arange(n)
if scale == 'sqrtn':
m /= math.sqrt(n)
elif scale == 'n':
m /= n
return m
| bsd-3-clause | 4,770,515,006,507,963,000 | 27.07622 | 79 | 0.508452 | false |
rthouvenin/meteography | meteography/neighbors.py | 1 | 2176 | # -*- coding: utf-8 -*-
"""
Wrapper around sklearn k-neighbors estimators that can work in batches on
pytables arrays (or other disk-backed arrays that support slicing)
"""
import numpy as np
from sklearn.neighbors import NearestNeighbors as SKNN
from meteography.dataset import PIXEL_TYPE
class NearestNeighbors:
BATCH_SIZE = 20 * 1024 * 1024 # 20 Mb
def __init__(self, **kwargs):
self.sknn = SKNN(1, algorithm='brute', **kwargs)
def fit(self, X, y=None):
self.X = X
self.y = y
self.batch_len = max(1, self.BATCH_SIZE // X.shape[1])
self.nb_batch = 0
self.batch = None
if len(X) > 0:
self._reset_nb_batch()
def _reset_nb_batch(self):
old = self.nb_batch
self.nb_batch = len(self.X) // self.batch_len
if len(self.X) % self.batch_len:
self.nb_batch += 1
oldincr = (old > 1)
incr = (self.nb_batch > 1)
if self.batch is None or oldincr != incr:
self.batch = np.empty((self.batch_len+incr, self.X.shape[1]),
dtype=PIXEL_TYPE)
return self.nb_batch
def _get_batch(self, b, extra_row):
start = b * self.batch_len
end = min(start+self.batch_len, len(self.X))
actual_len = end - start
self.batch[:actual_len] = self.X[start:end]
has_extra = 0
if extra_row is not None:
has_extra = 1
self.batch[actual_len] = self.X[extra_row]
if actual_len+has_extra == self.batch.shape[0]:
return self.batch
else:
return self.batch[:actual_len+has_extra]
def predict(self, input_row):
self._reset_nb_batch()
nearest = None
for b in range(self.nb_batch):
batch = self._get_batch(b, nearest)
self.sknn.fit(batch)
i_batch = self.sknn.kneighbors([input_row], return_distance=False)
i_batch = i_batch[0][0]
if i_batch != (batch.shape[0]-1) or b == 0:
nearest = b * self.batch_len + i_batch
if self.y is None:
return nearest
return self.y[nearest]
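if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, not part of the original
    # module): any 2-D array that supports slicing, such as a pytables
    # array, can take the place of the in-memory data used here.
    rng = np.random.RandomState(0)
    X = rng.randint(0, 255, size=(500, 16)).astype('float64')
    y = np.arange(500)
    nn = NearestNeighbors()
    nn.fit(X, y)
    # Expected to print 42: the stored row closest to X[42] is itself.
    print(nn.predict(X[42]))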
| mit | 2,629,343,150,644,379,000 | 30.536232 | 78 | 0.554688 | false |
MTG/essentia | test/src/unittests/standard/test_nsgiconstantq.py | 1 | 4838 | #!/usr/bin/env python
# Copyright (C) 2006-2021 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
import essentia.standard as ess
import numpy as np
testdir = join(filedir(), 'nsgiconstantq')
class TestNSGIConstantQ(TestCase):
def testSynthetiseSine(self):
x = essentia.array(np.sin(2 * np.pi * 1000 * np.arange(2**12) / 44100))
CQ, CQDC, DCNF = NSGConstantQ(inputSize=2**12)(x)
# At the moment data needs to be transformed into a list of lists
CQList= list(CQ)
for i in range(len(CQList)):
CQList[i] = list(CQList[i])
y = NSGIConstantQ(inputSize=2**12)(CQList, CQDC, DCNF)
self.assertAlmostEqualVectorFixedPrecision(x, y, 3)
def testSynthetiseSineLocalPhase(self):
x = essentia.array(np.sin(2 * np.pi * 1000 * np.arange(2**12) / 44100))
CQ, CQDC, DCNF= NSGConstantQ(inputSize=2**12,
phaseMode='local')(x)
# At the moment data needs to be transformed into a list of lists
CQList= list(CQ)
for i in range(len(CQList)):
CQList[i] = list(CQList[i])
y = NSGIConstantQ(inputSize=2**12, phaseMode='local')(CQList, CQDC, DCNF)
self.assertAlmostEqualVectorFixedPrecision(x, y, 3)
def testSynthetiseSineOddSize(self):
# Test the reconstruction capabilities for signals with an odd length.
inputSize = 2 ** 12 + 1
x = essentia.array(np.sin(2 * np.pi * 1000 * np.arange(inputSize) / 44100))
CQ, CQDC, DCNF= NSGConstantQ(inputSize=inputSize)(x)
y = NSGIConstantQ(inputSize=inputSize)(CQ, CQDC, DCNF)
self.assertAlmostEqualVectorFixedPrecision(x, y, 5)
def testSynthetiseDC(self):
x = essentia.array(np.ones(2**12))
CQ, CQDC, DCNF = NSGConstantQ(inputSize=2**12)(x)
CQList= list(CQ)
for i in range(len(CQList)):
CQList[i] = list(CQList[i])
y = NSGIConstantQ(inputSize=2**12)(CQList, CQDC, DCNF)
self.assertAlmostEqualVectorFixedPrecision(x, y, 1)
def testInvalidParam(self):
self.assertConfigureFails(NSGIConstantQ(), {'phaseMode': 'none'})
self.assertConfigureFails(NSGIConstantQ(), {'inputSize': -1})
self.assertConfigureFails(NSGIConstantQ(), {'inputSize': 0})
self.assertConfigureFails(NSGIConstantQ(), {'minFrequency': 30000})
self.assertConfigureFails(NSGIConstantQ(), {'minFrequency': 1000,
'maxFrequency': 500})
self.assertConfigureFails(NSGIConstantQ(), {'maxFrequency': 0})
self.assertConfigureFails(NSGIConstantQ(), {'binsPerOctave': 0})
self.assertConfigureFails(NSGIConstantQ(), {'sampleRate': 0})
self.assertConfigureFails(NSGIConstantQ(), {'gamma': -1})
self.assertConfigureFails(NSGIConstantQ(), {'minimumWindow': 1})
self.assertConfigureFails(NSGIConstantQ(), {'windowSizeFactor': 0})
self.assertConfigureFails(NSGIConstantQ(), {'minimumWindow': 1})
def testReconfigure(self):
# The configuration of this algorithm is done the first time it is computed and
# it will automatically change each time the input vectors modify their length.
x = essentia.array(np.sin(2 * np.pi * 1000 * np.arange(2**12) / 44100))
CQ, CQDC, DCNF = NSGConstantQ(inputSize=2**12)(x)
CQList= list(CQ)
for i in range(len(CQList)):
CQList[i] = list(CQList[i])
nsgiconstantq = NSGIConstantQ()
nsgiconstantq(CQList, CQDC, DCNF)
        # Reuse the algorithm with different input shapes
x = essentia.array(np.sin(2 * np.pi * 1000 * np.arange(2**13) / 44100))
CQ, CQDC, DCNF = NSGConstantQ(inputSize=2**13)(x)
CQList= list(CQ)
for i in range(len(CQList)):
CQList[i] = list(CQList[i])
nsgiconstantq = NSGIConstantQ()
nsgiconstantq(CQList, CQDC, DCNF)
suite = allTests(TestNSGIConstantQ)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
| agpl-3.0 | -3,107,686,161,840,203,300 | 36.215385 | 89 | 0.641794 | false |
nickretallack/babel | babel/messages/pofile.py | 1 | 17024 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Reading and writing of files in the ``gettext`` PO (portable object)
format.
:see: `The Format of PO Files
<http://www.gnu.org/software/gettext/manual/gettext.html#PO-Files>`_
"""
import os
import re
from babel.messages.catalog import Catalog, Message
from babel.util import wraptext
from babel._compat import text_type
__all__ = ['read_po', 'write_po']
def unescape(string):
r"""Reverse `escape` the given string.
>>> print unescape('"Say:\\n \\"hello, world!\\"\\n"')
Say:
"hello, world!"
<BLANKLINE>
:param string: the string to unescape
:return: the unescaped string
"""
def replace_escapes(match):
m = match.group(1)
if m == 'n':
return '\n'
elif m == 't':
return '\t'
elif m == 'r':
return '\r'
# m is \ or "
return m
return re.compile(r'\\([\\trn"])').sub(replace_escapes, string[1:-1])
def denormalize(string):
r"""Reverse the normalization done by the `normalize` function.
>>> print denormalize(r'''""
... "Say:\n"
... " \"hello, world!\"\n"''')
Say:
"hello, world!"
<BLANKLINE>
>>> print denormalize(r'''""
... "Say:\n"
... " \"Lorem ipsum dolor sit "
... "amet, consectetur adipisicing"
... " elit, \"\n"''')
Say:
"Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
<BLANKLINE>
:param string: the string to denormalize
:return: the denormalized string
:rtype: `unicode` or `str`
"""
if '\n' in string:
escaped_lines = string.splitlines()
if string.startswith('""'):
escaped_lines = escaped_lines[1:]
lines = map(unescape, escaped_lines)
return ''.join(lines)
else:
return unescape(string)
def read_po(fileobj, locale=None, domain=None, ignore_obsolete=False, charset=None):
"""Read messages from a ``gettext`` PO (portable object) file from the given
file-like object and return a `Catalog`.
>>> from datetime import datetime
>>> from StringIO import StringIO
>>> buf = StringIO('''
... #: main.py:1
... #, fuzzy, python-format
... msgid "foo %(name)s"
... msgstr "quux %(name)s"
...
... # A user comment
... #. An auto comment
... #: main.py:3
... msgid "bar"
... msgid_plural "baz"
... msgstr[0] "bar"
... msgstr[1] "baaz"
... ''')
>>> catalog = read_po(buf)
>>> catalog.revision_date = datetime(2007, 04, 01)
>>> for message in catalog:
... if message.id:
... print (message.id, message.string)
... print ' ', (message.locations, message.flags)
... print ' ', (message.user_comments, message.auto_comments)
(u'foo %(name)s', u'quux %(name)s')
([(u'main.py', 1)], set([u'fuzzy', u'python-format']))
([], [])
((u'bar', u'baz'), (u'bar', u'baaz'))
([(u'main.py', 3)], set([]))
([u'A user comment'], [u'An auto comment'])
.. versionadded:: 1.0
Added support for explicit charset argument.
:param fileobj: the file-like object to read the PO file from
:param locale: the locale identifier or `Locale` object, or `None`
if the catalog is not bound to a locale (which basically
means it's a template)
:param domain: the message domain
:param ignore_obsolete: whether to ignore obsolete messages in the input
:param charset: the character set of the catalog.
:return: a catalog object representing the parsed PO file
:rtype: `Catalog`
"""
catalog = Catalog(locale=locale, domain=domain, charset=charset)
counter = [0]
offset = [0]
messages = []
translations = []
locations = []
flags = []
user_comments = []
auto_comments = []
obsolete = [False]
context = []
in_msgid = [False]
in_msgstr = [False]
in_msgctxt = [False]
def _add_message():
translations.sort()
if len(messages) > 1:
msgid = tuple([denormalize(m) for m in messages])
else:
msgid = denormalize(messages[0])
if isinstance(msgid, (list, tuple)):
string = []
for idx in range(catalog.num_plurals):
try:
string.append(translations[idx])
except IndexError:
string.append((idx, ''))
string = tuple([denormalize(t[1]) for t in string])
else:
string = denormalize(translations[0][1])
if context:
msgctxt = denormalize('\n'.join(context))
else:
msgctxt = None
message = Message(msgid, string, list(locations), set(flags),
auto_comments, user_comments, lineno=offset[0] + 1,
context=msgctxt)
if obsolete[0]:
if not ignore_obsolete:
catalog.obsolete[msgid] = message
else:
catalog[msgid] = message
del messages[:]; del translations[:]; del context[:]; del locations[:];
del flags[:]; del auto_comments[:]; del user_comments[:];
obsolete[0] = False
counter[0] += 1
def _process_message_line(lineno, line):
if line.startswith('msgid_plural'):
in_msgid[0] = True
msg = line[12:].lstrip()
messages.append(msg)
elif line.startswith('msgid'):
in_msgid[0] = True
offset[0] = lineno
txt = line[5:].lstrip()
if messages:
_add_message()
messages.append(txt)
elif line.startswith('msgstr'):
in_msgid[0] = False
in_msgstr[0] = True
msg = line[6:].lstrip()
if msg.startswith('['):
idx, msg = msg[1:].split(']', 1)
translations.append([int(idx), msg.lstrip()])
else:
translations.append([0, msg])
elif line.startswith('msgctxt'):
if messages:
_add_message()
in_msgid[0] = in_msgstr[0] = False
context.append(line[7:].lstrip())
elif line.startswith('"'):
if in_msgid[0]:
messages[-1] += u'\n' + line.rstrip()
elif in_msgstr[0]:
translations[-1][1] += u'\n' + line.rstrip()
elif in_msgctxt[0]:
context.append(line.rstrip())
for lineno, line in enumerate(fileobj.readlines()):
line = line.strip()
if not isinstance(line, text_type):
line = line.decode(catalog.charset)
if line.startswith('#'):
in_msgid[0] = in_msgstr[0] = False
if messages and translations:
_add_message()
if line[1:].startswith(':'):
for location in line[2:].lstrip().split():
pos = location.rfind(':')
if pos >= 0:
try:
lineno = int(location[pos + 1:])
except ValueError:
continue
locations.append((location[:pos], lineno))
elif line[1:].startswith(','):
for flag in line[2:].lstrip().split(','):
flags.append(flag.strip())
elif line[1:].startswith('~'):
obsolete[0] = True
_process_message_line(lineno, line[2:].lstrip())
elif line[1:].startswith('.'):
# These are called auto-comments
comment = line[2:].strip()
if comment: # Just check that we're not adding empty comments
auto_comments.append(comment)
else:
# These are called user comments
user_comments.append(line[1:].strip())
else:
_process_message_line(lineno, line)
if messages:
_add_message()
# No actual messages found, but there was some info in comments, from which
# we'll construct an empty header message
elif not counter[0] and (flags or user_comments or auto_comments):
messages.append(u'')
translations.append([0, u''])
_add_message()
return catalog
WORD_SEP = re.compile('('
r'\s+|' # any whitespace
r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w)' # em-dash
')')
def escape(string):
r"""Escape the given string so that it can be included in double-quoted
strings in ``PO`` files.
>>> escape('''Say:
... "hello, world!"
... ''')
'"Say:\\n \\"hello, world!\\"\\n"'
:param string: the string to escape
:return: the escaped string
"""
return '"%s"' % string.replace('\\', '\\\\') \
.replace('\t', '\\t') \
.replace('\r', '\\r') \
.replace('\n', '\\n') \
.replace('\"', '\\"')
def normalize(string, prefix='', width=76):
r"""Convert a string into a format that is appropriate for .po files.
>>> print normalize('''Say:
... "hello, world!"
... ''', width=None)
""
"Say:\n"
" \"hello, world!\"\n"
>>> print normalize('''Say:
... "Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
... ''', width=32)
""
"Say:\n"
" \"Lorem ipsum dolor sit "
"amet, consectetur adipisicing"
" elit, \"\n"
:param string: the string to normalize
:param prefix: a string that should be prepended to every line
:param width: the maximum line width; use `None`, 0, or a negative number
to completely disable line wrapping
:return: the normalized string
"""
if width and width > 0:
prefixlen = len(prefix)
lines = []
for line in string.splitlines(True):
if len(escape(line)) + prefixlen > width:
chunks = WORD_SEP.split(line)
chunks.reverse()
while chunks:
buf = []
size = 2
while chunks:
l = len(escape(chunks[-1])) - 2 + prefixlen
if size + l < width:
buf.append(chunks.pop())
size += l
else:
if not buf:
# handle long chunks by putting them on a
# separate line
buf.append(chunks.pop())
break
lines.append(u''.join(buf))
else:
lines.append(line)
else:
lines = string.splitlines(True)
if len(lines) <= 1:
return escape(string)
# Remove empty trailing line
if lines and not lines[-1]:
del lines[-1]
lines[-1] += '\n'
return u'""\n' + u'\n'.join([(prefix + escape(l)) for l in lines])
def write_po(fileobj, catalog, width=76, no_location=False, omit_header=False,
sort_output=False, sort_by_file=False, ignore_obsolete=False,
include_previous=False):
r"""Write a ``gettext`` PO (portable object) template file for a given
message catalog to the provided file-like object.
>>> catalog = Catalog()
>>> catalog.add(u'foo %(name)s', locations=[('main.py', 1)],
... flags=('fuzzy',))
<Message...>
>>> catalog.add((u'bar', u'baz'), locations=[('main.py', 3)])
<Message...>
>>> from io import BytesIO
>>> buf = BytesIO()
>>> write_po(buf, catalog, omit_header=True)
>>> print buf.getvalue()
#: main.py:1
#, fuzzy, python-format
msgid "foo %(name)s"
msgstr ""
<BLANKLINE>
#: main.py:3
msgid "bar"
msgid_plural "baz"
msgstr[0] ""
msgstr[1] ""
<BLANKLINE>
<BLANKLINE>
:param fileobj: the file-like object to write to
:param catalog: the `Catalog` instance
:param width: the maximum line width for the generated output; use `None`,
0, or a negative number to completely disable line wrapping
:param no_location: do not emit a location comment for every message
:param omit_header: do not include the ``msgid ""`` entry at the top of the
output
:param sort_output: whether to sort the messages in the output by msgid
:param sort_by_file: whether to sort the messages in the output by their
locations
:param ignore_obsolete: whether to ignore obsolete messages and not include
them in the output; by default they are included as
comments
:param include_previous: include the old msgid as a comment when
updating the catalog
"""
def _normalize(key, prefix=''):
return normalize(key, prefix=prefix, width=width)
def _write(text):
if isinstance(text, text_type):
text = text.encode(catalog.charset, 'backslashreplace')
fileobj.write(text)
def _write_comment(comment, prefix=''):
# xgettext always wraps comments even if --no-wrap is passed;
# provide the same behaviour
if width and width > 0:
_width = width
else:
_width = 76
for line in wraptext(comment, _width):
_write('#%s %s\n' % (prefix, line.strip()))
def _write_message(message, prefix=''):
if isinstance(message.id, (list, tuple)):
if message.context:
_write('%smsgctxt %s\n' % (prefix,
_normalize(message.context, prefix)))
_write('%smsgid %s\n' % (prefix, _normalize(message.id[0], prefix)))
_write('%smsgid_plural %s\n' % (
prefix, _normalize(message.id[1], prefix)
))
for idx in range(catalog.num_plurals):
try:
string = message.string[idx]
except IndexError:
string = ''
_write('%smsgstr[%d] %s\n' % (
prefix, idx, _normalize(string, prefix)
))
else:
if message.context:
_write('%smsgctxt %s\n' % (prefix,
_normalize(message.context, prefix)))
_write('%smsgid %s\n' % (prefix, _normalize(message.id, prefix)))
_write('%smsgstr %s\n' % (
prefix, _normalize(message.string or '', prefix)
))
messages = list(catalog)
if sort_output:
messages.sort()
elif sort_by_file:
messages.sort(lambda x,y: cmp(x.locations, y.locations))
for message in messages:
if not message.id: # This is the header "message"
if omit_header:
continue
comment_header = catalog.header_comment
if width and width > 0:
lines = []
for line in comment_header.splitlines():
lines += wraptext(line, width=width,
subsequent_indent='# ')
comment_header = u'\n'.join(lines)
_write(comment_header + u'\n')
for comment in message.user_comments:
_write_comment(comment)
for comment in message.auto_comments:
_write_comment(comment, prefix='.')
if not no_location:
locs = u' '.join([u'%s:%d' % (filename.replace(os.sep, '/'), lineno)
for filename, lineno in message.locations])
_write_comment(locs, prefix=':')
if message.flags:
_write('#%s\n' % ', '.join([''] + sorted(message.flags)))
if message.previous_id and include_previous:
_write_comment('msgid %s' % _normalize(message.previous_id[0]),
prefix='|')
if len(message.previous_id) > 1:
_write_comment('msgid_plural %s' % _normalize(
message.previous_id[1]
), prefix='|')
_write_message(message)
_write('\n')
if not ignore_obsolete:
for message in catalog.obsolete.values():
for comment in message.user_comments:
_write_comment(comment)
_write_message(message, prefix='#~ ')
_write('\n')
| bsd-3-clause | 6,471,569,443,023,267,000 | 34.101031 | 84 | 0.520266 | false |
drallensmith/neat-python | examples/xor/evolve-feedforward-parallel.py | 1 | 3373 | """
A parallel version of XOR using neat.parallel.
Since XOR is a simple experiment, a parallel version probably won't run any
faster than the single-process version, due to the overhead of
inter-process communication.
If your evaluation function is what's taking up most of your processing time
(and you should check by using a profiler while running single-process),
you should see a significant performance improvement by evaluating in parallel.
This example is only intended to show how to do a parallel experiment
in neat-python. You can of course roll your own parallelism mechanism
or inherit from ParallelEvaluator if you need to do something more complicated.
"""
from __future__ import print_function
import math
import os
import time
import neat
import visualize
# 2-input XOR inputs and expected outputs.
xor_inputs = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)]
xor_outputs = [ (0.0,), (1.0,), (1.0,), (0.0,)]
def eval_genome(genome, config):
"""
This function will be run in parallel by ParallelEvaluator. It takes two
arguments (a single genome and the genome class configuration data) and
should return one float (that genome's fitness).
Note that this function needs to be in module scope for multiprocessing.Pool
(which is what ParallelEvaluator uses) to find it. Because of this, make
sure you check for __main__ before executing any code (as we do here in the
last few lines in the file), otherwise you'll have made a fork bomb
instead of a neuroevolution demo. :)
"""
net = neat.nn.FeedForwardNetwork.create(genome, config)
error = 4.0
for xi, xo in zip(xor_inputs, xor_outputs):
output = net.activate(xi)
error -= (output[0] - xo[0]) ** 2
return error
def run(config_file):
# Load configuration.
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
config_file)
# Create the population, which is the top-level object for a NEAT run.
p = neat.Population(config)
# Add a stdout reporter to show progress in the terminal.
p.add_reporter(neat.StdOutReporter(True))
stats = neat.StatisticsReporter()
p.add_reporter(stats)
# Run for up to 300 generations.
pe = neat.ParallelEvaluator(4, eval_genome)
winner = p.run(pe.evaluate, 300)
# Display the winning genome.
print('\nBest genome:\n{!s}'.format(winner))
# Show output of the most fit genome against training data.
print('\nOutput:')
winner_net = neat.nn.FeedForwardNetwork.create(winner, config)
for xi, xo in zip(xor_inputs, xor_outputs):
output = winner_net.activate(xi)
print("input {!r}, expected output {!r}, got {!r}".format(xi, xo, output))
node_names = {-1:'A', -2: 'B', 0:'A XOR B'}
visualize.draw_net(config, winner, True, node_names = node_names)
visualize.plot_stats(stats, ylog=False, view=True)
visualize.plot_species(stats, view=True)
if __name__ == '__main__':
# Determine path to configuration file. This path manipulation is
# here so that the script will run successfully regardless of the
# current working directory.
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'config-feedforward')
run(config_path) | bsd-3-clause | -4,725,489,445,653,296,000 | 35.27957 | 82 | 0.688111 | false |
IDNoise/NoiseIDE | NoiseIDEPython/idn_snippet_completer.py | 1 | 1650 | import os
from idn_completer import Completer
import core
import yaml
class SnippetCompleter(Completer):
def __init__(self, stc):
Completer.__init__(self, stc)
self.snippets = []
for path in [os.path.join(core.MainFrame.cwd, "data", "erlang", "ide_snippets.yaml"),
os.path.join(core.MainFrame.cwd, "data", "erlang", "user_snippets.yaml"),
os.path.join(core.Project.projectDir, "snippets.yaml")]:
if os.path.exists(path):
stream = file(path, 'r')
data = yaml.load(stream)
if data:
self.snippets += data
def OnUpdate(self, text, nextChar = None):
self.list.Clear()
core.Log(text)
i = len(text) - 1
while i >= 0 and text[i].isalpha():
self.prefix += text[i]
i -= 1
self.prefix = self.prefix[::-1]
core.Log(self.prefix)
for snippet in self.snippets:
if self.prefix == "" or snippet['id'].startswith(self.prefix):
self.list.Append(snippet['id'], snippet['desc'] + "<br/><br/>" + snippet['snippet'])
def AutoComplete(self, text):
snippet = ""
for m in self.snippets:
if m['id'] == text:
snippet = m['snippet']
if not snippet: return
startPos = self.stc.GetCurrentPos() - len(self.prefix)
self.stc.SetSelectionStart(startPos)
self.stc.SetSelectionEnd(self.stc.GetCurrentPos())
self.stc.ReplaceSelection(snippet)
self.HideCompleter()
self.stc.StartSnippetEditing(startPos, snippet)
| gpl-2.0 | 3,293,013,052,869,466,000 | 32 | 100 | 0.555152 | false |
activityhistory/TracesVisualizer | dayview/scripts/extract.py | 1 | 8057 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# TESTING FILE made.by.a.fox. 12.2.15
# Updated by acrule 01.21.16
#FEATURE LIST
# Y connect to db
# Y write to file
# Y Write JSON format
# Accept input date parameter
#KNOWN ISSUES
# 2. no formatting or conversion of datetime stamps
import re
import os
import sys
import json
import sqlite3 as lite
import collections
import time
import datetime
db_file = os.path.expanduser('~/.traces/traces.sqlite') #looks for db under ~/.traces
con = lite.connect(db_file)
with con:
data = [] #master data container
apps = [] #list of apps
windows = [] # list of windows
urls = []
appevents = [] #list of application events
windowevents = [] #list of window events
urlevents = []
exps = [] #list of experiences
images = [] #list of screenshots
words = [] #list of keywords
cur = con.cursor()
#SQL query strings
appsSQL = "SELECT * FROM app"
windowsSQL = "SELECT * FROM window"
urlSQL = "SELECT * FROM url"
activeappSQL = "SELECT a.id, a.app_id, a.event, a.time as startt, min(b.time) AS endt FROM appevent a, appevent b WHERE a.app_id = b.app_id AND a.event = 'Active' AND b.event in ('Inactive', 'Close') AND a.time < b.time AND a.time IS NOT NULL AND b.time IS NOT NULL GROUP BY startt"
activewindowSQL = "SELECT a.id, a.window_id, a.event, a.time as startt, min(b.time) AS endt FROM windowevent a, windowevent b WHERE a.window_id = b.window_id AND a.event = 'Active' AND b.event in ('Inactive', 'Close') AND a.time < b.time AND a.time IS NOT NULL AND b.time IS NOT NULL GROUP BY startt"
activeurlSQL = "SELECT a.id, a.url_id, a.app_id, a.window_id, a.event, a.time as startt, min(b.time) AS endt FROM urlevent a, urlevent b WHERE a.url_id = b.url_id AND a.window_id = b.window_id AND a.app_id = b.app_id AND a.event = 'Active' AND b.event in ('Inactive', 'Close') AND a.time < b.time AND a.time IS NOT NULL AND b.time IS NOT NULL GROUP BY startt"
experienceSQL = "SELECT * FROM experience"
wordsSQL = "SELECT * FROM keys"
#GET list of applications
cur.execute(appsSQL)
rows = cur.fetchall()
for row in rows:
a = collections.OrderedDict()
a['id'] = row[0]
a['time'] = row[1]
a['name'] = row[2]
apps.append(a)
#GET list of windows
cur.execute(windowsSQL)
rows = cur.fetchall()
for row in rows:
w = collections.OrderedDict()
w['id'] = row[0]
w['time'] = row[1]
w['name'] = row[2]
w['app'] = row[3]
windows.append(w)
#GET list of urls
cur.execute(urlSQL)
rows = cur.fetchall()
for row in rows:
u = collections.OrderedDict()
u['id'] = row[0]
u['time'] = row[1]
u['title'] = row[2]
u['url'] = row[3]
u['host'] = row[4]
urls.append(u)
    #GET list of intervals for the primary application
cur.execute(activeappSQL)
rows = cur.fetchall()
for row in rows:
a = collections.OrderedDict()
a['id'] = row[0]
a['appid'] = row[1]
a['event'] = row[2]
a['start'] = row[3]
a['end'] = row[4]
appevents.append(a)
    #GET list of intervals for the primary window
cur.execute(activewindowSQL)
rows = cur.fetchall()
for row in rows:
w = collections.OrderedDict()
w['id'] = row[0]
w['windowid'] = row[1]
w['appid'] = (item for item in windows if item["id"] == row[1]).next()['app']
w['event'] = row[2]
w['start'] = row[3]
w['end'] = row[4]
windowevents.append(w)
    #GET list of intervals for urls
cur.execute(activeurlSQL)
rows = cur.fetchall()
for row in rows:
u = collections.OrderedDict()
u['id'] = row[0]
u['urlid'] = row[1]
u['appid'] = row[2]
u['windowid'] = row[3]
u['event'] = row[4]
u['start'] = row[5]
u['end'] = row[6]
urlevents.append(u)
#GET list of experiences
cur.execute(experienceSQL)
rows = cur.fetchall()
for row in rows:
a = collections.OrderedDict()
a['id'] = row[0]
a['text'] = row[2]
exps.append(a)
#GET list of screenshots
image_dir = os.path.expanduser('~/.traces/screenshots') #looks for db under ~/.traces
for y in os.listdir(image_dir):
y_dir = os.path.join(image_dir,y)
if not os.path.isdir(y_dir):
continue
for m in os.listdir(y_dir):
m_dir = os.path.join(y_dir, m)
if not os.path.isdir(m_dir):
continue
for d in os.listdir(m_dir):
d_dir = os.path.join(m_dir, d)
if not os.path.isdir(d_dir):
continue
for h in os.listdir(d_dir):
h_dir = os.path.join(d_dir, h)
if not os.path.isdir(h_dir):
continue
h_images = os.listdir(h_dir)
for image in h_images:
#make sure the file is an image
if image[-4:] == '.jpg':
i = collections.OrderedDict()
image_time = datetime.datetime.strptime(image[0:19], '%y%m%d-%H%M%S%f')
i['time'] = (image_time - datetime.datetime(1970,1,1)).total_seconds() + time.timezone #add timezone offset
i['image'] = os.path.join("screenshots", y, m, d, h, image)
images.append(i)
#GET keywords
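    # Rebuild words/phrases from individual keystrokes: characters accumulate
    # in `s` until a delimiter key (Enter, arrows, Tab, Escape, space) or a
    # window switch is seen, then the buffered text is stored together with
    # its start time and the app/window it was typed in.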
cmd_rows = []
newWord = ['Enter','Left','Right','Up','Down','Tab','Escape', ' ']
starttime = 0.0
app = 0
window = 0
s = ''
cur.execute(wordsSQL)
rows = cur.fetchall()
for row in rows:
if 'Cmd' in row[3]:
cmd_rows.append(row)
else:
text = str(row[2])
# if its a char indicating a new word, save our text token
if text in newWord:
# save our data
if len(s) > 0:
k = collections.OrderedDict()
k['time'] = starttime #datetime.datetime.fromtimestamp(starttime).strftime("%H:%M %m/%d/%y")
k['text'] = s #just pass the whole string for now
k['app'] = app
k['window'] = window
words.append(k)
#reset tracking time
starttime = float(row[1])
s = ''
# if its a regular char on the same window, just keep building the string
elif int(row[5]) == window: # and float(row[1]) - time <= 300.0:
if text == 'Backspace':
s = s[:-1]
else:
s += row[2]
#else its a regular char but we switched windows, save the data
else:
if len(s) > 0:
k = collections.OrderedDict()
k['time'] = starttime #datetime.datetime.fromtimestamp(starttime).strftime("%H:%M %m/%d/%y")
                    k['text'] = s #just pass the whole string for now
k['app'] = app
k['window'] = window
words.append(k)
#reset tracking variables
window = int(row[5])
app = int(row[4])
starttime = float(row[1])
#write the character to start the next word
if text in newWord or text == 'Backspace':
s = ''
else:
s = row[2]
#ASSEMBLE apps and experince into json
d = collections.OrderedDict()
d['apps']=apps
d['window']=windows
d['url']=urls
d['appevents']=appevents
d['windowevents']=windowevents
d['urlevents']=urlevents
d['exps']=exps
d['images']=images
d['words']=words
data = d
#WRITE file
file = 'extract.json'
z = open(file,'w')
z.writelines(json.dumps(data))
| gpl-2.0 | -7,282,651,105,888,489,000 | 32.995781 | 363 | 0.52563 | false |
solanolabs/rply | rply/parser.py | 1 | 2619 | from rply.errors import ParsingError
class LRParser(object):
def __init__(self, lr_table, error_handler):
self.lr_table = lr_table
self.error_handler = error_handler
def parse(self, tokenizer, state=None):
from rply.token import Token
lookahead = None
lookaheadstack = []
statestack = [0]
symstack = [Token("$end", None)]
current_state = 0
while True:
if lookahead is None:
if lookaheadstack:
lookahead = lookaheadstack.pop()
else:
lookahead = tokenizer.next()
if lookahead is None:
lookahead = Token("$end", None)
ltype = lookahead.gettokentype()
if ltype in self.lr_table.lr_action[current_state]:
t = self.lr_table.lr_action[current_state][ltype]
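                # lr_action codes: t > 0 shifts to state t, t < 0 reduces by
                # production number -t, and t == 0 accepts the input.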
if t > 0:
statestack.append(t)
current_state = t
symstack.append(lookahead)
lookahead = None
continue
elif t < 0:
# reduce a symbol on the stack and emit a production
p = self.lr_table.grammar.productions[-t]
pname = p.name
plen = p.getlength()
start = len(symstack) + (-plen - 1)
assert start >= 0
targ = symstack[start:]
del targ[0]
start = len(symstack) + (-plen)
assert start >= 0
del symstack[start:]
del statestack[start:]
if state is None:
value = p.func(targ)
else:
value = p.func(state, targ)
symstack.append(value)
current_state = self.lr_table.lr_goto[statestack[-1]][pname]
statestack.append(current_state)
continue
else:
n = symstack[-1]
return n
else:
# TODO: actual error handling here
if self.error_handler is not None:
if state is None:
self.error_handler(lookahead)
else:
self.error_handler(state, lookahead)
raise AssertionError("For now, error_handler must raise.")
else:
raise ParsingError(lookahead.getsourcepos())
| bsd-3-clause | 457,821,966,247,470,600 | 35.887324 | 80 | 0.450554 | false |
cortesi/pry | libpry/explain.py | 1 | 3089 | """
A module for printing "nice" messages from assertion statements.
"""
import tokenize, parser
class _Wrap:
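    # Minimal readline() substitute for tokenize.generate_tokens(): each call
    # returns the next stored line, and StopIteration signals end of input.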
def __init__(self, *lines):
self.lines = list(lines)
def __call__(self):
if not self.lines:
raise StopIteration
else:
return self.lines.pop(0)
class Expression:
def __init__(self, s):
self.s = s.strip()
def show(self, glob, loc):
try:
return repr(eval(self.s, glob, loc))
except SyntaxError, v:
return "<could not be evaluated>"
def __eq__(self, other):
return self.s == other.s
class Explain:
_specialOps = set(["==", "!=", "<", ">", ])
_specialNames = set(["not", "and", "or"])
def __init__(self, expr=None, glob=None, loc=None):
self.expr, self.glob, self.loc = expr, glob, loc
if self.expr:
self.parsed, self.expr = self.parseExpression(self.expr)
def parseExpression(self, expr):
"""
Parses an expression into components. It understands the following
delimiters: ==, !=, >, <, not, and, or
In each of these cases, the variables "x" and "y" will be evaluated.
Discards the second (message) clause of an assertion expression.
Returns None if the expression could not be interpreted.
"""
nest = 0
rem = expr
# A list of (str, start, end) tuples.
delimiters = []
try:
for i in list(tokenize.generate_tokens(_Wrap(expr))):
name, txt = tokenize.tok_name[i[0]], i[1]
start, end = i[2][1], i[3][1]
if name == "OP" and (txt == "(" or txt == "["):
nest += 1
elif name == "OP" and (txt == ")" or txt == "]"):
nest -= 1
elif nest == 0:
if name == "OP" and txt in self._specialOps:
delimiters.append((txt, start, end))
elif name == "NAME" and txt in self._specialNames:
delimiters.append((txt, start, end))
elif name == "OP" and txt == ",":
rem = expr[:start]
break
except tokenize.TokenError:
return None, None
if delimiters:
ret = []
cur = 0
for s, start, end in delimiters:
if start > cur:
ret.append(Expression(rem[cur:start]))
ret.append(s)
cur = end
ret.append(Expression(rem[end:]))
return ret, rem
else:
return [Expression(rem)], rem
def __str__(self):
l = []
l.append(" :: Re-evaluating expression:\n")
l.append(" :: %s\n"%self.expr)
l.append(" ::")
for i in self.parsed:
if isinstance(i, Expression):
l.append(i.show(self.glob, self.loc))
else:
l.append(i)
return " ".join(l)
| mit | 6,707,783,616,712,852,000 | 31.861702 | 80 | 0.471997 | false |
davidmarin/msd | msd/category_data.py | 1 | 3297 | # Copyright 2014-2015 SpendRight, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Supporting data for msd.category."""
# too vague to be useful
BAD_CATEGORIES = {
'Commercial Products',
'Industry Innovators',
'Other',
}
# strip these
USELESS_CATEGORY_SUFFIXES = [
' Brands',
' Products',
]
# custom corrections to categories (after normalization)
CATEGORY_ALIASES = {
'Accessories': 'Fashion Accessories',
'Bananas and Pineapple': 'Bananas and Pineapples',
'Food and Beverage': 'Food and Beverages',
'Food and Drink': 'Food and Beverages',
'Fun and Games': 'Toys and Games',
'Misc. Food': 'Food',
'Occ. Safety and Health Consulting': (
'Occupational Safety and Health Consulting')
}
# Categories that we can't just split on "and"; e.g.
# Home and Office Furniture, Skin and Hair Care
CATEGORY_SPLITS = {
'IT Software and Services/Web Design': {
'IT Software',
'IT Services',
'Web Design',
},
'Renewable Energy Generation and Installation': {
'Renewable Energy Generation',
'Renewable Energy Installation',
},
'Sport and Outdoor - Clothing and Shoes': {
'Sport Clothing',
'Sport Shoes',
'Outdoor Clothing',
'Outdoor Shoes',
},
'Home and Office Furniture': {
'Home Furniture',
'Office Furniture',
},
'Education and Training Services': {
'Education Services',
'Training Services',
},
'Sports Equipment, Toys and Accessories': {
'Sports Equipment',
'Toys',
'Sports Accessories',
},
'Skin and Hair Care': {
'Skin Care',
'Hair Care',
},
'Sport and Outdoor Clothing': {
'Sport Clothing',
'Outdoor Clothing',
},
'Surf, Beach and Swimwear': {
'Surf Wear',
'Beachwear',
'Swimwear',
},
'Film and Music Production': {
'Film Production',
'Music Production',
},
'Baby and Children Clothing': {
'Baby Clothing',
'Children Clothing',
},
'Catering and Meeting/Event Management': {
'Catering',
'Meeting Management',
'Event Management',
},
'Waste Reduction Consulting and Services': {
'Waste Reduction Consulting',
'Waste Reduction Services',
},
'Automotive Sales and Repair': {
'Automotive Sales',
'Automotive Repair',
},
'Web Design and Development': {
'Web Design',
'Web Development',
},
'Pet Toys, Bedding and Apparel': {
'Pet Toys',
'Pet Bedding',
'Pet Apparel',
},
'Housewares, Home Furnishings, and Accessories': {
'Housewares',
'Home Furnishings',
'Home Accessories',
},
}
| apache-2.0 | -2,630,639,732,445,051,000 | 26.247934 | 74 | 0.605096 | false |
ThomasMarcel/selection-naturelle | user/models.py | 1 | 1507 | import json
import logging
from google.appengine.ext import ndb
from lib import tools
default_permissions = {'reader': 0, 'administrator': 0}
class User(ndb.Model):
username = ndb.StringProperty()
email = ndb.StringProperty()
password=ndb.StringProperty()
first_name = ndb.StringProperty()
last_name = ndb.StringProperty()
permissions = ndb.JsonProperty(default=json.dumps(default_permissions))
active = ndb.BooleanProperty(default=False)
notes = ndb.TextProperty()
created = ndb.DateTimeProperty(auto_now_add=True)
modified = ndb.DateTimeProperty(auto_now=True)
@classmethod
def get_by_username(cls, username):
return cls.query(cls.username == username).get()
@classmethod
def get_by_email(cls, email):
return cls.query(cls.email == email).get()
@classmethod
def reset_permissions(cls):
cls.permissions = json.dumps(default_permissions)
@classmethod
def get_by_urlkey(cls, userkey):
return cls.query(User.key == ndb.Key(urlsafe = userkey)).get()
def to_dict(cls):
return {
'key': cls.key,
'username': cls.username,
'email': cls.email,
'password': cls.password,
'first_name': cls.first_name,
'last_name': cls.last_name,
'permissions': cls.permissions,
'active': cls.active,
'notes': cls.notes,
'created': cls.created,
'modified': cls.modified
} | apache-2.0 | -5,442,921,198,355,938,000 | 28 | 75 | 0.624419 | false |
viz4biz/PyDataNYC2015 | enaml/mpl_canvas.py | 1 | 2532 | #------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Typed, ForwardTyped, Bool, observe, set_default, Value, List, Enum
from enaml.core.declarative import d_
from .control import Control, ProxyControl
#: Delay the import of matplotlib until needed. This removes the hard
#: dependecy on matplotlib for the rest of the Enaml code base.
def Figure():
from matplotlib.figure import Figure
return Figure
class ProxyMPLCanvas(ProxyControl):
""" The abstract definition of a proxy MPLCanvas object.
"""
#: A reference to the MPLCanvas declaration.
declaration = ForwardTyped(lambda: MPLCanvas)
def set_figure(self, figure):
raise NotImplementedError
def set_toolbar_visible(self, visible):
raise NotImplementedError
def set_toolbar_location(self, location):
raise NotImplementedError
def set_event_actions(self, actions):
raise NotImplementedError
def draw(self):
raise NotImplementedError
class MPLCanvas(Control):
""" A control which can be used to embded a matplotlib figure.
"""
#: The matplotlib figure to display in the widget.
figure = d_(ForwardTyped(Figure))
#: Whether or not the matplotlib figure toolbar is visible.
toolbar_visible = d_(Bool(False))
toolbar_location = d_(Enum('top', 'bottom'))
event_actions = d_(List(Value()))
#: Matplotlib figures expand freely in height and width by default.
hug_width = set_default('ignore')
hug_height = set_default('ignore')
#: A reference to the ProxyMPLCanvas object.
proxy = Typed(ProxyMPLCanvas)
def draw(self):
""" Request draw on the Figure """
if self.proxy_is_active:
self.proxy.draw()
#--------------------------------------------------------------------------
# Observers
#--------------------------------------------------------------------------
@observe('figure', 'toolbar_visible', 'toolbar_location', 'event_actions')
def _update_proxy(self, change):
""" An observer which sends state change to the proxy.
"""
# The superclass handler implementation is sufficient.
super(MPLCanvas, self)._update_proxy(change)
| apache-2.0 | -4,969,528,589,405,369,000 | 31.050633 | 87 | 0.600711 | false |
LiGhT1EsS/cobra | cobra/scheduler/report.py | 1 | 4364 | # -*- coding: utf-8 -*-
"""
scheduler.report
~~~~~~~~~~~~~~~~
Implements automation report Cobra data
:author: Feei <[email protected]>
:homepage: https://github.com/wufeifei/cobra
:license: MIT, see LICENSE for more details.
:copyright: Copyright (c) 2017 Feei. All rights reserved
"""
import os
import subprocess
import base64
import datetime
from cobra.utils.log import logging
from cobra.utils.config import Config
import smtplib
from smtplib import SMTPException
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
logging = logging.getLogger(__name__)
phantomjs = '/usr/local/bin/phantomjs'
time_types = ['w', 'm', 'q']
time_type_des = {
'w': '周',
'm': '月',
'q': '季'
}
class Report(object):
def __init__(self, time_type, month=None):
if time_type not in time_types:
logging.critical('Time type exception')
return
self.time_type_de = time_type_des[time_type]
# mail
mark = ''
if month is None:
c_month = int(datetime.datetime.today().strftime("%m"))
else:
c_month = int(month)
if time_type == 'w':
c_week = int(datetime.datetime.today().strftime("%U"))
mark = 'W{week}'.format(week=c_week)
elif time_type == 'm':
mark = 'M{month}'.format(month=c_month)
elif time_type == 'q':
c_quarter = 0
if c_month in [1, 2, 3]:
c_quarter = 1
elif c_month in [4, 5, 6]:
c_quarter = 2
elif c_month in [7, 8, 9]:
c_quarter = 3
elif c_month in [10, 11, 12]:
c_quarter = 4
mark = 'Q{quarter}'.format(quarter=c_quarter)
self.subject = '[Cobra] 代码安全{0}报({mark})'.format(self.time_type_de, mark=mark)
self.user = Config('email', 'user').value
self.name = Config('email', 'name').value
self.to = Config('report', 'to').value
self.host = Config('email', 'host').value
self.port = Config('email', 'port').value
self.password = Config('email', 'password').value
self.param = [phantomjs, os.path.join(Config().project_directory, 'scheduler', 'report.js'), Config().project_directory, time_type]
if month is not None:
self.param.append(month)
def run(self):
capture = self.capture()
if capture is False:
logging.critical('Capture failed')
return False
# send notification
if self.notification(capture):
return True
else:
logging.critical('Notification failed')
return False
def capture(self):
"""
Use PhantomJS to capture report page
:return: boolean
"""
capture = None
p = subprocess.Popen(self.param, stdout=subprocess.PIPE)
result, err = p.communicate()
if 'Critical' in result:
logging.critical('Capture exception')
return False
lines = result.split('\n')
for l in lines:
if 'reports' in l:
capture = l.split(':')[1].strip()
if capture is None:
logging.critical('get capture image file failed')
return False
else:
return os.path.join(Config().project_directory, capture)
def notification(self, capture_path):
"""
Email notification
:param capture_path:
:return: boolean
"""
msg = MIMEMultipart()
msg['Subject'] = self.subject
msg['From'] = '{0}<{1}>'.format(self.name, self.user)
msg['To'] = self.to
with open(capture_path, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
text = MIMEText('<img src="data:image/png;base64,{0}">'.format(encoded_string), 'html')
msg.attach(text)
try:
s = smtplib.SMTP(self.host, self.port)
s.ehlo()
s.starttls()
s.ehlo()
s.login(self.user, self.password)
s.sendmail(self.user, self.to, msg.as_string())
s.quit()
return True
except SMTPException:
logging.critical('Send mail failed')
return False
| mit | 8,891,019,861,325,576,000 | 29.405594 | 139 | 0.548298 | false |
gsnedders/Template-Python | t/directive_test.py | 1 | 4424 | from template import Template
from template.test import TestCase, main
class DirectiveTest(TestCase):
def testDirectives(self):
ttobjs = (('tt', Template()),
('pre', Template({ 'PRE_CHOMP': 1 })),
('post', Template({ 'POST_CHOMP': 1 })),
('trim', Template({ 'INCLUDE_PATH': 'test/lib', 'TRIM': 1 })))
self.Expect(DATA, ttobjs, self._callsign())
DATA = r"""
#------------------------------------------------------------------------
# basic directives
#------------------------------------------------------------------------
-- test --
[% a %]
[%a%]
-- expect --
alpha
alpha
-- test --
pre [% a %]
pre[% a %]
-- expect --
pre alpha
prealpha
-- test --
[% a %] post
[% a %]post
-- expect --
alpha post
alphapost
-- test --
pre [% a %] post
pre[% a %]post
-- expect --
pre alpha post
prealphapost
-- test --
[% a %][%b%][% c %]
-- expect --
alphabravocharlie
-- test --
[%
a %][%b
%][%
c
%][%
d
%]
-- expect --
alphabravocharliedelta
#------------------------------------------------------------------------
# comments
#------------------------------------------------------------------------
-- test --
[%# this is a comment which should
be ignored in totality
%]hello world
-- expect --
hello world
-- test --
[% # this is a one-line comment
a
%]
-- expect --
alpha
-- test --
[% # this is a two-line comment
a =
# here's the next line
b
-%]
[% a %]
-- expect --
bravo
-- test --
[% a = c # this is a comment on the end of the line
b = d # so is this
-%]
a: [% a %]
b: [% b %]
-- expect --
a: charlie
b: delta
#------------------------------------------------------------------------
# manual chomping
#------------------------------------------------------------------------
-- test --
[% a %]
[% b %]
-- expect --
alpha
bravo
-- test --
[% a -%]
[% b %]
-- expect --
alphabravo
-- test --
[% a -%]
[% b %]
-- expect --
alpha bravo
-- test --
[% a %]
[%- b %]
-- expect --
alphabravo
-- test --
[% a %]
[%- b %]
-- expect --
alphabravo
-- test --
start
[% a %]
[% b %]
end
-- expect --
start
alpha
bravo
end
-- test --
start
[%- a %]
[% b -%]
end
-- expect --
startalpha
bravoend
-- test --
start
[%- a -%]
[% b -%]
end
-- expect --
startalphabravoend
-- test --
start
[%- a %]
[%- b -%]
end
-- expect --
startalphabravoend
#------------------------------------------------------------------------
# PRE_CHOMP enabled
#------------------------------------------------------------------------
-- test --
-- use pre --
start
[% a %]
mid
[% b %]
end
-- expect --
startalpha
midbravo
end
-- test --
start
[% a %]
mid
[% b %]
end
-- expect --
startalpha
midbravo
end
-- test --
start
[%+ a %]
mid
[% b %]
end
-- expect --
start
alpha
midbravo
end
-- test --
start
[%+ a %]
mid
[% b %]
end
-- expect --
start
alpha
midbravo
end
-- test --
start
[%- a %]
mid
[%- b %]
end
-- expect --
startalpha
midbravo
end
#------------------------------------------------------------------------
# POST_CHOMP enabled
#------------------------------------------------------------------------
-- test --
-- use post --
start
[% a %]
mid
[% b %]
end
-- expect --
start
alphamid
bravoend
-- test --
start
[% a %]
mid
[% b %]
end
-- expect --
start
alphamid
bravoend
-- test --
start
[% a +%]
mid
[% b %]
end
-- expect --
start
alpha
mid
bravoend
-- test --
start
[% a +%]
[% b +%]
end
-- expect --
start
alpha
bravo
end
-- test --
start
[% a -%]
mid
[% b -%]
end
-- expect --
start
alphamid
bravoend
#------------------------------------------------------------------------
# TRIM enabled
#------------------------------------------------------------------------
-- test --
-- use trim --
[% INCLUDE trimme %]
-- expect --
I am a template element file which will get TRIMmed
-- test --
[% BLOCK foo %]
this is block foo
[% END -%]
[% BLOCK bar %]
this is block bar
[% END %]
[% INCLUDE foo %]
[% INCLUDE bar %]
end
-- expect --
this is block foo
this is block bar
end
-- test --
<foo>[% PROCESS foo %]</foo>
<bar>[% PROCESS bar %]</bar>
[% BLOCK foo %]
this is block foo
[% END -%]
[% BLOCK bar %]
this is block bar
[% END -%]
end
-- expect --
<foo>this is block foo</foo>
<bar>this is block bar</bar>
end
-- test --
[% r; r = s; "-"; r %].
-- expect --
romeo-sierra.
-- test --
[% IF a; b; ELSIF c; d; ELSE; s; END %]
-- expect --
bravo
"""
| artistic-2.0 | 1,009,830,628,860,374,400 | 11.288889 | 76 | 0.419304 | false |
AlexStarov/Shop | applications/delivery2/migrations/0002_auto_20161124_2123.py | 1 | 4727 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import applications.delivery2.models
class Migration(migrations.Migration):
dependencies = [
('delivery2', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='EmailImageTemplate',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.CharField(max_length=256, verbose_name='\u041f\u0443\u0442\u044c')),
('image', models.ImageField(upload_to=applications.delivery2.models.upload_to, null=True, verbose_name='\u0418\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0435', blank=True)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='\u0414\u0430\u0442\u0430 \u0441\u043e\u0437\u0434\u0430\u043d\u0438\u044f', null=True)),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='\u0414\u0430\u0442\u0430 \u043e\u0431\u043d\u043e\u0432\u043b\u0435\u043d\u0438\u044f', null=True)),
],
options={
'ordering': ['-created_at'],
'db_table': 'Delivery2_EmailImageTemplate',
'verbose_name': '\u0418\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0435 \u0432 \u043f\u0438\u0441\u044c\u043c\u0435',
'verbose_name_plural': '\u0418\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u044f \u0432 \u043f\u0438\u0441\u044c\u043c\u0435',
},
),
migrations.CreateModel(
name='EmailSubject',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('subject', models.CharField(default='\u0422\u0435\u043c\u0430', max_length=256, verbose_name='\u0422\u0435\u043c\u0430 \u043f\u0438\u0441\u044c\u043c\u0430')),
('chance', models.DecimalField(default=1, verbose_name='\u0412\u0435\u0440\u043e\u044f\u0442\u043d\u043e\u0441\u0442\u044c', max_digits=4, decimal_places=2)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='\u0414\u0430\u0442\u0430 \u0441\u043e\u0437\u0434\u0430\u043d\u0438\u044f', null=True)),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='\u0414\u0430\u0442\u0430 \u043e\u0431\u043d\u043e\u0432\u043b\u0435\u043d\u0438\u044f', null=True)),
],
options={
'ordering': ['-created_at'],
'db_table': 'Delivery2_EmailSubject',
'verbose_name': '\u0422\u0435\u043c\u0430',
'verbose_name_plural': '\u0422\u0435\u043c\u044b',
},
),
migrations.RemoveField(
model_name='subject',
name='delivery',
),
migrations.RemoveField(
model_name='delivery',
name='template',
),
migrations.AddField(
model_name='emailtemplate',
name='name',
field=models.CharField(null=True, default=b'<built-in method now of type object at 0x83c4c20>', max_length=64, blank=True, unique=True, verbose_name='\u041d\u0430\u0437\u0432\u0430\u043d\u0438\u0435'),
),
migrations.AlterField(
model_name='delivery',
name='task_id',
field=models.CharField(max_length=255, null=True, verbose_name='task id', blank=True),
),
migrations.AlterField(
model_name='emailtemplate',
name='template',
field=models.FileField(upload_to=applications.delivery2.models.upload_to, null=True, verbose_name='\u0428\u0430\u0431\u043b\u043e\u043d', blank=True),
),
migrations.AlterField(
model_name='message',
name='subject',
field=models.ForeignKey(verbose_name='\u0423\u043a\u0430\u0437\u0430\u0442\u0435\u043b\u044c \u043d\u0430 subject', blank=True, to='delivery2.EmailSubject', null=True),
),
migrations.AlterModelTable(
name='emailtemplate',
table='Delivery2_EmailTemplate',
),
migrations.DeleteModel(
name='Subject',
),
migrations.AddField(
model_name='emailsubject',
name='delivery',
field=models.ForeignKey(to='delivery2.Delivery'),
),
migrations.AddField(
model_name='emailimagetemplate',
name='template',
field=models.ForeignKey(related_name='images', verbose_name='\u0428\u0430\u0431\u043b\u043e\u043d', to='delivery2.EmailTemplate'),
),
]
| apache-2.0 | -7,083,899,874,331,063,000 | 50.380435 | 213 | 0.609477 | false |
mammix2/ccoin-dev | contrib/pyminer/pyminer.py | 1 | 6435 | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 10464
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| mit | 8,596,083,419,467,708,000 | 24.535714 | 84 | 0.648951 | false |
John-Lin/invoice-net | website.py | 1 | 1459 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from bottle import route, run, template, view
#from bottle import jinja2_view
from invoice_prize import *
@route('/hello')
def hello():
return "Hello World!"
@route('/invoice')
@view('invoice_template')
def invoice():
(results, date) = get_result()
date = date[0].decode('UTF-8')
special = prize(results, 0)
first = prize(results, 1)
second = prize(results, 2)
third = prize(results, 3)
fourth = prize(results, 4)
fifth = prize(results, 5)
sixth = prize(results, 6)
sixth_plus = prize(results, 7)
special2 = prize(results, 8)
return dict(date=date, special2=special2, special=special,
first=first, second=second, third=third, fourth=fourth,
fifth=fifth, sixth=sixth, sixth_plus=sixth_plus)
@route('/invoice_M')
@view('invoiceM_template')
def invoice_m():
(results, date) = get_result()
date = date[0].decode('UTF-8')
special = prize(results, 0)
first = prize(results, 1)
second = prize(results, 2)
third = prize(results, 3)
fourth = prize(results, 4)
fifth = prize(results, 5)
sixth = prize(results, 6)
sixth_plus = prize(results, 7)
special2 = prize(results, 8)
return dict(date=date, special2=special2, special=special,
first=first, second=second, third=third, fourth=fourth,
fifth=fifth, sixth=sixth, sixth_plus=sixth_plus)
run(host='localhost', port=8080, debug=True, reloader=True)
| mit | 5,745,852,764,212,994,000 | 27.607843 | 62 | 0.655243 | false |
jeonghoonkang/BerePi | apps/data.go.kr/get_public_micro_particle.py | 1 | 3613 | # -*- coding: utf-8 -*-
# Author : https://github.com/kmlee408
# https://github.com/jeonghoonkang
'''
부산 URL= http://openapi.airkorea.or.kr/openapi/services/rest/ArpltnInforInqireSvc/getCtprvnRltmMesureDnsty?serviceKey=fCRWi0DoCfoCPMHyDwai3trva10y4qb8mh9aysoHzvLKDWw6Q2bWOsvuM4%2BsRdvE4dPiKqBFD7vj7%2FM2noCe2g%3D%3D&ver=1.3&pageSize=10&pageNo=1&sidoName=%EB%B6%80%EC%82%B0&startPage=1&numOfRows=100
 How to run: $ python mdust_pusan.py
 (To query a different region, change `location` inside the misaemunji function, e.g. location = '경기' (Gyeonggi).)
 (Available regions: 서울, 부산, 대구, 인천, 광주, 대전, 울산, 경기, 강원, 충북, 충남, 전북, 전남, 경북, 경남, 제주, 세종
  -- Seoul, Busan, Daegu, Incheon, Gwangju, Daejeon, Ulsan, Gyeonggi, Gangwon, Chungbuk, Chungnam, Jeonbuk, Jeonnam, Gyeongbuk, Gyeongnam, Jeju, Sejong.)
'''
import requests
from urllib import urlencode, quote_plus
from bs4 import BeautifulSoup
import pandas as pd
import keytxt
# The service key must be issued by data.go.kr
# https://www.data.go.kr/dataset/15000581/openapi.do?mypageFlag=Y
service_key = keytxt.key
def misaemunji(service_key, location=None, spot=None):
    # valid `location` values (must be given in Korean): 서울, 부산, 대구, 인천, 광주, 대전, 울산, 경기, 강원, 충북, 충남, 전북, 전남, 경북, 경남, 제주, 세종
    # API: real-time measurements by city/province (sido)
URL ='http://openapi.airkorea.or.kr/openapi/services/rest/ArpltnInforInqireSvc/getCtprvnRltmMesureDnsty?serviceKey='
    # build and URL-encode the query parameters
    queryParams = '&' + urlencode({quote_plus('numOfRows') : '100', # set to the maximum
quote_plus('pageSize'): '10',
quote_plus('pageNo') : '1',
quote_plus('startPage') :'1',
quote_plus('sidoName') : location,
quote_plus('ver') : '1.3'
})
    if location is None:
        exit("you should specify a location, e.g. '부산' (Busan)")
r = requests.get(URL+service_key+queryParams)
html = r.text
soup = BeautifulSoup(html, 'html.parser') #parsing
info_ = soup.select('item')
misae_station = {}
for info__ in info_:
datetime_ = info__.datatime.text
list_ = [str(info__.pm10value.text),str(info__.pm25value.text)]
        # the two particulate readings: [PM10, PM2.5]
        misae_station[info__.stationname.text.encode('utf-8')] = list_
        # misae_station maps each station name to its [PM10, PM2.5] readings
    # build the DataFrame (row labels below mean PM10 and PM2.5; kept in Korean as output labels)
    index_list = ['미세먼지10','초미세먼지2.5']
df = pd.DataFrame(misae_station, index = index_list)
if spot != None :
if spot in misae_station:
'''
            print('Measurement time : ' + str(datetime_))   # e.g. 2018-11-08 20:00
            print('Measurement region : ')
            print(location)
            print(spot)
            print('(unit : ㎍/㎥)')
print misae_station[spot][1]
'''
return (str(datetime_), str(spot), 'pm2.5', misae_station[spot][1] )
def get_public_mise(loc='서울', station='강남구'):
kangnam = misaemunji(service_key, location=loc, spot=station)
return kangnam
if __name__ == '__main__':
kangnam = misaemunji(service_key, location='서울', spot='강남구')
    # valid `location` values: 서울, 부산, 대구, 인천, 광주, 대전, 울산, 경기, 강원, 충북, 충남, 전북, 전남, 경북, 경남, 제주, 세종
print kangnam
| bsd-2-clause | 2,974,692,860,236,816,000 | 36.134146 | 300 | 0.560601 | false |
cysuncn/python | spark/crm/PROC_O_LNA_XDXT_ENT_AUTH.py | 1 | 2786 | #coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_O_LNA_XDXT_ENT_AUTH').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
if sys.argv[5] == "hive":
sqlContext = HiveContext(sc)
else:
sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
# dates used during processing
etl_date = sys.argv[1]
# ETL date
V_DT = etl_date
# previous day's date
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
# first day of the current month
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
# last day of the previous month
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
# 10-character date (YYYY-MM-DD)
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0
O_CI_XDXT_ENT_AUTH = sqlContext.read.parquet(hdfs+'/O_CI_XDXT_ENT_AUTH/*')
O_CI_XDXT_ENT_AUTH.registerTempTable("O_CI_XDXT_ENT_AUTH")
# Task [21] 001-01::
V_STEP = V_STEP + 1
sql = """
SELECT A.CUSTOMERID AS CUSTOMERID
,A.SERIALNO AS SERIALNO
,A.CERTTYPE AS CERTTYPE
,A.AUTHDATE AS AUTHDATE
,A.CERTNAME AS CERTNAME
,A.CERTID AS CERTID
,A.CERTRESULT AS CERTRESULT
,A.CERTORG AS CERTORG
,A.VALIDDATE AS VALIDDATE
,A.INPUTORGID AS INPUTORGID
,A.INPUTUSERID AS INPUTUSERID
,A.INPUTDATE AS INPUTDATE
,A.REMARK AS REMARK
,A.TRADENAME AS TRADENAME
,A.NAME_RENDING AS NAME_RENDING
,A.FR_ID AS FR_ID
,V_DT AS ODS_ST_DATE
,'LNA' AS ODS_SYS_ID
       FROM O_CI_XDXT_ENT_AUTH A                                   --enterprise qualification and certification information
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
F_CI_XDXT_ENT_AUTH = sqlContext.sql(sql)
F_CI_XDXT_ENT_AUTH.registerTempTable("F_CI_XDXT_ENT_AUTH")
dfn="F_CI_XDXT_ENT_AUTH/"+V_DT+".parquet"
F_CI_XDXT_ENT_AUTH.cache()
nrows = F_CI_XDXT_ENT_AUTH.count()
F_CI_XDXT_ENT_AUTH.write.save(path=hdfs + '/' + dfn, mode='overwrite')
F_CI_XDXT_ENT_AUTH.unpersist()
ret = os.system("hdfs dfs -rm -r /"+dbname+"/F_CI_XDXT_ENT_AUTH/"+V_DT_LD+".parquet")
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert F_CI_XDXT_ENT_AUTH lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
| gpl-3.0 | 4,066,141,725,929,621,000 | 37.140845 | 170 | 0.574225 | false |
rowinggolfer/openmolar2 | src/lib_openmolar/admin/db_orm/admin_teeth_present.py | 1 | 3093 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
## ##
## Copyright 2010-2012, Neil Wallace <[email protected]> ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###############################################################################
'''
Provides a DemoGenerator for teeth_present table
provides schema and insert query for the teeth_present table
data on which teeth are present in the patients mouth
'''
from random import randint
from PyQt4 import QtSql
from lib_openmolar.common.db_orm import InsertableRecord
TABLENAME = "teeth_present"
class DemoGenerator(object):
def __init__(self, database):
q_query= QtSql.QSqlQuery(
"select min(ix), max(ix) from patients", database)
if q_query.first():
self.min_patient_id = q_query.value(0).toInt()[0]
self.max_patient_id = q_query.value(1).toInt()[0]
else:
self.min_patient_id, self.max_patient_id = 0,0
self.length = self.max_patient_id - self.min_patient_id
self.record = InsertableRecord(database, TABLENAME)
self.record.remove(self.record.indexOf("dent_key"))
self.record.remove(self.record.indexOf('checked_date'))
def demo_queries(self):
'''
return a list of queries to populate a demo database
'''
for patient_id in xrange(self.min_patient_id, self.max_patient_id+1):
self.record.clearValues()
#set values, or allow defaults
self.record.setValue('patient_id', patient_id)
self.record.setValue('checked_by', 'demo_installer')
yield self.record.insert_query
if __name__ == "__main__":
from lib_openmolar.admin.connect import DemoAdminConnection
sc = DemoAdminConnection()
sc.connect()
builder = DemoGenerator(sc)
print builder.demo_queries().next()
| gpl-3.0 | -1,188,628,920,479,698,700 | 41.369863 | 79 | 0.515681 | false |
dormouse/read | database/models.py | 1 | 5390 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
from sqlalchemy import Column, ForeignKey
from sqlalchemy.dialects.sqlite import INTEGER, TEXT, DATETIME, BOOLEAN
from sqlalchemy.orm import column_property, relationship
from sqlalchemy.sql import func
from sqlalchemy import and_
from database.database import book_base, rss_base
class BookJob(book_base):
""" Jobs for book """
__tablename__ = 'book_job'
id = Column(INTEGER, primary_key=True)
type_code = Column(TEXT, ForeignKey('book_dict.code'))
type = relationship(
"BookDict",
primaryjoin="and_(BookJob.type_code==BookDict.code,"
"BookDict.name=='job_type')",
backref='job_type'
)
file_name = Column(TEXT)
url = Column(TEXT)
create_time = Column(DATETIME, default=datetime.datetime.utcnow)
last_update = Column(DATETIME, default=datetime.datetime.utcnow)
status_code = Column(TEXT, ForeignKey('book_dict.code'))
status = relationship(
"BookDict",
primaryjoin="and_(BookJob.status_code==BookDict.code,"
"BookDict.name=='job_status')",
backref='job_status'
)
def __init__(self, url):
self.url = url
def __repr__(self):
return 'BookJob %s' % self.url
class BookDict(book_base):
""" BookDict """
__tablename__ = 'book_dict'
id = Column(INTEGER, primary_key=True)
name = Column(TEXT)
code = Column(TEXT)
value = Column(TEXT)
class Node(rss_base):
__tablename__ = 'node'
id = Column(INTEGER, primary_key=True)
parent_id = Column(INTEGER, ForeignKey('node.id'))
category = Column(TEXT)
children = relationship("Node")
data_id = Column(INTEGER) # RssAction.id or RssFolder.id or RssFeed.id
rank = Column(INTEGER) # rank for display in tree
def __repr__(self):
return "Node:{}".format(self.id)
class RssCommand(rss_base):
__tablename__ = 'rss_command'
id = Column(INTEGER, primary_key=True)
title = Column(TEXT)
command = Column(TEXT)
def __repr__(self):
return "Commander:{}".format(self.title)
class RssFolder(rss_base):
__tablename__ = 'rss_folder'
id = Column(INTEGER, primary_key=True)
title = Column(TEXT)
def __repr__(self):
return "folder:{}".format(self.title)
class RssFeed(rss_base):
__tablename__ = 'rss_feed'
id = Column(INTEGER, primary_key=True)
title = Column(TEXT)
subtitle = Column(TEXT)
url = Column(TEXT)
encoding = Column(TEXT)
language = Column(TEXT)
author = Column(TEXT)
site_url = Column(TEXT)
published = Column(DATETIME)
updated = Column(DATETIME)
def __repr__(self):
return "feed:{}".format(self.title)
class RssItem(rss_base):
__tablename__ = 'rss_item'
id = Column(INTEGER, primary_key=True)
author = Column(TEXT)
feed_id = Column(INTEGER,
ForeignKey('rss_feed.id'),
info={'relationFieldName': 'feed'}
)
feed = relationship("RssFeed")
published = Column(DATETIME)
link = Column(TEXT)
title = Column(TEXT)
summary = Column(TEXT)
content = Column(TEXT)
is_read = Column(BOOLEAN)
@property
def foreignKeyFieldNames(self):
        # the names of the fields that have a foreign key
cols = self.__table__.columns
fieldNames = [col.name for col in cols]
return filter(self.isForeignKeyField, fieldNames)
@property
def foreignKeyRelationFieldNames(self):
return [self.relationFieldName(name) for name in
self.foreignKeyFieldNames]
@property
def allFieldNames(self):
cols = self.__table__.columns
fieldNames = [col.name for col in cols]
return fieldNames + self.foreignKeyRelationFieldNames
def __repr__(self):
return '<item {0}>'.format(self.title)
def updateByDict(self, dictData):
        for name, value in dictData.items():
setattr(self, name, value)
def isForeignKeyField(self, name):
""" 判断是否是一个外键字段 """
if self.__table__.columns[name].foreign_keys:
return True
else:
return False
def relationFieldName(self, name):
""" 返回外键字段对应的关系字段 """
cols = self.__table__.columns
relationName = dict(cols)[name].info['relationFieldName']
return relationName
def valuesAsDict(self, fieldNames=None):
names = fieldNames if fieldNames else self.allFieldNames
values = self.valuesAsList(names)
return dict(zip(names, values))
def valuesAsList(self, fieldNames):
"""
        Return the values corresponding to the given field names.
        :param fieldNames: field names, as a list
        :return: field values, as a list
"""
return [self.fieldValue(name) for name in fieldNames]
def fieldValue(self, fieldName):
"""
        Return the value of the given field; a dictionary relation field
        returns the short name of its dict entry.
        :param fieldName: field name
        :return: field value
"""
value = getattr(self, fieldName, None)
if fieldName == 'published':
value = value.strftime("%Y年%m月%d日 %X")
return value
# return value.value_short if isinstance(value, ModelCqDict) else value
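    # Illustration only -- a hypothetical example of the introspection helpers
    # above; the session and the concrete values are made up:
    #
    #   item = session.query(RssItem).first()
    #   item.allFieldNames                       # column names plus 'feed'
    #   item.valuesAsDict(['title', 'published', 'feed'])
    #   # -> {'title': ..., 'published': '2017年01月01日 ...', 'feed': <RssFeed ...>}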
| lgpl-3.0 | 7,813,540,119,064,889,000 | 27.839779 | 79 | 0.615134 | false |
fengkaicnic/traffic | traffic/crypto.py | 1 | 12797 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Wrappers around standard crypto data elements.
Includes root and intermediate CAs, SSH key_pairs and x509 certificates.
"""
from __future__ import absolute_import
import hashlib
import os
import string
from traffic import context
from traffic import db
from traffic import exception
from traffic import flags
from traffic.openstack.common import cfg
from traffic.openstack.common import log as logging
from traffic.openstack.common import timeutils
from traffic import utils
LOG = logging.getLogger(__name__)
crypto_opts = [
cfg.StrOpt('ca_file',
default='cacert.pem',
help=_('Filename of root CA')),
cfg.StrOpt('key_file',
default=os.path.join('private', 'cakey.pem'),
help=_('Filename of private key')),
cfg.StrOpt('crl_file',
default='crl.pem',
help=_('Filename of root Certificate Revocation List')),
cfg.StrOpt('keys_path',
default='$state_path/keys',
help=_('Where we keep our keys')),
cfg.StrOpt('ca_path',
default='$state_path/CA',
help=_('Where we keep our root CA')),
cfg.BoolOpt('use_project_ca',
default=False,
help=_('Should we use a CA for each project?')),
cfg.StrOpt('user_cert_subject',
default='/C=US/ST=California/O=OpenStack/'
'OU=trafficDev/CN=%.16s-%.16s-%s',
help=_('Subject for certificate for users, %s for '
'project, user, timestamp')),
cfg.StrOpt('project_cert_subject',
default='/C=US/ST=California/O=OpenStack/'
'OU=trafficDev/CN=project-ca-%.16s-%s',
help=_('Subject for certificate for projects, %s for '
'project, timestamp')),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(crypto_opts)
def ca_folder(project_id=None):
if FLAGS.use_project_ca and project_id:
return os.path.join(FLAGS.ca_path, 'projects', project_id)
return FLAGS.ca_path
def ca_path(project_id=None):
return os.path.join(ca_folder(project_id), FLAGS.ca_file)
def key_path(project_id=None):
return os.path.join(ca_folder(project_id), FLAGS.key_file)
def crl_path(project_id=None):
return os.path.join(ca_folder(project_id), FLAGS.crl_file)
def fetch_ca(project_id=None):
if not FLAGS.use_project_ca:
project_id = None
ca_file_path = ca_path(project_id)
if not os.path.exists(ca_file_path):
raise exception.CryptoCAFileNotFound(project_id=project_id)
with open(ca_file_path, 'r') as cafile:
return cafile.read()
def ensure_ca_filesystem():
"""Ensure the CA filesystem exists."""
ca_dir = ca_folder()
if not os.path.exists(ca_path()):
genrootca_sh_path = os.path.join(os.path.dirname(__file__),
'CA',
'genrootca.sh')
start = os.getcwd()
utils.ensure_tree(ca_dir)
os.chdir(ca_dir)
utils.execute("sh", genrootca_sh_path)
os.chdir(start)
def _generate_fingerprint(public_key_file):
(out, err) = utils.execute('ssh-keygen', '-q', '-l', '-f', public_key_file)
fingerprint = out.split(' ')[1]
return fingerprint
def generate_fingerprint(public_key):
with utils.tempdir() as tmpdir:
try:
pubfile = os.path.join(tmpdir, 'temp.pub')
with open(pubfile, 'w') as f:
f.write(public_key)
return _generate_fingerprint(pubfile)
except exception.ProcessExecutionError:
raise exception.InvalidKeypair()
def generate_key_pair(bits=1024):
# what is the magic 65537?
with utils.tempdir() as tmpdir:
keyfile = os.path.join(tmpdir, 'temp')
utils.execute('ssh-keygen', '-q', '-b', bits, '-N', '',
'-t', 'rsa', '-f', keyfile, '-C', 'Generated by traffic')
fingerprint = _generate_fingerprint('%s.pub' % (keyfile))
if not os.path.exists(keyfile):
raise exception.FileNotFound(keyfile)
private_key = open(keyfile).read()
public_key_path = keyfile + '.pub'
if not os.path.exists(public_key_path):
raise exception.FileNotFound(public_key_path)
public_key = open(public_key_path).read()
return (private_key, public_key, fingerprint)
def fetch_crl(project_id):
"""Get crl file for project."""
if not FLAGS.use_project_ca:
project_id = None
crl_file_path = crl_path(project_id)
if not os.path.exists(crl_file_path):
raise exception.CryptoCRLFileNotFound(project_id)
with open(crl_file_path, 'r') as crlfile:
return crlfile.read()
def decrypt_text(project_id, text):
private_key = key_path(project_id)
if not os.path.exists(private_key):
raise exception.ProjectNotFound(project_id=project_id)
try:
dec, _err = utils.execute('openssl',
'rsautl',
'-decrypt',
'-inkey', '%s' % private_key,
process_input=text)
return dec
except exception.ProcessExecutionError:
raise exception.DecryptionFailure()
def revoke_cert(project_id, file_name):
"""Revoke a cert by file name."""
start = os.getcwd()
os.chdir(ca_folder(project_id))
# NOTE(vish): potential race condition here
utils.execute('openssl', 'ca', '-config', './openssl.cnf', '-revoke',
file_name)
utils.execute('openssl', 'ca', '-gencrl', '-config', './openssl.cnf',
'-out', FLAGS.crl_file)
os.chdir(start)
def revoke_certs_by_user(user_id):
"""Revoke all user certs."""
admin = context.get_admin_context()
for cert in db.certificate_get_all_by_user(admin, user_id):
revoke_cert(cert['project_id'], cert['file_name'])
def revoke_certs_by_project(project_id):
"""Revoke all project certs."""
# NOTE(vish): This is somewhat useless because we can just shut down
# the vpn.
admin = context.get_admin_context()
for cert in db.certificate_get_all_by_project(admin, project_id):
revoke_cert(cert['project_id'], cert['file_name'])
def revoke_certs_by_user_and_project(user_id, project_id):
"""Revoke certs for user in project."""
admin = context.get_admin_context()
for cert in db.certificate_get_all_by_user_and_project(admin,
user_id, project_id):
revoke_cert(cert['project_id'], cert['file_name'])
def _project_cert_subject(project_id):
"""Helper to generate user cert subject."""
return FLAGS.project_cert_subject % (project_id, timeutils.isotime())
def _user_cert_subject(user_id, project_id):
"""Helper to generate user cert subject."""
return FLAGS.user_cert_subject % (project_id, user_id, timeutils.isotime())
def generate_x509_cert(user_id, project_id, bits=1024):
"""Generate and sign a cert for user in project."""
subject = _user_cert_subject(user_id, project_id)
with utils.tempdir() as tmpdir:
keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key'))
csrfile = os.path.join(tmpdir, 'temp.csr')
utils.execute('openssl', 'genrsa', '-out', keyfile, str(bits))
utils.execute('openssl', 'req', '-new', '-key', keyfile, '-out',
csrfile, '-batch', '-subj', subject)
private_key = open(keyfile).read()
csr = open(csrfile).read()
(serial, signed_csr) = sign_csr(csr, project_id)
fname = os.path.join(ca_folder(project_id), 'newcerts/%s.pem' % serial)
cert = {'user_id': user_id,
'project_id': project_id,
'file_name': fname}
db.certificate_create(context.get_admin_context(), cert)
return (private_key, signed_csr)
def _ensure_project_folder(project_id):
if not os.path.exists(ca_path(project_id)):
geninter_sh_path = os.path.join(os.path.dirname(__file__),
'CA',
'geninter.sh')
start = os.getcwd()
os.chdir(ca_folder())
utils.execute('sh', geninter_sh_path, project_id,
_project_cert_subject(project_id))
os.chdir(start)
def generate_vpn_files(project_id):
project_folder = ca_folder(project_id)
key_fn = os.path.join(project_folder, 'server.key')
crt_fn = os.path.join(project_folder, 'server.crt')
if os.path.exists(crt_fn):
return
# NOTE(vish): The 2048 is to maintain compatibility with the old script.
# We are using "project-vpn" as the user_id for the cert
# even though that user may not really exist. Ultimately
# this will be changed to be launched by a real user. At
    #             that point we can delete this helper method.
key, csr = generate_x509_cert('project-vpn', project_id, 2048)
with open(key_fn, 'w') as keyfile:
keyfile.write(key)
with open(crt_fn, 'w') as crtfile:
crtfile.write(csr)
def sign_csr(csr_text, project_id=None):
if not FLAGS.use_project_ca:
project_id = None
if not project_id:
return _sign_csr(csr_text, ca_folder())
_ensure_project_folder(project_id)
project_folder = ca_folder(project_id)
return _sign_csr(csr_text, ca_folder(project_id))
def _sign_csr(csr_text, ca_folder):
with utils.tempdir() as tmpdir:
inbound = os.path.join(tmpdir, 'inbound.csr')
outbound = os.path.join(tmpdir, 'outbound.csr')
with open(inbound, 'w') as csrfile:
csrfile.write(csr_text)
LOG.debug(_('Flags path: %s'), ca_folder)
start = os.getcwd()
# Change working dir to CA
utils.ensure_tree(ca_folder)
os.chdir(ca_folder)
utils.execute('openssl', 'ca', '-batch', '-out', outbound, '-config',
'./openssl.cnf', '-infiles', inbound)
out, _err = utils.execute('openssl', 'x509', '-in', outbound,
'-serial', '-noout')
serial = string.strip(out.rpartition('=')[2])
os.chdir(start)
with open(outbound, 'r') as crtfile:
return (serial, crtfile.read())
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# http://code.google.com/p/boto
def compute_md5(fp):
"""Compute an md5 hash.
:type fp: file
:param fp: File pointer to the file to MD5 hash. The file pointer will be
reset to the beginning of the file before the method returns.
:rtype: tuple
:returns: the hex digest version of the MD5 hash
"""
m = hashlib.md5()
fp.seek(0)
s = fp.read(8192)
while s:
m.update(s)
s = fp.read(8192)
hex_md5 = m.hexdigest()
# size = fp.tell()
fp.seek(0)
return hex_md5
| apache-2.0 | -3,505,826,213,502,774,300 | 34.64624 | 79 | 0.616238 | false |
arpitmathur/CourseAvailabilityChecker | courseCheck.py | 1 | 1898 | __author__ = 'Arpit'
import find
import time
import sys
import gmailer
#initialize datastructures
courses = []
semester = []
email = []
flag = 0
#parse changeme
with open('CHANGEME.txt') as fp:
for line in fp:
if(line[0] == "\n" or line[0] == "#"):
continue
line = line.rstrip()
if(line == "START" or line == "EMAIL START"):
continue
elif(line == "EMAIL END"):
break
elif(line == "END"):
flag = 2
elif(flag == 0):
semester = (line.rsplit(','))
flag = 1
elif(flag == 1):
courses.append(line.rsplit(','))
elif(flag == 2):
email = (line.rsplit(','))
flag = 0
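# Illustration only -- an example CHANGEME.txt layout inferred from the parser
# above; every concrete value is made up. Blank lines and lines starting with
# '#' are skipped; the first line after START is the semester, each following
# line up to END is one course as department,number,CRN; the EMAIL block holds
# the Gmail address and password used for notifications.
#
#   START
#   Fall,2017
#   CS,1371,12345
#   MATH,2551,67890
#   END
#
#   EMAIL START
#   [email protected],yourpassword
#   EMAIL END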
count = 0
sleepTime = 300
#while a course isn't available
while courses:
count = count + 1
if count!=1:
print ("Please wait for " + str(sleepTime/60) + " minutes before the next attempt!")
#sleep five minutes
time.sleep(sleepTime)
print ("Aaaaaaaand we're back! \n")
print ('Attempt: ' + str(count))
try:
for course in list(courses):
print ("Checking: " + str(course[0]) + ' ' + str(course[1]) + ' - CRN: ' + str(course[2]))
#check availability
flag = find.search(semester, course)
if( flag == 1):
print ('Success!')
print ('Sending email now!')
courses.remove(course)
try:
gmailer.sendemail(email[0], email[0], "", str(course[0]) + " " + str(course[1]) + " IS OPEN", "The CRN is " + str(course[2]) + ". Register now!", email[0], email[1] )
except:
raise ValueError()
else:
print ("It's Closed!")
except ValueError:
print ("Fix your email credentials!")
sys.exit()
except:
print ("oops") | mit | 7,397,628,063,815,961,000 | 27.343284 | 186 | 0.494731 | false |
argonnexraydetector/RoachFirmPy | Roach2DevelopmentTree/pyfiles/pca.py | 1 | 3520 | import numpy as np
from scipy import linalg
import random as rnd
import matplotlib
import matplotlib.pyplot
'''
execfile('pca.py')
p = pulseTrain(1000)
e = eigens(p)
plot(e['eigenvectors'][0])
plot(e['eigenvectors'][1])
testan(e,2);
'''
print 'running pca.py'
def makePulse(L=100.0,t1=10.0,t2=1.0,a1=1.0,a2=1.0,n=0.1):
rnd.seed(None)
e1=a1*np.exp(-1*np.arange(L)/t1);
e2=a2*(1.0 - np.exp(-1*np.arange(L)/t2));
p1=e1*e2
noise=[]
for k in range(int(L)): noise.append(rnd.gauss(0.0,n))
noise=np.array(noise)
p1=p1+noise
return(p1)
def pulseTrain(N=100):
plist=[]
for n in range(N):
amp = 0.5 + 0.02*rnd.random()
amp2 = 0.2 + 0.02*rnd.random()
xx=rnd.random()
if xx>=0.5: tc = 10
else: tc = 4
pls=makePulse(
a1=amp,
a2=amp2,
t2=4,
t1=tc,
n=0.001)
plist.append(pls.tolist())
D=np.array(plist).transpose()
plotTrain(D)
return(D)
def plotTrain(D):
matplotlib.pyplot.figure(1)
N=D.shape[0]
L=D.shape[1]
matplotlib.pyplot.clf()
for k in range(N):
matplotlib.pyplot.plot(D.transpose()[k])
matplotlib.pyplot.figure(2)
matplotlib.pyplot.clf()
matplotlib.pyplot.pcolor(D)
def eigens(D):
Z=np.dot(D,D.transpose() )
#Z =np.cov(D)
evals,evecs=linalg.eig(Z)
evals = np.real(evals)
evecs = np.real(evecs)
matplotlib.pyplot.figure(1)
matplotlib.pyplot.clf()
matplotlib.pyplot.plot(np.real(evals))
matplotlib.pyplot.figure(2)
matplotlib.pyplot.clf()
matplotlib.pyplot.pcolor(evecs * evals)
matplotlib.pyplot.figure(3)
matplotlib.pyplot.clf()
matplotlib.pyplot.pcolor(Z)
matplotlib.pyplot.figure(4)
matplotlib.pyplot.plot(evecs * evals)
retdata = {}
retdata['eigenvalues'] = np.real(evals)
retdata['eigenvectors'] = np.real(evecs).transpose()
retdata['covariance'] = Z
return(retdata)
def eigenPulseTrain(eigendata, numcomponents=2, N=100):
    pulsestruct = np.array([[0.1, 1.0], [1.0, 0.1], [0.5, 0.5], [0.1, -1.0]])
    pulses = []
    for n in range(N):
        pulse = np.array([0.0] * len(eigendata['eigenvectors'][0]))
        # pick a random pulse shape and jitter its component amplitudes
        psindex = int(np.floor(np.random.rand() * len(pulsestruct)))
        ps = pulsestruct[psindex]
        ps = ps * (1.0 + 0.2 * np.random.rand(numcomponents))
        for c in range(numcomponents):
            eigpulse = eigendata['eigenvectors'][c]
            pulse = pulse + eigpulse * ps[c]
        pulses.append(pulse)
    pulses = np.array(pulses)
    matplotlib.pyplot.figure(1)
    matplotlib.pyplot.clf()
    matplotlib.pyplot.plot(pulses.transpose())
    return pulses
def testan(eigendata,numcomponents):
#p = pulseTrain().transpose()
p = eigenPulseTrain(eigendata)
    matplotlib.pyplot.figure(10)
Rvals = []
for pulse in p:
rvalp = [0.0] * (1+numcomponents)
energy = 0.0
for c in range(numcomponents):
filt = eigendata['eigenvectors'][c]
fp = np.convolve(pulse,filt)
rvalp[c] =(np.dot(fp,fp))
#rvalp[c] =max(fp)
energy = energy + rvalp[c]
rvalp[numcomponents] = energy
Rvals.append(rvalp)
if numcomponents==2:
            matplotlib.pyplot.plot(rvalp[0], rvalp[1], '.')
return(np.array(Rvals) )
| gpl-2.0 | -1,943,123,093,516,862,700 | 20.469512 | 75 | 0.537216 | false |
why2pac/dp-tornado | dp_tornado/helper/io/image/__init__.py | 1 | 12413 | # -*- coding: utf-8 -*-
import tempfile
from dp_tornado.engine.helper import Helper as dpHelper
class ImageHelper(dpHelper):
def compare(self, i1, i2, error=0):
i1 = self.load(i1)
i2 = self.load(i2)
if not i1 or not i2:
return None
s1 = i1.size
s2 = i2.size
if s1[0] != s2[0] or s2[1] != s2[1]:
print('size ne,', s1, s2)
return False
i1 = i1.load()
i2 = i2.load()
for i in range(s1[0]):
for j in range(s1[1]):
if i1[i, j] != i2[i, j]:
if error:
for k in range(len(i1[i, j])):
if abs(i1[i, j][k] - i2[i, j][k]) > error:
print('pixel ne,', i1[i, j], i2[i, j], abs(i1[i, j][k] - i2[i, j][k]), error)
return False
else:
return False
return True
def _driver(self, options=None, **kwargs):
if not options and kwargs:
options = kwargs
if options and 'driver' in options and options['driver'] == 'wand':
return self.helper.io.image.driver.wand
return self.helper.io.image.driver.pillow
def load(self, src, options=None, **kwargs):
if not options and kwargs:
options = kwargs
tmp = None
drivers = []
pillow_image = self.helper.io.image.driver.pillow.Image
wand_image = self.helper.io.image.driver.wand.Image
if pillow_image:
drivers.append(pillow_image)
if wand_image:
drivers.append(wand_image)
try:
if isinstance(src, tuple(drivers)):
return src
elif self.helper.web.url.validate(src):
code, res = self.helper.web.http.get.raw(src)
if code != 200:
raise Exception('The specified image url is invalid.')
tmp = tempfile.NamedTemporaryFile(delete=False)
tmp.write(res)
tmp.close()
tmp = tmp.name
else:
tmp = None
if not tmp and not src:
raise Exception('The specified image is invalid.')
img = self._driver(options=options).load(tmp if tmp else src)
if not img:
raise Exception('The specified image is invalid.')
return img
except Exception as e:
self.logging.exception(e)
return False
finally:
if tmp:
self.helper.io.file.remove(tmp)
def execute(self, src, fn, options=None, **kwargs):
if not options and kwargs:
options = kwargs
img = self.load(src, options=options)
if not img:
return False
try:
return fn(img, options)
except Exception as e:
self.logging.exception(e)
return False
def size(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
if not img:
return -1, -1
return img.width, img.height
return self.execute(src, fn, options=options)
def crop(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
crop = kwargs['crop'] if 'crop' in kwargs else None
if not crop:
return img
e_top = 0
e_left = 0
e_right = 0
e_bottom = 0
if self.helper.misc.type.check.string(crop):
crop = crop.split(',')
crop = [int(e.strip()) for e in crop]
if self.helper.misc.type.check.numeric(crop):
e_top = e_left = e_right = e_bottom = crop
elif isinstance(crop, (tuple, list)):
if len(crop) == 1:
e_top = e_left = e_right = e_bottom = crop[0]
elif len(crop) == 2:
e_top = e_bottom = crop[0]
e_left = e_right = crop[1]
elif len(crop) == 4:
e_top = crop[0]
e_right = crop[1]
e_bottom = crop[2]
e_left = crop[3]
img = self._driver(options=kwargs).crop(img, e_left, e_top, img.size[0] - e_right, img.size[1] - e_bottom)
return img
return self.execute(src, fn, options=options)
def border(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
border = int(kwargs['border']) if 'border' in kwargs else 0
border_color = kwargs['border_color'] if 'border_color' in kwargs else '#000000'
if not border:
return img
if '_org' in kwargs and 'radius' in kwargs and kwargs['radius']:
return img
img = self._driver(options=kwargs).border(img, border, border_color)
return img
return self.execute(src, fn, options=options)
def radius(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
radius = int(kwargs['radius'] or 0) if 'radius' in kwargs else None
border = int(kwargs['border']) if 'border' in kwargs else 0
border_color = kwargs['border_color'] if 'border_color' in kwargs else '#000000'
if not radius:
return img
elif '__radius_processed__' in img.__dict__:
return img
img = self._driver(options=kwargs).radius(img, radius, border, border_color)
img.__dict__['__radius_processed__'] = True
return img
return self.execute(src, fn, options=options)
def colorize(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
colorize = kwargs['colorize'] if 'colorize' in kwargs else None
if not colorize:
return img
img = self._driver(options=kwargs).colorize(img, colorize)
return img
return self.execute(src, fn, options=options)
def resize(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
size = kwargs['size'] if 'size' in kwargs else None
mode = kwargs['mode'] if 'mode' in kwargs else None
scale = int(kwargs['scale']) if 'scale' in kwargs else 1
limit = True if 'limit' in kwargs and kwargs['limit'] else False
border = int(kwargs['border']) if 'border' in kwargs else 0
if not size:
return img
width_new, height_new = size
width_origin, height_origin = img.size
if scale > 1:
if limit:
scale_max_width = float(width_origin) / float(width_new)
scale_max_height = float(height_origin) / float(height_new)
scale_max = min(scale, scale_max_width, scale_max_height)
else:
scale_max = scale
if scale_max > 1:
width_new = int(width_new * scale_max)
height_new = int(height_new * scale_max)
if not width_new:
width_new = width_origin * height_new / height_origin
mode = self.helper.io.image.mode.resize
if not height_new:
height_new = height_origin * width_new / width_origin
mode = self.helper.io.image.mode.resize
if border:
width_new -= border * 2
height_new -= border * 2
if not mode:
mode = self.helper.io.image.mode.resize
if mode not in self.helper.io.image.mode.modes:
raise Exception('The specified mode is not supported.')
seqs = []
for i, im in self._driver(options=kwargs).iter_seqs(img, kwargs):
# Image Resizing
if mode == self.helper.io.image.mode.center:
im = self._driver(options=kwargs).resize(im, width_new, height_new, kwargs)
elif mode == self.helper.io.image.mode.fill:
ratio_origin = float(width_origin) / float(height_origin)
ratio_new = float(width_new) / float(height_new)
if ratio_origin > ratio_new:
tw = int(round(height_new * ratio_origin))
im = self._driver(options=kwargs).resize(im, tw, height_new)
left = int(round((tw - width_new) / 2.0))
im = self._driver(options=kwargs).crop(im, left, 0, left + width_new, height_new)
elif ratio_origin < ratio_new:
th = int(round(width_new / ratio_origin))
im = self._driver(options=kwargs).resize(im, width_new, th)
top = int(round((th - height_new) / 2.0))
im = self._driver(options=kwargs).crop(im, 0, top, width_new, top + height_new)
else:
im = self._driver(options=kwargs).resize(im, width_new, height_new)
elif mode == self.helper.io.image.mode.resize:
if width_new > width_origin or height_new > height_origin:
width_new = width_origin
height_new = height_origin
im = self._driver(options=kwargs).resize(im, width_new, height_new)
seqs.append(im)
img = seqs[0]
seqs.remove(img)
img.__dict__['__frames__'] = seqs
return img
return self.execute(src, fn, options=options)
def save(self, src, options=None, **o_kwargs):
if not options and o_kwargs:
options = o_kwargs
def fn(img, kwargs):
ext = kwargs['format'] if 'format' in kwargs else None
dest = kwargs['dest'] if 'dest' in kwargs else None
if not dest:
return None
if not ext and self.helper.misc.type.check.string(dest):
ext = self.helper.io.path.ext(dest, dot='').lower()
if not ext and self.helper.misc.type.check.string(src):
ext = self.helper.io.path.ext(src, dot='').lower()
if not ext and '_org' in kwargs and kwargs['_org'] and self.helper.misc.type.check.string(kwargs['_org']):
ext = self.helper.io.path.ext(kwargs['_org'], dot='').lower()
if dest == 's3':
# TODO
return False
if not self._driver(options=kwargs).save(img, ext, dest, kwargs):
return False
return True
return self.execute(src, fn, options=options)
def manipulate(self, src, options=None, **kwargs):
if not options and kwargs:
options = kwargs
options['_org'] = src
try:
img = self.load(src, options=options)
# Crop
img = self.crop(img, options=options)
if not img:
return False
# Resize
img = self.resize(img, options=options)
if not img:
return False
# Radius
img = self.radius(img, options=options)
if not img:
return False
# Border
img = self.border(img, options=options)
if not img:
return False
# Colorize
img = self.colorize(img, options=options)
if not img:
return False
# Save
saved = self.save(img, options=options)
if saved is None:
return img
elif saved is False:
return False
return True
except Exception as e:
self.logging.exception(e)
return False
| mit | 5,356,887,573,378,849,000 | 29.649383 | 118 | 0.498107 | false |
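The truncated entry above ends an image-manipulation helper whose manipulate() method chains load, crop, resize, radius, border, colorize and save over one shared options dict. Below is an illustrative options dict; the keys mirror the kwargs each step reads in the code above, while the values are invented for the example.

# Illustrative options for the manipulate() pipeline above (values invented).
options = {
    "size": (320, 240),         # consumed by resize()
    "mode": "fill",             # one of the helper's resize modes
    "radius": 8,                # rounded corners, consumed by radius()
    "border": 2,                # consumed by border()
    "border_color": "#ff0000",
    "colorize": None,           # a falsy value makes colorize() a no-op
    "dest": "output.png",       # consumed by save(); save() returns True on success
    "format": "png",
}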
kgarrison343/recipe-site | polls/views.py | 1 | 1213 | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from .models import Question, Choice
# Create your views here.
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
return Question.objects.order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
return render(request, 'polls/detail.html', {
'question': question,
'error_message': "You didn't select a valid choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
| mit | -1,490,559,948,873,557,200 | 30.921053 | 82 | 0.693322 | false |
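A sketch of the URL configuration these class-based views expect. The 'polls' namespace and the 'results' route name come from the reverse('polls:results', ...) call above; the remaining route names follow the standard Django tutorial layout and are assumptions.

# polls/urls.py (illustrative wiring for the views above)
from django.urls import path
from . import views

app_name = 'polls'
urlpatterns = [
    path('', views.IndexView.as_view(), name='index'),
    path('<int:pk>/', views.DetailView.as_view(), name='detail'),
    path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),
    path('<int:question_id>/vote/', views.vote, name='vote'),
]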
mads-bertelsen/McCode | tools/Python/mcrun/mcrun.py | 1 | 12659 | #!/usr/bin/env python3
from os import mkdir
from os.path import isfile, isdir, abspath, dirname, basename, join
from shutil import copyfile
from optparse import OptionParser, OptionGroup, OptionValueError
from decimal import Decimal, InvalidOperation
from datetime import datetime
from mccode import McStas, Process
from optimisation import Scanner, LinearInterval, MultiInterval
#import config
import sys
sys.path.append(join(dirname(__file__), '..'))
from mccodelib import mccode_config
from log import getLogger, setupLogger, setLogLevel, McRunException
from log import DEBUG
LOG = getLogger('main')
# File path friendly date format (avoid ':' and white space)
DATE_FORMAT_PATH = "%Y%m%d_%H%M%S"
# Helper functions
def build_checker(accept, msg='Invalid value'):
''' Build checker from accept() function '''
def checker(option, _opt_str, value, parser):
''' value must be acceptable '''
if not accept(value):
raise OptionValueError('option %s: %s (was: "%s")' % \
(option, msg, value))
# Update parser with accepted value
setattr(parser.values, option.dest, value)
return checker
def add_mcrun_options(parser):
''' Add option group for McRun options to parser '''
# McRun options
opt = OptionGroup(parser, 'mcrun options')
add = opt.add_option
add('-c', '--force-compile',
action='store_true',
help='force rebuilding of instrument')
add('-p', '--param',
metavar='FILE',
help='read parameters from file FILE')
add('-N', '--numpoints',
type=int, metavar='NP',
help='set number of scan points')
add('-L', '--list',
action='store_true',
help='use a fixed list of points for linear scanning')
add('-M', '--multi',
action='store_true',
help='run a multi-dimensional scan')
add('--autoplot',
action='store_true',
help='open plotter on generated dataset')
add('--embed',
action='store_true', default=True,
help='store copy of instrument file in output directory')
# Multiprocessing
add('--mpi',
metavar='NB_CPU',
help='spread simulation over NB_CPU machines using MPI')
add('--machines',
metavar='machines',
help='defines path of MPI machinefile to use in parallel mode')
# Optimisation
add('--optimise-file',
metavar='FILE',
help='store scan results in FILE '
'(defaults to: "mccode.dat")')
add('--no-cflags',
action='store_true', default=False,
help='disable optimising compiler flags for faster compilation')
add('--verbose',
action='store_true', default=False,
help='enable verbose output')
add('--write-user-config',
action='store_true', default=False,
help='generate a user config file')
parser.add_option_group(opt)
def add_mcstas_options(parser):
''' Add option group for McStas options to parser '''
opt = OptionGroup(parser, 'Instrument options')
add = opt.add_option
# Misc options
check_seed = build_checker(lambda seed: seed != 0,
'SEED cannot be 0')
add('-s', '--seed',
metavar='SEED', type=int, action='callback', callback=check_seed,
help='set random seed (must be: SEED != 0)')
add('-n', '--ncount',
metavar='COUNT', type=float, default=1000000,
help='set number of neutrons to simulate')
add('-t', '--trace',
action='store_true', default=False,
help='enable trace of neutron through instrument')
add('-g', '--gravitation', '--gravity',
action='store_true', default=False,
help='enable gravitation for all trajectories')
# Data options
dir_exists = lambda path: isdir(abspath(path))
def check_file(exist=True):
''' Validate the path to a file '''
if exist:
is_valid = isfile
else:
def is_valid(path):
''' Ensure path to file exists and filename is provided '''
if path == "." or path == "./" or path == ".\\":
return True
if not dir_exists(dirname(path)):
return False
return not isdir(abspath(path))
return build_checker(is_valid, 'invalid path')
add('-d', '--dir',
metavar='DIR', type=str,
action='callback', callback=check_file(exist=False),
help='put all data files in directory DIR')
add('--format',
metavar='FORMAT', default='McStas',
help='output data files using format FORMAT '
'(format list obtained from <instr>.%s -h)' % mccode_config.platform["EXESUFFIX"])
add('--no-output-files',
action='store_true', default=False,
help='Do not write any data files')
# Information
add('-i', '--info',
action='store_true', default=False,
help='Detailed instrument information')
parser.add_option_group(opt)
def expand_options(options):
''' Add extra options based on previous choices '''
# McCode version and library
options.mccode_bin = mccode_config.configuration['MCCODE']
options.mccode_lib = mccode_config.configuration['MCCODE_LIB_DIR']
# MPI
if options.mpi is not None:
options.use_mpi = True
options.cc = mccode_config.compilation['MPICC']
options.mpirun = mccode_config.compilation['MPIRUN']
else:
options.use_mpi = False
options.cc = mccode_config.compilation['CC']
# Output dir
if options.dir is None:
instr = options.instr
instr = instr.endswith('.instr') and instr[:-6] or instr
# use unique directory when unspecified
options.dir = "%s_%s" % \
(basename(instr),
datetime.strftime(datetime.now(), DATE_FORMAT_PATH))
# alert user
LOG.info('No output directory specified (--dir)')
# Output file
if options.optimise_file is None:
# use mccode.dat when unspecified
options.optimise_file = '%s/mccode.dat' % options.dir
def is_decimal(string):
''' Check if string is parsable as decimal/float '''
try:
Decimal(string)
return True
except InvalidOperation:
return False
def get_parameters(options):
''' Get fixed and scan/optimise parameters '''
fixed_params = {}
intervals = {}
for param in options.params:
if '=' in param:
key, value = param.split('=', 1)
interval = value.split(',')
# When just one point is present, fix as constant
if len(interval) == 1:
fixed_params[key] = value
else:
LOG.debug('interval: %s', interval)
intervals[key] = interval
else:
LOG.warning('Ignoring invalid parameter: "%s"', param)
return (fixed_params, intervals)
def find_instr_file(instr):
# Remove [-mpi].out to avoid parsing a binary file
instr = clean_quotes(instr)
if instr.endswith("-mpi." + mccode_config.platform['EXESUFFIX']):
instr = instr[:-(5 + len(mccode_config.platform['EXESUFFIX']))]
if instr.endswith("." + mccode_config.platform['EXESUFFIX']):
instr = instr[:-(1 + len(mccode_config.platform['EXESUFFIX']))]
# Append ".instr" if needed
if not isfile(instr) and isfile(instr + ".instr"):
instr += ".instr"
return instr
def clean_quotes(string):
''' Remove all leading and ending quotes (" and \') '''
return string.strip('"' + "'")
def main():
''' Main routine '''
setupLogger()
# Add options
usage = ('usage: %prog [-cpnN] Instr [-sndftgahi] '
'params={val|min,max|min,guess,max}...')
parser = OptionParser(usage, version=mccode_config.configuration['MCCODE_VERSION'])
add_mcrun_options(parser)
add_mcstas_options(parser)
# Parse options
(options, args) = parser.parse_args()
# Write user config file and exit
if options.write_user_config:
mccode_config.save_user_config()
quit()
# Extract instrument and parameters
if len(args) == 0:
print(parser.get_usage())
parser.exit()
# Set path of instrument-file after locating it
options.instr = find_instr_file(args[0])
if options.param:
# load params from file
text = open(options.param).read()
import re
params = re.findall('[^=^\s^t]+=[^=^\s^t]+', text)
options.params = map(clean_quotes, params)
else:
# Clean out quotes (perl mcgui requires this step)
options.params = map(clean_quotes, args[1:])
# On windows, ensure that backslashes in the filename are escaped
if sys.platform == "win32":
options.instr = options.instr.replace("\\","\\\\")
# Fill out extra information
expand_options(options)
if options.verbose:
setLogLevel(DEBUG)
# Inform user of what is happening
# TODO: More info?
LOG.info('Using directory: "%s"' % options.dir)
if options.dir == "." or options.dir == "./" or options == ".\\":
LOG.warning('Existing files in "%s" will be overwritten!' % options.dir)
LOG.warning(' - and datafiles catenated...')
options.dir = '';
# Run McStas
mcstas = McStas(options.instr)
mcstas.prepare(options)
(fixed_params, intervals) = get_parameters(options)
# Indicate end of setup / start of computations
LOG.info('===')
if options.info:
print('info!')
mcstas.run(override_mpi=False)
exit()
# Set fixed parameters
for key, value in fixed_params.items():
mcstas.set_parameter(key, value)
# Check for linear scanning
interval_points = None
# Can't both do list and interval scanning
if options.list and options.numpoints:
raise OptionValueError('--numpoints cannot be used with --list')
if options.list:
if len(intervals) == 0:
raise OptionValueError(
                '--list was chosen but no list was presented.')
pointlist=list(intervals.values())
points = len(pointlist[0])
if not(all(map(lambda i: len(i) == points, intervals.values()))):
raise OptionValueError(
                'All variables must have an equal number of points.')
interval_points = LinearInterval.from_list(
points, intervals)
scan = options.multi or options.numpoints
if ((options.numpoints is not None and options.numpoints < 2)
or (scan and options.numpoints is None)):
raise OptionValueError(
('Cannot scan variable(s) %s using only one data point. '
'Please use -N to specify the number of points.') % \
', '.join(intervals.keys()))
# Check that input is valid decimals
if not all(map(lambda i:
len(i) == 2 and
all(map(is_decimal, i)), intervals.values())):
raise OptionValueError('Could not parse intervals -- result: %s'
% str(intervals))
if options.multi is not None:
interval_points = MultiInterval.from_range(
options.numpoints, intervals)
elif options.numpoints is not None:
interval_points = LinearInterval.from_range(
options.numpoints, intervals)
# Parameters for linear scanning present
if interval_points:
scanner = Scanner(mcstas, intervals)
scanner.set_points(interval_points)
if (not options.dir == ''):
mkdir(options.dir)
scanner.run()
else:
# Only run a simulation if we have a nonzero ncount
if not options.ncount == 0.0:
mcstas.run()
if isdir(options.dir):
LOG.info('Placing instr file copy %s in dataset %s',options.instr,options.dir)
copyfile(options.instr, join(options.dir,basename(options.instr)))
if options.autoplot is not None:
if isdir(options.dir):
LOG.info('Running plotter %s on dataset %s',mccode_config.configuration['MCPLOT'],options.dir)
Process(mccode_config.configuration['MCPLOT']).run([options.dir])
if __name__ == '__main__':
try:
mccode_config.load_user_config()
mccode_config.check_env_vars()
main()
except KeyboardInterrupt:
LOG.fatal('User interrupt.')
except OptionValueError as e:
LOG.fatal(str(e))
except McRunException as e:
LOG.fatal(str(e))
| gpl-2.0 | 2,498,550,659,349,612,000 | 30.967172 | 106 | 0.598704 | false |
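For clarity, a self-contained rendition of the fixed-versus-interval split that get_parameters() performs above; the sample parameter strings are invented.

# Parameters with a single value become constants, comma-separated values
# become scan intervals (mirrors get_parameters() in mcrun.py above).
params = ["ncount=1e6", "lambda=1.0,4.0", "badtoken"]
fixed_params = {}
intervals = {}
for param in params:
    if '=' in param:
        key, value = param.split('=', 1)
        interval = value.split(',')
        if len(interval) == 1:
            fixed_params[key] = value
        else:
            intervals[key] = interval
print(fixed_params)   # {'ncount': '1e6'}
print(intervals)      # {'lambda': ['1.0', '4.0']}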
bobmcwhirter/drools | lib/utility-scripts/docbot-masseur.py | 1 | 2159 | #!/usr/bin/python
#
# This script will flatten out a folder based docbook manual into a docbot friendly "flat" structure
# (and update links in files accordingly)
# Author: Michael Neale
#
import os, sys, shutil
def flatten(root, output) :
if not os.path.isdir(output):
os.mkdir(output)
if not os.path.isdir(os.path.join(output, "images")):
os.mkdir(os.path.join(output, "images"))
sections = {}
top_files = []
names = os.listdir(root)
for name in names:
if os.path.isdir(os.path.join(root, name)) :
if not name == ".svn":
flattenDir(root, name, output, sections)
else:
if name.endswith(".xml") :
top_files.append(name)
elif name != ".svn":
shutil.copyfile(os.path.join(root, name), os.path.join(output, name))
for file in top_files:
contents = open(os.path.join(root, file), "r").read()
for section in sections:
contents = contents.replace(section, sections[section])
outfile = open(os.path.join(output, file), "w")
outfile.write(contents)
def flattenDir(root, dir, output, sections):
docs = []
images = []
names = os.listdir(os.path.join(root, dir))
for name in names:
if name.endswith(".xml"):
docs.append(name)
else:
if name != ".svn":
images.append(name)
shutil.copyfile(os.path.join(root, dir, name), os.path.join(output, "images", dir + "_" + name))
for doc in docs:
new_name = dir + "_" + doc
sections[dir + "/" + doc] = new_name
file = open(os.path.join(root, dir, doc), "r").read()
outfile = open(os.path.join(output, new_name), "w")
for img in images:
file = file.replace(img, "images/" + dir + "_" + img)
outfile.write(file)
if len(sys.argv) < 2:
print "2 arguments required: <path to root of documentation> <output path>. eg: docbot-masseur.py ./something ./output"
else:
flatten(sys.argv[1], sys.argv[2])
| apache-2.0 | 6,316,199,486,616,234,000 | 31.223881 | 123 | 0.552571 | false |
enthought/traitsgui | enthought/pyface/i_directory_dialog.py | 1 | 1873 | #------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
""" The interface for a dialog that allows the user to browse for a directory.
"""
# Enthought library imports.
from enthought.traits.api import Bool, Unicode
# Local imports.
from i_dialog import IDialog
class IDirectoryDialog(IDialog):
""" The interface for a dialog that allows the user to browse for a
directory.
"""
#### 'IDirectoryDialog' interface #########################################
# The default path. The default (ie. the default default path) is toolkit
# specific.
# FIXME v3: The default should be the current directory. (It seems wx is
# the problem, although the file dialog does the right thing.)
default_path = Unicode
# The message to display in the dialog. The default is toolkit specific.
message = Unicode
# True iff the dialog should include a button that allows the user to
# create a new directory.
new_directory = Bool(True)
# The path of the chosen directory.
path = Unicode
class MDirectoryDialog(object):
""" The mixin class that contains common code for toolkit specific
implementations of the IDirectoryDialog interface.
"""
#### EOF ######################################################################
| bsd-3-clause | 852,909,260,676,528,000 | 33.685185 | 79 | 0.619861 | false |
ristorantino/fiscalberry | Traductores/TraductorFiscal.py | 1 | 7099 | # -*- coding: utf-8 -*-
from Traductores.TraductorInterface import TraductorInterface
import math
class TraductorFiscal(TraductorInterface):
    def dailyClose(self, type):
        "X or Z command"
        # cancel and return to a known state
self.comando.cancelAnyDocument()
self.comando.start()
ret = self.comando.dailyClose(type)
self.comando.close()
return ret
    def imprimirAuditoria(self, desde, hasta):
        "Print the audit report"
        # Only compatible with Epson 1G and 2G for the moment...
        # desde & hasta are parameters that can be Z report numbers or dates in ddmmyyyy format
self.comando.start()
ret = self.comando.imprimirAuditoria(desde, hasta)
self.comando.close()
return ret
def getStatus(self, *args):
"getStatus"
self.comando.start()
ret = self.comando.getStatus(list(args))
self.comando.close()
return ret
def setHeader(self, *args):
"SetHeader"
self.comando.start()
ret = self.comando.setHeader(list(args))
self.comando.close()
return ret
def setTrailer(self, *args):
"SetTrailer"
self.comando.start()
ret = self.comando.setTrailer(list(args))
self.comando.close()
return ret
    def openDrawer(self, *args):
        "Open the cash drawer"
self.comando.start()
ret = self.comando.openDrawer()
self.comando.close()
return ret
    def getLastNumber(self, tipo_cbte):
        "Returns the last receipt number"
self.comando.start()
letra_cbte = tipo_cbte[-1] if len(tipo_cbte) > 1 else None
ret = self.comando.getLastNumber(letra_cbte)
self.comando.close()
return ret
    def cancelDocument(self, *args):
        "Cancel the document in progress"
self.comando.start()
self.comando.cancelAnyDocument()
self.comando.close()
def printTicket(self, encabezado=None, items=[], pagos=[], percepciones=[], addAdditional=None, setHeader=None, setTrailer=None):
if setHeader:
self.setHeader(*setHeader)
if setTrailer:
self.setTrailer(*setTrailer)
self.comando.start()
try:
if encabezado:
self._abrirComprobante(**encabezado)
else:
self._abrirComprobante()
for item in items:
self._imprimirItem(**item)
if percepciones:
for percepcion in percepciones:
self._imprimirPercepcion(**percepcion)
if pagos:
for pago in pagos:
self._imprimirPago(**pago)
if addAdditional:
self.comando.addAdditional(**addAdditional)
rta = self._cerrarComprobante()
self.comando.close()
return rta
except Exception, e:
self.cancelDocument()
raise
def _abrirComprobante(self,
                          tipo_cbte="T", # ticket
tipo_responsable="CONSUMIDOR_FINAL",
tipo_doc="SIN_CALIFICADOR",
                          nro_doc=" ", # unspecified
nombre_cliente=" ",
domicilio_cliente=" ",
                          referencia=None, # original document (ND/NC)
**kwargs
                          ):
        "Create an invoice object (internally) and print the header"
        # create the internal structure
self.factura = {"encabezado": dict(tipo_cbte=tipo_cbte,
tipo_responsable=tipo_responsable,
tipo_doc=tipo_doc, nro_doc=nro_doc,
nombre_cliente=nombre_cliente,
domicilio_cliente=domicilio_cliente,
referencia=referencia),
"items": [], "pagos": [], "percepciones": []}
printer = self.comando
letra_cbte = tipo_cbte[-1] if len(tipo_cbte) > 1 else None
# mapear el tipo de cliente (posicion/categoria)
pos_fiscal = printer.ivaTypes.get(tipo_responsable)
# mapear el numero de documento según RG1361
doc_fiscal = printer.docTypes.get(tipo_doc)
ret = False
# enviar los comandos de apertura de comprobante fiscal:
if tipo_cbte.startswith('T'):
if letra_cbte:
ret = printer.openTicket(letra_cbte)
else:
ret = printer.openTicket()
elif tipo_cbte.startswith("F"):
ret = printer.openBillTicket(letra_cbte, nombre_cliente, domicilio_cliente,
nro_doc, doc_fiscal, pos_fiscal)
elif tipo_cbte.startswith("ND"):
ret = printer.openDebitNoteTicket(letra_cbte, nombre_cliente,
domicilio_cliente, nro_doc, doc_fiscal,
pos_fiscal)
elif tipo_cbte.startswith("NC"):
ret = printer.openBillCreditTicket(letra_cbte, nombre_cliente,
domicilio_cliente, nro_doc, doc_fiscal,
pos_fiscal, referencia)
return ret
def _imprimirItem(self, ds, qty, importe, alic_iva=21., itemNegative=False, discount=0, discountDescription='',
                      discountNegative=False):
        "Sends an item (description, quantity, etc.) to an invoice"
if importe < 0:
importe = math.fabs(importe)
itemNegative = True
self.factura["items"].append(dict(ds=ds, qty=qty,
importe=importe, alic_iva=alic_iva, itemNegative=itemNegative,
discount=discount, discountDescription=discountDescription,
discountNegative=discountNegative))
        # Note: net, VAT, etc. are not calculated here (they must come already calculated!)
if discountDescription == '':
discountDescription = ds
return self.comando.addItem(ds, float(qty), float(importe), float(alic_iva),
itemNegative, float(discount), discountDescription, discountNegative)
    def _imprimirPago(self, ds, importe):
        "Prints a line with the payment method and amount"
self.factura["pagos"].append(dict(ds=ds, importe=importe))
return self.comando.addPayment(ds, float(importe))
    def _imprimirPercepcion(self, ds, importe):
        "Prints a line with the perception name and amount"
self.factura["percepciones"].append(dict(ds=ds, importe=importe))
return self.comando.addPerception(ds, float(importe))
    def _cerrarComprobante(self, *args):
        "Sends the command to close a fiscal document"
return self.comando.closeDocument()
| mit | -7,636,467,503,041,819,000 | 36.539683 | 133 | 0.552361 | false |
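An illustrative payload for printTicket() above; the dictionary keys mirror the keyword arguments of _abrirComprobante(), _imprimirItem() and _imprimirPago(), while the concrete values are invented for the example.

ticket = {
    "encabezado": {
        "tipo_cbte": "T",                       # plain fiscal ticket
        "tipo_responsable": "CONSUMIDOR_FINAL",
        "tipo_doc": "DNI",                      # example document type
        "nro_doc": "11222333",
    },
    "items": [
        {"ds": "Coffee", "qty": 2, "importe": 150.0, "alic_iva": 21.0},
    ],
    "pagos": [
        {"ds": "Cash", "importe": 300.0},
    ],
}
# traductor.printTicket(**ticket)  # traductor: an already configured TraductorFiscal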
k-j-m/Pyxon | pyxon/decode.py | 1 | 5564 | # Dict of the form:
# { cls: [propname]}
# cls: class that has been written with the @sprop annotation
# propname: name of the property
class_sprops = {}
# Dict of the form:
# {cls: {name:(fn, inv_fn)}}
# cls: class that has been written with @cprop annotations
# name: class attribute name
# fn: function to turn json data into the corresponding attribute type
# inv_fn: inverse of fn
class_cprops = {}
# Dict of the form:
# {AbstractClass:specifier_property}
# AbstractClass: the class that we're trying to (de)serialize
# specifier_property: the name of the json property that
# will indicate the concrete class name
specifier_properties = {}
# Dict of the form {AbstractClass: {label: ConcreteClass}}
# Used to retrieve the concrete implementation of an
# abstract class based on a string label.
class_specifiers = {}
# {ConcreteClass: (AbstractClass, concrete_label)}
conc_to_abstract = {}
def add_type_property(data,cls):
"""
Given some JSON data and the class from which it was produced,
this function returns the JSON data with any required type
annotations added to it.
"""
if not cls in conc_to_abstract:
return data
abstract_cls, concrete_label = conc_to_abstract[cls]
prop_name = specifier_properties[abstract_cls]
data[prop_name] = concrete_label
return data
class MetaSProp(type):
"""
Metaclass designed specifically to let us use dot notation
for specifying simple class properties. This metaclass
contains the decorator logic for the @cprop decorator.
"""
def __getattr__(prop_cls,key):
def sprop2(cls):
simple_props = class_sprops.get(cls,[])
simple_props.append(key)
class_sprops[cls]=simple_props
return cls
return sprop2
class sprop:
"""
Decorator used to add simple properties to a class.
The logic for this decorator is contained in the metaclass
MetaSProp. The reason for this is to allow simple dot
    notation to specify parameters.
Example:
>>> @sprop.x
>>> @sprop.y
>>> class Foo(object): pass
"""
__metaclass__ = MetaSProp
class MetaCProp(type):
"""
Metaclass for the cprop calculated property decorator.
This class contains all of the decorator logic. The reason
for using a metaclass rather than something simpler is
to allow us to use dot notation when adding calculated
properties.
"""
def __getattr__(prop_cls,key):
def cprop2(f1, f2):
def cprop3(cls):
cprops = class_cprops.get(cls,{})
cprops[key]=(f1,f2)
class_cprops[cls]=cprops
return cls
return cprop3
return cprop2
class cprop:
"""
Decorator for adding calculated properties to a class.
A calculated property is needed when the json data can't
be added to the class directly, for example when creating
some other user classes from the data before adding as
properties.
The decorator needs to be given 2 functions as arguments:
fun1: a function that takes JSON data and converts to some
other data type
fun2: the inverse of fun1, which takes some data type and
converts it into JSON data
Note: ideally the following will hold for any value of x
>>> fun2(fun1(x)) == x
Example:
@sprop.x
class Foo(object): pass
@cprop.y(f1=obj(Foo), f2=unobjectify)
class Bar(object): pass
"""
__metaclass__ = MetaCProp
# Decorator annotations
def subtyped(using):
"""
Decorator used to indicate that a class will be subtyped.
The using= parameter is used to indicate which JSON
property will contain the name of the subclass. A sensible
    value for this will be @type, but this will all depend on
how you have set up the rest of the system.
Example:
@subtyped(using='@type')
class Foo(object): pass
"""
# Because this is a parameterised decorator that we call, we
# now need to create and return the decorator proper.
def subtyped2(cls):
specifier_properties[cls]=using
return cls
return subtyped2
def extending(super_cls, named):
"""
This decorator is used to indicate which superclass a class
extends. This could potentially be interpreted from the classes
mro, but that starts to get tricky and we would still need to
add extra info to say what the class will be named in the data.
This label is needed because we can't necessarily rely on the
class name and the class label in the data being the same.
Example:
@extending(Foo, named='Bar')
class Baz(Foo): pass
"""
def extending2(cls):
conc_to_abstract[cls]=super_cls,named
clsmap = class_specifiers.get(super_cls,{})
clsmap[named]=cls
class_specifiers[super_cls]=clsmap
return cls
return extending2
def conc2(data, cls):
"""
Returns the appropriate concrete class of a subtyped class
based on the content of some JSON data.
If the class is not subtyped then it gets returned.
"""
s1 = set(specifier_properties.keys())
s2 = set(class_specifiers.keys())
assert s1==s2, "You need to use @subtyped and @extending as a pair!:\n%s\n%s" % (str(s1), str(s2))
if not cls in specifier_properties:
return cls
prop_name = specifier_properties[cls]
cls_label = data[prop_name]
concrete_cls = class_specifiers[cls][cls_label]
return concrete_cls
| mit | 950,680,876,732,445,200 | 28.913978 | 102 | 0.663192 | false |
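A minimal sketch tying the pieces above together: an abstract class subtyped via a '@type' JSON property, one concrete subclass, and conc2() resolving the concrete class from JSON data. It assumes the decorators above are in scope (Python 2 syntax, as in the module); the class names and the 'circle' label are invented.

@subtyped(using='@type')
@sprop.colour
class Shape(object): pass

@extending(Shape, named='circle')
@sprop.radius
class Circle(Shape): pass

data = {'@type': 'circle', 'radius': 3, 'colour': 'red'}
assert conc2(data, Shape) is Circle  # resolves to the concrete subclass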
twitter/pants | src/python/pants/subsystem/subsystem_client_mixin.py | 1 | 6246 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import object
from twitter.common.collections import OrderedSet
from pants.option.arg_splitter import GLOBAL_SCOPE
from pants.option.optionable import OptionableFactory
from pants.option.scope import ScopeInfo
from pants.util.objects import datatype
class SubsystemClientError(Exception): pass
class SubsystemDependency(datatype([
'subsystem_cls',
'scope',
'removal_version',
'removal_hint',
]), OptionableFactory):
"""Indicates intent to use an instance of `subsystem_cls` scoped to `scope`."""
def __new__(cls, subsystem_cls, scope, removal_version=None, removal_hint=None):
return super(SubsystemDependency, cls).__new__(cls, subsystem_cls, scope, removal_version, removal_hint)
def is_global(self):
return self.scope == GLOBAL_SCOPE
@property
def optionable_cls(self):
# Fills the OptionableFactory contract.
return self.subsystem_cls
@property
def options_scope(self):
"""The subscope for options of `subsystem_cls` scoped to `scope`.
This is the scope that option values are read from when initializing the instance
indicated by this dependency.
"""
if self.is_global():
return self.subsystem_cls.options_scope
else:
return self.subsystem_cls.subscope(self.scope)
class SubsystemClientMixin(object):
"""A mixin for declaring dependencies on subsystems.
Must be mixed in to an Optionable.
"""
@classmethod
def subsystem_dependencies(cls):
"""The subsystems this object uses.
Override to specify your subsystem dependencies. Always add them to your superclass's value.
Note: Do not call this directly to retrieve dependencies. See subsystem_dependencies_iter().
:return: A tuple of SubsystemDependency instances.
In the common case where you're an optionable and you want to get an instance scoped
to you, call subsystem_cls.scoped(cls) to get an appropriate SubsystemDependency.
As a convenience, you may also provide just a subsystem_cls, which is shorthand for
SubsystemDependency(subsystem_cls, GLOBAL SCOPE) and indicates that we want to use
the global instance of that subsystem.
"""
return tuple()
@classmethod
def subsystem_dependencies_iter(cls):
"""Iterate over the direct subsystem dependencies of this Optionable."""
for dep in cls.subsystem_dependencies():
if isinstance(dep, SubsystemDependency):
yield dep
else:
yield SubsystemDependency(dep, GLOBAL_SCOPE, removal_version=None, removal_hint=None)
@classmethod
def subsystem_closure_iter(cls):
"""Iterate over the transitive closure of subsystem dependencies of this Optionable.
:rtype: :class:`collections.Iterator` of :class:`SubsystemDependency`
:raises: :class:`pants.subsystem.subsystem_client_mixin.SubsystemClientMixin.CycleException`
if a dependency cycle is detected.
"""
seen = set()
dep_path = OrderedSet()
def iter_subsystem_closure(subsystem_cls):
if subsystem_cls in dep_path:
raise cls.CycleException(list(dep_path) + [subsystem_cls])
dep_path.add(subsystem_cls)
for dep in subsystem_cls.subsystem_dependencies_iter():
if dep not in seen:
seen.add(dep)
yield dep
for d in iter_subsystem_closure(dep.subsystem_cls):
yield d
dep_path.remove(subsystem_cls)
for dep in iter_subsystem_closure(cls):
yield dep
class CycleException(Exception):
"""Thrown when a circular subsystem dependency is detected."""
def __init__(self, cycle):
message = 'Cycle detected:\n\t{}'.format(' ->\n\t'.join(
'{} scope: {}'.format(optionable_cls, optionable_cls.options_scope)
for optionable_cls in cycle))
super(SubsystemClientMixin.CycleException, self).__init__(message)
@classmethod
def known_scope_infos(cls):
"""Yield ScopeInfo for all known scopes for this optionable, in no particular order.
:rtype: set of :class:`pants.option.scope.ScopeInfo`
:raises: :class:`pants.subsystem.subsystem_client_mixin.SubsystemClientMixin.CycleException`
if a dependency cycle is detected.
"""
known_scope_infos = set()
optionables_path = OrderedSet() # To check for cycles at the Optionable level, ignoring scope.
def collect_scope_infos(optionable_cls, scoped_to, removal_version=None, removal_hint=None):
if optionable_cls in optionables_path:
raise cls.CycleException(list(optionables_path) + [optionable_cls])
optionables_path.add(optionable_cls)
scope = (optionable_cls.options_scope if scoped_to == GLOBAL_SCOPE
else optionable_cls.subscope(scoped_to))
scope_info = ScopeInfo(
scope,
optionable_cls.options_scope_category,
optionable_cls,
removal_version=removal_version,
removal_hint=removal_hint
)
if scope_info not in known_scope_infos:
known_scope_infos.add(scope_info)
for dep in scope_info.optionable_cls.subsystem_dependencies_iter():
# A subsystem always exists at its global scope (for the purpose of options
# registration and specification), even if in practice we only use it scoped to
# some other scope.
#
# NB: We do not apply deprecations to this implicit global copy of the scope, because if
# the intention was to deprecate the entire scope, that could be accomplished by
# deprecating all options in the scope.
collect_scope_infos(dep.subsystem_cls, GLOBAL_SCOPE)
if not dep.is_global():
collect_scope_infos(dep.subsystem_cls,
scope,
removal_version=dep.removal_version,
removal_hint=dep.removal_hint)
optionables_path.remove(scope_info.optionable_cls)
collect_scope_infos(cls, GLOBAL_SCOPE)
return known_scope_infos
| apache-2.0 | 7,956,763,797,131,338,000 | 36.401198 | 108 | 0.686519 | false |
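A hedged sketch of how a client class might declare its dependencies, following the subsystem_dependencies() docstring above. MyTask, SomeGlobalSubsystem and ScopedSubsystem are invented names; only the pattern (one global dependency plus one dependency scoped via scoped(cls)) comes from the source.

class MyTask(SubsystemClientMixin, Optionable):
  options_scope = 'my-task'

  @classmethod
  def subsystem_dependencies(cls):
    return super(MyTask, cls).subsystem_dependencies() + (
      SomeGlobalSubsystem,          # use the global instance
      ScopedSubsystem.scoped(cls),  # use an instance scoped to 'my-task'
    )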
thieme/rxnconcompiler | tests/test_data/bngl_rules/mapk_data/rules_mapk_bind_data.py | 1 | 2006 | #!/usr/bin/env python
"""
rules_mapk_bind_data.py contains dictionary with BIND reactions from MAPK network.
{rxncon_quick_string: 'Rules': [rule1, rule2 ...], 'Tags': [rtype ...]}
"""
MAPK_BIND_DATA = {
# ASSOCCIATION
# BIND no contingencies
'Hot1_BIND_Hot1Site': {
'Rules':[
'Hot1(AssocHot1Site) + Hot1Site(AssocHot1) <-> Hot1(AssocHot1Site!1).Hot1Site(AssocHot1!1)'],
'Tags': [
1, 'BIND', 'Hot1', 'Hot1Site', 'no contingencies']},
'Rlm1_BIND_Rlm1Site': {
'Rules':[
'Rlm1(AssocRlm1Site) + Rlm1Site(AssocRlm1) <-> Rlm1(AssocRlm1Site!1).Rlm1Site(AssocRlm1!1)'],
'Tags': [
1, 'BIND', 'Rlm1', 'Rlm1Site', 'no contingencies']},
'Smp1_BIND_Smp1Site': {
'Rules':[
'Smp1(AssocSmp1Site) + Smp1Site(AssocSmp1) <-> Smp1(AssocSmp1Site!1).Smp1Site(AssocSmp1!1)'],
'Tags': [
1, 'BIND', 'Smp1', 'Smp1Site', 'no contingencies']},
'Ste12_[n/HTH]_BIND_PRE': {
'Rules':[
'Ste12(nHTH) + PRE(AssocSte12) <-> PRE(AssocSte12!1).Ste12(nHTH!1)'],
'Tags': [
1, 'BIND', 'Ste12', 'PRE', 'no contingencies']},
'Sko1_[bZIP]_BIND_CRE': {
'Rules':[
'Sko1(bZIP) + CRE(AssocSko1) <-> CRE(AssocSko1!1).Sko1(bZIP!1)'],
'Tags': [
1, 'BIND', 'Sko1', 'CRE', 'no contingencies']},
'Tec1_[n/TEA]_BIND_TCS': {
'Rules':[
'Tec1(nTEA) + TCS(AssocTec1) <-> TCS(AssocTec1!1).Tec1(nTEA!1)'],
'Tags': [
1, 'BIND', 'Tec1', 'TCS', 'no contingencies']},
# BIND contingencies
'Swi4_BIND_SCBFKS2; ! Slt2_[DB]--Swi4_[c]; x Swi4_[n]--Swi4_[c]': {
'Rules':[
'Slt2(DB!1).Swi4(AssocSCBFKS2,c!1,n) + SCBFKS2(AssocSwi4) <-> SCBFKS2(AssocSwi4!1).Slt2(DB!2).Swi4(AssocSCBFKS2!1,c!2,n)'],
'Tags': [
1, 'BIND', 'Swi4', 'SCBFKS2', 'contingencies', 'complicated']},
'Swi4_BIND_SCBG1; x Slt2_[DB]--Swi4_[c]; x Swi4_[n]--Swi4_[c]': {
'Rules':[
'Swi4(AssocSCBG1,c,n) + SCBG1(AssocSwi4) <-> SCBG1(AssocSwi4!1).Swi4(AssocSCBG1!1,c,n)'],
'Tags': [
1, 'BIND', 'Swi4', 'SCBG1', 'contingencies', 'complicated']},
}
DATA = [MAPK_BIND_DATA] | lgpl-3.0 | -9,080,213,258,389,551,000 | 30.857143 | 127 | 0.590229 | false |
AutorestCI/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/operations/virtual_machine_extension_images_operations.py | 1 | 10932 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class VirtualMachineExtensionImagesOperations(object):
"""VirtualMachineExtensionImagesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2017-03-30".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-03-30"
self.config = config
def get(
self, location, publisher_name, type, version, custom_headers=None, raw=False, **operation_config):
"""Gets a virtual machine extension image.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name:
:type publisher_name: str
:param type:
:type type: str
:param version:
:type version: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: VirtualMachineExtensionImage or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.compute.v2017_03_30.models.VirtualMachineExtensionImage or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions/{version}'
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
'type': self._serialize.url("type", type, 'str'),
'version': self._serialize.url("version", version, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachineExtensionImage', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_types(
self, location, publisher_name, custom_headers=None, raw=False, **operation_config):
"""Gets a list of virtual machine extension image types.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name:
:type publisher_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype:
list[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineExtensionImage]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types'
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[VirtualMachineExtensionImage]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_versions(
self, location, publisher_name, type, filter=None, top=None, orderby=None, custom_headers=None, raw=False, **operation_config):
"""Gets a list of virtual machine extension image versions.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name:
:type publisher_name: str
:param type:
:type type: str
:param filter: The filter to apply on the operation.
:type filter: str
:param top:
:type top: int
:param orderby:
:type orderby: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype:
list[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineExtensionImage]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions'
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
'type': self._serialize.url("type", type, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[VirtualMachineExtensionImage]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
| mit | -586,315,393,133,818,900 | 43.620408 | 181 | 0.644621 | false |
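A usage sketch for the generated operations class above, assuming the azure-mgmt-compute ComputeManagementClient of the same era and valid service-principal credentials; the location, publisher and type values are examples only, and this snippet is not part of the generated module.

from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.compute import ComputeManagementClient

credentials = ServicePrincipalCredentials(client_id='...', secret='...', tenant='...')
client = ComputeManagementClient(credentials, subscription_id='...')

# The generated class above is exposed as client.virtual_machine_extension_images
versions = client.virtual_machine_extension_images.list_versions(
    location='westeurope',
    publisher_name='Microsoft.Azure.Extensions',  # example publisher
    type='CustomScript',                          # example extension type
)
for image in versions:
    print(image.name)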
kamailio/kamcli | kamcli/commands/cmd_db.py | 1 | 28309 | import os
import sys
import click
from sqlalchemy import create_engine
from sqlalchemy.sql import text
from sqlalchemy.exc import SQLAlchemyError
from kamcli.cli import pass_context
from kamcli.ioutils import ioutils_dbres_print
from kamcli.ioutils import ioutils_formats_list
from kamcli.dbutils import dbutils_exec_sqlfile
KDB_GROUP_BASIC = ["standard"]
KDB_GROUP_STANDARD = [
"acc",
"lcr",
"domain",
"group",
"permissions",
"registrar",
"usrloc",
"msilo",
"alias_db",
"uri_db",
"speeddial",
"avpops",
"auth_db",
"pdt",
"dialog",
"dispatcher",
"dialplan",
"topos",
]
KDB_GROUP_EXTRA = [
"imc",
"cpl",
"siptrace",
"domainpolicy",
"carrierroute",
"drouting",
"userblacklist",
"userblocklist",
"htable",
"purple",
"uac",
"pipelimit",
"mtree",
"sca",
"mohqueue",
"rtpproxy",
"rtpengine",
"secfilter",
]
KDB_GROUP_PRESENCE = ["presence", "rls"]
KDB_GROUP_UID = [
"uid_auth_db",
"uid_avp_db",
"uid_domain",
"uid_gflags",
"uid_uri_db",
]
@click.group(
"db", help="Raw database operations", short_help="Raw database operations"
)
@pass_context
def cli(ctx):
pass
@cli.command("query", short_help="Run SQL statement")
@click.option(
"oformat",
"--output-format",
"-F",
type=click.Choice(["raw", "json", "table", "dict"]),
default=None,
help="Format the output",
)
@click.option(
"ostyle",
"--output-style",
"-S",
default=None,
help="Style of the output (tabulate table format)",
)
@click.argument("query", metavar="<query>")
@pass_context
def db_query(ctx, oformat, ostyle, query):
e = create_engine(ctx.gconfig.get("db", "rwurl"))
res = e.execute(query.encode("ascii", "ignore").decode())
ioutils_dbres_print(ctx, oformat, ostyle, res)
@cli.command("connect", short_help="Launch db cli and connect to database")
@pass_context
def db_connect(ctx):
dbtype = ctx.gconfig.get("db", "type")
if dbtype.lower() == "mysql":
scmd = ("mysql -h {0} -u {1} -p{2} {3}").format(
ctx.gconfig.get("db", "host"),
ctx.gconfig.get("db", "rwuser"),
ctx.gconfig.get("db", "rwpassword"),
ctx.gconfig.get("db", "dbname"),
)
elif dbtype == "postgresql":
scmd = ('psql "postgresql://{0}:{1}@{2}/{3}"').format(
ctx.gconfig.get("db", "rwuser"),
ctx.gconfig.get("db", "rwpassword"),
ctx.gconfig.get("db", "host"),
ctx.gconfig.get("db", "dbname"),
)
elif dbtype == "sqlite":
scmd = ("sqlite3 {0} ").format(
ctx.gconfig.get("db", "dbpath"),
)
else:
ctx.log("unsupported database type [%s]", dbtype)
sys.exit()
os.system(scmd)
@cli.command("clirun", short_help="Run SQL statement via cli")
@click.argument("query", metavar="<query>")
@pass_context
def db_clirun(ctx, query):
dbtype = ctx.gconfig.get("db", "type")
if dbtype == "mysql":
scmd = ('mysql -h {0} -u {1} -p{2} -e "{3} ;" {4}').format(
ctx.gconfig.get("db", "host"),
ctx.gconfig.get("db", "rwuser"),
ctx.gconfig.get("db", "rwpassword"),
query,
ctx.gconfig.get("db", "dbname"),
)
elif dbtype == "postgresql":
scmd = ('psql "postgresql://{0}:{1}@{2}/{3}" -c "{4} ;"').format(
ctx.gconfig.get("db", "rwuser"),
ctx.gconfig.get("db", "rwpassword"),
ctx.gconfig.get("db", "host"),
ctx.gconfig.get("db", "dbname"),
query,
)
elif dbtype == "sqlite":
scmd = ('sqlite3 {0} "{1} "').format(
ctx.gconfig.get("db", "dbpath"),
query,
)
else:
ctx.log("unsupported database type [%s]", dbtype)
sys.exit()
os.system(scmd)
@cli.command("clishow", short_help="Show content of table via cli")
@click.argument("table", metavar="<table>")
@pass_context
def db_clishow(ctx, table):
dbtype = ctx.gconfig.get("db", "type")
if dbtype == "mysql":
scmd = (
'mysql -h {0} -u {1} -p{2} -e "select * from {3} ;" {4}'
).format(
ctx.gconfig.get("db", "host"),
ctx.gconfig.get("db", "rwuser"),
ctx.gconfig.get("db", "rwpassword"),
table,
ctx.gconfig.get("db", "dbname"),
)
elif dbtype == "postgresql":
scmd = (
'psql "postgresql://{0}:{1}@{2}/{3}" -c "select * from {4} ;"'
).format(
ctx.gconfig.get("db", "rwuser"),
ctx.gconfig.get("db", "rwpassword"),
ctx.gconfig.get("db", "host"),
ctx.gconfig.get("db", "dbname"),
table,
)
elif dbtype == "sqlite":
scmd = ('sqlite3 {0} "select * from {1} "').format(
ctx.gconfig.get("db", "dbpath"),
table,
)
else:
ctx.log("unsupported database type [%s]", dbtype)
sys.exit()
os.system(scmd)
@cli.command("clishowg", short_help="Show content of table via cli")
@click.argument("table", metavar="<table>")
@pass_context
def db_clishowg(ctx, table):
dbtype = ctx.gconfig.get("db", "type")
if dbtype == "mysql":
scmd = (
r'mysql -h {0} -u {1} -p{2} -e "select * from {3} \G" {4}'
).format(
ctx.gconfig.get("db", "host"),
ctx.gconfig.get("db", "rwuser"),
ctx.gconfig.get("db", "rwpassword"),
table,
ctx.gconfig.get("db", "dbname"),
)
elif dbtype == "postgresql":
scmd = (
'psql "postgresql://{0}:{1}@{2}/{3}" -c "\\x" -c "select * from {4} ;" -c "\\x"'
).format(
ctx.gconfig.get("db", "rwuser"),
ctx.gconfig.get("db", "rwpassword"),
ctx.gconfig.get("db", "host"),
ctx.gconfig.get("db", "dbname"),
table,
)
elif dbtype == "sqlite":
scmd = ('sqlite3 -line {0} "select * from {1} "').format(
ctx.gconfig.get("db", "dbpath"),
table,
)
else:
ctx.log("unsupported database type [%s]", dbtype)
sys.exit()
os.system(scmd)
@cli.command("show", short_help="Show content of a table")
@click.option(
"oformat",
"--output-format",
"-F",
type=click.Choice(ioutils_formats_list),
default=None,
help="Format the output",
)
@click.option(
"ostyle",
"--output-style",
"-S",
default=None,
help="Style of the output (tabulate table format)",
)
@click.argument("table", metavar="<table>")
@pass_context
def db_show(ctx, oformat, ostyle, table):
ctx.vlog("Content of database table [%s]", table)
e = create_engine(ctx.gconfig.get("db", "rwurl"))
res = e.execute("select * from {0}".format(table))
ioutils_dbres_print(ctx, oformat, ostyle, res)
@cli.command(
"showcreate", short_help="Show create statement of of a database table"
)
@click.option(
"oformat",
"--output-format",
"-F",
type=click.Choice(ioutils_formats_list),
default=None,
help="Format the output",
)
@click.option(
"ostyle",
"--output-style",
"-S",
default=None,
help="Style of the output (tabulate table format)",
)
@click.argument("table", metavar="<table>")
@pass_context
def db_showcreate(ctx, oformat, ostyle, table):
ctx.vlog("Show create of database table [%s]", table)
dbtype = ctx.gconfig.get("db", "type")
if dbtype == "mysql":
e = create_engine(ctx.gconfig.get("db", "rwurl"))
res = e.execute("show create table {0}".format(table))
ioutils_dbres_print(ctx, oformat, ostyle, res)
elif dbtype == "postgresql":
scmd = ('psql "postgresql://{0}:{1}@{2}/{3}" -c "\\d {4} "').format(
ctx.gconfig.get("db", "rwuser"),
ctx.gconfig.get("db", "rwpassword"),
ctx.gconfig.get("db", "host"),
ctx.gconfig.get("db", "dbname"),
table,
)
os.system(scmd)
elif dbtype == "sqlite":
scmd = ('sqlite3 {0} ".schema {1} "').format(
ctx.gconfig.get("db", "dbpath"),
table,
)
os.system(scmd)
else:
ctx.log("unsupported database type [%s]", dbtype)
@cli.command("runfile", short_help="Run SQL statements in a file")
@click.argument("fname", metavar="<fname>")
@pass_context
def db_runfile(ctx, fname):
"""Run SQL statements in a file
\b
Parameters:
<fname> - name to the file with the SQL statements
"""
ctx.vlog("Run statements in the file [%s]", fname)
e = create_engine(ctx.gconfig.get("db", "rwurl"))
dbutils_exec_sqlfile(ctx, e, fname)
def db_create_mysql_host_users(
ctx,
e,
nousers,
nogrants,
dbname,
dbhost,
dbrwuser,
dbrwpassword,
dbrouser,
dbropassword,
):
if not nousers:
e.execute(
"CREATE USER {0!r}@{1!r} IDENTIFIED BY {2!r}".format(
dbrwuser, dbhost, dbrwpassword
)
)
if not nogrants:
e.execute(
"GRANT ALL PRIVILEGES ON {0}.* TO {1!r}@{2!r}".format(
dbname, dbrwuser, dbhost
)
)
if not nousers:
e.execute(
"CREATE USER {0!r}@{1!r} IDENTIFIED BY {2!r}".format(
dbrouser, dbhost, dbropassword
)
)
if not nogrants:
        e.execute(
            "GRANT SELECT ON {0}.* TO {1!r}@{2!r}".format(
dbname, dbrouser, dbhost
)
)
def db_create_mysql_users(ctx, e, dbname, nousers, nogrants):
dbhost = ctx.gconfig.get("db", "host")
dbrwuser = ctx.gconfig.get("db", "rwuser")
dbrwpassword = ctx.gconfig.get("db", "rwpassword")
dbrouser = ctx.gconfig.get("db", "rouser")
dbropassword = ctx.gconfig.get("db", "ropassword")
dbaccesshost = ctx.gconfig.get("db", "accesshost")
db_create_mysql_host_users(
        ctx, e, nousers, nogrants, dbname, dbhost, dbrwuser, dbrwpassword,
        dbrouser, dbropassword,
)
if dbhost != "localhost":
db_create_mysql_host_users(
ctx,
e,
nousers,
nogrants,
dbname,
"localhost",
dbrwuser,
dbrwpassword,
dbrouser,
dbropassword,
)
if len(dbaccesshost) > 0:
db_create_mysql_host_users(
ctx,
e,
nousers,
nogrants,
dbname,
dbaccesshost,
dbrwuser,
dbrwpassword,
dbrouser,
dbropassword,
)
def db_create_sql_group(ctx, e, dirpath, dbgroup):
for t in dbgroup:
fname = dirpath + "/" + t + "-create.sql"
dbutils_exec_sqlfile(ctx, e, fname)
def db_create_sql_table_groups(ctx, e, ldirectory, alltables):
db_create_sql_group(ctx, e, ldirectory, KDB_GROUP_BASIC)
db_create_sql_group(ctx, e, ldirectory, KDB_GROUP_STANDARD)
option = "y"
if not alltables:
print("Do you want to create extra tables? (y/n):", end=" ")
option = input()
if option == "y":
db_create_sql_group(ctx, e, ldirectory, KDB_GROUP_EXTRA)
if not alltables:
print("Do you want to create presence tables? (y/n):", end=" ")
option = input()
if option == "y":
db_create_sql_group(ctx, e, ldirectory, KDB_GROUP_PRESENCE)
if not alltables:
print("Do you want to create uid tables? (y/n):", end=" ")
option = input()
if option == "y":
db_create_sql_group(ctx, e, ldirectory, KDB_GROUP_UID)
def db_create_mysql(ctx, ldbname, ldirectory, nousers, nogrants, alltables):
e = create_engine(ctx.gconfig.get("db", "adminurl"))
e.execute("create database {0}".format(ldbname))
db_create_mysql_users(ctx, e, ldbname, nousers, nogrants)
e.execute("use {0}".format(ldbname))
db_create_sql_table_groups(ctx, e, ldirectory, alltables)
def db_create_postgresql(
ctx, ldbname, ldirectory, nousers, nogrants, nofunctions, alltables
):
scmd = (
'psql "postgresql://{0}:{1}@{2}" -c "create database {3} "'
).format(
ctx.gconfig.get("db", "adminuser"),
ctx.gconfig.get("db", "adminpassword"),
ctx.gconfig.get("db", "host"),
ldbname,
)
os.system(scmd)
e = create_engine(ctx.gconfig.get("db", "adminurl"))
if not nogrants:
e.execute(
"CREATE USER {0} WITH PASSWORD '{1}';".format(
ctx.gconfig.get("db", "rwuser"),
ctx.gconfig.get("db", "rwpassword"),
)
)
e.execute(
"GRANT CONNECT ON DATABASE {0} TO {1};".format(
ldbname,
ctx.gconfig.get("db", "rwuser"),
)
)
if ctx.gconfig.get("db", "rwuser") != ctx.gconfig.get("db", "rouser"):
e.execute(
"CREATE USER {0} WITH PASSWORD '{1}';".format(
ctx.gconfig.get("db", "rouser"),
ctx.gconfig.get("db", "ropassword"),
)
)
e.execute(
"GRANT CONNECT ON DATABASE {0} TO {1};".format(
ldbname,
ctx.gconfig.get("db", "rouser"),
)
)
e.dispose()
e = create_engine(
"{0}+{1}://{2}:{3}@{4}/{5}".format(
ctx.gconfig.get("db", "type"),
ctx.gconfig.get("db", "driver"),
ctx.gconfig.get("db", "rwuser"),
ctx.gconfig.get("db", "rwpassword"),
ctx.gconfig.get("db", "host"),
ldbname,
)
)
if not nofunctions:
e.execute(
"CREATE FUNCTION concat(text, text) RETURNS text AS 'SELECT $1 || $2;' LANGUAGE 'sql';"
)
e.execute(
"CREATE FUNCTION rand() RETURNS double precision AS 'SELECT random();' LANGUAGE 'sql';"
)
db_create_sql_table_groups(ctx, e, ldirectory, alltables)
e.dispose()
e = create_engine(ctx.gconfig.get("db", "adminurl"))
if not nogrants:
e.execute(
"GRANT ALL PRIVILEGES ON DATABASE {0} TO {1};".format(
ldbname,
ctx.gconfig.get("db", "rwuser"),
)
)
if ctx.gconfig.get("db", "rwuser") != ctx.gconfig.get("db", "rouser"):
e.execute(
"GRANT SELECT ON DATABASE {0} TO {1};".format(
ldbname,
ctx.gconfig.get("db", "rouser"),
)
)
def db_create_sqlite(ctx, ldbname, ldirectory, alltables):
e = create_engine(
"{0}+{1}:///{2}".format(
ctx.gconfig.get("db", "type"),
ctx.gconfig.get("db", "driver"),
ldbname,
)
)
db_create_sql_table_groups(ctx, e, ldirectory, alltables)
@cli.command("create", short_help="Create database structure")
@click.option(
"dbname",
"--dbname",
"-d",
default="",
help="Database name or path to the folder for database",
)
@click.option(
"scriptsdirectory",
"--scripts-directory",
"-s",
default="",
help="Path to the directory with db schema files",
)
@click.option(
"nousers",
"--no-users",
"-U",
is_flag=True,
help="Do not create users",
)
@click.option(
"nogrants",
"--no-grants",
"-G",
is_flag=True,
help="Do not grant privileges",
)
@click.option(
"nofunctions",
"--no-functions",
"-F",
is_flag=True,
help="Do not create additional SQL functions",
)
@click.option(
"alltables",
"--all-tables",
"-a",
is_flag=True,
help="Create all tables without asking for confirmation",
)
@pass_context
def db_create(
ctx, dbname, scriptsdirectory, nousers, nogrants, nofunctions, alltables
):
"""Create database structure
\b
"""
dbtype = ctx.gconfig.get("db", "type")
if dbtype == "sqlite":
ldbname = ctx.gconfig.get("db", "dbpath")
else:
ldbname = ctx.gconfig.get("db", "dbname")
if len(dbname) > 0:
ldbname = dbname
ldirectory = ctx.gconfig.get("db", "scriptsdirectory")
if len(scriptsdirectory) > 0:
ldirectory = scriptsdirectory
ctx.vlog("Creating database [%s] structure", ldbname)
if dbtype == "mysql":
db_create_mysql(ctx, ldbname, ldirectory, nousers, nogrants, alltables)
return
elif dbtype == "postgresql":
db_create_postgresql(
ctx, ldbname, ldirectory, nousers, nogrants, nofunctions, alltables
)
return
elif dbtype == "sqlite":
db_create_sqlite(ctx, ldbname, ldirectory, alltables)
return
else:
ctx.vlog("Database type [%s] not supported yet", dbtype)
return
@cli.command("create-dbonly", short_help="Create database only")
@click.option(
"dbname",
"--dbname",
"-d",
default="",
help="Database name or path to the folder for database",
)
@pass_context
def db_create_dbonly(ctx, dbname):
"""Create database only
\b
"""
ctx.vlog("Creating only database [%s]", dbname)
dbtype = ctx.gconfig.get("db", "type")
if dbtype == "sqlite":
ldbname = ctx.gconfig.get("db", "dbpath")
else:
ldbname = ctx.gconfig.get("db", "dbname")
if len(dbname) > 0:
ldbname = dbname
if dbtype == "mysql":
e = create_engine(ctx.gconfig.get("db", "adminurl"))
e.execute("create database {0}".format(ldbname))
elif dbtype == "postgresql":
scmd = (
'psql "postgresql://{0}:{1}@{2}" -c "create database {3} "'
).format(
ctx.gconfig.get("db", "adminuser"),
ctx.gconfig.get("db", "adminpassword"),
ctx.gconfig.get("db", "host"),
ldbname,
)
os.system(scmd)
elif dbtype == "sqlite":
ctx.vlog("Database file for type [%s] is created on first use", dbtype)
else:
ctx.vlog("Database type [%s] not supported yet", dbtype)
return
@cli.command("drop", short_help="Drop database")
@click.option(
"dbname",
"--dbname",
"-d",
default="",
help="Database name or path to the database",
)
@click.option(
"yes",
"--yes",
"-y",
is_flag=True,
help="Do not ask for confirmation",
)
@pass_context
def db_drop(ctx, dbname, yes):
"""Drop database
\b
"""
dbtype = ctx.gconfig.get("db", "type")
if dbtype == "sqlite":
ldbname = ctx.gconfig.get("db", "dbpath")
else:
ldbname = ctx.gconfig.get("db", "dbname")
if len(dbname) > 0:
ldbname = dbname
if not yes:
print("Dropping database. Are you sure? (y/n):", end=" ")
option = input()
if option != "y":
ctx.vlog("Skip dropping database [%s]", ldbname)
return
ctx.vlog("Dropping database [%s]", ldbname)
if dbtype == "mysql":
e = create_engine(ctx.gconfig.get("db", "adminurl"))
e.execute("drop database {0}".format(ldbname))
elif dbtype == "postgresql":
scmd = (
'psql "postgresql://{0}:{1}@{2}" -c "drop database {3} "'
).format(
ctx.gconfig.get("db", "adminuser"),
ctx.gconfig.get("db", "adminpassword"),
ctx.gconfig.get("db", "host"),
ldbname,
)
os.system(scmd)
elif dbtype == "sqlite":
if not os.path.isfile(ldbname):
ctx.vlog("Database file [%s] does not exist", ldbname)
else:
os.remove(ldbname)
return
else:
ctx.vlog("Database type [%s] not supported yet", dbtype)
return
def db_create_tables_list(ctx, directory, group):
dbtype = ctx.gconfig.get("db", "type")
if dbtype != "mysql":
ctx.vlog("Database type [%s] not supported yet", dbtype)
return
ldirectory = ""
if len(directory) > 0:
ldirectory = directory
e = create_engine(ctx.gconfig.get("db", "rwurl"))
db_create_sql_group(ctx, e, ldirectory, group)
@cli.command("create-tables-basic", short_help="Create basic database tables")
@click.option(
"scriptsdirectory",
"--scripts-directory",
"-s",
default="",
help="Path to the directory with db schema files",
)
@pass_context
def db_create_tables_basic(ctx, scriptsdirectory):
"""Create basic database tables
\b
"""
ldirectory = ctx.gconfig.get("db", "scriptsdirectory")
if len(scriptsdirectory) > 0:
ldirectory = scriptsdirectory
db_create_tables_list(ctx, ldirectory, KDB_GROUP_BASIC)
@cli.command(
"create-tables-standard", short_help="Create standard database tables"
)
@click.option(
"scriptsdirectory",
"--scripts-directory",
"-s",
default="",
help="Path to the directory with db schema files",
)
@pass_context
def db_create_tables_standard(ctx, scriptsdirectory):
"""Create standard database tables
\b
"""
ldirectory = ctx.gconfig.get("db", "scriptsdirectory")
if len(scriptsdirectory) > 0:
ldirectory = scriptsdirectory
db_create_tables_list(ctx, ldirectory, KDB_GROUP_STANDARD)
@cli.command("create-tables-extra", short_help="Create extra database tables")
@click.option(
"scriptsdirectory",
"--scripts-directory",
"-s",
default="",
help="Path to the directory with db schema files",
)
@pass_context
def db_create_tables_extra(ctx, scriptsdirectory):
"""Create extra database tables
\b
"""
ldirectory = ctx.gconfig.get("db", "scriptsdirectory")
if len(scriptsdirectory) > 0:
ldirectory = scriptsdirectory
db_create_tables_list(ctx, ldirectory, KDB_GROUP_EXTRA)
@cli.command(
"create-tables-presence", short_help="Create presence database tables"
)
@click.option(
"scriptsdirectory",
"--scripts-directory",
"-s",
default="",
help="Path to the directory with db schema files",
)
@pass_context
def db_create_tables_presence(ctx, scriptsdirectory):
"""Create presence database tables
\b
"""
ldirectory = ctx.gconfig.get("db", "scriptsdirectory")
if len(scriptsdirectory) > 0:
ldirectory = scriptsdirectory
db_create_tables_list(ctx, ldirectory, KDB_GROUP_PRESENCE)
@cli.command("create-tables-uid", short_help="Create uid database tables")
@click.option(
"scriptsdirectory",
"--scripts-directory",
"-s",
default="",
help="Path to the directory with db schema files",
)
@pass_context
def db_create_tables_uid(ctx, scriptsdirectory):
"""Create uid database tables
\b
"""
ldirectory = ctx.gconfig.get("db", "scriptsdirectory")
if len(scriptsdirectory) > 0:
ldirectory = scriptsdirectory
db_create_tables_list(ctx, ldirectory, KDB_GROUP_UID)
@cli.command(
"create-tables-group",
short_help="Create the group of database tables for a specific extension",
)
@click.option(
"scriptsdirectory",
"--scripts-directory",
"-s",
default="",
help="Path to the directory with db schema files",
)
@click.argument("gname", metavar="<gname>")
@pass_context
def db_create_tables_group(ctx, scriptsdirectory, gname):
"""Create the group of database tables for a specific extension
\b
Parameters:
<gname> - the name of the group of tables
"""
ldirectory = ctx.gconfig.get("db", "scriptsdirectory")
if len(scriptsdirectory) > 0:
ldirectory = scriptsdirectory
e = create_engine(ctx.gconfig.get("db", "rwurl"))
fpath = ldirectory + "/" + gname + "-create.sql"
dbutils_exec_sqlfile(ctx, e, fpath)
@cli.command("grant", short_help="Create db access users and grant privileges")
@click.option(
"dbname",
"--dbname",
"-d",
default="",
help="Database name",
)
@pass_context
def db_grant(ctx, dbname):
"""Create db access users and grant privileges
\b
"""
dbtype = ctx.gconfig.get("db", "type")
if dbtype != "mysql":
ctx.vlog("Database type [%s] not supported yet", dbtype)
return
ldbname = ctx.gconfig.get("db", "dbname")
if len(dbname) > 0:
ldbname = dbname
ctx.vlog("Creating only database [%s]", ldbname)
e = create_engine(ctx.gconfig.get("db", "adminurl"))
db_create_mysql_users(ctx, e, ldbname, False, False)
def db_revoke_host_users(ctx, e, dbname, dbhost, dbrwuser, dbrouser):
e.execute(
"REVOKE ALL PRIVILEGES ON {0}.* TO {1!r}@{2!r}".format(
dbname, dbrwuser, dbhost
)
)
e.execute("DROP USER {0!r}@{1!r}".format(dbrwuser, dbhost))
e.execute(
"REVOKE SELECT PRIVILEGES ON {0}.* TO {1!r}@{2!r}".format(
dbname, dbrouser, dbhost
)
)
e.execute("DROP USER {0!r}@{1!r}".format(dbrouser, dbhost))
def db_revoke_users(ctx, e, dbname):
dbhost = ctx.gconfig.get("db", "host")
dbrwuser = ctx.gconfig.get("db", "rwuser")
dbrouser = ctx.gconfig.get("db", "rouser")
dbaccesshost = ctx.gconfig.get("db", "accesshost")
db_revoke_host_users(ctx, e, dbname, dbhost, dbrwuser, dbrouser)
if dbhost != "localhost":
db_revoke_host_users(
ctx,
e,
dbname,
"localhost",
dbrwuser,
dbrouser,
)
if len(dbaccesshost) > 0:
db_revoke_host_users(
ctx,
e,
dbname,
dbaccesshost,
dbrwuser,
dbrouser,
)
@cli.command("revoke", short_help="Revoke db access privileges")
@click.option(
"dbname",
"--dbname",
"-d",
default="",
help="Database name",
)
@pass_context
def db_revoke(ctx, dbname):
"""Revoke db access privileges
\b
"""
dbtype = ctx.gconfig.get("db", "type")
if dbtype != "mysql":
ctx.vlog("Database type [%s] not supported yet", dbtype)
return
ldbname = ctx.gconfig.get("db", "dbname")
if len(dbname) > 0:
ldbname = dbname
ctx.vlog("Revoke access to database [%s]", ldbname)
e = create_engine(ctx.gconfig.get("db", "adminurl"))
db_revoke_users(ctx, e, ldbname)
@cli.command(
"version-set", short_help="Set the version number for a table structure"
)
@click.option(
"vertable",
"--version-table",
default="version",
help="Name of the table with version records",
)
@click.argument("table", metavar="<table>")
@click.argument("version", metavar="<version>", type=int)
@pass_context
def db_version_set(ctx, vertable, table, version):
"""Set the version number for a table structure
\b
Parameters:
<table> - Name of the table to set the version for
<version> - Version number
"""
e = create_engine(ctx.gconfig.get("db", "rwurl"))
e.execute(
"delete from {0} where table_name={1!r}".format(
vertable.encode("ascii", "ignore").decode(),
table.encode("ascii", "ignore").decode(),
)
)
e.execute(
"insert into {0} (table_name, table_version) values ({1!r}, {2})".format(
vertable.encode("ascii", "ignore").decode(),
table.encode("ascii", "ignore").decode(),
version,
)
)
@cli.command(
"version-get", short_help="Get the version number for a table structure"
)
@click.option(
"vertable",
"--version-table",
default="version",
help="Name of the table with version records",
)
@click.option(
"oformat",
"--output-format",
"-F",
type=click.Choice(["raw", "json", "table", "dict"]),
default=None,
help="Format the output",
)
@click.option(
"ostyle",
"--output-style",
"-S",
default=None,
help="Style of the output (tabulate table format)",
)
@click.argument("table", metavar="<table>")
@pass_context
def db_version_get(ctx, vertable, oformat, ostyle, table):
"""Get the version number for a table structure
\b
Parameters:
<table> - Name of the table to get the version for
"""
e = create_engine(ctx.gconfig.get("db", "rwurl"))
res = e.execute(
"select * from {0} where table_name={1!r}".format(
vertable.encode("ascii", "ignore").decode(),
table.encode("ascii", "ignore").decode(),
)
)
ioutils_dbres_print(ctx, oformat, ostyle, res)
| gpl-2.0 | 5,029,327,672,222,936,000 | 26.863189 | 99 | 0.560528 | false |
aidin36/beneath-a-binary-sky | src/actions/water_action.py | 1 | 2052 | # This file is part of Beneath a Binary Sky.
# Copyright (C) 2016, Aidin Gharibnavaz <[email protected]>
#
# Beneath a Binary Sky is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Beneath a Binary Sky is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Beneath a Binary Sky. If not, see
# <http://www.gnu.org/licenses/>.
import time
from actions.action import Action
from actions.exceptions import InvalidArgumentsError, RobotHaveNoWaterError
from world.world import World
from database.exceptions import LockAlreadyAquiredError
class WaterAction(Action):
def __init__(self):
super().__init__()
self._world = World()
def do_action(self, robot, args):
'''Waters the square robot stands on.
@param robot: Instance of `objects.robot.Robot'.
'''
if len(args) != 1:
raise InvalidArgumentsError("`water' action takes no arguments.")
if not robot.get_has_water():
raise RobotHaveNoWaterError("Robot does not carry water.")
try:
square = self._world.get_square(robot.get_location(), for_update=True)
except LockAlreadyAquiredError:
# Waiting a little, and trying one more time.
time.sleep(0.02)
square = self._world.get_square(robot.get_location(), for_update=True)
# Note: we don't raise an exception if there's no plant. A robot can waste its water.
plant = square.get_plant()
if plant is not None:
plant.set_water_level(100)
robot.set_honor(robot.get_honor() + 1)
robot.set_has_water(False)
| gpl-3.0 | 8,472,158,464,183,048,000 | 35 | 93 | 0.679825 | false |
mdsalman729/flexpret_project | emulator/concurrit-poplsyntax/concurrit-poplsyntax/bench/pfscan/inputs/in2/config/getpthreadfunctions.py | 1 | 1909 | ##
# getpthreadfunctions.py - outputs the pthread man page to manpthread.txt
# parses the latter, creates a dictionary with pairs
# (functionname, list of function args where last element is result type)
# marshals dictionary to pthreaddict file
#
# Author - Christos Stergiou ([email protected])
#
import os,re,marshal
os.system('man pthread | col -b > manpthread.txt')
filemp = open('manpthread.txt')
filedict = open('pthreaddict','w')
try:
pfuncs = dict()
previousmatch = False
funcargtypesstr = ''
funcname = ''
funcrettype = ''
for line in filemp:
line = line.rstrip('\n')
funcargtypeslist = []
if previousmatch:
previousmatch = False
funcargtypesstr = funcargtypesstr + ' ' + line.strip()[0:-2]
else:
#matchobj = re.search('[\t ]*[([a-zA-Z0-9_]+)[\t ]+([a-zA-Z0-9_]+)\(([a-z]+.*$)', line)
matchobj = re.search('[\t ]*([a-zA-Z0-9_]+( \*)?)[\t ]*([a-zA-Z0-9_]+)\(([a-z]+.*$)', line)
if matchobj:
funcname = matchobj.group(3)
funcrettype = matchobj.group(1)
funcargtypesstr = matchobj.group(4);
if not re.search(';$', matchobj.group(4)):
# function arguments continue to next line
previousmatch = True
continue
else:
# remove ');' from end of line
funcargtypesstr = funcargtypesstr[0:-2]
if matchobj or previousmatch:
funcargtypeslist = re.split(', ', funcargtypesstr)
funcargtypeslist.reverse()
funcargtypeslist.append(funcrettype)
funcargtypeslist.reverse()
print funcname,"->",funcargtypeslist
pfuncs[funcname] = funcargtypeslist
finally:
marshal.dump(pfuncs,filedict)
filemp.close()
filedict.close()
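# Illustrative sketch (not part of the original script): a prototype line such
# as
#
# int pthread_mutex_lock(pthread_mutex_t *mutex);
#
# would be matched by the regular expression above with group(1) == "int",
# group(3) == "pthread_mutex_lock" and group(4) == "pthread_mutex_t *mutex);",
# so the marshalled dictionary would contain an entry roughly like
#
# pfuncs["pthread_mutex_lock"] == ["int", "pthread_mutex_t *mutex"]
#
# (the return type is prepended to the reversed argument-type list before the
# dump).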
| bsd-3-clause | -8,134,398,863,236,522,000 | 33.709091 | 103 | 0.566789 | false |
henaras/sahara | sahara/service/volumes.py | 1 | 8618 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
from sahara import conductor as c
from sahara import context
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.i18n import _LE
from sahara.utils import cluster_progress_ops as cpo
from sahara.utils.openstack import cinder
from sahara.utils.openstack import nova
from sahara.utils import poll_utils
conductor = c.API
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('api_version', 'sahara.utils.openstack.cinder',
group='cinder')
def _count_instances_to_attach(instances):
result = 0
for instance in instances:
if instance.node_group.volumes_per_node > 0:
result += 1
return result
def _count_volumes_to_mount(instances):
return sum([inst.node_group.volumes_per_node for inst in instances])
def attach_to_instances(instances):
instances_to_attach = _count_instances_to_attach(instances)
if instances_to_attach == 0:
return
cpo.add_provisioning_step(
instances[0].cluster_id, _("Attach volumes to instances"),
instances_to_attach)
with context.ThreadGroup() as tg:
for instance in instances:
if instance.node_group.volumes_per_node > 0:
with context.set_current_instance_id(instance.instance_id):
tg.spawn(
'attach-volumes-for-instance-%s'
% instance.instance_name, _attach_volumes_to_node,
instance.node_group, instance)
@poll_utils.poll_status(
'await_attach_volumes', _("Await for attaching volumes to instances"),
sleep=2)
def _await_attach_volumes(instance, devices):
return _count_attached_devices(instance, devices) == len(devices)
@cpo.event_wrapper(mark_successful_on_exit=True)
def _attach_volumes_to_node(node_group, instance):
ctx = context.ctx()
size = node_group.volumes_size
volume_type = node_group.volume_type
devices = []
for idx in range(1, node_group.volumes_per_node + 1):
display_name = "volume_" + instance.instance_name + "_" + str(idx)
device = _create_attach_volume(
ctx, instance, size, volume_type,
node_group.volume_local_to_instance, display_name,
node_group.volumes_availability_zone)
devices.append(device)
LOG.debug("Attached volume {device} to instance".format(device=device))
_await_attach_volumes(instance, devices)
paths = instance.node_group.storage_paths()
for idx in range(0, instance.node_group.volumes_per_node):
LOG.debug("Mounting volume {volume} to instance"
.format(volume=devices[idx]))
_mount_volume(instance, devices[idx], paths[idx])
LOG.debug("Mounted volume to instance")
@poll_utils.poll_status(
'volume_available_timeout', _("Await for volume become available"),
sleep=1)
def _await_available(volume):
volume = cinder.get_volume(volume.id)
if volume.status == 'error':
raise ex.SystemError(_("Volume %s has error status") % volume.id)
return volume.status == 'available'
def _create_attach_volume(ctx, instance, size, volume_type,
volume_local_to_instance, name=None,
availability_zone=None):
if CONF.cinder.api_version == 1:
kwargs = {'size': size, 'display_name': name}
else:
kwargs = {'size': size, 'name': name}
kwargs['volume_type'] = volume_type
if availability_zone is not None:
kwargs['availability_zone'] = availability_zone
if volume_local_to_instance:
kwargs['scheduler_hints'] = {'local_to_instance': instance.instance_id}
volume = cinder.client().volumes.create(**kwargs)
conductor.append_volume(ctx, instance, volume.id)
_await_available(volume)
resp = nova.client().volumes.create_server_volume(
instance.instance_id, volume.id, None)
return resp.device
def _count_attached_devices(instance, devices):
code, part_info = instance.remote().execute_command('cat /proc/partitions')
count = 0
for line in part_info.split('\n')[1:]:
tokens = line.split()
if len(tokens) > 3:
dev = '/dev/' + tokens[3]
if dev in devices:
count += 1
return count
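# Illustrative sketch (not part of the original module): a /proc/partitions
# line has the form "major minor #blocks name", for example
#
# 253 16 20971520 vdb
#
# so tokens[3] is the kernel device name and '/dev/' + tokens[3]
# (here "/dev/vdb") is compared against the attachment device paths reported
# for the instance.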
def mount_to_instances(instances):
if len(instances) == 0:
return
cpo.add_provisioning_step(
instances[0].cluster_id,
_("Mount volumes to instances"), _count_volumes_to_mount(instances))
with context.ThreadGroup() as tg:
for instance in instances:
with context.set_current_instance_id(instance.instance_id):
devices = _find_instance_volume_devices(instance)
# Since formatting can take several minutes (for large disks)
# and can be done in parallel, launch one thread per disk.
for idx in range(0, instance.node_group.volumes_per_node):
tg.spawn(
'mount-volume-%d-to-node-%s' %
(idx, instance.instance_name),
_mount_volume_to_node, instance, idx, devices[idx])
def _find_instance_volume_devices(instance):
volumes = nova.client().volumes.get_server_volumes(instance.instance_id)
devices = [volume.device for volume in volumes]
return devices
@cpo.event_wrapper(mark_successful_on_exit=True)
def _mount_volume_to_node(instance, idx, device):
LOG.debug("Mounting volume {device} to instance".format(device=device))
mount_point = instance.node_group.storage_paths()[idx]
_mount_volume(instance, device, mount_point)
LOG.debug("Mounted volume to instance")
def _mount_volume(instance, device_path, mount_point):
with instance.remote() as r:
try:
# Mount volumes with better performance options:
# - reduce number of blocks reserved for root to 1%
# - use 'dir_index' for faster directory listings
# - use 'extents' to work faster with large files
# - disable journaling
# - enable write-back
# - do not store access time
fs_opts = '-m 1 -O dir_index,extents,^has_journal'
mount_opts = '-o data=writeback,noatime,nodiratime'
r.execute_command('sudo mkdir -p %s' % mount_point)
r.execute_command('sudo mkfs.ext4 %s %s' % (fs_opts, device_path))
r.execute_command('sudo mount %s %s %s' %
(mount_opts, device_path, mount_point))
except Exception:
LOG.error(_LE("Error mounting volume to instance"))
raise
def detach_from_instance(instance):
for volume_id in instance.volumes:
_detach_volume(instance, volume_id)
_delete_volume(volume_id)
@poll_utils.poll_status(
'detach_volume_timeout', _("Await for volume become detached"), sleep=2)
def _await_detach(volume_id):
volume = cinder.get_volume(volume_id)
if volume.status not in ['available', 'error']:
return False
return True
def _detach_volume(instance, volume_id):
volume = cinder.get_volume(volume_id)
try:
LOG.debug("Detaching volume {id} from instance".format(id=volume_id))
nova.client().volumes.delete_server_volume(instance.instance_id,
volume_id)
except Exception:
LOG.error(_LE("Can't detach volume {id}").format(id=volume.id))
detach_timeout = CONF.timeouts.detach_volume_timeout
LOG.debug("Waiting {timeout} seconds to detach {id} volume".format(
timeout=detach_timeout, id=volume_id))
_await_detach(volume_id)
def _delete_volume(volume_id):
LOG.debug("Deleting volume {volume}".format(volume=volume_id))
volume = cinder.get_volume(volume_id)
try:
volume.delete()
except Exception:
LOG.error(_LE("Can't delete volume {volume}").format(
volume=volume.id))
| apache-2.0 | -8,491,081,074,740,166,000 | 34.465021 | 79 | 0.640752 | false |
niboshi/chainer | chainerx/_docs/routines.py | 1 | 127367 | import chainerx
from chainerx import _docs
def set_docs():
_docs_creation()
_docs_evaluation()
_docs_indexing()
_docs_linalg()
_docs_logic()
_docs_loss()
_docs_manipulation()
_docs_math()
_docs_sorting()
_docs_statistics()
_docs_connection()
_docs_normalization()
_docs_pooling()
_docs_rnn()
def _docs_creation():
_docs.set_doc(
chainerx.empty,
"""empty(shape, dtype, device=None)
Returns an array without initializing the elements.
Args:
shape (tuple of ints): Shape of the array.
dtype: Data type of the array.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
:class:`~chainerx.ndarray`: New array with elements not initialized.
.. seealso:: :func:`numpy.empty`
""")
_docs.set_doc(
chainerx.empty_like,
"""empty_like(a, device=None)
Returns a new array with the same shape and dtype as a given array.
Args:
a (~chainerx.ndarray): Prototype array.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
:class:`~chainerx.ndarray`: New array with same shape and dtype as ``a`` \
with elements not initialized.
Warning:
If ``device`` argument is omitted, the new array is created on the default
device, not the device of the prototype array.
.. seealso:: :func:`numpy.empty_like`
""")
_docs.set_doc(
chainerx.eye,
"""eye(N, M=None, k=0, dtype=float64, device=None)
Returns a 2-D array with ones on the diagonals and zeros elsewhere.
Args:
N (int): Number of rows.
M (int): Number of columns. M == N by default.
k (int): Index of the diagonal. Zero indicates the main diagonal,
a positive index an upper diagonal, and a negative index a lower
diagonal.
dtype: Data type.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: A 2-D array with given diagonals filled with ones and
zeros elsewhere.
.. seealso:: :func:`numpy.eye`
""")
_docs.set_doc(
chainerx.tri,
"""tri(N, M=None, k=0, dtype=float32, device=None)
Returns a 2-D array with ones at and below the given diagonal
and zeros elsewhere.
Args:
N (int): Number of rows.
M (int): Number of columns. M == N by default.
k (int): Index of the diagonal. Zero indicates the main diagonal,
a positive index an upper diagonal, and a negative index a lower
diagonal.
dtype: Data type.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: A 2-D array with given diagonals filled ones at and
below the given diagonal and zeros elsewhere.
.. seealso:: :func:`numpy.tri`
""")
_docs.set_doc(
chainerx.tril,
"""tril(m, k=0)
Lower triangle of an array.
Returns a copy of an array with elements above the k-th diagonal zeroed.
Args:
m (~chainerx.ndarray): Input array.
k (int): Index of the diagonal. Zero indicates the main diagonal,
a positive index an upper diagonal, and a negative index a lower
diagonal.
Returns:
~chainerx.ndarray: Lower triangle of ``m``.
.. seealso:: :func:`numpy.tril`
""")
_docs.set_doc(
chainerx.triu,
"""triu(m, k=0)
Upper triangle of an array.
Returns a copy of an array with elements below the k-th diagonal zeroed.
Args:
m (~chainerx.ndarray): Input array.
k (int): Index of the diagonal. Zero indicates the main diagonal,
a positive index an upper diagonal, and a negative index a lower
diagonal.
Returns:
~chainerx.ndarray: Upper triangle of ``m``.
.. seealso:: :func:`numpy.triu`
""")
_docs.set_doc(
chainerx.identity,
"""identity(n, dtype=None, device=None)
Returns a 2-D identity array.
It is equivalent to ``eye(n, n, dtype)``.
Args:
n (int): Number of rows and columns.
dtype: Data type.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: A 2-D identity array.
.. seealso:: :func:`numpy.identity`
""")
_docs.set_doc(
chainerx.ones,
"""ones(shape, dtype, device=None)
Returns a new array of given shape and dtype, filled with ones.
Args:
shape (tuple of ints): Shape of the array.
dtype: Data type.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: New array.
.. seealso:: :func:`numpy.ones`
""")
_docs.set_doc(
chainerx.ones_like,
"""ones_like(a, device=None)
Returns an array of ones with same shape and dtype as a given array.
Args:
a (~chainerx.ndarray): Prototype array.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: New array.
Warning:
If ``device`` argument is omitted, the new array is created on the default
device, not the device of the prototype array.
.. seealso:: :func:`numpy.ones_like`
""")
_docs.set_doc(
chainerx.zeros,
"""zeros(shape, dtype, device=None)
Returns a new array of given shape and dtype, filled with zeros.
Args:
shape (tuple of ints): Shape of the array.
dtype: Data type.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: New array.
.. seealso:: :func:`numpy.zeros`
""")
_docs.set_doc(
chainerx.zeros_like,
"""zeros_like(a, device=None)
Returns an array of zeros with same shape and dtype as a given array.
Args:
a (~chainerx.ndarray): Prototype array.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: New array.
Warning:
If ``device`` argument is omitted, the new array is created on the default
device, not the device of the prototype array.
.. seealso:: :func:`numpy.zeros_like`
""")
_docs.set_doc(
chainerx.full,
"""full(shape, fill_value, dtype, device=None)
Returns a new array of given shape and dtype, filled with a given value.
Args:
shape (tuple of ints): Shape of the array.
dtype: Data type.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: New array.
.. seealso:: :func:`numpy.full`
""")
_docs.set_doc(
chainerx.full_like,
"""full_like(a, fill_value, dtype=None, device=None)
Returns a full array with same shape and dtype as a given array.
Args:
a (~chainerx.ndarray): Prototype array.
dtype: Data type.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: New array.
Warning:
If ``device`` argument is omitted, the new array is created on the default
device, not the device of the prototype array.
.. seealso:: :func:`numpy.full_like`
""")
_docs.set_doc(
chainerx.array,
"""array(object, dtype=None, copy=True, device=None)
Creates an array.
Args:
object: A :class:`~chainerx.ndarray` object or any other object that can be
passed to :func:`numpy.array`.
dtype: Data type. If omitted, it's inferred from the input.
copy (bool): If ``True``, the object is always copied. Otherwise, a copy
will only be made if it is needed to satisfy any of the other
requirements (dtype, device, etc.).
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: New array.
Warning:
If ``device`` argument is omitted, the new array is created on the default
device, not the device of the input array.
.. seealso:: :func:`numpy.array`
""")
_docs.set_doc(
chainerx.asarray,
"""asarray(a, dtype=None, device=None)
Converts an object to an array.
Args:
a: The source object.
dtype: Data type. If omitted, it's inferred from the input.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: Array interpretation of ``a``. If ``a`` is already an \
ndarray on the given device with matching dtype, no copy is performed.
Warning:
If ``device`` argument is omitted, the new array is created on the default
device, not the device of the input array.
.. seealso:: :func:`numpy.asarray`
""")
_docs.set_doc(
chainerx.ascontiguousarray,
"""ascontiguousarray(a, dtype=None, device=None)
Returns a C-contiguous array.
Args:
a (~chainerx.ndarray): Source array.
dtype: Data type.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: C-contiguous array. A copy will be made only if needed.
Warning:
If ``device`` argument is omitted, the new array is created on the default
device, not the device of the input array.
.. seealso:: :func:`numpy.ascontiguousarray`
""")
_docs.set_doc(
chainerx.copy,
"""copy(a)
Creates a copy of a given array.
Args:
a (~chainerx.ndarray): Source array.
Returns:
~chainerx.ndarray: A copy array on the same device as ``a``.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.copy`
""")
_docs.set_doc(
chainerx.frombuffer,
"""frombuffer(buffer, dtype=float, count=-1, offset=0, device=None)
Returns a 1-D array interpretation of a buffer.
The given ``buffer`` memory must be usable on the given device, otherwise,
an error is raised.
Note:
The ``native`` backend requires a buffer of main memory, and
the ``cuda`` backend requires a buffer of CUDA memory.
No copy is performed.
Args:
buffer: An object that exposes the buffer interface.
dtype: Data type of the returned array.
count (int): Number of items to read. -1 means all data in the buffer.
offset (int): Start reading the buffer from this offset (in bytes).
device (~chainerx.Device): Device of the returned array.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: 1-D array interpretation of ``buffer``.
.. seealso:: :func:`numpy.frombuffer`
""")
_docs.set_doc(
chainerx.arange,
"""arange([start=0, ]stop, [step=1, ]dtype=None, device=None)
Returns an array with evenly spaced values within a given interval.
Values are generated within the half-open interval [``start``, ``stop``).
The first three arguments are mapped like the ``range`` built-in function,
i.e. ``start`` and ``step`` are optional.
Args:
start: Start of the interval.
stop: End of the interval.
step: Step width between each pair of consecutive values.
dtype: Data type specifier. It is inferred from other arguments by
default.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: The 1-D array of range values.
.. seealso:: :func:`numpy.arange`
""")
_docs.set_doc(
chainerx.linspace,
"""linspace(start, stop, num=50, endpoint=True, dtype=None, device=None)
Returns an array with evenly spaced numbers over a specified interval.
Instead of specifying the step width like :func:`chainerx.arange()`,
this function requires the total number of elements specified.
Args:
start: Start of the interval.
stop: End of the interval.
num: Number of elements.
endpoint (bool): If ``True``, the stop value is included as the last
element. Otherwise, the stop value is omitted.
dtype: Data type specifier. It is inferred from the start and stop
arguments by default.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: The 1-D array of ranged values.
.. seealso:: :func:`numpy.linspace`
""") # NOQA
_docs.set_doc(
chainerx.diag,
"""diag(v, k=0, device=None)
Returns a diagonal or a diagonal array.
Args:
v (~chainerx.ndarray): Array object.
k (int): Index of diagonals. Zero indicates the main diagonal, a
positive value an upper diagonal, and a negative value a lower
diagonal.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: If ``v`` is a 1-D array, then it returns a 2-D
array with the specified diagonal filled by ``v``. If ``v`` is a
2-D array, then it returns the specified diagonal of ``v``. In latter
case, if ``v`` is a :class:`chainerx.ndarray` object, then its view is
returned.
Note:
The argument ``v`` does not support array-like objects yet.
.. seealso:: :func:`numpy.diag`
""")
_docs.set_doc(
chainerx.diagflat,
"""diagflat(v, k=0, device=None)
Creates a diagonal array from the flattened input.
Args:
v (~chainerx.ndarray): Array object.
k (int): Index of diagonals. See :func:`chainerx.diag`.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
~chainerx.ndarray: A 2-D diagonal array with the diagonal copied
from ``v``.
Note:
The argument ``v`` does not support array-like objects yet.
.. seealso:: :func:`numpy.diagflat`
""")
_docs.set_doc(
chainerx.meshgrid,
"""meshgrid(xi, indexing='xy')
Returns coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of N-D scalar/vector
fields over N-D grids, given one-dimensional coordinate arrays x1, x2,…, xn.
Args:
xi (sequence of :class:`~chainerx.ndarray`\\ s): 1-D arrays
representing the coordinates of a grid.
indexing (str): {‘xy’, ‘ij’}, optional
Cartesian (‘xy’, default) or matrix (‘ij’) indexing of output.
Returns:
list of :class:`~chainerx.ndarray`\\ s: For vectors x1, x2,…, ‘xn’ with
lengths Ni=len(xi), return (N1, N2, N3,...Nn) shaped arrays if
indexing=’ij’ or (N2, N1, N3,...Nn) shaped arrays if indexing=’xy’
with the elements of xi repeated to fill the matrix along the first
dimension for x1, the second for x2 and so on.
.. seealso:: :func:`numpy.meshgrid`
""")
def _docs_evaluation():
_docs.set_doc(
chainerx.accuracy,
"""accuracy(y, t, ignore_label=None)
Computes multiclass classification accuracy of the minibatch.
Args:
y (~chainerx.ndarray):
Array whose (i, j, k, ...)-th element indicates the score of
the class j at the (i, k, ...)-th sample.
The prediction label :math:`\\hat t` is calculated by the formula
:math:`\\hat t(i, k, ...) = \\operatorname{\\mathrm{argmax}}_j \
y(i, j, k, ...)`.
t (~chainerx.ndarray):
Array of ground truth labels.
ignore_label (int or None): Skip calculating accuracy
if the true label is ``ignore_label``.
Returns:
:func:`~chainerx.ndarray`: A variable holding a scalar \
array of the accuracy.
Note:
This function is non-differentiable.
.. seealso:: :func:`chainer.functions.accuracy`
.. admonition:: Example
We show the most common case, when ``y`` is the two dimensional array.
>>> y = chainerx.array([[0.1, 0.7, 0.2], # prediction label is 1
... [8.0, 1.0, 2.0], # prediction label is 0
... [-8.0, 1.0, 2.0], # prediction label is 2
... [-8.0, -1.0, -2.0]]) # prediction label is 1
>>> t = chainerx.array([1, 0, 2, 1], chainerx.int32)
>>> chainerx.accuracy(y, t) \
# 100% accuracy because all samples are correct
array(1., shape=(), dtype=float64, device='native:0')
>>> t = chainerx.array([1, 0, 0, 0], chainerx.int32)
>>> chainerx.accuracy(y, t) \
# 50% accuracy because 1st and 2nd samples are correct
array(0.5, shape=(), dtype=float64, device='native:0')
>>> chainerx.accuracy(y, t, ignore_label=0) \
# 100% accuracy because of ignoring the 2nd, 3rd and 4th samples.
array(1., shape=(), dtype=float64, device='native:0')
""")
def _docs_indexing():
_docs.set_doc(
chainerx.take,
"""take(a, indices, axis)
Takes elements from an array along an axis.
Args:
a (~chainerx.ndarray): Source array.
indices (~chainerx.ndarray):
The indices of the values to extract. When indices are out of bounds,
they are wrapped around.
axis (int): The axis over which to select values.
mode (str): Specifies how out-of-bounds indices will behave.
'raise' - raise an error
'wrap' - wrap around
'clip' - clip to the range
Returns:
:func:`~chainerx.ndarray`: Output array.
Note:
This function currently does not support ``axis=None``
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
Note:
The default mode for the native backend is 'raise', while for the cuda
backend is 'wrap' in order to prevent device synchronization.
'raise' mode is currently not supported in the CUDA backend.
.. seealso:: :func:`numpy.take`
""")
_docs.set_doc(
chainerx.where,
"""where(condition, x, y)
Return elements chosen from ``x`` or ``y`` depending on condition.
Args:
condition (~chainerx.ndarray): Where True, yield ``x``, otherwise
yield ``y``.
x (~chainerx.ndarray): Values from which to choose.
y (~chainerx.ndarray): Values from which to choose.
Returns:
:func:`~chainerx.ndarray`: An array with elements
from ``x`` where condition is True, and elements from ``y`` elsewhere.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x`` and ``y``.
.. seealso:: :func:`numpy.where`
""")
_docs.set_doc(
chainerx.nonzero,
"""nonzero(a)
Return the indices of the elements that are non-zero.
Args:
a (~chainerx.ndarray): Input array.
Returns:
tuple of :func:`~chainerx.ndarray`: Indices of elements that are non-zero.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :func:`numpy.nonzero`
""")
def _docs_linalg():
_docs.set_doc(
chainerx.dot,
"""dot(a, b)
Returns a dot product of two arrays.
For arrays with more than one axis, it computes the dot product along the last
axis of ``a`` and the second-to-last axis of ``b``. This is just a matrix
product if the both arrays are 2-D. For 1-D arrays, it uses their unique axis
as an axis to take dot product over.
Args:
a (~chainerx.ndarray): The left argument.
b (~chainerx.ndarray): The right argument.
Returns:
:class:`~chainerx.ndarray`: Output array.
Note:
This function currently does not support N > 2 dimensional arrays.
Note:
During backpropagation, this function propagates the gradient of the
output array to input arrays ``a`` and ``b``.
.. seealso:: :func:`numpy.dot`
""")
_docs.set_doc(
chainerx.linalg.solve,
"""solve(a, b)
Solves a linear matrix equation, or system of linear scalar equations.
It computes the exact solution of ``x`` in ``ax = b``,
where ``a`` is a square and full rank matrix,
``b`` can be a vector, or a rectangular matrix.
When ``b`` is matrix, its columns are treated as separate vectors
representing multiple right-hand sides.
Args:
a (~chainerx.ndarray): Coefficient matrix.
b (~chainerx.ndarray): "dependent variable" values.
Returns:
:class:`~chainerx.ndarray`:
Solution to the system ``ax = b``.
Shape is identical to ``b``.
Note:
The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
.. seealso:: :func:`numpy.linalg.solve`
""")
_docs.set_doc(
chainerx.linalg.inv,
"""inv(a)
Computes the inverse of a matrix.
This function computes matrix ``a_inv`` from square matrix
``a`` such that ``dot(a, a_inv) = dot(a_inv, a) = eye(a.shape[0])``.
Args:
a (~chainerx.ndarray): The matrix to be inverted.
Returns:
:class:`~chainerx.ndarray`: The inverse of a matrix.
Note:
The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
.. seealso:: :func:`numpy.linalg.inv`
""")
_docs.set_doc(
chainerx.linalg.svd,
"""svd(a, full_matrices=True, compute_uv=True)
Singular Value Decomposition.
Factorizes the matrix ``a`` into two unitary matrices ``U`` and ``Vt``, and
a 1-D array ``s`` of singular values such that
``a == U * S * Vt``, where ``S`` is a suitably shaped matrix of zeros with
main diagonal ``s`` and ``*`` represents a dot product.
Args:
a (~chainerx.ndarray): The input matrix with dimension ``(M, N)``.
full_matrices (bool): If True, it returns u and v with dimensions
``(M, M)`` and ``(N, N)``. Otherwise, the dimensions of u and v
are respectively ``(M, K)`` and ``(K, N)``, where
``K = min(M, N)``.
compute_uv (bool): If False, only singular values are computed.
Returns:
tuple of :class:`chainerx.ndarray`:
A tuple of ``(U, s, Vt)`` such that ``a = U * diag(s) * Vt``.
When ``compute_uv`` is False only singular values ``s`` are returned.
Note:
* The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
* The SVD is commonly written as `a = U * diag(s) * V^T`.
The ``Vt`` returned by this function is `V^T`.
* During backpropagation, this function requires ``U`` and ``Vt`` computed,
therefore differentiation does not work for ``compute_uv=False``.
* Backpropagation is not implemented for ``full_matrices=True``.
.. seealso:: :func:`numpy.linalg.svd`
""")
_docs.set_doc(
chainerx.linalg.pinv,
"""pinv(a, rcond=1e-15)
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its singular-value
decomposition (SVD) and including all large singular values.
Args:
a (~chainerx.ndarray): The input matrix to be pseudo-inverted.
rcond (float): Cutoff for small singular values.
Returns:
:class:`~chainerx.ndarray`: The pseudo-inverse of ``a``.
Note:
The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
.. seealso:: :func:`numpy.linalg.pinv`
""")
_docs.set_doc(
chainerx.linalg.qr,
"""qr(a, mode='reduced')
Compute the qr factorization of a matrix.
Factor the matrix ``a`` as *qr*, where ``q`` is orthonormal and ``r`` is
upper-triangular.
Args:
a (~chainerx.ndarray): Matrix to be factored.
mode (str): The mode of decomposition.
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,),
where ``(M, N)`` is the shape of the input matrix and ``K = min(M, N)``
Returns:
q (~chainerx.ndarray): A matrix with orthonormal columns.
r (~chainerx.ndarray): The upper-triangular matrix.
Note:
* The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
* Backpropagation is not implemented for non-square output matrix ``r``.
* Backpropagation is not implemented for 'r' or 'raw' modes.
.. seealso:: :func:`numpy.linalg.qr`
""")
_docs.set_doc(
chainerx.linalg.cholesky,
"""cholesky(a)
Computes the Cholesky decomposition of a matrix.
Returns the Cholesky decomposition, :math:`A = L L^T`,
for the square matrix ``a``.
Args:
a (~chainerx.ndarray): Symmetric positive-definite input matrix.
Returns:
:class:`~chainerx.ndarray`: Output array. Cholesky factor of ``a``.
Note:
The forward computation does not necessarily check if the input matrix is
symmetric (e.g. the native backend relying on LAPACK does not). However,
both the forward and the backward computations assume that it is and their
results are unspecified otherwise. The computed gradient is always a
symmetric matrix. More specifically, the gradient is computed as if the
function is restricted to a Riemannian submanifold of
:math:`R_{n \\times n}` consisting just of positive-definite symmetric
matrices and is faithful to the mathematical definition of the Cholesky
decomposition.
Note:
* GPU implementation of the Cholesky decomposition routine is based on
cuSOLVER library. Older versions (<10.1) of it might not raise an error
for some non positive-definite matrices.
* The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
.. seealso:: :func:`numpy.linalg.cholesky`
""")
_docs.set_doc(
chainerx.linalg.eigh,
"""eigh(a, UPLO='L')
Compute the eigenvalues and eigenvectors of a real symmetric matrix.
Args:
a (~chainerx.ndarray): Real symmetric matrix whose eigenvalues
and eigenvectors are to be computed.
UPLO (str): Specifies whether the calculation is done with the lower
triangular part of a ('L', default) or the upper triangular part ('U').
Returns:
tuple of :class:`~chainerx.ndarray`:
Returns a tuple ``(w, v)``. ``w`` contains eigenvalues and
``v`` contains eigenvectors. ``v[:, i]`` is an eigenvector
corresponding to an eigenvalue ``w[i]``.
Note:
Although ``UPLO`` can be specified to ignore either the strictly lower or
upper part of the input matrix, the backward computation assumes that the
inputs is symmetric and the computed gradient is always a symmetric matrix
with respect to ``UPLO``. More specifically, the gradient is computed as if
the function is restricted to a Riemannian submanifold of
:math:`R_{n \\times n}` consisting just of symmetric matrices and is
faithful to the mathematical definition of the eigenvalue decomposition of
symmetric matrices.
Note:
The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
.. seealso:: :func:`numpy.linalg.eigh`
""")
_docs.set_doc(
chainerx.linalg.eigvalsh,
"""eigvalsh(a, UPLO='L')
Compute the eigenvalues of a real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Args:
a (~chainerx.ndarray): Real symmetric matrix whose eigenvalues
and eigenvectors are to be computed.
UPLO (str): Specifies whether the calculation is done with the lower
triangular part of a (‘L’, default) or the upper triangular part (‘U’).
(optional).
Returns:
:class:`~chainerx.ndarray`: Returns eigenvalues as a vector.
Note:
* The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
* Backpropagation requires eigenvectors and, therefore, is not implemented
for this function. ``linalg.eigh`` should be used instead.
.. seealso:: :func:`numpy.linalg.eigvalsh`
""")
def _docs_logic():
_docs.set_doc(
chainerx.all,
"""all(x)
Test whether all array elements along a given axis evaluate to True.
Args:
x (~chainerx.ndarray): Input array.
axis (None or int or tuple of ints):
Axis or axes along which AND reduction is performed.
The flattened array is used by default.
keepdims (bool):
If this is set to ``True``, the reduced axes are left in the result
as dimensions with size one.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.all`
""")
_docs.set_doc(
chainerx.any,
"""any(x)
Test whether any array element along a given axis evaluate to True.
Args:
x (~chainerx.ndarray): Input array.
axis (None or int or tuple of ints):
Axis or axes along which OR reduction is performed.
The flattened array is used by default.
keepdims (bool):
If this is set to ``True``, the reduced axes are left in the result
as dimensions with size one.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.any`
""")
_docs.set_doc(
chainerx.logical_not,
"""logical_not(x)
Returns an array of NOT x element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.logical_not`
""")
_docs.set_doc(
chainerx.logical_and,
"""logical_and(x1, x2)
Returns an array of x1 AND x2 element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.logical_and`
""")
_docs.set_doc(
chainerx.logical_or,
"""logical_or(x1, x2)
Returns an array of x1 OR x2 element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.logical_or`
""")
_docs.set_doc(
chainerx.logical_xor,
"""logical_xor(x1, x2)
Returns an array of x1 XOR x2 element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.logical_xor`
""")
_docs.set_doc(
chainerx.greater,
"""greater(x1, x2)
Returns an array of (x1 > x2) element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.greater`
""")
_docs.set_doc(
chainerx.greater_equal,
"""greater_equal(x1, x2)
Returns an array of (x1 >= x2) element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.greater_equal`
""")
_docs.set_doc(
chainerx.less,
"""less(x1, x2)
Returns an array of (x1 < x2) element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.less`
""")
_docs.set_doc(
chainerx.less_equal,
"""less_equal(x1, x2)
Returns an array of (x1 <= x2) element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.less_equal`
""")
_docs.set_doc(
chainerx.equal,
"""equal(x1, x2)
Returns an array of (x1 == x2) element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.equal`
""")
_docs.set_doc(
chainerx.not_equal,
"""not_equal(x1, x2)
Returns an array of (x1 != x2) element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.not_equal`
""")
def _docs_loss():
_docs.set_doc(
chainerx.absolute_error,
"""Element-wise absolute error function.
Computes the element-wise absolute error :math:`L` between two inputs
:math:`x_1` and :math:`x_2` defined as follows.
.. math::
L = |x_1 - x_2|
Args:
x1 (~chainerx.ndarray): Input variable.
x2 (~chainerx.ndarray): Input variable.
Returns:
:class:`~chainerx.ndarray`: A variable holding an array representing
the absolute error of two inputs.
.. seealso:: :func:`chainer.functions.absolute_error`
""")
_docs.set_doc(
chainerx.squared_error,
"""Element-wise squared error function.
Computes the element-wise squared error :math:`L` between two inputs
:math:`x_1` and :math:`x_2` defined as follows.
.. math::
L = (x_1 - x_2)^2
Can be used to compute mean squared error by just calling `mean()`
on the output array.
Args:
x0 (~chainerx.ndarray): Input variable.
x1 (~chainerx.ndarray): Input variable.
Returns:
:class:`~chainerx.ndarray`: A variable holding an array representing
the squared error of two inputs.
.. seealso:: :func:`chainer.functions.squared_error`
""")
_docs.set_doc(
chainerx.huber_loss,
"""Element-wise Huber loss.
The Huber loss is similar to the squared error but is less sensitive to
outliers in the data. It is defined as
.. math::
L_{\\delta}(a) = \\left \\{ \\begin{array}{cc}
\\frac{1}{2} a^2 & {\\rm if~|a| \\leq \\delta} \\\\
\\delta (|a| - \\frac{1}{2} \\delta) & {\\rm otherwise,}
\\end{array} \\right.
where :math:`a = x - t` is the difference between the input :math:`x`
and the target :math:`t`.
See: `Huber loss - Wikipedia <https://en.wikipedia.org/wiki/Huber_loss>`_.
Args:
x (~chainerx.ndarray): Input variable.
t (~chainerx.ndarray): Target variable for regression.
delta (float): Constant variable for Huber loss function as used in
definition.
Returns:
:class:`~chainerx.ndarray`:
A variable object holding an array representing the Huber loss
:math:`L_{\\delta}` of the two inputs.
.. seealso:: :func:`chainer.functions.huber_loss`
""")
_docs.set_doc(
chainerx.gaussian_kl_divergence,
"""Element-wise KL-divergence of Gaussian variables from the standard one.
Given two variable ``mean`` representing :math:`\\mu` and ``ln_var``
representing :math:`\\log(\\sigma^2)`, this function calculates
the element-wise KL-divergence between the given multi-dimensional
Gaussian :math:`N(\\mu, S)` and the standard Gaussian :math:`N(0, I)`
.. math::
D_{\\mathbf{KL}}(N(\\mu, S) \\| N(0, I)),
where :math:`S` is a diagonal matrix such that :math:`S_{ii} = \\sigma_i^2`
and :math:`I` is an identity matrix.
Args:
mean (~chainerx.ndarray):
A variable representing mean of given
gaussian distribution, :math:`\\mu`.
ln_var (~chainerx.ndarray):
A variable representing logarithm of
variance of given gaussian distribution, :math:`\\log(\\sigma^2)`.
Returns:
:class:`~chainerx.ndarray`:
A variable representing KL-divergence between
given gaussian distribution and the standard gaussian.
.. seealso:: :func:`chainer.functions.gaussian_kl_divergence`
""")
_docs.set_doc(
chainerx.sigmoid_cross_entropy,
"""sigmoid_cross_entropy(x1, x2)
Element-wise cross entropy loss for pre-sigmoid activations.
Args:
x1 (~chainerx.ndarray): An array whose (i, j)-th element indicates the
unnormalized log probability of the j-th unit at the i-th example.
x2 (~chainerx.ndarray): An array whose (i, j)-th element indicates a signed
integer vector of ground truth labels 0 or 1. If ``x2[i, j] == -1``,
corresponding ``x1[i, j]`` is ignored. Loss is zero if all ground truth
labels are -1.
Returns:
:class:`~chainerx.ndarray`: An array of the cross entropy.
Note:
During backpropagation, this function propagates the gradient of the output
array to the input array ``x1`` only.
""")
_docs.set_doc(
chainerx.softmax_cross_entropy,
"""softmax_cross_entropy(x1, x2)
Element-wise cross entropy loss for pre-softmax activations.
Args:
x1 (~chainerx.ndarray): An array whose element indicates unnormalized log
probability: the first axis of the array represents the number of
samples, and the second axis represents the number of classes.
x2 (~chainerx.ndarray): A signed integer vector of ground truth labels. If
``x2[i] == -1``, corresponding ``x1[i]`` is ignored.
Returns:
:class:`~chainerx.ndarray`: An array of the cross entropy.
Note:
During backpropagation, this function propagates the gradient of the output
array to the input array ``x1`` only.
""")
def _docs_manipulation():
_docs.set_doc(
chainerx.reshape,
"""reshape(a, newshape)
Returns a reshaped array.
Args:
a (~chainerx.ndarray): Array to be reshaped.
newshape (int or tuple of ints): The new shape of the array to return.
If it is an integer, then it is treated as a tuple of length one.
It should be compatible with ``a.size``. One of the elements can be
-1, which is automatically replaced with the appropriate value to
make the shape compatible with ``a.size``.
Returns:
:class:`~chainerx.ndarray`: A reshaped view of ``a`` if possible,
otherwise a copy.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.reshape`
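.. admonition:: Example
A minimal shape-level sketch with a small illustrative array:
>>> a = chainerx.ones((2, 3), dtype=chainerx.float32)
>>> chainerx.reshape(a, (3, 2)).shape
(3, 2)
>>> chainerx.reshape(a, (-1,)).shape  # -1 is inferred from a.size
(6,)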
""")
_docs.set_doc(
chainerx.ravel,
"""ravel(a)
Returns a flattened array.
Args:
a (~chainerx.ndarray): Array to be flattened.
Returns:
:class:`~chainerx.ndarray`: A flattened view of ``a`` if possible,
otherwise a copy.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.ravel`
""")
_docs.set_doc(
chainerx.transpose,
"""transpose(a, axes=None)
Permutes the dimensions of an array.
Args:
a (~chainerx.ndarray): Array to permute the dimensions.
axes (tuple of ints): Permutation of the dimensions. This function reverses
the shape by default.
Returns:
~chainerx.ndarray: A view of ``a`` with the dimensions permuted.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.transpose`
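.. admonition:: Example
A minimal shape-level sketch with a small illustrative array:
>>> a = chainerx.ones((2, 3, 4), dtype=chainerx.float32)
>>> chainerx.transpose(a).shape  # axes reversed by default
(4, 3, 2)
>>> chainerx.transpose(a, (1, 0, 2)).shape
(3, 2, 4)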
""")
_docs.set_doc(
chainerx.broadcast_to,
"""broadcast_to(array, shape)
Broadcasts an array to a given shape.
Args:
array (~chainerx.ndarray): Array to broadcast.
shape (tuple of ints): The shape of the desired array.
Returns:
~chainerx.ndarray: Broadcasted view.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``array``.
.. seealso:: :func:`numpy.broadcast_to`
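.. admonition:: Example
A minimal shape-level sketch with a small illustrative array:
>>> a = chainerx.ones((3,), dtype=chainerx.float32)
>>> chainerx.broadcast_to(a, (2, 3)).shape
(2, 3)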
""")
_docs.set_doc(
chainerx.squeeze,
"""squeeze(a, axis=None)
Removes size-one axes from the shape of an array.
Args:
a (~chainerx.ndarray): Array to be reshaped.
axis (int or tuple of ints): Axes to be removed. This function removes all
size-one axes by default. If one of the specified axes is not of size
one, an exception is raised.
Returns:
~chainerx.ndarray: An array without (specified) size-one axes.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.squeeze`
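.. admonition:: Example
A minimal shape-level sketch with a small illustrative array:
>>> a = chainerx.ones((1, 3, 1), dtype=chainerx.float32)
>>> chainerx.squeeze(a).shape
(3,)
>>> chainerx.squeeze(a, axis=0).shape
(3, 1)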
""")
_docs.set_doc(
chainerx.concatenate,
"""concatenate(arrays, axis=0)
Joins arrays along an axis.
Args:
arrays (sequence of :class:`~chainerx.ndarray`\\ s): Arrays to be joined.
All of these should have the same dimensionalities except the specified
axis.
axis (int): The axis to join arrays along.
Returns:
~chainerx.ndarray: Joined array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays in ``arrays``.
.. seealso:: :func:`numpy.concatenate`
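.. admonition:: Example
A minimal shape-level sketch with small illustrative arrays:
>>> a = chainerx.ones((2, 3), dtype=chainerx.float32)
>>> b = chainerx.ones((2, 3), dtype=chainerx.float32)
>>> chainerx.concatenate([a, b], axis=0).shape
(4, 3)
>>> chainerx.concatenate([a, b], axis=1).shape
(2, 6)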
""")
_docs.set_doc(
chainerx.stack,
"""stack(arrays, axis=0)
Stacks arrays along a new axis.
Args:
arrays (sequence of :class:`~chainerx.ndarray`\\ s): Arrays to be stacked.
axis (int): Axis along which the arrays are stacked.
Returns:
~chainerx.ndarray: Stacked array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays in ``arrays``.
.. seealso:: :func:`numpy.stack`
""")
_docs.set_doc(
chainerx.hstack,
"""hstack(arrays)
Stack arrays in sequence horizontally (column wise).
Args:
arrays (sequence of :class:`~chainerx.ndarray`\\ s): Arrays to be stacked.
Returns:
~chainerx.ndarray: Stacked array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays in ``arrays``.
.. seealso:: :func:`numpy.hstack`
""")
_docs.set_doc(
chainerx.vstack,
"""vstack(arrays)
Stack arrays in sequence vertically (row wise).
Args:
arrays (sequence of :class:`~chainerx.ndarray`\\ s): Arrays to be stacked.
Returns:
~chainerx.ndarray: Stacked array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays in ``arrays``.
.. seealso:: :func:`numpy.vstack`
""")
_docs.set_doc(
chainerx.dstack,
"""dstack(arrays)
Stack arrays in sequence depth wise (along third axis).
Args:
arrays (sequence of :class:`~chainerx.ndarray`\\ s): Arrays to be stacked.
Returns:
~chainerx.ndarray: Stacked array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays in ``arrays``.
.. seealso:: :func:`numpy.dstack`
""")
_docs.set_doc(
chainerx.atleast_2d,
"""atleast_2d(a)
View inputs as arrays with at least two dimensions.
Args:
a (~chainerx.ndarray): Array.
Returns:
~chainerx.ndarray: An array with a.ndim >= 2.
Copies are avoided where possible, and views with
two or more dimensions are returned.
Note:
* Arrays that already have two or more dimensions are preserved.
* During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.atleast_2d`
""")
_docs.set_doc(
chainerx.atleast_3d,
"""atleast_3d(a)
View inputs as arrays with at least three dimensions.
Args:
a (~chainerx.ndarray): Array.
Returns:
~chainerx.ndarray: An array with a.ndim >= 3.
Copies are avoided where possible, and views with
three or more dimensions are returned.
Note:
* Arrays that already have three or more dimensions are preserved.
* During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.atleast_3d`
""")
_docs.set_doc(
chainerx.split,
"""split(ary, indices_or_sections, axis=0)
Splits an array into multiple sub arrays along a given axis.
Args:
ary (~chainerx.ndarray): Array to split.
indices_or_sections (int or sequence of ints): A value indicating how to
divide the axis. If it is an integer, then it is treated as the number of
sections, and the axis is evenly divided. Otherwise, the integers
indicate indices to split at. Note that a sequence on the device
memory is not allowed.
axis (int): Axis along which the array is split.
Returns:
list of :class:`~chainerx.ndarray`\\ s: A list of sub arrays. Each array \
is a partial view of the input array.
Note:
During backpropagation, this function propagates the gradients of the
output arrays to the input array ``ary``.
.. seealso:: :func:`numpy.split`
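.. admonition:: Example
A minimal shape-level sketch with a small illustrative array:
>>> a = chainerx.ones((6, 4), dtype=chainerx.float32)
>>> [p.shape for p in chainerx.split(a, 3, axis=0)]  # 3 equal sections
[(2, 4), (2, 4), (2, 4)]
>>> [p.shape for p in chainerx.split(a, [1, 4], axis=0)]  # split at indices 1 and 4
[(1, 4), (3, 4), (2, 4)]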
""")
_docs.set_doc(
chainerx.dsplit,
"""dsplit(ary, indices_or_sections)
Split array into multiple sub-arrays along the 3rd axis (depth).
Args:
ary (~chainerx.ndarray): Array to split.
indices_or_sections (int or sequence of ints): A value indicating how to
divide the axis. If it is an integer, then it is treated as the number of
sections, and the axis is evenly divided. Otherwise, the integers
indicate indices to split at. Note that a sequence on the device
memory is not allowed.
Returns:
list of :class:`~chainerx.ndarray`\\ s: A list of sub arrays. Each array \
is a partial view of the input array.
Note:
During backpropagation, this function propagates the gradients of the
output arrays to the input array ``ary``.
.. seealso:: :func:`numpy.dsplit`
""")
_docs.set_doc(
chainerx.vsplit,
"""vsplit(ary, indices_or_sections)
Splits an array into multiple sub-arrays vertically (row-wise).
Args:
ary (~chainerx.ndarray): Array to split.
indices_or_sections (int or sequence of ints): A value indicating how to
divide the axis. If it is an integer, then it is treated as the number of
sections, and the axis is evenly divided. Otherwise, the integers
indicate indices to split at. Note that a sequence on the device
memory is not allowed.
Returns:
list of :class:`~chainerx.ndarray`\\ s: A list of sub arrays. Each array \
is a partial view of the input array.
Note:
During backpropagation, this function propagates the gradients of the
output arrays to the input array ``ary``.
.. seealso:: :func:`numpy.vsplit`
""")
_docs.set_doc(
chainerx.hsplit,
"""hsplit(ary, indices_or_sections)
Split an array into multiple sub-arrays horizontally (column-wise).
Args:
ary (~chainerx.ndarray): Array to split.
indices_or_sections (int or sequence of ints): A value indicating how to
divide the axis. If it is an integer, then it is treated as the number of
sections, and the axis is evenly divided. Otherwise, the integers
indicate indices to split at. Note that a sequence on the device
memory is not allowed.
Returns:
list of :class:`~chainerx.ndarray`\\ s: A list of sub arrays. Each array \
is a partial view of the input array.
Note:
During backpropagation, this function propagates the gradients of the
output arrays to the input array ``ary``.
.. seealso:: :func:`numpy.hsplit`
""")
_docs.set_doc(
chainerx.swapaxes,
"""swapaxes(a, axis1, axis2)
Interchange two axes of an array.
Args:
a (~chainerx.ndarray): Array whose axes are to be swapped.
axis1 (int): First axis.
axis2 (int): Second axis.
Returns:
~chainerx.ndarray: Array with ``axis1`` and ``axis2`` interchanged.
Note:
* Output array is a view of the input array.
* During backpropagation, this function propagates the gradients of the
output arrays to the input array ``a``.
.. seealso:: :func:`numpy.swapaxes`
""")
_docs.set_doc(
chainerx.repeat,
"""repeat(a, repeats, axis=None)
Constructs an array by repeating a given array.
Args:
a (~chainerx.ndarray): Array to repeat.
repeats (int or tuple of ints): The number of times which each
element of a is repeated.
axis (int): The axis along which to repeat values.
Returns:
~chainerx.ndarray: The repeated output array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.repeat`
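.. admonition:: Example
A minimal shape-level sketch with a small illustrative array:
>>> a = chainerx.ones((2, 3), dtype=chainerx.float32)
>>> chainerx.repeat(a, 2, axis=0).shape
(4, 3)
>>> chainerx.repeat(a, 2, axis=1).shape
(2, 6)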
""")
_docs.set_doc(
chainerx.expand_dims,
"""expand_dims(a, axis)
Expand the shape of an array.
Args:
a (~chainerx.ndarray): Input Array.
axis (int): Position in the expanded axes where the new axis is placed.
Returns:
~chainerx.ndarray: Output array.
Note:
* Output array may or may not be a view of the input array.
* During backpropagation, this function propagates the gradients of the
output arrays to the input array ``a``.
.. seealso:: :func:`numpy.expand_dims`
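.. admonition:: Example
A minimal shape-level sketch with a small illustrative array:
>>> a = chainerx.ones((2, 3), dtype=chainerx.float32)
>>> chainerx.expand_dims(a, 0).shape
(1, 2, 3)
>>> chainerx.expand_dims(a, 2).shape
(2, 3, 1)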
""")
_docs.set_doc(
chainerx.flip,
"""flip(m, axis)
Reverse the order of elements in an array along the given axis.
Args:
m (~chainerx.ndarray): Input Array.
axis (int or tuple of ints): Axis or axes along which to flip over.
The default, axis=None, will flip over all of the axes of the input array.
If axis is negative it counts from the last to the first axis.
If axis is a tuple of ints, flipping is performed on all of the
axes specified in the tuple.
Returns:
~chainerx.ndarray: A view of m with the entries of axis reversed.
Since a view is returned, this operation is done in constant time.
Note:
* Output array is a view of the input array.
* During backpropagation, this function propagates the gradients of the
output arrays to the input array ``m``.
.. seealso:: :func:`numpy.flip`
""")
_docs.set_doc(
chainerx.fliplr,
"""fliplr(m)
Flip array in the left/right direction.
Args:
m (~chainerx.ndarray): Input Array.
Returns:
~chainerx.ndarray: A view of m with the columns reversed.
Since a view is returned, this operation is done in constant time.
Note:
* Output array is a view of the input array.
* During backpropagation, this function propagates the gradients of the
output arrays to the input array ``m``.
.. seealso:: :func:`numpy.fliplr`
""")
_docs.set_doc(
chainerx.flipud,
"""flipud(m)
Flip array in the up/down direction.
Args:
m (~chainerx.ndarray): Input Array.
Returns:
~chainerx.ndarray: A view of m with the rows reversed.
Since a view is returned, this operation is done in constant time.
Note:
* Output array is a view of the input array.
* During backpropagation, this function propagates the gradients of the
output arrays to the input array ``m``.
.. seealso:: :func:`numpy.flipud`
""")
_docs.set_doc(
chainerx.moveaxis,
"""moveaxis(a, source, destination)
Move axes of an array to new positions.
Other axes remain in their original order.
Args:
a (~chainerx.ndarray): Input Array.
source (int or tuple of ints): Original positions of the axes to move.
These must be unique.
destination (int or tuple of ints): Destination positions for each of
the original axes. These must also be unique.
Returns:
~chainerx.ndarray: Array with moved axes. This array is a view of the
input array.
Note:
* During backpropagation, this function propagates the gradients of the
output arrays to the input array ``a``.
.. seealso:: :func:`numpy.moveaxis`
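.. admonition:: Example
A minimal shape-level sketch with a small illustrative array:
>>> a = chainerx.ones((3, 4, 5), dtype=chainerx.float32)
>>> chainerx.moveaxis(a, 0, 2).shape  # move axis 0 to position 2
(4, 5, 3)
>>> chainerx.moveaxis(a, (0, 1), (1, 2)).shape
(5, 3, 4)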
""")
def _docs_math():
_docs.set_doc(
chainerx.negative,
"""negative(x)
Numerical negative, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = -x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.negative`
""")
_docs.set_doc(
chainerx.add,
"""add(x1, x2)
Add arguments, element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = x_1 + x_2`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
.. seealso:: :data:`numpy.add`
""")
_docs.set_doc(
chainerx.subtract,
"""subtract(x1, x2)
Subtract arguments, element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = x_1 - x_2`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
.. seealso:: :data:`numpy.subtract`
""")
_docs.set_doc(
chainerx.multiply,
"""multiply(x1, x2)
Multiply arguments, element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = x_1 \\times x_2`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
.. seealso:: :data:`numpy.multiply`
""")
_docs.set_doc(
chainerx.divide,
"""divide(x1, x2)
Divide arguments, element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\frac{x_1}{x_2}`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
.. seealso:: :data:`numpy.divide`
""")
_docs.set_doc(
chainerx.sum,
"""sum(a, axis=None, keepdims=False)
Sum of array elements over a given axis.
Args:
a (~chainerx.ndarray): Input array.
axis (None or int or tuple of ints):
Axis or axes along which a sum is performed.
The flattened array is used by default.
keepdims (bool):
If this is set to ``True``, the reduced axes are left in the result
as dimensions with size one.
Returns:
:class:`~chainerx.ndarray`: The sum of input elements over a given axis.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.sum`
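.. admonition:: Example
A minimal shape-level sketch showing how ``axis`` and ``keepdims`` affect the
result shape (the input values are illustrative):
>>> a = chainerx.ones((2, 3), dtype=chainerx.float32)
>>> chainerx.sum(a, axis=1).shape
(2,)
>>> chainerx.sum(a, axis=1, keepdims=True).shape
(2, 1)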
""")
_docs.set_doc(
chainerx.maximum,
"""maximum(x1, x2)
Maximum arguments, element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`:
Returned array: :math:`y = max(\\{x_1, x_2\\})`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
.. seealso:: :data:`numpy.maximum`
""")
_docs.set_doc(
chainerx.minimum,
"""minimum(x1, x2)
Minimum arguments, element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`:
Returned array: :math:`y = min(\\{x_1, x_2\\})`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
.. seealso:: :data:`numpy.minimum`
""")
_docs.set_doc(
chainerx.remainder,
"""remainder(x1, x2)
Return element-wise remainder of division.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`:
Returned array: The element-wise remainder of
the quotient ``floor_divide(x1, x2)``.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
.. seealso:: :data:`numpy.remainder`
""")
_docs.set_doc(
chainerx.exp,
"""exp(x)
Numerical exponential, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\exp x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.exp`
""")
_docs.set_doc(
chainerx.log,
"""log(x)
Natural logarithm, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\ln x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.log`
""")
_docs.set_doc(
chainerx.log10,
"""log10(x)
Base 10 logarithm, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\log_{10} x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.log10`
""")
_docs.set_doc(
chainerx.log2,
"""log2(x)
Base 2 logarithm, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\log_{2} x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.log2`
""")
_docs.set_doc(
chainerx.log1p,
"""log1p(x)
Natural logarithm of one plus the input, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\log(1 + x)`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.log1p`
""")
_docs.set_doc(
chainerx.logsumexp,
"""logsumexp(x, axis=None, keepdims=False)
The log of the sum of exponentials of input array.
Args:
x (~chainerx.ndarray): Input array.
axis (None or int or tuple of ints):
Axis or axes along which a sum is performed.
The flattened array is used by default.
keepdims (bool):
If this is set to ``True``, the reduced axes are left in the result
as dimensions with size one.
Returns:
:class:`~chainerx.ndarray`: The log of the sum of exponentials of
input elements over a given axis.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
""")
_docs.set_doc(
chainerx.log_softmax,
"""log_softmax(x, axis=None)
The log of the softmax of input array.
Args:
x (~chainerx.ndarray): Input array.
axis (None or int or tuple of ints):
Axis or axes along which a sum is performed.
The flattened array is used by default.
Returns:
:class:`~chainerx.ndarray`: The log of the softmax of input elements
over a given axis.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
""")
_docs.set_doc(
chainerx.square,
"""square(x)
Returns the element-wise square of the input.
Args:
x (~chainerx.ndarray or scalar): Input data
Returns:
~chainerx.ndarray: Returned array: :math:`y = x * x`.
A scalar is returned if ``x`` is a scalar.
Note:
During backpropagation, this function propagates the gradient
of the output array to the input array ``x``.
.. seealso:: :data:`numpy.square`
""")
_docs.set_doc(
chainerx.sqrt,
"""sqrt(x)
Non-negative square-root, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\sqrt x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.sqrt`
""")
_docs.set_doc(
chainerx.sinh,
"""sinh(x)
Hyperbolic Sine, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\sinh x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.sinh`
""")
_docs.set_doc(
chainerx.cosh,
"""cosh(x)
Hyperbolic Cosine, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\cosh x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.cosh`
""")
_docs.set_doc(
chainerx.tanh,
"""tanh(x)
Element-wise hyperbolic tangent function.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\tanh x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.tanh`
""")
_docs.set_doc(
chainerx.sigmoid,
"""sigmoid(x)
Element-wise sigmoid logistic function.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array:
:math:`f(x) = (1 + \\exp(-x))^{-1}`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :func:`chainer.functions.sigmoid`
""")
_docs.set_doc(
chainerx.sin,
"""sin(x)
Sine, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\sin x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.sin`
""")
_docs.set_doc(
chainerx.cos,
"""cos(x)
Cosine, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\cos x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.cos`
""")
_docs.set_doc(
chainerx.ceil,
"""ceil(x)
Return the ceiling of the input, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: The ceiling of each element in array.
.. seealso:: :data:`numpy.ceil`
""")
_docs.set_doc(
chainerx.tan,
"""tan(x)
Tangent, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\tan x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.tan`
""")
_docs.set_doc(
chainerx.relu,
"""Rectified Linear Unit function.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\max (0, x)`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
""")
_docs.set_doc(
chainerx.tree_lstm,
"""tree_lstm(*inputs)
TreeLSTM unit as an activation function.
This function implements TreeLSTM units both for
N-ary TreeLSTM and Child-Sum TreeLSTM.
Let :math:`c_{\\text{1}}, c_{\\text{2}}, \\dots, c_{\\text{N}}` be the
children cell states and :math:`x` the incoming signal.
First, the incoming signal :math:`x` is split into (3 + N) arrays
:math:`a, i, o, f_{\\text{1}}, f_{\\text{2}}, ..., f_{\\text{N}}`
of the same shapes along the second axis.
This means that the second axis of :math:`x` must be (3 + N) times
the length of each :math:`c_{n}`.
The split input signals correspond to
- :math:`a` : sources of cell input
- :math:`i` : sources of input gate
- :math:`o` : sources of output gate
- :math:`f_{n}` : sources of forget gate for n-th ary
Second, it computes outputs as
.. math::
c &= \\tanh(a) \\text{sigmoid}(i) \\\\
& + c_{\\text{1}} \\text{sigmoid}(f_{\\text{1}}), \\\\
& + c_{\\text{2}} \\text{sigmoid}(f_{\\text{2}}), \\\\
& + ..., \\\\
& + c_{\\text{N}} \\text{sigmoid}(f_{\\text{N}}), \\\\
h &= \\tanh(c) \\text{sigmoid}(o).
These are returned as a tuple of two variables.
Args:
inputs (list of :class:`~chainerx.array`): Variable arguments which
include all cell vectors from child-nodes, and an input vector.
Each of the cell vectors and the input vector is
:class:`~chainerx.array`.
The second dimension of the input vector must be (N + 3) times the
size of the second dimension of each cell,
where N denotes the total number of cells.
Returns:
tuple: Two :class:`~chainerx.array` objects ``c`` and ``h``. ``c`` is
the updated cell state. ``h`` indicates the outgoing signal.
See the papers for details: `Improved Semantic Representations From
Tree-Structured Long Short-Term Memory Networks
<https://www.aclweb.org/anthology/P15-1150>`_ and
`A Fast Unified Model for Parsing and Sentence Understanding
<https://arxiv.org/pdf/1603.06021.pdf>`_.
Tai et al.'s N-ary TreeLSTM is slightly extended in
Bowman et al., and this function is based on
the variant by Bowman et al.
Specifically, eq. 10 in Tai et al. only has one :math:`W` matrix
to be applied to :math:`x`, consistently for all children.
On the other hand, Bowman et al.'s model has multiple matrices,
each of which affects the forget gate for each child's cell individually.
.. admonition:: Example
Assuming ``y`` is the current input signal, ``c`` is the previous cell
state, and ``h`` is the previous output signal from an
:meth:`~chainerx.tree_lstm` function.
Each of ``y``, ``c`` and ``h`` has ``n_units`` channels.
Using 2-ary (binary) TreeLSTM,
most typical preparation of ``x`` is
>>> c1 = chainerx.ones((4, 10), dtype=chainerx.float32)
>>> c2 = chainerx.ones((4, 10), dtype=chainerx.float32)
>>> x = chainerx.ones((4, 50), dtype=chainerx.float32)
>>> c, h = chainerx.tree_lstm(c1, c2, x)
""")
_docs.set_doc(
chainerx.slstm,
"""slstm(c_prev1, c_prev2, x1, x2)
S-LSTM units as an activation function.
This function implements S-LSTM unit. It is an extension of LSTM unit
applied to tree structures.
The function is applied to binary trees. Each node has two child nodes.
It gets four arguments, previous cell states ``c_prev1`` and ``c_prev2``,
and input arrays ``x1`` and ``x2``.
First both input arrays ``x1`` and ``x2`` are split into eight arrays
:math:`a_1, i_1, f_1, o_1`, and :math:`a_2, i_2, f_2, o_2`. They have the
same shape along the second axis.
This means that the second axis of ``x1`` and ``x2`` must be 4 times
the length of the second axis of ``c_prev1`` and ``c_prev2``.
The split input arrays correspond to
- :math:`a_i` : sources of cell input
- :math:`i_i` : sources of input gate
- :math:`f_i` : sources of forget gate
- :math:`o_i` : sources of output gate
It computes the updated cell state ``c`` and the outgoing signal
``h`` as.
.. math::
c &= \\tanh(a_1 + a_2) \\sigma(i_1 + i_2)
+ c_{\\text{prev}1} \\sigma(f_1)
+ c_{\\text{prev}2} \\sigma(f_2), \\\\
h &= \\tanh(c) \\sigma(o_1 + o_2),
where :math:`\\sigma` is the elementwise sigmoid function.
The function returns ``c`` and ``h`` as a tuple.
Args:
c_prev1 (:class:`~chainerx.array`):
Variable that holds the previous cell state of the first child
node. The cell state should be a zero array or the output of
the previous call of LSTM.
c_prev2 (:class:`~chainerx.array`):
Variable that holds the previous cell state of the second child
node.
x1 (:class:`~chainerx.array`):
Variable that holds the sources of cell input, input gate, forget
gate and output gate from the first child node. It must have the
second dimension whose size is four times of that of the cell
state.
x2 (:class:`~chainerx.array`):
Variable that holds the input sources from the second child node.
Returns:
tuple: Two :class:`~chainerx.array` objects ``c`` and ``h``. ``c`` is
the cell state. ``h`` indicates the outgoing signal.
See detail in paper: `Long Short-Term Memory Over Tree Structures
<https://arxiv.org/abs/1503.04881>`_.
.. admonition:: Example
Assuming ``c1``, ``c2`` is the previous cell state of children,
and ``h1``, ``h2`` is the previous outgoing signal from children.
Each of ``c1``, ``c2``, ``h1`` and ``h2`` has ``n_units`` channels.
Most typical preparation of ``x1``, ``x2`` is:
>>> n_units = 100
>>> c1 = chainerx.ones((1, n_units), chainerx.float32)
>>> c2 = chainerx.ones((1, n_units), chainerx.float32)
>>> x1 = chainerx.ones((1, 4 * n_units), chainerx.float32)
>>> x2 = chainerx.ones((1, 4 * n_units), chainerx.float32)
>>> c, h = chainerx.slstm(c1, c2, x1, x2)
""")
_docs.set_doc(
chainerx.arcsin,
"""arcsin(x)
Inverse sine, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\arcsin x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.arcsin`
""")
_docs.set_doc(
chainerx.arccos,
"""arccos(x)
Trigonometric inverse cosine, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\arccos x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.arccos`
""")
_docs.set_doc(
chainerx.arctan,
"""arctan(x)
Trigonometric inverse tangent, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\arctan x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.arctan`
""")
_docs.set_doc(
chainerx.arctan2,
"""arctan2(x1, x2)
Element-wise arc tangent of :math:`\\frac{x_1}{x_2}` choosing the quadrant
correctly.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returns an array where each element
represents :math:`\\theta` in the range :math:`[-\\pi, \\pi]`, such
that :math:`x_1 = r \\sin(\\theta)` and :math:`x_2 = r \\cos(\\theta)`
for some :math:`r > 0`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x1`` and/or ``x2``.
.. seealso:: :data:`numpy.arctan2`
""")
_docs.set_doc(
chainerx.arcsinh,
"""arcsinh(x)
Inverse hyperbolic sine, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\arcsinh x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.arcsinh`
""")
_docs.set_doc(
chainerx.arccosh,
"""arccosh(x)
Inverse hyperbolic cosine, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\arccosh x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.arccosh`
""")
_docs.set_doc(
chainerx.fabs,
"""fabs(x)
Compute the absolute values element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: The absolute values of x, the returned values
are always floats.
.. seealso:: :data:`numpy.fabs`
""")
_docs.set_doc(
chainerx.sign,
"""sign(x)
Returns an element-wise indication of the sign of a number.
The sign function returns :math:`-1` if :math:`x < 0`, :math:`0` if
:math:`x = 0`, and :math:`1` if :math:`x > 0`.
``nan`` is returned for ``nan`` inputs.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: The sign of x.
.. seealso:: :data:`numpy.sign`
""")
_docs.set_doc(
chainerx.floor,
"""floor(x)
Return the floor of the input, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: The floor of each element in array.
.. seealso:: :data:`numpy.floor`
""")
_docs.set_doc(
chainerx.isnan,
"""isnan(x)
Test element-wise for NaN and return result as a boolean array.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: True where ``x`` is NaN, false otherwise
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.isnan`
""")
_docs.set_doc(
chainerx.isfinite,
"""isfinite(x)
Test element-wise for finiteness (not infinity or not Not a Number).
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: True where x is not positive infinity,
negative infinity, or NaN; false otherwise.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.isfinite`
""")
_docs.set_doc(
chainerx.isinf,
"""isinf(x)
Test element-wise for positive or negative infinity.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: True where ``x`` is positive or negative
infinity, false otherwise.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.isinf`
""")
_docs.set_doc(
chainerx.bitwise_and,
"""bitwise_and(x1, x2)
Compute the bit-wise AND of two arrays element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array of integers.
x2 (~chainerx.ndarray or scalar): Input array of integers.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = x_1 \\& x_2`
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.bitwise_and`
""")
_docs.set_doc(
chainerx.bitwise_or,
"""bitwise_or(x1, x2)
Compute the bit-wise OR of two arrays element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array of integers.
x2 (~chainerx.ndarray or scalar): Input array of integers.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = x_1 | x_2`
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.bitwise_or`
""")
_docs.set_doc(
chainerx.bitwise_xor,
"""bitwise_xor(x1, x2)
Compute the bit-wise XOR of two arrays element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array of integers.
x2 (~chainerx.ndarray or scalar): Input array of integers.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = x_1 \\oplus x_2`
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.bitwise_xor`
""")
_docs.set_doc(
chainerx.left_shift,
"""left_shift(x1, x2)
Shift the bits of an integer to the left.
Args:
x1 (~chainerx.ndarray or scalar): Input array of integers.
x2 (~chainerx.ndarray or scalar): Input array of integers.
Returns:
:class:`~chainerx.ndarray`: Return ``x1`` with bits shifted to the left by ``x2``.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.left_shift`
""") # NOQA
_docs.set_doc(
chainerx.right_shift,
"""right_shift(x1, x2)
Shift the bits of an integer to the right.
Args:
x1 (~chainerx.ndarray or scalar): Input array of integers.
x2 (~chainerx.ndarray or scalar): Input array of integers.
Returns:
:class:`~chainerx.ndarray`: Return ``x1`` with bits shifted to the right by ``x2``.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.right_shift`
""") # NOQA
def _docs_sorting():
_docs.set_doc(
chainerx.argmax,
"""argmax(a, axis=None)
Returns the indices of the maximum along an axis.
Args:
a (~chainerx.ndarray): Array to take the indices of the maximum of.
axis (None or int): Along which axis to compute the maximum. The flattened
array is used by default.
Returns:
:class:`~chainerx.ndarray`: The indices of the maximum of ``a``, along the
axis if specified.
.. seealso:: :func:`numpy.argmax`
""")
_docs.set_doc(
chainerx.argmin,
"""argmin(a, axis=None)
Returns the indices of the minimum along an axis.
Args:
a (~chainerx.ndarray): Array to take the indices of the minimum of.
axis (None or int): Along which axis to compute the minimum. The flattened
array is used by default.
Returns:
:class:`~chainerx.ndarray`: The indices of the minimum of ``a``, along the
axis if specified.
.. seealso:: :func:`numpy.argmin`
""")
def _docs_statistics():
_docs.set_doc(
chainerx.amax,
"""amax(a, axis=None, keepdims=False)
Returns the maximum of an array or the maximum along an axis.
Note:
When at least one element is NaN, the corresponding max value will be NaN.
Args:
a (~chainerx.ndarray): Array to take the maximum.
axis (None or int or tuple of ints): Along which axis to take the maximum.
The flattened array is used by default.
If this is a tuple of ints, the maximum is selected over multiple
axes, instead of a single axis or all the axes.
keepdims (bool): If ``True``, the reduced axis is retained as an axis of size one.
Returns:
:class:`~chainerx.ndarray`: The maximum of ``a``, along the axis if
specified.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.amax`
""")
_docs.set_doc(
chainerx.amin,
"""amin(a, axis=None, keepdims=False)
Returns the minimum of an array or the minimum along an axis.
Note:
When at least one element is NaN, the corresponding min value will be NaN.
Args:
a (~chainerx.ndarray): Array to take the minimum.
axis (None or int or tuple of ints): Along which axis to take the minimum.
The flattened array is used by default.
If this is a tuple of ints, the minimum is selected over multiple
axes, instead of a single axis or all the axes.
keepdims (bool): If ``True``, the reduced axis is retained as an axis of size one.
Returns:
:class:`~chainerx.ndarray`: The minimum of ``a``, along the axis if
specified.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.amin`
""")
_docs.set_doc(
chainerx.mean,
"""mean(a, axis=None, keepdims=False)
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements. The average is taken over the
flattened array by default, otherwise over the specified axis.
Args:
a (~chainerx.ndarray): Array to take the mean of.
axis (None or int or tuple of ints): Along which axis or axes to compute
the mean. The flattened array is used by default.
keepdims (bool): If this is set to True, the axes which are reduced are
left in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
Returns:
:class:`~chainerx.ndarray`: The mean of ``a``, along the axis or axes if
specified.
.. seealso:: :func:`numpy.mean`
""")
_docs.set_doc(
chainerx.var,
"""var(a, axis=None, keepdims=False)
Compute the variance along the specified axis.
Returns the variance of the array elements. The variance is taken over the flattened
array by default, otherwise over the specified axis.
Args:
a (~chainerx.ndarray): Array to take the var of.
axis (None or int or tuple of ints): Along which axis or axes to compute
the var. The flattened array is used by default.
keepdims (bool): If this is set to True, the axes which are reduced are
left in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
Returns:
:class:`~chainerx.ndarray`: The var of ``a``, along the axis or axes if
specified.
.. seealso:: :func:`numpy.var`
""")
def _docs_connection():
_docs.set_doc(
chainerx.conv,
"""conv(x, w, b=None, stride=1, pad=0, cover_all=False)
N-dimensional convolution.
This is an implementation of N-dimensional convolution which is generalized
two-dimensional convolution in ConvNets. It takes three arrays: the
input ``x``, the filter weight ``w`` and the bias vector ``b``.
Notation: here is a notation for dimensionalities.
- :math:`N` is the number of spatial dimensions.
- :math:`n` is the batch size.
- :math:`c_I` and :math:`c_O` are the number of the input and output
channels, respectively.
- :math:`d_1, d_2, ..., d_N` are the size of each axis of the input's
spatial dimensions, respectively.
- :math:`k_1, k_2, ..., k_N` are the size of each axis of the filters,
respectively.
- :math:`l_1, l_2, ..., l_N` are the size of each axis of the output's
spatial dimensions, respectively.
- :math:`p_1, p_2, ..., p_N` are the size of each axis of the spatial
padding size, respectively.
Then the ``conv`` function computes correlations between filters
and patches of size :math:`(k_1, k_2, ..., k_N)` in ``x``.
Note that correlation here is equivalent to the inner product between
expanded tensors.
Patches are extracted at positions shifted by multiples of ``stride`` from
the first position ``(-p_1, -p_2, ..., -p_N)`` for each spatial axis.
Let :math:`(s_1, s_2, ..., s_N)` be the stride of filter application.
Then, the output size :math:`(l_1, l_2, ..., l_N)` is determined by the
following equations:
.. math::
l_n = (d_n + 2p_n - k_n) / s_n + 1 \\ \\ (n = 1, ..., N)
If ``cover_all`` option is ``True``, the filter will cover the all
spatial locations. So, if the last stride of filter does not cover the
end of spatial locations, an additional stride will be applied to the end
part of spatial locations. In this case, the output size is determined by
the following equations:
.. math::
l_n = (d_n + 2p_n - k_n + s_n - 1) / s_n + 1 \\ \\ (n = 1, ..., N)
Args:
x (:class:`~chainerx.ndarray`):
Input array of shape :math:`(n, c_I, d_1, d_2, ..., d_N)`.
w (:class:`~chainerx.ndarray`):
Weight array of shape :math:`(c_O, c_I, k_1, k_2, ..., k_N)`.
b (None or :class:`~chainerx.ndarray`):
One-dimensional bias array with length :math:`c_O` (optional).
stride (:class:`int` or :class:`tuple` of :class:`int` s):
Stride of filter applications :math:`(s_1, s_2, ..., s_N)`.
``stride=s`` is equivalent to ``(s, s, ..., s)``.
pad (:class:`int` or :class:`tuple` of :class:`int` s):
Spatial padding width for input arrays
:math:`(p_1, p_2, ..., p_N)`. ``pad=p`` is equivalent to
``(p, p, ..., p)``.
cover_all (bool): If ``True``, all spatial locations are convoluted
into some output pixels. It may make the output size larger.
``cover_all`` needs to be ``False`` if you want to use the ``cuda`` backend.
Returns:
~chainerx.ndarray:
Output array of shape :math:`(n, c_O, l_1, l_2, ..., l_N)`.
Note:
In ``cuda`` backend, this function uses cuDNN implementation for its
forward and backward computation.
Note:
In ``cuda`` backend, this function has following limitations yet:
- The ``cover_all=True`` option is not supported yet.
- The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
Note:
During backpropagation, this function propagates the gradient of the
output array to input arrays ``x``, ``w``, and ``b``.
.. seealso:: :func:`chainer.functions.convolution_nd`
.. admonition:: Example
>>> n = 10
>>> c_i, c_o = 3, 1
>>> d1, d2, d3 = 30, 40, 50
>>> k1, k2, k3 = 10, 10, 10
>>> p1, p2, p3 = 5, 5, 5
>>> x = chainerx.random.uniform(0, 1, (n, c_i, d1, d2, d3)).\
astype(np.float32)
>>> x.shape
(10, 3, 30, 40, 50)
>>> w = chainerx.random.uniform(0, 1, (c_o, c_i, k1, k2, k3)).\
astype(np.float32)
>>> w.shape
(1, 3, 10, 10, 10)
>>> b = chainerx.random.uniform(0, 1, (c_o)).astype(np.float32)
>>> b.shape
(1,)
>>> s1, s2, s3 = 2, 4, 6
>>> y = chainerx.conv(x, w, b, stride=(s1, s2, s3),\
pad=(p1, p2, p3))
>>> y.shape
(10, 1, 16, 11, 9)
>>> l1 = int((d1 + 2 * p1 - k1) / s1 + 1)
>>> l2 = int((d2 + 2 * p2 - k2) / s2 + 1)
>>> l3 = int((d3 + 2 * p3 - k3) / s3 + 1)
>>> y.shape == (n, c_o, l1, l2, l3)
True
>>> y = chainerx.conv(x, w, b, stride=(s1, s2, s3),\
pad=(p1, p2, p3), cover_all=True)
>>> y.shape == (n, c_o, l1, l2, l3 + 1)
True
""")
_docs.set_doc(
chainerx.conv_transpose,
"""conv_transpose(x, w, b=None, stride=1, pad=0, outsize=None)
N-dimensional transposed convolution.
This is an implementation of N-dimensional transposed convolution, which is
previously known as **deconvolution** in Chainer.
.. _Deconvolutional Networks: \
http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf
It takes three arrays: the input ``x``, the filter weight ``w``, and the
bias vector ``b``.
Notation: here is a notation for dimensionalities.
- :math:`N` is the number of spatial dimensions.
- :math:`n` is the batch size.
- :math:`c_I` and :math:`c_O` are the number of the input and output
channels, respectively.
- :math:`d_1, d_2, ..., d_N` are the size of each axis of the input's
spatial dimensions, respectively.
- :math:`k_1, k_2, ..., k_N` are the size of each axis of the filters,
respectively.
- :math:`p_1, p_2, ..., p_N` are the size of each axis of the spatial
padding size, respectively.
- :math:`s_1, s_2, ..., s_N` are the stride of each axis of filter
application, respectively.
If ``outsize`` option is ``None``, the output size
:math:`(l_1, l_2, ..., l_N)` is determined by the following equations with
the items in the above list:
.. math::
l_n = s_n (d_n - 1) + k_n - 2 p_n \\ \\ (n = 1, ..., N)
If ``outsize`` option is given, the output size is determined by
``outsize``. In this case, the ``outsize`` :math:`(l_1, l_2, ..., l_N)`
must satisfy the following equations:
.. math::
d_n = \\lfloor (l_n + 2p_n - k_n) / s_n \\rfloor + 1 \\ \\ \
(n = 1, ..., N)
Args:
x (:class:`~chainerx.ndarray`):
Input array of shape :math:`(n, c_I, d_1, d_2, ..., d_N)`.
w (:class:`~chainerx.ndarray`):
Weight array of shape :math:`(c_I, c_O, k_1, k_2, ..., k_N)`.
b (None or :class:`~chainerx.ndarray`):
One-dimensional bias array with length :math:`c_O` (optional).
stride (:class:`int` or :class:`tuple` of :class:`int` s):
Stride of filter applications :math:`(s_1, s_2, ..., s_N)`.
``stride=s`` is equivalent to ``(s, s, ..., s)``.
pad (:class:`int` or :class:`tuple` of :class:`int` s):
Spatial padding width for input arrays
:math:`(p_1, p_2, ..., p_N)`. ``pad=p`` is equivalent to
``(p, p, ..., p)``.
outsize (None or :class:`tuple` of :class:`int` s):
Expected output size of deconvolutional operation. It should be a
tuple of ints :math:`(l_1, l_2, ..., l_N)`. Default value is
``None`` and the outsize is estimated by input size, stride and
pad.
Returns:
~chainerx.ndarray:
Output array of shape :math:`(n, c_O, l_1, l_2, ..., l_N)`.
Note:
During backpropagation, this function propagates the gradient of the
output array to input arrays ``x``, ``w``, and ``b``.
.. seealso:: :func:`chainer.functions.deconvolution_nd`
.. admonition:: Example
**Example1**: the case when ``outsize`` is not given.
>>> n = 10
>>> c_i, c_o = 3, 1
>>> d1, d2, d3 = 5, 10, 15
>>> k1, k2, k3 = 10, 10, 10
>>> p1, p2, p3 = 5, 5, 5
>>> x = chainerx.random.uniform(0, 1, (n, c_i, d1, d2, d3)).\
astype(np.float32)
>>> x.shape
(10, 3, 5, 10, 15)
>>> w = chainerx.random.uniform(0, 1, (c_i, c_o, k1, k2, k3)).\
astype(np.float32)
>>> w.shape
(3, 1, 10, 10, 10)
>>> b = chainerx.random.uniform(0, 1, (c_o)).astype(np.float32)
>>> b.shape
(1,)
>>> s1, s2, s3 = 2, 4, 6
>>> y = chainerx.conv_transpose(x, w, b, stride=(s1, s2, s3), \
pad=(p1, p2, p3))
>>> y.shape
(10, 1, 8, 36, 84)
>>> l1 = s1 * (d1 - 1) + k1 - 2 * p1
>>> l2 = s2 * (d2 - 1) + k2 - 2 * p2
>>> l3 = s3 * (d3 - 1) + k3 - 2 * p3
>>> y.shape == (n, c_o, l1, l2, l3)
True
**Example2**: the case when ``outsize`` is given.
>>> n = 10
>>> c_i, c_o = 3, 1
>>> d1, d2, d3 = 5, 10, 15
>>> k1, k2, k3 = 10, 10, 10
>>> p1, p2, p3 = 5, 5, 5
>>> x = chainerx.array(np.random.uniform(0, 1, (n, c_i, d1, d2, d3)).\
astype(np.float32))
>>> x.shape
(10, 3, 5, 10, 15)
>>> w = chainerx.array(np.random.uniform(0, 1, (c_i, c_o, k1, k2, k3)).\
astype(np.float32))
>>> w.shape
(3, 1, 10, 10, 10)
>>> b = chainerx.array(np.random.uniform(0, 1, (c_o)).astype(np.float32))
>>> b.shape
(1,)
>>> s1, s2, s3 = 2, 4, 6
>>> l1, l2, l3 = 9, 38, 87
>>> d1 == int((l1 + 2 * p1 - k1) / s1) + 1
True
>>> d2 == int((l2 + 2 * p2 - k2) / s2) + 1
True
>>> d3 == int((l3 + 2 * p3 - k3) / s3) + 1
True
>>> y = chainerx.conv_transpose(x, w, b, stride=(s1, s2, s3), \
pad=(p1, p2, p3), outsize=(l1, l2, l3))
>>> y.shape
(10, 1, 9, 38, 87)
>>> y.shape == (n, c_o, l1, l2, l3)
True
""")
_docs.set_doc(
chainerx.linear,
"""linear(x, W, b=None, n_batch_axis=1)
Linear function, or affine transformation.
It accepts two or three arguments: an input minibatch ``x``, a weight
matrix ``W``, and optionally a bias vector ``b``. It computes
.. math:: Y = xW^\\top + b.
Args:
x (~chainerx.ndarray):
Input array, which is a :math:`(s_1, s_2, ..., s_n)`-shaped array.
W (~chainerx.ndarray):
Weight variable of shape :math:`(M, N)`,
where :math:`(N = s_{\\rm n\\_batch\\_axes} * ... * s_n)`.
b (~chainerx.ndarray):
Bias variable (optional) of shape :math:`(M,)`.
n_batch_axes (int):
The number of batch axes. The default is 1. The input variable is
reshaped into (:math:`{\\rm n\\_batch\\_axes} + 1`)-dimensional
tensor. This should be greater than 0.
Returns:
:class:`~chainerx.ndarray`:
Output array with shape of
:math:`(s_1, ..., s_{\\rm n\\_batch\\_axes}, M)`.
Note:
During backpropagation, this function propagates the gradient of the
output array to input arrays ``x``, ``W`` and ``b``.
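.. admonition:: Example
A minimal shape-level sketch with illustrative sizes (a batch of 5 inputs
with 8 features mapped to 3 output units):
>>> x = chainerx.ones((5, 8), dtype=chainerx.float32)
>>> W = chainerx.ones((3, 8), dtype=chainerx.float32)
>>> b = chainerx.ones((3,), dtype=chainerx.float32)
>>> chainerx.linear(x, W, b).shape
(5, 3)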
""")
_docs.set_doc(
chainerx.lstm,
"""lstm(c_prev, x)
Long Short-Term Memory units as an activation function.
This function implements LSTM units with forget gates. Let ``c_prev`` be the
previous cell state and ``x`` the input array.
First, the input array ``x`` is split into four arrays
:math:`a, i, f, o` of the same shapes along the second axis. This means that
the second axis of ``x`` must be 4 times the length of the second axis of
``c_prev``.
The split input arrays correspond to:
- :math:`a` : sources of cell input
- :math:`i` : sources of input gate
- :math:`f` : sources of forget gate
- :math:`o` : sources of output gate
Second, it computes the updated cell state ``c`` and the outgoing signal
``h`` as
.. math::
c &= \\tanh(a) \\sigma(i)
+ c_{\\text{prev}} \\sigma(f), \\\\
h &= \\tanh(c) \\sigma(o),
where :math:`\\sigma` is the elementwise sigmoid function.
These are returned as a tuple of two variables.
This function supports variable length inputs. The mini-batch size of
the current input must be equal to or smaller than that of the previous
one. When mini-batch size of ``x`` is smaller than that of ``c``, this
function only updates ``c[0:len(x)]`` and doesn't change the rest of ``c``,
``c[len(x):]``. So,
please sort input sequences in descending order of lengths before
applying the function.
Args:
c_prev (:class:`~chainerx.array`):
Variable that holds the previous cell state. The cell state
should be a zero array or the output of the previous call of LSTM.
x (:class:`~chainerx.array`):
Variable that holds the sources of cell input, input gate, forget
gate and output gate. It must have the second dimension whose size
is four times of that of the cell state.
Returns:
tuple: Two :class:`~chainerx.array` objects ``c`` and ``h``.
``c`` is the updated cell state. ``h`` indicates the outgoing signal.
See the original paper proposing LSTM with forget gates:
`Long Short-Term Memory in Recurrent Neural Networks
<http://www.felixgers.de/papers/phd.pdf>`_.
.. admonition:: Example
Assuming ``y`` is the current incoming signal, ``c`` is the previous
cell state, and ``h`` is the previous outgoing signal from an ``lstm``
function. Each of ``y``, ``c`` and ``h`` has ``n_units`` channels.
Most typical preparation of ``x`` is
>>> n_units = 100
>>> c_prev = chainerx.zeros((1, n_units), chainerx.float32)
>>> x = chainerx.zeros((1, 4 * n_units), chainerx.float32)
>>> c, h = chainerx.lstm(c_prev, x)
It corresponds to calculate the input array ``x``, or the input
sources :math:`a, i, f, o`, from the current incoming signal ``y`` and
the previous outgoing signal ``h``. Different parameters are used for
different kind of input sources.
""")
def _docs_normalization():
_docs.set_doc(
chainerx.batch_norm,
"""batch_norm(x, gamma, beta, running_mean, running_var, eps=2e-5, \
decay=0.9, axis=None)
Batch normalization function.
It takes the input array ``x`` and two parameter arrays ``gamma`` and
``beta``. The parameter arrays must both have the same size.
Args:
x (~chainerx.ndarray): Input array.
gamma (~chainerx.ndarray): Scaling parameter of normalized data.
beta (~chainerx.ndarray): Shifting parameter of scaled normalized data.
running_mean (~chainerx.ndarray):
Running average of the mean. This is a running average of
the mean over several mini-batches using the decay parameter.
The function takes a previous running average, and updates
the array in-place by the new running average.
running_var (~chainerx.ndarray):
Running average of the variance. This is a running average of
the variance over several mini-batches using the decay parameter.
The function takes a previous running average, and updates
the array in-place by the new running average.
eps (float): Epsilon value for numerical stability.
decay (float): Decay rate of moving average. It is used during training.
axis (int, tuple of int or None):
Axis over which normalization is performed. When axis is ``None``,
the first axis is treated as the batch axis and will be reduced
during normalization.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x``, ``gamma`` and ``beta``.
See: `Batch Normalization: Accelerating Deep Network Training by Reducing\
Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`_
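.. admonition:: Example
A minimal shape-level sketch with illustrative sizes; the running statistics
below are freshly initialized placeholders:
>>> x = chainerx.ones((5, 3), dtype=chainerx.float32)
>>> gamma = chainerx.ones((3,), dtype=chainerx.float32)
>>> beta = chainerx.zeros((3,), dtype=chainerx.float32)
>>> running_mean = chainerx.zeros((3,), dtype=chainerx.float32)
>>> running_var = chainerx.ones((3,), dtype=chainerx.float32)
>>> chainerx.batch_norm(x, gamma, beta, running_mean, running_var).shape
(5, 3)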
""")
_docs.set_doc(
chainerx.fixed_batch_norm,
"""fixed_batch_norm(x, gamma, beta, mean, var, eps=2e-5, axis=None)
Batch normalization function with fixed statistics.
This is a variant of :func:`~chainerx.batch_norm`, where the mean
and array statistics are given by the caller as fixed variables.
Args:
x (~chainerx.ndarray): Input array.
gamma (~chainerx.ndarray): Scaling parameter of normalized data.
beta (~chainerx.ndarray): Shifting parameter of scaled normalized data.
mean (~chainerx.ndarray): Shifting parameter of input.
var (~chainerx.ndarray): Square of scaling parameter of input.
eps (float): Epsilon value for numerical stability.
axis (int, tuple of int or None):
Axis over which normalization is performed. When axis is ``None``,
the first axis is treated as the batch axis and will be reduced
during normalization.
Note:
During backpropagation, this function does not propagate gradients.
""")
def _docs_pooling():
_docs.set_doc(
chainerx.max_pool,
"""max_pool(x, ksize, stride=None, pad=0, cover_all=False)
Spatial max pooling function.
This acts similarly to :func:`~chainerx.conv`, but it computes the maximum
of input spatial patch for each channel without any parameter instead of
computing the inner products.
Args:
x (~chainerx.ndarray): Input array.
ksize (int or tuple of ints): Size of pooling window. ``ksize=k`` and
``ksize=(k, k, ..., k)`` are equivalent.
stride (int or tuple of ints or None): Stride of pooling applications.
``stride=s`` and ``stride=(s, s, ..., s)`` are equivalent. If
``None`` is specified, then it uses same stride as the pooling
window size.
pad (int or tuple of ints): Spatial padding width for the input array.
``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
cover_all (bool): If ``True``, all spatial locations are pooled into
some output pixels. It may make the output size larger.
Returns:
:class:`~chainerx.ndarray`: Output array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``. This function is only
differentiable up to the second order.
.. note::
In ``cuda`` backend, only 2 and 3 dim arrays are supported as ``x``
because cuDNN pooling supports 2 and 3 spatial dimensions.
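.. admonition:: Example
A minimal shape-level sketch with an illustrative input of two spatial
dimensions:
>>> x = chainerx.ones((1, 3, 8, 8), dtype=chainerx.float32)
>>> chainerx.max_pool(x, ksize=2).shape  # stride defaults to ksize
(1, 3, 4, 4)
>>> chainerx.max_pool(x, ksize=3, stride=2, pad=1).shape
(1, 3, 4, 4)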
""")
_docs.set_doc(
chainerx.average_pool,
"""average_pool(x, ksize, stride=None, pad=0, pad_mode='ignore')
Spatial average pooling function.
This acts similarly to :func:`~chainerx.conv`, but it computes the average
of input spatial patch for each channel without any parameter instead of
computing the inner products.
Args:
x (~chainerx.ndarray): Input array.
ksize (int or tuple of ints): Size of pooling window. ``ksize=k`` and
``ksize=(k, k, ..., k)`` are equivalent.
stride (int or tuple of ints or None): Stride of pooling applications.
``stride=s`` and ``stride=(s, s, ..., s)`` are equivalent. If
``None`` is specified, then it uses same stride as the pooling
window size.
pad (int or tuple of ints): Spatial padding width for the input array.
``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
pad_mode ({'zero', 'ignore'}): Specifies how padded region is treated.
* 'zero' -- the values in the padded region are treated as 0
* 'ignore' -- padded region is ignored (default)
Returns:
:class:`~chainerx.ndarray`: Output array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. note::
In ``cuda`` backend, only 2 and 3 dim arrays are supported as ``x``
because cuDNN pooling supports 2 and 3 spatial dimensions.
""")
def _docs_rnn():
_docs.set_doc(
chainerx.n_step_lstm,
"""n_step_lstm(n_layers, hx, cx, ws, bs, xs)
Stacked Uni-directional Long Short-Term Memory function.
This function calculates stacked Uni-directional LSTM with sequences.
This function gets an initial hidden state :math:`h_0`, an initial cell
state :math:`c_0`, an input sequence :math:`x`, weight matrices :math:`W`,
and bias vectors :math:`b`.
This function calculates hidden states :math:`h_t` and :math:`c_t` for each
time :math:`t` from input :math:`x_t`.
.. math::
i_t &= \\sigma(W_0 x_t + W_4 h_{t-1} + b_0 + b_4) \\\\
f_t &= \\sigma(W_1 x_t + W_5 h_{t-1} + b_1 + b_5) \\\\
o_t &= \\sigma(W_2 x_t + W_6 h_{t-1} + b_2 + b_6) \\\\
a_t &= \\tanh(W_3 x_t + W_7 h_{t-1} + b_3 + b_7) \\\\
c_t &= f_t \\cdot c_{t-1} + i_t \\cdot a_t \\\\
h_t &= o_t \\cdot \\tanh(c_t)
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Eight weight matrices and eight bias vectors are
required for each layer. So, when :math:`S` layers exist, you need to
prepare :math:`8S` weight matrices and :math:`8S` bias vectors.
If the number of layers ``n_layers`` is greater than :math:`1`, the input
of the ``k``-th layer is the hidden state ``h_t`` of the ``k-1``-th layer.
Note that all input variables except the first layer may have different
shape from the first layer.
Args:
n_layers(int): The number of layers.
hx (:class:`~chainerx.array`):
Variable holding stacked hidden states.
Its shape is ``(S, B, N)`` where ``S`` is the number of layers and
is equal to ``n_layers``, ``B`` is the mini-batch size, and ``N``
is the dimension of the hidden units.
cx (:class:`~chainerx.array`): Variable holding stacked cell states.
It has the same shape as ``hx``.
ws (list of list of :class:`~chainerx.array`): Weight matrices.
``ws[i]`` represents the weights for the i-th layer.
Each ``ws[i]`` is a list containing eight matrices.
``ws[i][j]`` corresponds to :math:`W_j` in the equation.
Only ``ws[0][j]`` where ``0 <= j < 4`` are ``(N, I)``-shaped as
they are multiplied with input variables, where ``I`` is the size
of the input and ``N`` is the dimension of the hidden units. All
other matrices are ``(N, N)``-shaped.
bs (list of list of :class:`~chainerx.array`): Bias vectors.
``bs[i]`` represents the biases for the i-th layer.
Each ``bs[i]`` is a list containing eight vectors.
``bs[i][j]`` corresponds to :math:`b_j` in the equation.
The shape of each matrix is ``(N,)`` where ``N`` is the dimension
of the hidden units.
xs (list of :class:`~chainerx.array`):
A list of :class:`~chainerx.array`
holding input values. Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is the
mini-batch size for time ``t``.
        When sequences have different lengths, they must be
sorted in descending order of their lengths.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
Returns:
tuple: This function returns a tuple containing three elements,
``hy``, ``cy`` and ``ys``.
- ``hy`` is an updated hidden states whose shape is the same as
``hx``.
- ``cy`` is an updated cell states whose shape is the same as
``cx``.
- ``ys`` is a list of :class:`~chainerx.array` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
the mini-batch size for time ``t``, and ``N`` is size of hidden
      units. Note that ``B_t`` is the same value as ``xs[t].shape[0]``.
.. note::
The dimension of hidden units is limited to only one size ``N``. If you
want to use variable dimension of hidden units, please use
:class:`chainerx.lstm`.
.. seealso::
:func:`chainerx.lstm`
.. admonition:: Example
>>> import chainerx as chx
>>> batchs = [3, 2, 1] # support variable length sequences
>>> in_size, out_size, n_layers = 3, 2, 2
>>> xs = [chx.ones((b, in_size)).astype(chx.float32) for b in batchs]
>>> [x.shape for x in xs]
[(3, 3), (2, 3), (1, 3)]
>>> h_shape = (n_layers, batchs[0], out_size)
>>> hx = chx.ones(h_shape).astype(chx.float32)
>>> cx = chx.ones(h_shape).astype(chx.float32)
>>> w_in = lambda i, j: in_size if i == 0 and j < 4 else out_size
>>> ws = []
>>> bs = []
>>> for n in range(n_layers):
... ws.append([chx.ones((out_size, w_in(n, i))).\
astype(chx.float32) for i in range(8)])
... bs.append([chx.ones((out_size,)).astype(chx.float32) \
for _ in range(8)])
...
>>> ws[0][0].shape # ws[0][:4].shape are (out_size, in_size)
(2, 3)
>>> ws[1][0].shape # others are (out_size, out_size)
(2, 2)
>>> bs[0][0].shape
(2,)
>>> hy, cy, ys = chx.n_step_lstm(
... n_layers, hx, cx, ws, bs, xs)
>>> hy.shape
(2, 3, 2)
>>> cy.shape
(2, 3, 2)
>>> [y.shape for y in ys]
[(3, 2), (2, 2), (1, 2)]
""")
_docs.set_doc(
chainerx.n_step_bilstm,
"""n_step_bilstm(n_layers, hx, cx, ws, bs, xs)
Stacked Bi-directional Long Short-Term Memory function.
This function calculates stacked Bi-directional LSTM with sequences.
This function gets an initial hidden state :math:`h_0`, an initial cell
state :math:`c_0`, an input sequence :math:`x`, weight matrices :math:`W`,
and bias vectors :math:`b`.
This function calculates hidden states :math:`h_t` and :math:`c_t` for each
time :math:`t` from input :math:`x_t`.
.. math::
i^{f}_t &=& \\sigma(W^{f}_0 x_t + W^{f}_4 h_{t-1} + b^{f}_0 + b^{f}_4),
\\\\
f^{f}_t &=& \\sigma(W^{f}_1 x_t + W^{f}_5 h_{t-1} + b^{f}_1 + b^{f}_5),
\\\\
o^{f}_t &=& \\sigma(W^{f}_2 x_t + W^{f}_6 h_{t-1} + b^{f}_2 + b^{f}_6),
\\\\
a^{f}_t &=& \\tanh(W^{f}_3 x_t + W^{f}_7 h_{t-1} + b^{f}_3 + b^{f}_7),
\\\\
c^{f}_t &=& f^{f}_t \\cdot c^{f}_{t-1} + i^{f}_t \\cdot a^{f}_t,
\\\\
h^{f}_t &=& o^{f}_t \\cdot \\tanh(c^{f}_t),
\\\\
i^{b}_t &=& \\sigma(W^{b}_0 x_t + W^{b}_4 h_{t-1} + b^{b}_0 + b^{b}_4),
\\\\
f^{b}_t &=& \\sigma(W^{b}_1 x_t + W^{b}_5 h_{t-1} + b^{b}_1 + b^{b}_5),
\\\\
o^{b}_t &=& \\sigma(W^{b}_2 x_t + W^{b}_6 h_{t-1} + b^{b}_2 + b^{b}_6),
\\\\
a^{b}_t &=& \\tanh(W^{b}_3 x_t + W^{b}_7 h_{t-1} + b^{b}_3 + b^{b}_7),
\\\\
c^{b}_t &=& f^{b}_t \\cdot c^{b}_{t-1} + i^{b}_t \\cdot a^{b}_t, \\\\
h^{b}_t &=& o^{b}_t \\cdot \\tanh(c^{b}_t), \\\\
h_t &=& [h^{f}_t; h^{b}_t]
where :math:`W^{f}` is the weight matrices for forward-LSTM, :math:`W^{b}`
is weight matrices for backward-LSTM.
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Eight weight matrices and eight bias vectors are
required for each layer of each direction. So, when :math:`S` layers
exist, you need to prepare :math:`16S` weight matrices and :math:`16S`
bias vectors.
If the number of layers ``n_layers`` is greater than :math:`1`, the input
of the ``k``-th layer is the hidden state ``h_t`` of the ``k-1``-th layer.
Note that all input variables except the first layer may have different
shape from the first layer.
Args:
n_layers(int): The number of layers.
hx (:class:`~chainerx.array`):
Variable holding stacked hidden states.
Its shape is ``(2S, B, N)`` where ``S`` is the number of layers and
is equal to ``n_layers``, ``B`` is the mini-batch size, and ``N``
is the dimension of the hidden units. Because of bi-direction, the
first dimension length is ``2S``.
cx (:class:`~chainerx.array`): Variable holding stacked cell states.
It has the same shape as ``hx``.
ws (list of list of :class:`~chainerx.array`): Weight matrices.
``ws[2 * l + m]`` represents the weights for the l-th layer of
the m-th direction. (``m == 0`` means the forward direction and
``m == 1`` means the backward direction.) Each ``ws[i]`` is a
list containing eight matrices. ``ws[i][j]`` corresponds to
:math:`W_j` in the equation. ``ws[0][j]`` and ``ws[1][j]`` where
``0 <= j < 4`` are ``(N, I)``-shaped because they are multiplied
with input variables, where ``I`` is the size of the input.
``ws[i][j]`` where ``2 <= i`` and ``0 <= j < 4`` are
``(N, 2N)``-shaped because they are multiplied with two hidden
layers :math:`h_t = [h^{f}_t; h^{b}_t]`. All other matrices are
``(N, N)``-shaped.
bs (list of list of :class:`~chainerx.array`): Bias vectors.
``bs[2 * l + m]`` represents the weights for the l-th layer of
m-th direction. (``m == 0`` means the forward direction and
``m == 1`` means the backward direction.)
Each ``bs[i]`` is a list containing eight vectors.
``bs[i][j]`` corresponds to :math:`b_j` in the equation.
The shape of each matrix is ``(N,)``.
xs (list of :class:`~chainerx.array`):
A list of :class:`~chainerx.array`
holding input values. Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is the
mini-batch size for time ``t``.
        When sequences have different lengths, they must be
sorted in descending order of their lengths.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
Returns:
tuple: This function returns a tuple containing three elements,
``hy``, ``cy`` and ``ys``.
- ``hy`` is an updated hidden states whose shape is the same as
``hx``.
- ``cy`` is an updated cell states whose shape is the same as
``cx``.
    - ``ys`` is a list of :class:`~chainerx.array` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, 2N)`` where ``B_t``
is the mini-batch size for time ``t``, and ``N`` is size of
      hidden units. Note that ``B_t`` is the same value as ``xs[t].shape[0]``.
.. admonition:: Example
>>> import chainerx as chx
>>> batchs = [3, 2, 1] # support variable length sequences
>>> in_size, out_size, n_layers = 3, 2, 2
>>> dropout_ratio = 0.0
>>> xs = [chx.ones((b, in_size)).astype(chx.float32) for b in batchs]
>>> [x.shape for x in xs]
[(3, 3), (2, 3), (1, 3)]
>>> h_shape = (n_layers * 2, batchs[0], out_size)
>>> hx = chx.ones(h_shape).astype(chx.float32)
>>> cx = chx.ones(h_shape).astype(chx.float32)
>>> def w_in(i, j):
... if i == 0 and j < 4:
... return in_size
... elif i > 0 and j < 4:
... return out_size * 2
... else:
... return out_size
...
>>> ws = []
>>> bs = []
>>> for n in range(n_layers):
... for direction in (0, 1):
... ws.append([chx.ones((out_size, w_in(n, i))).\
astype(chx.float32) for i in range(8)])
... bs.append([chx.ones((out_size,)).astype(chx.float32) \
for _ in range(8)])
...
>>> ws[0][0].shape # ws[0:2][:4].shape are (out_size, in_size)
(2, 3)
>>> ws[2][0].shape # ws[2:][:4].shape are (out_size, 2 * out_size)
(2, 4)
>>> ws[0][4].shape # others are (out_size, out_size)
(2, 2)
>>> bs[0][0].shape
(2,)
>>> hy, cy, ys = chx.n_step_bilstm(
... n_layers, hx, cx, ws, bs, xs)
>>> hy.shape
(4, 3, 2)
>>> cy.shape
(4, 3, 2)
>>> [y.shape for y in ys]
[(3, 4), (2, 4), (1, 4)]
""")
_docs.set_doc(
chainerx.n_step_gru,
"""n_step_gru(n_layers, hx, ws, bs, xs)
Stacked Uni-directional Gated Recurrent Unit function.
This function calculates stacked Uni-directional GRU with sequences.
This function gets an initial hidden state :math:`h_0`, an input
sequence :math:`x`, weight matrices :math:`W`, and bias vectors :math:`b`.
This function calculates hidden states :math:`h_t` for each time :math:`t`
from input :math:`x_t`.
.. math::
r_t &= \\sigma(W_0 x_t + W_3 h_{t-1} + b_0 + b_3) \\\\
z_t &= \\sigma(W_1 x_t + W_4 h_{t-1} + b_1 + b_4) \\\\
h'_t &= \\tanh(W_2 x_t + b_2 + r_t \\cdot (W_5 h_{t-1} + b_5)) \\\\
h_t &= (1 - z_t) \\cdot h'_t + z_t \\cdot h_{t-1}
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Six weight matrices and six bias vectors are
required for each layer. So, when :math:`S` layers exist, you need to
prepare :math:`6S` weight matrices and :math:`6S` bias vectors.
If the number of layers ``n_layers`` is greater than :math:`1`, the input
of the ``k``-th layer is the hidden state ``h_t`` of the ``k-1``-th layer.
Note that all input variables except the first layer may have different shape
from the first layer.
Args:
n_layers(int): Number of layers.
hx (~chainerx.array):
Variable holding stacked hidden states.
Its shape is ``(S, B, N)`` where ``S`` is number of layers and is
equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
dimension of hidden units.
ws (list of list of :class:`~chainerx.array`): Weight matrices.
``ws[i]`` represents weights for i-th layer.
Each ``ws[i]`` is a list containing six matrices.
        ``ws[i][j]`` corresponds to :math:`W_j` in the equation.
        Only ``ws[0][j]`` where ``0 <= j < 3`` are ``(N, I)``-shaped as they
        are multiplied with input variables. All other matrices are
        ``(N, N)``-shaped.
bs (list of list of :class:`~chainerx.array`): Bias vectors.
        ``bs[i]`` represents the biases for the i-th layer.
Each ``bs[i]`` is a list containing six vectors.
        ``bs[i][j]`` corresponds to :math:`b_j` in the equation.
        The shape of each matrix is ``(N,)`` where ``N`` is the dimension of
hidden units.
xs (list of :class:`~chainerx.array`):
A list of :class:`~chainerx.array`
holding input values. Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is
mini-batch size for time ``t``, and ``I`` is size of input units.
Note that this function supports variable length sequences.
        When sequences have different lengths, sort sequences in descending
order by length.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
Returns:
tuple: This function returns a tuple containing two elements,
``hy`` and ``ys``.
    - ``hy`` is an updated hidden states whose shape is the same as ``hx``.
- ``ys`` is a list of :class:`~chainerx.array` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
mini-batch size for time ``t``, and ``N`` is size of hidden
      units. Note that ``B_t`` is the same value as ``xs[t].shape[0]``.
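.. admonition:: Example
    A minimal sketch mirroring the :func:`chainerx.n_step_lstm` example above;
    the all-ones arrays are placeholders rather than meaningful data.
    >>> import chainerx as chx
    >>> batchs = [3, 2, 1]  # support variable length sequences
    >>> in_size, out_size, n_layers = 3, 2, 2
    >>> xs = [chx.ones((b, in_size)).astype(chx.float32) for b in batchs]
    >>> hx = chx.ones((n_layers, batchs[0], out_size)).astype(chx.float32)
    >>> w_in = lambda i, j: in_size if i == 0 and j < 3 else out_size
    >>> ws = []
    >>> bs = []
    >>> for n in range(n_layers):
    ...     ws.append([chx.ones((out_size, w_in(n, i))).astype(chx.float32) for i in range(6)])
    ...     bs.append([chx.ones((out_size,)).astype(chx.float32) for _ in range(6)])
    ...
    >>> hy, ys = chx.n_step_gru(n_layers, hx, ws, bs, xs)
    >>> hy.shape
    (2, 3, 2)
    >>> [y.shape for y in ys]
    [(3, 2), (2, 2), (1, 2)]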
""")
_docs.set_doc(
chainerx.n_step_bigru,
"""n_step_bigru(n_layers, hx, ws, bs, xs)
Stacked Bi-directional Gated Recurrent Unit function.
This function calculates stacked Bi-directional GRU with sequences.
This function gets an initial hidden state :math:`h_0`, an input
sequence :math:`x`, weight matrices :math:`W`, and bias vectors :math:`b`.
This function calculates hidden states :math:`h_t` for each time :math:`t`
from input :math:`x_t`.
.. math::
r^{f}_t &= \\sigma(W^{f}_0 x_t + W^{f}_3 h_{t-1} + b^{f}_0 + b^{f}_3)
\\\\
z^{f}_t &= \\sigma(W^{f}_1 x_t + W^{f}_4 h_{t-1} + b^{f}_1 + b^{f}_4)
\\\\
h^{f'}_t &= \\tanh(W^{f}_2 x_t + b^{f}_2 + r^{f}_t \\cdot (W^{f}_5
h_{t-1} + b^{f}_5)) \\\\
h^{f}_t &= (1 - z^{f}_t) \\cdot h^{f'}_t + z^{f}_t \\cdot h_{t-1}
\\\\
r^{b}_t &= \\sigma(W^{b}_0 x_t + W^{b}_3 h_{t-1} + b^{b}_0 + b^{b}_3)
\\\\
z^{b}_t &= \\sigma(W^{b}_1 x_t + W^{b}_4 h_{t-1} + b^{b}_1 + b^{b}_4)
\\\\
h^{b'}_t &= \\tanh(W^{b}_2 x_t + b^{b}_2 + r^{b}_t \\cdot (W^{b}_5
h_{t-1} + b^{b}_5)) \\\\
h^{b}_t &= (1 - z^{b}_t) \\cdot h^{b'}_t + z^{b}_t \\cdot h_{t-1}
\\\\
h_t &= [h^{f}_t; h^{b}_t] \\\\
where :math:`W^{f}` is weight matrices for forward-GRU, :math:`W^{b}` is
weight matrices for backward-GRU.
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Six weight matrices and six bias vectors are
required for each layer of each direction. So, when :math:`S` layers exist,
you need to prepare :math:`12S` weight matrices and :math:`12S` bias vectors.
If the number of layers ``n_layers`` is greater than :math:`1`, the input
of the ``k``-th layer is the hidden state ``h_t`` of the ``k-1``-th layer.
Note that all input variables except the first layer may have different shape
from the first layer.
Args:
n_layers(int): Number of layers.
hx (:class:`~chainerx.array`):
Variable holding stacked hidden states.
Its shape is ``(2S, B, N)`` where ``S`` is number of layers and is
equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
dimension of hidden units.
ws (list of list of :class:`~chainerx.array`): Weight matrices.
        ``ws[2 * l + m]`` represents the weights for the l-th layer of
        the m-th direction (``m == 0`` means the forward direction and
        ``m == 1`` means the backward direction).
        Each ``ws[i]`` is a list containing six matrices.
        ``ws[i][j]`` corresponds to :math:`W_j` in the equation.
        ``ws[0][j]`` and ``ws[1][j]`` where ``0 <= j < 3`` are
        ``(N, I)``-shaped as they are multiplied with input variables.
        ``ws[i][j]`` where ``2 <= i`` and ``0 <= j < 3`` are
        ``(N, 2N)``-shaped because they are multiplied with the two
        concatenated hidden states :math:`h_t = [h^{f}_t; h^{b}_t]`.
        All other matrices are ``(N, N)``-shaped.
bs (list of list of :class:`~chainerx.array`): Bias vectors.
        ``bs[2 * l + m]`` represents the biases for the l-th layer of
        the m-th direction (``m == 0`` means the forward direction and
        ``m == 1`` means the backward direction).
        Each ``bs[i]`` is a list containing six vectors.
        ``bs[i][j]`` corresponds to :math:`b_j` in the equation.
        The shape of each matrix is ``(N,)`` where ``N`` is the dimension of
hidden units.
xs (list of :class:`~chainerx.array`):
A list of :class:`~chainerx.array` holding input values.
Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is
mini-batch size for time ``t``, and ``I`` is size of input units.
Note that this function supports variable length sequences.
        When sequences have different lengths, sort sequences in descending
order by length.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
Returns:
tuple: This function returns a tuple containing two elements,
``hy`` and ``ys``.
    - ``hy`` is an updated hidden states whose shape is the same as ``hx``.
- ``ys`` is a list of :class:`~chainerx.array` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
      to an input ``xs[t]``. Its shape is ``(B_t, 2N)`` where ``B_t`` is
      the mini-batch size for time ``t``, and ``N`` is the size of hidden
      units. Note that ``B_t`` is the same value as ``xs[t].shape[0]``.
""")
_docs.set_doc(
chainerx.n_step_rnn,
"""n_step_rnn(n_layers, hx, ws, bs, xs, activation='tanh')
Stacked Uni-directional RNN function for sequence inputs.
This function calculates stacked Uni-directional RNN with sequences.
This function gets an initial hidden state :math:`h_0`, an input
sequence :math:`x`, weight matrices :math:`W`, and bias vectors :math:`b`.
This function calculates hidden states :math:`h_t` for each
time :math:`t` from input :math:`x_t`.
.. math::
h_t = f(W_0 x_t + W_1 h_{t-1} + b_0 + b_1)
where :math:`f` is an activation function.
Weight matrices :math:`W` contains two matrices :math:`W_0` and
:math:`W_1`. :math:`W_0` is a parameter for an input sequence.
:math:`W_1` is a parameter for a hidden state.
Bias matrices :math:`b` contains two matrices :math:`b_0` and :math:`b_1`.
:math:`b_0` is a parameter for an input sequence.
:math:`b_1` is a parameter for a hidden state.
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Two weight matrices and two bias vectors are
required for each layer. So, when :math:`S` layers exist, you need to
prepare :math:`2S` weight matrices and :math:`2S` bias vectors.
If the number of layers ``n_layers`` is greater than :math:`1`, the input
of the ``k``-th layer is the hidden state ``h_t`` of the ``k-1``-th layer.
Note that all input variables except the first layer may have different shape
from the first layer.
Args:
n_layers(int): Number of layers.
hx (:class:`~chainerx.array`):
Variable holding stacked hidden states.
Its shape is ``(S, B, N)`` where ``S`` is number of layers and is
equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
dimension of hidden units.
ws (list of list of :class:`~chainerx.array`): Weight matrices.
``ws[i]`` represents weights for i-th layer.
Each ``ws[i]`` is a list containing two matrices.
        ``ws[i][j]`` corresponds to :math:`W_j` in the equation.
        Only ``ws[0][0]`` is ``(N, I)``-shaped as it is multiplied with
        input variables. All other matrices are ``(N, N)``-shaped.
bs (list of list of :class:`~chainerx.array`): Bias vectors.
        ``bs[i]`` represents the biases for the i-th layer.
Each ``bs[i]`` is a list containing two vectors.
        ``bs[i][j]`` corresponds to :math:`b_j` in the equation.
        The shape of each matrix is ``(N,)`` where ``N`` is the dimension of
hidden units.
xs (list of :class:`~chainerx.array`):
A list of :class:`~chainerx.array` holding input values.
Each element ``xs[t]`` holds input value for time ``t``.
Its shape is ``(B_t, I)``, where ``B_t`` is
mini-batch size for time ``t``, and ``I`` is size of input units.
Note that this function supports variable length sequences.
        When sequences have different lengths, sort sequences in descending
order by length.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
activation (str): Activation function name.
Please select ``tanh`` or ``relu``.
Returns:
tuple: This function returns a tuple containing two elements,
``hy`` and ``ys``.
    - ``hy`` is an updated hidden states whose shape is the same as ``hx``.
- ``ys`` is a list of :class:`~chainerx.array` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
mini-batch size for time ``t``, and ``N`` is size of hidden
      units. Note that ``B_t`` is the same value as ``xs[t].shape[0]``.
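.. admonition:: Example
    A minimal sketch mirroring the :func:`chainerx.n_step_lstm` example above;
    the all-ones arrays are placeholders rather than meaningful data.
    >>> import chainerx as chx
    >>> batchs = [3, 2, 1]  # support variable length sequences
    >>> in_size, out_size, n_layers = 3, 2, 2
    >>> xs = [chx.ones((b, in_size)).astype(chx.float32) for b in batchs]
    >>> hx = chx.ones((n_layers, batchs[0], out_size)).astype(chx.float32)
    >>> w_in = lambda i, j: in_size if i == 0 and j < 1 else out_size
    >>> ws = []
    >>> bs = []
    >>> for n in range(n_layers):
    ...     ws.append([chx.ones((out_size, w_in(n, i))).astype(chx.float32) for i in range(2)])
    ...     bs.append([chx.ones((out_size,)).astype(chx.float32) for _ in range(2)])
    ...
    >>> hy, ys = chx.n_step_rnn(n_layers, hx, ws, bs, xs)
    >>> hy.shape
    (2, 3, 2)
    >>> [y.shape for y in ys]
    [(3, 2), (2, 2), (1, 2)]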
""")
_docs.set_doc(
chainerx.n_step_birnn,
"""n_step_birnn(n_layers, hx, ws, bs, xs, activation='tanh')
Stacked Bi-directional RNN function for sequence inputs.
This function calculates stacked Bi-directional RNN with sequences.
This function gets an initial hidden state :math:`h_0`, an input
sequence :math:`x`, weight matrices :math:`W`, and bias vectors :math:`b`.
This function calculates hidden states :math:`h_t` for each
time :math:`t` from input :math:`x_t`.
.. math::
h^{f}_t &=& f(W^{f}_0 x_t + W^{f}_1 h_{t-1} + b^{f}_0 + b^{f}_1), \\\\
h^{b}_t &=& f(W^{b}_0 x_t + W^{b}_1 h_{t-1} + b^{b}_0 + b^{b}_1), \\\\
    h_t &=& [h^{f}_t; h^{b}_t], \\\\
where :math:`f` is an activation function.
Weight matrices :math:`W` contains two matrices :math:`W^{f}` and
:math:`W^{b}`. :math:`W^{f}` is weight matrices for forward directional
RNN. :math:`W^{b}` is weight matrices for backward directional RNN.
:math:`W^{f}` contains :math:`W^{f}_0` for an input sequence and
:math:`W^{f}_1` for a hidden state.
:math:`W^{b}` contains :math:`W^{b}_0` for an input sequence and
:math:`W^{b}_1` for a hidden state.
Bias matrices :math:`b` contains two matrices :math:`b^{f}` and
:math:`b^{b}`. :math:`b^{f}` contains :math:`b^{f}_0` for an input sequence
and :math:`b^{f}_1` for a hidden state.
:math:`b^{b}` contains :math:`b^{b}_0` for an input sequence and
:math:`b^{b}_1` for a hidden state.
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Two weight matrices and two bias vectors are
required for each layer of each direction. So, when :math:`S` layers exist,
you need to prepare :math:`4S` weight matrices and :math:`4S` bias vectors.
If the number of layers ``n_layers`` is greater than :math:`1`, the input
of the ``k``-th layer is the hidden state ``h_t`` of the ``k-1``-th layer.
Note that all input variables except the first layer may have different shape
from the first layer.
Args:
n_layers(int): Number of layers.
hx (:class:`~chainerx.array`):
Variable holding stacked hidden states.
Its shape is ``(2S, B, N)`` where ``S`` is number of layers and is
equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
dimension of hidden units. Because of bi-direction, the
first dimension length is ``2S``.
ws (list of list of :class:`~chainerx.array`): Weight matrices.
``ws[i + di]`` represents weights for i-th layer.
Note that ``di = 0`` for forward-RNN and ``di = 1`` for
backward-RNN.
Each ``ws[i + di]`` is a list containing two matrices.
        ``ws[i + di][j]`` corresponds to :math:`W^{f}_j` if ``di = 0``
        and to :math:`W^{b}_j` if ``di = 1`` in the equation.
        Only ``ws[0][j]`` and ``ws[1][j]`` where ``0 <= j < 1`` are
        ``(N, I)``-shaped as they are multiplied with input variables.
        All other matrices are ``(N, N)``-shaped.
bs (list of list of :class:`~chainerx.array`): Bias vectors.
        ``bs[i + di]`` represents the biases for the i-th layer.
Note that ``di = 0`` for forward-RNN and ``di = 1`` for
backward-RNN.
Each ``bs[i + di]`` is a list containing two vectors.
        ``bs[i + di][j]`` corresponds to :math:`b^{f}_j` if ``di = 0``
        and to :math:`b^{b}_j` if ``di = 1`` in the equation.
Shape of each matrix is ``(N,)`` where ``N`` is dimension of
hidden units.
xs (list of :class:`~chainerx.array`):
A list of :class:`~chainerx.array` holding input values.
Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is
mini-batch size for time ``t``, and ``I`` is size of input units.
Note that this function supports variable length sequences.
        When sequences have different lengths, sort sequences in descending
order by length.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
activation (str): Activation function name.
Please select ``tanh`` or ``relu``.
Returns:
tuple: This function returns a tuple containing two elements,
``hy`` and ``ys``.
    - ``hy`` is an updated hidden states whose shape is the same as ``hx``.
- ``ys`` is a list of :class:`~chainerx.array` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t``
is mini-batch size for time ``t``, and ``N`` is size of hidden
      units. Note that ``B_t`` is the same value as ``xs[t].shape[0]``.
""")
| mit | 5,753,944,707,664,617,000 | 31.275539 | 86 | 0.637194 | false |
jeremiedecock/snippets | python/pygtk/python_gtk3_pygobject/tree_view_cellrender_text_ellipsize.py | 1 | 2818 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
This is a simple Python GTK+3 TreeView CellRenderText snippet.
See: http://python-gtk-3-tutorial.readthedocs.org/en/latest/cellrenderers.html#cellrenderertext
"""
from gi.repository import Gtk as gtk
from gi.repository import Pango as pango
# Countries, population (as in 2015) and continent.
DATA_LIST = [("China", 1370130000, "Asia"),
("India", 1271980000, "Asia"),
("United States", 321107000, "North America"),
("Indonesia", 255461700, "Asia"),
("Brazil", 204388000, "South America"),
("Pakistan", 189936000, "Asia"),
("Nigeria", 183523000, "Africa"),
("Bangladesh", 158425000, "Asia"),
("Russia", 146267288, "Eurasia"),
("Japan", 126880000, "Asia")]
def main():
window = gtk.Window()
window.set_default_size(300, 450)
window.set_border_width(18)
# Creating the ListStore model
liststore = gtk.ListStore(str, int, str)
for item in DATA_LIST:
liststore.append(list(item))
# Creating the treeview and add the columns
treeview = gtk.TreeView(liststore)
for column_index, column_title in enumerate(["Country", "Population", "Continent"]):
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn(column_title, renderer, text=column_index)
column.set_resizable(True) # Let the column be resizable
# Use ellipsize for the "Population" and "Continent" columns
if column_title in ("Population", "Continent"):
renderer.set_property("ellipsize", pango.EllipsizeMode.END)
renderer.set_property("ellipsize-set", True)
if column_title == "Population":
column.set_expand(True) # This column will use all the space left
treeview.append_column(column)
# Scrolled window
scrolled_window = gtk.ScrolledWindow()
scrolled_window.set_border_width(0)
scrolled_window.set_shadow_type(gtk.ShadowType.IN) # should be gtk.ShadowType.IN, gtk.ShadowType.OUT, gtk.ShadowType.ETCHED_IN or gtk.ShadowType.ETCHED_OUT
scrolled_window.set_policy(gtk.PolicyType.AUTOMATIC, gtk.PolicyType.ALWAYS) # should be gtk.PolicyType.AUTOMATIC, gtk.PolicyType.ALWAYS or gtk.PolicyType.NEVER
scrolled_window.add(treeview)
window.add(scrolled_window)
window.connect("delete-event", gtk.main_quit) # ask to quit the application when the close button is clicked
window.show_all() # display the window
gtk.main() # GTK+ main loop
if __name__ == '__main__':
main()
| mit | -8,133,782,412,095,077,000 | 39.228571 | 188 | 0.620384 | false |
ikosenn/sms-log-handler | sms_log_handler/sms_handler.py | 1 | 2049 | import datetime
import logging
from typing import Dict
from .utils import import_from_string
class SMSHandler(logging.Handler):
def __init__(self, provider_config: Dict) -> None:
"""
Initializes the SMSHandler
params:
provider_config: The provider configurations.
{
provider_key: <key_id>
provider_secret: <secret_key>
provider_send_to: [<an array of phone numbers>]
}
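        A minimal wiring sketch (the key, secret and phone number below are
        placeholders, not real credentials):
            import logging
            handler = SMSHandler({
                'provider_key': '<key_id>',
                'provider_secret': '<secret_key>',
                'provider_send_to': ['+254700000000'],
            })
            logging.getLogger(__name__).addHandler(handler)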
"""
        super().__init__()
self.provider_class_str = provider_config.get(
'provider_class',
'sms_log_handler.providers.africastalking.AfricasTalkingProvider')
self.provider_class = import_from_string(self.provider_class_str)
self.key = provider_config.get('provider_key', '')
self.secret = provider_config.get('provider_secret', '')
self.phone_numbers = provider_config.get('provider_send_to', [])
def emit(self, record) -> None:
"""
Sends the message
"""
to_send = self._construct_message(record)
sms_provider = self.provider_class(self.key, self.secret)
sms_provider.send(self.phone_numbers, to_send)
def _construct_message(self, record) -> str:
"""
Contruct and format the mesage to be sent.
i.e
MODULE: sms_log_handler.sms_handler
LEVEL: ERROR
TIME: 21, May 2017 10:54
MESSAGE: Duplicate records found in the user model
"""
msg = (
'MODULE: {module_path}\n\nLEVEL: {level}\n\nTIME: {time}\n\n'
'MESSAGE: {msg}')
date_time = datetime.datetime.fromtimestamp(record.created)
date_time = date_time.strftime('%d, %b %Y %H:%M')
formatted_msg = msg.format(
level=record.levelname, time=date_time, msg=record.getMessage(),
module_path=record.name, line_no=record.lineno)
return formatted_msg
| mit | 6,605,223,552,672,128,000 | 33.728814 | 79 | 0.564178 | false |
mistoll/ros_buildfarm | ros_buildfarm/jenkins.py | 1 | 11344 | # Copyright 2014-2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from ast import literal_eval
import copy
import difflib
import sys
from xml.etree import ElementTree
from jenkinsapi.jenkins import Jenkins
from jenkinsapi.utils.requester import Requester
from jenkinsapi.views import Views
from .jenkins_credentials import get_credentials
from .templates import expand_template
JENKINS_MANAGEMENT_VIEW = 'Manage'
class CrumbRequester(Requester):
"""Adapter for Requester inserting the crumb in every request."""
def __init__(self, *args, **kwargs):
super(CrumbRequester, self).__init__(*args, **kwargs)
self._baseurl = kwargs['baseurl']
self._last_crumb_data = None
def post_url(self, *args, **kwargs):
if self._last_crumb_data:
# first try request with previous crumb if available
response = self._post_url_with_crumb(
self._last_crumb_data, *args, **kwargs)
# code 403 might indicate that the crumb is not valid anymore
if response.status_code != 403:
return response
# fetch new crumb (if server has crumbs enabled)
if self._last_crumb_data is not False:
self._last_crumb_data = self._get_crumb_data()
return self._post_url_with_crumb(
self._last_crumb_data, *args, **kwargs)
def _get_crumb_data(self):
response = self.get_url(self._baseurl + '/crumbIssuer/api/python')
if response.status_code in [404]:
print('The Jenkins master does not require a crumb')
return False
if response.status_code not in [200]:
raise RuntimeError('Failed to fetch crumb: %s' % response.text)
crumb_issuer_response = literal_eval(response.text)
crumb_request_field = crumb_issuer_response['crumbRequestField']
crumb = crumb_issuer_response['crumb']
print('Fetched crumb: %s' % crumb)
return {crumb_request_field: crumb}
def _post_url_with_crumb(self, crumb_data, *args, **kwargs):
if crumb_data:
if len(args) >= 5:
headers = args[4]
else:
headers = kwargs.setdefault('headers', {})
headers.update(crumb_data)
return super(CrumbRequester, self).post_url(*args, **kwargs)
class JenkinsProxy(Jenkins):
"""Proxy for Jenkins instance caching data for performance reasons."""
def __init__(self, *args, **kwargs):
requester_kwargs = copy.copy(kwargs)
requester_kwargs['baseurl'] = args[0]
kwargs['requester'] = CrumbRequester(**requester_kwargs)
super(JenkinsProxy, self).__init__(*args, **kwargs)
self.__jobs = None
@property
def jobs(self):
if self.__jobs is None:
self.__jobs = super(JenkinsProxy, self).jobs
return self.__jobs
def connect(jenkins_url):
print("Connecting to Jenkins '%s'" % jenkins_url)
username, password = get_credentials(jenkins_url)
jenkins = JenkinsProxy(jenkins_url, username=username, password=password)
print("Connected to Jenkins version '%s'" % jenkins.version)
return jenkins
def configure_management_view(jenkins, dry_run=False):
return configure_view(
jenkins, JENKINS_MANAGEMENT_VIEW, include_regex='^((?!__).)*$',
dry_run=dry_run)
def configure_view(
jenkins, view_name, include_regex=None, filter_queue=True,
template_name='generic_view.xml.em', dry_run=False):
view_config = get_view_config(
template_name, view_name, include_regex=include_regex,
filter_queue=filter_queue)
if not jenkins:
return view_config
view_type = _get_view_type(view_config)
create_view = view_name not in jenkins.views
dry_run_suffix = ' (dry run)' if dry_run else ''
if create_view:
print(
"Creating view '%s' of type '%s'%s" %
(view_name, view_type, dry_run_suffix))
view = jenkins.views.create(view_name, view_type=view_type) \
if not dry_run else None
remote_view_config = view.get_config() \
if view else None
else:
print("Ensure that view '%s' exists" % view_name)
view = jenkins.views[view_name]
remote_view_config = view.get_config()
remote_view_type = _get_view_type(remote_view_config)
if remote_view_type != view_type:
del jenkins.views[view_name]
print(
"Recreating view '%s' of type '%s'%s" %
(view_name, view_type, dry_run_suffix))
view = jenkins.views.create(view_name, view_type=view_type) \
if not dry_run else None
remote_view_config = view.get_config() \
if view else None
if not remote_view_config:
print('Can not produce diff during dry run if the view is new or of different type',
file=sys.stderr)
return None
diff = _diff_configs(remote_view_config, view_config)
# evaluate generator since it might yield no values
diff = list(diff)
if not diff:
print("Skipped '%s' because the config is the same" % view_name)
else:
print("Updating view '%s'%s" % (view_name, dry_run_suffix))
if not create_view:
print(' ', '<<<')
for line in diff:
print(' ', line.rstrip('\n'))
print(' ', '>>>')
try:
response_text = view.update_config(view_config) \
if not dry_run else None
except Exception:
print("Failed to configure view '%s' with config:\n%s" %
(view_name, view_config), file=sys.stderr)
raise
if response_text:
raise RuntimeError(
"Failed to configure view '%s':\n%s" %
(view_name, response_text))
return view
def get_view_config(
template_name, view_name, include_regex=None, filter_queue=True,
data=None):
view_data = copy.deepcopy(data) if data is not None else {}
view_data.update({
'view_name': view_name,
'include_regex': include_regex,
'filter_queue': filter_queue,
})
view_config = expand_template(template_name, view_data)
return view_config
def _get_view_type(view_config):
root = ElementTree.fromstring(view_config)
root.tag
if root.tag == 'hudson.model.ListView':
return Views.LIST_VIEW
if root.tag == 'hudson.plugins.view.dashboard.Dashboard':
return Views.DASHBOARD_VIEW
assert False, 'Unknown list type: ' + root.tag
def configure_job(jenkins, job_name, job_config, view=None, dry_run=False):
dry_run_suffix = ' (dry run)' if dry_run else ''
response_text = None
try:
if not jenkins.has_job(job_name):
print("Creating job '%s'%s" % (job_name, dry_run_suffix))
job = jenkins.create_job(job_name, job_config) \
if not dry_run else None
else:
job = jenkins.get_job(job_name)
remote_job_config = job.get_config()
diff = _diff_configs(remote_job_config, job_config)
# evaluate generator since it might yield no values
diff = list(diff)
if not diff:
print("Skipped '%s' because the config is the same" % job_name)
else:
print("Updating job '%s'%s" % (job_name, dry_run_suffix))
print(' ', '<<<')
for line in diff:
print(' ', line.rstrip('\n'))
print(' ', '>>>')
response_text = job.update_config(job_config) \
if not dry_run else None
if response_text:
print('Failed to update job config:\n%s' % response_text)
raise RuntimeError()
except Exception:
print("Failed to configure job '%s' with config:\n%s" %
(job_name, job_config), file=sys.stderr)
raise
if response_text:
raise RuntimeError(
"Failed to configure job '%s':\n%s" % (job_name, response_text))
if view is not None:
if job_name not in view:
print(
"Adding job '%s' to view '%s'%s" %
(job_name, view.name, dry_run_suffix))
job = view.add_job(job_name, job) \
if not dry_run else job
else:
print("Job '%s' is already in view '%s'" % (job_name, view.name))
return job
def invoke_job(jenkins, job_name, cause=None):
try:
if not jenkins.has_job(job_name):
print("Failed to invoke job '%s' because it does not exist" %
job_name, file=sys.stderr)
return False
job = jenkins.get_job(job_name)
if not job.is_enabled():
print("Failed to invoke job '%s' because it is disabled" %
job_name, file=sys.stderr)
return False
if job.is_queued():
print("Skipped to invoke job '%s' because it is queued" %
job_name, file=sys.stderr)
return False
if job.is_running():
print("Skipped to invoke job '%s' because it is running" %
job_name, file=sys.stderr)
return False
print("Invoking job '%s'" % job_name)
job.invoke(cause=cause)
except Exception:
print("Failed to invoke job '%s'" % job_name, file=sys.stderr)
raise
return True
def _diff_configs(remote_config, new_config):
remote_root = ElementTree.fromstring(remote_config)
new_root = ElementTree.fromstring(new_config)
# ignore description which contains timestamp
if remote_root.find('description') is not None:
remote_root.find('description').text = ''
if new_root.find('description') is not None:
new_root.find('description').text = ''
if ElementTree.tostring(remote_root) == ElementTree.tostring(new_root):
return []
xml1 = ElementTree.tostring(remote_root, encoding='unicode')
xml2 = ElementTree.tostring(new_root, encoding='unicode')
lines1 = xml1.splitlines()
lines2 = xml2.splitlines()
return difflib.unified_diff(
lines1, lines2, 'remote config', 'new config', n=0)
def remove_jobs(jenkins, job_prefix, excluded_job_names, dry_run=False):
dry_run_suffix = ' (dry run)' if dry_run else ''
for job_name in jenkins.jobs.keys():
if not job_name.startswith(job_prefix):
continue
if job_name in excluded_job_names:
continue
print("Deleting job '%s'%s" % (job_name, dry_run_suffix))
if dry_run:
continue
jenkins.delete_job(job_name)
| apache-2.0 | -2,348,657,980,131,072,000 | 36.071895 | 92 | 0.596703 | false |
MatanRubin/my.vim | virgene/config_mgr.py | 1 | 4165 | import json
import os
from os import path
from typing import List
import jinja2
from virgene.common_defs import SRC_DIR, TEMPLATES_DIR, FEATURES_DIR
from virgene.config import Config
from virgene.default_encoder import DefaultEncoder
from virgene.feature_decoder import FeatureDecoder
class ConfigMgr:
def __init__(self):
self.jinja_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(path.join(SRC_DIR, 'templates')),
keep_trailing_newline=True,
trim_blocks=True,
lstrip_blocks=True,
line_statement_prefix='%',
line_comment_prefix='##'
)
self.config = None
@staticmethod
def load_config_path(config_path) -> Config:
return Config.from_json(ConfigMgr.read_json_path(config_path))
@staticmethod
def read_json_path(json_path) -> json:
with open(json_path) as json_file:
return json.load(json_file)
# XXX this is convenient but not very good
def get_template(self, template_string_or_path) -> jinja2.Template:
if path.exists(path.join(TEMPLATES_DIR, template_string_or_path)):
template = self.jinja_env.get_template(template_string_or_path)
else:
template = jinja2.Template(template_string_or_path)
return template
def generate(self, config: Config) -> str:
"""
        Receives a vimrc configuration object and returns a string containing the
corresponding vimrc file content
:param config: Config
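        A minimal usage sketch (``my_config.json`` is a hypothetical path to a
        config written by ``write_default_config``):
            mgr = ConfigMgr()
            config = ConfigMgr.load_config_path('my_config.json')
            vimrc_text = mgr.generate(config)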
"""
snippets = []
plugins = []
plugin_configs = []
builtins = []
for feature in config.features:
if not feature.is_enabled():
continue
rendered_feature = feature.render(self.jinja_env)
if feature.feature_type == "Snippet":
snippets.append(rendered_feature)
if feature.feature_type == "Plugin":
plugins.append(feature)
plugin_configs.append(rendered_feature)
if feature.feature_type == "Builtin":
builtins.append(rendered_feature)
vimrc_template = self.jinja_env.get_template("vimrc_template.j2")
return vimrc_template.render(snippets=snippets, plugins=plugins,
plugin_configurations=plugin_configs,
builtins=builtins)
@staticmethod
def build_default_config() -> Config:
installed_features = ConfigMgr.read_installed_features()
config = Config()
for feature in installed_features:
config.add_feature(feature)
return config
@staticmethod
def write_config(config: Config, output_path: [str, None]):
"""
Writes a Config object to file, or to stdout if output_path is None
:param config: vimrc configuration object
:param output_path: path to output file
:return:
"""
if output_path is None:
return json.dumps(config, cls=DefaultEncoder, indent=4)
else:
with open(output_path, 'w') as output_file:
output_file.write(
json.dumps(config, cls=DefaultEncoder, indent=4))
@staticmethod
def write_default_config(output_path=None):
config = ConfigMgr.build_default_config()
ConfigMgr.write_config(config, output_path)
@staticmethod
def read_installed_features():
feature_paths = [path.join(FEATURES_DIR, x)
for x in os.listdir(FEATURES_DIR)]
features = [FeatureDecoder.decode_from_path(x) for x in feature_paths]
return [x for x in features if x.installed]
def render_plugin_configs(self, plugin_jsons) -> List[str]:
"""
takes a list of plugin jsons and produces a list of generated templates,
one per plugin json
"""
templates = [self.jinja_env.get_template(x.template_path)
for x in plugin_jsons]
return [template.render(plugin=plugin_json)
for template, plugin_json in zip(templates, plugin_jsons)]
| gpl-3.0 | -4,063,867,231,558,727,700 | 35.535088 | 80 | 0.612485 | false |
QLRace/minqlx-plugins | spec_delay.py | 1 | 2290 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
"""
Stops people spectating then quickly joining the 'free' team.
This is to stop people firing a rocket, then spectating and joining then
using the knockback from the rocket which would count as a strafe time.
"""
import minqlx
class spec_delay(minqlx.Plugin):
def __init__(self):
super().__init__()
self.add_hook("player_disconnect", self.handle_player_disconnect)
self.add_hook("team_switch_attempt", self.handle_team_switch_attempt)
self.add_hook("team_switch", self.handle_team_switch)
self.spec_delays = set()
def handle_player_disconnect(self, player, reason):
"""Sets spec delay when a player disconnects."""
self.spec_delays.add(player.steam_id)
self.allow_join(player)
def handle_team_switch_attempt(self, player, old_team, new_team):
"""Stops the player joining if spec delay is true."""
if new_team != "spectator" and old_team == "spectator" and player.steam_id in self.spec_delays:
player.tell("^6You must wait 15 seconds before joining after spectating")
return minqlx.RET_STOP_EVENT
def handle_team_switch(self, player, old_team, new_team):
"""Sets a delay on joining when the player joins spectator"""
if new_team == "spectator" and old_team == "free":
# Set spec delay
self.spec_delays.add(player.steam_id)
self.allow_join(player)
# This is only needed to stop \team s; team f
elif new_team == "free" and old_team == "spectator" and player.steam_id in self.spec_delays:
player.tell("^6You must wait 15 seconds before joining after spectating")
return minqlx.RET_STOP_EVENT
@minqlx.delay(15.1)
def allow_join(self, player):
"""Allows the player to join after 15.1 seconds."""
try:
self.spec_delays.remove(player.steam_id)
player.center_print("^6You can join now")
except KeyError:
return
except AttributeError:
return
| gpl-3.0 | 6,870,827,062,680,719,000 | 41.407407 | 103 | 0.653275 | false |
vnsofthe/odoo-dev | addons/rhwl_gene/rhwl_gene_check.py | 1 | 1376 | # -*- coding: utf-8 -*-
snp_check={
"rs1042713":["A","G"],
"rs1050152":["C","T"],
"rs1051266":["C","T"],
"rs1136410":["A","G"],
"rs1229984":["C","T"],
"rs1234315":["C","T"],
"rs12720461":["C","T"],
"rs13306517":["A","G"],
"rs1544410":["C","T"],
"rs16944":["A","G"],
"rs1695":["A","G"],
"rs1799724":["C","T"],
"rs1799782":["A","G"],
"rs1799793":["C","T"],
"rs1799895":["C","G"],
"rs1799983":["G","T"],
"rs1799998":["A","G"],
"rs1800566":["A","G"],
"rs1800629":["A","G"],
"rs1800795":["C","G"],
"rs1801131":["G","T"],
"rs1801133":["A","G"],
"rs1884444":["G","T"],
"rs2031920":["C","T"],
"rs2066702":["A","G"],
"rs2069514":["A","G"],
"rs2073618":["C","G"],
"rs2107538":["C","T"],
"rs2221903":["C","T"],
"rs2228570":["A","G"],
"rs2230806":["C","T"],
"rs2241766":["G","T"],
"rs2274223":["A","G"],
"rs231775":["A","G"],
"rs25487":["C","T"],
"rs285":["C","T"],
"rs31480":["C","T"],
"rs320":["G","T"],
"rs34637584":["A","G"],
"rs34778348":["A","G"],
"rs429358":["C","T"],
"rs4646903":["A","G"],
"rs4646994":"ID",
"rs4673":["A","G"],
"rs4880":["A","G"],
"rs5051":["C","T"],
"rs5186":["A","C"],
"rs5443":["C","T"],
"rs660339":["A","G"],
"rs662":["C","T"],
"rs6682925":["C","T"],
"rs671":["A","G"],
"rs693":["A","G"],
"rs708272":["A","G"],
"rs731236":["A","G"],
"rs7412":["C","T"],
"rs769214":["A","G"],
"rs9344":["A","G"],
"rs9642880":["G","T"],
"GSTM1":["P","D"],
"GSTT1":["P","D"],
}
| agpl-3.0 | -3,808,686,540,482,914,000 | 20.5 | 23 | 0.457122 | false |
cmancone/mygrations | tests/formats/mysql/definitions/test_database.py | 1 | 3304 | import unittest
from mygrations.formats.mysql.file_reader.database import database as database_reader
from mygrations.formats.mysql.file_reader.create_parser import create_parser
class test_database(unittest.TestCase):
def _get_sample_db(self):
strings = [
"""
CREATE TABLE `logs` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`message` TEXT NOT NULL,
`traceback` text,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
""", """
CREATE TABLE `more_logs` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`more_messages` TEXT NOT NULL,
`traceback` text,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
]
return database_reader(strings)
def test_simple(self):
db1 = self._get_sample_db()
strings = [
"""
CREATE TABLE `logs` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`message` TEXT NOT NULL,
`traceback` text,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
""", """
CREATE TABLE `less_logs` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`more_messages` TEXT NOT NULL,
`traceback` text,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
]
db2 = database_reader(strings)
#differences = db2 - db1
#self.assertEquals( [], differences )
def test_add_table(self):
db = self._get_sample_db()
new_table = create_parser()
new_table.parse(
"""CREATE TABLE `log_changes` (
`id` INT(10) UNSIGNED NOT NULL AUTO_INCREMENT,
`log_id` INT(10) UNSIGNED NOT NULL,
`type_id` INT(10) UNSIGNED NOT NULL,
`change` VARCHAR(255),
PRIMARY KEY (id),
KEY `log_changes_log_id` (`log_id`),
KEY `log_changes_type_id` (`type_id`)
);
"""
)
db.add_table(new_table)
self.assertEquals(3, len(db.tables))
self.assertTrue('log_changes' in db.tables)
self.assertEquals(new_table, db.tables['log_changes'])
def test_remove_table(self):
db1 = self._get_sample_db()
db1.remove_table(db1.tables['more_logs'])
self.assertEquals(1, len(db1.tables))
self.assertTrue('logs' in db1.tables)
self.assertFalse('more_logs' in db1.tables)
def test_exception_on_remove_invalid_table(self):
db1 = self._get_sample_db()
new_table = create_parser()
new_table.parse(
"""CREATE TABLE `log_changes` (
`id` INT(10) UNSIGNED NOT NULL AUTO_INCREMENT,
`log_id` INT(10) UNSIGNED NOT NULL,
`type_id` INT(10) UNSIGNED NOT NULL,
`change` VARCHAR(255),
PRIMARY KEY (id),
KEY `log_changes_log_id` (`log_id`),
KEY `log_changes_type_id` (`type_id`)
);
"""
)
with self.assertRaises(ValueError):
db1.remove_table(new_table)
| mit | 6,008,796,254,061,185,000 | 31.07767 | 85 | 0.521186 | false |
Tsumiki-Chan/Neko-Chan | commands/purge.py | 1 | 1524 | from functions import search, logger
DESC = "Delete x messages"
USAGE="purge [*amount*] [*user* `optional`]"
async def init(bot):
chat=bot.message.channel
try:
if len(bot.args) == 0:
await bot.sendMessage( "Didn't receive any arguments! Usage: {}".format(USAGE))
return False
try:
bot.args[0] = int(bot.args[0])
except:
await bot.sendMessage( "`{}` is not a valid number.".format(bot.args[0]))
return False
if len(bot.args) > 1:
if len(bot.message.raw_mentions)>0:
user = await search.user(chat, bot.message.raw_mentions[0])
else:
user = list(bot.args)
user.pop(0)
user = await search.user(chat, " ".join(user))
if user is not None:
def is_me(m):
check = (m.author == user)
if check:
bot.args[0] = bot.args[0]-1
return (bot.args[0]>=0)
await bot.client.purge_from(chat, limit=500, check=is_me)
#await bot.sendMessage( user.display_name))
else:
await bot.sendMessage( "Could not find any user with \"`{}`\"".format(user))
return False
else:
await bot.client.purge_from(chat, limit=bot.args[0]+1, check=None)
return False
except Exception:
logger.PrintException(bot.message)
return False
| gpl-3.0 | -421,751,238,183,873,100 | 33.636364 | 92 | 0.509186 | false |
gigglearrows/anniesbot | pajbot/models/twitter.py | 1 | 5731 | import logging
from datetime import datetime
from pajbot.tbutil import time_since, tweet_prettify_urls
from pajbot.models.db import DBManager, Base
import tweepy
from sqlalchemy import Column, Integer, String
log = logging.getLogger('pajbot')
class TwitterUser(Base):
__tablename__ = 'tb_twitter_following'
id = Column(Integer, primary_key=True)
username = Column(String(32))
def __init__(self, username):
self.username = username
class TwitterManager:
def __init__(self, bot):
self.bot = bot
self.db_session = DBManager.create_session()
self.twitter_client = None
self.twitter_stream = None
self.listener = None
if 'twitter' in bot.config:
self.use_twitter_stream = 'streaming' in bot.config['twitter'] and bot.config['twitter']['streaming'] == '1'
try:
self.twitter_auth = tweepy.OAuthHandler(bot.config['twitter']['consumer_key'], bot.config['twitter']['consumer_secret'])
self.twitter_auth.set_access_token(bot.config['twitter']['access_token'], bot.config['twitter']['access_token_secret'])
self.twitter_client = tweepy.API(self.twitter_auth)
if self.use_twitter_stream:
self.connect_to_twitter_stream()
bot.execute_every(60 * 5, self.check_twitter_connection)
except:
log.exception('Twitter authentication failed.')
self.twitter_client = None
def commit(self):
self.db_session.commit()
def reload(self):
if self.listener:
self.listener.relevant_users = []
for user in self.db_session.query(TwitterUser):
self.listener.relevant_users.append(user.username)
return self
def follow_user(self, username):
"""Add `username` to our relevant_users list."""
if self.listener:
if username not in self.listener.relevant_users:
self.db_session.add(TwitterUser(username))
self.listener.relevant_users.append(username)
log.info('Now following {0}'.format(username))
return True
return False
def unfollow_user(self, username):
"""Stop following `username`, if we are following him."""
if self.listener:
if username in self.listener.relevant_users:
user = self.db_session.query(TwitterUser).filter_by(username=username).one_or_none()
if user:
self.db_session.delete(user)
self.listener.relevant_users.remove(username)
log.info('No longer following {0}'.format(username))
return True
else:
log.warning('Trying to unfollow someone we are not following')
return False
return False
def initialize_listener(self):
if self.listener is None:
class MyStreamListener(tweepy.StreamListener):
def __init__(self, bot):
tweepy.StreamListener.__init__(self)
self.relevant_users = []
self.bot = bot
def on_status(self, tweet):
if tweet.user.screen_name.lower() in self.relevant_users:
if not tweet.text.startswith('RT ') and tweet.in_reply_to_screen_name is None:
tw = tweet_prettify_urls(tweet)
self.bot.say('New tweet from {0}: {1}'.format(tweet.user.screen_name, tw.replace("\n", " ")))
def on_error(self, status):
log.warning('Unhandled in twitter stream: {0}'.format(status))
self.listener = MyStreamListener(self.bot)
def initialize_twitter_stream(self):
if self.twitter_stream is None:
self.twitter_stream = tweepy.Stream(self.twitter_auth, self.listener, retry_420=3 * 60, daemonize_thread=True)
def connect_to_twitter_stream(self):
"""Connect to the twitter stream.
This will print out messages in the chat if a "relevant user" tweets something
"""
try:
self.initialize_listener()
self.initialize_twitter_stream()
self.twitter_stream.userstream(_with='followings', replies='all', async=True)
except:
log.exception('Exception caught while trying to connect to the twitter stream')
def check_twitter_connection(self):
"""Check if the twitter stream is running.
If it's not running, try to restart it.
"""
try:
if self.twitter_stream.running is False:
self.connect_to_twitter_stream()
except:
log.exception('Caught exception while checking twitter connection')
def get_last_tweet(self, username):
if self.twitter_client:
try:
public_tweets = self.twitter_client.user_timeline(username)
for tweet in public_tweets:
if not tweet.text.startswith('RT ') and tweet.in_reply_to_screen_name is None:
tw = tweet_prettify_urls(tweet)
return '{0} ({1} ago)'.format(tw.replace("\n", " "), time_since(datetime.now().timestamp(), tweet.created_at.timestamp(), format='short'))
except Exception:
log.exception('Exception caught while getting last tweet')
return 'FeelsBadMan'
else:
return 'Twitter not set up FeelsBadMan'
return 'FeelsBadMan'
def quit(self):
if self.twitter_stream:
self.twitter_stream.disconnect()
| mit | -5,920,211,949,906,276,000 | 38.524138 | 162 | 0.583842 | false |
mSOHU/http2 | test/benchmark2.py | 1 | 1422 | # -*- coding: utf-8 -*-
"""
copied from https://github.com/bdarnell/tornado_http2/blob/master/tornado_http2/test/benchmark.py
"""
import time
import logging
from tornado.ioloop import IOLoop
from tornado.options import define, options, parse_command_line, enable_pretty_logging
from http2 import SimpleAsyncHTTP2Client
logging.getLogger('http2').setLevel(logging.INFO)
enable_pretty_logging()
define('n', help='number of queries', default=1000)
define('h', help='host', default='http2.akamai.com')
define('p', help='port', default=None, type=int)
define('s', help='use https, [1|0]', default=True)
define('c', help='max streams concurrency', default=30)
done_count = [0]
io_loop = IOLoop.instance()
def callback(value):
done_count[0] += 1
if done_count[0] == options.n:
io_loop.stop()
elapsed = time.time() - start_time
print 'HTTP/2: %d requests in %0.3fs: %f QPS' % (options.n, elapsed,
options.n / elapsed)
if __name__ == '__main__':
options.logging = "info"
parse_command_line()
client = SimpleAsyncHTTP2Client(
host=options.h, port=options.p,
secure=options.s, max_streams=options.c,
connect_timeout=5, enable_push=False,
initial_window_size=2**24-1,
)
start_time = time.time()
for i in range(options.n):
io_loop.add_callback(lambda: client.fetch('/', callback=callback))
io_loop.start()
| apache-2.0 | -6,296,171,402,591,293,000 | 25.830189 | 97 | 0.658228 | false |
JenningsWu/personality-extractor | Classifier.py | 1 | 8693 |
# Classify partitioned data
# sentence description type (desc_type): speech(0), description(1), action(2)
# Character Personality type(personality):
# [impulsive(1)/calm(0),Extrovert(1)/Introvert(0),
# Optimistic(1)/pessimistic(0)]
import glob
import json
import io
# Data Structures
# {word:[probability of 3 desc_type]}
descTypeProb = dict()
# {word:{desctype:6 probabilities for each personalites},...}
personalityProb = dict()
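# Illustrative shapes only (numbers are made up; values are kept as strings
# when the model is loaded and converted with float() at use time), e.g.:
#   descTypeProb[u'word'] -> ['0.2', '0.5', '0.3']
#   personalityProb[u'word'] -> {0: [<6 values>], 1: [<6 values>], 2: [<6 values>]}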
descType = [0, 0, 0]  # speech, description, action
descTypeOutput = -1  # speech(0), description(1), action(2)
# impulsive,calm, Extrovert, Introvert, Optimistic, pessimistic
personality = [0, 0, 0, 0, 0, 0]
# [impulsive(1)/calm(0),Extrovert(1)/Introvert(0),Optimistic(1)/pessimistic(0)]
personalityOutput = [0, 0, 0]
actionCount = 0
speechCount = 0
descriptionCount = 0
totalCount = 0
diffWordCount = 0
sentenceTypePrior = [0,0,0]#[0]:quote [1]: normal [2]:half
personalityPrior = [0,0,0,0,0,0]
tagFiles = 'train/developing data/'
modelPath = 'model.txt'
testDataFiles = 'train/developing data/*.json'
#variables for experiment
totalDescType = [0,0,0]#speech,description,action
speech = [0,0]#correct,wrong
description = [0,0]
action = [0,0]
totalPersonality = [0,0,0,0,0,0]
impulsive = [0,0]
calm = [0,0]
extrovert = [0,0]
introvert = [0,0]
optimistic = [0,0]
pessimistic = [0,0]
# parsing tag data
def parseTagFile(path):
    tagFile = open(path, 'r')
tagDict = dict()
while True:
line = tagFile.readline()
if line:
charName = line.strip('\n').decode('utf-8')
#print charName
line = tagFile.readline()
tags = line.strip('\n').split()
if tags[0] == '\xe5\x86\xb2\xe5\x8a\xa8': # Impulsive
tags[0] = 1;
else:
tags[0] = 0
if tags[1] == '\xe5\xa4\x96\xe5\x90\x91': # Extrovert
tags[1] = 1;
else:
tags[1] = 0
if tags[2] == '\xe4\xb9\x90\xe8\xa7\x82': # Optimistic
tags[2] = 1;
else:
tags[2] = 0
#print tags
tagDict.setdefault(charName,[])
tagDict[charName] = tags
#print tagDict
else:
break
tagFile.close()
return tagDict
# load model
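# Expected model.txt layout (inferred from the reads below):
#   line 1: 3 sentence-type priors, space separated
#   line 2: 6 personality priors, space separated
#   line 3: number of distinct words
#   next <count> lines: word TAB 3 desc-type probabilities
#   remaining lines: word TAB 18 values (3 desc types x 6 personality traits)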
modelFile = open(modelPath, 'r')
sentenceTypePrior = modelFile.readline().split()
for i in range (0,3):
descType[i] = float(sentenceTypePrior[i])
personalityPrior = modelFile.readline().split()
for i in range(0,6):
personality[i] = float(personalityPrior[i])
diffWordCount = int(modelFile.readline())
for i in range(0, diffWordCount):
line = modelFile.readline()
data = line.strip('\n').split('\t')
data[0] = data[0].decode('utf-8')
descTypeProb.setdefault(data[0], [0,0,0])
for j in range(0, 3):
descTypeProb[data[0]][j] = data[j + 1]
while True:
line = modelFile.readline()
if line:
data = line.strip('\n').split('\t')
data[0] = data[0].decode('utf-8')
# print data
personalityProb.setdefault(data[0], {})
for i in range(0, 3):
personalityProb[data[0]].setdefault(i, [0, 0, 0, 0, 0, 0])
for j in range(0, 6):
personalityProb[data[0]][i][j] = data[i * 6 + j + 1]
else:
break
modelFile.close()
# load test data
testDataPath = glob.glob(testDataFiles)
for t in testDataPath:
test = open(t, 'r')
t = t.decode('gb2312')
#print t
novelName = t[10:len(t)-9]
#print novelName
tagPath = tagFiles + novelName + '.tag'
tagDict = parseTagFile(tagPath)
dataSets = json.load(test, encoding='utf-8')
test.close()
print 'Novel Name: ', novelName.encode('gb2312')
print '\n'
# analyze & output:
for dataSet in dataSets:
personality = [0, 0, 0, 0, 0, 0]
cname = ''
for name in tagDict.keys():
if name not in dataSet['chars']:
continue
else:
cname = name
if cname == '':
continue
else:
#print 1
for i in range (0,3):
if tagDict[cname][i]==1:
totalPersonality[2*i]+=1
else:
totalPersonality[2*i+1]+=1
print 'Character:', cname.encode('gb2312')
print 'Aliases:\n'
for i in dataSet['chars']:
print i.encode('gb2312'),'\t'
print '\nsentences:\n'
for i in dataSet['sentences']:
sentence = ''
typeTag = i['type']
#typeTag ==0:speech
if typeTag == 1 :
typeTag = 2#action
elif typeTag ==3:
typeTag = 1#description
totalDescType[typeTag]+=1
descType = [0,0,0]
for j in i['tokens']:
word = j['word']
sentence += word
try:
for k in range(0, 3):
#print descTypeProb[word][k]
descType[k] += float(descTypeProb[word][k])
except:
continue
#print descType
if descType[0] > descType[1] and descType[0] > descType[2]:
descTypeOutput = 0
elif descType[1]>descType[0] and descType[1] > descType[2]:
descTypeOutput = 1
else:
descTypeOutput = 2
print sentence.encode('utf-8')
#print typeTag
if descTypeOutput == 0:
print "Description Type: Speech"
speechCount += 1
if typeTag == descTypeOutput:
speech[0]+=1
else:
speech[1]+=1
elif descTypeOutput == 1:
print "Description Type: Description"
descriptionCount += 1
if typeTag == descTypeOutput:
description[0]+=1
else:
description[1]+=1
else:
print "Description Type: Action"
actionCount += 1
if typeTag == descTypeOutput:
action[0]+=1
else:
action[1]+=1
for j in i['tokens']:
word = j['word']
try:
for k in range(0, 6):
personality[k] += float(
personalityProb[word][descTypeOutput][k])
except:
continue
for k in range(0, 3):
if personality[2 * k] > personality[2 * k + 1]:
personalityOutput[k] = 1
else:
personalityOutput[k] = 0
print '\nPersonality:\n'
if personalityOutput[0] == 1:
print 'impulsive;'
if tagDict[cname][0] == 1:
impulsive[0]+=1
else:
impulsive[1]+=1
else:
print 'calm;'
if tagDict[cname][0] == 0:
calm[0]+=1
else:
calm[1]+=1
if personalityOutput[1] == 1:
print 'extrovert;'
if tagDict[cname][1] == 1:
extrovert[0]+=1
else:
extrovert[1]+=1
else:
print 'introvert;'
if tagDict[cname][1] == 0:
introvert[0]+=1
else:
introvert[1]+=1
if personalityOutput[2] == 1:
print 'optimistic.'
if tagDict[cname][2] == 1:
optimistic[0]+=1
else:
optimistic[1]+=1
else:
print 'pessimistic.'
if tagDict[cname][2] == 0:
pessimistic[0]+=1
else:
pessimistic[1]+=1
print '\n'
totalCount = actionCount + speechCount + descriptionCount
print '\nWriting Style:\n'
print 'Speech:', speechCount / float(totalCount)
print 'Action:', actionCount / float(totalCount)
print 'Description:', descriptionCount / float(totalCount)
print totalDescType,speech,description,action
print totalPersonality,impulsive,calm,extrovert,introvert,optimistic,pessimistic
| gpl-3.0 | 8,126,149,522,154,109,000 | 33.224409 | 80 | 0.484643 | false |
masasin/advent_of_code_2015 | day_11.py | 1 | 3790 | """
http://adventofcode.com/day/11
--- Day 11: Corporate Policy ---
Santa's previous password expired, and he needs help choosing a new one.
To help him remember his new password after the old one expires, Santa has
devised a method of coming up with a password based on the previous one.
Corporate policy dictates that passwords must be exactly eight lowercase letters
(for security reasons), so he finds his new password by incrementing his old
password string repeatedly until it is valid.
Incrementing is just like counting with numbers: xx, xy, xz, ya, yb, and so on.
Increase the rightmost letter one step; if it was z, it wraps around to a, and
repeat with the next letter to the left until one doesn't wrap around.
Unfortunately for Santa, a new Security-Elf recently started, and he has imposed
some additional password requirements:
- Passwords must include one increasing straight of at least three letters,
like abc, bcd, cde, and so on, up to xyz. They cannot skip letters; abd
doesn't count.
- Passwords may not contain the letters i, o, or l, as these letters can be
mistaken for other characters and are therefore confusing.
- Passwords must contain at least two different, non-overlapping pairs of
letters, like aa, bb, or zz.
For example:
- hijklmmn meets the first requirement (because it contains the straight
hij) but fails the second requirement (because it contains i and l).
- abbceffg meets the third requirement (because it repeats bb and ff) but
fails the first requirement.
- abbcegjk fails the third requirement, because it only has one double
letter (bb).
- The next password after abcdefgh is abcdffaa.
- The next password after ghijklmn is ghjaabcc, because you eventually skip
all the passwords that start with ghi..., since i is not allowed.
Given Santa's current password (your puzzle input), what should his next
password be?
--- Part Two ---
Santa's password expired again. What's the next one?
"""
import re
from string import ascii_lowercase
def find_next_password(password, n=1):
for i in range(n):
password = increment_password(password)
while not validate(password):
password = increment_password(password)
return password
def validate(password):
# Requirement 2
if re.search(r"[iol]", password):
return False
# Requirement 1
for i in range(len(password) - 2):
if password[i:i+3] in ascii_lowercase:
break
else:
return False
# Requirement 3
return True if re.search(r"(\w)\1.*(\w)\2", password) else False
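# Quick illustrative checks mirroring the puzzle examples above (a sanity
# reference, not a test suite):
#   validate("hijklmmn") -> False  (contains i and l)
#   validate("abbceffg") -> False  (no increasing straight of three letters)
#   validate("abcdffaa") -> True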
def increment_password(password):
    if password.endswith("z"):
        # Only the trailing run of "z"s wraps around to "a"s; the letter just
        # before that run is the one that carries.
        stripped = password.rstrip("z")
        n_z = len(password) - len(stripped)
        if not stripped:  # the password was all "z"s
            return "a" * (n_z + 1)
        return stripped[:-1] + next_letter(stripped[-1]) + "a" * n_z
    else:
        return password[:-1] + next_letter(password[-1])
def next_letter(c):
try:
return ascii_lowercase[ascii_lowercase.index(c) + 1]
except IndexError: # z
return "a"
def part_one():
with open("inputs/day_11_input.txt") as fin:
password = fin.readline().strip()
print("Next password: {}".format(find_next_password(password)))
def part_two():
with open("inputs/day_11_input.txt") as fin:
password = fin.readline().strip()
print("Next password: {}".format(find_next_password(password, 2)))
def main():
with open("inputs/day_11_input.txt") as fin:
password = fin.readline().strip()
next_password = find_next_password(password)
print("Next password: {}".format(next_password))
print("Next next password: {}".format(find_next_password(next_password)))
if __name__ == "__main__":
main()
| mit | 9,128,645,441,959,390,000 | 31.956522 | 80 | 0.683113 | false |
NicolasHug/Surprise | surprise/dataset.py | 1 | 9634 | """
The :mod:`dataset <surprise.dataset>` module defines the :class:`Dataset` class
and other subclasses which are used for managing datasets.
Users may use both *built-in* and user-defined datasets (see the
:ref:`getting_started` page for examples). Right now, three built-in datasets
are available:
* The `movielens-100k <http://grouplens.org/datasets/movielens/>`_ dataset.
* The `movielens-1m <http://grouplens.org/datasets/movielens/>`_ dataset.
* The `Jester <http://eigentaste.berkeley.edu/dataset/>`_ dataset 2.
Built-in datasets can all be loaded (or downloaded if you haven't already)
using the :meth:`Dataset.load_builtin` method.
Summary:
.. autosummary::
:nosignatures:
Dataset.load_builtin
Dataset.load_from_file
Dataset.load_from_folds
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from collections import defaultdict
import sys
import os
import itertools
from six.moves import input
from .reader import Reader
from .builtin_datasets import download_builtin_dataset
from .builtin_datasets import BUILTIN_DATASETS
from .trainset import Trainset
class Dataset:
"""Base class for loading datasets.
Note that you should never instantiate the :class:`Dataset` class directly
(same goes for its derived classes), but instead use one of the three
available methods for loading datasets."""
def __init__(self, reader):
self.reader = reader
@classmethod
def load_builtin(cls, name='ml-100k', prompt=True):
"""Load a built-in dataset.
If the dataset has not already been loaded, it will be downloaded and
saved. You will have to split your dataset using the :meth:`split
<DatasetAutoFolds.split>` method. See an example in the :ref:`User
Guide <cross_validate_example>`.
Args:
name(:obj:`string`): The name of the built-in dataset to load.
Accepted values are 'ml-100k', 'ml-1m', and 'jester'.
Default is 'ml-100k'.
prompt(:obj:`bool`): Prompt before downloading if dataset is not
already on disk.
Default is True.
Returns:
A :obj:`Dataset` object.
Raises:
ValueError: If the ``name`` parameter is incorrect.
"""
try:
dataset = BUILTIN_DATASETS[name]
except KeyError:
raise ValueError('unknown dataset ' + name +
'. Accepted values are ' +
', '.join(BUILTIN_DATASETS.keys()) + '.')
# if dataset does not exist, offer to download it
if not os.path.isfile(dataset.path):
answered = not prompt
while not answered:
print('Dataset ' + name + ' could not be found. Do you want '
'to download it? [Y/n] ', end='')
choice = input().lower()
if choice in ['yes', 'y', '', 'omg this is so nice of you!!']:
answered = True
elif choice in ['no', 'n', 'hell no why would i want that?!']:
answered = True
print("Ok then, I'm out!")
sys.exit()
download_builtin_dataset(name)
reader = Reader(**dataset.reader_params)
return cls.load_from_file(file_path=dataset.path, reader=reader)
@classmethod
def load_from_file(cls, file_path, reader):
"""Load a dataset from a (custom) file.
Use this if you want to use a custom dataset and all of the ratings are
stored in one file. You will have to split your dataset using the
:meth:`split <DatasetAutoFolds.split>` method. See an example in the
:ref:`User Guide <load_from_file_example>`.
Args:
file_path(:obj:`string`): The path to the file containing ratings.
reader(:obj:`Reader <surprise.reader.Reader>`): A reader to read
the file.
"""
return DatasetAutoFolds(ratings_file=file_path, reader=reader)
@classmethod
def load_from_folds(cls, folds_files, reader):
"""Load a dataset where folds (for cross-validation) are predefined by
some files.
The purpose of this method is to cover a common use case where a
dataset is already split into predefined folds, such as the
movielens-100k dataset which defines files u1.base, u1.test, u2.base,
u2.test, etc... It can also be used when you don't want to perform
cross-validation but still want to specify your training and testing
data (which comes down to 1-fold cross-validation anyway). See an
example in the :ref:`User Guide <load_from_folds_example>`.
Args:
folds_files(:obj:`iterable` of :obj:`tuples`): The list of the
folds. A fold is a tuple of the form ``(path_to_train_file,
path_to_test_file)``.
reader(:obj:`Reader <surprise.reader.Reader>`): A reader to read
the files.
"""
return DatasetUserFolds(folds_files=folds_files, reader=reader)
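    # Illustrative sketch only (assumes movielens-100k style fold files on
    # disk, as mentioned in the docstring above):
    #
    #   reader = Reader('ml-100k')
    #   folds = [('u1.base', 'u1.test'), ('u2.base', 'u2.test')]
    #   data = Dataset.load_from_folds(folds, reader)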
@classmethod
def load_from_df(cls, df, reader):
"""Load a dataset from a pandas dataframe.
Use this if you want to use a custom dataset that is stored in a pandas
dataframe. See the :ref:`User Guide<load_from_df_example>` for an
example.
Args:
df(`Dataframe`): The dataframe containing the ratings. It must have
three columns, corresponding to the user (raw) ids, the item
(raw) ids, and the ratings, in this order.
reader(:obj:`Reader <surprise.reader.Reader>`): A reader to read
the file. Only the ``rating_scale`` field needs to be
specified.
"""
return DatasetAutoFolds(reader=reader, df=df)
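    # Illustrative sketch only (assumes a pandas DataFrame `ratings_df` whose
    # columns are user id, item id and rating, in that order):
    #
    #   reader = Reader(rating_scale=(1, 5))
    #   data = Dataset.load_from_df(ratings_df[['userID', 'itemID', 'rating']],
    #                               reader)
    #   trainset = data.build_full_trainset()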
def read_ratings(self, file_name):
"""Return a list of ratings (user, item, rating, timestamp) read from
file_name"""
with open(os.path.expanduser(file_name)) as f:
raw_ratings = [self.reader.parse_line(line) for line in
itertools.islice(f, self.reader.skip_lines, None)]
return raw_ratings
def construct_trainset(self, raw_trainset):
raw2inner_id_users = {}
raw2inner_id_items = {}
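        # Raw ids (as they appear in the ratings file or dataframe) are mapped
        # to contiguous inner integer ids, which is what Trainset works with.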
current_u_index = 0
current_i_index = 0
ur = defaultdict(list)
ir = defaultdict(list)
# user raw id, item raw id, translated rating, time stamp
for urid, irid, r, timestamp in raw_trainset:
try:
uid = raw2inner_id_users[urid]
except KeyError:
uid = current_u_index
raw2inner_id_users[urid] = current_u_index
current_u_index += 1
try:
iid = raw2inner_id_items[irid]
except KeyError:
iid = current_i_index
raw2inner_id_items[irid] = current_i_index
current_i_index += 1
ur[uid].append((iid, r))
ir[iid].append((uid, r))
n_users = len(ur) # number of users
n_items = len(ir) # number of items
n_ratings = len(raw_trainset)
trainset = Trainset(ur,
ir,
n_users,
n_items,
n_ratings,
self.reader.rating_scale,
raw2inner_id_users,
raw2inner_id_items)
return trainset
def construct_testset(self, raw_testset):
return [(ruid, riid, r_ui_trans)
for (ruid, riid, r_ui_trans, _) in raw_testset]
class DatasetUserFolds(Dataset):
"""A derived class from :class:`Dataset` for which folds (for
cross-validation) are predefined."""
def __init__(self, folds_files=None, reader=None):
Dataset.__init__(self, reader)
self.folds_files = folds_files
# check that all files actually exist.
for train_test_files in self.folds_files:
for f in train_test_files:
if not os.path.isfile(os.path.expanduser(f)):
raise ValueError('File ' + str(f) + ' does not exist.')
class DatasetAutoFolds(Dataset):
"""A derived class from :class:`Dataset` for which folds (for
cross-validation) are not predefined. (Or for when there are no folds at
all)."""
def __init__(self, ratings_file=None, reader=None, df=None):
Dataset.__init__(self, reader)
self.has_been_split = False # flag indicating if split() was called.
if ratings_file is not None:
self.ratings_file = ratings_file
self.raw_ratings = self.read_ratings(self.ratings_file)
elif df is not None:
self.df = df
self.raw_ratings = [(uid, iid, float(r), None)
for (uid, iid, r) in
self.df.itertuples(index=False)]
else:
raise ValueError('Must specify ratings file or dataframe.')
def build_full_trainset(self):
"""Do not split the dataset into folds and just return a trainset as
is, built from the whole dataset.
User can then query for predictions, as shown in the :ref:`User Guide
<train_on_whole_trainset>`.
Returns:
The :class:`Trainset <surprise.Trainset>`.
"""
return self.construct_trainset(self.raw_ratings)
| bsd-3-clause | 877,863,226,206,800,500 | 34.289377 | 79 | 0.585219 | false |
dpressel/baseline | api-examples/layers_classify_pytorch.py | 1 | 6102 | import argparse
import baseline.embeddings
from eight_mile.confusion import ConfusionMatrix
import baseline
from eight_mile.pytorch.optz import OptimizerManager, EagerOptimizer
import baseline.pytorch.embeddings
import eight_mile.pytorch.layers as L
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, TensorDataset
import logging
import numpy as np
import time
import torch
def to_device(m):
    # honor the --device flag (parsed below) rather than assuming CUDA
    return m.to(args.device)
def to_host(o):
return o.cpu().float().numpy()
parser = argparse.ArgumentParser(description='Train a Layers model with PyTorch API')
parser.add_argument('--model_type', help='What type of model to build', type=str, default='default')
parser.add_argument('--poolsz', help='How many hidden units for pooling', type=int, default=100)
parser.add_argument('--stacksz', help='How many hidden units for stacking', type=int, nargs='+')
parser.add_argument('--name', help='(optional) signature name', type=str)
parser.add_argument('--epochs', help='Number of epochs to train', type=int, default=2)
parser.add_argument('--batchsz', help='Batch size', type=int, default=50)
parser.add_argument('--filts', help='Parallel convolution filter widths (if default model)', type=int, default=[3, 4, 5], nargs='+')
parser.add_argument('--mxlen', help='Maximum post length (number of words) during training', type=int, default=100)
parser.add_argument('--train', help='Training file', default='../data/stsa.binary.phrases.train')
parser.add_argument('--valid', help='Validation file', default='../data/stsa.binary.dev')
parser.add_argument('--test', help='Testing file', default='../data/stsa.binary.test')
parser.add_argument('--embeddings', help='Pretrained embeddings file', default='/data/embeddings/GoogleNews-vectors-negative300.bin')
parser.add_argument('--ll', help='Log level', type=str, default='info')
parser.add_argument('--lr', help='Learning rate', type=float, default=0.001)
parser.add_argument("--device", type=str,
default="cuda" if torch.cuda.is_available() else "cpu",
help="Device (cuda or cpu)")
args = parser.parse_known_args()[0]
feature_desc = {
'word': {
'vectorizer': baseline.Token1DVectorizer(mxlen=100, transform_fn=baseline.lowercase),
'embed': {'file': args.embeddings, 'type': 'default', 'unif': 0.25}
}
}
# Create a reader that is using our vectorizers to parse a TSV file
# with rows like:
# <label>\t<sentence>\n
class DictionaryDatasetWrapper(Dataset):
def __init__(self, x, x_lengths, y):
self.tensor_dataset = TensorDataset(x, x_lengths, y)
def __getitem__(self, index):
# stuff
x, x_length, y = self.tensor_dataset[index]
return {'word': x.to(args.device), "lengths": x_length.to(args.device)}, y.to(args.device)
def __len__(self):
return len(self.tensor_dataset)
class Data:
def __init__(self, ts, batchsz):
self.ds = self._to_tensors(ts)
self.batchsz = batchsz
def _to_tensors(self, ts):
x = []
x_lengths = []
y = []
for sample in ts:
x.append(sample['word'].squeeze())
x_lengths.append(sample['word_lengths'].squeeze())
y.append(sample['y'].squeeze())
return DictionaryDatasetWrapper(torch.tensor(np.stack(x), dtype=torch.long), torch.tensor(np.stack(x_lengths), dtype=torch.long), torch.tensor(np.stack(y), dtype=torch.long))
def get_input(self, training=False):
return DataLoader(self.ds, batch_size=self.batchsz, shuffle=training)
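# Note: Data flattens the baseline reader output (example dicts holding
# 'word', 'word_lengths' and 'y' arrays) into a TensorDataset; get_input()
# then yields ({'word': ..., 'lengths': ...}, y) batches already placed on
# args.device by DictionaryDatasetWrapper.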
vectorizers = {k: v['vectorizer'] for k, v in feature_desc.items()}
reader = baseline.TSVSeqLabelReader(vectorizers, clean_fn=baseline.TSVSeqLabelReader.do_clean)
train_file = args.train
valid_file = args.valid
test_file = args.test
# This builds a set of counters
vocabs, labels = reader.build_vocab([train_file,
valid_file,
test_file])
# This builds a set of embeddings objects, these are typically not DL-specific
# but if they happen to be addons, they can be
embeddings = dict()
for k, v in feature_desc.items():
embed_config = v['embed']
embeddings_for_k = baseline.embeddings.load_embeddings('word', embed_file=embed_config['file'], known_vocab=vocabs[k],
embed_type=embed_config.get('type', 'default'),
unif=embed_config.get('unif', 0.), use_mmap=True)
embeddings[k] = embeddings_for_k['embeddings']
# Reset the vocab to the embeddings one
vocabs[k] = embeddings_for_k['vocab']
train_set = Data(reader.load(train_file, vocabs=vocabs, batchsz=1), args.batchsz)
valid_set = Data(reader.load(valid_file, vocabs=vocabs, batchsz=1), args.batchsz)
test_set = Data(reader.load(test_file, vocabs=vocabs, batchsz=1), args.batchsz)
stacksz = len(args.filts) * args.poolsz
num_epochs = args.epochs
model = to_device(
L.EmbedPoolStackModel(2, L.EmbeddingsStack(embeddings), L.WithoutLength(L.ParallelConv(300, args.poolsz, args.filts)), L.Highway(stacksz))
)
def loss(model, x, y):
y_ = model(x)
l = F.nll_loss(y_, y)
return l
optimizer = EagerOptimizer(loss, optim="adam", lr=args.lr)
for epoch in range(num_epochs):
loss_acc = 0.
step = 0
start = time.time()
for x, y in train_set.get_input(training=True):
loss_value = optimizer.update(model, x, y)
loss_acc += loss_value
step += 1
print('training time {}'.format(time.time() - start))
mean_loss = loss_acc / step
print('Training Loss {}'.format(mean_loss))
cm = ConfusionMatrix(['0', '1'])
for x, y in valid_set.get_input():
with torch.no_grad():
y_ = np.argmax(to_host(model(x)), axis=1)
cm.add_batch(y, y_)
print(cm)
print(cm.get_all_metrics())
print('FINAL')
cm = ConfusionMatrix(['0', '1'])
with torch.no_grad():
for x, y in test_set.get_input():
y_ = np.argmax(to_host(model(x)), axis=1)
cm.add_batch(y, y_)
print(cm)
print(cm.get_all_metrics())
| apache-2.0 | 7,061,987,987,167,613,000 | 36.666667 | 182 | 0.653228 | false |
usc-isi/extra-specs | nova/api/openstack/compute/contrib/quotas.py | 1 | 3875 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import db
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova import exception
from nova import quota
authorize = extensions.extension_authorizer('compute', 'quotas')
class QuotaTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('quota_set', selector='quota_set')
root.set('id')
for resource in quota.quota_resources:
elem = xmlutil.SubTemplateElement(root, resource)
elem.text = resource
return xmlutil.MasterTemplate(root, 1)
class QuotaSetsController(object):
def _format_quota_set(self, project_id, quota_set):
"""Convert the quota object to a result dict"""
result = dict(id=str(project_id))
for resource in quota.quota_resources:
result[resource] = quota_set[resource]
return dict(quota_set=result)
def _validate_quota_limit(self, limit):
# NOTE: -1 is a flag value for unlimited
if limit < -1:
msg = _("Quota limit must be -1 or greater.")
raise webob.exc.HTTPBadRequest(explanation=msg)
@wsgi.serializers(xml=QuotaTemplate)
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
sqlalchemy_api.authorize_project_context(context, id)
return self._format_quota_set(id,
quota.get_project_quotas(context, id))
except exception.NotAuthorized:
raise webob.exc.HTTPForbidden()
@wsgi.serializers(xml=QuotaTemplate)
def update(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
project_id = id
for key in body['quota_set'].keys():
if key in quota.quota_resources:
value = int(body['quota_set'][key])
self._validate_quota_limit(value)
try:
db.quota_update(context, project_id, key, value)
except exception.ProjectQuotaNotFound:
db.quota_create(context, project_id, key, value)
except exception.AdminRequired:
raise webob.exc.HTTPForbidden()
return {'quota_set': quota.get_project_quotas(context, project_id)}
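    # Illustrative request body for update(); only keys that are known quota
    # resources are applied, and -1 means unlimited (see validation above):
    #
    #   {"quota_set": {"instances": 10, "cores": 20}}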
@wsgi.serializers(xml=QuotaTemplate)
def defaults(self, req, id):
authorize(req.environ['nova.context'])
return self._format_quota_set(id, quota._get_default_quotas())
class Quotas(extensions.ExtensionDescriptor):
"""Quotas management support"""
name = "Quotas"
alias = "os-quota-sets"
namespace = "http://docs.openstack.org/compute/ext/quotas-sets/api/v1.1"
updated = "2011-08-08T00:00:00+00:00"
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-quota-sets',
QuotaSetsController(),
member_actions={'defaults': 'GET'})
resources.append(res)
return resources
| apache-2.0 | -5,099,529,885,917,966,000 | 33.598214 | 79 | 0.634065 | false |
isotoma/alm.solrindex | alm/solrindex/schema.py | 1 | 2814 |
"""Parser of a Solr schema.xml"""
from alm.solrindex.interfaces import ISolrField
from alm.solrindex.interfaces import ISolrFieldHandler
from alm.solrindex.interfaces import ISolrSchema
from elementtree.ElementTree import parse
from zope.component import getUtility
from zope.component import queryUtility
from zope.interface import implements
import logging
import urllib2
log = logging.getLogger(__name__)
class SolrSchema(object):
implements(ISolrSchema)
uniqueKey = None
defaultSearchField = None
def __init__(self, solr_uri=None):
self.fields = []
if solr_uri:
f = self.download_from(solr_uri)
try:
self.xml_init(f)
finally:
f.close()
def download_from(self, solr_uri):
"""Get schema.xml from a running Solr instance"""
schema_uris = ('%s/admin/file/?file=schema.xml', # solr 1.3
'%s/admin/get-file.jsp?file=schema.xml') # solr 1.2
for i, uri in enumerate(schema_uris):
uri = uri % solr_uri
log.debug('getting schema from %s', uri)
try:
f = urllib2.urlopen(uri)
except urllib2.URLError:
if i < len(schema_uris) - 1:
# try the next URI
continue
raise
return f
def xml_init(self, f):
"""Initialize this instance from a Solr schema.xml"""
tree = parse(f)
e = tree.find('uniqueKey')
if e is not None:
self.uniqueKey = e.text.strip()
e = tree.find('defaultSearchField')
if e is not None:
self.defaultSearchField = e.text.strip()
types = {}
for e in tree.findall('types/fieldType'):
types[e.attrib['name']] = e
for e in tree.findall('fields/field'):
t = types[e.attrib['type']]
self.fields.append(SolrField(e, t))
class SolrField(object):
implements(ISolrField)
_boolean_attrs = (
'indexed', 'stored', 'required', 'multiValued',
)
def __init__(self, elem, fieldType):
self.name = elem.attrib['name']
self.type = elem.attrib['type']
self.java_class = fieldType.attrib['class']
for attr in self._boolean_attrs:
value = elem.get(attr)
if value is not None:
value = {'true': True, 'false': False}[value.lower()]
setattr(self, attr, value)
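        # Handler lookup order: a utility registered under the field name,
        # then one registered under the field's Solr java class, and finally
        # the unnamed default ISolrFieldHandler utility.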
handler = queryUtility(ISolrFieldHandler, name=self.name)
if handler is None:
handler = queryUtility(
ISolrFieldHandler, name=self.java_class)
if handler is None:
handler = getUtility(ISolrFieldHandler)
self.handler = handler
| bsd-3-clause | 2,881,713,634,405,479,000 | 29.586957 | 75 | 0.569652 | false |
yaukwankiu/armor | tests/modifiedMexicanHatTest5a.py | 1 | 2438 | # supplementing modifiedMexicanHatTest5.py
# outputing the charts, given the results
import numpy as np
import matplotlib.pyplot as plt
from armor import pattern
from armor import defaultParameters as dp
dbz = pattern.DBZ
DS = pattern.DBZstream
dataFolder = dp.root + "labLogs/2014-5-2-modifiedMexicanHatTest5/"
outputFolder= dataFolder
WRFnames = [ "WRF"+("0"+str(v))[-2:] for v in range(1,21)]
sigmas = [1, 2, 4, 5, 8 ,10 ,16, 20, 32, 40, 64, 80, 128, 160, 256, 320,]
allWRFsStreamMean = 0.
dbzCount = 0
for WRFname in WRFnames:
ds = DS(dataFolder=dataFolder,
name="kongrey" + WRFname,
outputFolder="",
imageFolder="",
key1=WRFname, # keywords to pick out specific files
key2="LOGspec.dat",
key3="kongreywrf", #safety check
preload=True,
imageExtension = '.png', #added 2013-09-27
dataExtension = '.dat',
)
print "\n==================\nSaving histograms for ", ds.name
for dbzpattern in ds:
dbzCount += 1
streamMeanUpdate = np.array([(dbzpattern.matrix==v).sum() for v in sigmas])
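        # incremental (running) mean of the per-sigma counts over all
        # patterns processed so far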
allWRFsStreamMean = 1.* ((allWRFsStreamMean*(dbzCount -1)) + streamMeanUpdate ) / dbzCount
histogramName = "kongreywrf" + dbzpattern.dataTime + WRFname + "_LOGspec_histogram"+ ds.imageExtension
print dbzpattern.name, "->", histogramName
plt.clf()
dbzpattern.histogram(display=False, outputPath=outputFolder+histogramName)
plt.close()
plt.plot(sigmas, allWRFsStreamMean)
plt.title(ds.name + ' - average laplacian-of-gaussian max-response spectrum for ' + str(dbzCount) + ' WRF patterns')
plt.savefig(outputFolder + ds.name + "_all_wrfs_average_LoG_max_response spectrum.png")
plt.close()
"""
# run modifiedMexicanHatTest6a.py and then:
allWRFsStreamMean = array([ 2562.4375, 655.5625, 526.15 , 741.51 , 858.6425,
1457.79 , 1710.095 , 2971.355 , 3561.9125, 4406.915 ,
1488.0375, 59.5925, 0. , 0. , 0. , 0. ])
streamMeanCOMPREF = streamMean
sigmas = np.array(sigmas)
plt.close()
plt.plot(sigmas, streamMeanCOMPREF)
plt.plot(sigmas[:-4]*4, allWRFsStreamMean[:-4]*16)
plt.title("COMPREF and WRFs mean max-response LOG spectra from Kong-Rey data")
plt.show()
"""
| cc0-1.0 | -1,526,503,526,948,186,400 | 38.967213 | 113 | 0.609516 | false |
Abdoctor/behave | behave/i18n.py | 1 | 27520 | # -*- coding: UTF-8 -*-
# -- FILE GENERATED BY: convert_i18n_yaml.py with i18n.yml
# pylint: disable=line-too-long
languages = \
{'ar': {'and': [u'*', u'\u0648'],
'background': [u'\u0627\u0644\u062e\u0644\u0641\u064a\u0629'],
'but': [u'*', u'\u0644\u0643\u0646'],
'examples': [u'\u0627\u0645\u062b\u0644\u0629'],
'feature': [u'\u062e\u0627\u0635\u064a\u0629'],
'given': [u'*', u'\u0628\u0641\u0631\u0636'],
'name': [u'Arabic'],
'native': [u'\u0627\u0644\u0639\u0631\u0628\u064a\u0629'],
'scenario': [u'\u0633\u064a\u0646\u0627\u0631\u064a\u0648'],
'scenario_outline': [u'\u0633\u064a\u0646\u0627\u0631\u064a\u0648 \u0645\u062e\u0637\u0637'],
'then': [u'*', u'\u0627\u0630\u0627\u064b', u'\u062b\u0645'],
'when': [u'*',
u'\u0645\u062a\u0649',
u'\u0639\u0646\u062f\u0645\u0627']},
'bg': {'and': [u'*', u'\u0418'],
'background': [u'\u041f\u0440\u0435\u0434\u0438\u0441\u0442\u043e\u0440\u0438\u044f'],
'but': [u'*', u'\u041d\u043e'],
'examples': [u'\u041f\u0440\u0438\u043c\u0435\u0440\u0438'],
'feature': [u'\u0424\u0443\u043d\u043a\u0446\u0438\u043e\u043d\u0430\u043b\u043d\u043e\u0441\u0442'],
'given': [u'*', u'\u0414\u0430\u0434\u0435\u043d\u043e'],
'name': [u'Bulgarian'],
'native': [u'\u0431\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438'],
'scenario': [u'\u0421\u0446\u0435\u043d\u0430\u0440\u0438\u0439'],
'scenario_outline': [u'\u0420\u0430\u043c\u043a\u0430 \u043d\u0430 \u0441\u0446\u0435\u043d\u0430\u0440\u0438\u0439'],
'then': [u'*', u'\u0422\u043e'],
'when': [u'*', u'\u041a\u043e\u0433\u0430\u0442\u043e']},
'ca': {'and': [u'*', u'I'],
'background': [u'Rerefons', u'Antecedents'],
'but': [u'*', u'Per\xf2'],
'examples': [u'Exemples'],
'feature': [u'Caracter\xedstica', u'Funcionalitat'],
'given': [u'*', u'Donat', u'Donada', u'At\xe8s', u'Atesa'],
'name': [u'Catalan'],
'native': [u'catal\xe0'],
'scenario': [u'Escenari'],
'scenario_outline': [u"Esquema de l'escenari"],
'then': [u'*', u'Aleshores', u'Cal'],
'when': [u'*', u'Quan']},
'cs': {'and': [u'*', u'A', u'A tak\xe9'],
'background': [u'Pozad\xed', u'Kontext'],
'but': [u'*', u'Ale'],
'examples': [u'P\u0159\xedklady'],
'feature': [u'Po\u017eadavek'],
'given': [u'*', u'Pokud'],
'name': [u'Czech'],
'native': [u'\u010cesky'],
'scenario': [u'Sc\xe9n\xe1\u0159'],
'scenario_outline': [u'N\xe1\u010drt Sc\xe9n\xe1\u0159e',
u'Osnova sc\xe9n\xe1\u0159e'],
'then': [u'*', u'Pak'],
'when': [u'*', u'Kdy\u017e']},
'cy-GB': {'and': [u'*', u'A'],
'background': [u'Cefndir'],
'but': [u'*', u'Ond'],
'examples': [u'Enghreifftiau'],
'feature': [u'Arwedd'],
'given': [u'*', u'Anrhegedig a'],
'name': [u'Welsh'],
'native': [u'Cymraeg'],
'scenario': [u'Scenario'],
'scenario_outline': [u'Scenario Amlinellol'],
'then': [u'*', u'Yna'],
'when': [u'*', u'Pryd']},
'da': {'and': [u'*', u'Og'],
'background': [u'Baggrund'],
'but': [u'*', u'Men'],
'examples': [u'Eksempler'],
'feature': [u'Egenskab'],
'given': [u'*', u'Givet'],
'name': [u'Danish'],
'native': [u'dansk'],
'scenario': [u'Scenarie'],
'scenario_outline': [u'Abstrakt Scenario'],
'then': [u'*', u'S\xe5'],
'when': [u'*', u'N\xe5r']},
'de': {'and': [u'*', u'Und'],
'background': [u'Grundlage'],
'but': [u'*', u'Aber'],
'examples': [u'Beispiele'],
'feature': [u'Funktionalit\xe4t'],
'given': [u'*', u'Angenommen', u'Gegeben sei'],
'name': [u'German'],
'native': [u'Deutsch'],
'scenario': [u'Szenario'],
'scenario_outline': [u'Szenariogrundriss'],
'then': [u'*', u'Dann'],
'when': [u'*', u'Wenn']},
'en': {'and': [u'*', u'And'],
'background': [u'Background'],
'but': [u'*', u'But'],
'examples': [u'Examples', u'Scenarios'],
'feature': [u'Feature'],
'given': [u'*', u'Given'],
'name': [u'English'],
'native': [u'English'],
'scenario': [u'Scenario'],
'scenario_outline': [u'Scenario Outline', u'Scenario Template'],
'then': [u'*', u'Then'],
'when': [u'*', u'When']},
'en-Scouse': {'and': [u'*', u'An'],
'background': [u'Dis is what went down'],
'but': [u'*', u'Buh'],
'examples': [u'Examples'],
'feature': [u'Feature'],
'given': [u'*', u'Givun', u'Youse know when youse got'],
'name': [u'Scouse'],
'native': [u'Scouse'],
'scenario': [u'The thing of it is'],
'scenario_outline': [u'Wharrimean is'],
'then': [u'*', u'Dun', u'Den youse gotta'],
'when': [u'*', u'Wun', u'Youse know like when']},
'en-au': {'and': [u'*', u'N'],
'background': [u'Background'],
'but': [u'*', u'Cept'],
'examples': [u'Cobber'],
'feature': [u'Crikey'],
'given': [u'*', u'Ya know how'],
'name': [u'Australian'],
'native': [u'Australian'],
'scenario': [u'Mate'],
'scenario_outline': [u'Blokes'],
'then': [u'*', u'Ya gotta'],
'when': [u'*', u'When']},
'en-lol': {'and': [u'*', u'AN'],
'background': [u'B4'],
'but': [u'*', u'BUT'],
'examples': [u'EXAMPLZ'],
'feature': [u'OH HAI'],
'given': [u'*', u'I CAN HAZ'],
'name': [u'LOLCAT'],
'native': [u'LOLCAT'],
'scenario': [u'MISHUN'],
'scenario_outline': [u'MISHUN SRSLY'],
'then': [u'*', u'DEN'],
'when': [u'*', u'WEN']},
'en-pirate': {'and': [u'*', u'Aye'],
'background': [u'Yo-ho-ho'],
'but': [u'*', u'Avast!'],
'examples': [u'Dead men tell no tales'],
'feature': [u'Ahoy matey!'],
'given': [u'*', u'Gangway!'],
'name': [u'Pirate'],
'native': [u'Pirate'],
'scenario': [u'Heave to'],
'scenario_outline': [u'Shiver me timbers'],
'then': [u'*', u'Let go and haul'],
'when': [u'*', u'Blimey!']},
'en-tx': {'and': [u'*', u"And y'all"],
'background': [u'Background'],
'but': [u'*', u"But y'all"],
'examples': [u'Examples'],
'feature': [u'Feature'],
'given': [u'*', u"Given y'all"],
'name': [u'Texan'],
'native': [u'Texan'],
'scenario': [u'Scenario'],
'scenario_outline': [u"All y'all"],
'then': [u'*', u"Then y'all"],
'when': [u'*', u"When y'all"]},
'eo': {'and': [u'*', u'Kaj'],
'background': [u'Fono'],
'but': [u'*', u'Sed'],
'examples': [u'Ekzemploj'],
'feature': [u'Trajto'],
'given': [u'*', u'Donita\u0135o'],
'name': [u'Esperanto'],
'native': [u'Esperanto'],
'scenario': [u'Scenaro'],
'scenario_outline': [u'Konturo de la scenaro'],
'then': [u'*', u'Do'],
'when': [u'*', u'Se']},
'es': {'and': [u'*', u'Y'],
'background': [u'Antecedentes'],
'but': [u'*', u'Pero'],
'examples': [u'Ejemplos'],
'feature': [u'Caracter\xedstica'],
'given': [u'*', u'Dado', u'Dada', u'Dados', u'Dadas'],
'name': [u'Spanish'],
'native': [u'espa\xf1ol'],
'scenario': [u'Escenario'],
'scenario_outline': [u'Esquema del escenario'],
'then': [u'*', u'Entonces'],
'when': [u'*', u'Cuando']},
'et': {'and': [u'*', u'Ja'],
'background': [u'Taust'],
'but': [u'*', u'Kuid'],
'examples': [u'Juhtumid'],
'feature': [u'Omadus'],
'given': [u'*', u'Eeldades'],
'name': [u'Estonian'],
'native': [u'eesti keel'],
'scenario': [u'Stsenaarium'],
'scenario_outline': [u'Raamstsenaarium'],
'then': [u'*', u'Siis'],
'when': [u'*', u'Kui']},
'fi': {'and': [u'*', u'Ja'],
'background': [u'Tausta'],
'but': [u'*', u'Mutta'],
'examples': [u'Tapaukset'],
'feature': [u'Ominaisuus'],
'given': [u'*', u'Oletetaan'],
'name': [u'Finnish'],
'native': [u'suomi'],
'scenario': [u'Tapaus'],
'scenario_outline': [u'Tapausaihio'],
'then': [u'*', u'Niin'],
'when': [u'*', u'Kun']},
'fr': {'and': [u'*', u'Et'],
'background': [u'Contexte'],
'but': [u'*', u'Mais'],
'examples': [u'Exemples'],
'feature': [u'Fonctionnalit\xe9'],
'given': [u'*',
u'Soit',
u'Etant donn\xe9',
u'Etant donn\xe9e',
u'Etant donn\xe9s',
u'Etant donn\xe9es',
u'\xc9tant donn\xe9',
u'\xc9tant donn\xe9e',
u'\xc9tant donn\xe9s',
u'\xc9tant donn\xe9es'],
'name': [u'French'],
'native': [u'fran\xe7ais'],
'scenario': [u'Sc\xe9nario'],
'scenario_outline': [u'Plan du sc\xe9nario', u'Plan du Sc\xe9nario'],
'then': [u'*', u'Alors'],
'when': [u'*', u'Quand', u'Lorsque', u"Lorsqu'<"]},
'he': {'and': [u'*', u'\u05d5\u05d2\u05dd'],
'background': [u'\u05e8\u05e7\u05e2'],
'but': [u'*', u'\u05d0\u05d1\u05dc'],
'examples': [u'\u05d3\u05d5\u05d2\u05de\u05d0\u05d5\u05ea'],
'feature': [u'\u05ea\u05db\u05d5\u05e0\u05d4'],
'given': [u'*', u'\u05d1\u05d4\u05d9\u05e0\u05ea\u05df'],
'name': [u'Hebrew'],
'native': [u'\u05e2\u05d1\u05e8\u05d9\u05ea'],
'scenario': [u'\u05ea\u05e8\u05d7\u05d9\u05e9'],
'scenario_outline': [u'\u05ea\u05d1\u05e0\u05d9\u05ea \u05ea\u05e8\u05d7\u05d9\u05e9'],
'then': [u'*', u'\u05d0\u05d6', u'\u05d0\u05d6\u05d9'],
'when': [u'*', u'\u05db\u05d0\u05e9\u05e8']},
'hr': {'and': [u'*', u'I'],
'background': [u'Pozadina'],
'but': [u'*', u'Ali'],
'examples': [u'Primjeri', u'Scenariji'],
'feature': [u'Osobina', u'Mogu\u0107nost', u'Mogucnost'],
'given': [u'*', u'Zadan', u'Zadani', u'Zadano'],
'name': [u'Croatian'],
'native': [u'hrvatski'],
'scenario': [u'Scenarij'],
'scenario_outline': [u'Skica', u'Koncept'],
'then': [u'*', u'Onda'],
'when': [u'*', u'Kada', u'Kad']},
'hu': {'and': [u'*', u'\xc9s'],
'background': [u'H\xe1tt\xe9r'],
'but': [u'*', u'De'],
'examples': [u'P\xe9ld\xe1k'],
'feature': [u'Jellemz\u0151'],
'given': [u'*', u'Amennyiben', u'Adott'],
'name': [u'Hungarian'],
'native': [u'magyar'],
'scenario': [u'Forgat\xf3k\xf6nyv'],
'scenario_outline': [u'Forgat\xf3k\xf6nyv v\xe1zlat'],
'then': [u'*', u'Akkor'],
'when': [u'*', u'Majd', u'Ha', u'Amikor']},
'id': {'and': [u'*', u'Dan'],
'background': [u'Dasar'],
'but': [u'*', u'Tapi'],
'examples': [u'Contoh'],
'feature': [u'Fitur'],
'given': [u'*', u'Dengan'],
'name': [u'Indonesian'],
'native': [u'Bahasa Indonesia'],
'scenario': [u'Skenario'],
'scenario_outline': [u'Skenario konsep'],
'then': [u'*', u'Maka'],
'when': [u'*', u'Ketika']},
'is': {'and': [u'*', u'Og'],
'background': [u'Bakgrunnur'],
'but': [u'*', u'En'],
'examples': [u'D\xe6mi', u'Atbur\xf0ar\xe1sir'],
'feature': [u'Eiginleiki'],
'given': [u'*', u'Ef'],
'name': [u'Icelandic'],
'native': [u'\xcdslenska'],
'scenario': [u'Atbur\xf0ar\xe1s'],
'scenario_outline': [u'L\xfdsing Atbur\xf0ar\xe1sar',
u'L\xfdsing D\xe6ma'],
'then': [u'*', u'\xde\xe1'],
'when': [u'*', u'\xdeegar']},
'it': {'and': [u'*', u'E'],
'background': [u'Contesto'],
'but': [u'*', u'Ma'],
'examples': [u'Esempi'],
'feature': [u'Funzionalit\xe0'],
'given': [u'*', u'Dato', u'Data', u'Dati', u'Date'],
'name': [u'Italian'],
'native': [u'italiano'],
'scenario': [u'Scenario'],
'scenario_outline': [u'Schema dello scenario'],
'then': [u'*', u'Allora'],
'when': [u'*', u'Quando']},
'ja': {'and': [u'*', u'\u304b\u3064<'],
'background': [u'\u80cc\u666f'],
'but': [u'*',
u'\u3057\u304b\u3057<',
u'\u4f46\u3057<',
u'\u305f\u3060\u3057<'],
'examples': [u'\u4f8b', u'\u30b5\u30f3\u30d7\u30eb'],
'feature': [u'\u30d5\u30a3\u30fc\u30c1\u30e3', u'\u6a5f\u80fd'],
'given': [u'*', u'\u524d\u63d0<'],
'name': [u'Japanese'],
'native': [u'\u65e5\u672c\u8a9e'],
'scenario': [u'\u30b7\u30ca\u30ea\u30aa'],
'scenario_outline': [u'\u30b7\u30ca\u30ea\u30aa\u30a2\u30a6\u30c8\u30e9\u30a4\u30f3',
u'\u30b7\u30ca\u30ea\u30aa\u30c6\u30f3\u30d7\u30ec\u30fc\u30c8',
u'\u30c6\u30f3\u30d7\u30ec',
u'\u30b7\u30ca\u30ea\u30aa\u30c6\u30f3\u30d7\u30ec'],
'then': [u'*', u'\u306a\u3089\u3070<'],
'when': [u'*', u'\u3082\u3057<']},
'ko': {'and': [u'*', u'\uadf8\ub9ac\uace0<'],
'background': [u'\ubc30\uacbd'],
'but': [u'*', u'\ud558\uc9c0\ub9cc<', u'\ub2e8<'],
'examples': [u'\uc608'],
'feature': [u'\uae30\ub2a5'],
'given': [u'*', u'\uc870\uac74<', u'\uba3c\uc800<'],
'name': [u'Korean'],
'native': [u'\ud55c\uad6d\uc5b4'],
'scenario': [u'\uc2dc\ub098\ub9ac\uc624'],
'scenario_outline': [u'\uc2dc\ub098\ub9ac\uc624 \uac1c\uc694'],
'then': [u'*', u'\uadf8\ub7ec\uba74<'],
'when': [u'*', u'\ub9cc\uc77c<', u'\ub9cc\uc57d<']},
'lt': {'and': [u'*', u'Ir'],
'background': [u'Kontekstas'],
'but': [u'*', u'Bet'],
'examples': [u'Pavyzd\u017eiai', u'Scenarijai', u'Variantai'],
'feature': [u'Savyb\u0117'],
'given': [u'*', u'Duota'],
'name': [u'Lithuanian'],
'native': [u'lietuvi\u0173 kalba'],
'scenario': [u'Scenarijus'],
'scenario_outline': [u'Scenarijaus \u0161ablonas'],
'then': [u'*', u'Tada'],
'when': [u'*', u'Kai']},
'lu': {'and': [u'*', u'an', u'a'],
'background': [u'Hannergrond'],
'but': [u'*', u'awer', u'm\xe4'],
'examples': [u'Beispiller'],
'feature': [u'Funktionalit\xe9it'],
'given': [u'*', u'ugeholl'],
'name': [u'Luxemburgish'],
'native': [u'L\xebtzebuergesch'],
'scenario': [u'Szenario'],
'scenario_outline': [u'Plang vum Szenario'],
'then': [u'*', u'dann'],
'when': [u'*', u'wann']},
'lv': {'and': [u'*', u'Un'],
'background': [u'Konteksts', u'Situ\u0101cija'],
'but': [u'*', u'Bet'],
'examples': [u'Piem\u0113ri', u'Paraugs'],
'feature': [u'Funkcionalit\u0101te', u'F\u012b\u010da'],
'given': [u'*', u'Kad'],
'name': [u'Latvian'],
'native': [u'latvie\u0161u'],
'scenario': [u'Scen\u0101rijs'],
'scenario_outline': [u'Scen\u0101rijs p\u0113c parauga'],
'then': [u'*', u'Tad'],
'when': [u'*', u'Ja']},
'nl': {'and': [u'*', u'En'],
'background': [u'Achtergrond'],
'but': [u'*', u'Maar'],
'examples': [u'Voorbeelden'],
'feature': [u'Functionaliteit'],
'given': [u'*', u'Gegeven', u'Stel'],
'name': [u'Dutch'],
'native': [u'Nederlands'],
'scenario': [u'Scenario'],
'scenario_outline': [u'Abstract Scenario'],
'then': [u'*', u'Dan'],
'when': [u'*', u'Als']},
'no': {'and': [u'*', u'Og'],
'background': [u'Bakgrunn'],
'but': [u'*', u'Men'],
'examples': [u'Eksempler'],
'feature': [u'Egenskap'],
'given': [u'*', u'Gitt'],
'name': [u'Norwegian'],
'native': [u'norsk'],
'scenario': [u'Scenario'],
'scenario_outline': [u'Scenariomal', u'Abstrakt Scenario'],
'then': [u'*', u'S\xe5'],
'when': [u'*', u'N\xe5r']},
'pl': {'and': [u'*', u'Oraz', u'I'],
'background': [u'Za\u0142o\u017cenia'],
'but': [u'*', u'Ale'],
'examples': [u'Przyk\u0142ady'],
'feature': [u'W\u0142a\u015bciwo\u015b\u0107'],
'given': [u'*', u'Zak\u0142adaj\u0105c', u'Maj\u0105c'],
'name': [u'Polish'],
'native': [u'polski'],
'scenario': [u'Scenariusz'],
'scenario_outline': [u'Szablon scenariusza'],
'then': [u'*', u'Wtedy'],
'when': [u'*', u'Je\u017celi', u'Je\u015bli']},
'pt': {'and': [u'*', u'E'],
'background': [u'Contexto'],
'but': [u'*', u'Mas'],
'examples': [u'Exemplos'],
'feature': [u'Funcionalidade'],
'given': [u'*', u'Dado', u'Dada', u'Dados', u'Dadas'],
'name': [u'Portuguese'],
'native': [u'portugu\xeas'],
'scenario': [u'Cen\xe1rio', u'Cenario'],
'scenario_outline': [u'Esquema do Cen\xe1rio', u'Esquema do Cenario'],
'then': [u'*', u'Ent\xe3o', u'Entao'],
'when': [u'*', u'Quando']},
'ro': {'and': [u'*', u'Si', u'\u0218i', u'\u015ei'],
'background': [u'Context'],
'but': [u'*', u'Dar'],
'examples': [u'Exemple'],
'feature': [u'Functionalitate',
u'Func\u021bionalitate',
u'Func\u0163ionalitate'],
'given': [u'*',
u'Date fiind',
u'Dat fiind',
u'Dati fiind',
u'Da\u021bi fiind',
u'Da\u0163i fiind'],
'name': [u'Romanian'],
'native': [u'rom\xe2n\u0103'],
'scenario': [u'Scenariu'],
'scenario_outline': [u'Structura scenariu',
u'Structur\u0103 scenariu'],
'then': [u'*', u'Atunci'],
'when': [u'*', u'Cand', u'C\xe2nd']},
'ru': {'and': [u'*',
u'\u0418',
u'\u041a \u0442\u043e\u043c\u0443 \u0436\u0435'],
'background': [u'\u041f\u0440\u0435\u0434\u044b\u0441\u0442\u043e\u0440\u0438\u044f',
u'\u041a\u043e\u043d\u0442\u0435\u043a\u0441\u0442'],
'but': [u'*', u'\u041d\u043e', u'\u0410'],
'examples': [u'\u041f\u0440\u0438\u043c\u0435\u0440\u044b'],
'feature': [u'\u0424\u0443\u043d\u043a\u0446\u0438\u044f',
u'\u0424\u0443\u043d\u043a\u0446\u0438\u043e\u043d\u0430\u043b',
u'\u0421\u0432\u043e\u0439\u0441\u0442\u0432\u043e'],
'given': [u'*',
u'\u0414\u043e\u043f\u0443\u0441\u0442\u0438\u043c',
u'\u0414\u0430\u043d\u043e',
u'\u041f\u0443\u0441\u0442\u044c'],
'name': [u'Russian'],
'native': [u'\u0440\u0443\u0441\u0441\u043a\u0438\u0439'],
'scenario': [u'\u0421\u0446\u0435\u043d\u0430\u0440\u0438\u0439'],
'scenario_outline': [u'\u0421\u0442\u0440\u0443\u043a\u0442\u0443\u0440\u0430 \u0441\u0446\u0435\u043d\u0430\u0440\u0438\u044f'],
'then': [u'*', u'\u0422\u043e', u'\u0422\u043e\u0433\u0434\u0430'],
'when': [u'*',
u'\u0415\u0441\u043b\u0438',
u'\u041a\u043e\u0433\u0434\u0430']},
'sk': {'and': [u'*', u'A'],
'background': [u'Pozadie'],
'but': [u'*', u'Ale'],
'examples': [u'Pr\xedklady'],
'feature': [u'Po\u017eiadavka'],
'given': [u'*', u'Pokia\u013e'],
'name': [u'Slovak'],
'native': [u'Slovensky'],
'scenario': [u'Scen\xe1r'],
'scenario_outline': [u'N\xe1\u010drt Scen\xe1ru'],
'then': [u'*', u'Tak'],
'when': [u'*', u'Ke\u010f']},
'sr-Cyrl': {'and': [u'*', u'\u0418'],
'background': [u'\u041a\u043e\u043d\u0442\u0435\u043a\u0441\u0442',
u'\u041e\u0441\u043d\u043e\u0432\u0430',
u'\u041f\u043e\u0437\u0430\u0434\u0438\u043d\u0430'],
'but': [u'*', u'\u0410\u043b\u0438'],
'examples': [u'\u041f\u0440\u0438\u043c\u0435\u0440\u0438',
u'\u0421\u0446\u0435\u043d\u0430\u0440\u0438\u0458\u0438'],
'feature': [u'\u0424\u0443\u043d\u043a\u0446\u0438\u043e\u043d\u0430\u043b\u043d\u043e\u0441\u0442',
u'\u041c\u043e\u0433\u0443\u045b\u043d\u043e\u0441\u0442',
u'\u041e\u0441\u043e\u0431\u0438\u043d\u0430'],
'given': [u'*',
u'\u0417\u0430\u0434\u0430\u0442\u043e',
u'\u0417\u0430\u0434\u0430\u0442\u0435',
u'\u0417\u0430\u0434\u0430\u0442\u0438'],
'name': [u'Serbian'],
'native': [u'\u0421\u0440\u043f\u0441\u043a\u0438'],
'scenario': [u'\u0421\u0446\u0435\u043d\u0430\u0440\u0438\u043e',
u'\u041f\u0440\u0438\u043c\u0435\u0440'],
'scenario_outline': [u'\u0421\u0442\u0440\u0443\u043a\u0442\u0443\u0440\u0430 \u0441\u0446\u0435\u043d\u0430\u0440\u0438\u0458\u0430',
u'\u0421\u043a\u0438\u0446\u0430',
u'\u041a\u043e\u043d\u0446\u0435\u043f\u0442'],
'then': [u'*', u'\u041e\u043d\u0434\u0430'],
'when': [u'*',
u'\u041a\u0430\u0434\u0430',
u'\u041a\u0430\u0434']},
'sr-Latn': {'and': [u'*', u'I'],
'background': [u'Kontekst', u'Osnova', u'Pozadina'],
'but': [u'*', u'Ali'],
'examples': [u'Primeri', u'Scenariji'],
'feature': [u'Funkcionalnost',
u'Mogu\u0107nost',
u'Mogucnost',
u'Osobina'],
'given': [u'*', u'Zadato', u'Zadate', u'Zatati'],
'name': [u'Serbian (Latin)'],
'native': [u'Srpski (Latinica)'],
'scenario': [u'Scenario', u'Primer'],
'scenario_outline': [u'Struktura scenarija',
u'Skica',
u'Koncept'],
'then': [u'*', u'Onda'],
'when': [u'*', u'Kada', u'Kad']},
'sv': {'and': [u'*', u'Och'],
'background': [u'Bakgrund'],
'but': [u'*', u'Men'],
'examples': [u'Exempel'],
'feature': [u'Egenskap'],
'given': [u'*', u'Givet'],
'name': [u'Swedish'],
'native': [u'Svenska'],
'scenario': [u'Scenario'],
'scenario_outline': [u'Abstrakt Scenario', u'Scenariomall'],
'then': [u'*', u'S\xe5'],
'when': [u'*', u'N\xe4r']},
'tr': {'and': [u'*', u'Ve'],
'background': [u'Ge\xe7mi\u015f'],
'but': [u'*', u'Fakat', u'Ama'],
'examples': [u'\xd6rnekler'],
'feature': [u'\xd6zellik'],
'given': [u'*', u'Diyelim ki'],
'name': [u'Turkish'],
'native': [u'T\xfcrk\xe7e'],
'scenario': [u'Senaryo'],
'scenario_outline': [u'Senaryo tasla\u011f\u0131'],
'then': [u'*', u'O zaman'],
'when': [u'*', u'E\u011fer ki']},
'uk': {'and': [u'*',
u'\u0406',
u'\u0410 \u0442\u0430\u043a\u043e\u0436',
u'\u0422\u0430'],
'background': [u'\u041f\u0435\u0440\u0435\u0434\u0443\u043c\u043e\u0432\u0430'],
'but': [u'*', u'\u0410\u043b\u0435'],
'examples': [u'\u041f\u0440\u0438\u043a\u043b\u0430\u0434\u0438'],
'feature': [u'\u0424\u0443\u043d\u043a\u0446\u0456\u043e\u043d\u0430\u043b'],
'given': [u'*',
u'\u041f\u0440\u0438\u043f\u0443\u0441\u0442\u0438\u043c\u043e',
u'\u041f\u0440\u0438\u043f\u0443\u0441\u0442\u0438\u043c\u043e, \u0449\u043e',
u'\u041d\u0435\u0445\u0430\u0439',
u'\u0414\u0430\u043d\u043e'],
'name': [u'Ukrainian'],
'native': [u'\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430'],
'scenario': [u'\u0421\u0446\u0435\u043d\u0430\u0440\u0456\u0439'],
'scenario_outline': [u'\u0421\u0442\u0440\u0443\u043a\u0442\u0443\u0440\u0430 \u0441\u0446\u0435\u043d\u0430\u0440\u0456\u044e'],
'then': [u'*', u'\u0422\u043e', u'\u0422\u043e\u0434\u0456'],
'when': [u'*',
u'\u042f\u043a\u0449\u043e',
u'\u041a\u043e\u043b\u0438']},
'uz': {'and': [u'*', u'\u0412\u0430'],
'background': [u'\u0422\u0430\u0440\u0438\u0445'],
'but': [u'*',
u'\u041b\u0435\u043a\u0438\u043d',
u'\u0411\u0438\u0440\u043e\u043a',
u'\u0410\u043c\u043c\u043e'],
'examples': [u'\u041c\u0438\u0441\u043e\u043b\u043b\u0430\u0440'],
'feature': [u'\u0424\u0443\u043d\u043a\u0446\u0438\u043e\u043d\u0430\u043b'],
'given': [u'*', u'\u0410\u0433\u0430\u0440'],
'name': [u'Uzbek'],
'native': [u'\u0423\u0437\u0431\u0435\u043a\u0447\u0430'],
'scenario': [u'\u0421\u0446\u0435\u043d\u0430\u0440\u0438\u0439'],
'scenario_outline': [u'\u0421\u0446\u0435\u043d\u0430\u0440\u0438\u0439 \u0441\u0442\u0440\u0443\u043a\u0442\u0443\u0440\u0430\u0441\u0438'],
'then': [u'*', u'\u0423\u043d\u0434\u0430'],
'when': [u'*', u'\u0410\u0433\u0430\u0440']},
'vi': {'and': [u'*', u'V\xe0'],
'background': [u'B\u1ed1i c\u1ea3nh'],
'but': [u'*', u'Nh\u01b0ng'],
'examples': [u'D\u1eef li\u1ec7u'],
'feature': [u'T\xednh n\u0103ng'],
'given': [u'*', u'Bi\u1ebft', u'Cho'],
'name': [u'Vietnamese'],
'native': [u'Ti\u1ebfng Vi\u1ec7t'],
'scenario': [u'T\xecnh hu\u1ed1ng', u'K\u1ecbch b\u1ea3n'],
'scenario_outline': [u'Khung t\xecnh hu\u1ed1ng',
u'Khung k\u1ecbch b\u1ea3n'],
'then': [u'*', u'Th\xec'],
'when': [u'*', u'Khi']},
'zh-CN': {'and': [u'*', u'\u800c\u4e14<'],
'background': [u'\u80cc\u666f'],
'but': [u'*', u'\u4f46\u662f<'],
'examples': [u'\u4f8b\u5b50'],
'feature': [u'\u529f\u80fd'],
'given': [u'*', u'\u5047\u5982<'],
'name': [u'Chinese simplified'],
'native': [u'\u7b80\u4f53\u4e2d\u6587'],
'scenario': [u'\u573a\u666f'],
'scenario_outline': [u'\u573a\u666f\u5927\u7eb2'],
'then': [u'*', u'\u90a3\u4e48<'],
'when': [u'*', u'\u5f53<']},
'zh-TW': {'and': [u'*', u'\u800c\u4e14<', u'\u4e26\u4e14<'],
'background': [u'\u80cc\u666f'],
'but': [u'*', u'\u4f46\u662f<'],
'examples': [u'\u4f8b\u5b50'],
'feature': [u'\u529f\u80fd'],
'given': [u'*', u'\u5047\u8a2d<'],
'name': [u'Chinese traditional'],
'native': [u'\u7e41\u9ad4\u4e2d\u6587'],
'scenario': [u'\u5834\u666f', u'\u5287\u672c'],
'scenario_outline': [u'\u5834\u666f\u5927\u7db1',
u'\u5287\u672c\u5927\u7db1'],
'then': [u'*', u'\u90a3\u9ebc<'],
'when': [u'*', u'\u7576<']}}
| bsd-2-clause | -8,859,400,245,196,874,000 | 43.820847 | 149 | 0.470712 | false |
ingwinlu/simpleMediaCenter | test/test_AutoPlayerSelect.py | 1 | 2359 | from simpleMediaCenter.interface.Interface import InterfaceListable
from simpleMediaCenter.player.Omxplayer import Omxplayer
from simpleMediaCenter.player.Twitchplayer import Twitchplayer
from simpleMediaCenter.player.Youtubeplayer import Youtubeplayer
from simpleMediaCenter.browser.Browser import *
import logging
import unittest
class TestAutoPlayerSelect(unittest.TestCase):
def test_auto_select(self):
logging.info("creating player objects")
omxplayer = Omxplayer("-o both")
twitchplayer = Twitchplayer("-o both")
youtubeplayer = Youtubeplayer("-o both")
playerlist = InterfaceListable([omxplayer,twitchplayer, youtubeplayer])
logging.info("creating browser objects")
fileBrowser = FileBrowser()
twitchWinluBrowser = TwitchBrowser('winlu')
twitchKillerkakaduBrowser = TwitchBrowser('killerkakadu')
youtubeBrowser = YoutubeBrowser()
browserList = InterfaceListable([fileBrowser,twitchWinluBrowser,twitchKillerkakaduBrowser,youtubeBrowser])
browserList.setActive(0)
for supportedPlayer in browserList.getActive().getSupportedPlayers():
id = playerlist.getIDfromName(supportedPlayer)
if (id is not None):
break
self.assertEqual(id,0)
browserList.setActive(1)
for supportedPlayer in browserList.getActive().getSupportedPlayers():
id = playerlist.getIDfromName(supportedPlayer)
if (id is not None):
break
self.assertEqual(id,1)
browserList.setActive(2)
for supportedPlayer in browserList.getActive().getSupportedPlayers():
id = playerlist.getIDfromName(supportedPlayer)
if (id is not None):
break
self.assertEqual(id,1)
browserList.setActive(3)
for supportedPlayer in browserList.getActive().getSupportedPlayers():
id = playerlist.getIDfromName(supportedPlayer)
if (id is not None):
break
self.assertEqual(id,2)
def suite(self):
testSuite = unittest.TestSuite()
testSuite.addTest(unittest.makeSuite(TestAutoPlayerSelect))
return testSuite
| gpl-2.0 | -2,013,993,086,357,953,300 | 35.859375 | 114 | 0.64858 | false |
matthiaskrgr/cppcheck | addons/naming.py | 1 | 2383 | #!/usr/bin/env python
#
# cppcheck addon for naming conventions
#
# Example usage (variable name must start with lowercase, function name must start with uppercase):
# $ cppcheck --dump path-to-src/
# $ python addons/naming.py --var='[a-z].*' --function='[A-Z].*' path-to-src/*.dump
#
import cppcheckdata
import sys
import re
RE_VARNAME = None
RE_PRIVATE_MEMBER_VARIABLE = None
RE_FUNCTIONNAME = None
for arg in sys.argv[1:]:
if arg[:6] == '--var=':
RE_VARNAME = arg[6:]
elif arg.startswith('--private-member-variable='):
RE_PRIVATE_MEMBER_VARIABLE = arg[arg.find('=')+1:]
elif arg[:11] == '--function=':
RE_FUNCTIONNAME = arg[11:]
FoundError = False
def reportError(token, severity, msg):
global FoundError
FoundError = True
sys.stderr.write(
'[' + token.file + ':' + str(token.linenr) + '] (' + severity + ') naming.py: ' + msg + '\n')
for arg in sys.argv[1:]:
if not arg[-5:] == '.dump':
continue
print('Checking ' + arg + '...')
data = cppcheckdata.parsedump(arg)
for cfg in data.configurations:
if len(data.configurations) > 1:
print('Checking ' + arg + ', config "' + cfg.name + '"...')
if RE_VARNAME:
for var in cfg.variables:
res = re.match(RE_VARNAME, var.nameToken.str)
if not res:
reportError(var.typeStartToken, 'style', 'Variable ' +
var.nameToken.str + ' violates naming convention')
if RE_PRIVATE_MEMBER_VARIABLE:
for var in cfg.variables:
if (var.access is None) or var.access != 'Private':
continue
res = re.match(RE_PRIVATE_MEMBER_VARIABLE, var.nameToken.str)
if not res:
reportError(var.typeStartToken, 'style', 'Private member variable ' +
var.nameToken.str + ' violates naming convention')
if RE_FUNCTIONNAME:
for scope in cfg.scopes:
if scope.type == 'Function':
res = re.match(RE_FUNCTIONNAME, scope.className)
if not res:
reportError(
scope.bodyStart, 'style', 'Function ' + scope.className + ' violates naming convention')
if FoundError:
print('FoundError')
sys.exit(1)
| gpl-3.0 | -6,974,778,811,751,254,000 | 34.567164 | 116 | 0.5577 | false |
scbzyhx/sdn_access_network | log_info.py | 1 | 1618 | # Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License
import logging
from ryu.base import app_manager
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from events import FlowRateEvent,FlowEvent
class LOG_INFO(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(LOG_INFO, self).__init__(*args, **kwargs)
        self.rate_logger = open("rate.log", 'w')
        self.flow_logger = open("flow.log", 'w')
if self.CONF.enable_debugger:
self.logger.setLevel(logging.DEBUG)
@set_ev_cls(FlowRateEvent)
def flowrate_handler(self, ev):
self.rate_logger.write("%s\n" % ev)
self.rate_logger.flush()
@set_ev_cls(FlowEvent)
def flowevent_handler(self,ev):
self.flow_logger.write("%s\n" % ev)
self.flow_logger.flush()
def __del__(self):
if self.rate_logger is not None:
self.rate_logger.close()
if self.flow_logger is not None:
self.flow_logger.close()
| gpl-2.0 | -2,271,025,446,576,273,400 | 33.425532 | 69 | 0.68047 | false |
enthought/traitsgui | enthought/pyface/action/action_item.py | 1 | 4849 | #------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
""" An action manager item that represents an actual action. """
# Enthought library imports.
from enthought.traits.api import Any, Instance, List, Property, Str
# Local imports.
from action import Action
from action_manager_item import ActionManagerItem
# Import the toolkit specific versions of the internal classes.
from enthought.pyface.toolkit import toolkit_object
_MenuItem = toolkit_object('action.action_item:_MenuItem')
_Tool = toolkit_object('action.action_item:_Tool')
_PaletteTool = toolkit_object('action.action_item:_PaletteTool')
class ActionItem(ActionManagerItem):
""" An action manager item that represents an actual action. """
#### 'ActionManagerItem' interface ########################################
# The item's unique identifier ('unique' in this case means unique within
# its group).
id = Property(Str)
#### 'ActionItem' interface ###############################################
# The action!
action = Instance(Action)
# The toolkit specific control created for this item.
control = Any
# The toolkit specific Id of the control created for this item.
#
# We have to keep the Id as well as the control because wx tool bar tools
# are created as 'wxObjectPtr's which do not have Ids, and the Id is
# required to manipulate the state of a tool via the tool bar 8^(
# FIXME v3: Why is this part of the public interface?
control_id = Any
#### Private interface ####################################################
# All of the internal instances that wrap this item.
_wrappers = List(Any)
###########################################################################
# 'ActionManagerItem' interface.
###########################################################################
#### Trait properties #####################################################
def _get_id(self):
""" Return's the item's Id. """
return self.action.id
#### Trait change handlers ################################################
def _enabled_changed(self, trait_name, old, new):
""" Static trait change handler. """
self.action.enabled = new
return
def _visible_changed(self, trait_name, old, new):
""" Static trait change handler. """
        self.action.visible = new
return
###########################################################################
# 'ActionItem' interface.
###########################################################################
def add_to_menu(self, parent, menu, controller):
""" Adds the item to a menu. """
if (controller is None) or controller.can_add_to_menu(self.action):
wrapper = _MenuItem(parent, menu, self, controller)
# fixme: Martin, who uses this information?
if controller is None:
self.control = wrapper.control
self.control_id = wrapper.control_id
self._wrappers.append(wrapper)
return
def add_to_toolbar(self, parent, tool_bar, image_cache, controller,
show_labels=True):
""" Adds the item to a tool bar. """
if (controller is None) or controller.can_add_to_toolbar(self.action):
wrapper = _Tool(
parent, tool_bar, image_cache, self, controller, show_labels
)
# fixme: Martin, who uses this information?
if controller is None:
self.control = wrapper.control
self.control_id = wrapper.control_id
self._wrappers.append(wrapper)
return
def add_to_palette(self, tool_palette, image_cache, show_labels=True):
""" Adds the item to a tool palette. """
wrapper = _PaletteTool(tool_palette, image_cache, self, show_labels)
self._wrappers.append(wrapper)
return
def destroy(self):
""" Called when the action is no longer required.
By default this method calls 'destroy' on the action itself.
"""
self.action.destroy()
return
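    # Usage sketch (not part of the original module): an ActionItem wraps a single
    # Action and is added to a menu, tool bar or palette by an action manager. The
    # Action keyword arguments below are assumptions shown for illustration only.
    #
    #   item = ActionItem(action=Action(name='&Save', on_perform=do_save))
    #   item.add_to_menu(parent, menu, controller=None)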
#### EOF ######################################################################
| bsd-3-clause | 2,791,425,329,490,104,300 | 32.673611 | 79 | 0.541761 | false |
simleo/pydoop-features | pyfeatures/app/deserialize.py | 1 | 2362 | # BEGIN_COPYRIGHT
#
# Copyright (C) 2014-2017 Open Microscopy Environment:
# - University of Dundee
# - CRS4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""\
Deserialize BioImgPlane records.
"""
import sys
import os
import warnings
from contextlib import closing
import errno
try:
from pyavroc import AvroFileReader
except ImportError:
from pyfeatures.pyavroc_emu import AvroFileReader
warnings.warn("pyavroc not found, using standard avro lib\n")
import numpy as np
from libtiff import TIFF
from pyfeatures.bioimg import BioImgPlane
# no schema needed for deserialization
def iterplanes(avro_file):
with open(avro_file, 'rb') as f:
reader = AvroFileReader(f)
for r in reader:
yield BioImgPlane(r)
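# Usage sketch (not part of the original module): iterplanes yields one BioImgPlane
# per Avro record, so callers can stream planes without loading the whole container.
# The file name below is hypothetical.
#
#   for plane in iterplanes('planes.avro'):
#       pixels = plane.get_xy()  # 2D pixel array for this (z, c, t) plane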
def run(logger, args, extra_argv=None):
try:
os.makedirs(args.out_dir)
except OSError as e:
if e.errno != errno.EEXIST:
sys.exit('Cannot create output dir: %s' % e)
for p in iterplanes(args.avro_file):
pixels = p.get_xy()
out_tag = '%s-z%04d-c%04d-t%04d' % (p.name, p.z, p.c, p.t)
logger.info("writing plane %s", out_tag)
if args.img:
out_fn = os.path.join(args.out_dir, '%s.tif' % out_tag)
with closing(TIFF.open(out_fn, mode="w")) as fo:
fo.write_image(pixels)
else:
out_fn = os.path.join(args.out_dir, '%s.npy' % out_tag)
np.save(out_fn, pixels)
return 0
def add_parser(subparsers):
parser = subparsers.add_parser("deserialize", description=__doc__)
parser.add_argument('avro_file', metavar='AVRO_FILE')
parser.add_argument('out_dir', metavar='OUT_DIR')
parser.add_argument('--img', action='store_true',
help='write images instead of .npy dumps')
parser.set_defaults(func=run)
return parser
| apache-2.0 | 2,729,805,358,433,821,700 | 29.675325 | 77 | 0.664691 | false |
lakshmi-kannan/st2 | st2common/st2common/models/api/action.py | 1 | 24297 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from st2common.util import isotime
from st2common.util import schema as util_schema
from st2common import log as logging
from st2common.constants.pack import DEFAULT_PACK_NAME
from st2common.models.api.base import BaseAPI
from st2common.models.api.base import APIUIDMixin
from st2common.models.api.tag import TagsHelper
from st2common.models.api.notification import (NotificationSubSchemaAPI, NotificationsHelper)
from st2common.models.db.action import ActionDB
from st2common.models.db.actionalias import ActionAliasDB
from st2common.models.db.executionstate import ActionExecutionStateDB
from st2common.models.db.liveaction import LiveActionDB
from st2common.models.db.runner import RunnerTypeDB
from st2common.constants.action import LIVEACTION_STATUSES
from st2common.models.system.common import ResourceReference
__all__ = [
'ActionAPI',
'ActionCreateAPI',
'LiveActionAPI',
'LiveActionCreateAPI',
'RunnerTypeAPI',
'AliasExecutionAPI',
'ActionAliasAPI',
'ActionAliasMatchAPI'
]
LOG = logging.getLogger(__name__)
class RunnerTypeAPI(BaseAPI):
"""
    The representation of a RunnerType in the system. A RunnerType
has a one-to-one mapping to a particular ActionRunner implementation.
"""
model = RunnerTypeDB
schema = {
"title": "Runner",
"description": "A handler for a specific type of actions.",
"type": "object",
"properties": {
"id": {
"description": "The unique identifier for the action runner.",
"type": "string",
"default": None
},
"uid": {
"type": "string"
},
"name": {
"description": "The name of the action runner.",
"type": "string",
"required": True
},
"description": {
"description": "The description of the action runner.",
"type": "string"
},
"enabled": {
"description": "Enable or disable the action runner.",
"type": "boolean",
"default": True
},
"runner_module": {
"description": "The python module that implements the "
"action runner for this type.",
"type": "string",
"required": True
},
"query_module": {
"description": "The python module that implements the "
"results tracker (querier) for the runner.",
"type": "string",
"required": False
},
"runner_parameters": {
"description": "Input parameters for the action runner.",
"type": "object",
"patternProperties": {
"^\w+$": util_schema.get_action_parameters_schema()
},
'additionalProperties': False
}
},
"additionalProperties": False
}
def __init__(self, **kw):
# Ideally, you should not do that. You should not redefine __init__ to validate and then set
# default values, instead you should define defaults in schema and use BaseAPI __init__
# validator to unwrap them. The problem here is that draft schema also contains default
# values and we don't want them to be unwrapped at the same time. I've tried to remove the
# default values from draft schema, but, either because of a bug or some weird intention, it
# has continued to resolve $ref'erenced properties against the initial draft schema, not the
# modified one
for key, value in kw.items():
setattr(self, key, value)
if not hasattr(self, 'runner_parameters'):
setattr(self, 'runner_parameters', dict())
@classmethod
def to_model(cls, runner_type):
name = runner_type.name
description = runner_type.description
enabled = getattr(runner_type, 'enabled', True)
runner_module = str(runner_type.runner_module)
runner_parameters = getattr(runner_type, 'runner_parameters', dict())
query_module = getattr(runner_type, 'query_module', None)
model = cls.model(name=name, description=description, enabled=enabled,
runner_module=runner_module, runner_parameters=runner_parameters,
query_module=query_module)
return model
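# Illustrative sketch (not part of the original module): per the schema above, a
# runner type is validated from a plain dict of keyword arguments. All values here
# are invented for illustration.
#
#   RunnerTypeAPI(name='run-local', description='Local shell runner', enabled=True,
#                 runner_module='st2.runners.local',
#                 runner_parameters={'cmd': {'type': 'string'}})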
class ActionAPI(BaseAPI, APIUIDMixin):
"""
The system entity that represents a Stack Action/Automation in the system.
"""
model = ActionDB
schema = {
"title": "Action",
"description": "An activity that happens as a response to the external event.",
"type": "object",
"properties": {
"id": {
"description": "The unique identifier for the action.",
"type": "string"
},
"ref": {
"description": "System computed user friendly reference for the action. \
Provided value will be overridden by computed value.",
"type": "string"
},
"uid": {
"type": "string"
},
"name": {
"description": "The name of the action.",
"type": "string",
"required": True
},
"description": {
"description": "The description of the action.",
"type": "string"
},
"enabled": {
"description": "Enable or disable the action from invocation.",
"type": "boolean",
"default": True
},
"runner_type": {
"description": "The type of runner that executes the action.",
"type": "string",
"required": True
},
"entry_point": {
"description": "The entry point for the action.",
"type": "string",
"default": ""
},
"pack": {
"description": "The content pack this action belongs to.",
"type": "string",
"default": DEFAULT_PACK_NAME
},
"parameters": {
"description": "Input parameters for the action.",
"type": "object",
"patternProperties": {
"^\w+$": util_schema.get_action_parameters_schema()
},
'additionalProperties': False,
"default": {}
},
"tags": {
"description": "User associated metadata assigned to this object.",
"type": "array",
"items": {"type": "object"}
},
"notify": {
"description": "Notification settings for action.",
"type": "object",
"properties": {
"on-complete": NotificationSubSchemaAPI,
"on-failure": NotificationSubSchemaAPI,
"on-success": NotificationSubSchemaAPI
},
"additionalProperties": False
}
},
"additionalProperties": False
}
def __init__(self, **kw):
for key, value in kw.items():
setattr(self, key, value)
if not hasattr(self, 'parameters'):
setattr(self, 'parameters', dict())
if not hasattr(self, 'entry_point'):
setattr(self, 'entry_point', '')
@classmethod
def from_model(cls, model, mask_secrets=False):
action = cls._from_model(model)
action['runner_type'] = action['runner_type']['name']
action['tags'] = TagsHelper.from_model(model.tags)
if getattr(model, 'notify', None):
action['notify'] = NotificationsHelper.from_model(model.notify)
return cls(**action)
@classmethod
def to_model(cls, action):
name = getattr(action, 'name', None)
description = getattr(action, 'description', None)
enabled = bool(getattr(action, 'enabled', True))
entry_point = str(action.entry_point)
pack = str(action.pack)
runner_type = {'name': str(action.runner_type)}
parameters = getattr(action, 'parameters', dict())
tags = TagsHelper.to_model(getattr(action, 'tags', []))
ref = ResourceReference.to_string_reference(pack=pack, name=name)
if getattr(action, 'notify', None):
notify = NotificationsHelper.to_model(action.notify)
else:
# We use embedded document model for ``notify`` in action model. If notify is
# set notify to None, Mongoengine interprets ``None`` as unmodified
# field therefore doesn't delete the embedded document. Therefore, we need
# to use an empty document.
notify = NotificationsHelper.to_model({})
model = cls.model(name=name, description=description, enabled=enabled,
entry_point=entry_point, pack=pack, runner_type=runner_type,
tags=tags, parameters=parameters, notify=notify,
ref=ref)
return model
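# Illustrative sketch (not part of the original module): a minimal action payload
# matching the schema above. The pack and runner names are invented examples.
#
#   ActionAPI(name='greet', pack='examples', runner_type='run-local', entry_point='',
#             enabled=True, parameters={'name': {'type': 'string', 'default': 'world'}})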
class ActionCreateAPI(ActionAPI, APIUIDMixin):
"""
API model for create action operation.
"""
schema = copy.deepcopy(ActionAPI.schema)
schema['properties']['data_files'] = {
'description': 'Optional action script and data files which are written to the filesystem.',
'type': 'array',
'items': {
'type': 'object',
'properties': {
'file_path': {
'type': 'string',
'required': True
},
'content': {
'type': 'string',
'required': True
},
},
'additionalProperties': False
},
'default': []
}
class ActionUpdateAPI(ActionAPI, APIUIDMixin):
"""
API model for update action operation.
"""
schema = copy.deepcopy(ActionCreateAPI.schema)
del schema['properties']['pack']['default']
class LiveActionAPI(BaseAPI):
"""The system entity that represents the execution of a Stack Action/Automation
in the system.
"""
model = LiveActionDB
schema = {
"title": "liveaction",
"description": "An execution of an action.",
"type": "object",
"properties": {
"id": {
"description": "The unique identifier for the action execution.",
"type": "string"
},
"status": {
"description": "The current status of the action execution.",
"type": "string",
"enum": LIVEACTION_STATUSES
},
"start_timestamp": {
"description": "The start time when the action is executed.",
"type": "string",
"pattern": isotime.ISO8601_UTC_REGEX
},
"end_timestamp": {
"description": "The timestamp when the action has finished.",
"type": "string",
"pattern": isotime.ISO8601_UTC_REGEX
},
"action": {
"description": "Reference to the action to be executed.",
"type": "string",
"required": True
},
"parameters": {
"description": "Input parameters for the action.",
"type": "object",
"patternProperties": {
"^\w+$": {
"anyOf": [
{"type": "array"},
{"type": "boolean"},
{"type": "integer"},
{"type": "number"},
{"type": "object"},
{"type": "string"},
{"type": "null"}
]
}
},
'additionalProperties': False
},
"result": {
"anyOf": [{"type": "array"},
{"type": "boolean"},
{"type": "integer"},
{"type": "number"},
{"type": "object"},
{"type": "string"}]
},
"context": {
"type": "object"
},
"callback": {
"type": "object"
},
"runner_info": {
"type": "object"
},
"notify": {
"description": "Notification settings for liveaction.",
"type": "object",
"properties": {
"on-complete": NotificationSubSchemaAPI,
"on-failure": NotificationSubSchemaAPI,
"on-success": NotificationSubSchemaAPI
},
"additionalProperties": False
}
},
"additionalProperties": False
}
@classmethod
def from_model(cls, model, mask_secrets=False):
doc = super(cls, cls)._from_model(model, mask_secrets=mask_secrets)
if model.start_timestamp:
doc['start_timestamp'] = isotime.format(model.start_timestamp, offset=False)
if model.end_timestamp:
doc['end_timestamp'] = isotime.format(model.end_timestamp, offset=False)
if getattr(model, 'notify', None):
doc['notify'] = NotificationsHelper.from_model(model.notify)
return cls(**doc)
@classmethod
def to_model(cls, live_action):
action = live_action.action
if getattr(live_action, 'start_timestamp', None):
start_timestamp = isotime.parse(live_action.start_timestamp)
else:
start_timestamp = None
if getattr(live_action, 'end_timestamp', None):
end_timestamp = isotime.parse(live_action.end_timestamp)
else:
end_timestamp = None
status = getattr(live_action, 'status', None)
parameters = getattr(live_action, 'parameters', dict())
context = getattr(live_action, 'context', dict())
callback = getattr(live_action, 'callback', dict())
result = getattr(live_action, 'result', None)
if getattr(live_action, 'notify', None):
notify = NotificationsHelper.to_model(live_action.notify)
else:
notify = None
model = cls.model(action=action,
start_timestamp=start_timestamp, end_timestamp=end_timestamp,
status=status, parameters=parameters, context=context,
callback=callback, result=result, notify=notify)
return model
class LiveActionCreateAPI(LiveActionAPI):
"""
API model for action execution create (run action) operations.
"""
schema = copy.deepcopy(LiveActionAPI.schema)
schema['properties']['user'] = {
'description': 'User context under which action should run (admins only)',
'type': 'string',
'default': None
}
class ActionExecutionStateAPI(BaseAPI):
"""
System entity that represents state of an action in the system.
This is used only in tests for now.
"""
model = ActionExecutionStateDB
schema = {
"title": "ActionExecutionState",
"description": "Execution state of an action.",
"type": "object",
"properties": {
"id": {
"description": "The unique identifier for the action execution state.",
"type": "string"
},
"execution_id": {
"type": "string",
"description": "ID of the action execution.",
"required": True
},
"query_context": {
"type": "object",
"description": "query context to be used by querier.",
"required": True
},
"query_module": {
"type": "string",
"description": "Name of the query module.",
"required": True
}
},
"additionalProperties": False
}
@classmethod
def to_model(cls, state):
execution_id = state.execution_id
query_module = state.query_module
query_context = state.query_context
model = cls.model(execution_id=execution_id, query_module=query_module,
query_context=query_context)
return model
class ActionAliasAPI(BaseAPI, APIUIDMixin):
"""
Alias for an action in the system.
"""
model = ActionAliasDB
schema = {
"title": "ActionAlias",
"description": "Alias for an action.",
"type": "object",
"properties": {
"id": {
"description": "The unique identifier for the action alias.",
"type": "string"
},
"ref": {
"description": "System computed user friendly reference for the alias. \
Provided value will be overridden by computed value.",
"type": "string"
},
"uid": {
"type": "string"
},
"name": {
"type": "string",
"description": "Name of the action alias.",
"required": True
},
"pack": {
"description": "The content pack this actionalias belongs to.",
"type": "string",
"required": True
},
"description": {
"type": "string",
"description": "Description of the action alias.",
"default": None
},
"enabled": {
"description": "Flag indicating of action alias is enabled.",
"type": "boolean",
"default": True
},
"action_ref": {
"type": "string",
"description": "Reference to the aliased action.",
"required": True
},
"formats": {
"type": "array",
"items": {
"anyOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"display": {"type": "string"},
"representation": {
"type": "array",
"items": {"type": "string"}
}
}
}
]
},
"description": "Possible parameter format."
},
"ack": {
"type": "object",
"properties": {
"enabled": {"type": "boolean"},
"format": {"type": "string"},
"extra": {"type": "object"},
"append_url": {"type": "boolean"}
},
"description": "Acknowledgement message format."
},
"result": {
"type": "object",
"properties": {
"enabled": {"type": "boolean"},
"format": {"type": "string"},
"extra": {"type": "object"}
},
"description": "Execution message format."
},
"extra": {
"type": "object",
"description": "Extra parameters, usually adapter-specific."
}
},
"additionalProperties": False
}
@classmethod
def to_model(cls, alias):
name = alias.name
description = getattr(alias, 'description', None)
pack = alias.pack
ref = ResourceReference.to_string_reference(pack=pack, name=name)
enabled = getattr(alias, 'enabled', True)
action_ref = alias.action_ref
formats = alias.formats
ack = getattr(alias, 'ack', None)
result = getattr(alias, 'result', None)
extra = getattr(alias, 'extra', None)
model = cls.model(name=name, description=description, pack=pack, ref=ref,
enabled=enabled, action_ref=action_ref, formats=formats,
ack=ack, result=result, extra=extra)
return model
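# Illustrative sketch (not part of the original module): an alias maps chat-style
# format strings onto an action reference. All values below are invented examples.
#
#   ActionAliasAPI(name='deploy', pack='examples', action_ref='examples.deploy',
#                  formats=['deploy {{app}} to {{env}}'])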
class AliasExecutionAPI(BaseAPI):
"""
Alias for an action in the system.
"""
model = None
schema = {
"title": "AliasExecution",
"description": "Execution of an ActionAlias.",
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "Name of the action alias which matched.",
"required": True
},
"format": {
"type": "string",
"description": "Format string which matched.",
"required": True
},
"command": {
"type": "string",
"description": "Command used in chat.",
"required": True
},
"user": {
"type": "string",
"description": "User that requested the execution.",
"default": "channel" # TODO: This value doesnt get set
},
"source_channel": {
"type": "string",
"description": "Channel from which the execution was requested. This is not the \
channel as defined by the notification system.",
"required": True
},
"notification_channel": {
"type": "string",
"description": "StackStorm notification channel to use to respond.",
"required": False
},
"notification_route": {
"type": "string",
"description": "StackStorm notification route to use to respond.",
"required": False
}
},
"additionalProperties": False
}
@classmethod
def to_model(cls, aliasexecution):
# probably should be unsupported
raise NotImplementedError()
@classmethod
def from_model(cls, aliasexecution):
raise NotImplementedError()
class ActionAliasMatchAPI(BaseAPI):
"""
API model used for alias match API endpoint.
"""
model = None
schema = {
"title": "ActionAliasMatchAPI",
"description": "ActionAliasMatchAPI.",
"type": "object",
"properties": {
"command": {
"type": "string",
"description": "Command string to try to match the aliases against.",
"required": True
}
},
"additionalProperties": False
}
@classmethod
def to_model(cls, aliasexecution):
raise NotImplementedError()
@classmethod
def from_model(cls, aliasexecution):
raise NotImplementedError()
| apache-2.0 | 578,311,950,099,190,000 | 34.52193 | 100 | 0.503519 | false |
cagriulas/algorithm-analysis-17 | w3/complexity_graphic.py | 1 | 3297 | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import random
import time
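# maxsubsumOn below is Kadane's algorithm: a single O(n) pass that keeps the best sum
# ending at the current element and the best sum seen so far.
# Worked example: [-2, 1, -3, 4, -1, 2, 1, -5, 4] -> 6 (the slice [4, -1, 2, 1]).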
def maxsubsumOn(vector):
max_ending_here = max_so_far = vector[0]
for x in vector[1:]:
max_ending_here = max(x, max_ending_here + x)
max_so_far = max(max_so_far, max_ending_here)
return max_so_far
def maxsubsumOn3(vector):
    maxsum = 0
    vectorlen = len(vector)
    for i in range(vectorlen):
        for j in range(i, vectorlen):
            thissum = 0
            for k in range(i, j + 1):
                thissum = thissum + vector[k]
            if thissum > maxsum:
                maxsum = thissum
    return maxsum
def find_max_triple(a,b,c):
if a>b:
if b>c:
return a
elif a>c:
return a
else:
return c
elif b>c:
return b
else:
return c
def find_middle(list):
    middle = int(len(list) / 2)
    sum_left_max = 0
    sum_left = 0
    for i in range(middle - 1, -1, -1):
        sum_left = sum_left + list[i]
        if sum_left > sum_left_max:
            sum_left_max = sum_left
    sum_right_max = 0
    sum_right = 0
    for i in range(middle, len(list)):
        sum_right = sum_right + list[i]
        if sum_right > sum_right_max:
            sum_right_max = sum_right
    return sum_left_max + sum_right_max
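# maxsubsumOnlogn below is the divide-and-conquer variant: it recurses on the two
# halves and uses find_middle for subarrays crossing the midpoint, giving
# T(n) = 2T(n/2) + O(n) = O(n log n). Because find_middle allows empty sides, this
# version treats the empty subarray as sum 0 and never returns a negative value for
# inputs of length >= 2.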
def maxsubsumOnlogn(array):
    if len(array) < 2:
        return sum(array)
    else:
        middle = int(len(array) / 2)
        sum_left = maxsubsumOnlogn(array[0:middle])
        sum_right = maxsubsumOnlogn(array[middle:])
        sum_middle = find_middle(array)
        return find_max_triple(sum_left, sum_right, sum_middle)
if __name__ == '__main__':
nib = random.sample(range(-500, 500), k=100)
nonib = random.sample(range(-5000, 5000), k=500)
zuybin = random.sample(range(-50000, 50000), k=1000)
noylim = random.sample(range(-500000, 500000), k=2000)
circle = {'nib': nib,
'nonib': nonib,
'zuybin': zuybin,
'noylim': noylim}
times = {}
for key in circle:
print(key)
print(circle[key], times, time.time())
print(key)
start = time.time()
maxsubsumOnlogn(circle[key])
times['nlogn' + key] = time.time() - start
# start = time.time()
# maxsubsumOn3(circle[key])
# times['n3' + key] = time.time() - start
start = time.time()
maxsubsumOn(circle[key])
times['n' + key] = time.time() - start
x = np.array([100, 500, 1000, 2000])
# n3 = np.array([times['n3nib'],
# times['n3nonib'],
# times['n3zuybin'],
# times['n3noylim']])
nlogn = np.array([times['nlognnib'],
times['nlognnonib'],
times['nlognzuybin'],
times['nlognnoylim']])
n = np.array([times['nnib'],
times['nnonib'],
times['nzuybin'],
times['nnoylim']])
# plt.plot(x, n3*100)
    plt.plot(x, nlogn * 100)
    plt.plot(x, n * 100)
    plt.xticks(x)
    plt.xlabel('Array length')
    plt.ylabel('Time (milliseconds)')
    plt.legend(['nlogn', 'n'], loc='upper left')  # labels for the two curves plotted above
plt.savefig('foo.png', dpi=1000) | unlicense | -8,398,150,691,713,920,000 | 25.376 | 58 | 0.533374 | false |
mvaled/sentry | src/sentry/south_migrations/0504_fix_state.py | 1 | 139898 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
# Flag to indicate if this migration is too risky
# to run online and needs to be coordinated for offline
is_dangerous = False
def forwards(self, orm):
pass
def backwards(self, orm):
pass
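    # Note: both directions are intentionally no-ops. Judging by the migration name
    # (0504_fix_state), this migration presumably exists only to bring South's frozen
    # model state (the `models` dict below) back in sync with the current schema, so
    # no database changes are required in either direction.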
models = {
'sentry.activity': {
'Meta': {'unique_together': '()', 'object_name': 'Activity', 'index_together': '()'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.alertrule': {
'Meta': {'unique_together': "(('organization', 'name'),)", 'object_name': 'AlertRule', 'index_together': '()'},
'aggregation': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'alert_threshold': ('django.db.models.fields.IntegerField', [], {}),
'dataset': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'null': 'True', 'db_index': 'False'}),
'query': ('django.db.models.fields.TextField', [], {}),
'query_subscriptions': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'query_subscriptions'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRuleQuerySubscription']", 'to': "orm['sentry.QuerySubscription']"}),
'resolution': ('django.db.models.fields.IntegerField', [], {}),
'resolve_threshold': ('django.db.models.fields.IntegerField', [], {}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'threshold_period': ('django.db.models.fields.IntegerField', [], {}),
'threshold_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'time_window': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.alertrulequerysubscription': {
'Meta': {'unique_together': '()', 'object_name': 'AlertRuleQuerySubscription', 'index_together': '()'},
'alert_rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AlertRule']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'query_subscription': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.QuerySubscription']", 'unique': 'True'})
},
'sentry.apiapplication': {
'Meta': {'unique_together': '()', 'object_name': 'ApiApplication', 'index_together': '()'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'client_id': ('django.db.models.fields.CharField', [], {'default': "'632724aa8a6747ac8820b551b4e9cf78dd03d0733a5245eb99aa0b0007915d59'", 'unique': 'True', 'max_length': '64'}),
'client_secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'cdecdb1341e047278e36a677d56c524c0cf0496928d848f49d92285f200e2297'"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Viable Leopard'", 'max_length': '64', 'blank': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'privacy_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'redirect_uris': ('django.db.models.fields.TextField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'terms_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.apiauthorization': {
'Meta': {'unique_together': "(('user', 'application'),)", 'object_name': 'ApiAuthorization', 'index_together': '()'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apigrant': {
'Meta': {'unique_together': '()', 'object_name': 'ApiGrant', 'index_together': '()'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']"}),
'code': ('django.db.models.fields.CharField', [], {'default': "'f361c0810f9542ce8fb51e4035979100'", 'max_length': '64', 'db_index': 'True'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2019, 9, 12, 0, 0)', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'redirect_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apikey': {
'Meta': {'unique_together': '()', 'object_name': 'ApiKey', 'index_together': '()'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'key_set'", 'to': "orm['sentry.Organization']"}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'unique_together': '()', 'object_name': 'ApiToken', 'index_together': '()'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2019, 10, 12, 0, 0)', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'refresh_token': ('django.db.models.fields.CharField', [], {'default': "'4949a993e47e43f9b1b953de3904efb036b4efe5bbd14dcea3e5d35c7f5c510a'", 'max_length': '64', 'unique': 'True', 'null': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'d2b53d6a4e9642d7aedd311f28b31fb9fdb08e4a509f45ae86fca706d9224865'", 'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.assistantactivity': {
'Meta': {'unique_together': "(('user', 'guide_id'),)", 'object_name': 'AssistantActivity', 'db_table': "'sentry_assistant_activity'", 'index_together': '()'},
'dismissed_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'guide_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'useful': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'viewed_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'unique_together': '()', 'object_name': 'AuditLogEntry', 'index_together': '()'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "u'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "u'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'", 'index_together': '()'},
'config': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity', 'index_together': '()'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'unique_together': '()', 'object_name': 'AuthProvider', 'index_together': '()'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'unique_together': '()', 'object_name': 'Broadcast', 'index_together': '()'},
'cta': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2019, 9, 19, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen', 'index_together': '()'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.commit': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {'unique_together': "(('organization_id', 'email'), ('organization_id', 'external_id'))", 'object_name': 'CommitAuthor', 'index_together': '()'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '164', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.commitfilechange': {
'Meta': {'unique_together': "(('commit', 'filename'),)", 'object_name': 'CommitFileChange', 'index_together': '()'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'sentry.counter': {
'Meta': {'unique_together': '()', 'object_name': 'Counter', 'db_table': "'sentry_projectcounter'", 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dashboard': {
'Meta': {'unique_together': "(('organization', 'title'),)", 'object_name': 'Dashboard', 'index_together': '()'},
'created_by': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'sentry.deletedorganization': {
'Meta': {'unique_together': '()', 'object_name': 'DeletedOrganization', 'index_together': '()'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deletedproject': {
'Meta': {'unique_together': '()', 'object_name': 'DeletedProject', 'index_together': '()'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deletedteam': {
'Meta': {'unique_together': '()', 'object_name': 'DeletedTeam', 'index_together': '()'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deploy': {
'Meta': {'unique_together': '()', 'object_name': 'Deploy', 'index_together': '()'},
'date_finished': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'notified': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.discoversavedquery': {
'Meta': {'unique_together': '()', 'object_name': 'DiscoverSavedQuery', 'index_together': '()'},
'created_by': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.DiscoverSavedQueryProject']", 'symmetrical': 'False'}),
'query': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'})
},
'sentry.discoversavedqueryproject': {
'Meta': {'unique_together': "(('project', 'discover_saved_query'),)", 'object_name': 'DiscoverSavedQueryProject', 'index_together': '()'},
'discover_saved_query': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DiscoverSavedQuery']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.distribution': {
'Meta': {'unique_together': "(('release', 'name'),)", 'object_name': 'Distribution', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.email': {
'Meta': {'unique_together': '()', 'object_name': 'Email', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('sentry.db.models.fields.citext.CIEmailField', [], {'unique': 'True', 'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.environment': {
'Meta': {'unique_together': "(('organization_id', 'name'),)", 'object_name': 'Environment', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.EnvironmentProject']", 'symmetrical': 'False'})
},
'sentry.environmentproject': {
'Meta': {'unique_together': "(('project', 'environment'),)", 'object_name': 'EnvironmentProject', 'index_together': '()'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_hidden': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventattachment': {
'Meta': {'unique_together': "(('project_id', 'event_id', 'file'),)", 'object_name': 'EventAttachment', 'index_together': "(('project_id', 'date_added'),)"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventprocessingissue': {
'Meta': {'unique_together': "(('raw_event', 'processing_issue'),)", 'object_name': 'EventProcessingIssue', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'processing_issue': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProcessingIssue']"}),
'raw_event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.RawEvent']"})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('group_id', 'key_id', 'value_id'),)"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project_id', 'ident'), ('project_id', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project_id', 'email'), ('project_id', 'username'), ('project_id', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.externalissue': {
'Meta': {'unique_together': "(('organization_id', 'integration_id', 'key'),)", 'object_name': 'ExternalIssue', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'metadata': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'title': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'sentry.featureadoption': {
'Meta': {'unique_together': "(('organization', 'feature_id'),)", 'object_name': 'FeatureAdoption', 'index_together': '()'},
'applicable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'feature_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.file': {
'Meta': {'unique_together': '()', 'object_name': 'File', 'index_together': '()'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'db_index': 'True'}),
'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'unique_together': '()', 'object_name': 'FileBlob', 'index_together': '()'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex', 'index_together': '()'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.fileblobowner': {
'Meta': {'unique_together': "(('blob', 'organization'),)", 'object_name': 'FileBlobOwner', 'index_together': '()'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'), ('project', 'id'))"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'unique_together': '()', 'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'", 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'assignee_set'", 'to': "orm['sentry.Project']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'sentry_assignee_set'", 'null': 'True', 'to': "orm['sentry.Team']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'sentry_assignee_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupcommitresolution': {
'Meta': {'unique_together': "(('group_id', 'commit_id'),)", 'object_name': 'GroupCommitResolution', 'index_together': '()'},
'commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread', 'index_together': '()'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.groupenvironment': {
'Meta': {'unique_together': "(('group', 'environment'),)", 'object_name': 'GroupEnvironment', 'index_together': "(('environment', 'first_release'),)"},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.DO_NOTHING'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash', 'index_together': '()'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'group_tombstone_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'state': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.grouplink': {
'Meta': {'unique_together': "(('group_id', 'linked_type', 'linked_id'),)", 'object_name': 'GroupLink', 'index_together': '()'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'linked_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'linked_type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'relationship': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '2'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta', 'index_together': '()'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'unique_together': "(('organization_id', 'previous_short_id', 'previous_project_slug'),)", 'object_name': 'GroupRedirect', 'index_together': '()'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'}),
'previous_project_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'previous_short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease', 'index_together': '()'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'unique_together': '()', 'object_name': 'GroupResolution', 'index_together': '()'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen', 'index_together': '()'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupshare': {
'Meta': {'unique_together': '()', 'object_name': 'GroupShare', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'23a7c5b16b854d838db1285f502ca2ed'", 'unique': 'True', 'max_length': '32'})
},
'sentry.groupsnooze': {
'Meta': {'unique_together': '()', 'object_name': 'GroupSnooze', 'index_together': '()'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'state': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'user_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project_id', 'group_id', 'key'),)", 'object_name': 'GroupTagKey', 'index_together': '()'},
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group_id', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project_id', 'key', 'value', 'last_seen'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.grouptombstone': {
'Meta': {'unique_together': '()', 'object_name': 'GroupTombstone', 'index_together': '()'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'unique': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.identity': {
'Meta': {'unique_together': "(('idp', 'external_id'), ('idp', 'user'))", 'object_name': 'Identity', 'index_together': '()'},
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'idp': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.IdentityProvider']"}),
'scopes': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.identityprovider': {
'Meta': {'unique_together': "(('type', 'external_id'),)", 'object_name': 'IdentityProvider', 'index_together': '()'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.incident': {
'Meta': {'unique_together': "(('organization', 'identifier'),)", 'object_name': 'Incident', 'index_together': '()'},
'alert_rule': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.AlertRule']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_detected': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'detection_uuid': ('sentry.db.models.fields.uuid.UUIDField', [], {'max_length': '32', 'null': 'True', 'db_index': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'incidents'", 'symmetrical': 'False', 'through': "orm['sentry.IncidentGroup']", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.IntegerField', [], {}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'incidents'", 'symmetrical': 'False', 'through': "orm['sentry.IncidentProject']", 'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'title': ('django.db.models.fields.TextField', [], {}),
'type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'})
},
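        # NOTE: Incident's 'groups' and 'projects' many-to-many fields are routed
        # through the explicit IncidentGroup and IncidentProject through-models
        # frozen further below.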
'sentry.incidentactivity': {
'Meta': {'unique_together': '()', 'object_name': 'IncidentActivity', 'index_together': '()'},
'comment': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_stats_snapshot': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.TimeSeriesSnapshot']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'incident': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Incident']"}),
'previous_value': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'sentry.incidentgroup': {
'Meta': {'unique_together': "(('group', 'incident'),)", 'object_name': 'IncidentGroup', 'index_together': '()'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'db_index': 'False'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'incident': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Incident']"})
},
'sentry.incidentproject': {
'Meta': {'unique_together': "(('project', 'incident'),)", 'object_name': 'IncidentProject', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'incident': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Incident']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'db_index': 'False'})
},
'sentry.incidentseen': {
'Meta': {'unique_together': "(('user', 'incident'),)", 'object_name': 'IncidentSeen', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'incident': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Incident']"}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.incidentsnapshot': {
'Meta': {'unique_together': '()', 'object_name': 'IncidentSnapshot', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_stats_snapshot': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.TimeSeriesSnapshot']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'incident': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sentry.Incident']", 'unique': 'True'}),
'total_events': ('django.db.models.fields.IntegerField', [], {}),
'unique_users': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.incidentsubscription': {
'Meta': {'unique_together': "(('incident', 'user'),)", 'object_name': 'IncidentSubscription', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'incident': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Incident']", 'db_index': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.incidentsuspectcommit': {
'Meta': {'unique_together': "(('incident', 'commit'),)", 'object_name': 'IncidentSuspectCommit', 'index_together': '()'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'incident': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Incident']", 'db_index': 'False'}),
'order': ('django.db.models.fields.SmallIntegerField', [], {})
},
'sentry.integration': {
'Meta': {'unique_together': "(('provider', 'external_id'),)", 'object_name': 'Integration', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'metadata': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationIntegration']", 'to': "orm['sentry.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectIntegration']", 'to': "orm['sentry.Project']"}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'})
},
'sentry.integrationexternalproject': {
'Meta': {'unique_together': "(('organization_integration_id', 'external_id'),)", 'object_name': 'IntegrationExternalProject', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'organization_integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'resolved_status': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'unresolved_status': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.integrationfeature': {
'Meta': {'unique_together': "(('sentry_app', 'feature'),)", 'object_name': 'IntegrationFeature', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'feature': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'sentry_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SentryApp']"}),
'user_description': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'sentry.latestrelease': {
'Meta': {'unique_together': "(('repository_id', 'environment_id'),)", 'object_name': 'LatestRelease', 'index_together': '()'},
'commit_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'deploy_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.lostpasswordhash': {
'Meta': {'unique_together': '()', 'object_name': 'LostPasswordHash', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.monitor': {
'Meta': {'unique_together': '()', 'object_name': 'Monitor', 'index_together': "(('type', 'next_checkin'),)"},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'guid': ('sentry.db.models.fields.uuid.UUIDField', [], {'auto_add': "'uuid:uuid4'", 'unique': 'True', 'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_checkin': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'next_checkin': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.monitorcheckin': {
'Meta': {'unique_together': '()', 'object_name': 'MonitorCheckIn', 'index_together': '()'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'duration': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'guid': ('sentry.db.models.fields.uuid.UUIDField', [], {'auto_add': "'uuid:uuid4'", 'unique': 'True', 'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'location': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.MonitorLocation']", 'null': 'True'}),
'monitor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Monitor']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.monitorlocation': {
'Meta': {'unique_together': '()', 'object_name': 'MonitorLocation', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'guid': ('sentry.db.models.fields.uuid.UUIDField', [], {'auto_add': "'uuid:uuid4'", 'unique': 'True', 'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'unique_together': '()', 'object_name': 'Option', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'unique_together': '()', 'object_name': 'Organization', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationavatar': {
'Meta': {'unique_together': '()', 'object_name': 'OrganizationAvatar', 'index_together': '()'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'avatar'", 'unique': 'True', 'to': "orm['sentry.Organization']"})
},
'sentry.organizationintegration': {
'Meta': {'unique_together': "(('organization', 'integration'),)", 'object_name': 'OrganizationIntegration', 'index_together': '()'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'default_auth_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'token_expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "u'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'", 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask', 'index_together': '()'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'", 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.platformexternalissue': {
'Meta': {'unique_together': "(('group_id', 'service_type'),)", 'object_name': 'PlatformExternalIssue', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'display_name': ('django.db.models.fields.TextField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'service_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'web_url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'sentry.processingissue': {
'Meta': {'unique_together': "(('project', 'checksum', 'type'),)", 'object_name': 'ProcessingIssue', 'index_together': '()'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'sentry.project': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Project', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'teams'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectTeam']", 'to': "orm['sentry.Team']"})
},
'sentry.projectavatar': {
'Meta': {'unique_together': '()', 'object_name': 'ProjectAvatar', 'index_together': '()'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'avatar'", 'unique': 'True', 'to': "orm['sentry.Project']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectBookmark', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectdebugfile': {
'Meta': {'unique_together': '()', 'object_name': 'ProjectDebugFile', 'db_table': "'sentry_projectdsymfile'", 'index_together': "(('project', 'debug_id'), ('project', 'code_id'))"},
'code_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'}),
'debug_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_column': "'uuid'"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.projectintegration': {
'Meta': {'unique_together': "(('project', 'integration'),)", 'object_name': 'ProjectIntegration', 'index_together': '()'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.projectkey': {
'Meta': {'unique_together': '()', 'object_name': 'ProjectKey', 'index_together': '()'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'rate_limit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'rate_limit_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'", 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.projectownership': {
'Meta': {'unique_together': '()', 'object_name': 'ProjectOwnership', 'index_together': '()'},
'auto_assignment': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'fallthrough': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'raw': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'schema': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.projectredirect': {
'Meta': {'unique_together': "(('organization', 'redirect_slug'),)", 'object_name': 'ProjectRedirect', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'redirect_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'sentry.projectteam': {
'Meta': {'unique_together': "(('project', 'team'),)", 'object_name': 'ProjectTeam', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.promptsactivity': {
'Meta': {'unique_together': "(('user', 'feature', 'organization_id', 'project_id'),)", 'object_name': 'PromptsActivity', 'index_together': '()'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'feature': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.pullrequest': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'PullRequest', 'db_table': "'sentry_pull_request'", 'index_together': "(('repository_id', 'date_added'), ('organization_id', 'merge_commit_sha'))"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'merge_commit_sha': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'title': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'sentry.pullrequestcommit': {
'Meta': {'unique_together': "(('pull_request', 'commit'),)", 'object_name': 'PullRequestCommit', 'db_table': "'sentry_pullrequest_commit'", 'index_together': '()'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'pull_request': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.PullRequest']"})
},
'sentry.querysubscription': {
'Meta': {'unique_together': '()', 'object_name': 'QuerySubscription', 'index_together': '()'},
'aggregation': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'dataset': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {}),
'resolution': ('django.db.models.fields.IntegerField', [], {}),
'subscription_id': ('django.db.models.fields.TextField', [], {'unique': 'True'}),
'time_window': ('django.db.models.fields.IntegerField', [], {}),
'type': ('django.db.models.fields.TextField', [], {})
},
'sentry.rawevent': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'RawEvent', 'index_together': '()'},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.recentsearch': {
'Meta': {'unique_together': "(('user', 'organization', 'type', 'query_hash'),)", 'object_name': 'RecentSearch', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'query': ('django.db.models.fields.TextField', [], {}),
'query_hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'type': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.relay': {
'Meta': {'unique_together': '()', 'object_name': 'Relay', 'index_together': '()'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_internal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'relay_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
'sentry.release': {
'Meta': {'unique_together': "(('organization', 'version'),)", 'object_name': 'Release', 'index_together': '()'},
'authors': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'commit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'last_deploy_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'releases'", 'symmetrical': 'False', 'through': "orm['sentry.ReleaseProject']", 'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'total_deploys': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
'sentry.releasecommit': {
'Meta': {'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit', 'index_together': '()'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('organization', 'release', 'environment'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'", 'index_together': '()'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile', 'index_together': "(('release', 'name'),)"},
'dist': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Distribution']", 'null': 'True'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseheadcommit': {
'Meta': {'unique_together': "(('repository_id', 'release'),)", 'object_name': 'ReleaseHeadCommit', 'index_together': '()'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.releaseproject': {
'Meta': {'unique_together': "(('project', 'release'),)", 'object_name': 'ReleaseProject', 'db_table': "'sentry_release_project'", 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseprojectenvironment': {
'Meta': {'unique_together': "(('project', 'release', 'environment'),)", 'object_name': 'ReleaseProjectEnvironment', 'index_together': '()'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_deploy_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'new_issues_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.repository': {
'Meta': {'unique_together': "(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))", 'object_name': 'Repository', 'index_together': '()'},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.reprocessingreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'ReprocessingReport', 'index_together': '()'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.rule': {
'Meta': {'unique_together': '()', 'object_name': 'Rule', 'index_together': '()'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'), ('organization', 'owner', 'type'))", 'object_name': 'SavedSearch', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_global': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'null': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'query': ('django.db.models.fields.TextField', [], {}),
'type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True'})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'", 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.scheduleddeletion': {
'Meta': {'unique_together': "(('app_label', 'model_name', 'object_id'),)", 'object_name': 'ScheduledDeletion', 'index_together': '()'},
'aborted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2019, 10, 12, 0, 0)'}),
'guid': ('django.db.models.fields.CharField', [], {'default': "'1556f91e60d4439cbcffdc7c81bb756d'", 'unique': 'True', 'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'in_progress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'object_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.scheduledjob': {
'Meta': {'unique_together': '()', 'object_name': 'ScheduledJob', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'payload': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'})
},
'sentry.sentryapp': {
'Meta': {'unique_together': '()', 'object_name': 'SentryApp', 'index_together': '()'},
'application': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'sentry_app'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.ApiApplication']"}),
'author': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'events': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_alertable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.TextField', [], {}),
'overview': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'owned_sentry_apps'", 'to': "orm['sentry.Organization']"}),
'proxy_user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'sentry_app'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.User']"}),
'redirect_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'schema': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'3d8204ac-6b09-46e5-86a4-77da6bec90b4'", 'max_length': '64'}),
'verify_install': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'webhook_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.sentryappavatar': {
'Meta': {'unique_together': '()', 'object_name': 'SentryAppAvatar', 'index_together': '()'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'sentry_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'avatar'", 'unique': 'True', 'to': "orm['sentry.SentryApp']"})
},
'sentry.sentryappcomponent': {
'Meta': {'unique_together': '()', 'object_name': 'SentryAppComponent', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'schema': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'sentry_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'components'", 'to': "orm['sentry.SentryApp']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'uuid': ('sentry.db.models.fields.uuid.UUIDField', [], {'auto_add': "'uuid:uuid4'", 'unique': 'True', 'max_length': '32'})
},
'sentry.sentryappinstallation': {
'Meta': {'unique_together': '()', 'object_name': 'SentryAppInstallation', 'index_together': '()'},
'api_grant': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'sentry_app_installation'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.ApiGrant']"}),
'api_token': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'sentry_app_installation'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.ApiToken']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'sentry_app_installations'", 'to': "orm['sentry.Organization']"}),
'sentry_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'installations'", 'to': "orm['sentry.SentryApp']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'05c99e26-e5aa-4cd8-b7db-bacc31f3171f'", 'max_length': '64'})
},
'sentry.sentryappinstallationtoken': {
'Meta': {'unique_together': "(('sentry_app_installation', 'api_token'),)", 'object_name': 'SentryAppInstallationToken', 'index_together': '()'},
'api_token': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiToken']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'sentry_app_installation': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SentryAppInstallation']"})
},
'sentry.servicehook': {
'Meta': {'unique_together': '()', 'object_name': 'ServiceHook', 'index_together': '()'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'events': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'c6ab63d377fd48a2b2d162ef7402be07165ff1b2fbc2413d9fca00c1ac0c6471'"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '512'}),
'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.servicehookproject': {
'Meta': {'unique_together': "(('service_hook', 'project_id'),)", 'object_name': 'ServiceHookProject', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'service_hook': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ServiceHook']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project_id', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'", 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project_id', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'", 'index_together': "(('project_id', 'key', 'last_seen'),)"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.teamavatar': {
'Meta': {'unique_together': '()', 'object_name': 'TeamAvatar', 'index_together': '()'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'avatar'", 'unique': 'True', 'to': "orm['sentry.Team']"})
},
'sentry.timeseriessnapshot': {
'Meta': {'unique_together': '()', 'object_name': 'TimeSeriesSnapshot', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'end': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'period': ('django.db.models.fields.IntegerField', [], {}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'values': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'sentry.db.models.fields.array.ArrayField', [], {'null': 'True'})})
},
'sentry.user': {
'Meta': {'unique_together': '()', 'object_name': 'User', 'db_table': "'auth_user'", 'index_together': '()'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_sentry_app': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'session_nonce': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'unique_together': '()', 'object_name': 'UserAvatar', 'index_together': '()'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail', 'index_together': '()'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'default': "u'5VTVHt0BarZ69mqs53xvSzi66LbRQnJ9'", 'max_length': '32'})
},
'sentry.userip': {
'Meta': {'unique_together': "(('user', 'ip_address'),)", 'object_name': 'UserIP', 'index_together': '()'},
'country_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'region_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'), ('user', 'organization', 'key'))", 'object_name': 'UserOption', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.userpermission': {
'Meta': {'unique_together': "(('user', 'permission'),)", 'object_name': 'UserPermission', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'permission': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']", 'null': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'event_user_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.widget': {
'Meta': {'unique_together': "(('dashboard', 'order'), ('dashboard', 'title'))", 'object_name': 'Widget', 'index_together': '()'},
'dashboard': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Dashboard']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'display_options': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'display_type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'sentry.widgetdatasource': {
'Meta': {'unique_together': "(('widget', 'name'), ('widget', 'order'))", 'object_name': 'WidgetDataSource', 'index_together': '()'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'widget': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Widget']"})
}
}
complete_apps = ['sentry']
| bsd-3-clause | -5,910,418,855,912,723,000 | 94.820548 | 255 | 0.579794 | false |
maas/maas | src/maasserver/models/tests/test_filesystemgroup.py | 1 | 104094 | # Copyright 2015-2019 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for `FilesystemGroup`."""
import random
import re
from unittest import skip
from uuid import uuid4
from django.core.exceptions import PermissionDenied, ValidationError
from django.http import Http404
from testtools import ExpectedException
from testtools.matchers import Equals, Is, MatchesStructure, Not
from maasserver.enum import (
CACHE_MODE_TYPE,
FILESYSTEM_GROUP_RAID_TYPES,
FILESYSTEM_GROUP_TYPE,
FILESYSTEM_TYPE,
PARTITION_TABLE_TYPE,
)
from maasserver.models.blockdevice import MIN_BLOCK_DEVICE_SIZE
from maasserver.models.filesystem import Filesystem
from maasserver.models.filesystemgroup import (
Bcache,
BcacheManager,
FilesystemGroup,
LVM_PE_SIZE,
RAID,
RAID_SUPERBLOCK_OVERHEAD,
RAIDManager,
VMFS,
VolumeGroup,
VolumeGroupManager,
)
from maasserver.models.partition import PARTITION_ALIGNMENT_SIZE
from maasserver.models.partitiontable import PARTITION_TABLE_EXTRA_SPACE
from maasserver.models.physicalblockdevice import PhysicalBlockDevice
from maasserver.models.virtualblockdevice import VirtualBlockDevice
from maasserver.permissions import NodePermission
from maasserver.testing.factory import factory
from maasserver.testing.orm import reload_objects
from maasserver.testing.testcase import MAASServerTestCase
from maasserver.utils.converters import (
machine_readable_bytes,
round_size_to_nearest_block,
)
from maasserver.utils.orm import reload_object
from maastesting.matchers import MockCalledOnceWith, MockNotCalled
class TestManagersGetObjectOr404(MAASServerTestCase):
"""Tests for the `get_object_or_404` on the managers."""
scenarios = (
("FilesystemGroup", {"model": FilesystemGroup, "type": None}),
(
"VolumeGroup",
{"model": VolumeGroup, "type": FILESYSTEM_GROUP_TYPE.LVM_VG},
),
("RAID", {"model": RAID, "type": FILESYSTEM_GROUP_TYPE.RAID_0}),
("Bcache", {"model": Bcache, "type": FILESYSTEM_GROUP_TYPE.BCACHE}),
)
def test_raises_Http404_when_invalid_node(self):
user = factory.make_admin()
filesystem_group = factory.make_FilesystemGroup(group_type=self.type)
self.assertRaises(
Http404,
self.model.objects.get_object_or_404,
factory.make_name("system_id"),
filesystem_group.id,
user,
NodePermission.view,
)
def test_raises_Http404_when_invalid_device(self):
user = factory.make_admin()
node = factory.make_Node()
self.assertRaises(
Http404,
self.model.objects.get_object_or_404,
node.system_id,
random.randint(0, 100),
user,
NodePermission.view,
)
def test_view_raises_PermissionDenied_when_user_not_owner(self):
user = factory.make_User()
node = factory.make_Node(owner=factory.make_User())
filesystem_group = factory.make_FilesystemGroup(
node=node, group_type=self.type
)
self.assertRaises(
PermissionDenied,
self.model.objects.get_object_or_404,
node.system_id,
filesystem_group.id,
user,
NodePermission.view,
)
def test_view_returns_device_by_name(self):
user = factory.make_User()
node = factory.make_Node()
filesystem_group = factory.make_FilesystemGroup(
node=node, group_type=self.type
)
self.assertEqual(
filesystem_group.id,
self.model.objects.get_object_or_404(
node.system_id,
filesystem_group.name,
user,
NodePermission.view,
).id,
)
def test_view_returns_device_when_no_owner(self):
user = factory.make_User()
node = factory.make_Node()
filesystem_group = factory.make_FilesystemGroup(
node=node, group_type=self.type
)
self.assertEqual(
filesystem_group.id,
self.model.objects.get_object_or_404(
node.system_id, filesystem_group.id, user, NodePermission.view
).id,
)
def test_view_returns_device_when_owner(self):
user = factory.make_User()
node = factory.make_Node(owner=user)
filesystem_group = factory.make_FilesystemGroup(
node=node, group_type=self.type
)
self.assertEqual(
filesystem_group.id,
self.model.objects.get_object_or_404(
node.system_id, filesystem_group.id, user, NodePermission.view
).id,
)
def test_edit_raises_PermissionDenied_when_user_not_owner(self):
user = factory.make_User()
node = factory.make_Node(owner=factory.make_User())
filesystem_group = factory.make_FilesystemGroup(
node=node, group_type=self.type
)
self.assertRaises(
PermissionDenied,
self.model.objects.get_object_or_404,
node.system_id,
filesystem_group.id,
user,
NodePermission.edit,
)
def test_edit_returns_device_when_user_is_owner(self):
user = factory.make_User()
node = factory.make_Node(owner=user)
filesystem_group = factory.make_FilesystemGroup(
node=node, group_type=self.type
)
self.assertEqual(
filesystem_group.id,
self.model.objects.get_object_or_404(
node.system_id, filesystem_group.id, user, NodePermission.edit
).id,
)
def test_admin_raises_PermissionDenied_when_user_requests_admin(self):
user = factory.make_User()
node = factory.make_Node()
filesystem_group = factory.make_FilesystemGroup(
node=node, group_type=self.type
)
self.assertRaises(
PermissionDenied,
self.model.objects.get_object_or_404,
node.system_id,
filesystem_group.id,
user,
NodePermission.admin,
)
def test_admin_returns_device_when_admin(self):
user = factory.make_admin()
node = factory.make_Node()
filesystem_group = factory.make_FilesystemGroup(
node=node, group_type=self.type
)
self.assertEqual(
filesystem_group.id,
self.model.objects.get_object_or_404(
node.system_id, filesystem_group.id, user, NodePermission.admin
).id,
)
class TestManagersFilterByBlockDevice(MAASServerTestCase):
"""Tests for the managers `filter_by_block_device`."""
def test_volume_group_on_block_device(self):
block_device = factory.make_PhysicalBlockDevice()
filesystem = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, block_device=block_device
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG, filesystems=[filesystem]
)
filesystem_groups = VolumeGroup.objects.filter_by_block_device(
block_device
)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
def test_volume_group_on_partition(self):
block_device = factory.make_PhysicalBlockDevice(size=10 * 1024 ** 3)
partition_table = factory.make_PartitionTable(
block_device=block_device
)
partition = factory.make_Partition(
size=5 * 1024 ** 3, partition_table=partition_table
)
filesystem = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, partition=partition
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG, filesystems=[filesystem]
)
filesystem_groups = VolumeGroup.objects.filter_by_block_device(
block_device
)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
def test_volume_group_on_two_partitions(self):
block_device = factory.make_PhysicalBlockDevice()
partition_table = factory.make_PartitionTable(
block_device=block_device
)
partition_one = factory.make_Partition(partition_table=partition_table)
partition_two = factory.make_Partition(partition_table=partition_table)
filesystem_one = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, partition=partition_one
)
filesystem_two = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, partition=partition_two
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG,
filesystems=[filesystem_one, filesystem_two],
)
filesystem_groups = VolumeGroup.objects.filter_by_block_device(
block_device
)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
def test_raid_on_block_devices(self):
node = factory.make_Node()
block_device_one = factory.make_PhysicalBlockDevice(node=node)
filesystem_one = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID, block_device=block_device_one
)
block_device_two = factory.make_PhysicalBlockDevice(node=node)
filesystem_two = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID, block_device=block_device_two
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_0,
filesystems=[filesystem_one, filesystem_two],
)
filesystem_groups = RAID.objects.filter_by_block_device(
block_device_one
)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
def test_raid_on_partitions(self):
block_device = factory.make_PhysicalBlockDevice()
partition_table = factory.make_PartitionTable(
block_device=block_device
)
partition_one = factory.make_Partition(partition_table=partition_table)
partition_two = factory.make_Partition(partition_table=partition_table)
filesystem_one = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID, partition=partition_one
)
filesystem_two = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID, partition=partition_two
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_0,
filesystems=[filesystem_one, filesystem_two],
)
filesystem_groups = RAID.objects.filter_by_block_device(block_device)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
def test_bcache_on_block_devices(self):
node = factory.make_Node()
block_device_one = factory.make_PhysicalBlockDevice(node=node)
cache_set = factory.make_CacheSet(block_device=block_device_one)
block_device_two = factory.make_PhysicalBlockDevice(node=node)
filesystem_backing = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.BCACHE_BACKING,
block_device=block_device_two,
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE,
cache_mode=CACHE_MODE_TYPE.WRITEBACK,
cache_set=cache_set,
filesystems=[filesystem_backing],
)
filesystem_groups = Bcache.objects.filter_by_block_device(
block_device_one
)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
def test_bcache_on_partitions(self):
device_size = random.randint(
MIN_BLOCK_DEVICE_SIZE * 4, MIN_BLOCK_DEVICE_SIZE * 1024
)
block_device = factory.make_PhysicalBlockDevice(
size=device_size + PARTITION_TABLE_EXTRA_SPACE
)
partition_table = factory.make_PartitionTable(
block_device=block_device
)
partition_one = factory.make_Partition(
partition_table=partition_table, size=device_size // 2
)
partition_two = factory.make_Partition(
partition_table=partition_table, size=device_size // 2
)
cache_set = factory.make_CacheSet(partition=partition_one)
filesystem_backing = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.BCACHE_BACKING, partition=partition_two
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE,
cache_mode=CACHE_MODE_TYPE.WRITEBACK,
cache_set=cache_set,
filesystems=[filesystem_backing],
)
filesystem_groups = Bcache.objects.filter_by_block_device(block_device)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
class TestManagersFilterByNode(MAASServerTestCase):
"""Tests for the managers `filter_by_node`."""
def test_volume_group_on_block_device(self):
node = factory.make_Node()
block_device = factory.make_PhysicalBlockDevice(node=node)
filesystem = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, block_device=block_device
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG, filesystems=[filesystem]
)
filesystem_groups = VolumeGroup.objects.filter_by_node(node)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
def test_volume_group_on_partition(self):
node = factory.make_Node()
block_device = factory.make_PhysicalBlockDevice(node=node)
partition_table = factory.make_PartitionTable(
block_device=block_device
)
partition = factory.make_Partition(partition_table=partition_table)
filesystem = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, partition=partition
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG, filesystems=[filesystem]
)
filesystem_groups = VolumeGroup.objects.filter_by_node(node)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
def test_volume_group_on_two_partitions(self):
node = factory.make_Node()
block_device = factory.make_PhysicalBlockDevice(node=node)
partition_table = factory.make_PartitionTable(
block_device=block_device
)
partition_one = factory.make_Partition(partition_table=partition_table)
partition_two = factory.make_Partition(partition_table=partition_table)
filesystem_one = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, partition=partition_one
)
filesystem_two = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, partition=partition_two
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG,
filesystems=[filesystem_one, filesystem_two],
)
filesystem_groups = VolumeGroup.objects.filter_by_node(node)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
def test_raid_on_block_devices(self):
node = factory.make_Node()
block_device_one = factory.make_PhysicalBlockDevice(node=node)
filesystem_one = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID, block_device=block_device_one
)
block_device_two = factory.make_PhysicalBlockDevice(node=node)
filesystem_two = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID, block_device=block_device_two
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_0,
filesystems=[filesystem_one, filesystem_two],
)
filesystem_groups = RAID.objects.filter_by_node(node)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
def test_raid_on_partitions(self):
node = factory.make_Node()
block_device = factory.make_PhysicalBlockDevice(node=node)
partition_table = factory.make_PartitionTable(
block_device=block_device
)
partition_one = factory.make_Partition(partition_table=partition_table)
partition_two = factory.make_Partition(partition_table=partition_table)
filesystem_one = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID, partition=partition_one
)
filesystem_two = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID, partition=partition_two
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_0,
filesystems=[filesystem_one, filesystem_two],
)
filesystem_groups = RAID.objects.filter_by_node(node)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
def test_bcache_on_block_devices(self):
node = factory.make_Node()
block_device_one = factory.make_PhysicalBlockDevice(node=node)
cache_set = factory.make_CacheSet(block_device=block_device_one)
block_device_two = factory.make_PhysicalBlockDevice(node=node)
filesystem_backing = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.BCACHE_BACKING,
block_device=block_device_two,
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE,
cache_mode=CACHE_MODE_TYPE.WRITEBACK,
cache_set=cache_set,
filesystems=[filesystem_backing],
)
filesystem_groups = Bcache.objects.filter_by_node(node)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
def test_bcache_on_partitions(self):
node = factory.make_Node()
block_device = factory.make_PhysicalBlockDevice(node=node)
partition_table = factory.make_PartitionTable(
block_device=block_device
)
partition_one = factory.make_Partition(partition_table=partition_table)
partition_two = factory.make_Partition(partition_table=partition_table)
cache_set = factory.make_CacheSet(partition=partition_one)
filesystem_backing = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.BCACHE_BACKING, partition=partition_two
)
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE,
cache_mode=CACHE_MODE_TYPE.WRITEBACK,
cache_set=cache_set,
filesystems=[filesystem_backing],
)
filesystem_groups = Bcache.objects.filter_by_node(node)
result_filesystem_group_ids = [
fsgroup.id for fsgroup in filesystem_groups
]
self.assertItemsEqual(
[filesystem_group.id], result_filesystem_group_ids
)
class TestFilesystemGroupManager(MAASServerTestCase):
"""Tests for the `FilesystemGroupManager`."""
def test_get_available_name_for_returns_next_idx(self):
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE
)
filesystem_group.save()
prefix = filesystem_group.get_name_prefix()
current_idx = int(filesystem_group.name.replace(prefix, ""))
self.assertEqual(
"%s%s" % (prefix, current_idx + 1),
FilesystemGroup.objects.get_available_name_for(filesystem_group),
)
def test_get_available_name_for_ignores_bad_int(self):
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE
)
filesystem_group.save()
prefix = filesystem_group.get_name_prefix()
filesystem_group.name = "%s%s" % (prefix, factory.make_name("bad"))
filesystem_group.save()
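        # The non-integer suffix is ignored, so numbering starts again at 0.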
self.assertEqual(
"%s0" % prefix,
FilesystemGroup.objects.get_available_name_for(filesystem_group),
)
class TestVolumeGroupManager(MAASServerTestCase):
"""Tests for the `VolumeGroupManager`."""
def test_create_volume_group_with_name_and_uuid(self):
block_device = factory.make_PhysicalBlockDevice()
name = factory.make_name("vg")
vguuid = "%s" % uuid4()
volume_group = VolumeGroup.objects.create_volume_group(
name, [block_device], [], uuid=vguuid
)
self.assertEqual(name, volume_group.name)
self.assertEqual(vguuid, volume_group.uuid)
def test_create_volume_group_with_block_devices(self):
node = factory.make_Node()
block_devices = [
factory.make_PhysicalBlockDevice(node=node) for _ in range(3)
]
name = factory.make_name("vg")
volume_group = VolumeGroup.objects.create_volume_group(
name, block_devices, []
)
block_devices_in_vg = [
filesystem.block_device.actual_instance
for filesystem in volume_group.filesystems.all()
]
self.assertItemsEqual(block_devices, block_devices_in_vg)
def test_create_volume_group_with_partitions(self):
node = factory.make_Node()
block_device = factory.make_PhysicalBlockDevice(
node=node,
size=(MIN_BLOCK_DEVICE_SIZE * 3) + PARTITION_TABLE_EXTRA_SPACE,
)
partition_table = factory.make_PartitionTable(
block_device=block_device
)
partitions = [
partition_table.add_partition(size=MIN_BLOCK_DEVICE_SIZE)
for _ in range(2)
]
name = factory.make_name("vg")
volume_group = VolumeGroup.objects.create_volume_group(
name, [], partitions
)
partitions_in_vg = [
filesystem.partition
for filesystem in volume_group.filesystems.all()
]
self.assertItemsEqual(partitions, partitions_in_vg)
def test_create_volume_group_with_block_devices_and_partitions(self):
node = factory.make_Node()
block_devices = [
factory.make_PhysicalBlockDevice(node=node) for _ in range(3)
]
block_device = factory.make_PhysicalBlockDevice(
node=node,
size=(MIN_BLOCK_DEVICE_SIZE * 3) + PARTITION_TABLE_EXTRA_SPACE,
)
partition_table = factory.make_PartitionTable(
block_device=block_device
)
partitions = [
partition_table.add_partition(size=MIN_BLOCK_DEVICE_SIZE)
for _ in range(2)
]
name = factory.make_name("vg")
volume_group = VolumeGroup.objects.create_volume_group(
name, block_devices, partitions
)
block_devices_in_vg = [
filesystem.block_device.actual_instance
for filesystem in volume_group.filesystems.all()
if filesystem.block_device is not None
]
partitions_in_vg = [
filesystem.partition
for filesystem in volume_group.filesystems.all()
if filesystem.partition is not None
]
self.assertItemsEqual(block_devices, block_devices_in_vg)
self.assertItemsEqual(partitions, partitions_in_vg)
class TestFilesystemGroup(MAASServerTestCase):
"""Tests for the `FilesystemGroup` model."""
def test_virtual_device_raises_AttributeError_for_lvm(self):
fsgroup = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG
)
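        # A volume group backs multiple logical volumes, so it is not
        # expected to expose a single virtual_device.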
with ExpectedException(AttributeError):
fsgroup.virtual_device
def test_virtual_device_returns_VirtualBlockDevice_for_group(self):
fsgroup = factory.make_FilesystemGroup(
group_type=factory.pick_enum(
FILESYSTEM_GROUP_TYPE, but_not=FILESYSTEM_GROUP_TYPE.LVM_VG
)
)
self.assertEqual(
VirtualBlockDevice.objects.get(filesystem_group=fsgroup),
fsgroup.virtual_device,
)
def test_get_numa_node_indexes_all_same(self):
fsgroup = factory.make_FilesystemGroup(
group_type=factory.pick_enum(
FILESYSTEM_GROUP_TYPE, but_not=FILESYSTEM_GROUP_TYPE.VMFS6
)
)
self.assertEqual(fsgroup.get_numa_node_indexes(), [0])
def test_get_numa_node_indexes_multiple(self):
node = factory.make_Node()
numa_nodes = [
node.default_numanode,
factory.make_NUMANode(node=node),
factory.make_NUMANode(node=node),
]
block_devices = [
factory.make_PhysicalBlockDevice(numa_node=numa_node)
for numa_node in numa_nodes
]
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, block_device=block_device
)
for block_device in block_devices
]
fsgroup = factory.make_FilesystemGroup(
node=node,
filesystems=filesystems,
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG,
)
self.assertEqual(fsgroup.get_numa_node_indexes(), [0, 1, 2])
def test_get_numa_node_indexes_nested(self):
node = factory.make_Node()
numa_nodes = [
node.default_numanode,
factory.make_NUMANode(node=node),
factory.make_NUMANode(node=node),
factory.make_NUMANode(node=node),
factory.make_NUMANode(node=node),
]
# 2 physical disks have filesystems on them directly
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV,
block_device=factory.make_PhysicalBlockDevice(
numa_node=numa_node
),
)
for numa_node in numa_nodes[:2]
]
# the 3 remaining disks are part of another filesystem group which gets
# added to the first
nested_filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV,
block_device=factory.make_PhysicalBlockDevice(
numa_node=numa_node
),
)
for numa_node in numa_nodes[2:]
]
nested_group = factory.make_FilesystemGroup(
node=node,
filesystems=nested_filesystems,
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG,
)
virtual_block_device = factory.make_VirtualBlockDevice(
filesystem_group=nested_group
)
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV,
block_device=virtual_block_device,
)
)
fsgroup = factory.make_FilesystemGroup(
node=node,
filesystems=filesystems,
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG,
)
self.assertEqual(fsgroup.get_numa_node_indexes(), [0, 1, 2, 3, 4])
def test_get_node_returns_first_filesystem_node(self):
fsgroup = factory.make_FilesystemGroup()
self.assertEqual(
fsgroup.filesystems.first().get_node(), fsgroup.get_node()
)
def test_get_node_returns_None_if_no_filesystems(self):
fsgroup = FilesystemGroup()
self.assertIsNone(fsgroup.get_node())
def test_get_size_returns_0_if_lvm_without_filesystems(self):
fsgroup = FilesystemGroup(group_type=FILESYSTEM_GROUP_TYPE.LVM_VG)
self.assertEqual(0, fsgroup.get_size())
def test_get_size_returns_sum_of_all_filesystem_sizes_for_lvm(self):
node = factory.make_Node()
block_size = 4096
total_size = 0
filesystems = []
for _ in range(3):
size = random.randint(
MIN_BLOCK_DEVICE_SIZE, MIN_BLOCK_DEVICE_SIZE ** 2
)
total_size += size
block_device = factory.make_PhysicalBlockDevice(
node=node, size=size, block_size=block_size
)
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, block_device=block_device
)
)
fsgroup = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG, filesystems=filesystems
)
# Reserve one extent per filesystem for LVM headers.
extents = (total_size // LVM_PE_SIZE) - 3
self.assertEqual(extents * LVM_PE_SIZE, fsgroup.get_size())
def test_get_size_returns_0_if_raid_without_filesystems(self):
fsgroup = FilesystemGroup(group_type=FILESYSTEM_GROUP_TYPE.RAID_0)
self.assertEqual(0, fsgroup.get_size())
def test_get_size_returns_smallest_disk_size_for_raid_0(self):
node = factory.make_Node()
small_size = random.randint(
MIN_BLOCK_DEVICE_SIZE, MIN_BLOCK_DEVICE_SIZE ** 2
)
large_size = random.randint(small_size + 1, small_size + (10 ** 5))
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=small_size
),
),
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=large_size
),
),
]
fsgroup = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_0, filesystems=filesystems
)
        # Size should be twice the smallest device (the rest of the larger
        # device remains unused).
self.assertEqual(
(small_size * 2) - RAID_SUPERBLOCK_OVERHEAD, fsgroup.get_size()
)
def test_get_size_returns_smallest_disk_size_for_raid_1(self):
node = factory.make_Node()
small_size = random.randint(
MIN_BLOCK_DEVICE_SIZE, MIN_BLOCK_DEVICE_SIZE ** 2
)
large_size = random.randint(small_size + 1, small_size + (10 ** 5))
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=small_size
),
),
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=large_size
),
),
]
fsgroup = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_1, filesystems=filesystems
)
self.assertEqual(
small_size - RAID_SUPERBLOCK_OVERHEAD, fsgroup.get_size()
)
def test_get_size_returns_correct_disk_size_for_raid_5(self):
node = factory.make_Node()
small_size = random.randint(
MIN_BLOCK_DEVICE_SIZE, MIN_BLOCK_DEVICE_SIZE ** 2
)
other_size = random.randint(small_size + 1, small_size + (10 ** 5))
number_of_raid_devices = random.randint(2, 9)
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=small_size
),
)
]
for _ in range(number_of_raid_devices):
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=other_size
),
)
)
# Spares are ignored and not taken into calculation.
for _ in range(3):
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID_SPARE,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=other_size
),
)
)
fsgroup = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_5, filesystems=filesystems
)
self.assertEqual(
(small_size * number_of_raid_devices) - RAID_SUPERBLOCK_OVERHEAD,
fsgroup.get_size(),
)
def test_get_size_returns_correct_disk_size_for_raid_6(self):
node = factory.make_Node()
small_size = random.randint(
MIN_BLOCK_DEVICE_SIZE, MIN_BLOCK_DEVICE_SIZE ** 2
)
other_size = random.randint(small_size + 1, small_size + (10 ** 5))
number_of_raid_devices = random.randint(3, 9)
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=small_size
),
)
]
for _ in range(number_of_raid_devices):
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=other_size
),
)
)
# Spares are ignored and not taken into calculation.
for _ in range(3):
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID_SPARE,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=other_size
),
)
)
fsgroup = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_6, filesystems=filesystems
)
self.assertEqual(
(small_size * (number_of_raid_devices - 1))
- RAID_SUPERBLOCK_OVERHEAD,
fsgroup.get_size(),
)
@skip("XXX: GavinPanella 2015-12-04 bug=1522965: Fails spuriously.")
def test_get_size_returns_correct_disk_size_for_raid_10(self):
node = factory.make_Node()
small_size = random.randint(
MIN_BLOCK_DEVICE_SIZE, MIN_BLOCK_DEVICE_SIZE ** 2
)
other_size = random.randint(small_size + 1, small_size + (10 ** 5))
number_of_raid_devices = random.randint(3, 9)
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=small_size
),
)
]
for _ in range(number_of_raid_devices):
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=other_size
),
)
)
# Spares are ignored and not taken into calculation.
for _ in range(3):
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID_SPARE,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=other_size
),
)
)
fsgroup = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_10, filesystems=filesystems
)
self.assertEqual(
(small_size * (number_of_raid_devices + 1) // 2)
- RAID_SUPERBLOCK_OVERHEAD,
fsgroup.get_size(),
)
def test_get_size_returns_0_if_bcache_without_backing(self):
fsgroup = FilesystemGroup(group_type=FILESYSTEM_GROUP_TYPE.BCACHE)
self.assertEqual(0, fsgroup.get_size())
def test_get_size_returns_size_of_backing_device_with_bcache(self):
node = factory.make_Node()
backing_size = random.randint(
MIN_BLOCK_DEVICE_SIZE, MIN_BLOCK_DEVICE_SIZE ** 2
)
cache_set = factory.make_CacheSet(node=node)
backing_block_device = factory.make_PhysicalBlockDevice(
node=node, size=backing_size
)
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.BCACHE_BACKING,
block_device=backing_block_device,
)
]
fsgroup = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE,
cache_mode=CACHE_MODE_TYPE.WRITEBACK,
cache_set=cache_set,
filesystems=filesystems,
)
self.assertEqual(backing_size, fsgroup.get_size())
def test_get_size_returns_total_size_with_vmfs(self):
vmfs = factory.make_VMFS()
self.assertEqual(vmfs.get_total_size(), vmfs.get_size())
def test_get_total_size(self):
vmfs = factory.make_VMFS()
size = 0
for fs in vmfs.filesystems.all():
size += fs.get_size()
self.assertEqual(size, vmfs.get_total_size())
def test_is_lvm_returns_true_when_LVM_VG(self):
fsgroup = FilesystemGroup(group_type=FILESYSTEM_GROUP_TYPE.LVM_VG)
self.assertTrue(fsgroup.is_lvm())
def test_is_lvm_returns_false_when_not_LVM_VG(self):
fsgroup = FilesystemGroup(
group_type=factory.pick_enum(
FILESYSTEM_GROUP_TYPE, but_not=FILESYSTEM_GROUP_TYPE.LVM_VG
)
)
self.assertFalse(fsgroup.is_lvm())
def test_is_raid_returns_true_for_all_raid_types(self):
fsgroup = FilesystemGroup()
for raid_type in FILESYSTEM_GROUP_RAID_TYPES:
fsgroup.group_type = raid_type
self.assertTrue(
fsgroup.is_raid(),
"is_raid should return true for %s" % raid_type,
)
def test_is_raid_returns_false_for_LVM_VG(self):
fsgroup = FilesystemGroup(group_type=FILESYSTEM_GROUP_TYPE.LVM_VG)
self.assertFalse(fsgroup.is_raid())
def test_is_raid_returns_false_for_BCACHE(self):
fsgroup = FilesystemGroup(group_type=FILESYSTEM_GROUP_TYPE.BCACHE)
self.assertFalse(fsgroup.is_raid())
def test_is_bcache_returns_true_when_BCACHE(self):
fsgroup = FilesystemGroup(group_type=FILESYSTEM_GROUP_TYPE.BCACHE)
self.assertTrue(fsgroup.is_bcache())
def test_is_bcache_returns_false_when_not_BCACHE(self):
fsgroup = FilesystemGroup(
group_type=factory.pick_enum(
FILESYSTEM_GROUP_TYPE, but_not=FILESYSTEM_GROUP_TYPE.BCACHE
)
)
self.assertFalse(fsgroup.is_bcache())
def test_is_vmfs(self):
vmfs = factory.make_VMFS()
self.assertTrue(vmfs.is_vmfs())
def test_creating_vmfs_automatically_creates_mounted_fs(self):
part = factory.make_Partition()
name = factory.make_name("datastore")
vmfs = VMFS.objects.create_vmfs(name, [part])
self.assertEqual(
"/vmfs/volumes/%s" % name,
vmfs.virtual_device.get_effective_filesystem().mount_point,
)
def test_can_save_new_filesystem_group_without_filesystems(self):
fsgroup = FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG,
name=factory.make_name("vg"),
)
fsgroup.save()
self.expectThat(fsgroup.id, Not(Is(None)))
self.expectThat(fsgroup.filesystems.count(), Equals(0))
def test_cannot_save_without_filesystems(self):
fsgroup = FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG,
name=factory.make_name("vg"),
)
fsgroup.save()
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['At least one filesystem must have "
"been added.']}"
),
):
fsgroup.save(force_update=True)
def test_cannot_save_without_filesystems_from_different_nodes(self):
filesystems = [factory.make_Filesystem(), factory.make_Filesystem()]
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['All added filesystems must belong to "
"the same node.']}"
),
):
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG,
filesystems=filesystems,
)
def test_cannot_save_volume_group_if_invalid_filesystem(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV,
block_device=factory.make_PhysicalBlockDevice(node=node),
),
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
),
]
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['Volume group can only contain lvm "
"physical volumes.']}"
),
):
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG,
filesystems=filesystems,
)
def test_can_save_volume_group_if_valid_filesystems(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV,
block_device=factory.make_PhysicalBlockDevice(node=node),
),
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV,
block_device=factory.make_PhysicalBlockDevice(node=node),
),
]
# Test is that this does not raise an exception.
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG, filesystems=filesystems
)
def test_cannot_save_volume_group_if_logical_volumes_larger(self):
node = factory.make_Node()
filesystem_one = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
filesystem_two = factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
filesystems = [filesystem_one, filesystem_two]
volume_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG, filesystems=filesystems
)
factory.make_VirtualBlockDevice(
size=volume_group.get_size(), filesystem_group=volume_group
)
filesystem_two.delete()
with ExpectedException(
ValidationError,
re.escape(
"['Volume group cannot be smaller than its "
"logical volumes.']"
),
):
volume_group.save()
def test_cannot_save_raid_0_with_less_than_2_raid_devices(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
]
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 0 must have at least 2 raid "
"devices and no spares.']}"
),
):
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_0,
filesystems=filesystems,
)
def test_cannot_save_raid_0_with_spare_raid_devices(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(2)
]
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID_SPARE,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
)
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 0 must have at least 2 raid "
"devices and no spares.']}"
),
):
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_0,
filesystems=filesystems,
)
def test_can_save_raid_0_with_exactly_2_raid_devices(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(2)
]
# Test is that this does not raise an exception.
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_0, filesystems=filesystems
)
    def test_can_save_raid_0_with_more_than_2_raid_devices(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(10)
]
# Test is that this does not raise an exception.
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_0, filesystems=filesystems
)
def test_cannot_save_raid_1_with_less_than_2_raid_devices(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
]
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 1 must have at least 2 raid "
"devices and any number of spares.']}"
),
):
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_1,
filesystems=filesystems,
)
def test_can_save_raid_1_with_spare_raid_devices(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(2)
]
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID_SPARE,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
)
# Test is that this does not raise an exception.
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_1, filesystems=filesystems
)
def test_can_save_raid_1_with_2_or_more_raid_devices(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(random.randint(2, 10))
]
# Test is that this does not raise an exception.
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_1, filesystems=filesystems
)
def test_cannot_save_raid_5_with_less_than_3_raid_devices(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(random.randint(1, 2))
]
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 5 must have at least 3 raid "
"devices and any number of spares.']}"
),
):
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_5,
filesystems=filesystems,
)
def test_can_save_raid_5_with_3_or_more_raid_devices_and_spares(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(random.randint(3, 10))
]
for _ in range(random.randint(1, 5)):
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID_SPARE,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
)
# Test is that this does not raise an exception.
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_5, filesystems=filesystems
)
def test_cannot_save_raid_6_with_less_than_4_raid_devices(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(random.randint(1, 3))
]
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 6 must have at least 4 raid "
"devices and any number of spares.']}"
),
):
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_6,
filesystems=filesystems,
)
def test_can_save_raid_6_with_4_or_more_raid_devices_and_spares(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(random.randint(4, 10))
]
for _ in range(random.randint(1, 5)):
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID_SPARE,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
)
# Test is that this does not raise an exception.
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_6, filesystems=filesystems
)
def test_cannot_save_raid_10_with_less_than_3_raid_devices(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(random.randint(1, 2))
]
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 10 must have at least 3 raid "
"devices and any number of spares.']}"
),
):
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_10,
filesystems=filesystems,
)
def test_can_save_raid_10_with_3_raid_devices_and_spares(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(3)
]
for _ in range(random.randint(1, 5)):
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID_SPARE,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
)
# Test is that this does not raise an exception.
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_10, filesystems=filesystems
)
def test_can_save_raid_10_with_4_or_more_raid_devices_and_spares(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(random.randint(4, 10))
]
for _ in range(random.randint(1, 5)):
filesystems.append(
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.RAID_SPARE,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
)
# Test is that this does not raise an exception.
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_10, filesystems=filesystems
)
def test_cannot_save_bcache_without_cache_set(self):
node = factory.make_Node()
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.BCACHE_BACKING,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
]
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['Bcache requires an assigned cache set.']}"
),
):
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE,
filesystems=filesystems,
)
filesystem_group.cache_set = None
filesystem_group.save()
def test_cannot_save_bcache_without_backing(self):
node = factory.make_Node()
cache_set = factory.make_CacheSet(node=node)
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['At least one filesystem must have "
"been added.']}"
),
):
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE,
cache_set=cache_set,
filesystems=[],
)
def test_cannot_save_bcache_with_logical_volume_as_backing(self):
node = factory.make_Node()
cache_set = factory.make_CacheSet(node=node)
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.BCACHE_BACKING,
block_device=factory.make_VirtualBlockDevice(node=node),
)
]
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['Bcache cannot use a logical volume as a "
"backing device.']}"
),
):
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE,
cache_set=cache_set,
filesystems=filesystems,
)
def test_can_save_bcache_with_cache_set_and_backing(self):
node = factory.make_Node()
cache_set = factory.make_CacheSet(node=node)
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.BCACHE_BACKING,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
]
# Test is that this does not raise an exception.
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE,
cache_set=cache_set,
filesystems=filesystems,
)
def test_cannot_save_bcache_with_multiple_backings(self):
node = factory.make_Node()
cache_set = factory.make_CacheSet(node=node)
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.BCACHE_BACKING,
block_device=factory.make_PhysicalBlockDevice(node=node),
)
for _ in range(random.randint(2, 10))
]
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['Bcache can only contain one backing "
"device.']}"
),
):
factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE,
cache_set=cache_set,
filesystems=filesystems,
)
def test_save_doesnt_overwrite_uuid(self):
uuid = uuid4()
fsgroup = factory.make_FilesystemGroup(uuid=uuid)
self.assertEqual("%s" % uuid, fsgroup.uuid)
def test_save_doesnt_allow_changing_group_type(self):
fsgroup = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.RAID_0
)
fsgroup.save()
fsgroup.group_type = FILESYSTEM_GROUP_TYPE.RAID_1
error = self.assertRaises(ValidationError, fsgroup.save)
self.assertEqual(
"Cannot change the group_type of a FilesystemGroup.", error.message
)
def test_save_calls_create_or_update_for_when_filesystems_linked(self):
mock_create_or_update_for = self.patch(
VirtualBlockDevice.objects, "create_or_update_for"
)
filesystem_group = factory.make_FilesystemGroup()
self.assertThat(
mock_create_or_update_for, MockCalledOnceWith(filesystem_group)
)
def test_save_doesnt_call_create_or_update_for_when_no_filesystems(self):
mock_create_or_update_for = self.patch(
VirtualBlockDevice.objects, "create_or_update_for"
)
filesystem_group = FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG,
name=factory.make_name("vg"),
)
filesystem_group.save()
self.assertThat(mock_create_or_update_for, MockNotCalled())
def test_get_lvm_allocated_size_and_get_lvm_free_space(self):
"""Check get_lvm_allocated_size and get_lvm_free_space methods."""
backing_volume_size = machine_readable_bytes("10G")
node = factory.make_Node()
fsgroup = FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG,
name=factory.make_name("vg"),
)
fsgroup.save()
block_size = 4096
for i in range(5):
block_device = factory.make_BlockDevice(
node=node, size=backing_volume_size, block_size=block_size
)
factory.make_Filesystem(
filesystem_group=fsgroup,
fstype=FILESYSTEM_TYPE.LVM_PV,
block_device=block_device,
)
# Size should be 50 GB minus one extent per filesystem for LVM headers.
pv_total_size = 50 * 1000 ** 3
extents = (pv_total_size // LVM_PE_SIZE) - 5
usable_size = extents * LVM_PE_SIZE
self.assertEqual(usable_size, fsgroup.get_size())
        # Allocate two VirtualBlockDevices.
factory.make_VirtualBlockDevice(
filesystem_group=fsgroup, size=35 * 1000 ** 3
)
factory.make_VirtualBlockDevice(
filesystem_group=fsgroup, size=5 * 1000 ** 3
)
expected_size = round_size_to_nearest_block(
40 * 1000 ** 3, PARTITION_ALIGNMENT_SIZE, False
)
self.assertEqual(expected_size, fsgroup.get_lvm_allocated_size())
self.assertEqual(
usable_size - expected_size, fsgroup.get_lvm_free_space()
)
def test_get_virtual_block_device_block_size_returns_backing_for_bc(self):
# This test is not included in the scenario below
# `TestFilesystemGroupGetVirtualBlockDeviceBlockSize` because it has
# different logic that doesn't fit in the scenario.
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.BCACHE
)
filesystem = filesystem_group.get_bcache_backing_filesystem()
self.assertEqual(
filesystem.get_block_size(),
filesystem_group.get_virtual_block_device_block_size(),
)
def test_delete_deletes_filesystems_not_block_devices(self):
node = factory.make_Node()
block_devices = [
factory.make_PhysicalBlockDevice(node=node) for _ in range(3)
]
filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, block_device=bd
)
for bd in block_devices
]
filesystem_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG, filesystems=filesystems
)
filesystem_group.delete()
deleted_filesystems = reload_objects(Filesystem, filesystems)
kept_block_devices = reload_objects(PhysicalBlockDevice, block_devices)
self.assertItemsEqual([], deleted_filesystems)
self.assertItemsEqual(block_devices, kept_block_devices)
def test_delete_cannot_delete_volume_group_with_logical_volumes(self):
volume_group = factory.make_FilesystemGroup(
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG
)
factory.make_VirtualBlockDevice(
size=volume_group.get_size(), filesystem_group=volume_group
)
error = self.assertRaises(ValidationError, volume_group.delete)
self.assertEqual(
"This volume group has logical volumes; it cannot be deleted.",
error.message,
)
def test_delete_deletes_virtual_block_device(self):
filesystem_group = factory.make_FilesystemGroup(
group_type=factory.pick_enum(
FILESYSTEM_GROUP_TYPE, but_not=FILESYSTEM_GROUP_TYPE.LVM_VG
)
)
virtual_device = filesystem_group.virtual_device
filesystem_group.delete()
self.assertIsNone(
reload_object(virtual_device),
"VirtualBlockDevice should have been deleted.",
)
class TestFilesystemGroupGetNiceName(MAASServerTestCase):
scenarios = [
(
FILESYSTEM_GROUP_TYPE.LVM_VG,
{
"group_type": FILESYSTEM_GROUP_TYPE.LVM_VG,
"name": "volume group",
},
),
(
FILESYSTEM_GROUP_TYPE.RAID_0,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_0, "name": "RAID"},
),
(
FILESYSTEM_GROUP_TYPE.RAID_1,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_1, "name": "RAID"},
),
(
FILESYSTEM_GROUP_TYPE.RAID_5,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_5, "name": "RAID"},
),
(
FILESYSTEM_GROUP_TYPE.RAID_6,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_6, "name": "RAID"},
),
(
FILESYSTEM_GROUP_TYPE.RAID_10,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_10, "name": "RAID"},
),
(
FILESYSTEM_GROUP_TYPE.BCACHE,
{"group_type": FILESYSTEM_GROUP_TYPE.BCACHE, "name": "Bcache"},
),
(
FILESYSTEM_GROUP_TYPE.VMFS6,
{"group_type": FILESYSTEM_GROUP_TYPE.VMFS6, "name": "VMFS"},
),
]
def test_returns_prefix(self):
filesystem_group = factory.make_FilesystemGroup(
group_type=self.group_type
)
self.assertEqual(self.name, filesystem_group.get_nice_name())
class TestFilesystemGroupGetNamePrefix(MAASServerTestCase):
scenarios = [
(
FILESYSTEM_GROUP_TYPE.LVM_VG,
{"group_type": FILESYSTEM_GROUP_TYPE.LVM_VG, "prefix": "vg"},
),
(
FILESYSTEM_GROUP_TYPE.RAID_0,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_0, "prefix": "md"},
),
(
FILESYSTEM_GROUP_TYPE.RAID_1,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_1, "prefix": "md"},
),
(
FILESYSTEM_GROUP_TYPE.RAID_5,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_5, "prefix": "md"},
),
(
FILESYSTEM_GROUP_TYPE.RAID_6,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_6, "prefix": "md"},
),
(
FILESYSTEM_GROUP_TYPE.RAID_10,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_10, "prefix": "md"},
),
(
FILESYSTEM_GROUP_TYPE.BCACHE,
{"group_type": FILESYSTEM_GROUP_TYPE.BCACHE, "prefix": "bcache"},
),
(
FILESYSTEM_GROUP_TYPE.VMFS6,
{"group_type": FILESYSTEM_GROUP_TYPE.VMFS6, "prefix": "vmfs"},
),
]
def test_returns_prefix(self):
filesystem_group = factory.make_FilesystemGroup(
group_type=self.group_type
)
self.assertEqual(self.prefix, filesystem_group.get_name_prefix())
class TestFilesystemGroupGetVirtualBlockDeviceBlockSize(MAASServerTestCase):
scenarios = [
(
FILESYSTEM_GROUP_TYPE.LVM_VG,
{"group_type": FILESYSTEM_GROUP_TYPE.LVM_VG, "block_size": 4096},
),
(
FILESYSTEM_GROUP_TYPE.RAID_0,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_0, "block_size": 512},
),
(
FILESYSTEM_GROUP_TYPE.RAID_1,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_1, "block_size": 512},
),
(
FILESYSTEM_GROUP_TYPE.RAID_5,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_5, "block_size": 512},
),
(
FILESYSTEM_GROUP_TYPE.RAID_6,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_6, "block_size": 512},
),
(
FILESYSTEM_GROUP_TYPE.RAID_10,
{"group_type": FILESYSTEM_GROUP_TYPE.RAID_10, "block_size": 512},
),
(
FILESYSTEM_GROUP_TYPE.VMFS6,
{"group_type": FILESYSTEM_GROUP_TYPE.VMFS6, "block_size": 1024},
),
# For BCACHE see
# `test_get_virtual_block_device_block_size_returns_backing_for_bc`
# above.
]
def test_returns_block_size(self):
filesystem_group = factory.make_FilesystemGroup(
group_type=self.group_type
)
self.assertEqual(
self.block_size,
filesystem_group.get_virtual_block_device_block_size(),
)
class TestVolumeGroup(MAASServerTestCase):
def test_objects_is_VolumeGroupManager(self):
self.assertIsInstance(VolumeGroup.objects, VolumeGroupManager)
def test_group_type_set_to_LVM_VG(self):
obj = VolumeGroup()
self.assertEqual(FILESYSTEM_GROUP_TYPE.LVM_VG, obj.group_type)
def test_update_block_devices_and_partitions(self):
node = factory.make_Node()
block_devices = [
factory.make_PhysicalBlockDevice(node=node) for _ in range(3)
]
new_block_device = factory.make_PhysicalBlockDevice(node=node)
partition_block_device = factory.make_PhysicalBlockDevice(
node=node,
size=(MIN_BLOCK_DEVICE_SIZE * 4) + PARTITION_TABLE_EXTRA_SPACE,
)
partition_table = factory.make_PartitionTable(
block_device=partition_block_device
)
partitions = [
partition_table.add_partition(size=MIN_BLOCK_DEVICE_SIZE)
for _ in range(2)
]
new_partition = partition_table.add_partition(
size=MIN_BLOCK_DEVICE_SIZE
)
initial_bd_filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, block_device=bd
)
for bd in block_devices
]
initial_part_filesystems = [
factory.make_Filesystem(
fstype=FILESYSTEM_TYPE.LVM_PV, partition=part
)
for part in partitions
]
volume_group = factory.make_VolumeGroup(
filesystems=initial_bd_filesystems + initial_part_filesystems
)
deleted_block_device = block_devices[0]
updated_block_devices = [new_block_device] + block_devices[1:]
deleted_partition = partitions[0]
update_partitions = [new_partition] + partitions[1:]
volume_group.update_block_devices_and_partitions(
updated_block_devices, update_partitions
)
self.assertIsNone(deleted_block_device.get_effective_filesystem())
self.assertIsNone(deleted_partition.get_effective_filesystem())
self.assertEqual(
volume_group.id,
new_block_device.get_effective_filesystem().filesystem_group.id,
)
self.assertEqual(
volume_group.id,
new_partition.get_effective_filesystem().filesystem_group.id,
)
for device in block_devices[1:] + partitions[1:]:
self.assertEqual(
volume_group.id,
device.get_effective_filesystem().filesystem_group.id,
)
def test_create_logical_volume(self):
volume_group = factory.make_VolumeGroup()
name = factory.make_name()
vguuid = "%s" % uuid4()
size = random.randint(MIN_BLOCK_DEVICE_SIZE, volume_group.get_size())
logical_volume = volume_group.create_logical_volume(
name=name, uuid=vguuid, size=size
)
logical_volume = reload_object(logical_volume)
expected_size = round_size_to_nearest_block(
size, PARTITION_ALIGNMENT_SIZE, False
)
self.assertThat(
logical_volume,
MatchesStructure.byEquality(
name=name,
uuid=vguuid,
size=expected_size,
block_size=volume_group.get_virtual_block_device_block_size(),
),
)
class TestRAID(MAASServerTestCase):
def test_objects_is_RAIDManager(self):
self.assertIsInstance(RAID.objects, RAIDManager)
def test_init_raises_ValueError_if_group_type_not_set_to_raid_type(self):
self.assertRaises(
ValueError, RAID, group_type=FILESYSTEM_GROUP_TYPE.LVM_VG
)
def test_create_raid(self):
node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(10)
]
for bd in block_devices[5:]:
factory.make_PartitionTable(block_device=bd)
partitions = [
bd.get_partitiontable().add_partition() for bd in block_devices[5:]
]
spare_block_device = block_devices[0]
spare_partition = partitions[0]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_6,
uuid=uuid,
block_devices=block_devices[1:5],
partitions=partitions[1:],
spare_devices=[spare_block_device],
spare_partitions=[spare_partition],
)
self.assertEqual("md0", raid.name)
self.assertEqual(
(6 * partitions[1].size) - RAID_SUPERBLOCK_OVERHEAD,
raid.get_size(),
)
self.assertEqual(FILESYSTEM_GROUP_TYPE.RAID_6, raid.group_type)
self.assertEqual(uuid, raid.uuid)
self.assertEqual(10, raid.filesystems.count())
self.assertEqual(
8, raid.filesystems.filter(fstype=FILESYSTEM_TYPE.RAID).count()
)
self.assertEqual(
2,
raid.filesystems.filter(fstype=FILESYSTEM_TYPE.RAID_SPARE).count(),
)
def test_create_raid_0_with_a_spare_fails(self):
node = factory.make_Node()
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=10 * 1000 ** 4)
for _ in range(10)
]
uuid = str(uuid4())
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 0 must have at least 2 raid "
"devices and no spares.']}"
),
):
RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_0,
uuid=uuid,
block_devices=block_devices[1:],
partitions=[],
spare_devices=block_devices[:1],
spare_partitions=[],
)
def test_create_raid_without_devices_fails(self):
uuid = str(uuid4())
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['At least one filesystem must have been "
"added.']}"
),
):
RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_0,
uuid=uuid,
block_devices=[],
partitions=[],
spare_devices=[],
spare_partitions=[],
)
def test_create_raid_0_with_one_element_fails(self):
node = factory.make_Node()
block_device = factory.make_PhysicalBlockDevice(node=node)
uuid = str(uuid4())
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 0 must have at least 2 raid "
"devices and no spares.']}"
),
):
RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_0,
uuid=uuid,
block_devices=[block_device],
partitions=[],
spare_devices=[],
spare_partitions=[],
)
def test_create_raid_1_with_spares(self):
node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(10)
]
for bd in block_devices[5:]:
factory.make_PartitionTable(block_device=bd)
partitions = [
bd.get_partitiontable().add_partition() for bd in block_devices[5:]
]
# Partition size will be smaller than the disk, because of overhead.
spare_block_device = block_devices[0]
spare_partition = partitions[0]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_1,
uuid=uuid,
block_devices=block_devices[1:5],
partitions=partitions[1:],
spare_devices=[spare_block_device],
spare_partitions=[spare_partition],
)
self.assertEqual("md0", raid.name)
self.assertEqual(
partitions[1].size - RAID_SUPERBLOCK_OVERHEAD, raid.get_size()
)
self.assertEqual(FILESYSTEM_GROUP_TYPE.RAID_1, raid.group_type)
self.assertEqual(uuid, raid.uuid)
self.assertEqual(10, raid.filesystems.count())
self.assertEqual(
8, raid.filesystems.filter(fstype=FILESYSTEM_TYPE.RAID).count()
)
self.assertEqual(
2,
raid.filesystems.filter(fstype=FILESYSTEM_TYPE.RAID_SPARE).count(),
)
def test_create_raid_1_with_one_element_fails(self):
node = factory.make_Node()
block_device = factory.make_PhysicalBlockDevice(node=node)
uuid = str(uuid4())
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 1 must have at least 2 raid "
"devices and any number of spares.']}"
),
):
RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_1,
uuid=uuid,
block_devices=[block_device],
partitions=[],
spare_devices=[],
spare_partitions=[],
)
def test_create_raid_5_with_spares(self):
node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(10)
]
for bd in block_devices[5:]:
factory.make_PartitionTable(block_device=bd)
partitions = [
bd.get_partitiontable().add_partition() for bd in block_devices[5:]
]
spare_block_device = block_devices[0]
spare_partition = partitions[0]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
block_devices=block_devices[1:5],
partitions=partitions[1:],
spare_devices=[spare_block_device],
spare_partitions=[spare_partition],
)
self.assertEqual("md0", raid.name)
self.assertEqual(
(7 * partitions[1].size) - RAID_SUPERBLOCK_OVERHEAD,
raid.get_size(),
)
self.assertEqual(FILESYSTEM_GROUP_TYPE.RAID_5, raid.group_type)
self.assertEqual(uuid, raid.uuid)
self.assertEqual(10, raid.filesystems.count())
self.assertEqual(
8, raid.filesystems.filter(fstype=FILESYSTEM_TYPE.RAID).count()
)
self.assertEqual(
2,
raid.filesystems.filter(fstype=FILESYSTEM_TYPE.RAID_SPARE).count(),
)
def test_create_raid_5_with_2_elements_fails(self):
node = factory.make_Node()
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=10 * 1000 ** 4)
for _ in range(2)
]
uuid = str(uuid4())
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 5 must have at least 3 raid "
"devices and any number of spares.']}"
),
):
RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
block_devices=block_devices,
partitions=[],
spare_devices=[],
spare_partitions=[],
)
def test_create_raid_6_with_3_elements_fails(self):
node = factory.make_Node()
block_devices = [
factory.make_PhysicalBlockDevice(node=node) for _ in range(3)
]
uuid = str(uuid4())
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 6 must have at least 4 raid "
"devices and any number of spares.']}"
),
):
RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_6,
uuid=uuid,
block_devices=block_devices,
partitions=[],
spare_devices=[],
spare_partitions=[],
)
def test_create_raid_10_with_2_elements_fails(self):
node = factory.make_Node()
block_devices = [
factory.make_PhysicalBlockDevice(node=node) for _ in range(2)
]
uuid = str(uuid4())
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 10 must have at least 3 raid "
"devices and any number of spares.']}"
),
):
RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_10,
uuid=uuid,
block_devices=block_devices,
partitions=[],
spare_devices=[],
spare_partitions=[],
)
def test_create_raid_with_block_device_from_other_node_fails(self):
node1 = factory.make_Node()
node2 = factory.make_Node()
block_devices_1 = [
factory.make_PhysicalBlockDevice(node=node1) for _ in range(5)
]
block_devices_2 = [
factory.make_PhysicalBlockDevice(node=node2) for _ in range(5)
]
uuid = str(uuid4())
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['All added filesystems must belong to the "
"same node.']}"
),
):
RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_1,
uuid=uuid,
block_devices=block_devices_1 + block_devices_2,
partitions=[],
spare_devices=[],
spare_partitions=[],
)
def test_add_device_to_array(self):
node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(10)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
block_devices=block_devices,
)
device = factory.make_PhysicalBlockDevice(node=node, size=device_size)
raid.add_device(device, FILESYSTEM_TYPE.RAID)
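        # Now 11 active raid devices; RAID 5 capacity is (11 - 1) * device_size.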
self.assertEqual(11, raid.filesystems.count())
self.assertEqual(
(10 * device_size) - RAID_SUPERBLOCK_OVERHEAD, raid.get_size()
)
def test_add_spare_device_to_array(self):
node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(10)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
block_devices=block_devices,
)
device = factory.make_PhysicalBlockDevice(node=node, size=device_size)
raid.add_device(device, FILESYSTEM_TYPE.RAID_SPARE)
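        # The spare shows up as an 11th filesystem but adds no capacity:
        # still (10 - 1) * device_size.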
self.assertEqual(11, raid.filesystems.count())
self.assertEqual(
(9 * device_size) - RAID_SUPERBLOCK_OVERHEAD, raid.get_size()
)
def test_add_partition_to_array(self):
node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(10)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
block_devices=block_devices,
)
partition = factory.make_PartitionTable(
block_device=factory.make_PhysicalBlockDevice(
node=node, size=device_size
)
).add_partition()
raid.add_partition(partition, FILESYSTEM_TYPE.RAID)
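        # The partition becomes an 11th raid device and the smallest member,
        # so capacity is (11 - 1) * partition.size.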
self.assertEqual(11, raid.filesystems.count())
self.assertEqual(
(10 * partition.size) - RAID_SUPERBLOCK_OVERHEAD, raid.get_size()
)
def test_add_spare_partition_to_array(self):
node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(10)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
block_devices=block_devices,
)
partition = factory.make_PartitionTable(
block_device=factory.make_PhysicalBlockDevice(
node=node, size=device_size
)
).add_partition()
raid.add_partition(partition, FILESYSTEM_TYPE.RAID_SPARE)
self.assertEqual(11, raid.filesystems.count())
self.assertEqual(
(9 * partition.size) - RAID_SUPERBLOCK_OVERHEAD, raid.get_size()
)
def test_add_device_from_another_node_to_array_fails(self):
node = factory.make_Node()
other_node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(10)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
block_devices=block_devices,
)
device = factory.make_PhysicalBlockDevice(
node=other_node, size=device_size
)
with ExpectedException(
ValidationError,
re.escape(
"['Device needs to be from the same node as the rest of the "
"array.']"
),
):
raid.add_device(device, FILESYSTEM_TYPE.RAID)
self.assertEqual(10, raid.filesystems.count()) # Still 10 devices
self.assertEqual(
(9 * device_size) - RAID_SUPERBLOCK_OVERHEAD, raid.get_size()
)
def test_add_partition_from_another_node_to_array_fails(self):
node = factory.make_Node()
other_node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(10)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
block_devices=block_devices,
)
partition = factory.make_PartitionTable(
block_device=factory.make_PhysicalBlockDevice(
node=other_node, size=device_size
)
).add_partition()
with ExpectedException(
ValidationError,
re.escape(
"['Partition must be on a device from the same node as "
"the rest of the array.']"
),
):
raid.add_partition(partition, FILESYSTEM_TYPE.RAID)
self.assertEqual(10, raid.filesystems.count()) # Nothing added
self.assertEqual(
(9 * device_size) - RAID_SUPERBLOCK_OVERHEAD, raid.get_size()
)
def test_add_already_used_device_to_array_fails(self):
node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(10)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
block_devices=block_devices,
)
device = factory.make_PhysicalBlockDevice(node=node, size=device_size)
Filesystem.objects.create(
block_device=device,
mount_point="/export/home",
fstype=FILESYSTEM_TYPE.EXT4,
)
with ExpectedException(
ValidationError,
re.escape("['There is another filesystem on this device.']"),
):
raid.add_device(device, FILESYSTEM_TYPE.RAID)
self.assertEqual(10, raid.filesystems.count()) # Nothing added.
self.assertEqual(
(9 * device_size) - RAID_SUPERBLOCK_OVERHEAD, raid.get_size()
)
def test_remove_device_from_array_invalidates_array_fails(self):
"""Checks it's not possible to remove a device from an RAID in such way
as to make the RAID invalid (a 1-device RAID-0/1, a 2-device RAID-5
etc). The goal is to make sure we trigger the RAID internal validation.
"""
node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(4)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_6,
uuid=uuid,
block_devices=block_devices,
)
fsids_before = [fs.id for fs in raid.filesystems.all()]
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 6 must have at least 4 raid "
"devices and any number of spares.']}"
),
):
raid.remove_device(block_devices[0])
self.assertEqual(4, raid.filesystems.count())
self.assertEqual(
(2 * device_size) - RAID_SUPERBLOCK_OVERHEAD, raid.get_size()
)
# Ensure the filesystems are the exact same before and after.
self.assertItemsEqual(
fsids_before, [fs.id for fs in raid.filesystems.all()]
)
def test_remove_partition_from_array_invalidates_array_fails(self):
"""Checks it's not possible to remove a partition from an RAID in such
way as to make the RAID invalid (a 1-device RAID-0/1, a 2-device RAID-5
etc). The goal is to make sure we trigger the RAID internal validation.
"""
node = factory.make_Node(bios_boot_method="uefi")
device_size = 10 * 1000 ** 4
partitions = [
factory.make_PartitionTable(
table_type=PARTITION_TABLE_TYPE.GPT,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=device_size
),
).add_partition()
for _ in range(4)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_6,
uuid=uuid,
partitions=partitions,
)
fsids_before = [fs.id for fs in raid.filesystems.all()]
with ExpectedException(
ValidationError,
re.escape(
"{'__all__': ['RAID level 6 must have at least 4 raid "
"devices and any number of spares.']}"
),
):
raid.remove_partition(partitions[0])
self.assertEqual(4, raid.filesystems.count())
self.assertEqual(
(2 * partitions[0].size) - RAID_SUPERBLOCK_OVERHEAD,
raid.get_size(),
)
# Ensure the filesystems are the exact same before and after.
self.assertItemsEqual(
fsids_before, [fs.id for fs in raid.filesystems.all()]
)
def test_remove_device_from_array(self):
node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(10)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
block_devices=block_devices[:-2],
spare_devices=block_devices[-2:],
)
raid.remove_device(block_devices[0])
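        # 9 filesystems remain: 7 active raid devices plus 2 spares, so
        # capacity is (7 - 1) * device_size.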
self.assertEqual(9, raid.filesystems.count())
self.assertEqual(
(6 * device_size) - RAID_SUPERBLOCK_OVERHEAD, raid.get_size()
)
def test_remove_partition_from_array(self):
node = factory.make_Node()
device_size = 10 * 1000 ** 4
partitions = [
factory.make_PartitionTable(
block_device=factory.make_PhysicalBlockDevice(
node=node, size=device_size
)
).add_partition()
for _ in range(10)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
partitions=partitions[:-2],
spare_partitions=partitions[-2:],
)
raid.remove_partition(partitions[0])
self.assertEqual(9, raid.filesystems.count())
self.assertEqual(
(6 * partitions[0].size) - RAID_SUPERBLOCK_OVERHEAD,
raid.get_size(),
)
def test_remove_invalid_partition_from_array_fails(self):
node = factory.make_Node(bios_boot_method="uefi")
device_size = 10 * 1000 ** 4
partitions = [
factory.make_PartitionTable(
table_type=PARTITION_TABLE_TYPE.GPT,
block_device=factory.make_PhysicalBlockDevice(
node=node, size=device_size
),
).add_partition()
for _ in range(10)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
partitions=partitions,
)
with ExpectedException(
ValidationError,
re.escape("['Partition does not belong to this array.']"),
):
raid.remove_partition(
factory.make_PartitionTable(
block_device=factory.make_PhysicalBlockDevice(
node=node, size=device_size
)
).add_partition()
)
self.assertEqual(10, raid.filesystems.count())
self.assertEqual(
(9 * partitions[0].size) - RAID_SUPERBLOCK_OVERHEAD,
raid.get_size(),
)
def test_remove_device_from_array_fails(self):
node = factory.make_Node()
device_size = 10 * 1000 ** 4
block_devices = [
factory.make_PhysicalBlockDevice(node=node, size=device_size)
for _ in range(10)
]
uuid = str(uuid4())
raid = RAID.objects.create_raid(
name="md0",
level=FILESYSTEM_GROUP_TYPE.RAID_5,
uuid=uuid,
block_devices=block_devices,
)
with ExpectedException(
ValidationError,
re.escape("['Device does not belong to this array.']"),
):
raid.remove_device(
factory.make_PhysicalBlockDevice(node=node, size=device_size)
)
self.assertEqual(10, raid.filesystems.count())
self.assertEqual(
(9 * device_size) - RAID_SUPERBLOCK_OVERHEAD, raid.get_size()
)
class TestBcache(MAASServerTestCase):
def test_objects_is_BcacheManager(self):
self.assertIsInstance(Bcache.objects, BcacheManager)
def test_group_type_set_to_BCACHE(self):
obj = Bcache()
self.assertEqual(FILESYSTEM_GROUP_TYPE.BCACHE, obj.group_type)
def test_create_bcache_with_physical_block_devices(self):
"""Checks creation of a Bcache with physical block devices for caching
and backing roles."""
node = factory.make_Node()
backing_size = 10 * 1000 ** 4
cache_set = factory.make_CacheSet(node=node)
backing_device = factory.make_PhysicalBlockDevice(
node=node, size=backing_size
)
uuid = str(uuid4())
bcache = Bcache.objects.create_bcache(
name="bcache0",
uuid=uuid,
cache_set=cache_set,
backing_device=backing_device,
cache_mode=CACHE_MODE_TYPE.WRITEBACK,
)
# Verify the filesystems were properly created on the target devices
self.assertEqual(backing_size, bcache.get_size())
self.assertEqual(
FILESYSTEM_TYPE.BCACHE_BACKING,
backing_device.get_effective_filesystem().fstype,
)
self.assertEqual(cache_set, bcache.cache_set)
self.assertEqual(
bcache, backing_device.get_effective_filesystem().filesystem_group
)
def test_create_bcache_with_virtual_block_devices(self):
"""Checks creation of a Bcache with virtual block devices for caching
and backing roles."""
node = factory.make_Node()
backing_size = 10 * 1000 ** 4
cache_size = 1000 ** 4
        # A caching device that's ridiculously fast to read from, but slow to
        # write to.
cache_device = RAID.objects.create_raid(
block_devices=[
factory.make_PhysicalBlockDevice(node=node, size=cache_size)
for _ in range(10)
],
level=FILESYSTEM_GROUP_TYPE.RAID_1,
).virtual_device
cache_set = factory.make_CacheSet(block_device=cache_device)
# A ridiculously reliable backing store.
backing_device = RAID.objects.create_raid(
block_devices=[
factory.make_PhysicalBlockDevice(node=node, size=backing_size)
for _ in range(12)
], # 10 data devices, 2 checksum devices.
level=FILESYSTEM_GROUP_TYPE.RAID_6,
).virtual_device
bcache = Bcache.objects.create_bcache(
cache_set=cache_set,
backing_device=backing_device,
cache_mode=CACHE_MODE_TYPE.WRITEAROUND,
)
# Verify the filesystems were properly created on the target devices
self.assertEqual(
(10 * backing_size) - RAID_SUPERBLOCK_OVERHEAD, bcache.get_size()
)
self.assertEqual(
FILESYSTEM_TYPE.BCACHE_CACHE,
cache_device.get_effective_filesystem().fstype,
)
self.assertEqual(
FILESYSTEM_TYPE.BCACHE_BACKING,
backing_device.get_effective_filesystem().fstype,
)
self.assertEqual(cache_set, bcache.cache_set)
self.assertEqual(
bcache, backing_device.get_effective_filesystem().filesystem_group
)
def test_create_bcache_with_partitions(self):
"""Checks creation of a Bcache with partitions for caching and backing
roles."""
node = factory.make_Node()
backing_size = 10 * 1000 ** 4
cache_size = 1000 ** 4
cache_partition = factory.make_PartitionTable(
block_device=factory.make_PhysicalBlockDevice(
node=node, size=cache_size
)
).add_partition()
cache_set = factory.make_CacheSet(partition=cache_partition)
backing_partition = factory.make_PartitionTable(
block_device=factory.make_PhysicalBlockDevice(
node=node, size=backing_size
)
).add_partition()
uuid = str(uuid4())
bcache = Bcache.objects.create_bcache(
name="bcache0",
uuid=uuid,
cache_set=cache_set,
backing_partition=backing_partition,
cache_mode=CACHE_MODE_TYPE.WRITEBACK,
)
# Verify the filesystems were properly created on the target devices
self.assertEqual(backing_partition.size, bcache.get_size())
self.assertEqual(
FILESYSTEM_TYPE.BCACHE_CACHE,
cache_partition.get_effective_filesystem().fstype,
)
self.assertEqual(
FILESYSTEM_TYPE.BCACHE_BACKING,
backing_partition.get_effective_filesystem().fstype,
)
self.assertEqual(cache_set, bcache.cache_set)
self.assertEqual(
bcache,
backing_partition.get_effective_filesystem().filesystem_group,
)
def test_create_bcache_with_block_devices_and_partition(self):
"""Checks creation of a Bcache with a partition for caching and a
physical block device for backing."""
node = factory.make_Node()
backing_size = 10 * 1000 ** 4
cache_size = 1000 ** 4
cache_partition = factory.make_PartitionTable(
block_device=factory.make_PhysicalBlockDevice(
node=node, size=cache_size
)
).add_partition()
cache_set = factory.make_CacheSet(partition=cache_partition)
backing_device = factory.make_PhysicalBlockDevice(
node=node, size=backing_size
)
uuid = str(uuid4())
bcache = Bcache.objects.create_bcache(
name="bcache0",
uuid=uuid,
cache_set=cache_set,
backing_device=backing_device,
cache_mode=CACHE_MODE_TYPE.WRITEBACK,
)
# Verify the filesystems were properly created on the target devices
self.assertEqual(backing_size, bcache.get_size())
self.assertEqual(
FILESYSTEM_TYPE.BCACHE_CACHE,
cache_partition.get_effective_filesystem().fstype,
)
self.assertEqual(
FILESYSTEM_TYPE.BCACHE_BACKING,
backing_device.get_effective_filesystem().fstype,
)
self.assertEqual(cache_set, bcache.cache_set)
self.assertEqual(
bcache, backing_device.get_effective_filesystem().filesystem_group
)
def test_delete_bcache(self):
"""Ensures deletion of a bcache also deletes bcache filesystems from
caching and backing devices."""
node = factory.make_Node()
backing_size = 10 * 1000 ** 4
cache_set = factory.make_CacheSet(node=node)
backing_device = factory.make_PhysicalBlockDevice(
node=node, size=backing_size
)
bcache = Bcache.objects.create_bcache(
cache_set=cache_set,
backing_device=backing_device,
cache_mode=CACHE_MODE_TYPE.WRITEBACK,
)
bcache.delete()
# Verify both filesystems were deleted.
self.assertIsNone(backing_device.get_effective_filesystem())
# Verify the cache_set is not deleted.
self.assertIsNotNone(reload_object(cache_set))
| agpl-3.0 | 4,331,299,812,725,828,600 | 36.123395 | 79 | 0.574106 | false |
MGEScan/mgescan | mgescan/utils.py | 1 | 1147 | import time
import os, errno
import subprocess as sub
def get_abspath(path):
try:
return os.path.abspath(path)
except:
# print [DEBUG] Failed to convert a path to an absolute path
return path
def create_directory(path, skipifexists=True):
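    # If the path already exists and skipifexists is True, keep appending
    # ".1" to the name until an unused path is found.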
if not os.path.exists(path):
os.makedirs(path)
else:
if skipifexists:
new_path = path + ".1"
return create_directory(new_path, skipifexists)
return get_abspath(path)
def exists(path):
try:
return os.path.exists(path)
except:
return False
def silentremove(filename):
try:
os.remove(filename)
except OSError as e: # this would be "except OSError, e:" before Python 2.6
if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory
            raise # re-raise the exception if a different error occurred
def cmd_exists(cmd):
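    # True when `which` exits 0, i.e. the command is available on PATH.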
return sub.call(["which", cmd], stdout=sub.PIPE, stderr=sub.PIPE) == 0
def check_cmd(cmd):
if not cmd_exists(cmd):
print "=" * 50
print "[Error] " + cmd + " is not found. "
print "=" * 50
time.sleep(3)
| gpl-3.0 | 8,974,377,971,734,298,000 | 25.674419 | 79 | 0.61116 | false |