repo_name (string, 6-100 chars) | path (string, 4-294 chars) | copies (string, 1-5 chars) | size (string, 4-6 chars) | content (string, 606-896k chars) | license (string, 15 classes) | var_hash (int64, -9,223,186,179,200,150,000 to 9,223,291,175B) | doc_hash (int64, -9,223,304,365,658,930,000 to 9,223,309,051B) | line_mean (float64, 3.5-99.8) | line_max (int64, 13-999) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---
trondhindenes/ansible-modules-core | network/nxos/nxos_snmp_contact.py | 20 | 11922 |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: nxos_snmp_contact
version_added: "2.2"
short_description: Manages SNMP contact info.
description:
- Manages SNMP contact information.
extends_documentation_fragment: nxos
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- C(state=absent) removes the contact configuration if it is configured.
options:
contact:
description:
- Contact information.
required: true
state:
description:
- Manage the state of the resource.
required: true
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# ensure snmp contact is configured
- nxos_snmp_contact:
contact: Test
state: present
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"contact": "New_Test"}
existing:
description: k/v pairs of existing snmp contact
returned: always
type: dict
sample: {"contact": "Test"}
end_state:
description: k/v pairs of snmp contact after module execution
returned: always
type: dict
sample: {"contact": "New_Test"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["snmp-server contact New_Test"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import json
# COMMON CODE FOR MIGRATION
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class CustomNetworkConfig(NetworkConfig):
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.children:
if child in S:
continue
self.expand_section(child, S)
return S
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
parents = [p.text for p in item.parents]
if parents == path[:-1]:
return item
def to_block(self, section):
return '\n'.join([item.raw for item in section])
def get_section(self, path):
try:
section = self.get_section_objects(path)
return self.to_block(section)
except ValueError:
return list()
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self.expand_section(obj)
def add(self, lines, parents=None):
"""Adds one or lines of configuration
"""
ancestors = list()
offset = 0
obj = None
## global config command
if not parents:
for line in to_list(lines):
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_section_objects(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self.indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj.parents = list(ancestors)
ancestors[-1].children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in to_list(lines):
# check if child already exists
for child in ancestors[-1].children:
if child.text == line:
break
else:
offset = len(parents) * self.indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item.parents = ancestors
ancestors[-1].children.append(item)
self.items.append(item)
def get_network_module(**kwargs):
try:
return get_module(**kwargs)
except NameError:
return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
config = module.params['config']
if not config:
try:
config = module.get_config()
except AttributeError:
defaults = module.params['include_defaults']
config = module.config.get_config(include_defaults=defaults)
return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
config = get_config(module)
commands = candidate.difference(config)
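# Only the candidate lines missing from the running config are pushed to the device.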
commands = [str(c).strip() for c in commands]
save_config = module.params['save']
result = dict(changed=False)
if commands:
if not module.check_mode:
try:
module.configure(commands)
except AttributeError:
module.config(commands)
if save_config:
try:
module.config.save_config()
except AttributeError:
module.execute(['copy running-config startup-config'])
result['changed'] = True
result['updates'] = commands
return result
# END OF COMMON CODE
def execute_config_command(commands, module):
try:
module.configure(commands)
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending CLI commands',
error=str(clie), commands=commands)
except AttributeError:
try:
commands.insert(0, 'configure')
module.cli.add_commands(commands, output='config')
module.cli.run_commands()
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending CLI commands',
error=str(clie), commands=commands)
def get_cli_body_ssh(command, response, module):
"""Get response for when transport=cli. This is kind of a hack and mainly
needed because these modules were originally written for NX-API. And
not every command supports "| json" when using cli/ssh. As such, we assume
if | json returns an XML string, it is a valid command, but that the
resource doesn't exist yet. Instead, the output will be a raw string
when issuing commands containing 'show run'.
"""
if 'xml' in response[0]:
body = []
elif 'show run' in command:
body = response
else:
try:
body = [json.loads(response[0])]
except ValueError:
module.fail_json(msg='Command does not support JSON output',
command=command)
return body
def execute_show(cmds, module, command_type=None):
command_type_map = {
'cli_show': 'json',
'cli_show_ascii': 'text'
}
try:
if command_type:
response = module.execute(cmds, command_type=command_type)
else:
response = module.execute(cmds)
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
except AttributeError:
try:
if command_type:
command_type = command_type_map.get(command_type)
module.cli.add_commands(cmds, output=command_type)
response = module.cli.run_commands()
else:
module.cli.add_commands(cmds, raw=True)
response = module.cli.run_commands()
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
return response
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
if 'show run' not in command:
command += ' | json'
cmds = [command]
response = execute_show(cmds, module)
body = get_cli_body_ssh(command, response, module)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = execute_show(cmds, module, command_type=command_type)
return body
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_snmp_contact(module):
contact = {}
contact_regex = '.*snmp-server\scontact\s(?P<contact>\S+).*'
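# Illustrative match (hypothetical running-config line):
#   'snmp-server contact Test' -> contact == 'Test'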
command = 'show run snmp'
body = execute_show_command(command, module, command_type='cli_show_ascii')[0]
try:
match_contact = re.match(contact_regex, body, re.DOTALL)
group_contact = match_contact.groupdict()
contact['contact'] = group_contact["contact"]
except AttributeError:
contact = {}
return contact
def main():
argument_spec = dict(
contact=dict(required=True, type='str'),
state=dict(choices=['absent', 'present'],
default='present')
)
module = get_network_module(argument_spec=argument_spec,
supports_check_mode=True)
contact = module.params['contact']
state = module.params['state']
existing = get_snmp_contact(module)
changed = False
proposed = dict(contact=contact)
end_state = existing
commands = []
if state == 'absent':
if existing and existing['contact'] == contact:
commands.append('no snmp-server contact')
elif state == 'present':
if not existing or existing['contact'] != contact:
commands.append('snmp-server contact {0}'.format(contact))
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
changed = True
execute_config_command(cmds, module)
end_state = get_snmp_contact(module)
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
results['updates'] = cmds
results['changed'] = changed
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 | 5,855,997,090,906,414,000 | 3,069,950,891,068,613,600 | 29.030227 | 82 | 0.585221 | false |
daniponi/django | tests/custom_pk/models.py | 282 | 1272 |
# -*- coding: utf-8 -*-
"""
Using a custom primary key
By default, Django adds an ``"id"`` field to each model. But you can override
this behavior by explicitly adding ``primary_key=True`` to a field.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from .fields import MyAutoField
@python_2_unicode_compatible
class Employee(models.Model):
employee_code = models.IntegerField(primary_key=True, db_column='code')
first_name = models.CharField(max_length=20)
last_name = models.CharField(max_length=20)
class Meta:
ordering = ('last_name', 'first_name')
def __str__(self):
return "%s %s" % (self.first_name, self.last_name)
@python_2_unicode_compatible
class Business(models.Model):
name = models.CharField(max_length=20, primary_key=True)
employees = models.ManyToManyField(Employee)
class Meta:
verbose_name_plural = 'businesses'
def __str__(self):
return self.name
@python_2_unicode_compatible
class Bar(models.Model):
id = MyAutoField(primary_key=True, db_index=True)
def __str__(self):
return repr(self.pk)
class Foo(models.Model):
bar = models.ForeignKey(Bar, models.CASCADE)
| bsd-3-clause | 6,800,935,809,634,790,000 | 631,986,306,915,640,200 | 23.941176 | 77 | 0.688679 | false |
WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/django/contrib/gis/geos/base.py | 197 | 1660 |
from ctypes import c_void_p
from django.contrib.gis.geos.error import GEOSException
# Trying to import GDAL libraries, if available. Have to place in
# try/except since this package may be used outside GeoDjango.
try:
from django.contrib.gis import gdal
except ImportError:
# A 'dummy' gdal module.
class GDALInfo(object):
HAS_GDAL = False
gdal = GDALInfo()
# NumPy supported?
try:
import numpy
except ImportError:
numpy = False
class GEOSBase(object):
"""
Base object for GEOS objects that has a pointer access property
that controls access to the underlying C pointer.
"""
# Initially the pointer is NULL.
_ptr = None
# Default allowed pointer type.
ptr_type = c_void_p
# Pointer access property.
def _get_ptr(self):
# Raise an exception if the pointer isn't valid -- we don't
# want to be passing NULL pointers to routines --
# that's very bad.
if self._ptr:
return self._ptr
else:
raise GEOSException('NULL GEOS %s pointer encountered.' % self.__class__.__name__)
def _set_ptr(self, ptr):
# Only allow the pointer to be set with pointers of the
# compatible type or None (NULL).
if ptr is None or isinstance(ptr, self.ptr_type):
self._ptr = ptr
else:
raise TypeError('Incompatible pointer type')
# Property for controlling access to the GEOS object pointers. Using
# this raises an exception when the pointer is NULL, thus preventing
# the C library from attempting to access an invalid memory location.
ptr = property(_get_ptr, _set_ptr)
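# Illustrative behaviour: reading obj.ptr while _ptr is still None raises
# GEOSException rather than handing a NULL pointer to the GEOS C library.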
| bsd-3-clause | -4,153,868,892,588,374,000 | -2,518,478,325,442,564,600 | 29.740741 | 94 | 0.654819 | false |
kwilliams-mo/iris | lib/iris/tests/test_plot.py | 1 | 32122 |
# (C) British Crown Copyright 2010 - 2013, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
from functools import wraps
import types
import warnings
import matplotlib.pyplot as plt
import numpy as np
import iris
import iris.coords as coords
import iris.plot as iplt
import iris.quickplot as qplt
import iris.symbols
import iris.tests.stock
import iris.tests.test_mapping as test_mapping
def simple_cube():
cube = iris.tests.stock.realistic_4d()
cube = cube[:, 0, 0, :]
cube.coord('time').guess_bounds()
return cube
class TestSimple(tests.GraphicsTest):
def test_points(self):
cube = simple_cube()
qplt.contourf(cube)
self.check_graphic()
def test_bounds(self):
cube = simple_cube()
qplt.pcolor(cube)
self.check_graphic()
class TestMissingCoord(tests.GraphicsTest):
def _check(self, cube):
qplt.contourf(cube)
self.check_graphic()
qplt.pcolor(cube)
self.check_graphic()
def test_no_u(self):
cube = simple_cube()
cube.remove_coord('grid_longitude')
self._check(cube)
def test_no_v(self):
cube = simple_cube()
cube.remove_coord('time')
self._check(cube)
def test_none(self):
cube = simple_cube()
cube.remove_coord('grid_longitude')
cube.remove_coord('time')
self._check(cube)
@iris.tests.skip_data
class TestMissingCS(tests.GraphicsTest):
@iris.tests.skip_data
def test_missing_cs(self):
cube = tests.stock.simple_pp()
cube.coord("latitude").coord_system = None
cube.coord("longitude").coord_system = None
qplt.contourf(cube)
qplt.plt.gca().coastlines()
self.check_graphic()
class TestHybridHeight(tests.GraphicsTest):
def setUp(self):
self.cube = iris.tests.stock.realistic_4d()[0, :15, 0, :]
def _check(self, plt_method, test_altitude=True):
plt_method(self.cube)
self.check_graphic()
plt_method(self.cube, coords=['level_height', 'grid_longitude'])
self.check_graphic()
plt_method(self.cube, coords=['grid_longitude', 'level_height'])
self.check_graphic()
if test_altitude:
plt_method(self.cube, coords=['grid_longitude', 'altitude'])
self.check_graphic()
plt_method(self.cube, coords=['altitude', 'grid_longitude'])
self.check_graphic()
def test_points(self):
self._check(qplt.contourf)
def test_bounds(self):
self._check(qplt.pcolor, test_altitude=False)
def test_orography(self):
qplt.contourf(self.cube)
iplt.orography_at_points(self.cube)
iplt.points(self.cube)
self.check_graphic()
coords = ['altitude', 'grid_longitude']
qplt.contourf(self.cube, coords=coords)
iplt.orography_at_points(self.cube, coords=coords)
iplt.points(self.cube, coords=coords)
self.check_graphic()
# TODO: Test bounds once they are supported.
with self.assertRaises(NotImplementedError):
qplt.pcolor(self.cube)
iplt.orography_at_bounds(self.cube)
iplt.outline(self.cube)
self.check_graphic()
class Test1dPlotMultiArgs(tests.GraphicsTest):
# tests for iris.plot using multi-argument calling convention
def setUp(self):
self.cube1d = _load_4d_testcube()[0, :, 0, 0]
self.draw_method = iplt.plot
def test_cube(self):
# just plot a cube against its dim coord
self.draw_method(self.cube1d) # altitude vs temp
self.check_graphic()
def test_coord(self):
# plot the altitude coordinate
self.draw_method(self.cube1d.coord('altitude'))
self.check_graphic()
def test_coord_cube(self):
# plot temperature against sigma
self.draw_method(self.cube1d.coord('sigma'), self.cube1d)
self.check_graphic()
def test_cube_coord(self):
# plot a vertical profile of temperature
self.draw_method(self.cube1d, self.cube1d.coord('altitude'))
self.check_graphic()
def test_coord_coord(self):
# plot two coordinates that are not mappable
self.draw_method(self.cube1d.coord('sigma'),
self.cube1d.coord('altitude'))
self.check_graphic()
def test_coord_coord_map(self):
# plot lat-lon aux coordinates of a trajectory, which draws a map
lon = iris.coords.AuxCoord([0, 5, 10, 15, 20, 25, 30, 35, 40, 45],
standard_name='longitude',
units='degrees_north')
lat = iris.coords.AuxCoord([45, 55, 50, 60, 55, 65, 60, 70, 65, 75],
standard_name='latitude',
units='degrees_north')
self.draw_method(lon, lat)
plt.gca().coastlines()
self.check_graphic()
def test_cube_cube(self):
# plot two phenomena against each other, in this case just dummy data
cube1 = self.cube1d.copy()
cube2 = self.cube1d.copy()
cube1.rename('some phenomenon')
cube2.rename('some other phenomenon')
cube1.units = iris.unit.Unit('no_unit')
cube2.units = iris.unit.Unit('no_unit')
cube1.data[:] = np.linspace(0, 1, 7)
cube2.data[:] = np.exp(cube1.data)
self.draw_method(cube1, cube2)
self.check_graphic()
def test_incompatible_objects(self):
# incompatible objects (not the same length) should raise an error
with self.assertRaises(ValueError):
self.draw_method(self.cube1d.coord('time'), (self.cube1d))
def test_multimidmensional(self):
# multidimensional cubes are not allowed
cube = _load_4d_testcube()[0, :, :, 0]
with self.assertRaises(ValueError):
self.draw_method(cube)
def test_not_cube_or_coord(self):
# inputs must be cubes or coordinates, otherwise an error should be
# raised
xdim = np.arange(self.cube1d.shape[0])
with self.assertRaises(TypeError):
self.draw_method(xdim, self.cube1d)
def test_coords_deprecated(self):
# ensure a warning is raised if the old coords keyword argument is
# used, and make sure the plot produced is consistent with the old
# interface
msg = 'Missing deprecation warning for coords keyword.'
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.draw_method(self.cube1d, coords=['sigma'])
self.assertEqual(len(w), 1, msg)
self.check_graphic()
def test_coords_deprecation_too_many(self):
# in deprecation mode, too many coords is an error
with self.assertRaises(ValueError):
self.draw_method(self.cube1d, coords=['sigma', 'sigma'])
def test_coords_deprecation_invalid_span(self):
# in deprecation mode, a coordinate that doesn't span data is an error
with self.assertRaises(ValueError):
self.draw_method(self.cube1d, coords=['time'])
class Test1dQuickplotPlotMultiArgs(Test1dPlotMultiArgs):
# tests for iris.plot using multi-argument calling convention
def setUp(self):
self.cube1d = _load_4d_testcube()[0, :, 0, 0]
self.draw_method = qplt.plot
@tests.skip_data
class Test1dScatter(tests.GraphicsTest):
def setUp(self):
self.cube = iris.load_cube(
tests.get_data_path(('NAME', 'NAMEIII_trajectory.txt')),
'Temperature')
self.draw_method = iplt.scatter
def test_coord_coord(self):
x = self.cube.coord('longitude')
y = self.cube.coord('height')
c = self.cube.data
self.draw_method(x, y, c=c, edgecolor='none')
self.check_graphic()
def test_coord_coord_map(self):
x = self.cube.coord('longitude')
y = self.cube.coord('latitude')
c = self.cube.data
self.draw_method(x, y, c=c, edgecolor='none')
plt.gca().coastlines()
self.check_graphic()
def test_coord_cube(self):
x = self.cube.coord('latitude')
y = self.cube
c = self.cube.coord('Travel Time').points
self.draw_method(x, y, c=c, edgecolor='none')
self.check_graphic()
def test_cube_coord(self):
x = self.cube
y = self.cube.coord('height')
c = self.cube.coord('Travel Time').points
self.draw_method(x, y, c=c, edgecolor='none')
self.check_graphic()
def test_cube_cube(self):
x = iris.load_cube(
tests.get_data_path(('NAME', 'NAMEIII_trajectory.txt')),
'Rel Humidity')
y = self.cube
c = self.cube.coord('Travel Time').points
self.draw_method(x, y, c=c, edgecolor='none')
self.check_graphic()
def test_incompatible_objects(self):
# cubes/coordinates of different sizes cannot be plotted
x = self.cube
y = self.cube.coord('height')[:-1]
with self.assertRaises(ValueError):
self.draw_method(x, y)
def test_multidimensional(self):
# multidimensional cubes/coordinates are not allowed
x = _load_4d_testcube()[0, :, :, 0]
y = x.coord('model_level_number')
with self.assertRaises(ValueError):
self.draw_method(x, y)
def test_not_cube_or_coord(self):
# inputs must be cubes or coordinates
x = np.arange(self.cube.shape[0])
y = self.cube
with self.assertRaises(TypeError):
self.draw_method(x, y)
@tests.skip_data
class Test1dQuickplotScatter(Test1dScatter):
def setUp(self):
self.cube = iris.load_cube(
tests.get_data_path(('NAME', 'NAMEIII_trajectory.txt')),
'Temperature')
self.draw_method = qplt.scatter
@iris.tests.skip_data
class TestAttributePositive(tests.GraphicsTest):
def test_1d_positive_up(self):
path = tests.get_data_path(('NetCDF', 'ORCA2', 'votemper.nc'))
cube = iris.load_cube(path)
qplt.plot(cube.coord('depth'), cube[0, :, 60, 80])
self.check_graphic()
def test_1d_positive_down(self):
path = tests.get_data_path(('NetCDF', 'ORCA2', 'votemper.nc'))
cube = iris.load_cube(path)
qplt.plot(cube[0, :, 60, 80], cube.coord('depth'))
self.check_graphic()
def test_2d_positive_up(self):
path = tests.get_data_path(('NetCDF', 'testing',
'small_theta_colpex.nc'))
cube = iris.load_cube(path)[0, :, 42, :]
qplt.pcolormesh(cube)
self.check_graphic()
def test_2d_positive_down(self):
path = tests.get_data_path(('NetCDF', 'ORCA2', 'votemper.nc'))
cube = iris.load_cube(path)[0, :, 42, :]
qplt.pcolormesh(cube)
self.check_graphic()
# Caches _load_4d_testcube so subsequent calls are faster
def cache(fn, cache={}):
def inner(*args, **kwargs):
key = fn.__name__
if key not in cache:
cache[key] = fn(*args, **kwargs)
return cache[key]
return inner
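# Note: the cache key is the wrapped function's __name__, so every call shares a
# single cached result regardless of arguments (sufficient for these fixtures).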
@cache
def _load_4d_testcube():
# Load example 4d data (TZYX).
test_cube = iris.tests.stock.realistic_4d()
# Replace forecast_period coord with a multi-valued version.
time_coord = test_cube.coord('time')
n_times = len(time_coord.points)
forecast_dims = test_cube.coord_dims(time_coord)
test_cube.remove_coord('forecast_period')
# Make up values (including bounds), to roughly match older testdata.
point_values = np.linspace((1 + 1.0 / 6), 2.0, n_times)
point_uppers = point_values + (point_values[1] - point_values[0])
bound_values = np.column_stack([point_values, point_uppers])
# NOTE: this must be a DimCoord
# - an equivalent AuxCoord produces different plots.
new_forecast_coord = iris.coords.DimCoord(
points=point_values,
bounds=bound_values,
standard_name='forecast_period',
units=iris.unit.Unit('hours')
)
test_cube.add_aux_coord(new_forecast_coord, forecast_dims)
# Heavily reduce dimensions for faster testing.
# NOTE: this makes ZYX non-contiguous. Doesn't seem to matter for now.
test_cube = test_cube[:, ::10, ::10, ::10]
return test_cube
@cache
def _load_wind_no_bounds():
# Load the COLPEX data => TZYX
path = tests.get_data_path(('PP', 'COLPEX', 'small_eastward_wind.pp'))
wind = iris.load_cube(path, 'eastward_wind')
# Remove bounds from all coords that have them.
wind.coord('grid_latitude').bounds = None
wind.coord('grid_longitude').bounds = None
wind.coord('level_height').bounds = None
wind.coord('sigma').bounds = None
return wind[:, :, :50, :50]
def _time_series(src_cube):
# Until we have plotting support for multiple axes on the same dimension,
# remove the time coordinate and its axis.
cube = src_cube.copy()
cube.remove_coord('time')
return cube
def _date_series(src_cube):
# Until we have plotting support for multiple axes on the same dimension,
# remove the forecast_period coordinate and its axis.
cube = src_cube.copy()
cube.remove_coord('forecast_period')
return cube
class SliceMixin(object):
"""Mixin class providing tests for each 2-dimensional permutation of axes.
Requires self.draw_method to be the relevant plotting function,
and self.results to be a dictionary containing the desired test results."""
def test_yx(self):
cube = self.wind[0, 0, :, :]
self.draw_method(cube)
self.check_graphic()
def test_zx(self):
cube = self.wind[0, :, 0, :]
self.draw_method(cube)
self.check_graphic()
def test_tx(self):
cube = _time_series(self.wind[:, 0, 0, :])
self.draw_method(cube)
self.check_graphic()
def test_zy(self):
cube = self.wind[0, :, :, 0]
self.draw_method(cube)
self.check_graphic()
def test_ty(self):
cube = _time_series(self.wind[:, 0, :, 0])
self.draw_method(cube)
self.check_graphic()
def test_tz(self):
cube = _time_series(self.wind[:, :, 0, 0])
self.draw_method(cube)
self.check_graphic()
@iris.tests.skip_data
class TestContour(tests.GraphicsTest, SliceMixin):
"""Test the iris.plot.contour routine."""
def setUp(self):
self.wind = _load_4d_testcube()
self.draw_method = iplt.contour
@iris.tests.skip_data
class TestContourf(tests.GraphicsTest, SliceMixin):
"""Test the iris.plot.contourf routine."""
def setUp(self):
self.wind = _load_4d_testcube()
self.draw_method = iplt.contourf
@iris.tests.skip_data
class TestPcolor(tests.GraphicsTest, SliceMixin):
"""Test the iris.plot.pcolor routine."""
def setUp(self):
self.wind = _load_4d_testcube()
self.draw_method = iplt.pcolor
@iris.tests.skip_data
class TestPcolormesh(tests.GraphicsTest, SliceMixin):
"""Test the iris.plot.pcolormesh routine."""
def setUp(self):
self.wind = _load_4d_testcube()
self.draw_method = iplt.pcolormesh
def check_warnings(method):
"""
Decorator that adds a catch_warnings and filter to assert
the method being decorated issues a UserWarning.
"""
@wraps(method)
def decorated_method(self, *args, **kwargs):
# Force reset of iris.coords warnings registry to avoid suppression of
# repeated warnings. warnings.resetwarnings() does not do this.
if hasattr(coords, '__warningregistry__'):
coords.__warningregistry__.clear()
# Check that method raises warning.
with warnings.catch_warnings():
warnings.simplefilter("error")
with self.assertRaises(UserWarning):
return method(self, *args, **kwargs)
return decorated_method
def ignore_warnings(method):
"""
Decorator that adds a catch_warnings and filter to suppress
any warnings issued by the method being decorated.
"""
@wraps(method)
def decorated_method(self, *args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return method(self, *args, **kwargs)
return decorated_method
class CheckForWarningsMetaclass(type):
"""
Metaclass that adds a further test for each base class test
that checks that each test raises a UserWarning. Each base
class test is then overridden to ignore warnings in order to
check the underlying functionality.
"""
def __new__(cls, name, bases, local):
def add_decorated_methods(attr_dict, target_dict, decorator):
for key, value in attr_dict.items():
if (isinstance(value, types.FunctionType) and
key.startswith('test')):
new_key = '_'.join((key, decorator.__name__))
if new_key not in target_dict:
wrapped = decorator(value)
wrapped.__name__ = new_key
target_dict[new_key] = wrapped
else:
raise RuntimeError('An attribute called {!r} '
'already exists.'.format(new_key))
def override_with_decorated_methods(attr_dict, target_dict,
decorator):
for key, value in attr_dict.items():
if (isinstance(value, types.FunctionType) and
key.startswith('test')):
target_dict[key] = decorator(value)
# Add decorated versions of base methods
# to check for warnings.
for base in bases:
add_decorated_methods(base.__dict__, local, check_warnings)
# Override base methods to ignore warnings.
for base in bases:
override_with_decorated_methods(base.__dict__, local,
ignore_warnings)
return type.__new__(cls, name, bases, local)
@iris.tests.skip_data
class TestPcolorNoBounds(tests.GraphicsTest, SliceMixin):
"""
Test the iris.plot.pcolor routine on a cube with coordinates
that have no bounds.
"""
__metaclass__ = CheckForWarningsMetaclass
def setUp(self):
self.wind = _load_wind_no_bounds()
self.draw_method = iplt.pcolor
@iris.tests.skip_data
class TestPcolormeshNoBounds(tests.GraphicsTest, SliceMixin):
"""
Test the iris.plot.pcolormesh routine on a cube with coordinates
that have no bounds.
"""
__metaclass__ = CheckForWarningsMetaclass
def setUp(self):
self.wind = _load_wind_no_bounds()
self.draw_method = iplt.pcolormesh
class Slice1dMixin(object):
"""Mixin class providing tests for each 1-dimensional permutation of axes.
Requires self.draw_method to be the relevant plotting function,
and self.results to be a dictionary containing the desired test results."""
def test_x(self):
cube = self.wind[0, 0, 0, :]
self.draw_method(cube)
self.check_graphic()
def test_y(self):
cube = self.wind[0, 0, :, 0]
self.draw_method(cube)
self.check_graphic()
def test_z(self):
cube = self.wind[0, :, 0, 0]
self.draw_method(cube)
self.check_graphic()
def test_t(self):
cube = _time_series(self.wind[:, 0, 0, 0])
self.draw_method(cube)
self.check_graphic()
def test_t_dates(self):
cube = _date_series(self.wind[:, 0, 0, 0])
self.draw_method(cube)
plt.gcf().autofmt_xdate()
plt.xlabel('Phenomenon time')
self.check_graphic()
@iris.tests.skip_data
class TestPlot(tests.GraphicsTest, Slice1dMixin):
"""Test the iris.plot.plot routine."""
def setUp(self):
self.wind = _load_4d_testcube()
self.draw_method = iplt.plot
@iris.tests.skip_data
class TestQuickplotPlot(tests.GraphicsTest, Slice1dMixin):
"""Test the iris.quickplot.plot routine."""
def setUp(self):
self.wind = _load_4d_testcube()
self.draw_method = qplt.plot
_load_cube_once_cache = {}
def load_cube_once(filename, constraint):
"""Same syntax as load_cube, but will only load a file once,
then cache the answer in a dictionary.
"""
global _load_cube_once_cache
key = (filename, str(constraint))
cube = _load_cube_once_cache.get(key, None)
if cube is None:
cube = iris.load_cube(filename, constraint)
_load_cube_once_cache[key] = cube
return cube
class LambdaStr(object):
"""Provides a callable function which has a sensible __repr__."""
def __init__(self, repr, lambda_fn):
self.repr = repr
self.lambda_fn = lambda_fn
def __call__(self, *args, **kwargs):
return self.lambda_fn(*args, **kwargs)
def __repr__(self):
return self.repr
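# Illustrative: LambdaStr('iris.plot.plot', lambda c: iplt.plot(c)) behaves like
# iplt.plot when called, but prints as 'iris.plot.plot' in failure messages.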
@iris.tests.skip_data
class TestPlotCoordinatesGiven(tests.GraphicsTest):
def setUp(self):
filename = tests.get_data_path(('PP', 'COLPEX',
'theta_and_orog_subset.pp'))
self.cube = load_cube_once(filename, 'air_potential_temperature')
self.draw_module = iris.plot
self.contourf = LambdaStr('iris.plot.contourf',
lambda cube, *args, **kwargs:
iris.plot.contourf(cube, *args, **kwargs))
self.contour = LambdaStr('iris.plot.contour',
lambda cube, *args, **kwargs:
iris.plot.contour(cube, *args, **kwargs))
self.points = LambdaStr('iris.plot.points',
lambda cube, *args, **kwargs:
iris.plot.points(cube, c=cube.data,
*args, **kwargs))
self.plot = LambdaStr('iris.plot.plot',
lambda cube, *args, **kwargs:
iris.plot.plot(cube, *args, **kwargs))
self.results = {'yx': ([self.contourf, ['grid_latitude',
'grid_longitude']],
[self.contourf, ['grid_longitude',
'grid_latitude']],
[self.contour, ['grid_latitude',
'grid_longitude']],
[self.contour, ['grid_longitude',
'grid_latitude']],
[self.points, ['grid_latitude',
'grid_longitude']],
[self.points, ['grid_longitude',
'grid_latitude']],),
'zx': ([self.contourf, ['model_level_number',
'grid_longitude']],
[self.contourf, ['grid_longitude',
'model_level_number']],
[self.contour, ['model_level_number',
'grid_longitude']],
[self.contour, ['grid_longitude',
'model_level_number']],
[self.points, ['model_level_number',
'grid_longitude']],
[self.points, ['grid_longitude',
'model_level_number']],),
'tx': ([self.contourf, ['time', 'grid_longitude']],
[self.contourf, ['grid_longitude', 'time']],
[self.contour, ['time', 'grid_longitude']],
[self.contour, ['grid_longitude', 'time']],
[self.points, ['time', 'grid_longitude']],
[self.points, ['grid_longitude', 'time']],),
'x': ([self.plot, ['grid_longitude']],),
'y': ([self.plot, ['grid_latitude']],)
}
def draw(self, draw_method, *args, **kwargs):
draw_fn = getattr(self.draw_module, draw_method)
draw_fn(*args, **kwargs)
self.check_graphic()
def run_tests(self, cube, results):
for draw_method, coords in results:
draw_method(cube, coords=coords)
try:
self.check_graphic()
except AssertionError as err:
self.fail('Draw method %r failed with coords: %r. '
'Assertion message: %s' % (draw_method, coords, err))
def run_tests_1d(self, cube, results):
# there is a different calling convention for 1d plots
for draw_method, coords in results:
draw_method(cube.coord(coords[0]), cube)
try:
self.check_graphic()
except AssertionError as err:
msg = 'Draw method {!r} failed with coords: {!r}. ' \
'Assertion message: {!s}'
self.fail(msg.format(draw_method, coords, err))
def test_yx(self):
test_cube = self.cube[0, 0, :, :]
self.run_tests(test_cube, self.results['yx'])
def test_zx(self):
test_cube = self.cube[0, :15, 0, :]
self.run_tests(test_cube, self.results['zx'])
def test_tx(self):
test_cube = self.cube[:, 0, 0, :]
self.run_tests(test_cube, self.results['tx'])
def test_x(self):
test_cube = self.cube[0, 0, 0, :]
self.run_tests_1d(test_cube, self.results['x'])
def test_y(self):
test_cube = self.cube[0, 0, :, 0]
self.run_tests_1d(test_cube, self.results['y'])
def test_badcoords(self):
cube = self.cube[0, 0, :, :]
draw_fn = getattr(self.draw_module, 'contourf')
self.assertRaises(ValueError, draw_fn, cube,
coords=['grid_longitude', 'grid_longitude'])
self.assertRaises(ValueError, draw_fn, cube,
coords=['grid_longitude', 'grid_longitude',
'grid_latitude'])
self.assertRaises(iris.exceptions.CoordinateNotFoundError, draw_fn,
cube, coords=['grid_longitude', 'wibble'])
self.assertRaises(ValueError, draw_fn, cube, coords=[])
self.assertRaises(ValueError, draw_fn, cube,
coords=[cube.coord('grid_longitude'),
cube.coord('grid_longitude')])
self.assertRaises(ValueError, draw_fn, cube,
coords=[cube.coord('grid_longitude'),
cube.coord('grid_longitude'),
cube.coord('grid_longitude')])
def test_non_cube_coordinate(self):
cube = self.cube[0, :, :, 0]
pts = -100 + np.arange(cube.shape[1]) * 13
x = coords.DimCoord(pts, standard_name='model_level_number',
attributes={'positive': 'up'})
self.draw('contourf', cube, coords=['grid_latitude', x])
@iris.tests.skip_data
class TestPlotDimAndAuxCoordsKwarg(tests.GraphicsTest):
def setUp(self):
filename = tests.get_data_path(('NetCDF', 'rotated', 'xy',
'rotPole_landAreaFraction.nc'))
self.cube = iris.load_cube(filename)
def test_default(self):
iplt.contourf(self.cube)
plt.gca().coastlines()
self.check_graphic()
def test_coords(self):
# Pass in dimension coords.
rlat = self.cube.coord('grid_latitude')
rlon = self.cube.coord('grid_longitude')
iplt.contourf(self.cube, coords=[rlon, rlat])
plt.gca().coastlines()
self.check_graphic()
# Pass in auxiliary coords.
lat = self.cube.coord('latitude')
lon = self.cube.coord('longitude')
iplt.contourf(self.cube, coords=[lon, lat])
plt.gca().coastlines()
self.check_graphic()
def test_coord_names(self):
# Pass in names of dimension coords.
iplt.contourf(self.cube, coords=['grid_longitude', 'grid_latitude'])
plt.gca().coastlines()
self.check_graphic()
# Pass in names of auxiliary coords.
iplt.contourf(self.cube, coords=['longitude', 'latitude'])
plt.gca().coastlines()
self.check_graphic()
def test_yx_order(self):
# Do not attempt to draw coastlines as it is not a map.
iplt.contourf(self.cube, coords=['grid_latitude', 'grid_longitude'])
self.check_graphic()
iplt.contourf(self.cube, coords=['latitude', 'longitude'])
self.check_graphic()
class TestSymbols(tests.GraphicsTest):
def test_cloud_cover(self):
iplt.symbols(range(10), [0] * 10, [iris.symbols.CLOUD_COVER[i]
for i in range(10)], 0.375)
self.check_graphic()
class TestPlottingExceptions(tests.IrisTest):
def setUp(self):
self.bounded_cube = tests.stock.lat_lon_cube()
self.bounded_cube.coord("latitude").guess_bounds()
self.bounded_cube.coord("longitude").guess_bounds()
def test_boundmode_multidim(self):
# Test exception translation.
# We can't get contiguous bounded grids from multi-d coords.
cube = self.bounded_cube
cube.remove_coord("latitude")
cube.add_aux_coord(coords.AuxCoord(points=cube.data,
standard_name='latitude',
units='degrees'), [0, 1])
with self.assertRaises(ValueError):
iplt.pcolormesh(cube, coords=['longitude', 'latitude'])
def test_boundmode_4bounds(self):
# Test exception translation.
# We can only get contiguous bounded grids with 2 bounds per point.
cube = self.bounded_cube
lat = coords.AuxCoord.from_coord(cube.coord("latitude"))
lat.bounds = np.array([lat.points, lat.points + 1,
lat.points + 2, lat.points + 3]).transpose()
cube.remove_coord("latitude")
cube.add_aux_coord(lat, 0)
with self.assertRaises(ValueError):
iplt.pcolormesh(cube, coords=['longitude', 'latitude'])
def test_different_coord_systems(self):
cube = self.bounded_cube
lat = cube.coord('latitude')
lon = cube.coord('longitude')
lat.coord_system = iris.coord_systems.GeogCS(7000000)
lon.coord_system = iris.coord_systems.GeogCS(7000001)
with self.assertRaises(ValueError):
iplt.pcolormesh(cube, coords=['longitude', 'latitude'])
@iris.tests.skip_data
class TestPlotOtherCoordSystems(tests.GraphicsTest):
def test_plot_tmerc(self):
filename = tests.get_data_path(('NetCDF', 'transverse_mercator',
'tmean_1910_1910.nc'))
self.cube = iris.load_cube(filename)
iplt.pcolormesh(self.cube[0])
plt.gca().coastlines()
self.check_graphic()
if __name__ == "__main__":
tests.main()
| gpl-3.0 | 1,296,956,660,698,751,500 | -2,428,364,700,187,651,000 | 34.221491 | 79 | 0.578731 | false |
google/orchestra | orchestra/google/marketing_platform/operators/display_video_360.py | 1 | 18902 |
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import json
import csv
import os
from random import randint
import tempfile
import time
from urllib.parse import urlparse
import requests
from airflow import models
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.contrib.hooks.bigquery_hook import BigQueryHook
from airflow.contrib.hooks.bigquery_hook import BigQueryBaseCursor
from airflow.models import BaseOperator
from orchestra.google.marketing_platform.hooks.display_video_360 import (
GoogleDisplayVideo360Hook
)
from orchestra.google.marketing_platform.utils import erf_utils
from orchestra.google.marketing_platform.utils.schema.sdf import (
SDF_VERSIONED_SCHEMA_TYPES
)
logger = logging.getLogger(__name__)
class GoogleDisplayVideo360CreateReportOperator(BaseOperator):
"""Creates and runs a new Display & Video 360 query.
Attributes:
report: The query body to create the report from. (templated)
Can receive a json string representing the report or reference to a
template file. Template references are recognized by a string ending in
'.json'.
api_version: The DV360 API version.
gcp_conn_id: The connection ID to use when fetching connection info.
delegate_to: The account to impersonate, if any.
XComs:
query_id: The query ID for the report created.
"""
template_fields = ['params', 'report']
template_ext = ['.json']
def __init__(self,
report,
api_version='v1',
gcp_conn_id='google_cloud_default',
delegate_to=None,
*args,
**kwargs):
super(GoogleDisplayVideo360CreateReportOperator, self).__init__(*args, **kwargs)
self.report = report
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.hook = None
def execute(self, context):
if self.hook is None:
self.hook = GoogleDisplayVideo360Hook(
api_version=self.api_version,
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to)
report_body = json.loads(self.report)
request = self.hook.get_service().queries().createquery(body=report_body)
response = request.execute()
context['task_instance'].xcom_push('query_id', response['queryId'])
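# Illustrative DAG usage (task_id and file name are hypothetical):
#   create_query = GoogleDisplayVideo360CreateReportOperator(
#       task_id='create_dv360_query',
#       report='dv360_report_body.json')
# Downstream tasks can then read the query ID with
# xcom_pull(task_ids='create_dv360_query', key='query_id').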
class GoogleDisplayVideo360RunReportOperator(BaseOperator):
"""Runs a stored query to generate a report.
Attributes:
api_version: The DV360 API version.
query_id: The ID of the query to run. (templated)
gcp_conn_id: The connection ID to use when fetching connection info.
delegate_to: The account to impersonate, if any.
"""
template_fields = ['query_id']
def __init__(self,
query_id,
api_version='v1',
gcp_conn_id='google_cloud_default',
delegate_to=None,
*args,
**kwargs):
super(GoogleDisplayVideo360RunReportOperator, self).__init__(*args, **kwargs)
self.api_version = api_version
self.conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.service = None
self.query_id = query_id
def execute(self, context):
if self.service is None:
hook = GoogleDisplayVideo360Hook(
api_version=self.api_version,
gcp_conn_id=self.conn_id,
delegate_to=self.delegate_to
)
self.service = hook.get_service()
request = self.service.queries().runquery(
queryId=self.query_id, body={})
request.execute()
class GoogleDisplayVideo360DownloadReportOperator(BaseOperator):
"""Downloads a Display & Video 360 report into Google Cloud Storage.
Attributes:
report_url: The Google Cloud Storage url where the latest report is stored.
(templated)
destination_bucket: The destination Google cloud storage bucket where the
report should be written to. (templated)
destination_object: The destination name of the object in the destination
Google cloud storage bucket. (templated)
If the destination points to an existing folder, the report will be
written under the specified folder.
gcp_conn_id: The connection ID to use when fetching connection info.
delegate_to: The account to impersonate, if any.
XComs:
destination_bucket: The Google cloud storage bucket the report was written
to.
destination_object: The Google cloud storage URI for the report.
"""
template_fields = ['report_url', 'destination_bucket', 'destination_object']
def __init__(self,
report_url,
destination_bucket,
destination_object=None,
chunk_size=5 * 1024 * 1024,
gcp_conn_id='google_cloud_default',
delegate_to=None,
*args,
**kwargs):
super(GoogleDisplayVideo360DownloadReportOperator, self).__init__(*args, **kwargs)
self.report_url = report_url
self.destination_bucket = destination_bucket
self.destination_object = destination_object
self.chunk_size = chunk_size
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.hook = None
@staticmethod
def _download_report(source_url, destination_file, chunk_size):
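# The report is streamed in chunk_size slices using HTTP Range requests;
# a HEAD request supplies the Content-Length that bounds the loop.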
response = requests.head(source_url)
content_length = int(response.headers['Content-Length'])
start_byte = 0
while start_byte < content_length:
end_byte = start_byte + chunk_size - 1
if end_byte >= content_length:
end_byte = content_length - 1
headers = {'Range': 'bytes=%s-%s' % (start_byte, end_byte)}
response = requests.get(source_url, stream=True, headers=headers)
chunk = response.raw.read()
destination_file.write(chunk)
start_byte = end_byte + 1
destination_file.close()
@staticmethod
def _get_destination_uri(destination_object, report_url):
report_file_name = urlparse(report_url).path.split('/')[2]
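# Illustrative (hypothetical URL): 'https://storage.googleapis.com/dv360/report_123.csv'
# has path '/dv360/report_123.csv', so element [2] is 'report_123.csv'.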
if destination_object is None:
return report_file_name
if destination_object.endswith('/'):
return destination_object + report_file_name
return destination_object
def execute(self, context):
if self.hook is None:
self.hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to)
temp_file = tempfile.NamedTemporaryFile(delete=False)
try:
# TODO(efolgar): Directly stream to storage instead of temp file
self._download_report(self.report_url, temp_file, self.chunk_size)
destination_object_name = self._get_destination_uri(
self.destination_object, self.report_url)
self.hook.upload(
bucket=self.destination_bucket,
object=destination_object_name,
filename=temp_file.name,
multipart=True)
context['task_instance'].xcom_push(
'destination_bucket', self.destination_bucket)
context['task_instance'].xcom_push(
'destination_object', destination_object_name)
finally:
temp_file.close()
os.unlink(temp_file.name)
class GoogleDisplayVideo360DeleteReportOperator(BaseOperator):
"""Deletes Display & Video 360 queries and any associated reports.
Attributes:
api_version: The DV360 API version.
query_id: The DV360 query id to delete. (templated)
query_title: The DV360 query title to delete. (templated)
Any query with a matching title will be deleted.
ignore_if_missing: If True, return success even if the query is missing.
gcp_conn_id: The connection ID to use when fetching connection info.
delegate_to: The account to impersonate, if any.
"""
template_fields = ['query_id', 'query_title']
ui_color = '#ffd1dc'
def __init__(self,
api_version='v1',
query_id=None,
query_title=None,
ignore_if_missing=False,
gcp_conn_id='google_cloud_default',
delegate_to=None,
*args,
**kwargs):
super(GoogleDisplayVideo360DeleteReportOperator, self).__init__(*args, **kwargs)
self.api_version = api_version
self.query_id = query_id
self.query_title = query_title
self.ignore_if_missing = ignore_if_missing
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.hook = None
def execute(self, context):
if self.hook is None:
self.hook = GoogleDisplayVideo360Hook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
delegate_to=self.delegate_to)
if self.query_id is not None:
self.hook.deletequery(
self.query_id,
ignore_if_missing=self.ignore_if_missing)
if self.query_title is not None:
self.hook.deletequeries(
self.query_title,
ignore_if_missing=self.ignore_if_missing)
class GoogleDisplayVideo360ERFToBigQueryOperator(BaseOperator):
"""Upload Multiple Entity Read Files to specified big query dataset.
"""
def __init__(self,
gcp_conn_id='google_cloud_default',
report_body=None,
yesterday=False,
entity_type=None,
file_creation_date=None,
cloud_project_id=None,
bq_table=None,
schema=None,
gcs_bucket=None,
erf_bucket=None,
partner_ids=[],
write_disposition='WRITE_TRUNCATE',
*args,
**kwargs):
super(GoogleDisplayVideo360ERFToBigQueryOperator, self).__init__(*args, **kwargs)
self.gcp_conn_id = gcp_conn_id
self.service = None
self.bq_hook = None
self.gcs_hook = None
self.report_body = report_body
self.erf_bucket = erf_bucket
self.yesterday = yesterday
self.cloud_project_id = cloud_project_id
self.bq_table = bq_table
self.gcs_bucket = gcs_bucket
self.schema = schema
self.entity_type = entity_type
self.erf_object = 'entity/%s.0.%s.json' % (file_creation_date, entity_type)
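# Illustrative object name: file_creation_date='20190101', entity_type='Partner'
# -> 'entity/20190101.0.Partner.json' (values hypothetical).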
self.partner_ids = partner_ids
self.write_disposition = write_disposition
self.file_creation_date = file_creation_date
def execute(self, context):
if self.gcs_hook is None:
self.gcs_hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=self.gcp_conn_id)
if self.bq_hook is None:
self.bq_hook = BigQueryHook(bigquery_conn_id=self.gcp_conn_id)
for i, partner_id in enumerate(self.partner_ids):
filename = erf_utils.download_and_transform_erf(self, partner_id)
entity_read_file_ndj = 'gs://%s/%s' % (self.gcs_bucket, filename)
if i > 0:
self.write_disposition = 'WRITE_APPEND'
bq_base_cursor = self.bq_hook.get_conn().cursor()
bq_base_cursor.run_load(
destination_project_dataset_table=self.bq_table,
schema_fields=self.schema,
source_uris=[entity_read_file_ndj],
source_format='NEWLINE_DELIMITED_JSON',
write_disposition=self.write_disposition)
self.gcs_hook.delete(self.gcs_bucket, filename)
class GoogleDisplayVideo360SDFToBigQueryOperator(BaseOperator):
"""Make a request to SDF API and upload the data to BQ."""
DEFAULT_SDF_TABLE_NAMES = {
'LINE_ITEM': 'SDFLineItem',
'AD_GROUP': 'SDFAdGroup',
'AD': 'SDFAd',
'INSERTION_ORDER': 'SDFInsertionOrder',
'CAMPAIGN': 'SDFCampaign'
}
SDF_API_RESPONSE_KEYS = {
'LINE_ITEM': 'lineItems',
'AD_GROUP': 'adGroups',
'AD': 'ads',
'INSERTION_ORDER': 'insertionOrders',
'CAMPAIGN': 'campaigns'
}
def __init__(self,
gcp_conn_id='google_cloud_default',
gcs_bucket=None,
schema=None,
bq_dataset=None,
write_disposition=None,
cloud_project_id=None,
file_types=None,
filter_ids=None,
api_version=None,
filter_type=None,
table_names=DEFAULT_SDF_TABLE_NAMES,
sdf_api_response_keys=SDF_API_RESPONSE_KEYS,
*args,
**kwargs):
super(GoogleDisplayVideo360SDFToBigQueryOperator, self).__init__(*args, **kwargs)
self.gcp_conn_id = gcp_conn_id
self.service = None
self.hook = None
self.bq_hook = None
self.gcs_hook = None
self.gcs_bucket = gcs_bucket
self.schema = schema
self.bq_dataset = bq_dataset
self.write_disposition = write_disposition
self.cloud_project_id = cloud_project_id
self.file_types = file_types
self.filter_ids = filter_ids
self.api_version = api_version
self.filter_type = filter_type
self.table_names = table_names
self.sdf_api_response_keys = sdf_api_response_keys
def execute(self, context):
if self.hook is None:
self.hook = GoogleDisplayVideo360Hook(gcp_conn_id=self.gcp_conn_id)
if self.bq_hook is None:
self.bq_hook = BigQueryHook(bigquery_conn_id=self.gcp_conn_id)
if self.gcs_hook is None:
self.gcs_hook = GoogleCloudStorageHook(google_cloud_storage_conn_id=self.gcp_conn_id)
request_body = {'fileTypes': self.file_types, 'filterType': self.filter_type, 'filterIds': self.filter_ids,
'version': self.api_version}
logger.info('Request body: %s ' % request_body)
request = self.hook.get_service().sdf().download(body=request_body)
response = request.execute()
for file_type in self.file_types:
temp_file = None
try:
logger.info('Uploading SDF to GCS')
temp_file = tempfile.NamedTemporaryFile(delete=False)
response_key = self.sdf_api_response_keys.get(file_type)
temp_file.write(response[response_key].encode('utf-8'))
temp_file.close()
filename = '%d_%s_%s_%s.json' % (time.time() * 1e+9, randint(
1, 1000000), response_key, 'sdf')
self.gcs_hook.upload(self.gcs_bucket, filename, temp_file.name)
logger.info('SDF upload to GCS complete')
finally:
if temp_file:
temp_file.close()
os.unlink(temp_file.name)
sdf_file = 'gs://%s/%s' % (self.gcs_bucket, filename)
bq_table = self.table_names.get(file_type)
bq_table = '%s.%s' % (self.bq_dataset, bq_table)
schema = SDF_VERSIONED_SCHEMA_TYPES.get(self.api_version).get(file_type)
try:
bq_base_cursor = self.bq_hook.get_conn().cursor()
logger.info('Uploading SDF to BigQuery')
bq_base_cursor.run_load(
destination_project_dataset_table=bq_table,
schema_fields=schema,
source_uris=[sdf_file],
source_format='CSV',
skip_leading_rows=1,
write_disposition=self.write_disposition)
finally:
logger.info('Deleting SDF from GCS')
self.gcs_hook.delete(self.gcs_bucket, filename)
class GoogleDisplayVideo360RecordSDFAdvertiserOperator(BaseOperator):
"""
Get partner and advertiser IDs from a report and populate an Airflow variable.
"""
template_fields = ['report_url', 'variable_name']
def __init__(self,
report_url,
variable_name,
gcp_conn_id='google_cloud_default',
*args,
**kwargs):
super(GoogleDisplayVideo360RecordSDFAdvertiserOperator, self).__init__(*args, **kwargs)
self.gcp_conn_id = gcp_conn_id
self.service = None
self.report_url = report_url
self.variable_name = variable_name
def execute(self, context):
try:
report_file = tempfile.NamedTemporaryFile(delete=False)
file_download = requests.get(self.report_url, stream=True)
for chunk in file_download.iter_content(chunk_size=1024 * 1024):
report_file.write(chunk)
report_file.close()
advertisers = {}
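# advertisers maps partner ID -> list of advertiser IDs,
# e.g. {'1234': ['111', '222']} (illustrative values).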
with open(report_file.name, 'r') as f:
csv_reader = csv.DictReader(f)
for line in csv_reader:
advertiser_id = line["Advertiser ID"]
partner_id = line["Partner ID"]
if advertiser_id.strip():
try:
advertisers[partner_id].append(advertiser_id)
message = 'ADDING to key %s new advertiser %s' % (
partner_id, advertiser_id)
logger.info(message)
except KeyError:
advertisers[partner_id] = [advertiser_id]
message = 'CREATING new key %s with advertiser %s' % (
partner_id, advertiser_id)
logger.info(message)
else:
break
models.Variable.set(self.variable_name, json.dumps(advertisers))
finally:
if report_file:
report_file.close()
os.unlink(report_file.name)
| apache-2.0 | -8,249,547,854,776,020,000 | -476,862,092,144,933,060 | 37.185859 | 115 | 0.585811 | false |
gilneidp/FinalProject | ALL_FILES/pox/misc/pidfile.py | 44 | 2096 |
# Copyright 2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Component to create PID files for running POX as a service
"""
from pox.core import core
import os
import atexit
_files = set()
_first_init = False
def _del_pidfiles ():
if not _files: return
try:
msg = "Cleaning up %i pidfile" % (len(_files),)
if len(_files) != 1: msg += 's'
log.debug(msg)
except:
pass
for f in list(_files):
shortname = f
if os.path.abspath(os.path.basename(f)) == f:
shortname = os.path.basename(f)
try:
os.remove(f)
except:
msg = "Couldn't delete pidfile '%s'" % (shortname,)
try:
log.exception(msg)
except:
print(msg)
_files.remove(f)
def _handle_DownEvent (event):
_del_pidfiles()
def launch (file, force = False, __INSTANCE__ = None):
global log
log = core.getLogger()
absfile = os.path.abspath(file)
if absfile in _files:
log.warn("pidfile '%s' specified multiple times", file)
return
global _first_init
if not _first_init:
try:
atexit.register(_del_pidfiles)
except:
log.info('atexit not available')
core.addListenerByName("DownEvent", _handle_DownEvent)
_first_init = True
if os.path.exists(absfile) and not force:
log.error("Aborting startup: pidfile '%s' exists "
"(use --force to override)", file)
return False
try:
f = open(absfile, 'w')
f.write("%s\n" % (os.getpid(),))
except:
log.exception("Failed to create pidfile '%s'", file)
return False
f.close()
_files.add(absfile)
| mit | -7,180,825,116,749,087,000 | 8,276,660,549,994,502,000 | 22.550562 | 74 | 0.650286 | false |
caot/intellij-community | python/lib/Lib/encodings/cp1140.py | 593 | 13361 |
""" Python Character Mapping Codec cp1140 generated from 'python-mappings/CP1140.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1140',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
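# Illustrative round trip (per the decoding table below, once the codec is registered):
#   '\x40'.decode('cp1140') -> u' '  and  u' '.encode('cp1140') -> '\x40'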
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\xa0' # 0x41 -> NO-BREAK SPACE
u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
u'\xa2' # 0x4A -> CENT SIGN
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'|' # 0x4F -> VERTICAL LINE
u'&' # 0x50 -> AMPERSAND
u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
u'!' # 0x5A -> EXCLAMATION MARK
u'$' # 0x5B -> DOLLAR SIGN
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'\xac' # 0x5F -> NOT SIGN
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xa6' # 0x6A -> BROKEN BAR
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
u'`' # 0x79 -> GRAVE ACCENT
u':' # 0x7A -> COLON
u'#' # 0x7B -> NUMBER SIGN
u'@' # 0x7C -> COMMERCIAL AT
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'"' # 0x7F -> QUOTATION MARK
u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
u'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
u'\xb8' # 0x9D -> CEDILLA
u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
u'\u20ac' # 0x9F -> EURO SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'~' # 0xA1 -> TILDE
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
u'\xbf' # 0xAB -> INVERTED QUESTION MARK
u'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
u'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
u'\xae' # 0xAF -> REGISTERED SIGN
u'^' # 0xB0 -> CIRCUMFLEX ACCENT
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'[' # 0xBA -> LEFT SQUARE BRACKET
u']' # 0xBB -> RIGHT SQUARE BRACKET
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'{' # 0xC0 -> LEFT CURLY BRACKET
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
u'}' # 0xD0 -> RIGHT CURLY BRACKET
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\\' # 0xE0 -> REVERSE SOLIDUS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
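### Illustrative round-trip check (added for clarity; not part of the
### generated codec). It only uses the tables defined above, so it does not
### assume that the codec has been registered under the name 'cp1140'.
if __name__ == '__main__':
    _sample = u'caf\xe9 \u20ac' # e-acute and the euro sign are both in CP1140
    _encoded = codecs.charmap_encode(_sample, 'strict', encoding_table)[0]
    _decoded = codecs.charmap_decode(_encoded, 'strict', decoding_table)[0]
    assert _decoded == _sample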
|
apache-2.0
| -6,658,461,084,529,901,000 | 4,425,250,346,500,665,300 | 42.521173 | 103 | 0.528029 | false |
numerigraphe/odoo
|
addons/sale_mrp/tests/__init__.py
|
262
|
1085
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_move_explode
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| -2,785,700,287,957,324,300 | -527,356,294,189,384,800 | 44.208333 | 78 | 0.619355 | false |
datalogics-robb/scons
|
bin/SConsDoc.py
|
2
|
9625
|
#!/usr/bin/env python
#
# Module for handling SCons documentation processing.
#
__doc__ = """
This module parses home-brew XML files that document various things
in SCons. Right now, it handles Builders, construction variables,
and Tools, but we expect it to get extended in the future.
In general, you can use any DocBook tag in the input, and this module
just adds processing of various home-brew tags to try to make life a
little easier.
Builder example:
<builder name="VARIABLE">
<summary>
    This is the summary description of an SCons Builder.
It will get placed in the man page,
and in the appropriate User's Guide appendix.
The name of any builder may be interpolated
anywhere in the document by specifying the
&b-VARIABLE;
element. It need not be on a line by itself.
Unlike normal XML, blank lines are significant in these
descriptions and serve to separate paragraphs.
They'll get replaced in DocBook output with appropriate tags
to indicate a new paragraph.
<example>
print "this is example code, it will be offset and indented"
</example>
</summary>
</builder>
Construction variable example:
<cvar name="VARIABLE">
<summary>
This is the summary description of a construction variable.
It will get placed in the man page,
and in the appropriate User's Guide appendix.
The name of any construction variable may be interpolated
anywhere in the document by specifying the
    &cv-VARIABLE;
element. It need not be on a line by itself.
Unlike normal XML, blank lines are significant in these
descriptions and serve to separate paragraphs.
They'll get replaced in DocBook output with appropriate tags
to indicate a new paragraph.
<example>
print "this is example code, it will be offset and indented"
</example>
</summary>
</cvar>
Tool example:
<tool name="VARIABLE">
<summary>
This is the summary description of an SCons Tool.
It will get placed in the man page,
and in the appropriate User's Guide appendix.
The name of any tool may be interpolated
anywhere in the document by specifying the
&t-VARIABLE;
element. It need not be on a line by itself.
Unlike normal XML, blank lines are significant in these
descriptions and serve to separate paragraphs.
They'll get replaced in DocBook output with appropriate tags
to indicate a new paragraph.
<example>
print "this is example code, it will be offset and indented"
</example>
</summary>
</tool>
"""
import os.path
import imp
import sys
import xml.sax.handler
class Item:
def __init__(self, name):
self.name = name
self.sort_name = name.lower()
if self.sort_name[0] == '_':
self.sort_name = self.sort_name[1:]
self.summary = []
self.sets = None
self.uses = None
def cmp_name(self, name):
if name[0] == '_':
name = name[1:]
return name.lower()
def __cmp__(self, other):
return cmp(self.sort_name, other.sort_name)
class Builder(Item):
pass
class Tool(Item):
def __init__(self, name):
Item.__init__(self, name)
self.entity = self.name.replace('+', 'X')
class ConstructionVariable(Item):
pass
class Chunk:
def __init__(self, tag, body=None):
self.tag = tag
if not body:
body = []
self.body = body
def __str__(self):
body = ''.join(self.body)
return "<%s>%s</%s>\n" % (self.tag, body, self.tag)
def append(self, data):
self.body.append(data)
class Summary:
def __init__(self):
self.body = []
self.collect = []
def append(self, data):
self.collect.append(data)
def end_para(self):
text = ''.join(self.collect)
paras = text.split('\n\n')
if paras == ['\n']:
return
if paras[0] == '':
self.body.append('\n')
paras = paras[1:]
paras[0] = '\n' + paras[0]
if paras[-1] == '':
paras = paras[:-1]
paras[-1] = paras[-1] + '\n'
last = '\n'
else:
last = None
sep = None
for p in paras:
c = Chunk("para", p)
if sep:
self.body.append(sep)
self.body.append(c)
sep = '\n'
if last:
self.body.append(last)
def begin_chunk(self, chunk):
self.end_para()
self.collect = chunk
def end_chunk(self):
self.body.append(self.collect)
self.collect = []
class SConsDocHandler(xml.sax.handler.ContentHandler,
xml.sax.handler.ErrorHandler):
def __init__(self):
self._start_dispatch = {}
self._end_dispatch = {}
keys = self.__class__.__dict__.keys()
start_tag_method_names = filter(lambda k: k[:6] == 'start_', keys)
end_tag_method_names = filter(lambda k: k[:4] == 'end_', keys)
for method_name in start_tag_method_names:
tag = method_name[6:]
self._start_dispatch[tag] = getattr(self, method_name)
for method_name in end_tag_method_names:
tag = method_name[4:]
self._end_dispatch[tag] = getattr(self, method_name)
self.stack = []
self.collect = []
self.current_object = []
self.builders = {}
self.tools = {}
self.cvars = {}
def startElement(self, name, attrs):
try:
start_element_method = self._start_dispatch[name]
except KeyError:
self.characters('<%s>' % name)
else:
start_element_method(attrs)
def endElement(self, name):
try:
end_element_method = self._end_dispatch[name]
except KeyError:
self.characters('</%s>' % name)
else:
end_element_method()
#
#
def characters(self, chars):
self.collect.append(chars)
def begin_collecting(self, chunk):
self.collect = chunk
def end_collecting(self):
self.collect = []
def begin_chunk(self):
pass
def end_chunk(self):
pass
#
#
#
def begin_xxx(self, obj):
self.stack.append(self.current_object)
self.current_object = obj
def end_xxx(self):
self.current_object = self.stack.pop()
#
#
#
def start_scons_doc(self, attrs):
pass
def end_scons_doc(self):
pass
def start_builder(self, attrs):
name = attrs.get('name')
try:
builder = self.builders[name]
except KeyError:
builder = Builder(name)
self.builders[name] = builder
self.begin_xxx(builder)
def end_builder(self):
self.end_xxx()
def start_tool(self, attrs):
name = attrs.get('name')
try:
tool = self.tools[name]
except KeyError:
tool = Tool(name)
self.tools[name] = tool
self.begin_xxx(tool)
def end_tool(self):
self.end_xxx()
def start_cvar(self, attrs):
name = attrs.get('name')
try:
cvar = self.cvars[name]
except KeyError:
cvar = ConstructionVariable(name)
self.cvars[name] = cvar
self.begin_xxx(cvar)
def end_cvar(self):
self.end_xxx()
def start_summary(self, attrs):
summary = Summary()
self.current_object.summary = summary
self.begin_xxx(summary)
self.begin_collecting(summary)
def end_summary(self):
self.current_object.end_para()
self.end_xxx()
def start_example(self, attrs):
example = Chunk("programlisting")
self.current_object.begin_chunk(example)
def end_example(self):
self.current_object.end_chunk()
def start_uses(self, attrs):
self.begin_collecting([])
def end_uses(self):
self.current_object.uses = ''.join(self.collect).split()
self.current_object.uses.sort()
self.end_collecting()
def start_sets(self, attrs):
self.begin_collecting([])
def end_sets(self):
self.current_object.sets = ''.join(self.collect).split()
self.current_object.sets.sort()
self.end_collecting()
# Stuff for the ErrorHandler portion.
def error(self, exception):
linenum = exception._linenum - self.preamble_lines
sys.stderr.write('%s:%d:%d: %s (error)\n' % (self.filename, linenum, exception._colnum, ''.join(exception.args)))
def fatalError(self, exception):
linenum = exception._linenum - self.preamble_lines
sys.stderr.write('%s:%d:%d: %s (fatalError)\n' % (self.filename, linenum, exception._colnum, ''.join(exception.args)))
def set_file_info(self, filename, preamble_lines):
self.filename = filename
self.preamble_lines = preamble_lines
# lifted from Ka-Ping Yee's way cool pydoc module.
def importfile(path):
"""Import a Python source file or compiled file given its path."""
magic = imp.get_magic()
file = open(path, 'r')
if file.read(len(magic)) == magic:
kind = imp.PY_COMPILED
else:
kind = imp.PY_SOURCE
file.close()
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
file = open(path, 'r')
try:
module = imp.load_module(name, file, path, (ext, 'r', kind))
except ImportError, e:
sys.stderr.write("Could not import %s: %s\n" % (path, e))
return None
file.close()
return module
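# Illustrative driver (a sketch added for clarity, not part of the original
# module): roughly how one of the home-brew XML files described in the module
# docstring would be fed through SConsDocHandler. The file name 'builders.xml'
# and the zero-line preamble are assumptions.
#
#   import xml.sax
#   handler = SConsDocHandler()
#   handler.set_file_info('builders.xml', 0)
#   xml.sax.parse(open('builders.xml'), handler, handler)
#   for name in sorted(handler.builders.keys()):
#       print name, ''.join(map(str, handler.builders[name].summary.body))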
|
mit
| -7,760,240,495,900,307,000 | 909,267,900,168,522,900 | 28.166667 | 126 | 0.589299 | false |
TyberiusPrime/pysam
|
tests/tabix_test.py
|
2
|
43143
|
#!/usr/bin/env python
'''unit testing code for pysam.
Execute in the :file:`tests` directory as it requires the Makefile
and data files located there.
'''
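# Example invocation (illustrative; assumes the test data in tabix_data/ has
# already been unpacked, e.g. via the Makefile in this directory):
#
#   python tabix_test.py TestParser.testRead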
import sys
import os
import shutil
import gzip
import pysam
import unittest
import glob
import re
import copy
from TestUtils import checkURL
DATADIR = 'tabix_data'
IS_PYTHON3 = sys.version_info[0] >= 3
def myzip_open(infile, mode="r"):
'''open compressed file and decode.'''
def _convert(f):
for l in f:
yield l.decode("ascii")
if IS_PYTHON3:
if mode == "r":
return _convert(gzip.open(infile, "r"))
else:
        return gzip.open(infile, mode)
def loadAndConvert(filename, encode=True):
'''load data from filename and convert all fields to string.
Filename can be either plain or compressed (ending in .gz).
'''
data = []
if filename.endswith(".gz"):
with gzip.open(filename) as inf:
for line in inf:
line = line.decode("ascii")
if line.startswith("#"):
continue
d = line.strip().split("\t")
data.append(d)
else:
with open(filename) as f:
for line in f:
if line.startswith("#"):
continue
d = line.strip().split("\t")
data.append(d)
return data
def splitToBytes(s):
'''split string and return list of bytes.'''
return [x.encode("ascii") for x in s.split("\t")]
def checkBinaryEqual(filename1, filename2):
'''return true if the two files are binary equal.'''
if os.path.getsize(filename1) != os.path.getsize(filename2):
return False
with open(filename1, "rb") as infile:
d1 = infile.read()
with open(filename2, "rb") as infile:
d2 = infile.read()
found = False
for c1, c2 in zip(d1, d2):
if c1 != c2:
break
else:
found = True
return found
class TestIndexing(unittest.TestCase):
filename = os.path.join(DATADIR, "example.gtf.gz")
filename_idx = os.path.join(DATADIR, "example.gtf.gz.tbi")
def setUp(self):
self.tmpfilename = "tmp_%i.gtf.gz" % id(self)
shutil.copyfile(self.filename, self.tmpfilename)
def testIndexPreset(self):
'''test indexing via preset.'''
pysam.tabix_index(self.tmpfilename, preset="gff")
checkBinaryEqual(self.tmpfilename + ".tbi", self.filename_idx)
def tearDown(self):
os.unlink(self.tmpfilename)
os.unlink(self.tmpfilename + ".tbi")
class TestCompression(unittest.TestCase):
filename = os.path.join(DATADIR, "example.gtf.gz")
filename_idx = os.path.join(DATADIR, "example.gtf.gz.tbi")
preset = "gff"
def setUp(self):
self.tmpfilename = "tmp_TestCompression_%i" % id(self)
with gzip.open(self.filename, "rb") as infile, \
open(self.tmpfilename, "wb") as outfile:
outfile.write(infile.read())
def testCompression(self):
'''see also issue 106'''
pysam.tabix_compress(self.tmpfilename, self.tmpfilename + ".gz")
checkBinaryEqual(self.tmpfilename, self.tmpfilename + ".gz")
def testIndexPresetUncompressed(self):
'''test indexing via preset.'''
pysam.tabix_index(self.tmpfilename, preset=self.preset)
# check if uncompressed file has been removed
self.assertEqual(os.path.exists(self.tmpfilename), False)
checkBinaryEqual(self.tmpfilename + ".gz", self.filename)
checkBinaryEqual(self.tmpfilename + ".gz.tbi", self.filename_idx)
def testIndexPresetCompressed(self):
'''test indexing via preset.'''
pysam.tabix_compress(self.tmpfilename, self.tmpfilename + ".gz")
pysam.tabix_index(self.tmpfilename + ".gz", preset=self.preset)
checkBinaryEqual(self.tmpfilename + ".gz", self.filename)
checkBinaryEqual(self.tmpfilename + ".gz.tbi", self.filename_idx)
def tearDown(self):
try:
os.unlink(self.tmpfilename)
os.unlink(self.tmpfilename + ".gz")
os.unlink(self.tmpfilename + ".gz.tbi")
except OSError:
pass
class TestCompressionSam(TestCompression):
filename = os.path.join(DATADIR, "example.sam.gz")
filename_index = os.path.join(DATADIR, "example.sam.gz.tbi")
preset = "sam"
class TestCompressionBed(TestCompression):
filename = os.path.join(DATADIR, "example.bed.gz")
    filename_idx = os.path.join(DATADIR, "example.bed.gz.tbi")
preset = "bed"
class TestCompressionVCF(TestCompression):
filename = os.path.join(DATADIR, "example.vcf.gz")
    filename_idx = os.path.join(DATADIR, "example.vcf.gz.tbi")
preset = "vcf"
class IterationTest(unittest.TestCase):
with_comments = False
def setUp(self):
lines = []
with gzip.open(self.filename, "rb") as inf:
for line in inf:
line = line.decode('ascii')
if line.startswith("#"):
if not self.with_comments:
continue
lines.append(line)
# creates index of contig, start, end, adds content without newline.
self.compare = [
(x[0][0], int(x[0][3]), int(x[0][4]), x[1])
for x in [(y.split("\t"), y[:-1]) for y in lines
if not y.startswith("#")]]
self.comments = [x[:-1] for x in lines if x.startswith("#")]
def getSubset(self, contig=None, start=None, end=None):
if contig is None:
# all lines
subset = [x[3] for x in self.compare]
else:
if start is not None and end is None:
# until end of contig
subset = [x[3]
for x in self.compare if x[0] == contig
and x[2] > start]
elif start is None and end is not None:
# from start of contig
subset = [x[3]
for x in self.compare if x[0] == contig
and x[1] <= end]
elif start is None and end is None:
subset = [x[3] for x in self.compare if x[0] == contig]
else:
# all within interval
subset = [x[3] for x in self.compare if x[0] == contig
and min(x[2], end) - max(x[1], start) > 0]
if self.with_comments:
subset.extend(self.comments)
return subset
def checkPairwise(self, result, ref):
'''check pairwise results.
'''
result.sort()
ref.sort()
a = set(result)
b = set(ref)
self.assertEqual(
len(result), len(ref),
"unexpected number of results: "
"result=%i, expected ref=%i, differences are %s: %s"
% (len(result), len(ref),
a.difference(b),
b.difference(a)))
for x, d in enumerate(list(zip(result, ref))):
self.assertEqual(
d[0], d[1],
"unexpected results in pair %i:\n'%s', expected\n'%s'" %
(x, d[0], d[1]))
class TestGZFile(IterationTest):
filename = os.path.join(DATADIR, "example.gtf.gz")
with_comments = True
def setUp(self):
IterationTest.setUp(self)
self.gzfile = pysam.GZIterator(self.filename)
def testAll(self):
result = list(self.gzfile)
ref = self.getSubset()
self.checkPairwise(result, ref)
class TestIterationWithoutComments(IterationTest):
'''test iterating with TabixFile.fetch() when
there are no comments in the file.'''
filename = os.path.join(DATADIR,
"example.gtf.gz")
def setUp(self):
IterationTest.setUp(self)
self.tabix = pysam.TabixFile(self.filename)
def tearDown(self):
self.tabix.close()
def testRegionStrings(self):
"""test if access with various region strings
works"""
self.assertEqual(218, len(list(
self.tabix.fetch("chr1"))))
self.assertEqual(218, len(list(
self.tabix.fetch("chr1", 1000))))
self.assertEqual(218, len(list(
self.tabix.fetch("chr1", end=1000000))))
self.assertEqual(218, len(list(
self.tabix.fetch("chr1", 1000, 1000000))))
def testAll(self):
result = list(self.tabix.fetch())
ref = self.getSubset()
self.checkPairwise(result, ref)
def testPerContig(self):
for contig in ("chr1", "chr2", "chr1", "chr2"):
result = list(self.tabix.fetch(contig))
ref = self.getSubset(contig)
self.checkPairwise(result, ref)
def testPerContigToEnd(self):
end = None
for contig in ("chr1", "chr2", "chr1", "chr2"):
for start in range(0, 200000, 1000):
result = list(self.tabix.fetch(contig, start, end))
ref = self.getSubset(contig, start, end)
self.checkPairwise(result, ref)
def testPerContigFromStart(self):
start = None
for contig in ("chr1", "chr2", "chr1", "chr2"):
for end in range(0, 200000, 1000):
result = list(self.tabix.fetch(contig, start, end))
ref = self.getSubset(contig, start, end)
self.checkPairwise(result, ref)
def testPerContig2(self):
start, end = None, None
for contig in ("chr1", "chr2", "chr1", "chr2"):
result = list(self.tabix.fetch(contig, start, end))
ref = self.getSubset(contig, start, end)
self.checkPairwise(result, ref)
def testPerInterval(self):
start, end = None, None
for contig in ("chr1", "chr2", "chr1", "chr2"):
for start in range(0, 200000, 2000):
for end in range(start, start + 2000, 500):
result = list(self.tabix.fetch(contig, start, end))
ref = self.getSubset(contig, start, end)
self.checkPairwise(result, ref)
def testInvalidIntervals(self):
# invalid intervals (start > end)
self.assertRaises(ValueError, self.tabix.fetch, "chr1", 0, -10)
self.assertRaises(ValueError, self.tabix.fetch, "chr1", 200, 0)
# out of range intervals
self.assertRaises(ValueError, self.tabix.fetch, "chr1", -10, 200)
self.assertRaises(ValueError, self.tabix.fetch, "chr1", -10, -20)
# unknown chromosome
self.assertRaises(ValueError, self.tabix.fetch, "chrUn")
# out of range access
# to be implemented
# self.assertRaises(IndexError, self.tabix.fetch, "chr1", 1000000, 2000000)
# raise no error for empty intervals
self.tabix.fetch("chr1", 100, 100)
def testGetContigs(self):
self.assertEqual(sorted(self.tabix.contigs), ["chr1", "chr2"])
# check that contigs is read-only
self.assertRaises(
AttributeError, setattr, self.tabix, "contigs", ["chr1", "chr2"])
def testHeader(self):
ref = []
with gzip.open(self.filename) as inf:
for x in inf:
x = x.decode("ascii")
if not x.startswith("#"):
break
ref.append(x[:-1].encode('ascii'))
header = list(self.tabix.header)
self.assertEqual(ref, header)
def testReopening(self):
'''test repeated opening of the same file.'''
def func1():
# opens any tabix file
with pysam.TabixFile(self.filename) as inf:
pass
for i in range(1000):
func1()
class TestIterationWithComments(TestIterationWithoutComments):
'''test iterating with TabixFile.fetch() when
there are comments in the file.
Tests will create plenty of warnings on stderr.
'''
filename = os.path.join(DATADIR, "example_comments.gtf.gz")
def setUp(self):
TestIterationWithoutComments.setUp(self)
class TestParser(unittest.TestCase):
filename = os.path.join(DATADIR, "example.gtf.gz")
def setUp(self):
self.tabix = pysam.TabixFile(self.filename)
self.compare = loadAndConvert(self.filename)
def tearDown(self):
self.tabix.close()
def testRead(self):
for x, r in enumerate(self.tabix.fetch(parser=pysam.asTuple())):
c = self.compare[x]
self.assertEqual(c, list(r))
self.assertEqual(len(c), len(r))
# test indexing
for y in range(0, len(r)):
self.assertEqual(c[y], r[y])
# test slicing access
for y in range(0, len(r) - 1):
for cc in range(y + 1, len(r)):
self.assertEqual(c[y:cc],
r[y:cc])
self.assertEqual("\t".join(map(str, c)),
str(r))
def testWrite(self):
for x, r in enumerate(self.tabix.fetch(parser=pysam.asTuple())):
self.assertEqual(self.compare[x], list(r))
c = list(r)
for y in range(len(r)):
r[y] = "test_%05i" % y
c[y] = "test_%05i" % y
self.assertEqual([x for x in c], list(r))
self.assertEqual("\t".join(c), str(r))
# check second assignment
for y in range(len(r)):
r[y] = "test_%05i" % y
self.assertEqual([x for x in c], list(r))
self.assertEqual("\t".join(c), str(r))
def testUnset(self):
for x, r in enumerate(self.tabix.fetch(parser=pysam.asTuple())):
self.assertEqual(self.compare[x], list(r))
c = list(r)
e = list(r)
for y in range(len(r)):
r[y] = None
c[y] = None
e[y] = ""
self.assertEqual(c, list(r))
self.assertEqual("\t".join(e), str(r))
def testIteratorCompressed(self):
'''test iteration from compressed file.'''
with gzip.open(self.filename) as infile:
for x, r in enumerate(pysam.tabix_iterator(
infile, pysam.asTuple())):
self.assertEqual(self.compare[x], list(r))
self.assertEqual(len(self.compare[x]), len(r))
# test indexing
for c in range(0, len(r)):
self.assertEqual(self.compare[x][c], r[c])
# test slicing access
for c in range(0, len(r) - 1):
for cc in range(c + 1, len(r)):
self.assertEqual(self.compare[x][c:cc],
r[c:cc])
def testIteratorUncompressed(self):
'''test iteration from uncompressed file.'''
tmpfilename = 'tmp_testIteratorUncompressed'
with gzip.open(self.filename, "rb") as infile, \
open(tmpfilename, "wb") as outfile:
outfile.write(infile.read())
with open(tmpfilename) as infile:
for x, r in enumerate(pysam.tabix_iterator(
infile, pysam.asTuple())):
self.assertEqual(self.compare[x], list(r))
self.assertEqual(len(self.compare[x]), len(r))
# test indexing
for c in range(0, len(r)):
self.assertEqual(self.compare[x][c], r[c])
# test slicing access
for c in range(0, len(r) - 1):
for cc in range(c + 1, len(r)):
self.assertEqual(self.compare[x][c:cc],
r[c:cc])
os.unlink(tmpfilename)
def testCopy(self):
a = self.tabix.fetch(parser=pysam.asTuple()).next()
b = copy.copy(a)
self.assertEqual(a, b)
a = self.tabix.fetch(parser=pysam.asGTF()).next()
b = copy.copy(a)
self.assertEqual(a, b)
class TestGTF(TestParser):
def testRead(self):
for x, r in enumerate(self.tabix.fetch(parser=pysam.asGTF())):
c = self.compare[x]
self.assertEqual(len(c), len(r))
self.assertEqual(list(c), list(r))
self.assertEqual(c, str(r).split("\t"))
self.assertTrue(r.gene_id.startswith("ENSG"))
if r.feature != 'gene':
self.assertTrue(r.transcript_id.startswith("ENST"))
self.assertEqual(c[0], r.contig)
self.assertEqual("\t".join(map(str, c)),
str(r))
def testSetting(self):
for r in self.tabix.fetch(parser=pysam.asGTF()):
r.contig = r.contig + "_test"
r.source = r.source + "_test"
r.feature = r.feature + "_test"
r.start += 10
r.end += 10
r.score = 20
r.strand = "+"
r.frame = 0
r.attributes = 'gene_id "0001";'
class TestIterators(unittest.TestCase):
filename = os.path.join(DATADIR, "example.gtf.gz")
iterator = pysam.tabix_generic_iterator
parser = pysam.asTuple
is_compressed = False
def setUp(self):
self.tabix = pysam.TabixFile(self.filename)
self.compare = loadAndConvert(self.filename)
self.tmpfilename_uncompressed = 'tmp_TestIterators'
with gzip.open(self.filename, "rb") as infile, \
open(self.tmpfilename_uncompressed, "wb") as outfile:
outfile.write(infile.read())
def tearDown(self):
self.tabix.close()
os.unlink(self.tmpfilename_uncompressed)
def open(self):
if self.is_compressed:
infile = gzip.open(self.filename)
else:
infile = open(self.tmpfilename_uncompressed)
return infile
def testIteration(self):
with self.open() as infile:
for x, r in enumerate(self.iterator(infile, self.parser())):
self.assertEqual(self.compare[x], list(r))
self.assertEqual(len(self.compare[x]), len(r))
# test indexing
for c in range(0, len(r)):
self.assertEqual(self.compare[x][c], r[c])
# test slicing access
for c in range(0, len(r) - 1):
for cc in range(c + 1, len(r)):
self.assertEqual(self.compare[x][c:cc],
r[c:cc])
def testClosedFile(self):
'''test for error when iterating from closed file.'''
infile = self.open()
infile.close()
# iterating from a closed file should raise a value error
self.assertRaises(ValueError, self.iterator, infile, self.parser())
def testClosedFileIteration(self):
'''test for error when iterating from file that has been closed'''
infile = self.open()
i = self.iterator(infile, self.parser())
x = i.next()
infile.close()
# Not implemented
# self.assertRaises(ValueError, i.next)
class TestIteratorsGenericCompressed(TestIterators):
is_compressed = True
class TestIteratorsFileCompressed(TestIterators):
iterator = pysam.tabix_file_iterator
is_compressed = True
class TestIteratorsFileUncompressed(TestIterators):
iterator = pysam.tabix_file_iterator
is_compressed = False
class TestIterationMalformattedGTFFiles(unittest.TestCase):
'''test reading from malformatted gtf files.'''
parser = pysam.asGTF
iterator = pysam.tabix_generic_iterator
parser = pysam.asGTF
def testGTFTooManyFields(self):
with gzip.open(os.path.join(
DATADIR,
"gtf_toomany_fields.gtf.gz")) as infile:
iterator = self.iterator(
infile,
parser=self.parser())
self.assertRaises(ValueError, iterator.next)
def testGTFTooFewFields(self):
with gzip.open(os.path.join(
DATADIR,
"gtf_toofew_fields.gtf.gz")) as infile:
iterator = self.iterator(
infile,
parser=self.parser())
self.assertRaises(ValueError, iterator.next)
class TestBed(unittest.TestCase):
filename = os.path.join(DATADIR, "example.bed.gz")
def setUp(self):
self.tabix = pysam.TabixFile(self.filename)
self.compare = loadAndConvert(self.filename)
def tearDown(self):
self.tabix.close()
def testRead(self):
for x, r in enumerate(self.tabix.fetch(parser=pysam.asBed())):
c = self.compare[x]
self.assertEqual(len(c), len(r))
self.assertEqual(c, str(r).split("\t"))
self.assertEqual(c[0], r.contig)
self.assertEqual(int(c[1]), r.start)
self.assertEqual(int(c[2]), r.end)
self.assertEqual(list(c), list(r))
self.assertEqual("\t".join(map(str, c)),
str(r))
def testWrite(self):
for x, r in enumerate(self.tabix.fetch(parser=pysam.asBed())):
c = self.compare[x]
self.assertEqual(c, str(r).split("\t"))
self.assertEqual(list(c), list(r))
r.contig = "test"
self.assertEqual("test", r.contig)
self.assertEqual("test", r[0])
r.start += 1
self.assertEqual(int(c[1]) + 1, r.start)
self.assertEqual(str(int(c[1]) + 1), r[1])
r.end += 1
self.assertEqual(int(c[2]) + 1, r.end)
self.assertEqual(str(int(c[2]) + 1), r[2])
class TestVCF(unittest.TestCase):
filename = os.path.join(DATADIR, "example.vcf40")
def setUp(self):
self.tmpfilename = "tmp_%s.vcf" % id(self)
shutil.copyfile(self.filename, self.tmpfilename)
pysam.tabix_index(self.tmpfilename, preset="vcf")
def tearDown(self):
os.unlink(self.tmpfilename + ".gz")
if os.path.exists(self.tmpfilename + ".gz.tbi"):
os.unlink(self.tmpfilename + ".gz.tbi")
if IS_PYTHON3:
class TestUnicode(unittest.TestCase):
'''test reading from a file with non-ascii characters.'''
filename = os.path.join(DATADIR, "example_unicode.vcf")
def setUp(self):
self.tmpfilename = "tmp_%s.vcf" % id(self)
shutil.copyfile(self.filename, self.tmpfilename)
pysam.tabix_index(self.tmpfilename, preset="vcf")
def testFromTabix(self):
# use ascii encoding - should raise error
with pysam.TabixFile(
self.tmpfilename + ".gz", encoding="ascii") as t:
results = list(t.fetch(parser=pysam.asVCF()))
self.assertRaises(UnicodeDecodeError, getattr, results[1], "id")
with pysam.TabixFile(
self.tmpfilename + ".gz", encoding="utf-8") as t:
results = list(t.fetch(parser=pysam.asVCF()))
self.assertEqual(getattr(results[1], "id"), u"Rene\xe9")
def testFromVCF(self):
self.vcf = pysam.VCF()
self.assertRaises(
UnicodeDecodeError,
self.vcf.connect, self.tmpfilename + ".gz", "ascii")
self.vcf.connect(self.tmpfilename + ".gz", encoding="utf-8")
v = self.vcf.getsamples()[0]
class TestVCFFromTabix(TestVCF):
columns = ("contig", "pos", "id",
"ref", "alt", "qual",
"filter", "info", "format")
def setUp(self):
TestVCF.setUp(self)
self.tabix = pysam.TabixFile(self.tmpfilename + ".gz")
self.compare = loadAndConvert(self.filename)
def tearDown(self):
self.tabix.close()
def testRead(self):
ncolumns = len(self.columns)
for x, r in enumerate(self.tabix.fetch(parser=pysam.asVCF())):
c = self.compare[x]
for y, field in enumerate(self.columns):
# it is ok to have a missing format column
if y == 8 and y == len(c):
continue
if field == "pos":
self.assertEqual(int(c[y]) - 1, getattr(r, field))
self.assertEqual(int(c[y]) - 1, r.pos)
else:
self.assertEqual(c[y], getattr(r, field),
"mismatch in field %s: %s != %s" %
(field, c[y], getattr(r, field)))
if len(c) == 8:
self.assertEqual(0, len(r))
else:
self.assertEqual(len(c), len(r) + ncolumns)
for y in range(len(c) - ncolumns):
self.assertEqual(c[ncolumns + y], r[y])
self.assertEqual("\t".join(map(str, c)),
str(r))
def testWrite(self):
ncolumns = len(self.columns)
for x, r in enumerate(self.tabix.fetch(parser=pysam.asVCF())):
c = self.compare[x]
# check unmodified string
cmp_string = str(r)
ref_string = "\t".join([x for x in c])
self.assertEqual(ref_string, cmp_string)
# set fields and compare field-wise
for y, field in enumerate(self.columns):
# it is ok to have a missing format column
if y == 8 and y == len(c):
continue
if field == "pos":
rpos = getattr(r, field)
self.assertEqual(int(c[y]) - 1, rpos)
self.assertEqual(int(c[y]) - 1, r.pos)
# increment pos by 1
setattr(r, field, rpos + 1)
self.assertEqual(getattr(r, field), rpos + 1)
c[y] = str(int(c[y]) + 1)
else:
setattr(r, field, "test_%i" % y)
c[y] = "test_%i" % y
self.assertEqual(c[y], getattr(r, field),
"mismatch in field %s: %s != %s" %
(field, c[y], getattr(r, field)))
if len(c) == 8:
self.assertEqual(0, len(r))
else:
self.assertEqual(len(c), len(r) + ncolumns)
for y in range(len(c) - ncolumns):
c[ncolumns + y] = "test_%i" % y
r[y] = "test_%i" % y
self.assertEqual(c[ncolumns + y], r[y])
class TestVCFFromVCF(TestVCF):
columns = ("chrom", "pos", "id",
"ref", "alt", "qual",
"filter", "info", "format")
# tests failing while parsing
fail_on_parsing = (
(5, "Flag fields should not have a value"),
(9, "aouao"),
(13, "aoeu"),
(18, "Error BAD_NUMBER_OF_PARAMETERS"),
(24, "Error HEADING_NOT_SEPARATED_BY_TABS"))
# tests failing on opening
fail_on_opening = ((24, "Error HEADING_NOT_SEPARATED_BY_TABS"),
)
fail_on_samples = []
check_samples = False
coordinate_offset = 1
# value returned for missing values
missing_value = "."
missing_quality = -1
def setUp(self):
TestVCF.setUp(self)
self.vcf = pysam.VCF()
self.compare = loadAndConvert(self.filename, encode=False)
def tearDown(self):
self.vcf.close()
def testConnecting(self):
fn = os.path.basename(self.filename)
for x, msg in self.fail_on_opening:
if "%i.vcf" % x == fn:
self.assertRaises(ValueError,
self.vcf.connect,
self.tmpfilename + ".gz")
else:
self.vcf.connect(self.tmpfilename + ".gz")
def get_iterator(self):
with open(self.filename) as f:
fn = os.path.basename(self.filename)
for x, msg in self.fail_on_opening:
if "%i.vcf" % x == fn:
self.assertRaises(ValueError, self.vcf.parse, f)
return
for vcf_code, msg in self.fail_on_parsing:
if "%i.vcf" % vcf_code == fn:
self.assertRaises((ValueError,
AssertionError),
list, self.vcf.parse(f))
return
# python 2.7
# self.assertRaisesRegexp(
# ValueError, re.compile(msg), self.vcf.parse, f)
return list(self.vcf.parse(f))
def get_field_value(self, record, field):
return record[field]
def sample2value(self, r, v):
return r, v
def alt2value(self, r, v):
if r == ".":
return [], v
else:
return r.split(","), list(v)
def filter2value(self, r, v):
if r == "PASS":
return [], v
elif r == ".":
return [], v
else:
return r.split(";"), v
def testParsing(self):
itr = self.get_iterator()
if itr is None:
return
fn = os.path.basename(self.filename)
for vcf_code, msg in self.fail_on_parsing:
if "%i.vcf" % vcf_code == fn:
self.assertRaises((ValueError,
AssertionError),
list, itr)
return
# python 2.7
# self.assertRaisesRegexp(
# ValueError, re.compile(msg), self.vcf.parse, f)
check_samples = self.check_samples
for vcf_code, msg in self.fail_on_samples:
if "%i.vcf" % vcf_code == fn:
check_samples = False
for x, r in enumerate(itr):
c = self.compare[x]
for y, field in enumerate(self.columns):
# it is ok to have a missing format column
if y == 8 and y == len(c):
continue
val = self.get_field_value(r, field)
if field == "pos":
self.assertEqual(int(c[y]) - self.coordinate_offset,
val)
elif field == "alt" or field == "alts":
cc, vv = self.alt2value(c[y], val)
if cc != vv:
# import pdb; pdb.set_trace()
pass
self.assertEqual(
cc, vv,
"mismatch in field %s: expected %s, got %s" %
(field, cc, vv))
elif field == "filter":
cc, vv = self.filter2value(c[y], val)
self.assertEqual(
cc, vv,
"mismatch in field %s: expected %s, got %s" %
(field, cc, vv))
elif field == "info":
# tests for info field not implemented
pass
elif field == "qual" and c[y] == ".":
self.assertEqual(
self.missing_quality, val,
"mismatch in field %s: expected %s, got %s" %
(field, c[y], val))
elif field == "format":
# format field converted to list
self.assertEqual(
c[y].split(":"), list(val),
"mismatch in field %s: expected %s, got %s" %
(field, c[y], val))
elif type(val) in (int, float):
if c[y] == ".":
self.assertEqual(
None, val,
"mismatch in field %s: expected %s, got %s" %
(field, c[y], val))
else:
self.assertAlmostEqual(
float(c[y]), float(val), 2,
"mismatch in field %s: expected %s, got %s" %
(field, c[y], val))
else:
if c[y] == ".":
ref_val = self.missing_value
else:
ref_val = c[y]
self.assertEqual(
ref_val, val,
"mismatch in field %s: expected %s(%s), got %s(%s)" %
(field, ref_val, type(ref_val), val, type(val)))
# parse samples
if check_samples:
if len(c) == 8:
for x, s in enumerate(r.samples):
self.assertEqual(
[], r.samples[s].values(),
"mismatch in sample {}: "
"expected [], got {}, src={}, line={}".format(
s, r.samples[s].values(),
r.samples[s].items(), r))
else:
for x, s in enumerate(r.samples):
ref, comp = self.sample2value(
c[9 + x],
r.samples[s])
self.compare_samples(ref, comp, s, r)
def compare_samples(self, ref, comp, s, r):
if ref != comp:
# check if GT not at start, not VCF conform and
# not supported by cbcf.pyx
k = r.format.keys()
if "GT" in k and k[0] != "GT":
return
            # perform an element-wise check to work around rounding differences
for a, b in zip(re.split("[:,;]", ref),
re.split("[:,;]", comp)):
is_float = True
try:
a = float(a)
b = float(b)
except ValueError:
is_float = False
if is_float:
self.assertAlmostEqual(
a, b, 2,
"mismatch in sample {}: "
"expected {}, got {}, src={}, line={}"
.format(
s, ref, comp,
r.samples[s].items(), r))
else:
self.assertEqual(
a, b,
"mismatch in sample {}: "
"expected {}, got {}, src={}, line={}"
.format(
s, ref, comp,
r.samples[s].items(), r))
############################################################################
# create a test class for each example vcf file.
# Two samples are created -
# 1. Testing pysam/tabix access
# 2. Testing the VCF class
vcf_files = glob.glob(os.path.join(DATADIR, "vcf", "*.vcf"))
for vcf_file in vcf_files:
n = "VCFFromTabixTest_%s" % os.path.basename(vcf_file[:-4])
globals()[n] = type(n, (TestVCFFromTabix,), dict(filename=vcf_file,))
n = "VCFFromVCFTest_%s" % os.path.basename(vcf_file[:-4])
globals()[n] = type(n, (TestVCFFromVCF,), dict(filename=vcf_file,))
class TestVCFFromVariantFile(TestVCFFromVCF):
columns = ("chrom", "pos", "id",
"ref", "alts", "qual",
"filter", "info", "format")
fail_on_parsing = []
fail_on_opening = []
coordinate_offset = 0
check_samples = True
fail_on_samples = [
(9, "PL field not defined. Expected to be scalar, but is array"),
(12, "PL field not defined. Expected to be scalar, but is array"),
(18, "PL field not defined. Expected to be scalar, but is array"),
]
# value returned for missing values
missing_value = None
missing_quality = None
vcf = None
def filter2value(self, r, v):
if r == "PASS":
return ["PASS"], list(v)
elif r == ".":
return [], list(v)
else:
return r.split(";"), list(v)
def alt2value(self, r, v):
if r == ".":
return None, v
else:
return r.split(","), list(v)
def sample2value(self, r, smp):
def convert_field(f):
if f is None:
return "."
elif isinstance(f, tuple):
return ",".join(map(convert_field, f))
else:
return str(f)
v = smp.values()
if 'GT' in smp:
alleles = [str(a) if a is not None else '.' for a in smp.allele_indices]
v[0] = '/|'[smp.phased].join(alleles)
comp = ":".join(map(convert_field, v))
if comp.endswith(":."):
comp = comp[:-2]
return r, comp
def setUp(self):
TestVCF.setUp(self)
self.compare = loadAndConvert(self.filename, encode=False)
def tearDown(self):
if self.vcf:
self.vcf.close()
self.vcf = None
def get_iterator(self):
self.vcf = pysam.VariantFile(self.filename)
return self.vcf.fetch()
def get_field_value(self, record, field):
return getattr(record, field)
for vcf_file in vcf_files:
n = "TestVCFFromVariantFile_%s" % os.path.basename(vcf_file[:-4])
globals()[n] = type(n, (TestVCFFromVariantFile,), dict(filename=vcf_file,))
class TestRemoteFileHTTP(unittest.TestCase):
url = "http://genserv.anat.ox.ac.uk/downloads/pysam/test/example_htslib.gtf.gz"
region = "chr1:1-1000"
local = os.path.join(DATADIR, "example.gtf.gz")
def setUp(self):
if not checkURL(self.url):
self.remote_file = None
return
self.remote_file = pysam.TabixFile(self.url, "r")
self.local_file = pysam.TabixFile(self.local, "r")
def tearDown(self):
if self.remote_file is None:
return
self.remote_file.close()
self.local_file.close()
def testFetchAll(self):
if self.remote_file is None:
return
remote_result = list(self.remote_file.fetch())
local_result = list(self.local_file.fetch())
self.assertEqual(len(remote_result), len(local_result))
for x, y in zip(remote_result, local_result):
self.assertEqual(x, y)
def testHeader(self):
if self.remote_file is None:
return
self.assertEqual(list(self.local_file.header), [])
self.assertRaises(AttributeError,
getattr,
self.remote_file,
"header")
class TestIndexArgument(unittest.TestCase):
filename_src = os.path.join(DATADIR, "example.vcf.gz")
filename_dst = "tmp_example.vcf.gz"
index_src = os.path.join(DATADIR, "example.vcf.gz.tbi")
index_dst = "tmp_index_example.vcf.gz.tbi"
preset = "vcf"
def testFetchAll(self):
shutil.copyfile(self.filename_src, self.filename_dst)
shutil.copyfile(self.index_src, self.index_dst)
with pysam.TabixFile(
self.filename_src, "r", index=self.index_src) as same_basename_file:
same_basename_results = list(same_basename_file.fetch())
with pysam.TabixFile(
self.filename_dst, "r", index=self.index_dst) as diff_index_file:
diff_index_result = list(diff_index_file.fetch())
self.assertEqual(len(same_basename_results), len(diff_index_result))
for x, y in zip(same_basename_results, diff_index_result):
self.assertEqual(x, y)
os.unlink(self.filename_dst)
os.unlink(self.index_dst)
def _TestMultipleIteratorsHelper(filename, multiple_iterators):
'''open file within scope, return iterator.'''
tabix = pysam.TabixFile(filename)
iterator = tabix.fetch(parser=pysam.asGTF(),
multiple_iterators=multiple_iterators)
tabix.close()
return iterator
class TestBackwardsCompatibility(unittest.TestCase):
"""check if error is raised if a tabix file from an
old version is accessed from pysam"""
def check(self, filename, raises=None):
with pysam.TabixFile(filename) as tf:
ref = loadAndConvert(filename)
if raises is None:
self.assertEqual(len(list(tf.fetch())), len(ref))
else:
self.assertRaises(raises, tf.fetch)
def testVCF0v23(self):
self.check(os.path.join(DATADIR, "example_0v23.vcf.gz"),
ValueError)
def testBED0v23(self):
self.check(os.path.join(DATADIR, "example_0v23.bed.gz"),
ValueError)
def testVCF0v26(self):
self.check(os.path.join(DATADIR, "example_0v26.vcf.gz"),
ValueError)
def testBED0v26(self):
self.check(os.path.join(DATADIR, "example_0v26.bed.gz"),
ValueError)
def testVCF(self):
self.check(os.path.join(DATADIR, "example.vcf.gz"))
def testBED(self):
self.check(os.path.join(DATADIR, "example.bed.gz"))
def testEmpty(self):
self.check(os.path.join(DATADIR, "empty.bed.gz"))
class TestMultipleIterators(unittest.TestCase):
filename = os.path.join(DATADIR, "example.gtf.gz")
def testJoinedIterators(self):
# two iterators working on the same file
with pysam.TabixFile(self.filename) as tabix:
a = tabix.fetch(parser=pysam.asGTF()).next()
b = tabix.fetch(parser=pysam.asGTF()).next()
# the first two lines differ only by the feature field
self.assertEqual(a.feature, "UTR")
self.assertEqual(b.feature, "exon")
self.assertEqual(re.sub("UTR", "", str(a)),
re.sub("exon", "", str(b)))
def testDisjointIterators(self):
# two iterators working on the same file
with pysam.TabixFile(self.filename) as tabix:
a = tabix.fetch(parser=pysam.asGTF(), multiple_iterators=True).next()
b = tabix.fetch(parser=pysam.asGTF(), multiple_iterators=True).next()
# both iterators are at top of file
self.assertEqual(str(a), str(b))
def testScope(self):
# technically it does not really test if the scope is correct
i = _TestMultipleIteratorsHelper(self.filename,
multiple_iterators=True)
self.assertTrue(i.next())
i = _TestMultipleIteratorsHelper(self.filename,
multiple_iterators=False)
self.assertRaises(IOError, i.next)
def testDoubleFetch(self):
with pysam.TabixFile(self.filename) as f:
for a, b in zip(f.fetch(multiple_iterators=True),
f.fetch(multiple_iterators=True)):
self.assertEqual(str(a), str(b))
class TestContextManager(unittest.TestCase):
filename = os.path.join(DATADIR, "example.gtf.gz")
def testManager(self):
with pysam.TabixFile(self.filename) as tabixfile:
tabixfile.fetch()
self.assertEqual(tabixfile.closed, True)
if __name__ == "__main__":
unittest.main()
|
mit
| -6,562,526,742,773,220,000 | -747,608,911,327,343,100 | 31.438346 | 84 | 0.521406 | false |
harvardinformatics/jobTree
|
src/jobTreeStats.py
|
3
|
32075
|
#!/usr/bin/env python
# Copyright (C) 2011 by Benedict Paten ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" Reports the state of your given job tree.
"""
import cPickle
import os
from random import choice
import string
import sys
import time
import xml.etree.ElementTree as ET # not cElementTree so as to allow caching
from xml.dom import minidom # For making stuff pretty
from sonLib.bioio import logger
from sonLib.bioio import logFile
from sonLib.bioio import getBasicOptionParser
from sonLib.bioio import parseBasicOptions
from sonLib.bioio import TempFileTree
from jobTree.src.master import getEnvironmentFileName, getJobFileDirName
from jobTree.src.master import getStatsFileName, getConfigFileName
from jobTree.src.master import getStatsCacheFileName
class JTTag(object):
""" Convenience object that stores xml attributes as object attributes.
"""
def __init__(self, tree):
""" Given an ElementTree tag, build a convenience object.
"""
for name in ["total_time", "median_clock", "total_memory",
"median_wait", "total_number", "average_time",
"median_memory", "min_number_per_slave", "average_wait",
"total_clock", "median_time", "min_time", "min_wait",
"max_clock", "max_wait", "total_wait", "min_clock",
"average_memory", "max_number_per_slave", "max_memory",
"average_memory", "max_number_per_slave", "max_memory",
"median_number_per_slave", "average_number_per_slave",
"max_time", "average_clock", "min_memory", "min_clock",
]:
setattr(self, name, self.__get(tree, name))
self.name = tree.tag
def __get(self, tag, name):
if name in tag.attrib:
value = tag.attrib[name]
else:
return float("nan")
try:
a = float(value)
except ValueError:
a = float("nan")
return a
class ColumnWidths(object):
""" Convenience object that stores the width of columns for printing.
Helps make things pretty.
"""
def __init__(self):
self.categories = ["time", "clock", "wait", "memory"]
self.fields_count = ["count", "min", "med", "ave", "max", "total"]
self.fields = ["min", "med", "ave", "max", "total"]
self.data = {}
for category in self.categories:
for field in self.fields_count:
self.setWidth(category, field, 8)
def title(self, category):
""" Return the total printed length of this category item.
"""
return sum(
map(lambda x: self.getWidth(category, x), self.fields))
def getWidth(self, category, field):
category = category.lower()
return self.data["%s_%s" % (category, field)]
def setWidth(self, category, field, width):
category = category.lower()
self.data["%s_%s" % (category, field)] = width
def report(self):
for c in self.categories:
for f in self.fields:
print '%s %s %d' % (c, f, self.getWidth(c, f))
def initializeOptions(parser):
##########################################
# Construct the arguments.
##########################################
parser.add_option("--jobTree", dest="jobTree", default='./jobTree',
help="Directory containing the job tree. Can also be specified as the single argument to the script. Default=%default")
parser.add_option("--outputFile", dest="outputFile", default=None,
help="File in which to write results")
parser.add_option("--raw", action="store_true", default=False,
help="output the raw xml data.")
parser.add_option("--pretty", "--human", action="store_true", default=False,
help=("if not raw, prettify the numbers to be "
"human readable."))
parser.add_option("--categories",
help=("comma separated list from [time, clock, wait, "
"memory]"))
parser.add_option("--sortCategory", default="time",
help=("how to sort Target list. may be from [alpha, "
"time, clock, wait, memory, count]. "
"default=%(default)s"))
parser.add_option("--sortField", default="med",
help=("how to sort Target list. may be from [min, "
"med, ave, max, total]. "
"default=%(default)s"))
parser.add_option("--sortReverse", "--reverseSort", default=False,
action="store_true",
help="reverse sort order.")
parser.add_option("--cache", default=False, action="store_true",
help="stores a cache to speed up data display.")
def checkOptions(options, args, parser):
""" Check options, throw parser.error() if something goes wrong
"""
logger.info("Parsed arguments")
if len(sys.argv) == 1:
parser.print_help()
sys.exit(0)
assert len(args) <= 1 # Only jobtree may be specified as argument
if len(args) == 1: # Allow jobTree directory as arg
options.jobTree = args[0]
logger.info("Checking if we have files for job tree")
if options.jobTree == None:
parser.error("Specify --jobTree")
if not os.path.exists(options.jobTree):
parser.error("--jobTree %s does not exist"
% options.jobTree)
if not os.path.isdir(options.jobTree):
parser.error("--jobTree %s is not a directory"
% options.jobTree)
if not os.path.isfile(getConfigFileName(options.jobTree)):
parser.error("A valid job tree must contain the config file")
if not os.path.isfile(getStatsFileName(options.jobTree)):
parser.error("The job-tree was run without the --stats flag, "
"so no stats were created")
defaultCategories = ["time", "clock", "wait", "memory"]
if options.categories is None:
options.categories = defaultCategories
else:
options.categories = map(lambda x: x.lower(),
options.categories.split(","))
for c in options.categories:
if c not in defaultCategories:
parser.error("Unknown category %s. Must be from %s"
% (c, str(defaultCategories)))
extraSort = ["count", "alpha"]
if options.sortCategory is not None:
if (options.sortCategory not in defaultCategories and
options.sortCategory not in extraSort):
parser.error("Unknown --sortCategory %s. Must be from %s"
% (options.sortCategory,
str(defaultCategories + extraSort)))
sortFields = ["min", "med", "ave", "max", "total"]
if options.sortField is not None:
if (options.sortField not in sortFields):
parser.error("Unknown --sortField %s. Must be from %s"
% (options.sortField, str(sortFields)))
logger.info("Checked arguments")
def prettyXml(elem):
""" Return a pretty-printed XML string for the ElementTree Element.
"""
roughString = ET.tostring(elem, "utf-8")
reparsed = minidom.parseString(roughString)
return reparsed.toprettyxml(indent=" ")
def padStr(s, field=None):
""" Pad the begining of a string with spaces, if necessary.
"""
if field is None:
return s
else:
if len(s) >= field:
return s
else:
return " " * (field - len(s)) + s
def prettyMemory(k, field=None, isBytes=False):
""" Given input k as kilobytes, return a nicely formatted string.
"""
from math import floor
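    # k is in kilobytes (or bytes when isBytes); format with K, M, G, T or P as appropriate.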
if isBytes:
k /= 1024
if k < 1024:
return padStr("%gK" % k, field)
if k < (1024 * 1024):
return padStr("%.1fM" % (k / 1024.0), field)
if k < (1024 * 1024 * 1024):
return padStr("%.1fG" % (k / 1024.0 / 1024.0), field)
if k < (1024 * 1024 * 1024 * 1024):
return padStr("%.1fT" % (k / 1024.0 / 1024.0 / 1024.0), field)
if k < (1024 * 1024 * 1024 * 1024 * 1024):
return padStr("%.1fP" % (k / 1024.0 / 1024.0 / 1024.0 / 1024.0), field)
def prettyTime(t, field=None):
""" Given input t as seconds, return a nicely formatted string.
"""
from math import floor
pluralDict = {True: "s", False: ""}
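    # Tiered formatting: plain seconds under 2 minutes, minutes/seconds under 2 hours,
    # hours/minutes/seconds under ~1 day, days under a week, otherwise weeks.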
if t < 120:
return padStr("%ds" % t, field)
if t < 120 * 60:
m = floor(t / 60.)
s = t % 60
return padStr("%dm%ds" % (m, s), field)
if t < 25 * 60 * 60:
h = floor(t / 60. / 60.)
m = floor((t - (h * 60. * 60.)) / 60.)
s = t % 60
return padStr("%dh%gm%ds" % (h, m, s), field)
if t < 7 * 24 * 60 * 60:
d = floor(t / 24. / 60. / 60.)
h = floor((t - (d * 24. * 60. * 60.)) / 60. / 60.)
m = floor((t
- (d * 24. * 60. * 60.)
- (h * 60. * 60.))
/ 60.)
s = t % 60
dPlural = pluralDict[d > 1]
return padStr("%dday%s%dh%dm%ds" % (d, dPlural, h, m, s), field)
w = floor(t / 7. / 24. / 60. / 60.)
d = floor((t - (w * 7 * 24 * 60 * 60)) / 24. / 60. / 60.)
h = floor((t
- (w * 7. * 24. * 60. * 60.)
- (d * 24. * 60. * 60.))
/ 60. / 60.)
m = floor((t
- (w * 7. * 24. * 60. * 60.)
- (d * 24. * 60. * 60.)
- (h * 60. * 60.))
/ 60.)
s = t % 60
wPlural = pluralDict[w > 1]
dPlural = pluralDict[d > 1]
return padStr("%dweek%s%dday%s%dh%dm%ds" % (w, wPlural, d,
dPlural, h, m, s), field)
def reportTime(t, options, field=None):
""" Given t seconds, report back the correct format as string.
"""
if options.pretty:
return prettyTime(t, field=field)
else:
if field is not None:
return "%*.2f" % (field, t)
else:
return "%.2f" % t
def reportMemory(k, options, field=None, isBytes=False):
""" Given k kilobytes, report back the correct format as string.
"""
if options.pretty:
return prettyMemory(int(k), field=field, isBytes=isBytes)
else:
if isBytes:
k /= 1024.
if field is not None:
return "%*dK" % (field - 1, k) # -1 for the "K"
else:
return "%dK" % int(k)
def reportNumber(n, options, field=None):
""" Given n an integer, report back the correct format as string.
"""
if field is not None:
return "%*g" % (field, n)
else:
return "%g" % n
def refineData(root, options):
""" walk down from the root and gather up the important bits.
"""
slave = JTTag(root.find("slave"))
target = JTTag(root.find("target"))
targetTypesTree = root.find("target_types")
targetTypes = []
for child in targetTypesTree:
targetTypes.append(JTTag(child))
return root, slave, target, targetTypes
def sprintTag(key, tag, options, columnWidths=None):
""" Generate a pretty-print ready string from a JTTag().
"""
if columnWidths is None:
columnWidths = ColumnWidths()
header = " %7s " % decorateTitle("Count", options)
sub_header = " %7s " % "n"
tag_str = " %s" % reportNumber(tag.total_number, options, field=7)
out_str = ""
if key == "target":
out_str += " %-12s | %7s%7s%7s%7s\n" % ("Slave Jobs", "min",
"med", "ave", "max")
slave_str = "%s| " % (" " * 14)
for t in [tag.min_number_per_slave, tag.median_number_per_slave,
tag.average_number_per_slave, tag.max_number_per_slave]:
slave_str += reportNumber(t, options, field=7)
out_str += slave_str + "\n"
if "time" in options.categories:
header += "| %*s " % (columnWidths.title("time"),
decorateTitle("Time", options))
sub_header += decorateSubHeader("Time", columnWidths, options)
tag_str += " | "
for t, width in [
(tag.min_time, columnWidths.getWidth("time", "min")),
(tag.median_time, columnWidths.getWidth("time", "med")),
(tag.average_time, columnWidths.getWidth("time", "ave")),
(tag.max_time, columnWidths.getWidth("time", "max")),
(tag.total_time, columnWidths.getWidth("time", "total")),
]:
tag_str += reportTime(t, options, field=width)
if "clock" in options.categories:
header += "| %*s " % (columnWidths.title("clock"),
decorateTitle("Clock", options))
sub_header += decorateSubHeader("Clock", columnWidths, options)
tag_str += " | "
for t, width in [
(tag.min_clock, columnWidths.getWidth("clock", "min")),
(tag.median_clock, columnWidths.getWidth("clock", "med")),
(tag.average_clock, columnWidths.getWidth("clock", "ave")),
(tag.max_clock, columnWidths.getWidth("clock", "max")),
(tag.total_clock, columnWidths.getWidth("clock", "total")),
]:
tag_str += reportTime(t, options, field=width)
if "wait" in options.categories:
header += "| %*s " % (columnWidths.title("wait"),
decorateTitle("Wait", options))
sub_header += decorateSubHeader("Wait", columnWidths, options)
tag_str += " | "
for t, width in [
(tag.min_wait, columnWidths.getWidth("wait", "min")),
(tag.median_wait, columnWidths.getWidth("wait", "med")),
(tag.average_wait, columnWidths.getWidth("wait", "ave")),
(tag.max_wait, columnWidths.getWidth("wait", "max")),
(tag.total_wait, columnWidths.getWidth("wait", "total")),
]:
tag_str += reportTime(t, options, field=width)
if "memory" in options.categories:
header += "| %*s " % (columnWidths.title("memory"),
decorateTitle("Memory", options))
sub_header += decorateSubHeader("Memory", columnWidths, options)
tag_str += " | "
for t, width in [
(tag.min_memory, columnWidths.getWidth("memory", "min")),
(tag.median_memory, columnWidths.getWidth("memory", "med")),
(tag.average_memory, columnWidths.getWidth("memory", "ave")),
(tag.max_memory, columnWidths.getWidth("memory", "max")),
(tag.total_memory, columnWidths.getWidth("memory", "total")),
]:
tag_str += reportMemory(t, options, field=width)
out_str += header + "\n"
out_str += sub_header + "\n"
out_str += tag_str + "\n"
return out_str
def decorateTitle(title, options):
""" Add a marker to TITLE if the TITLE is sorted on.
"""
if title.lower() == options.sortCategory:
return "%s*" % title
else:
return title
def decorateSubHeader(title, columnWidths, options):
""" Add a marker to the correct field if the TITLE is sorted on.
"""
title = title.lower()
if title != options.sortCategory:
s = "| %*s%*s%*s%*s%*s " % (
columnWidths.getWidth(title, "min"), "min",
columnWidths.getWidth(title, "med"), "med",
columnWidths.getWidth(title, "ave"), "ave",
columnWidths.getWidth(title, "max"), "max",
columnWidths.getWidth(title, "total"), "total")
return s
else:
s = "| "
for field, width in [("min", columnWidths.getWidth(title, "min")),
("med", columnWidths.getWidth(title, "med")),
("ave", columnWidths.getWidth(title, "ave")),
("max", columnWidths.getWidth(title, "max")),
("total", columnWidths.getWidth(title, "total"))]:
if options.sortField == field:
s += "%*s*" % (width - 1, field)
else:
s += "%*s" % (width, field)
s += " "
return s
def get(tree, name):
""" Return a float value attribute NAME from TREE.
"""
if name in tree.attrib:
value = tree.attrib[name]
else:
return float("nan")
try:
a = float(value)
except ValueError:
a = float("nan")
return a
def sortTargets(targetTypes, options):
""" Return a targetTypes all sorted.
"""
longforms = {"med": "median",
"ave": "average",
"min": "min",
"total": "total",
"max": "max",}
sortField = longforms[options.sortField]
if (options.sortCategory == "time" or
options.sortCategory == "clock" or
options.sortCategory == "wait" or
options.sortCategory == "memory"
):
return sorted(
targetTypes,
key=lambda tag: getattr(tag, "%s_%s"
% (sortField, options.sortCategory)),
reverse=options.sortReverse)
elif options.sortCategory == "alpha":
return sorted(
targetTypes, key=lambda tag: tag.name,
reverse=options.sortReverse)
elif options.sortCategory == "count":
return sorted(targetTypes, key=lambda tag: tag.total_number,
reverse=options.sortReverse)
def reportPrettyData(root, slave, target, target_types, options):
""" print the important bits out.
"""
out_str = "Batch System: %s\n" % root.attrib["batch_system"]
out_str += ("Default CPU: %s Default Memory: %s\n"
"Job Time: %s Max CPUs: %s Max Threads: %s\n" % (
reportNumber(get(root, "default_cpu"), options),
reportMemory(get(root, "default_memory"), options, isBytes=True),
reportTime(get(root, "job_time"), options),
reportNumber(get(root, "max_cpus"), options),
reportNumber(get(root, "max_threads"), options),
))
out_str += ("Total Clock: %s Total Runtime: %s\n" % (
reportTime(get(root, "total_clock"), options),
reportTime(get(root, "total_run_time"), options),
))
target_types = sortTargets(target_types, options)
columnWidths = computeColumnWidths(target_types, slave, target, options)
out_str += "Slave\n"
out_str += sprintTag("slave", slave, options, columnWidths=columnWidths)
out_str += "Target\n"
out_str += sprintTag("target", target, options, columnWidths=columnWidths)
for t in target_types:
out_str += " %s\n" % t.name
out_str += sprintTag(t.name, t, options, columnWidths=columnWidths)
return out_str
def computeColumnWidths(target_types, slave, target, options):
""" Return a ColumnWidths() object with the correct max widths.
"""
cw = ColumnWidths()
for t in target_types:
updateColumnWidths(t, cw, options)
updateColumnWidths(slave, cw, options)
updateColumnWidths(target, cw, options)
return cw
def updateColumnWidths(tag, cw, options):
""" Update the column width attributes for this tag's fields.
"""
longforms = {"med": "median",
"ave": "average",
"min": "min",
"total": "total",
"max": "max",}
for category in ["time", "clock", "wait", "memory"]:
if category in options.categories:
for field in ["min", "med", "ave", "max", "total"]:
t = getattr(tag, "%s_%s" % (longforms[field], category))
if category in ["time", "clock", "wait"]:
s = reportTime(t, options,
field=cw.getWidth(category, field)).strip()
else:
s = reportMemory(t, options,
field=cw.getWidth(category, field)).strip()
if len(s) >= cw.getWidth(category, field):
# this string is larger than max, width must be increased
cw.setWidth(category, field, len(s) + 1)
def buildElement(element, items, itemName):
""" Create an element for output.
"""
def __round(i):
if i < 0:
logger.debug("I got a less than 0 value: %s" % i)
return 0.0
return i
itemTimes = [ __round(float(item.attrib["time"])) for item in items ]
itemTimes.sort()
itemClocks = [ __round(float(item.attrib["clock"])) for item in items ]
itemClocks.sort()
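    # An item's wait is its "time" attribute minus its "clock" attribute, clamped at zero.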
itemWaits = [ __round(__round(float(item.attrib["time"])) -
__round(float(item.attrib["clock"])))
for item in items ]
itemWaits.sort()
itemMemory = [ __round(float(item.attrib["memory"])) for item in items ]
itemMemory.sort()
assert len(itemClocks) == len(itemTimes)
assert len(itemClocks) == len(itemWaits)
if len(itemTimes) == 0:
itemTimes.append(0)
itemClocks.append(0)
itemWaits.append(0)
itemMemory.append(0)
return ET.SubElement(
element, itemName,
{"total_number":str(len(items)),
"total_time":str(sum(itemTimes)),
"median_time":str(itemTimes[len(itemTimes)/2]),
"average_time":str(sum(itemTimes)/len(itemTimes)),
"min_time":str(min(itemTimes)),
"max_time":str(max(itemTimes)),
"total_clock":str(sum(itemClocks)),
"median_clock":str(itemClocks[len(itemClocks)/2]),
"average_clock":str(sum(itemClocks)/len(itemClocks)),
"min_clock":str(min(itemClocks)),
"max_clock":str(max(itemClocks)),
"total_wait":str(sum(itemWaits)),
"median_wait":str(itemWaits[len(itemWaits)/2]),
"average_wait":str(sum(itemWaits)/len(itemWaits)),
"min_wait":str(min(itemWaits)),
"max_wait":str(max(itemWaits)),
"total_memory":str(sum(itemMemory)),
"median_memory":str(itemMemory[len(itemMemory)/2]),
"average_memory":str(sum(itemMemory)/len(itemMemory)),
"min_memory":str(min(itemMemory)),
"max_memory":str(max(itemMemory))
})
def createSummary(element, containingItems, containingItemName, getFn):
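    """ Annotate element with min/median/average/max counts of the items
    returned by getFn for each containing item (e.g. targets per slave).
    """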
itemCounts = [len(getFn(containingItem)) for
containingItem in containingItems]
itemCounts.sort()
if len(itemCounts) == 0:
itemCounts.append(0)
element.attrib["median_number_per_%s" %
containingItemName] = str(itemCounts[len(itemCounts) / 2])
element.attrib["average_number_per_%s" %
containingItemName] = str(float(sum(itemCounts)) /
len(itemCounts))
element.attrib["min_number_per_%s" %
containingItemName] = str(min(itemCounts))
element.attrib["max_number_per_%s" %
containingItemName] = str(max(itemCounts))
def getSettings(options):
""" Collect and return the stats and config data.
"""
config_file = getConfigFileName(options.jobTree)
stats_file = getStatsFileName(options.jobTree)
try:
config = ET.parse(config_file).getroot()
except ET.ParseError:
sys.stderr.write("The config file xml, %s, is empty.\n" % config_file)
raise
try:
stats = ET.parse(stats_file).getroot() # Try parsing the whole file.
except ET.ParseError: # If it doesn't work then we build the file incrementally
sys.stderr.write("The job tree stats file is incomplete or corrupt, "
"we'll try instead to parse what's in the file "
"incrementally until we reach an error.\n")
fH = open(stats_file, 'r') # Open the file for editing
stats = ET.Element("stats")
try:
for event, elem in ET.iterparse(fH):
if elem.tag == 'slave':
stats.append(elem)
except ET.ParseError:
pass # Do nothing at this point
finally:
fH.close()
return config, stats
def processData(config, stats, options):
##########################################
# Collate the stats and report
##########################################
if stats.find("total_time") == None: # Hack to allow unfinished jobtrees.
ET.SubElement(stats, "total_time", { "time":"0.0", "clock":"0.0"})
collatedStatsTag = ET.Element(
"collated_stats",
{"total_run_time":stats.find("total_time").attrib["time"],
"total_clock":stats.find("total_time").attrib["clock"],
"batch_system":config.attrib["batch_system"],
"job_time":config.attrib["job_time"],
"default_memory":config.attrib["default_memory"],
"default_cpu":config.attrib["default_cpu"],
"max_cpus":config.attrib["max_cpus"],
"max_threads":config.attrib["max_threads"] })
# Add slave info
slaves = stats.findall("slave")
buildElement(collatedStatsTag, slaves, "slave")
# Add aggregated target info
targets = []
for slave in slaves:
targets += slave.findall("target")
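    # Helper handed to createSummary so it can count the targets belonging to each slave.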
    def fn4(job):
        return list(job.findall("target"))
createSummary(buildElement(collatedStatsTag, targets, "target"),
slaves, "slave", fn4)
# Get info for each target
targetNames = set()
for target in targets:
targetNames.add(target.attrib["class"])
targetTypesTag = ET.SubElement(collatedStatsTag, "target_types")
for targetName in targetNames:
targetTypes = [ target for target in targets
if target.attrib["class"] == targetName ]
targetTypeTag = buildElement(targetTypesTag, targetTypes, targetName)
return collatedStatsTag
def reportData(xml_tree, options):
# Now dump it all out to file
if options.raw:
out_str = prettyXml(xml_tree)
else:
root, slave, target, target_types = refineData(xml_tree, options)
out_str = reportPrettyData(root, slave, target, target_types, options)
if options.outputFile != None:
fileHandle = open(options.outputFile, "w")
fileHandle.write(out_str)
fileHandle.close()
# Now dump onto the screen
print out_str
def getNullFile():
""" Guaranteed to return a valid path to a file that does not exist.
"""
charSet = string.ascii_lowercase + "0123456789"
prefix = os.getcwd()
nullFile = "null_%s" % "".join(choice(charSet) for x in xrange(6))
while os.path.exists(os.path.join(prefix, nullFile)):
nullFile = "null_%s" % "".join(choice(charSet) for x in xrange(6))
return os.path.join(os.getcwd(), nullFile)
def getPreferredStatsCacheFileName(options):
""" Determine if the jobtree or the os.getcwd() version should be used.
If no good option exists, return a nonexistent file path.
Note you MUST check to see if the return value exists before using.
"""
null_file = getNullFile()
location_jt = getStatsCacheFileName(options.jobTree)
location_local = os.path.abspath(os.path.join(os.getcwd(),
".stats_cache.pickle"))
# start by looking for the current directory cache.
if os.path.exists(location_local):
loc_file = open(location_local, "r")
data, loc = cPickle.load(loc_file)
if getStatsFileName(options.jobTree) != loc:
# the local cache is from looking up a *different* jobTree
location_local = null_file
if os.path.exists(location_jt) and not os.path.exists(location_local):
# use the jobTree directory version
return location_jt
elif not os.path.exists(location_jt) and os.path.exists(location_local):
# use the os.getcwd() version
return location_local
elif os.path.exists(location_jt) and os.path.exists(location_local):
# check file modify times and use the most recent version
mtime_jt = os.path.getmtime(location_jt)
mtime_local = os.path.getmtime(location_local)
if mtime_jt > mtime_local:
return location_jt
else:
return location_local
else:
return null_file
def unpackData(options):
"""unpackData() opens up the pickle of the last run and pulls out
all the relevant data.
"""
cache_file = getPreferredStatsCacheFileName(options)
if not os.path.exists(cache_file):
return None
if os.path.exists(cache_file):
f = open(cache_file, "r")
try:
data, location = cPickle.load(f)
except EOFError:
# bad cache.
return None
finally:
f.close()
if location == getStatsFileName(options.jobTree):
return data
return None
def packData(data, options):
""" packData stores all of the data in the appropriate pickle cache file.
"""
stats_file = getStatsFileName(options.jobTree)
cache_file = getStatsCacheFileName(options.jobTree)
try:
# try to write to the jobTree directory
payload = (data, stats_file)
f = open(cache_file, "wb")
cPickle.dump(payload, f, 2) # 2 is binary format
f.close()
except IOError:
if not options.cache:
return
# try to write to the current working directory only if --cache
cache_file = os.path.abspath(os.path.join(os.getcwd(),
".stats_cache.pickle"))
payload = (data, stats_file)
f = open(cache_file, "wb")
cPickle.dump(payload, f, 2) # 2 is binary format
f.close()
def cacheAvailable(options):
""" Check to see if a cache is available, return it.
"""
if not os.path.exists(getStatsFileName(options.jobTree)):
return None
cache_file = getPreferredStatsCacheFileName(options)
if not os.path.exists(cache_file):
return None
# check the modify times on the files, see if the cache should be recomputed
mtime_stats = os.path.getmtime(getStatsFileName(options.jobTree))
mtime_cache = os.path.getmtime(cache_file)
if mtime_stats > mtime_cache:
# recompute cache
return None
# cache is fresh, return the cache
return unpackData(options)
def main():
""" Reports stats on the job-tree, use with --stats option to jobTree.
"""
parser = getBasicOptionParser(
"usage: %prog [--jobTree] JOB_TREE_DIR [options]", "%prog 0.1")
initializeOptions(parser)
options, args = parseBasicOptions(parser)
checkOptions(options, args, parser)
collatedStatsTag = cacheAvailable(options)
if collatedStatsTag is None:
config, stats = getSettings(options)
collatedStatsTag = processData(config, stats, options)
reportData(collatedStatsTag, options)
packData(collatedStatsTag, options)
def _test():
import doctest
return doctest.testmod()
if __name__ == "__main__":
_test()
main()
|
mit
| -3,833,424,136,008,637,400 | -2,796,842,831,290,505,000 | 40.016624 | 141 | 0.574217 | false |
z-jason/anki
|
aqt/forms/dconf.py
|
1
|
20860
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'designer/dconf.ui'
#
# Created: Sun Mar 30 10:19:28 2014
# by: PyQt4 UI code generator 4.10.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(494, 454)
self.verticalLayout = QtGui.QVBoxLayout(Dialog)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.label_31 = QtGui.QLabel(Dialog)
self.label_31.setObjectName(_fromUtf8("label_31"))
self.horizontalLayout_2.addWidget(self.label_31)
self.dconf = QtGui.QComboBox(Dialog)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(3)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.dconf.sizePolicy().hasHeightForWidth())
self.dconf.setSizePolicy(sizePolicy)
self.dconf.setObjectName(_fromUtf8("dconf"))
self.horizontalLayout_2.addWidget(self.dconf)
self.confOpts = QtGui.QToolButton(Dialog)
self.confOpts.setMaximumSize(QtCore.QSize(16777215, 32))
self.confOpts.setText(_fromUtf8(""))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/gears.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.confOpts.setIcon(icon)
self.confOpts.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.confOpts.setArrowType(QtCore.Qt.NoArrow)
self.confOpts.setObjectName(_fromUtf8("confOpts"))
self.horizontalLayout_2.addWidget(self.confOpts)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.count = QtGui.QLabel(Dialog)
self.count.setStyleSheet(_fromUtf8("* { color: red }"))
self.count.setText(_fromUtf8(""))
self.count.setAlignment(QtCore.Qt.AlignCenter)
self.count.setWordWrap(True)
self.count.setObjectName(_fromUtf8("count"))
self.verticalLayout.addWidget(self.count)
self.tabWidget = QtGui.QTabWidget(Dialog)
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.tab = QtGui.QWidget()
self.tab.setObjectName(_fromUtf8("tab"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.tab)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label_27 = QtGui.QLabel(self.tab)
self.label_27.setObjectName(_fromUtf8("label_27"))
self.gridLayout.addWidget(self.label_27, 5, 2, 1, 1)
self.label_24 = QtGui.QLabel(self.tab)
self.label_24.setObjectName(_fromUtf8("label_24"))
self.gridLayout.addWidget(self.label_24, 5, 0, 1, 1)
self.lrnFactor = QtGui.QSpinBox(self.tab)
self.lrnFactor.setMinimum(130)
self.lrnFactor.setMaximum(999)
self.lrnFactor.setObjectName(_fromUtf8("lrnFactor"))
self.gridLayout.addWidget(self.lrnFactor, 5, 1, 1, 1)
self.label_8 = QtGui.QLabel(self.tab)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.gridLayout.addWidget(self.label_8, 1, 0, 1, 1)
self.lrnEasyInt = QtGui.QSpinBox(self.tab)
self.lrnEasyInt.setMinimum(1)
self.lrnEasyInt.setObjectName(_fromUtf8("lrnEasyInt"))
self.gridLayout.addWidget(self.lrnEasyInt, 4, 1, 1, 1)
self.lrnGradInt = QtGui.QSpinBox(self.tab)
self.lrnGradInt.setMinimum(1)
self.lrnGradInt.setObjectName(_fromUtf8("lrnGradInt"))
self.gridLayout.addWidget(self.lrnGradInt, 3, 1, 1, 1)
self.newplim = QtGui.QLabel(self.tab)
self.newplim.setText(_fromUtf8(""))
self.newplim.setObjectName(_fromUtf8("newplim"))
self.gridLayout.addWidget(self.newplim, 2, 2, 1, 1)
self.label_5 = QtGui.QLabel(self.tab)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.gridLayout.addWidget(self.label_5, 4, 0, 1, 1)
self.label_4 = QtGui.QLabel(self.tab)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout.addWidget(self.label_4, 3, 0, 1, 1)
self.newPerDay = QtGui.QSpinBox(self.tab)
self.newPerDay.setMaximum(9999)
self.newPerDay.setObjectName(_fromUtf8("newPerDay"))
self.gridLayout.addWidget(self.newPerDay, 2, 1, 1, 1)
self.label_6 = QtGui.QLabel(self.tab)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.gridLayout.addWidget(self.label_6, 2, 0, 1, 1)
self.lrnSteps = QtGui.QLineEdit(self.tab)
self.lrnSteps.setObjectName(_fromUtf8("lrnSteps"))
self.gridLayout.addWidget(self.lrnSteps, 0, 1, 1, 2)
self.label_2 = QtGui.QLabel(self.tab)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)
self.newOrder = QtGui.QComboBox(self.tab)
self.newOrder.setObjectName(_fromUtf8("newOrder"))
self.gridLayout.addWidget(self.newOrder, 1, 1, 1, 2)
self.bury = QtGui.QCheckBox(self.tab)
self.bury.setObjectName(_fromUtf8("bury"))
self.gridLayout.addWidget(self.bury, 6, 0, 1, 3)
self.label_9 = QtGui.QLabel(self.tab)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.gridLayout.addWidget(self.label_9, 4, 2, 1, 1)
self.label_7 = QtGui.QLabel(self.tab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_7.sizePolicy().hasHeightForWidth())
self.label_7.setSizePolicy(sizePolicy)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.gridLayout.addWidget(self.label_7, 3, 2, 1, 1)
self.verticalLayout_2.addLayout(self.gridLayout)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_2.addItem(spacerItem)
self.tabWidget.addTab(self.tab, _fromUtf8(""))
self.tab_3 = QtGui.QWidget()
self.tab_3.setObjectName(_fromUtf8("tab_3"))
self.verticalLayout_4 = QtGui.QVBoxLayout(self.tab_3)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.gridLayout_3 = QtGui.QGridLayout()
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.label_20 = QtGui.QLabel(self.tab_3)
self.label_20.setObjectName(_fromUtf8("label_20"))
self.gridLayout_3.addWidget(self.label_20, 1, 0, 1, 1)
self.easyBonus = QtGui.QSpinBox(self.tab_3)
self.easyBonus.setMinimum(100)
self.easyBonus.setMaximum(1000)
self.easyBonus.setSingleStep(5)
self.easyBonus.setObjectName(_fromUtf8("easyBonus"))
self.gridLayout_3.addWidget(self.easyBonus, 1, 1, 1, 1)
self.label_21 = QtGui.QLabel(self.tab_3)
self.label_21.setObjectName(_fromUtf8("label_21"))
self.gridLayout_3.addWidget(self.label_21, 1, 2, 1, 1)
self.label_34 = QtGui.QLabel(self.tab_3)
self.label_34.setObjectName(_fromUtf8("label_34"))
self.gridLayout_3.addWidget(self.label_34, 2, 2, 1, 1)
self.revPerDay = QtGui.QSpinBox(self.tab_3)
self.revPerDay.setMinimum(0)
self.revPerDay.setMaximum(9999)
self.revPerDay.setObjectName(_fromUtf8("revPerDay"))
self.gridLayout_3.addWidget(self.revPerDay, 0, 1, 1, 1)
self.label_33 = QtGui.QLabel(self.tab_3)
self.label_33.setObjectName(_fromUtf8("label_33"))
self.gridLayout_3.addWidget(self.label_33, 2, 0, 1, 1)
self.label_37 = QtGui.QLabel(self.tab_3)
self.label_37.setObjectName(_fromUtf8("label_37"))
self.gridLayout_3.addWidget(self.label_37, 0, 0, 1, 1)
self.label_3 = QtGui.QLabel(self.tab_3)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout_3.addWidget(self.label_3, 3, 0, 1, 1)
self.maxIvl = QtGui.QSpinBox(self.tab_3)
self.maxIvl.setMinimum(1)
self.maxIvl.setMaximum(99999)
self.maxIvl.setObjectName(_fromUtf8("maxIvl"))
self.gridLayout_3.addWidget(self.maxIvl, 3, 1, 1, 1)
self.label_23 = QtGui.QLabel(self.tab_3)
self.label_23.setObjectName(_fromUtf8("label_23"))
self.gridLayout_3.addWidget(self.label_23, 3, 2, 1, 1)
self.revplim = QtGui.QLabel(self.tab_3)
self.revplim.setText(_fromUtf8(""))
self.revplim.setObjectName(_fromUtf8("revplim"))
self.gridLayout_3.addWidget(self.revplim, 0, 2, 1, 1)
self.fi1 = QtGui.QDoubleSpinBox(self.tab_3)
self.fi1.setDecimals(0)
self.fi1.setMinimum(0.0)
self.fi1.setMaximum(999.0)
self.fi1.setSingleStep(1.0)
self.fi1.setProperty("value", 100.0)
self.fi1.setObjectName(_fromUtf8("fi1"))
self.gridLayout_3.addWidget(self.fi1, 2, 1, 1, 1)
self.buryRev = QtGui.QCheckBox(self.tab_3)
self.buryRev.setObjectName(_fromUtf8("buryRev"))
self.gridLayout_3.addWidget(self.buryRev, 4, 0, 1, 3)
self.verticalLayout_4.addLayout(self.gridLayout_3)
spacerItem1 = QtGui.QSpacerItem(20, 152, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_4.addItem(spacerItem1)
self.tabWidget.addTab(self.tab_3, _fromUtf8(""))
self.tab_2 = QtGui.QWidget()
self.tab_2.setObjectName(_fromUtf8("tab_2"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.tab_2)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.gridLayout_2 = QtGui.QGridLayout()
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.label_17 = QtGui.QLabel(self.tab_2)
self.label_17.setObjectName(_fromUtf8("label_17"))
self.gridLayout_2.addWidget(self.label_17, 0, 0, 1, 1)
self.lapSteps = QtGui.QLineEdit(self.tab_2)
self.lapSteps.setObjectName(_fromUtf8("lapSteps"))
self.gridLayout_2.addWidget(self.lapSteps, 0, 1, 1, 2)
self.label = QtGui.QLabel(self.tab_2)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout_2.addWidget(self.label, 1, 0, 1, 1)
self.label_10 = QtGui.QLabel(self.tab_2)
self.label_10.setObjectName(_fromUtf8("label_10"))
self.gridLayout_2.addWidget(self.label_10, 3, 0, 1, 1)
self.leechThreshold = QtGui.QSpinBox(self.tab_2)
self.leechThreshold.setObjectName(_fromUtf8("leechThreshold"))
self.gridLayout_2.addWidget(self.leechThreshold, 3, 1, 1, 1)
self.label_11 = QtGui.QLabel(self.tab_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_11.sizePolicy().hasHeightForWidth())
self.label_11.setSizePolicy(sizePolicy)
self.label_11.setObjectName(_fromUtf8("label_11"))
self.gridLayout_2.addWidget(self.label_11, 3, 2, 1, 1)
self.label_12 = QtGui.QLabel(self.tab_2)
self.label_12.setObjectName(_fromUtf8("label_12"))
self.gridLayout_2.addWidget(self.label_12, 4, 0, 1, 1)
self.lapMinInt = QtGui.QSpinBox(self.tab_2)
self.lapMinInt.setMinimum(1)
self.lapMinInt.setMaximum(99)
self.lapMinInt.setObjectName(_fromUtf8("lapMinInt"))
self.gridLayout_2.addWidget(self.lapMinInt, 2, 1, 1, 1)
self.label_13 = QtGui.QLabel(self.tab_2)
self.label_13.setObjectName(_fromUtf8("label_13"))
self.gridLayout_2.addWidget(self.label_13, 2, 0, 1, 1)
self.label_14 = QtGui.QLabel(self.tab_2)
self.label_14.setObjectName(_fromUtf8("label_14"))
self.gridLayout_2.addWidget(self.label_14, 2, 2, 1, 1)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.leechAction = QtGui.QComboBox(self.tab_2)
self.leechAction.setObjectName(_fromUtf8("leechAction"))
self.leechAction.addItem(_fromUtf8(""))
self.leechAction.addItem(_fromUtf8(""))
self.horizontalLayout.addWidget(self.leechAction)
spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem2)
self.gridLayout_2.addLayout(self.horizontalLayout, 4, 1, 1, 2)
self.label_28 = QtGui.QLabel(self.tab_2)
self.label_28.setObjectName(_fromUtf8("label_28"))
self.gridLayout_2.addWidget(self.label_28, 1, 2, 1, 1)
self.lapMult = QtGui.QSpinBox(self.tab_2)
self.lapMult.setMaximum(100)
self.lapMult.setSingleStep(5)
self.lapMult.setObjectName(_fromUtf8("lapMult"))
self.gridLayout_2.addWidget(self.lapMult, 1, 1, 1, 1)
self.verticalLayout_3.addLayout(self.gridLayout_2)
spacerItem3 = QtGui.QSpacerItem(20, 72, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_3.addItem(spacerItem3)
self.tabWidget.addTab(self.tab_2, _fromUtf8(""))
self.tab_5 = QtGui.QWidget()
self.tab_5.setObjectName(_fromUtf8("tab_5"))
self.verticalLayout_6 = QtGui.QVBoxLayout(self.tab_5)
self.verticalLayout_6.setObjectName(_fromUtf8("verticalLayout_6"))
self.gridLayout_5 = QtGui.QGridLayout()
self.gridLayout_5.setObjectName(_fromUtf8("gridLayout_5"))
self.label_25 = QtGui.QLabel(self.tab_5)
self.label_25.setObjectName(_fromUtf8("label_25"))
self.gridLayout_5.addWidget(self.label_25, 0, 0, 1, 1)
self.maxTaken = QtGui.QSpinBox(self.tab_5)
self.maxTaken.setMinimum(30)
self.maxTaken.setMaximum(3600)
self.maxTaken.setSingleStep(10)
self.maxTaken.setObjectName(_fromUtf8("maxTaken"))
self.gridLayout_5.addWidget(self.maxTaken, 0, 1, 1, 1)
self.label_26 = QtGui.QLabel(self.tab_5)
self.label_26.setObjectName(_fromUtf8("label_26"))
self.gridLayout_5.addWidget(self.label_26, 0, 2, 1, 1)
self.verticalLayout_6.addLayout(self.gridLayout_5)
self.showTimer = QtGui.QCheckBox(self.tab_5)
self.showTimer.setObjectName(_fromUtf8("showTimer"))
self.verticalLayout_6.addWidget(self.showTimer)
self.autoplaySounds = QtGui.QCheckBox(self.tab_5)
self.autoplaySounds.setObjectName(_fromUtf8("autoplaySounds"))
self.verticalLayout_6.addWidget(self.autoplaySounds)
self.replayQuestion = QtGui.QCheckBox(self.tab_5)
self.replayQuestion.setChecked(False)
self.replayQuestion.setObjectName(_fromUtf8("replayQuestion"))
self.verticalLayout_6.addWidget(self.replayQuestion)
spacerItem4 = QtGui.QSpacerItem(20, 199, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_6.addItem(spacerItem4)
self.tabWidget.addTab(self.tab_5, _fromUtf8(""))
self.tab_4 = QtGui.QWidget()
self.tab_4.setObjectName(_fromUtf8("tab_4"))
self.verticalLayout_5 = QtGui.QVBoxLayout(self.tab_4)
self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
self.label_22 = QtGui.QLabel(self.tab_4)
self.label_22.setObjectName(_fromUtf8("label_22"))
self.verticalLayout_5.addWidget(self.label_22)
self.desc = QtGui.QTextEdit(self.tab_4)
self.desc.setObjectName(_fromUtf8("desc"))
self.verticalLayout_5.addWidget(self.desc)
self.tabWidget.addTab(self.tab_4, _fromUtf8(""))
self.verticalLayout.addWidget(self.tabWidget)
self.buttonBox = QtGui.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Help|QtGui.QDialogButtonBox.Ok|QtGui.QDialogButtonBox.RestoreDefaults)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(Dialog)
self.tabWidget.setCurrentIndex(0)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Dialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
Dialog.setTabOrder(self.dconf, self.confOpts)
Dialog.setTabOrder(self.confOpts, self.tabWidget)
Dialog.setTabOrder(self.tabWidget, self.lrnSteps)
Dialog.setTabOrder(self.lrnSteps, self.newOrder)
Dialog.setTabOrder(self.newOrder, self.newPerDay)
Dialog.setTabOrder(self.newPerDay, self.lrnGradInt)
Dialog.setTabOrder(self.lrnGradInt, self.lrnEasyInt)
Dialog.setTabOrder(self.lrnEasyInt, self.lrnFactor)
Dialog.setTabOrder(self.lrnFactor, self.bury)
Dialog.setTabOrder(self.bury, self.revPerDay)
Dialog.setTabOrder(self.revPerDay, self.easyBonus)
Dialog.setTabOrder(self.easyBonus, self.fi1)
Dialog.setTabOrder(self.fi1, self.maxIvl)
Dialog.setTabOrder(self.maxIvl, self.buryRev)
Dialog.setTabOrder(self.buryRev, self.lapSteps)
Dialog.setTabOrder(self.lapSteps, self.lapMult)
Dialog.setTabOrder(self.lapMult, self.lapMinInt)
Dialog.setTabOrder(self.lapMinInt, self.leechThreshold)
Dialog.setTabOrder(self.leechThreshold, self.leechAction)
Dialog.setTabOrder(self.leechAction, self.maxTaken)
Dialog.setTabOrder(self.maxTaken, self.showTimer)
Dialog.setTabOrder(self.showTimer, self.autoplaySounds)
Dialog.setTabOrder(self.autoplaySounds, self.replayQuestion)
Dialog.setTabOrder(self.replayQuestion, self.buttonBox)
Dialog.setTabOrder(self.buttonBox, self.desc)
def retranslateUi(self, Dialog):
self.label_31.setText(_("Options group:"))
self.label_27.setText(_("%"))
self.label_24.setText(_("Starting ease"))
self.label_8.setText(_("Order"))
self.label_5.setText(_("Easy interval"))
self.label_4.setText(_("Graduating interval"))
self.label_6.setText(_("New cards/day"))
self.label_2.setText(_("Steps (in minutes)"))
self.bury.setText(_("Bury related new cards until the next day"))
self.label_9.setText(_("days"))
self.label_7.setText(_("days"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _("New Cards"))
self.label_20.setText(_("Easy bonus"))
self.label_21.setText(_("%"))
self.label_34.setText(_("%"))
self.label_33.setText(_("Interval modifier"))
self.label_37.setText(_("Maximum reviews/day"))
self.label_3.setText(_("Maximum interval"))
self.label_23.setText(_("days"))
self.buryRev.setText(_("Bury related reviews until the next day"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _("Reviews"))
self.label_17.setText(_("Steps (in minutes)"))
self.label.setText(_("New interval"))
self.label_10.setText(_("Leech threshold"))
self.label_11.setText(_("lapses"))
self.label_12.setText(_("Leech action"))
self.label_13.setText(_("Minimum interval"))
self.label_14.setText(_("days"))
self.leechAction.setItemText(0, _("Suspend Card"))
self.leechAction.setItemText(1, _("Tag Only"))
self.label_28.setText(_("%"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _("Lapses"))
self.label_25.setText(_("Ignore answer times longer than"))
self.label_26.setText(_("seconds"))
self.showTimer.setText(_("Show answer timer"))
self.autoplaySounds.setText(_("Automatically play audio"))
self.replayQuestion.setText(_("When answer shown, replay both question and answer audio"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_5), _("General"))
self.label_22.setText(_("Description to show on study screen (current deck only):"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), _("Description"))
import icons_rc
|
agpl-3.0
| 6,135,821,399,201,983,000 | -6,920,666,509,044,721,000 | 52.901809 | 135 | 0.671381 | false |
Stanford-Online/edx-platform
|
lms/djangoapps/mobile_api/decorators.py
|
22
|
2476
|
"""
Decorators for Mobile APIs.
"""
import functools
from django.http import Http404
from opaque_keys.edx.keys import CourseKey
from rest_framework import status
from rest_framework.response import Response
from lms.djangoapps.courseware.courses import get_course_with_access
from lms.djangoapps.courseware.courseware_access_exception import CoursewareAccessException
from lms.djangoapps.courseware.exceptions import CourseAccessRedirect
from openedx.core.lib.api.view_utils import view_auth_classes
from xmodule.modulestore.django import modulestore
def mobile_course_access(depth=0):
"""
Method decorator for a mobile API endpoint that verifies the user has access to the course in a mobile context.
"""
def _decorator(func):
"""Outer method decorator."""
@functools.wraps(func)
def _wrapper(self, request, *args, **kwargs):
"""
Expects kwargs to contain 'course_id'.
Passes the course descriptor to the given decorated function.
Raises 404 if access to course is disallowed.
"""
course_id = CourseKey.from_string(kwargs.pop('course_id'))
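            # bulk_operations batches modulestore access for this course while the check runs.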
with modulestore().bulk_operations(course_id):
try:
course = get_course_with_access(
request.user,
'load_mobile',
course_id,
depth=depth,
check_if_enrolled=True,
)
except CoursewareAccessException as error:
return Response(data=error.to_json(), status=status.HTTP_404_NOT_FOUND)
except CourseAccessRedirect as error:
# If the redirect contains information about the triggering AccessError,
# return the information contained in the AccessError.
if error.access_error is not None:
return Response(data=error.access_error.to_json(), status=status.HTTP_404_NOT_FOUND)
# Raise a 404 if the user does not have course access
raise Http404
return func(self, request, course=course, *args, **kwargs)
return _wrapper
return _decorator
def mobile_view(is_user=False):
"""
Function and class decorator that abstracts the authentication and permission checks for mobile api views.
"""
return view_auth_classes(is_user)
|
agpl-3.0
| -2,369,479,246,066,439,000 | -1,024,188,920,910,267,300 | 39.590164 | 115 | 0.626817 | false |
cyanna/edx-platform
|
cms/djangoapps/contentstore/views/tests/test_videos.py
|
17
|
15670
|
#-*- coding: utf-8 -*-
"""
Unit tests for video-related REST APIs.
"""
# pylint: disable=attribute-defined-outside-init
import csv
import json
import dateutil.parser
import re
from StringIO import StringIO
from django.conf import settings
from django.test.utils import override_settings
from mock import Mock, patch
from edxval.api import create_profile, create_video, get_video_info
from contentstore.models import VideoUploadConfig
from contentstore.views.videos import KEY_EXPIRATION_IN_SECONDS, VIDEO_ASSET_TYPE, StatusDisplayStrings
from contentstore.tests.utils import CourseTestCase
from contentstore.utils import reverse_course_url
from xmodule.assetstore import AssetMetadata
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory
class VideoUploadTestMixin(object):
"""
Test cases for the video upload feature
"""
def get_url_for_course_key(self, course_key):
"""Return video handler URL for the given course"""
return reverse_course_url(self.VIEW_NAME, course_key)
def setUp(self):
super(VideoUploadTestMixin, self).setUp()
self.url = self.get_url_for_course_key(self.course.id)
self.test_token = "test_token"
self.course.video_upload_pipeline = {
"course_video_upload_token": self.test_token,
}
self.save_course()
self.profiles = ["profile1", "profile2"]
self.previous_uploads = [
{
"edx_video_id": "test1",
"client_video_id": "test1.mp4",
"duration": 42.0,
"status": "upload",
"encoded_videos": [],
},
{
"edx_video_id": "test2",
"client_video_id": "test2.mp4",
"duration": 128.0,
"status": "file_complete",
"encoded_videos": [
{
"profile": "profile1",
"url": "http://example.com/profile1/test2.mp4",
"file_size": 1600,
"bitrate": 100,
},
{
"profile": "profile2",
"url": "http://example.com/profile2/test2.mov",
"file_size": 16000,
"bitrate": 1000,
},
],
},
{
"edx_video_id": "non-ascii",
"client_video_id": u"nón-ascii-näme.mp4",
"duration": 256.0,
"status": "transcode_active",
"encoded_videos": [
{
"profile": "profile1",
"url": u"http://example.com/profile1/nón-ascii-näme.mp4",
"file_size": 3200,
"bitrate": 100,
},
]
},
]
# Ensure every status string is tested
self.previous_uploads += [
{
"edx_video_id": "status_test_{}".format(status),
"client_video_id": "status_test.mp4",
"duration": 3.14,
"status": status,
"encoded_videos": [],
}
for status in (
StatusDisplayStrings._STATUS_MAP.keys() + # pylint:disable=protected-access
["non_existent_status"]
)
]
for profile in self.profiles:
create_profile(profile)
for video in self.previous_uploads:
create_video(video)
modulestore().save_asset_metadata(
AssetMetadata(
self.course.id.make_asset_key(VIDEO_ASSET_TYPE, video["edx_video_id"])
),
self.user.id
)
def _get_previous_upload(self, edx_video_id):
"""Returns the previous upload with the given video id."""
return next(
video
for video in self.previous_uploads
if video["edx_video_id"] == edx_video_id
)
def test_anon_user(self):
self.client.logout()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
def test_put(self):
response = self.client.put(self.url)
self.assertEqual(response.status_code, 405)
def test_invalid_course_key(self):
response = self.client.get(
self.get_url_for_course_key("Non/Existent/Course")
)
self.assertEqual(response.status_code, 404)
def test_non_staff_user(self):
client, __ = self.create_non_staff_authed_user_client()
response = client.get(self.url)
self.assertEqual(response.status_code, 403)
def test_video_pipeline_not_enabled(self):
settings.FEATURES["ENABLE_VIDEO_UPLOAD_PIPELINE"] = False
self.assertEqual(self.client.get(self.url).status_code, 404)
def test_video_pipeline_not_configured(self):
settings.VIDEO_UPLOAD_PIPELINE = None
self.assertEqual(self.client.get(self.url).status_code, 404)
def test_course_not_configured(self):
self.course.video_upload_pipeline = {}
self.save_course()
self.assertEqual(self.client.get(self.url).status_code, 404)
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_VIDEO_UPLOAD_PIPELINE": True})
@override_settings(VIDEO_UPLOAD_PIPELINE={"BUCKET": "test_bucket", "ROOT_PATH": "test_root"})
class VideosHandlerTestCase(VideoUploadTestMixin, CourseTestCase):
"""Test cases for the main video upload endpoint"""
VIEW_NAME = "videos_handler"
def test_get_json(self):
response = self.client.get_json(self.url)
self.assertEqual(response.status_code, 200)
response_videos = json.loads(response.content)["videos"]
self.assertEqual(len(response_videos), len(self.previous_uploads))
for i, response_video in enumerate(response_videos):
# Videos should be returned by creation date descending
original_video = self.previous_uploads[-(i + 1)]
self.assertEqual(
set(response_video.keys()),
set(["edx_video_id", "client_video_id", "created", "duration", "status"])
)
dateutil.parser.parse(response_video["created"])
for field in ["edx_video_id", "client_video_id", "duration"]:
self.assertEqual(response_video[field], original_video[field])
self.assertEqual(
response_video["status"],
StatusDisplayStrings.get(original_video["status"])
)
def test_get_html(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertRegexpMatches(response["Content-Type"], "^text/html(;.*)?$")
# Crude check for presence of data in returned HTML
for video in self.previous_uploads:
self.assertIn(video["edx_video_id"], response.content)
def test_post_non_json(self):
response = self.client.post(self.url, {"files": []})
self.assertEqual(response.status_code, 400)
def test_post_malformed_json(self):
response = self.client.post(self.url, "{", content_type="application/json")
self.assertEqual(response.status_code, 400)
def test_post_invalid_json(self):
def assert_bad(content):
"""Make request with content and assert that response is 400"""
response = self.client.post(
self.url,
json.dumps(content),
content_type="application/json"
)
self.assertEqual(response.status_code, 400)
# Top level missing files key
assert_bad({})
# Entry missing file_name
assert_bad({"files": [{"content_type": "video/mp4"}]})
# Entry missing content_type
assert_bad({"files": [{"file_name": "test.mp4"}]})
@override_settings(AWS_ACCESS_KEY_ID="test_key_id", AWS_SECRET_ACCESS_KEY="test_secret")
@patch("boto.s3.key.Key")
@patch("boto.s3.connection.S3Connection")
def test_post_success(self, mock_conn, mock_key):
files = [
{
"file_name": "first.mp4",
"content_type": "video/mp4",
},
{
"file_name": "second.webm",
"content_type": "video/webm",
},
{
"file_name": "third.mov",
"content_type": "video/quicktime",
},
{
"file_name": "fourth.mp4",
"content_type": "video/mp4",
},
]
bucket = Mock()
mock_conn.return_value = Mock(get_bucket=Mock(return_value=bucket))
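        # One mocked boto Key per file, each generating a predictable upload URL.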
mock_key_instances = [
Mock(
generate_url=Mock(
return_value="http://example.com/url_{}".format(file_info["file_name"])
)
)
for file_info in files
]
# If extra calls are made, return a dummy
mock_key.side_effect = mock_key_instances + [Mock()]
response = self.client.post(
self.url,
json.dumps({"files": files}),
content_type="application/json"
)
self.assertEqual(response.status_code, 200)
response_obj = json.loads(response.content)
mock_conn.assert_called_once_with(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
self.assertEqual(len(response_obj["files"]), len(files))
self.assertEqual(mock_key.call_count, len(files))
for i, file_info in enumerate(files):
# Ensure Key was set up correctly and extract id
key_call_args, __ = mock_key.call_args_list[i]
self.assertEqual(key_call_args[0], bucket)
path_match = re.match(
(
settings.VIDEO_UPLOAD_PIPELINE["ROOT_PATH"] +
"/([a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12})$"
),
key_call_args[1]
)
self.assertIsNotNone(path_match)
video_id = path_match.group(1)
mock_key_instance = mock_key_instances[i]
mock_key_instance.set_metadata.assert_any_call(
"course_video_upload_token",
self.test_token
)
mock_key_instance.set_metadata.assert_any_call(
"client_video_id",
file_info["file_name"]
)
mock_key_instance.set_metadata.assert_any_call("course_key", unicode(self.course.id))
mock_key_instance.generate_url.assert_called_once_with(
KEY_EXPIRATION_IN_SECONDS,
"PUT",
headers={"Content-Type": file_info["content_type"]}
)
# Ensure asset store was updated and the created_by field was set
asset_metadata = modulestore().find_asset_metadata(
self.course.id.make_asset_key(VIDEO_ASSET_TYPE, video_id)
)
self.assertIsNotNone(asset_metadata)
self.assertEquals(asset_metadata.created_by, self.user.id)
# Ensure VAL was updated
val_info = get_video_info(video_id)
self.assertEqual(val_info["status"], "upload")
self.assertEqual(val_info["client_video_id"], file_info["file_name"])
self.assertEqual(val_info["status"], "upload")
self.assertEqual(val_info["duration"], 0)
self.assertEqual(val_info["courses"], [unicode(self.course.id)])
# Ensure response is correct
response_file = response_obj["files"][i]
self.assertEqual(response_file["file_name"], file_info["file_name"])
self.assertEqual(response_file["upload_url"], mock_key_instance.generate_url())
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_VIDEO_UPLOAD_PIPELINE": True})
@override_settings(VIDEO_UPLOAD_PIPELINE={"BUCKET": "test_bucket", "ROOT_PATH": "test_root"})
class VideoUrlsCsvTestCase(VideoUploadTestMixin, CourseTestCase):
"""Test cases for the CSV download endpoint for video uploads"""
VIEW_NAME = "video_encodings_download"
def setUp(self):
super(VideoUrlsCsvTestCase, self).setUp()
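        # Limit the CSV to profile1 URLs by default; individual tests widen the whitelist.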
VideoUploadConfig(profile_whitelist="profile1").save()
def _check_csv_response(self, expected_profiles):
"""
Check that the response is a valid CSV response containing rows
corresponding to previous_uploads and including the expected profiles.
"""
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response["Content-Disposition"],
"attachment; filename={course}_video_urls.csv".format(course=self.course.id.course)
)
response_reader = StringIO(response.content)
reader = csv.DictReader(response_reader, dialect=csv.excel)
self.assertEqual(
reader.fieldnames,
(
["Name", "Duration", "Date Added", "Video ID", "Status"] +
["{} URL".format(profile) for profile in expected_profiles]
)
)
rows = list(reader)
self.assertEqual(len(rows), len(self.previous_uploads))
for i, row in enumerate(rows):
response_video = {
key.decode("utf-8"): value.decode("utf-8") for key, value in row.items()
}
# Videos should be returned by creation date descending
original_video = self.previous_uploads[-(i + 1)]
self.assertEqual(response_video["Name"], original_video["client_video_id"])
self.assertEqual(response_video["Duration"], str(original_video["duration"]))
dateutil.parser.parse(response_video["Date Added"])
self.assertEqual(response_video["Video ID"], original_video["edx_video_id"])
self.assertEqual(response_video["Status"], StatusDisplayStrings.get(original_video["status"]))
for profile in expected_profiles:
response_profile_url = response_video["{} URL".format(profile)]
original_encoded_for_profile = next(
(
original_encoded
for original_encoded in original_video["encoded_videos"]
if original_encoded["profile"] == profile
),
None
)
if original_encoded_for_profile:
self.assertEqual(response_profile_url, original_encoded_for_profile["url"])
else:
self.assertEqual(response_profile_url, "")
def test_basic(self):
self._check_csv_response(["profile1"])
def test_profile_whitelist(self):
VideoUploadConfig(profile_whitelist="profile1,profile2").save()
self._check_csv_response(["profile1", "profile2"])
def test_non_ascii_course(self):
course = CourseFactory.create(
number=u"nón-äscii",
video_upload_pipeline={
"course_video_upload_token": self.test_token,
}
)
response = self.client.get(self.get_url_for_course_key(course.id))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response["Content-Disposition"],
"attachment; filename=video_urls.csv; filename*=utf-8''n%C3%B3n-%C3%A4scii_video_urls.csv"
)
|
agpl-3.0
| -5,334,378,433,386,850,000 | -4,863,203,381,462,383,000 | 39.061381 | 106 | 0.563585 | false |
TheParrotsAreComing/PAS
|
TestingAssets/Files/delete_foster.py
|
2
|
3543
|
import time
import sys
import _mysql
import random
import string
import re
import os
import urllib.parse
from selenium import webdriver
from selenium.webdriver.support.ui import Select
import selenium.webdriver.chrome.service as service
from shutil import copyfile
try:
# Check to see if it was added
db=_mysql.connect('localhost','root','root','paws_db')
rand_fname=''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
rand_lname=''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
rand_mail=''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
db.query("INSERT INTO fosters (first_name,last_name,address,email,created,is_deleted) VALUES(\""+rand_fname+"\",\""+rand_lname+"\",\"55 Gato Way\",\""+rand_mail+"@mail.com\",NOW(),true);");
db.store_result()
db.query("SELECT id,first_name FROM fosters where last_name=\""+rand_lname+"\" AND email=\""+rand_mail+"@mail.com\"")
r=db.store_result()
k=r.fetch_row(1,1)
a_id = k[0].get('id')
service = service.Service('D:\ChromeDriver\chromedriver')
service.start()
capabilities = {'chrome.binary': 'C:\Program Files (x86)\Google\Chrome\Application\chrome'} # Chrome path is different for everyone
driver = webdriver.Remote(service.service_url, capabilities)
driver.set_window_size(sys.argv[1], sys.argv[2]);
curfilePath = os.path.abspath(__file__)
curDir = os.path.abspath(os.path.join(curfilePath,os.pardir)) # this will return current directory in which python file resides.
parentDir = os.path.abspath(os.path.join(curDir,os.pardir))
grandParentDir = os.path.abspath(os.path.join(parentDir,os.pardir))
webroot = os.path.join(grandParentDir,"webroot","files","fosters",a_id)
rand_default=''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
rand_new=''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
file_path_1 = urllib.parse.urljoin('files/fosters/',a_id+"/"+rand_default)
db.query('INSERT INTO files (entity_type,entity_id,is_photo,file_path,mime_type,file_size,file_ext,created,is_deleted,original_filename) VALUES(4,'+a_id+',0,"'+file_path_1+'","application/pdf",78237,"pdf",NOW(),0,"test_doc_1");')
db.store_result()
db.query('SELECT id FROM files where file_path="'+file_path_1+'"')
r=db.store_result()
k=r.fetch_row(1,1)
file_1_id = k[0].get('id')
if not os.path.exists(webroot):
os.makedirs(webroot)
copyfile(os.getcwd()+"/doc/test_doc_1.pdf", os.path.join(webroot,rand_default+".pdf"))
for root,dir,files in os.walk(webroot):
for f in files:
os.chmod(os.path.join(root, f), 777)
driver.get('http://localhost:8765');
driver.find_element_by_id('email').send_keys('[email protected]')
driver.find_element_by_id('password').send_keys('password')
driver.find_element_by_css_selector('input[type="submit"]').click()
driver.get('http://localhost:8765/fosters/view/'+a_id)
driver.find_element_by_css_selector('a[data-ix="attachment-notification"]').click()
print("pass") #Not Implemented Yet
sys.exit(0)
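	# NOTE: everything below this early exit is unreachable scaffolding for a check
	# that was never implemented (file_2_id is never defined).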
driver.find_element_by_css_selector('div.picture-file[data-file-id="'+file_2_id+'"]').click()
driver.find_element_by_id("mark-profile-pic-btn").click()
driver.get('http://localhost:8765/fosters/view/'+a_id)
new_img = driver.find_element_by_css_selector('img.cat-profile-pic')
img_src = new_img.get_attribute('src')
if rand_new in img_src:
print("pass")
else:
print("fail")
driver.quit()
except Exception as e:
print(e)
print("fail")
|
mit
| 5,277,761,255,407,123,000 | -3,204,389,069,610,873,300 | 31.805556 | 230 | 0.711262 | false |
lyceel/engine
|
build/android/pylib/base/test_run_factory.py
|
45
|
2028
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from pylib.gtest import gtest_test_instance
from pylib.gtest import local_device_gtest_run
from pylib.instrumentation import instrumentation_test_instance
from pylib.local.device import local_device_environment
from pylib.local.device import local_device_instrumentation_test_run
from pylib.remote.device import remote_device_environment
from pylib.remote.device import remote_device_gtest_run
from pylib.remote.device import remote_device_instrumentation_test_run
from pylib.remote.device import remote_device_uirobot_test_run
from pylib.uirobot import uirobot_test_instance
def CreateTestRun(_args, env, test_instance, error_func):
if isinstance(env, local_device_environment.LocalDeviceEnvironment):
if isinstance(test_instance, gtest_test_instance.GtestTestInstance):
return local_device_gtest_run.LocalDeviceGtestRun(env, test_instance)
if isinstance(test_instance,
instrumentation_test_instance.InstrumentationTestInstance):
return (local_device_instrumentation_test_run
.LocalDeviceInstrumentationTestRun(env, test_instance))
if isinstance(env, remote_device_environment.RemoteDeviceEnvironment):
if isinstance(test_instance, gtest_test_instance.GtestTestInstance):
return remote_device_gtest_run.RemoteDeviceGtestTestRun(
env, test_instance)
if isinstance(test_instance,
instrumentation_test_instance.InstrumentationTestInstance):
return (remote_device_instrumentation_test_run
.RemoteDeviceInstrumentationTestRun(env, test_instance))
if isinstance(test_instance, uirobot_test_instance.UirobotTestInstance):
return remote_device_uirobot_test_run.RemoteDeviceUirobotTestRun(
env, test_instance)
error_func('Unable to create test run for %s tests in %s environment'
% (str(test_instance), str(env)))
|
bsd-3-clause
| 1,184,075,442,263,387,400 | -8,388,949,731,769,252,000 | 48.463415 | 77 | 0.770217 | false |
mrphlip/lrrbot
|
lrrbot/desertbus_moderator_actions.py
|
2
|
4185
|
"""
This module has basically nothing to do with actual lrrbot functionality...
It's just piggy-backing off it to share its code and steal its event loop.
Because that's easier than making this a separate process.
"""
import asyncio
import datetime
import sqlalchemy
from common import pubsub
from common import utils
from common import time as ctime
from common import gdata
from common.config import config
import logging
import time
import irc.client
log = logging.getLogger("desertbus_moderator_actions")
SPREADSHEET = "1KEEcv-hGEIwkHARpK-X6TBWUT3x8HpgG0i4tk16_Ysw"
WATCHCHANNEL = 'desertbus'
WATCHAS = 'mrphlip' # because lrrbot isn't a mod in the channel
DESERTBUS_START = config["timezone"].localize(datetime.datetime(2020, 11, 13, 10, 0))
class ModeratorActions:
def __init__(self, lrrbot, loop):
self.lrrbot = lrrbot
self.loop = loop
self.last_chat = {}
if config['log_desertbus_moderator_actions']:
self.lrrbot.reactor.add_global_handler("pubmsg", self.record_db_chat, -2)
self.lrrbot.reactor.add_global_handler("all_events", self.drop_db_events, -1)
self.lrrbot.reactor.add_global_handler("welcome", self.on_connect, 2)
self.lrrbot.reactor.scheduler.execute_every(60, self.clear_chat)
users = self.lrrbot.metadata.tables["users"]
with self.lrrbot.engine.begin() as conn:
selfrow = conn.execute(sqlalchemy.select([users.c.id]).where(users.c.name == WATCHAS)).first()
targetrow = conn.execute(sqlalchemy.select([users.c.id]).where(users.c.name == WATCHCHANNEL)).first()
if selfrow is not None and targetrow is not None:
self_channel_id, = selfrow
target_channel_id, = targetrow
topic = "chat_moderator_actions.%s.%s" % (self_channel_id, target_channel_id)
self.lrrbot.pubsub.subscribe([topic], WATCHAS)
pubsub.signals.signal(topic).connect(self.on_message)
@utils.swallow_errors
def on_message(self, sender, message):
log.info("Got message: %r", message['data'])
action = message['data']['moderation_action']
args = message['data']['args']
mod = message['data']['created_by']
if action == 'timeout':
user = args[0]
action = "Timeout: %s" % ctime.nice_duration(int(args[1]))
reason = args[2] if len(args) >= 3 else ''
last = self.last_chat.get(user.lower(), [''])[0]
elif action == 'ban':
user = args[0]
action = "Ban"
reason = args[1] if len(args) >= 2 else ''
last = self.last_chat.get(user.lower(), [''])[0]
elif action == 'unban':
user = args[0]
action = "Unban"
reason = ''
last = ''
elif action == 'untimeout':
user = args[0]
action = "Untimeout"
reason = ''
last = ''
elif action == 'delete':
user = args[0]
action = "Delete message"
reason = ''
last = args[1]
else:
user = ''
reason = repr(args)
last = ''
now = datetime.datetime.now(config["timezone"])
data = [
now.strftime("%Y-%m-%d %H:%M:%S"), # Timestamp
self.nice_time(now - DESERTBUS_START), # Timestamp (hours bussed)
user, # Offender's Username
mod, # Moderator
action, # Enforcement option/length
reason, # What was the cause of the enforcement action?
last, # Last Line
]
log.debug("Add row: %r", data)
asyncio.ensure_future(gdata.add_rows_to_spreadsheet(SPREADSHEET, [data]), loop=self.loop).add_done_callback(utils.check_exception)
def nice_time(self, s):
if isinstance(s, datetime.timedelta):
s = s.days * 86400 + s.seconds
if s < 0:
return "-" + self.nice_time(-s)
return "%d:%02d:%02d" % (s // 3600, (s // 60) % 60, s % 60)
@utils.swallow_errors
def record_db_chat(self, conn, event):
if event.target == "#" + WATCHCHANNEL:
source = irc.client.NickMask(event.source)
self.last_chat[source.nick.lower()] = (event.arguments[0], time.time())
return "NO MORE"
@utils.swallow_errors
def drop_db_events(self, conn, event):
if event.target == "#" + WATCHCHANNEL and event.type != "action":
return "NO MORE"
@utils.swallow_errors
def clear_chat(self):
cutoff = time.time() - 10*60
to_remove = [k for k, v in self.last_chat.items() if v[1] < cutoff]
for i in to_remove:
del self.last_chat[i]
def on_connect(self, conn, event):
conn.join("#" + WATCHCHANNEL)
|
apache-2.0
| 1,240,339,116,874,925,600 | -6,458,166,154,610,417,000 | 30.704545 | 132 | 0.670968 | false |
Crach1015/plugin.video.superpack
|
zip/plugin.video.SportsDevil/lib/addonInstaller.py
|
25
|
3511
|
# -*- coding: utf-8 -*-
import os
import xbmc, xbmcaddon
import common
import urllib
import zipfile
from traceback import print_exc
from dialogs.dialogProgress import DialogProgress
from utils.fileUtils import getFileContent, clearDirectory
from utils.regexUtils import findall
PACKAGE_DIR = "special://home/addons/packages/"
INSTALL_DIR = "special://home/addons/"
DICT = {
'veetle': 'https://github.com/sissbruecker/xbmc-veetle-plugin/archive/master.zip',
'jtv': 'https://divingmules-repo.googlecode.com/files/plugin.video.jtv.archives-0.3.6.zip',
'youtube': 'http://ftp.hosteurope.de/mirror/xbmc.org/addons/frodo/plugin.video.youtube/plugin.video.youtube-4.4.4.zip'
}
def install(key):
entry = DICT[key]
return _install_addon(entry)
def _install_addon(url):
ri = AddonInstaller()
compressed = ri.download(url)
if compressed:
addonId = ri.install(compressed)
if addonId:
xbmc.sleep(100)
xbmc.executebuiltin('UpdateLocalAddons')
xbmc.sleep(100)
try:
_N_ = xbmcaddon.Addon(id=addonId)
common.showNotification(_N_.getAddonInfo("name"), 'Addon installed', 2000, _N_.getAddonInfo("icon"))
return True
except:
pass
return False
def isInstalled(addonId):
try:
_N_ = xbmcaddon.Addon(id=addonId)
return True
except:
return False
class AddonInstaller:
def download(self, url, destination=PACKAGE_DIR):
try:
dlg = DialogProgress()
dlg.create('SportsDevil - Installing external addon')
destination = xbmc.translatePath(destination) + os.path.basename(url)
def _report_hook(count, blocksize, totalsize):
percent = int(float(count * blocksize * 100) / totalsize)
dlg.update(percent, url, destination)
fp, _ = urllib.urlretrieve(url, destination, _report_hook)
return fp
except:
print_exc()
dlg.close()
return ""
def extract(self, fileOrPath, directory):
try:
if not directory.endswith(':') and not os.path.exists(directory):
os.mkdir(directory)
zf = zipfile.ZipFile(fileOrPath)
for _, name in enumerate(zf.namelist()):
if name.endswith('/'):
path = os.path.join(directory, name)
if os.path.exists(path):
clearDirectory(path)
else:
os.makedirs(path, 0777)
else:
outfile = open(os.path.join(directory, name), 'wb')
outfile.write(zf.read(name))
outfile.flush()
outfile.close()
return zf.filelist
except:
print_exc()
return None
def install(self, filename):
destination = xbmc.translatePath(INSTALL_DIR)
files = self.extract(filename, destination)
if files:
addonXml = filter(lambda x: x.filename.endswith('addon.xml'), files)
if addonXml:
path = os.path.join(destination, addonXml[0].filename)
content = getFileContent(path)
addonId = findall(content, '<addon id="([^"]+)"')
if addonId:
return addonId[0]
return None
|
gpl-2.0
| 6,424,668,376,361,283,000 | -734,165,850,533,039,100 | 32.438095 | 126 | 0.561948 | false |
Beeblio/django
|
tests/custom_managers_regress/models.py
|
38
|
1195
|
"""
Regression tests for custom manager classes.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class RestrictedManager(models.Manager):
"""
A manager that filters out non-public instances.
"""
def get_queryset(self):
return super(RestrictedManager, self).get_queryset().filter(is_public=True)
@python_2_unicode_compatible
class RelatedModel(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
@python_2_unicode_compatible
class RestrictedModel(models.Model):
name = models.CharField(max_length=50)
is_public = models.BooleanField(default=False)
related = models.ForeignKey(RelatedModel)
objects = RestrictedManager()
plain_manager = models.Manager()
def __str__(self):
return self.name
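# Illustrative behaviour of the managers defined above (comment added for clarity,
# not part of the original regression-test module):
#
#     RestrictedModel.objects.all()        # only instances with is_public=True
#     RestrictedModel.plain_manager.all()  # every instance, no filtering applied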
@python_2_unicode_compatible
class OneToOneRestrictedModel(models.Model):
name = models.CharField(max_length=50)
is_public = models.BooleanField(default=False)
related = models.OneToOneField(RelatedModel)
objects = RestrictedManager()
plain_manager = models.Manager()
def __str__(self):
return self.name
|
bsd-3-clause
| -7,483,890,800,136,016,000 | -2,820,375,868,362,842,000 | 23.895833 | 83 | 0.709623 | false |
MakerDAO/click
|
tests/test_commands.py
|
29
|
7114
|
# -*- coding: utf-8 -*-
import re
import click
def test_other_command_invoke(runner):
@click.command()
@click.pass_context
def cli(ctx):
return ctx.invoke(other_cmd, arg=42)
@click.command()
@click.argument('arg', type=click.INT)
def other_cmd(arg):
click.echo(arg)
result = runner.invoke(cli, [])
assert not result.exception
assert result.output == '42\n'
def test_other_command_forward(runner):
cli = click.Group()
@cli.command()
@click.option('--count', default=1)
def test(count):
click.echo('Count: %d' % count)
@cli.command()
@click.option('--count', default=1)
@click.pass_context
def dist(ctx, count):
ctx.forward(test)
ctx.invoke(test, count=42)
result = runner.invoke(cli, ['dist'])
assert not result.exception
assert result.output == 'Count: 1\nCount: 42\n'
def test_auto_shorthelp(runner):
@click.group()
def cli():
pass
@cli.command()
def short():
"""This is a short text."""
@cli.command()
def special_chars():
"""Login and store the token in ~/.netrc."""
@cli.command()
def long():
"""This is a long text that is too long to show as short help
and will be truncated instead."""
result = runner.invoke(cli, ['--help'])
assert re.search(
r'Commands:\n\s+'
r'long\s+This is a long text that is too long to show\.\.\.\n\s+'
r'short\s+This is a short text\.\n\s+'
r'special_chars\s+Login and store the token in ~/.netrc\.\s*',
result.output) is not None
def test_default_maps(runner):
@click.group()
def cli():
pass
@cli.command()
@click.option('--name', default='normal')
def foo(name):
click.echo(name)
result = runner.invoke(cli, ['foo'], default_map={
'foo': {'name': 'changed'}
})
assert not result.exception
assert result.output == 'changed\n'
def test_group_with_args(runner):
@click.group()
@click.argument('obj')
def cli(obj):
click.echo('obj=%s' % obj)
@cli.command()
def move():
click.echo('move')
result = runner.invoke(cli, [])
assert result.exit_code == 0
assert 'Show this message and exit.' in result.output
result = runner.invoke(cli, ['obj1'])
assert result.exit_code == 2
assert 'Error: Missing command.' in result.output
result = runner.invoke(cli, ['obj1', '--help'])
assert result.exit_code == 0
assert 'Show this message and exit.' in result.output
result = runner.invoke(cli, ['obj1', 'move'])
assert result.exit_code == 0
assert result.output == 'obj=obj1\nmove\n'
def test_base_command(runner):
import optparse
@click.group()
def cli():
pass
class OptParseCommand(click.BaseCommand):
def __init__(self, name, parser, callback):
click.BaseCommand.__init__(self, name)
self.parser = parser
self.callback = callback
def parse_args(self, ctx, args):
try:
opts, args = parser.parse_args(args)
except Exception as e:
ctx.fail(str(e))
ctx.args = args
ctx.params = vars(opts)
def get_usage(self, ctx):
return self.parser.get_usage()
def get_help(self, ctx):
return self.parser.format_help()
def invoke(self, ctx):
ctx.invoke(self.callback, ctx.args, **ctx.params)
parser = optparse.OptionParser(usage='Usage: foo test [OPTIONS]')
parser.add_option("-f", "--file", dest="filename",
help="write report to FILE", metavar="FILE")
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
def test_callback(args, filename, verbose):
click.echo(' '.join(args))
click.echo(filename)
click.echo(verbose)
cli.add_command(OptParseCommand('test', parser, test_callback))
result = runner.invoke(cli, ['test', '-f', 'test.txt', '-q',
'whatever.txt', 'whateverelse.txt'])
assert not result.exception
assert result.output.splitlines() == [
'whatever.txt whateverelse.txt',
'test.txt',
'False',
]
result = runner.invoke(cli, ['test', '--help'])
assert not result.exception
assert result.output.splitlines() == [
'Usage: foo test [OPTIONS]',
'',
'Options:',
' -h, --help show this help message and exit',
' -f FILE, --file=FILE write report to FILE',
' -q, --quiet don\'t print status messages to stdout',
]
def test_object_propagation(runner):
for chain in False, True:
@click.group(chain=chain)
@click.option('--debug/--no-debug', default=False)
@click.pass_context
def cli(ctx, debug):
if ctx.obj is None:
ctx.obj = {}
ctx.obj['DEBUG'] = debug
@cli.command()
@click.pass_context
def sync(ctx):
click.echo('Debug is %s' % (ctx.obj['DEBUG'] and 'on' or 'off'))
result = runner.invoke(cli, ['sync'])
assert result.exception is None
assert result.output == 'Debug is off\n'
def test_other_command_invoke_with_defaults(runner):
@click.command()
@click.pass_context
def cli(ctx):
return ctx.invoke(other_cmd)
@click.command()
@click.option('--foo', type=click.INT, default=42)
@click.pass_context
def other_cmd(ctx, foo):
assert ctx.info_name == 'other_cmd'
click.echo(foo)
result = runner.invoke(cli, [])
assert not result.exception
assert result.output == '42\n'
def test_invoked_subcommand(runner):
@click.group(invoke_without_command=True)
@click.pass_context
def cli(ctx):
if ctx.invoked_subcommand is None:
click.echo('no subcommand, use default')
ctx.invoke(sync)
else:
click.echo('invoke subcommand')
@cli.command()
def sync():
click.echo('in subcommand')
result = runner.invoke(cli, ['sync'])
assert not result.exception
assert result.output == 'invoke subcommand\nin subcommand\n'
result = runner.invoke(cli)
assert not result.exception
assert result.output == 'no subcommand, use default\nin subcommand\n'
def test_unprocessed_options(runner):
@click.command(context_settings=dict(
ignore_unknown_options=True
))
@click.argument('args', nargs=-1, type=click.UNPROCESSED)
@click.option('--verbose', '-v', count=True)
def cli(verbose, args):
click.echo('Verbosity: %s' % verbose)
click.echo('Args: %s' % '|'.join(args))
result = runner.invoke(cli, ['-foo', '-vvvvx', '--muhaha', 'x', 'y', '-x'])
assert not result.exception
assert result.output.splitlines() == [
'Verbosity: 4',
'Args: -foo|-x|--muhaha|x|y|-x',
]
|
bsd-3-clause
| 3,586,204,982,806,629,400 | 6,825,536,349,445,392,000 | 26.898039 | 79 | 0.577453 | false |
maestro-hybrid-cloud/horizon
|
openstack_dashboard/dashboards/project/network_topology/subnets/tables.py
|
33
|
1052
|
# Copyright 2015 Cisco Systems.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard.dashboards.project.networks.subnets import tables
class DeleteSubnet(tables.DeleteSubnet):
failure_url = 'horizon:project:network_topology:network'
class SubnetsTable(tables.SubnetsTable):
class Meta(object):
name = "subnets"
verbose_name = _("Subnets")
row_actions = (DeleteSubnet, )
table_actions = (DeleteSubnet, )
|
apache-2.0
| -1,382,401,979,865,741,800 | 5,825,544,043,742,010,000 | 34.066667 | 78 | 0.721483 | false |
invisiblearts/DRCN
|
drcn_main.py
|
1
|
3349
|
# The MIT License (MIT)
#
# Copyright (c) 2016 invisiblearts
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from keras.models import Sequential, Model
from keras.layers import Convolution2D, Input, merge
from keras.callbacks import ModelCheckpoint
from keras.utils.io_utils import HDF5Matrix
from keras.optimizers import Adam
from drcn_merge import DRCN_Merge
BATCH_SIZE = 20
input_data = Input(batch_shape=(BATCH_SIZE, 1, 41, 41), name='data')
def func_iterator(x, func, times):
    # Apply `func` to `x` a total of `times` times in succession.
    assert isinstance(times, int) and times >= 1
    if times == 1:
        return func(x)
    return func_iterator(func(x), func, times - 1)
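# Worked example of the recursion above (illustrative comment, not from the original
# source): func_iterator(x, f, 3) == f(f(f(x))), so recurrence_list[i] below is the
# inference net applied i+1 times to the shared feature maps.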
def conv(channels=256, **kwargs):
return Convolution2D(channels, 3, 3, 'he_normal', border_mode='same', activation='relu', **kwargs)
embed_net = Sequential([conv(batch_input_shape=(BATCH_SIZE, 1, 41, 41)), conv()], name='Embedding Net')
infer_net = Sequential([conv(batch_input_shape=(BATCH_SIZE, 256, 41, 41))], name='Inference Net')
recons_net = Sequential([conv(batch_input_shape=(BATCH_SIZE, 256, 41, 41)), conv(1)], name='Reconstruction Net')
features = embed_net(input_data)
recurrence_list = []
reconstruct_list = []
for i in range(10):
recurrence_list.append(func_iterator(features, infer_net, i+1))
reconstruct_list.append(merge([recons_net(recurrence_list[i]), input_data]))
merged = merge(reconstruct_list, mode='concat', concat_axis=1)
DRCNMerge = DRCN_Merge(10)
out = DRCNMerge(merged)
DRCN_Model = Model(input=input_data, output=out, name='DRCN Final Model')
DRCN_Model.compile(optimizer=Adam(lr=0.00001, beta_1=0.9, beta_2=0.999), loss='mae')
train_data = HDF5Matrix('train_DRCN_data.h5', 'data', 0, 470)
train_label = HDF5Matrix('train_DRCN_label.h5', 'label', 0, 470)
test_data = HDF5Matrix('train_DRCN_data.h5', 'data', 470, 500)
test_label = HDF5Matrix('train_DRCN_label.h5', 'label', 470, 500)
with open('DRCN.yaml', 'w') as fp:
fp.write(DRCN_Model.to_yaml())
hist = DRCN_Model.fit(
train_data, train_label,
batch_size=BATCH_SIZE, nb_epoch=200,
validation_data=[test_data, test_label], shuffle='batch',
callbacks=[ModelCheckpoint('DRCN_weights.{epoch:02d}-{val_loss:.6f}.hdf5',
monitor='val_loss', verbose=0, save_best_only=False, mode='auto')])
DRCN_Model.save_weights('DRCN_weights.h5')
with open('DRCN_history.txt', 'w') as fp:
fp.write(str(hist.history))
|
mit
| -6,670,092,665,389,268,000 | 8,373,073,077,059,873,000 | 39.841463 | 112 | 0.727381 | false |
rismalrv/edx-platform
|
lms/djangoapps/ccx/views.py
|
13
|
20949
|
"""
Views related to the Custom Courses feature.
"""
import csv
import datetime
import functools
import json
import logging
import pytz
from contextlib import contextmanager
from copy import deepcopy
from cStringIO import StringIO
from django.core.urlresolvers import reverse
from django.http import (
HttpResponse,
HttpResponseForbidden,
)
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.http import Http404
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from django.views.decorators.cache import cache_control
from django.views.decorators.csrf import ensure_csrf_cookie
from django.contrib.auth.models import User
from courseware.courses import get_course_by_id
from courseware.field_overrides import disable_overrides
from courseware.grades import iterate_grades_for
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from edxmako.shortcuts import render_to_response
from opaque_keys.edx.keys import CourseKey
from ccx_keys.locator import CCXLocator
from student.roles import CourseCcxCoachRole # pylint: disable=import-error
from student.models import CourseEnrollment
from instructor.offline_gradecalc import student_grades # pylint: disable=import-error
from instructor.views.api import _split_input_list # pylint: disable=import-error
from instructor.views.tools import get_student_from_identifier # pylint: disable=import-error
from instructor.enrollment import (
enroll_email,
unenroll_email,
get_email_params,
)
from .models import CustomCourseForEdX
from .overrides import (
clear_override_for_ccx,
get_override_for_ccx,
override_field_for_ccx,
clear_ccx_field_info_from_ccx_map,
bulk_delete_ccx_override_fields,
)
log = logging.getLogger(__name__)
TODAY = datetime.datetime.today # for patching in tests
def coach_dashboard(view):
"""
View decorator which enforces that the user have the CCX coach role on the
given course and goes ahead and translates the course_id from the Django
route into a course object.
"""
@functools.wraps(view)
def wrapper(request, course_id):
"""
Wraps the view function, performing access check, loading the course,
and modifying the view's call signature.
"""
course_key = CourseKey.from_string(course_id)
ccx = None
if isinstance(course_key, CCXLocator):
ccx_id = course_key.ccx
ccx = CustomCourseForEdX.objects.get(pk=ccx_id)
course_key = ccx.course_id
role = CourseCcxCoachRole(course_key)
if not role.has_user(request.user):
return HttpResponseForbidden(
_('You must be a CCX Coach to access this view.'))
course = get_course_by_id(course_key, depth=None)
# if there is a ccx, we must validate that it is the ccx for this coach
if ccx is not None:
coach_ccx = get_ccx_for_coach(course, request.user)
if coach_ccx is None or coach_ccx.id != ccx.id:
return HttpResponseForbidden(
_('You must be the coach for this ccx to access this view')
)
return view(request, course, ccx)
return wrapper
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def dashboard(request, course, ccx=None):
"""
Display the CCX Coach Dashboard.
"""
# right now, we can only have one ccx per user and course
# so, if no ccx is passed in, we can sefely redirect to that
if ccx is None:
ccx = get_ccx_for_coach(course, request.user)
if ccx:
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': CCXLocator.from_course_locator(course.id, ccx.id)}
)
return redirect(url)
context = {
'course': course,
'ccx': ccx,
}
if ccx:
ccx_locator = CCXLocator.from_course_locator(course.id, ccx.id)
schedule = get_ccx_schedule(course, ccx)
grading_policy = get_override_for_ccx(
ccx, course, 'grading_policy', course.grading_policy)
context['schedule'] = json.dumps(schedule, indent=4)
context['save_url'] = reverse(
'save_ccx', kwargs={'course_id': ccx_locator})
context['ccx_members'] = CourseEnrollment.objects.filter(course_id=ccx_locator, is_active=True)
context['gradebook_url'] = reverse(
'ccx_gradebook', kwargs={'course_id': ccx_locator})
context['grades_csv_url'] = reverse(
'ccx_grades_csv', kwargs={'course_id': ccx_locator})
context['grading_policy'] = json.dumps(grading_policy, indent=4)
context['grading_policy_url'] = reverse(
'ccx_set_grading_policy', kwargs={'course_id': ccx_locator})
else:
context['create_ccx_url'] = reverse(
'create_ccx', kwargs={'course_id': course.id})
return render_to_response('ccx/coach_dashboard.html', context)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def create_ccx(request, course, ccx=None):
"""
Create a new CCX
"""
name = request.POST.get('name')
# prevent CCX objects from being created for deprecated course ids.
if course.id.deprecated:
messages.error(request, _(
"You cannot create a CCX from a course using a deprecated id. "
"Please create a rerun of this course in the studio to allow "
"this action."))
url = reverse('ccx_coach_dashboard', kwargs={'course_id': course.id})
return redirect(url)
ccx = CustomCourseForEdX(
course_id=course.id,
coach=request.user,
display_name=name)
ccx.save()
# Make sure start/due are overridden for entire course
start = TODAY().replace(tzinfo=pytz.UTC)
override_field_for_ccx(ccx, course, 'start', start)
override_field_for_ccx(ccx, course, 'due', None)
# Hide anything that can show up in the schedule
hidden = 'visible_to_staff_only'
for chapter in course.get_children():
override_field_for_ccx(ccx, chapter, hidden, True)
for sequential in chapter.get_children():
override_field_for_ccx(ccx, sequential, hidden, True)
for vertical in sequential.get_children():
override_field_for_ccx(ccx, vertical, hidden, True)
ccx_id = CCXLocator.from_course_locator(course.id, ccx.id) # pylint: disable=no-member
url = reverse('ccx_coach_dashboard', kwargs={'course_id': ccx_id})
return redirect(url)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def save_ccx(request, course, ccx=None):
"""
Save changes to CCX.
"""
if not ccx:
raise Http404
def override_fields(parent, data, graded, earliest=None, ccx_ids_to_delete=None):
"""
Recursively apply CCX schedule data to CCX by overriding the
`visible_to_staff_only`, `start` and `due` fields for units in the
course.
"""
if ccx_ids_to_delete is None:
ccx_ids_to_delete = []
blocks = {
str(child.location): child
for child in parent.get_children()}
for unit in data:
block = blocks[unit['location']]
override_field_for_ccx(
ccx, block, 'visible_to_staff_only', unit['hidden'])
start = parse_date(unit['start'])
if start:
if not earliest or start < earliest:
earliest = start
override_field_for_ccx(ccx, block, 'start', start)
else:
ccx_ids_to_delete.append(get_override_for_ccx(ccx, block, 'start_id'))
clear_ccx_field_info_from_ccx_map(ccx, block, 'start')
due = parse_date(unit['due'])
if due:
override_field_for_ccx(ccx, block, 'due', due)
else:
ccx_ids_to_delete.append(get_override_for_ccx(ccx, block, 'due_id'))
clear_ccx_field_info_from_ccx_map(ccx, block, 'due')
if not unit['hidden'] and block.graded:
graded[block.format] = graded.get(block.format, 0) + 1
children = unit.get('children', None)
if children:
override_fields(block, children, graded, earliest, ccx_ids_to_delete)
return earliest, ccx_ids_to_delete
graded = {}
earliest, ccx_ids_to_delete = override_fields(course, json.loads(request.body), graded, [])
bulk_delete_ccx_override_fields(ccx, ccx_ids_to_delete)
if earliest:
override_field_for_ccx(ccx, course, 'start', earliest)
# Attempt to automatically adjust grading policy
changed = False
policy = get_override_for_ccx(
ccx, course, 'grading_policy', course.grading_policy
)
policy = deepcopy(policy)
grader = policy['GRADER']
for section in grader:
count = graded.get(section.get('type'), 0)
if count < section['min_count']:
changed = True
section['min_count'] = count
if changed:
override_field_for_ccx(ccx, course, 'grading_policy', policy)
return HttpResponse(
json.dumps({
'schedule': get_ccx_schedule(course, ccx),
'grading_policy': json.dumps(policy, indent=4)}),
content_type='application/json',
)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def set_grading_policy(request, course, ccx=None):
"""
Set grading policy for the CCX.
"""
if not ccx:
raise Http404
override_field_for_ccx(
ccx, course, 'grading_policy', json.loads(request.POST['policy']))
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': CCXLocator.from_course_locator(course.id, ccx.id)}
)
return redirect(url)
def validate_date(year, month, day, hour, minute):
"""
avoid corrupting db if bad dates come in
"""
valid = True
if year < 0:
valid = False
if month < 1 or month > 12:
valid = False
if day < 1 or day > 31:
valid = False
if hour < 0 or hour > 23:
valid = False
if minute < 0 or minute > 59:
valid = False
return valid
def parse_date(datestring):
"""
Generate a UTC datetime.datetime object from a string of the form
'YYYY-MM-DD HH:MM'. If string is empty or `None`, returns `None`.
"""
if datestring:
date, time = datestring.split(' ')
year, month, day = map(int, date.split('-'))
hour, minute = map(int, time.split(':'))
if validate_date(year, month, day, hour, minute):
return datetime.datetime(
year, month, day, hour, minute, tzinfo=pytz.UTC)
return None
def get_ccx_for_coach(course, coach):
"""
Looks to see if user is coach of a CCX for this course. Returns the CCX or
None.
"""
ccxs = CustomCourseForEdX.objects.filter(
course_id=course.id,
coach=coach
)
# XXX: In the future, it would be nice to support more than one ccx per
# coach per course. This is a place where that might happen.
if ccxs.exists():
return ccxs[0]
return None
def get_ccx_schedule(course, ccx):
"""
Generate a JSON serializable CCX schedule.
"""
def visit(node, depth=1):
"""
Recursive generator function which yields CCX schedule nodes.
We convert dates to string to get them ready for use by the js date
widgets, which use text inputs.
"""
for child in node.get_children():
start = get_override_for_ccx(ccx, child, 'start', None)
if start:
start = str(start)[:-9]
due = get_override_for_ccx(ccx, child, 'due', None)
if due:
due = str(due)[:-9]
hidden = get_override_for_ccx(
ccx, child, 'visible_to_staff_only',
child.visible_to_staff_only)
visited = {
'location': str(child.location),
'display_name': child.display_name,
'category': child.category,
'start': start,
'due': due,
'hidden': hidden,
}
if depth < 3:
children = tuple(visit(child, depth + 1))
if children:
visited['children'] = children
yield visited
else:
yield visited
with disable_overrides():
return tuple(visit(course))
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_schedule(request, course, ccx=None): # pylint: disable=unused-argument
"""
get json representation of ccx schedule
"""
if not ccx:
raise Http404
schedule = get_ccx_schedule(course, ccx)
json_schedule = json.dumps(schedule, indent=4)
    return HttpResponse(json_schedule, content_type='application/json')
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_invite(request, course, ccx=None):
"""
Invite users to new ccx
"""
if not ccx:
raise Http404
action = request.POST.get('enrollment-button')
identifiers_raw = request.POST.get('student-ids')
identifiers = _split_input_list(identifiers_raw)
auto_enroll = True if 'auto-enroll' in request.POST else False
email_students = True if 'email-students' in request.POST else False
for identifier in identifiers:
user = None
email = None
try:
user = get_student_from_identifier(identifier)
except User.DoesNotExist:
email = identifier
else:
email = user.email
try:
validate_email(email)
course_key = CCXLocator.from_course_locator(course.id, ccx.id)
email_params = get_email_params(course, auto_enroll, course_key=course_key, display_name=ccx.display_name)
if action == 'Enroll':
enroll_email(
course_key,
email,
auto_enroll=auto_enroll,
email_students=email_students,
email_params=email_params
)
if action == "Unenroll":
unenroll_email(course_key, email, email_students=email_students, email_params=email_params)
except ValidationError:
log.info('Invalid user name or email when trying to invite students: %s', email)
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': CCXLocator.from_course_locator(course.id, ccx.id)}
)
return redirect(url)
def validate_student_email(email):
"""
validate student's email id
"""
error_message = None
try:
validate_email(email)
except ValidationError:
log.info(
'Invalid user name or email when trying to enroll student: %s',
email
)
if email:
error_message = _(
'Could not find a user with name or email "{email}" '
).format(email=email)
else:
error_message = _(
'Please enter a valid username or email.'
)
return error_message
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_student_management(request, course, ccx=None):
"""Manage the enrollment of individual students in a CCX
"""
if not ccx:
raise Http404
action = request.POST.get('student-action', None)
student_id = request.POST.get('student-id', '')
user = email = None
error_message = ""
course_key = CCXLocator.from_course_locator(course.id, ccx.id)
try:
user = get_student_from_identifier(student_id)
except User.DoesNotExist:
email = student_id
error_message = validate_student_email(email)
if email and not error_message:
error_message = _(
'Could not find a user with name or email "{email}" '
).format(email=email)
else:
email = user.email
error_message = validate_student_email(email)
if error_message is None:
if action == 'add':
# by decree, no emails sent to students added this way
# by decree, any students added this way are auto_enrolled
enroll_email(course_key, email, auto_enroll=True, email_students=False)
elif action == 'revoke':
unenroll_email(course_key, email, email_students=False)
else:
messages.error(request, error_message)
url = reverse('ccx_coach_dashboard', kwargs={'course_id': course_key})
return redirect(url)
@contextmanager
def ccx_course(ccx_locator):
"""Create a context in which the course identified by course_locator exists
"""
course = get_course_by_id(ccx_locator)
yield course
def prep_course_for_grading(course, request):
"""Set up course module for overrides to function properly"""
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, request.user, course, depth=2)
course = get_module_for_descriptor(
request.user, request, course, field_data_cache, course.id, course=course
)
course._field_data_cache = {} # pylint: disable=protected-access
course.set_grading_policy(course.grading_policy)
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_gradebook(request, course, ccx=None):
"""
Show the gradebook for this CCX.
"""
if not ccx:
raise Http404
ccx_key = CCXLocator.from_course_locator(course.id, ccx.id)
with ccx_course(ccx_key) as course:
prep_course_for_grading(course, request)
enrolled_students = User.objects.filter(
courseenrollment__course_id=ccx_key,
courseenrollment__is_active=1
).order_by('username').select_related("profile")
student_info = [
{
'username': student.username,
'id': student.id,
'email': student.email,
'grade_summary': student_grades(student, request, course),
'realname': student.profile.name,
}
for student in enrolled_students
]
return render_to_response('courseware/gradebook.html', {
'students': student_info,
'course': course,
'course_id': course.id,
'staff_access': request.user.is_staff,
'ordered_grades': sorted(
course.grade_cutoffs.items(), key=lambda i: i[1], reverse=True),
})
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_grades_csv(request, course, ccx=None):
"""
Download grades as CSV.
"""
if not ccx:
raise Http404
ccx_key = CCXLocator.from_course_locator(course.id, ccx.id)
with ccx_course(ccx_key) as course:
prep_course_for_grading(course, request)
enrolled_students = User.objects.filter(
courseenrollment__course_id=ccx_key,
courseenrollment__is_active=1
).order_by('username').select_related("profile")
grades = iterate_grades_for(course, enrolled_students)
header = None
rows = []
for student, gradeset, __ in grades:
if gradeset:
# We were able to successfully grade this student for this
# course.
if not header:
# Encode the header row in utf-8 encoding in case there are
# unicode characters
header = [section['label'].encode('utf-8')
for section in gradeset[u'section_breakdown']]
rows.append(["id", "email", "username", "grade"] + header)
percents = {
section['label']: section.get('percent', 0.0)
for section in gradeset[u'section_breakdown']
if 'label' in section
}
row_percents = [percents.get(label, 0.0) for label in header]
rows.append([student.id, student.email, student.username,
gradeset['percent']] + row_percents)
buf = StringIO()
writer = csv.writer(buf)
for row in rows:
writer.writerow(row)
return HttpResponse(buf.getvalue(), content_type='text/plain')
|
agpl-3.0
| 3,177,625,451,801,885,000 | 5,345,924,412,131,230,000 | 33.118893 | 118 | 0.611915 | false |
Salat-Cx65/python-for-android
|
python-build/python-libs/xmpppy/xmpp/__init__.py
|
212
|
1795
|
# $Id: __init__.py,v 1.9 2005/03/07 09:34:51 snakeru Exp $
"""
All features of xmpppy library contained within separate modules.
At present there are modules:
simplexml - XML handling routines
protocol - jabber-objects (I.e. JID and different stanzas and sub-stanzas) handling routines.
debug - Jacob Lundquist's debugging module. Very handy if you like colored debug.
auth - Non-SASL and SASL stuff. You will need it to auth as a client or transport.
transports - low level connection handling. TCP and TLS currently. HTTP support planned.
roster - simple roster for use in clients.
dispatcher - decision-making logic. Handles all hooks. The first who takes control over fresh stanzas.
features - different stuff that wasn't worth separating into modules
browser - DISCO server framework. Allows to build dynamic disco tree.
filetransfer - Currently contains only IBB stuff. Can be used for bot-to-bot transfers.
Most of the classes defined in these modules are descendants of the PlugIn class,
so they share a single set of methods that let you assemble a full-featured XMPP
client. For every PlugIn instance the 'owner' is the class into which the plugin was
plugged. Plugging in such an instance usually sets some of the owner's methods to its
own for easy access. All session-specific info is stored either in the PlugIn
instance or in the owner's instance. This is considered unhandy, and there are plans
to port the 'Session' class from the xmppd.py project to store all session-related
info. However, if you do not access instance variables directly and use only methods
to access values, you should not have any problems.
"""
import simplexml,protocol,debug,auth,transports,roster,dispatcher,features,browser,filetransfer,commands
from client import *
from protocol import *
|
apache-2.0
| 8,659,908,726,418,316,000 | -4,304,877,867,753,010,700 | 56.903226 | 104 | 0.793872 | false |
tcheehow/MissionPlanner
|
Lib/site-packages/numpy/distutils/from_template.py
|
51
|
7890
|
#!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
"""
process_file(filename)
takes templated file .xxx.src and produces .xxx file where .xxx
is .pyf .f90 or .f using the following template rules:
'<..>' denotes a template.
All function and subroutine blocks in a source file with names that
contain '<..>' will be replicated according to the rules in '<..>'.
The number of comma-separeted words in '<..>' will determine the number of
replicates.
'<..>' may have two different forms, named and short. For example,
named:
<p=d,s,z,c> where anywhere inside a block '<p>' will be replaced with
'd', 's', 'z', and 'c' for each replicate of the block.
<_c> is already defined: <_c=s,d,c,z>
<_t> is already defined: <_t=real,double precision,complex,double complex>
short:
<s,d,c,z>, a short form of the named, useful when no <p> appears inside
a block.
In general, '<..>' contains a comma separated list of arbitrary
expressions. If these expression must contain a comma|leftarrow|rightarrow,
then prepend the comma|leftarrow|rightarrow with a backslash.
If an expression matches '\\<index>' then it will be replaced
by <index>-th expression.
Note that all '<..>' forms in a block must have the same number of
comma-separated entries.
Predefined named template rules:
<prefix=s,d,c,z>
<ftype=real,double precision,complex,double complex>
<ftypereal=real,double precision,\\0,\\1>
<ctype=float,double,complex_float,complex_double>
<ctypereal=float,double,\\0,\\1>
"""
__all__ = ['process_str','process_file']
import os
import sys
import re
routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b',re.I)
routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)',re.I)
function_start_re = re.compile(r'\n (\$|\*)\s*function\b',re.I)
def parse_structure(astr):
""" Return a list of tuples for each function or subroutine each
tuple is the start and end of a subroutine or function to be
expanded.
"""
spanlist = []
ind = 0
while 1:
m = routine_start_re.search(astr,ind)
if m is None:
break
start = m.start()
if function_start_re.match(astr,start,m.end()):
while 1:
i = astr.rfind('\n',ind,start)
if i==-1:
break
start = i
if astr[i:i+7]!='\n $':
break
start += 1
m = routine_end_re.search(astr,m.end())
ind = end = m and m.end()-1 or len(astr)
spanlist.append((start,end))
return spanlist
template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>")
named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>")
list_re = re.compile(r"<\s*((.*?))\s*>")
def find_repl_patterns(astr):
reps = named_re.findall(astr)
names = {}
for rep in reps:
name = rep[0].strip() or unique_key(names)
repl = rep[1].replace('\,','@comma@')
thelist = conv(repl)
names[name] = thelist
return names
item_re = re.compile(r"\A\\(?P<index>\d+)\Z")
def conv(astr):
b = astr.split(',')
l = [x.strip() for x in b]
for i in range(len(l)):
m = item_re.match(l[i])
if m:
j = int(m.group('index'))
l[i] = l[j]
return ','.join(l)
def unique_key(adict):
""" Obtain a unique key given a dictionary."""
allkeys = adict.keys()
done = False
n = 1
while not done:
newkey = '__l%s' % (n)
if newkey in allkeys:
n += 1
else:
done = True
return newkey
template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z')
def expand_sub(substr,names):
substr = substr.replace('\>','@rightarrow@')
substr = substr.replace('\<','@leftarrow@')
lnames = find_repl_patterns(substr)
substr = named_re.sub(r"<\1>",substr) # get rid of definition templates
def listrepl(mobj):
thelist = conv(mobj.group(1).replace('\,','@comma@'))
if template_name_re.match(thelist):
return "<%s>" % (thelist)
name = None
for key in lnames.keys(): # see if list is already in dictionary
if lnames[key] == thelist:
name = key
if name is None: # this list is not in the dictionary yet
name = unique_key(lnames)
lnames[name] = thelist
return "<%s>" % name
substr = list_re.sub(listrepl, substr) # convert all lists to named templates
# newnames are constructed as needed
numsubs = None
base_rule = None
rules = {}
for r in template_re.findall(substr):
if r not in rules:
thelist = lnames.get(r,names.get(r,None))
if thelist is None:
raise ValueError('No replicates found for <%s>' % (r))
if r not in names and not thelist.startswith('_'):
names[r] = thelist
rule = [i.replace('@comma@',',') for i in thelist.split(',')]
num = len(rule)
if numsubs is None:
numsubs = num
rules[r] = rule
base_rule = r
elif num == numsubs:
rules[r] = rule
else:
print("Mismatch in number of replacements (base <%s=%s>)"\
" for <%s=%s>. Ignoring." % (base_rule,
','.join(rules[base_rule]),
r,thelist))
if not rules:
return substr
def namerepl(mobj):
name = mobj.group(1)
return rules.get(name,(k+1)*[name])[k]
newstr = ''
for k in range(numsubs):
newstr += template_re.sub(namerepl, substr) + '\n\n'
newstr = newstr.replace('@rightarrow@','>')
newstr = newstr.replace('@leftarrow@','<')
return newstr
def process_str(allstr):
newstr = allstr
writestr = '' #_head # using _head will break free-format files
struct = parse_structure(newstr)
oldend = 0
names = {}
names.update(_special_names)
for sub in struct:
writestr += newstr[oldend:sub[0]]
names.update(find_repl_patterns(newstr[oldend:sub[0]]))
writestr += expand_sub(newstr[sub[0]:sub[1]],names)
oldend = sub[1]
writestr += newstr[oldend:]
return writestr
include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+[.]src)['\"]",re.I)
def resolve_includes(source):
d = os.path.dirname(source)
fid = open(source)
lines = []
for line in fid.readlines():
m = include_src_re.match(line)
if m:
fn = m.group('name')
if not os.path.isabs(fn):
fn = os.path.join(d,fn)
if os.path.isfile(fn):
print ('Including file',fn)
lines.extend(resolve_includes(fn))
else:
lines.append(line)
else:
lines.append(line)
fid.close()
return lines
def process_file(source):
lines = resolve_includes(source)
return process_str(''.join(lines))
_special_names = find_repl_patterns('''
<_c=s,d,c,z>
<_t=real,double precision,complex,double complex>
<prefix=s,d,c,z>
<ftype=real,double precision,complex,double complex>
<ctype=float,double,complex_float,complex_double>
<ftypereal=real,double precision,\\0,\\1>
<ctypereal=float,double,\\0,\\1>
''')
if __name__ == "__main__":
try:
file = sys.argv[1]
except IndexError:
fid = sys.stdin
outfile = sys.stdout
else:
fid = open(file,'r')
(base, ext) = os.path.splitext(file)
newname = base
outfile = open(newname,'w')
allstr = fid.read()
writestr = process_str(allstr)
outfile.write(writestr)
|
gpl-3.0
| 96,827,613,452,679,090 | -2,379,768,156,368,204 | 29.820313 | 100 | 0.560963 | false |
SlideAtlas/SlideAtlas-Server
|
testing/unit/test_models.py
|
1
|
3917
|
import os
import sys
import logging
from bson import ObjectId
logging.basicConfig(level=logging.INFO)
slideatlaspath = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
sys.path.append(slideatlaspath)
from slideatlas import models
from slideatlas.models import Image
from slideatlas.models import ImageStore, View, Session
import base64
def test_image_access():
obj = ImageStore.objects(dbname="demo")[0]
assert(obj != None)
print obj._cls, obj.label
with obj:
img = Image.objects()[2]
assert(img!=None)
logger.info("Found image labelled %s"%(img.label))
def test_view_access():
obj = ImageStore.objects(dbname="demo")[0]
assert(obj != None)
print obj._cls, obj.label
with obj:
aview = View.objects(image=ObjectId("4e6ec90183ff8d11c8000001"))[0]
assert(aview != None)
logger.info("Found view : %s"%(str(aview.__dict__)))
def test_sess_access():
obj = ImageStore.objects(dbname="ptiffayodhya")[0]
assert(obj != None)
print obj._cls, obj.label
with obj:
asess = Session.objects.first()
assert(asess != None)
logger.info("Found sess : %s"%(str(asess.__dict__)))
def test_collection_access():
""" Snippet to test collection access """
all_collections_query = models.Collection.objects\
.no_dereference()
can_admin_collections = all_collections_query.can_access(models.Operation.admin)
for col in all_collections_query:
print col.label
def test_and_fix__macro_thumbs():
# params
viewcol = View._get_collection()
which = "macro"
force = False
made = 0
errored = 0
skipped = 0
total = 0
for viewobj in viewcol.find():
total = total + 1
logger.info("Total: %d" % total)
try:
# Make thumbnail
if "thumbs" not in viewobj:
viewobj["thumbs"] = {}
if force or which not in viewobj["thumbs"]:
# Refresh the thumbnail
if which not in ["macro"]:
# Only know how to make macro image
# Todo: add support for label supported
raise Exception("%s thumbnail creation not supported" % which)
# Make the macro thumb
# Get the image store and image id and off load the request
istore = models.ImageStore.objects.get(id=viewobj["ViewerRecords"][0]["Database"])
# All image stores support macro thumb
with istore:
thumbimgdata = istore.make_thumb(
models.Image.objects.get(id=viewobj["ViewerRecords"][0]["Image"]))
viewcol.update({"_id": viewobj["_id"]},
{"$set" : { "thumbs." + which: base64.b64encode(thumbimgdata)}})
made = made + 1
logger.info("Made: %d" % made)
else:
skipped = skipped + 1
logger.info("Skipped: %d" % skipped)
except Exception as e:
errored = errored + 1
logger.info("Errored: %d, %s" % (errored, e.message))
logger.info("Made: %d" % made)
logger.info("Skipped: %d" % skipped)
logger.info("Errored: %d" % errored)
if __name__ == "__main__":
"""
Run few tests
This class will be finally imported from tiff server
"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# This is required so that model gets registered
from slideatlas import create_app
app = create_app()
# test_ptiff_tile_store()
# create_ptiff_store()
# test_getlist()
# test_items_mongoengine()
# test_modify_store()
# test_image_access()
# test_view_access()
# test_sess_access()
# test_collection_access()
with app.app_context():
test_and_fix__macro_thumbs()
|
apache-2.0
| 2,428,318,820,300,249,600 | 4,058,061,726,159,018,000 | 27.591241 | 98 | 0.580546 | false |
sudheesh001/pontoon
|
pontoon/sync/tasks.py
|
1
|
7172
|
import logging
from django.conf import settings
from django.db import connection, transaction
from django.utils import timezone
from pontoon.administration.vcs import CommitToRepositoryException
from pontoon.base.models import ChangedEntityLocale, Project, Repository
from pontoon.base.tasks import PontoonTask
from pontoon.sync.changeset import ChangeSet
from pontoon.sync.core import (
commit_changes,
pull_changes,
sync_project as perform_sync_project,
serial_task,
update_project_stats,
update_translations,
)
from pontoon.sync.models import ProjectSyncLog, RepositorySyncLog, SyncLog
from pontoon.sync.vcs_models import VCSProject
log = logging.getLogger(__name__)
def get_or_fail(ModelClass, message=None, **kwargs):
try:
return ModelClass.objects.get(**kwargs)
except ModelClass.DoesNotExist:
if message is not None:
log.error(message)
raise
@serial_task(settings.SYNC_TASK_TIMEOUT, base=PontoonTask, lock_key='project={0}')
def sync_project(self, project_pk, sync_log_pk, no_pull=False, no_commit=False, force=False):
"""Fetch the project with the given PK and perform sync on it."""
db_project = get_or_fail(Project, pk=project_pk,
message='Could not sync project with pk={0}, not found.'.format(project_pk))
sync_log = get_or_fail(SyncLog, pk=sync_log_pk,
message=('Could not sync project {0}, log with pk={1} not found.'
.format(db_project.slug, sync_log_pk)))
log.info('Syncing project {0}.'.format(db_project.slug))
# Mark "now" at the start of sync to avoid messing with
# translations submitted during sync.
now = timezone.now()
project_sync_log = ProjectSyncLog.objects.create(
sync_log=sync_log,
project=db_project,
start_time=now
)
if not no_pull:
repos_changed = pull_changes(db_project)
else:
repos_changed = True # Assume changed.
# If the repos haven't changed since the last sync and there are
# no Pontoon-side changes for this project, quit early.
if not force and not repos_changed and not db_project.needs_sync:
log.info('Skipping project {0}, no changes detected.'.format(db_project.slug))
project_sync_log.skipped = True
project_sync_log.skipped_end_time = timezone.now()
project_sync_log.save(update_fields=('skipped', 'skipped_end_time'))
return
perform_sync_project(db_project, now)
for repo in db_project.repositories.all():
sync_project_repo.delay(
project_pk,
repo.pk,
project_sync_log.pk,
now,
no_pull=no_pull,
no_commit=no_commit
)
log.info('Synced resources for project {0}.'.format(db_project.slug))
@serial_task(settings.SYNC_TASK_TIMEOUT, base=PontoonTask, lock_key='project={0},repo={1}')
def sync_project_repo(self, project_pk, repo_pk, project_sync_log_pk, now,
no_pull=False, no_commit=False):
db_project = get_or_fail(Project, pk=project_pk,
message='Could not sync project with pk={0}, not found.'.format(project_pk))
repo = get_or_fail(Repository, pk=repo_pk,
message='Could not sync repo with pk={0}, not found.'.format(project_pk))
project_sync_log = get_or_fail(ProjectSyncLog, pk=project_sync_log_pk,
message=('Could not sync project {0}, log with pk={1} not found.'
.format(db_project.slug, project_sync_log_pk)))
repo_sync_log = RepositorySyncLog.objects.create(
project_sync_log=project_sync_log,
repository=repo,
start_time=timezone.now()
)
# Pull VCS changes in case we're on a different worker than the one
# sync started on.
if not no_pull:
pull_changes(db_project)
if len(repo.locales) < 1:
log.warning('Could not sync repo `{0}`, no locales found within.'
.format(repo.url))
repo_sync_log.end_time = timezone.now()
repo_sync_log.save(update_fields=['end_time'])
return
vcs_project = VCSProject(db_project, locales=repo.locales)
for locale in repo.locales:
try:
with transaction.atomic():
changeset = ChangeSet(db_project, vcs_project, now)
update_translations(db_project, vcs_project, locale, changeset)
changeset.execute()
update_project_stats(db_project, vcs_project, changeset, locale)
# Clear out the "has_changed" markers now that we've finished
# syncing.
(ChangedEntityLocale.objects
.filter(entity__resource__project=db_project,
locale=locale,
when__lte=now)
.delete())
db_project.has_changed = False
db_project.save(update_fields=['has_changed'])
# Clean up any duplicate approvals at the end of sync right
# before we commit the transaction to avoid race conditions.
with connection.cursor() as cursor:
cursor.execute("""
UPDATE base_translation AS b
SET approved = FALSE, approved_date = NULL
WHERE
id IN
(SELECT trans.id FROM base_translation AS trans
LEFT JOIN base_entity AS ent ON ent.id = trans.entity_id
LEFT JOIN base_resource AS res ON res.id = ent.resource_id
WHERE locale_id = %(locale_id)s
AND res.project_id = %(project_id)s)
AND approved_date !=
(SELECT max(approved_date)
FROM base_translation
WHERE entity_id = b.entity_id
AND locale_id = b.locale_id
AND (plural_form = b.plural_form OR plural_form IS NULL));
""", {
'locale_id': locale.id,
'project_id': db_project.id
})
# Perform the commit last so that, if it succeeds, there is
# nothing after it to fail.
if not no_commit and locale in changeset.locales_to_commit:
commit_changes(db_project, vcs_project, changeset, locale)
except CommitToRepositoryException as err:
# Transaction aborted, log and move on to the next locale.
log.warning(
'Failed to sync locale {locale} for project {project} due to '
'commit error: {error}'.format(
locale=locale.code,
project=db_project.slug,
error=err,
)
)
repo_sync_log.end_time = timezone.now()
repo_sync_log.save()
log.info('Synced translations for project {0} in locales {1}.'.format(
db_project.slug, ','.join(locale.code for locale in repo.locales)
))
|
bsd-3-clause
| -1,199,731,863,962,535,700 | -8,889,815,019,302,970,000 | 39.292135 | 93 | 0.587144 | false |
mfherbst/spack
|
var/spack/repos/builtin/packages/perl-mozilla-ca/package.py
|
5
|
1577
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PerlMozillaCa(PerlPackage):
"""Mozilla's CA cert bundle in PEM format"""
homepage = "http://search.cpan.org/~abh/Mozilla-CA-20160104/lib/Mozilla/CA.pm"
url = "http://search.cpan.org/CPAN/authors/id/A/AB/ABH/Mozilla-CA-20160104.tar.gz"
version('20160104', '1b91edb15953a8188f011ab5ff433300')
|
lgpl-2.1
| -2,708,117,984,662,731,000 | 3,521,958,474,612,005,000 | 45.382353 | 91 | 0.679772 | false |
yb-kim/gemV
|
src/arch/x86/isa/insts/general_purpose/rotate_and_shift/__init__.py
|
91
|
2283
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
categories = ["rotate",
"shift"]
microcode = ""
for category in categories:
exec "import %s as cat" % category
microcode += cat.microcode
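# --- Editor's note: illustrative sketch only, not part of the original gem5 file. ---
# The exec-based loop above is Python 2 syntax.  The helper below shows the same
# aggregation written with importlib; it assumes the category modules are importable
# by name in this environment and is never called by the build.
def _collect_microcode(category_names):
    import importlib
    collected = ""
    for name in category_names:
        collected += importlib.import_module(name).microcode
    return collected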
|
bsd-3-clause
| -6,778,676,996,698,870,000 | -8,722,698,343,786,721,000 | 50.886364 | 72 | 0.784494 | false |
LingxiaoJIA/gem5
|
tests/configs/tsunami-simple-atomic.py
|
64
|
2352
|
# Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.objects import *
from alpha_generic import *
root = LinuxAlphaFSSystemUniprocessor(mem_mode='atomic',
mem_class=SimpleMemory,
cpu_class=AtomicSimpleCPU).create_root()
|
bsd-3-clause
| -7,674,878,226,506,108,000 | -4,846,499,319,023,234,000 | 53.697674 | 78 | 0.771684 | false |
SIFTeam/enigma2
|
lib/python/Components/Sensors.py
|
27
|
2023
|
from Components.FanControl import fancontrol
class Sensors:
# (type, name, unit, directory)
TYPE_TEMPERATURE = 0
# (type, name, unit, fanid)
TYPE_FAN_RPM = 1
def __init__(self):
# (type, name, unit, sensor_specific_dict/list)
self.sensors_list = []
self.addSensors()
def getSensorsCount(self, type = None):
if type is None:
return len(self.sensors_list)
count = 0
for sensor in self.sensors_list:
if sensor[0] == type:
count += 1
return count
# returns a list of sensorids of type "type"
def getSensorsList(self, type = None):
if type is None:
return range(len(self.sensors_list))
list = []
for sensorid in range(len(self.sensors_list)):
if self.sensors_list[sensorid][0] == type:
list.append(sensorid)
return list
def getSensorType(self, sensorid):
return self.sensors_list[sensorid][0]
def getSensorName(self, sensorid):
return self.sensors_list[sensorid][1]
def getSensorValue(self, sensorid):
value = -1
sensor = self.sensors_list[sensorid]
if sensor[0] == self.TYPE_TEMPERATURE:
f = open("%s/value" % sensor[3], "r")
value = int(f.readline().strip())
f.close()
elif sensor[0] == self.TYPE_FAN_RPM:
value = fancontrol.getFanSpeed(sensor[3])
return value
def getSensorUnit(self, sensorid):
return self.sensors_list[sensorid][2]
def addSensors(self):
import os
if os.path.exists("/proc/stb/sensors"):
for dirname in os.listdir("/proc/stb/sensors"):
if dirname.find("temp", 0, 4) == 0:
f = open("/proc/stb/sensors/%s/name" % dirname, "r")
name = f.readline().strip()
f.close()
f = open("/proc/stb/sensors/%s/unit" % dirname, "r")
unit = f.readline().strip()
f.close()
self.sensors_list.append((self.TYPE_TEMPERATURE, name, unit, "/proc/stb/sensors/%s" % dirname))
for fanid in range(fancontrol.getFanCount()):
if fancontrol.hasRPMSensor(fanid):
self.sensors_list.append((self.TYPE_FAN_RPM, _("Fan %d") % (fanid + 1), "rpm", fanid))
sensors = Sensors()
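# --- Editor's note: usage sketch only, not part of the original enigma2 module. ---
# UI code queries the module-level "sensors" singleton created above; the guarded
# block below simply lists every temperature sensor with its current value and unit.
if __name__ == "__main__":
    for sensorid in sensors.getSensorsList(sensors.TYPE_TEMPERATURE):
        print "%s: %s %s" % (sensors.getSensorName(sensorid),
                             sensors.getSensorValue(sensorid),
                             sensors.getSensorUnit(sensorid))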
|
gpl-2.0
| -2,315,343,997,172,671,000 | 4,045,452,832,435,036,000 | 27.111111 | 100 | 0.654474 | false |
lucalianas/ProMort
|
promort/reviews_manager/migrations/0012_auto_20170522_1045.py
|
2
|
2680
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
from django.db import migrations
from uuid import uuid4
def _get_slide_index(slide_label):
return slide_label.split('-')[-1]
def update_rois_annotations(apps, schema_editor):
ROIsAnnotation = apps.get_model('reviews_manager', 'ROIsAnnotation')
for annotation in ROIsAnnotation.objects.all():
annotation_label = uuid4().hex
annotation.label = annotation_label
annotation.save()
for step in annotation.steps.all():
slide_index = _get_slide_index(step.slide.id)
step.label = '%s-%s' % (annotation_label, slide_index)
step.save()
def update_clinical_annotations(apps, schema_editor):
ClinicalAnnotation = apps.get_model('reviews_manager', 'ClinicalAnnotation')
for annotation in ClinicalAnnotation.objects.all():
if annotation.reviewer == annotation.rois_review.reviewer:
annotation_label = annotation.rois_review.label
else:
annotation_label = uuid4().hex
annotation.label = annotation_label
annotation.save()
for step in annotation.steps.all():
slide_index = _get_slide_index(step.slide.id)
step.label = '%s-%s' % (annotation_label, slide_index)
step.save()
class Migration(migrations.Migration):
dependencies = [
('reviews_manager', '0011_auto_20170522_1045'),
]
operations = [
migrations.RunPython(update_rois_annotations),
migrations.RunPython(update_clinical_annotations),
]
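# --- Editor's note: illustrative comment only, not part of the original migration. ---
# Both RunPython steps above stamp labels of the form "<uuid4 hex>-<slide index>",
# where the index is the last dash-separated token of the slide id, e.g.
#     _get_slide_index('case_A-slide-3')  ->  '3'
#     '%s-%s' % (uuid4().hex, '3')        ->  '<32 random hex chars>-3'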
|
mit
| -7,129,969,811,098,780,000 | 4,596,902,040,151,571,500 | 37.84058 | 83 | 0.698134 | false |
Easy-as-Bit/p2pool
|
p2pool/work.py
|
52
|
23955
|
from __future__ import division
import base64
import random
import re
import sys
import time
from twisted.internet import defer
from twisted.python import log
import bitcoin.getwork as bitcoin_getwork, bitcoin.data as bitcoin_data
from bitcoin import helper, script, worker_interface
from util import forest, jsonrpc, variable, deferral, math, pack
import p2pool, p2pool.data as p2pool_data
class WorkerBridge(worker_interface.WorkerBridge):
COINBASE_NONCE_LENGTH = 8
def __init__(self, node, my_pubkey_hash, donation_percentage, merged_urls, worker_fee):
worker_interface.WorkerBridge.__init__(self)
self.recent_shares_ts_work = []
self.node = node
self.my_pubkey_hash = my_pubkey_hash
self.donation_percentage = donation_percentage
self.worker_fee = worker_fee
self.net = self.node.net.PARENT
self.running = True
self.pseudoshare_received = variable.Event()
self.share_received = variable.Event()
self.local_rate_monitor = math.RateMonitor(10*60)
self.local_addr_rate_monitor = math.RateMonitor(10*60)
self.removed_unstales_var = variable.Variable((0, 0, 0))
self.removed_doa_unstales_var = variable.Variable(0)
self.my_share_hashes = set()
self.my_doa_share_hashes = set()
self.tracker_view = forest.TrackerView(self.node.tracker, forest.get_attributedelta_type(dict(forest.AttributeDelta.attrs,
my_count=lambda share: 1 if share.hash in self.my_share_hashes else 0,
my_doa_count=lambda share: 1 if share.hash in self.my_doa_share_hashes else 0,
my_orphan_announce_count=lambda share: 1 if share.hash in self.my_share_hashes and share.share_data['stale_info'] == 'orphan' else 0,
my_dead_announce_count=lambda share: 1 if share.hash in self.my_share_hashes and share.share_data['stale_info'] == 'doa' else 0,
)))
@self.node.tracker.verified.removed.watch
def _(share):
if share.hash in self.my_share_hashes and self.node.tracker.is_child_of(share.hash, self.node.best_share_var.value):
assert share.share_data['stale_info'] in [None, 'orphan', 'doa'] # we made these shares in this instance
self.removed_unstales_var.set((
self.removed_unstales_var.value[0] + 1,
self.removed_unstales_var.value[1] + (1 if share.share_data['stale_info'] == 'orphan' else 0),
self.removed_unstales_var.value[2] + (1 if share.share_data['stale_info'] == 'doa' else 0),
))
if share.hash in self.my_doa_share_hashes and self.node.tracker.is_child_of(share.hash, self.node.best_share_var.value):
self.removed_doa_unstales_var.set(self.removed_doa_unstales_var.value + 1)
# MERGED WORK
self.merged_work = variable.Variable({})
@defer.inlineCallbacks
def set_merged_work(merged_url, merged_userpass):
merged_proxy = jsonrpc.HTTPProxy(merged_url, dict(Authorization='Basic ' + base64.b64encode(merged_userpass)))
while self.running:
auxblock = yield deferral.retry('Error while calling merged getauxblock on %s:' % (merged_url,), 30)(merged_proxy.rpc_getauxblock)()
self.merged_work.set(math.merge_dicts(self.merged_work.value, {auxblock['chainid']: dict(
hash=int(auxblock['hash'], 16),
target='p2pool' if auxblock['target'] == 'p2pool' else pack.IntType(256).unpack(auxblock['target'].decode('hex')),
merged_proxy=merged_proxy,
)}))
yield deferral.sleep(1)
for merged_url, merged_userpass in merged_urls:
set_merged_work(merged_url, merged_userpass)
@self.merged_work.changed.watch
def _(new_merged_work):
print 'Got new merged mining work!'
# COMBINE WORK
self.current_work = variable.Variable(None)
def compute_work():
t = self.node.bitcoind_work.value
bb = self.node.best_block_header.value
if bb is not None and bb['previous_block'] == t['previous_block'] and self.node.net.PARENT.POW_FUNC(bitcoin_data.block_header_type.pack(bb)) <= t['bits'].target:
print 'Skipping from block %x to block %x!' % (bb['previous_block'],
bitcoin_data.hash256(bitcoin_data.block_header_type.pack(bb)))
t = dict(
version=bb['version'],
previous_block=bitcoin_data.hash256(bitcoin_data.block_header_type.pack(bb)),
bits=bb['bits'], # not always true
coinbaseflags='',
height=t['height'] + 1,
time=bb['timestamp'] + 600, # better way?
transactions=[],
transaction_fees=[],
merkle_link=bitcoin_data.calculate_merkle_link([None], 0),
subsidy=self.node.net.PARENT.SUBSIDY_FUNC(self.node.bitcoind_work.value['height']),
last_update=self.node.bitcoind_work.value['last_update'],
)
self.current_work.set(t)
self.node.bitcoind_work.changed.watch(lambda _: compute_work())
self.node.best_block_header.changed.watch(lambda _: compute_work())
compute_work()
self.new_work_event = variable.Event()
@self.current_work.transitioned.watch
def _(before, after):
# trigger LP if version/previous_block/bits changed or transactions changed from nothing
if any(before[x] != after[x] for x in ['version', 'previous_block', 'bits']) or (not before['transactions'] and after['transactions']):
self.new_work_event.happened()
self.merged_work.changed.watch(lambda _: self.new_work_event.happened())
self.node.best_share_var.changed.watch(lambda _: self.new_work_event.happened())
def stop(self):
self.running = False
def get_stale_counts(self):
'''Returns (orphans, doas), total, (orphans_recorded_in_chain, doas_recorded_in_chain)'''
my_shares = len(self.my_share_hashes)
my_doa_shares = len(self.my_doa_share_hashes)
delta = self.tracker_view.get_delta_to_last(self.node.best_share_var.value)
my_shares_in_chain = delta.my_count + self.removed_unstales_var.value[0]
my_doa_shares_in_chain = delta.my_doa_count + self.removed_doa_unstales_var.value
orphans_recorded_in_chain = delta.my_orphan_announce_count + self.removed_unstales_var.value[1]
doas_recorded_in_chain = delta.my_dead_announce_count + self.removed_unstales_var.value[2]
my_shares_not_in_chain = my_shares - my_shares_in_chain
my_doa_shares_not_in_chain = my_doa_shares - my_doa_shares_in_chain
return (my_shares_not_in_chain - my_doa_shares_not_in_chain, my_doa_shares_not_in_chain), my_shares, (orphans_recorded_in_chain, doas_recorded_in_chain)
def get_user_details(self, username):
contents = re.split('([+/])', username)
assert len(contents) % 2 == 1
user, contents2 = contents[0], contents[1:]
desired_pseudoshare_target = None
desired_share_target = None
for symbol, parameter in zip(contents2[::2], contents2[1::2]):
if symbol == '+':
try:
desired_pseudoshare_target = bitcoin_data.difficulty_to_target(float(parameter))
except:
if p2pool.DEBUG:
log.err()
elif symbol == '/':
try:
desired_share_target = bitcoin_data.difficulty_to_target(float(parameter))
except:
if p2pool.DEBUG:
log.err()
if random.uniform(0, 100) < self.worker_fee:
pubkey_hash = self.my_pubkey_hash
else:
try:
pubkey_hash = bitcoin_data.address_to_pubkey_hash(user, self.node.net.PARENT)
except: # XXX blah
pubkey_hash = self.my_pubkey_hash
return user, pubkey_hash, desired_share_target, desired_pseudoshare_target
def preprocess_request(self, user):
if (self.node.p2p_node is None or len(self.node.p2p_node.peers) == 0) and self.node.net.PERSIST:
raise jsonrpc.Error_for_code(-12345)(u'p2pool is not connected to any peers')
if time.time() > self.current_work.value['last_update'] + 60:
raise jsonrpc.Error_for_code(-12345)(u'lost contact with bitcoind')
user, pubkey_hash, desired_share_target, desired_pseudoshare_target = self.get_user_details(user)
return pubkey_hash, desired_share_target, desired_pseudoshare_target
def _estimate_local_hash_rate(self):
if len(self.recent_shares_ts_work) == 50:
hash_rate = sum(work for ts, work in self.recent_shares_ts_work[1:])//(self.recent_shares_ts_work[-1][0] - self.recent_shares_ts_work[0][0])
if hash_rate > 0:
return hash_rate
return None
def get_local_rates(self):
miner_hash_rates = {}
miner_dead_hash_rates = {}
datums, dt = self.local_rate_monitor.get_datums_in_last()
for datum in datums:
miner_hash_rates[datum['user']] = miner_hash_rates.get(datum['user'], 0) + datum['work']/dt
if datum['dead']:
miner_dead_hash_rates[datum['user']] = miner_dead_hash_rates.get(datum['user'], 0) + datum['work']/dt
return miner_hash_rates, miner_dead_hash_rates
def get_local_addr_rates(self):
addr_hash_rates = {}
datums, dt = self.local_addr_rate_monitor.get_datums_in_last()
for datum in datums:
addr_hash_rates[datum['pubkey_hash']] = addr_hash_rates.get(datum['pubkey_hash'], 0) + datum['work']/dt
return addr_hash_rates
def get_work(self, pubkey_hash, desired_share_target, desired_pseudoshare_target):
if self.node.best_share_var.value is None and self.node.net.PERSIST:
raise jsonrpc.Error_for_code(-12345)(u'p2pool is downloading shares')
if self.merged_work.value:
tree, size = bitcoin_data.make_auxpow_tree(self.merged_work.value)
mm_hashes = [self.merged_work.value.get(tree.get(i), dict(hash=0))['hash'] for i in xrange(size)]
mm_data = '\xfa\xbemm' + bitcoin_data.aux_pow_coinbase_type.pack(dict(
merkle_root=bitcoin_data.merkle_hash(mm_hashes),
size=size,
nonce=0,
))
mm_later = [(aux_work, mm_hashes.index(aux_work['hash']), mm_hashes) for chain_id, aux_work in self.merged_work.value.iteritems()]
else:
mm_data = ''
mm_later = []
tx_hashes = [bitcoin_data.hash256(bitcoin_data.tx_type.pack(tx)) for tx in self.current_work.value['transactions']]
tx_map = dict(zip(tx_hashes, self.current_work.value['transactions']))
previous_share = self.node.tracker.items[self.node.best_share_var.value] if self.node.best_share_var.value is not None else None
if previous_share is None:
share_type = p2pool_data.Share
else:
previous_share_type = type(previous_share)
if previous_share_type.SUCCESSOR is None or self.node.tracker.get_height(previous_share.hash) < self.node.net.CHAIN_LENGTH:
share_type = previous_share_type
else:
successor_type = previous_share_type.SUCCESSOR
counts = p2pool_data.get_desired_version_counts(self.node.tracker,
self.node.tracker.get_nth_parent_hash(previous_share.hash, self.node.net.CHAIN_LENGTH*9//10), self.node.net.CHAIN_LENGTH//10)
upgraded = counts.get(successor_type.VERSION, 0)/sum(counts.itervalues())
if upgraded > .65:
print 'Switchover imminent. Upgraded: %.3f%% Threshold: %.3f%%' % (upgraded*100, 95)
print
# Share -> NewShare only valid if 95% of hashes in [net.CHAIN_LENGTH*9//10, net.CHAIN_LENGTH] for new version
if counts.get(successor_type.VERSION, 0) > sum(counts.itervalues())*95//100:
share_type = successor_type
else:
share_type = previous_share_type
if desired_share_target is None:
desired_share_target = 2**256-1
local_hash_rate = self._estimate_local_hash_rate()
if local_hash_rate is not None:
desired_share_target = min(desired_share_target,
bitcoin_data.average_attempts_to_target(local_hash_rate * self.node.net.SHARE_PERIOD / 0.0167)) # limit to 1.67% of pool shares by modulating share difficulty
local_addr_rates = self.get_local_addr_rates()
lookbehind = 3600//self.node.net.SHARE_PERIOD
block_subsidy = self.node.bitcoind_work.value['subsidy']
if previous_share is not None and self.node.tracker.get_height(previous_share.hash) > lookbehind:
expected_payout_per_block = local_addr_rates.get(pubkey_hash, 0)/p2pool_data.get_pool_attempts_per_second(self.node.tracker, self.node.best_share_var.value, lookbehind) \
* block_subsidy*(1-self.donation_percentage/100) # XXX doesn't use global stale rate to compute pool hash
if expected_payout_per_block < self.node.net.PARENT.DUST_THRESHOLD:
desired_share_target = min(desired_share_target,
bitcoin_data.average_attempts_to_target((bitcoin_data.target_to_average_attempts(self.node.bitcoind_work.value['bits'].target)*self.node.net.SPREAD)*self.node.net.PARENT.DUST_THRESHOLD/block_subsidy)
)
if True:
share_info, gentx, other_transaction_hashes, get_share = share_type.generate_transaction(
tracker=self.node.tracker,
share_data=dict(
previous_share_hash=self.node.best_share_var.value,
coinbase=(script.create_push_script([
self.current_work.value['height'],
] + ([mm_data] if mm_data else []) + [
]) + self.current_work.value['coinbaseflags'])[:100],
nonce=random.randrange(2**32),
pubkey_hash=pubkey_hash,
subsidy=self.current_work.value['subsidy'],
donation=math.perfect_round(65535*self.donation_percentage/100),
stale_info=(lambda (orphans, doas), total, (orphans_recorded_in_chain, doas_recorded_in_chain):
'orphan' if orphans > orphans_recorded_in_chain else
'doa' if doas > doas_recorded_in_chain else
None
)(*self.get_stale_counts()),
desired_version=(share_type.SUCCESSOR if share_type.SUCCESSOR is not None else share_type).VOTING_VERSION,
),
block_target=self.current_work.value['bits'].target,
desired_timestamp=int(time.time() + 0.5),
desired_target=desired_share_target,
ref_merkle_link=dict(branch=[], index=0),
desired_other_transaction_hashes_and_fees=zip(tx_hashes, self.current_work.value['transaction_fees']),
net=self.node.net,
known_txs=tx_map,
base_subsidy=self.node.net.PARENT.SUBSIDY_FUNC(self.current_work.value['height']),
)
packed_gentx = bitcoin_data.tx_type.pack(gentx)
other_transactions = [tx_map[tx_hash] for tx_hash in other_transaction_hashes]
mm_later = [(dict(aux_work, target=aux_work['target'] if aux_work['target'] != 'p2pool' else share_info['bits'].target), index, hashes) for aux_work, index, hashes in mm_later]
if desired_pseudoshare_target is None:
target = 2**256-1
local_hash_rate = self._estimate_local_hash_rate()
if local_hash_rate is not None:
target = min(target,
bitcoin_data.average_attempts_to_target(local_hash_rate * 1)) # limit to 1 share response every second by modulating pseudoshare difficulty
else:
target = desired_pseudoshare_target
target = max(target, share_info['bits'].target)
for aux_work, index, hashes in mm_later:
target = max(target, aux_work['target'])
target = math.clip(target, self.node.net.PARENT.SANE_TARGET_RANGE)
getwork_time = time.time()
lp_count = self.new_work_event.times
merkle_link = bitcoin_data.calculate_merkle_link([None] + other_transaction_hashes, 0)
print 'New work for worker! Difficulty: %.06f Share difficulty: %.06f Total block value: %.6f %s including %i transactions' % (
bitcoin_data.target_to_difficulty(target),
bitcoin_data.target_to_difficulty(share_info['bits'].target),
self.current_work.value['subsidy']*1e-8, self.node.net.PARENT.SYMBOL,
len(self.current_work.value['transactions']),
)
ba = dict(
version=min(self.current_work.value['version'], 2),
previous_block=self.current_work.value['previous_block'],
merkle_link=merkle_link,
coinb1=packed_gentx[:-self.COINBASE_NONCE_LENGTH-4],
coinb2=packed_gentx[-4:],
timestamp=self.current_work.value['time'],
bits=self.current_work.value['bits'],
share_target=target,
)
received_header_hashes = set()
def got_response(header, user, coinbase_nonce):
assert len(coinbase_nonce) == self.COINBASE_NONCE_LENGTH
new_packed_gentx = packed_gentx[:-self.COINBASE_NONCE_LENGTH-4] + coinbase_nonce + packed_gentx[-4:] if coinbase_nonce != '\0'*self.COINBASE_NONCE_LENGTH else packed_gentx
new_gentx = bitcoin_data.tx_type.unpack(new_packed_gentx) if coinbase_nonce != '\0'*self.COINBASE_NONCE_LENGTH else gentx
header_hash = bitcoin_data.hash256(bitcoin_data.block_header_type.pack(header))
pow_hash = self.node.net.PARENT.POW_FUNC(bitcoin_data.block_header_type.pack(header))
try:
if pow_hash <= header['bits'].target or p2pool.DEBUG:
helper.submit_block(dict(header=header, txs=[new_gentx] + other_transactions), False, self.node.factory, self.node.bitcoind, self.node.bitcoind_work, self.node.net)
if pow_hash <= header['bits'].target:
print
print 'GOT BLOCK FROM MINER! Passing to bitcoind! %s%064x' % (self.node.net.PARENT.BLOCK_EXPLORER_URL_PREFIX, header_hash)
print
except:
log.err(None, 'Error while processing potential block:')
user, _, _, _ = self.get_user_details(user)
assert header['previous_block'] == ba['previous_block']
assert header['merkle_root'] == bitcoin_data.check_merkle_link(bitcoin_data.hash256(new_packed_gentx), merkle_link)
assert header['bits'] == ba['bits']
on_time = self.new_work_event.times == lp_count
for aux_work, index, hashes in mm_later:
try:
if pow_hash <= aux_work['target'] or p2pool.DEBUG:
df = deferral.retry('Error submitting merged block: (will retry)', 10, 10)(aux_work['merged_proxy'].rpc_getauxblock)(
pack.IntType(256, 'big').pack(aux_work['hash']).encode('hex'),
bitcoin_data.aux_pow_type.pack(dict(
merkle_tx=dict(
tx=new_gentx,
block_hash=header_hash,
merkle_link=merkle_link,
),
merkle_link=bitcoin_data.calculate_merkle_link(hashes, index),
parent_block_header=header,
)).encode('hex'),
)
@df.addCallback
def _(result, aux_work=aux_work):
if result != (pow_hash <= aux_work['target']):
print >>sys.stderr, 'Merged block submittal result: %s Expected: %s' % (result, pow_hash <= aux_work['target'])
else:
print 'Merged block submittal result: %s' % (result,)
@df.addErrback
def _(err):
log.err(err, 'Error submitting merged block:')
except:
log.err(None, 'Error while processing merged mining POW:')
if pow_hash <= share_info['bits'].target and header_hash not in received_header_hashes:
last_txout_nonce = pack.IntType(8*self.COINBASE_NONCE_LENGTH).unpack(coinbase_nonce)
share = get_share(header, last_txout_nonce)
print 'GOT SHARE! %s %s prev %s age %.2fs%s' % (
user,
p2pool_data.format_hash(share.hash),
p2pool_data.format_hash(share.previous_hash),
time.time() - getwork_time,
' DEAD ON ARRIVAL' if not on_time else '',
)
self.my_share_hashes.add(share.hash)
if not on_time:
self.my_doa_share_hashes.add(share.hash)
self.node.tracker.add(share)
self.node.set_best_share()
try:
if (pow_hash <= header['bits'].target or p2pool.DEBUG) and self.node.p2p_node is not None:
self.node.p2p_node.broadcast_share(share.hash)
except:
log.err(None, 'Error forwarding block solution:')
self.share_received.happened(bitcoin_data.target_to_average_attempts(share.target), not on_time, share.hash)
if pow_hash > target:
print 'Worker %s submitted share with hash > target:' % (user,)
print ' Hash: %56x' % (pow_hash,)
print ' Target: %56x' % (target,)
elif header_hash in received_header_hashes:
print >>sys.stderr, 'Worker %s submitted share more than once!' % (user,)
else:
received_header_hashes.add(header_hash)
self.pseudoshare_received.happened(bitcoin_data.target_to_average_attempts(target), not on_time, user)
self.recent_shares_ts_work.append((time.time(), bitcoin_data.target_to_average_attempts(target)))
while len(self.recent_shares_ts_work) > 50:
self.recent_shares_ts_work.pop(0)
self.local_rate_monitor.add_datum(dict(work=bitcoin_data.target_to_average_attempts(target), dead=not on_time, user=user, share_target=share_info['bits'].target))
self.local_addr_rate_monitor.add_datum(dict(work=bitcoin_data.target_to_average_attempts(target), pubkey_hash=pubkey_hash))
return on_time
return ba, got_response
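# --- Editor's note: illustrative sketch only, not part of the original p2pool file. ---
# get_user_details() above accepts miner usernames of the form
# "<address>+<pseudoshare difficulty>/<share difficulty>".  The hypothetical helper
# below mimics only the string parsing; the difficulty-to-target conversion done via
# bitcoin_data is intentionally left out.
def _split_worker_username(username):
    contents = re.split('([+/])', username)
    address, rest = contents[0], contents[1:]
    pseudoshare_difficulty = share_difficulty = None
    for symbol, parameter in zip(rest[::2], rest[1::2]):
        try:
            if symbol == '+':
                pseudoshare_difficulty = float(parameter)
            elif symbol == '/':
                share_difficulty = float(parameter)
        except ValueError:
            pass
    return address, pseudoshare_difficulty, share_difficulty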
|
gpl-3.0
| 3,717,028,211,829,021,700 | -6,478,541,570,779,632,000 | 54.580046 | 223 | 0.574911 | false |
yg257/Pangea
|
lib/boto-2.34.0/tests/integration/gs/test_storage_uri.py
|
135
|
6558
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Google, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""Integration tests for StorageUri interface."""
import binascii
import re
import StringIO
from boto import storage_uri
from boto.exception import BotoClientError
from boto.gs.acl import SupportedPermissions as perms
from tests.integration.gs.testcase import GSTestCase
class GSStorageUriTest(GSTestCase):
def testHasVersion(self):
uri = storage_uri("gs://bucket/obj")
self.assertFalse(uri.has_version())
uri.version_id = "versionid"
self.assertTrue(uri.has_version())
uri = storage_uri("gs://bucket/obj")
# Generation triggers versioning.
uri.generation = 12345
self.assertTrue(uri.has_version())
uri.generation = None
self.assertFalse(uri.has_version())
# Zero-generation counts as a version.
uri = storage_uri("gs://bucket/obj")
uri.generation = 0
self.assertTrue(uri.has_version())
def testCloneReplaceKey(self):
b = self._MakeBucket()
k = b.new_key("obj")
k.set_contents_from_string("stringdata")
orig_uri = storage_uri("gs://%s/" % b.name)
uri = orig_uri.clone_replace_key(k)
self.assertTrue(uri.has_version())
self.assertRegexpMatches(str(uri.generation), r"[0-9]+")
def testSetAclXml(self):
"""Ensures that calls to the set_xml_acl functions succeed."""
b = self._MakeBucket()
k = b.new_key("obj")
k.set_contents_from_string("stringdata")
bucket_uri = storage_uri("gs://%s/" % b.name)
# Get a valid ACL for an object.
bucket_uri.object_name = "obj"
bucket_acl = bucket_uri.get_acl()
bucket_uri.object_name = None
# Add a permission to the ACL.
all_users_read_permission = ("<Entry><Scope type='AllUsers'/>"
"<Permission>READ</Permission></Entry>")
acl_string = re.sub(r"</Entries>",
all_users_read_permission + "</Entries>",
bucket_acl.to_xml())
# Test-generated owner IDs are not currently valid for buckets
acl_no_owner_string = re.sub(r"<Owner>.*</Owner>", "", acl_string)
# Set ACL on an object.
bucket_uri.set_xml_acl(acl_string, "obj")
# Set ACL on a bucket.
bucket_uri.set_xml_acl(acl_no_owner_string)
# Set the default ACL for a bucket.
bucket_uri.set_def_xml_acl(acl_no_owner_string)
# Verify all the ACLs were successfully applied.
new_obj_acl_string = k.get_acl().to_xml()
new_bucket_acl_string = bucket_uri.get_acl().to_xml()
new_bucket_def_acl_string = bucket_uri.get_def_acl().to_xml()
self.assertRegexpMatches(new_obj_acl_string, r"AllUsers")
self.assertRegexpMatches(new_bucket_acl_string, r"AllUsers")
self.assertRegexpMatches(new_bucket_def_acl_string, r"AllUsers")
def testPropertiesUpdated(self):
b = self._MakeBucket()
bucket_uri = storage_uri("gs://%s" % b.name)
key_uri = bucket_uri.clone_replace_name("obj")
key_uri.set_contents_from_string("data1")
self.assertRegexpMatches(str(key_uri.generation), r"[0-9]+")
k = b.get_key("obj")
self.assertEqual(k.generation, key_uri.generation)
self.assertEquals(k.get_contents_as_string(), "data1")
key_uri.set_contents_from_stream(StringIO.StringIO("data2"))
self.assertRegexpMatches(str(key_uri.generation), r"[0-9]+")
self.assertGreater(key_uri.generation, k.generation)
k = b.get_key("obj")
self.assertEqual(k.generation, key_uri.generation)
self.assertEquals(k.get_contents_as_string(), "data2")
key_uri.set_contents_from_file(StringIO.StringIO("data3"))
self.assertRegexpMatches(str(key_uri.generation), r"[0-9]+")
self.assertGreater(key_uri.generation, k.generation)
k = b.get_key("obj")
self.assertEqual(k.generation, key_uri.generation)
self.assertEquals(k.get_contents_as_string(), "data3")
def testCompose(self):
data1 = 'hello '
data2 = 'world!'
expected_crc = 1238062967
b = self._MakeBucket()
bucket_uri = storage_uri("gs://%s" % b.name)
key_uri1 = bucket_uri.clone_replace_name("component1")
key_uri1.set_contents_from_string(data1)
key_uri2 = bucket_uri.clone_replace_name("component2")
key_uri2.set_contents_from_string(data2)
# Simple compose.
key_uri_composite = bucket_uri.clone_replace_name("composite")
components = [key_uri1, key_uri2]
key_uri_composite.compose(components, content_type='text/plain')
self.assertEquals(key_uri_composite.get_contents_as_string(),
data1 + data2)
composite_key = key_uri_composite.get_key()
cloud_crc32c = binascii.hexlify(
composite_key.cloud_hashes['crc32c'])
self.assertEquals(cloud_crc32c, hex(expected_crc)[2:])
self.assertEquals(composite_key.content_type, 'text/plain')
# Compose disallowed between buckets.
key_uri1.bucket_name += '2'
try:
key_uri_composite.compose(components)
self.fail('Composing between buckets didn\'t fail as expected.')
except BotoClientError as err:
self.assertEquals(
err.reason, 'GCS does not support inter-bucket composing')
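# --- Editor's note: illustrative sketch only, not part of the original boto tests. ---
# testCompose above exercises the public pattern: build one StorageUri per component
# object, then call compose() on the destination URI.  The bucket and object names
# here are hypothetical and the function is never invoked by the test runner.
def _example_compose(bucket_name):
    components = [storage_uri("gs://%s/part1" % bucket_name),
                  storage_uri("gs://%s/part2" % bucket_name)]
    destination = storage_uri("gs://%s/combined" % bucket_name)
    destination.compose(components, content_type="text/plain")
    return destination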
|
apache-2.0
| -3,697,513,872,295,753,700 | -2,447,353,468,270,893,000 | 39.732919 | 77 | 0.642421 | false |
craigds/mapnik2
|
scons/scons-local-1.2.0/SCons/compat/_scons_UserString.py
|
12
|
3505
|
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/compat/_scons_UserString.py 3842 2008/12/20 22:59:52 scons"
__doc__ = """
A user-defined wrapper around string objects
This class is "borrowed" from the Python 2.2 UserString and modified
slightly for use with SCons. It is *NOT* guaranteed to be fully compliant
with the standard UserString class from all later versions of Python.
In particular, it does not necessarily contain all of the methods found
in later versions.
"""
import types
StringType = types.StringType
if hasattr(types, 'UnicodeType'):
UnicodeType = types.UnicodeType
def is_String(obj):
return type(obj) in (StringType, UnicodeType)
else:
def is_String(obj):
return type(obj) is StringType
class UserString:
def __init__(self, seq):
if is_String(seq):
self.data = seq
elif isinstance(seq, UserString):
self.data = seq.data[:]
else:
self.data = str(seq)
def __str__(self): return str(self.data)
def __repr__(self): return repr(self.data)
def __int__(self): return int(self.data)
def __long__(self): return long(self.data)
def __float__(self): return float(self.data)
def __complex__(self): return complex(self.data)
def __hash__(self): return hash(self.data)
def __cmp__(self, string):
if isinstance(string, UserString):
return cmp(self.data, string.data)
else:
return cmp(self.data, string)
def __contains__(self, char):
return char in self.data
def __len__(self): return len(self.data)
def __getitem__(self, index): return self.__class__(self.data[index])
def __getslice__(self, start, end):
start = max(start, 0); end = max(end, 0)
return self.__class__(self.data[start:end])
def __add__(self, other):
if isinstance(other, UserString):
return self.__class__(self.data + other.data)
elif is_String(other):
return self.__class__(self.data + other)
else:
return self.__class__(self.data + str(other))
def __radd__(self, other):
if is_String(other):
return self.__class__(other + self.data)
else:
return self.__class__(str(other) + self.data)
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
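# --- Editor's note: usage sketch only, not part of the original SCons module. ---
# UserString keeps returning instances of the subclass across concatenation, which is
# what SCons relies on when deriving string-like classes from it.  CmdStringHolder
# below is hypothetical.
if __name__ == '__main__':
    class CmdStringHolder(UserString):
        pass
    cmd = CmdStringHolder('gcc') + ' -c foo.c'
    assert isinstance(cmd, CmdStringHolder)
    assert str(cmd) == 'gcc -c foo.c'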
|
lgpl-2.1
| 5,219,720,334,828,320,000 | 8,777,038,881,956,207,000 | 37.097826 | 92 | 0.661912 | false |
Licshee/shadowsocks
|
shadowsocks/daemon.py
|
694
|
5602
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import logging
import signal
import time
from shadowsocks import common, shell
# this module is ported from ShadowVPN daemon.c
def daemon_exec(config):
if 'daemon' in config:
if os.name != 'posix':
raise Exception('daemon mode is only supported on Unix')
command = config['daemon']
if not command:
command = 'start'
pid_file = config['pid-file']
log_file = config['log-file']
if command == 'start':
daemon_start(pid_file, log_file)
elif command == 'stop':
daemon_stop(pid_file)
# always exit after daemon_stop
sys.exit(0)
elif command == 'restart':
daemon_stop(pid_file)
daemon_start(pid_file, log_file)
else:
raise Exception('unsupported daemon command %s' % command)
def write_pid_file(pid_file, pid):
import fcntl
import stat
try:
fd = os.open(pid_file, os.O_RDWR | os.O_CREAT,
stat.S_IRUSR | stat.S_IWUSR)
except OSError as e:
shell.print_exception(e)
return -1
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
assert flags != -1
flags |= fcntl.FD_CLOEXEC
r = fcntl.fcntl(fd, fcntl.F_SETFD, flags)
assert r != -1
# There is no platform independent way to implement fcntl(fd, F_SETLK, &fl)
# via fcntl.fcntl. So use lockf instead
try:
fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, 0, 0, os.SEEK_SET)
except IOError:
r = os.read(fd, 32)
if r:
logging.error('already started at pid %s' % common.to_str(r))
else:
logging.error('already started')
os.close(fd)
return -1
os.ftruncate(fd, 0)
os.write(fd, common.to_bytes(str(pid)))
return 0
def freopen(f, mode, stream):
oldf = open(f, mode)
oldfd = oldf.fileno()
newfd = stream.fileno()
os.close(newfd)
os.dup2(oldfd, newfd)
def daemon_start(pid_file, log_file):
def handle_exit(signum, _):
if signum == signal.SIGTERM:
sys.exit(0)
sys.exit(1)
signal.signal(signal.SIGINT, handle_exit)
signal.signal(signal.SIGTERM, handle_exit)
# fork only once because we are sure parent will exit
pid = os.fork()
assert pid != -1
if pid > 0:
# parent waits for its child
time.sleep(5)
sys.exit(0)
# child signals its parent to exit
ppid = os.getppid()
pid = os.getpid()
if write_pid_file(pid_file, pid) != 0:
os.kill(ppid, signal.SIGINT)
sys.exit(1)
os.setsid()
signal.signal(signal.SIGHUP, signal.SIG_IGN)
print('started')
os.kill(ppid, signal.SIGTERM)
sys.stdin.close()
try:
freopen(log_file, 'a', sys.stdout)
freopen(log_file, 'a', sys.stderr)
except IOError as e:
shell.print_exception(e)
sys.exit(1)
def daemon_stop(pid_file):
import errno
try:
with open(pid_file) as f:
buf = f.read()
pid = common.to_str(buf)
if not buf:
logging.error('not running')
except IOError as e:
shell.print_exception(e)
if e.errno == errno.ENOENT:
# always exit 0 if we are sure daemon is not running
logging.error('not running')
return
sys.exit(1)
pid = int(pid)
if pid > 0:
try:
os.kill(pid, signal.SIGTERM)
except OSError as e:
if e.errno == errno.ESRCH:
logging.error('not running')
# always exit 0 if we are sure daemon is not running
return
shell.print_exception(e)
sys.exit(1)
else:
logging.error('pid is not positive: %d', pid)
# sleep for maximum 10s
for i in range(0, 200):
try:
# query for the pid
os.kill(pid, 0)
except OSError as e:
if e.errno == errno.ESRCH:
break
time.sleep(0.05)
else:
logging.error('timed out when stopping pid %d', pid)
sys.exit(1)
print('stopped')
os.unlink(pid_file)
def set_user(username):
if username is None:
return
import pwd
import grp
try:
pwrec = pwd.getpwnam(username)
except KeyError:
logging.error('user not found: %s' % username)
raise
user = pwrec[0]
uid = pwrec[2]
gid = pwrec[3]
cur_uid = os.getuid()
if uid == cur_uid:
return
if cur_uid != 0:
logging.error('can not set user as nonroot user')
# will raise later
# inspired by supervisor
if hasattr(os, 'setgroups'):
groups = [grprec[2] for grprec in grp.getgrall() if user in grprec[3]]
groups.insert(0, gid)
os.setgroups(groups)
os.setgid(gid)
os.setuid(uid)
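# --- Editor's note: illustrative sketch only, not part of the original module. ---
# daemon_exec() only acts when the parsed config contains a 'daemon' key; a minimal
# config (the file paths below are hypothetical) looks like this.  The call itself is
# left commented out because it forks and detaches the running process.
if __name__ == '__main__':
    example_config = {
        'daemon': 'start',    # one of 'start', 'stop', 'restart'
        'pid-file': '/tmp/shadowsocks.pid',
        'log-file': '/tmp/shadowsocks.log',
    }
    # daemon_exec(example_config)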
|
apache-2.0
| 6,636,112,338,927,362,000 | 131,890,263,664,133,650 | 25.932692 | 79 | 0.582114 | false |
defance/edx-platform
|
lms/djangoapps/courseware/user_state_client.py
|
27
|
14669
|
"""
An implementation of :class:`XBlockUserStateClient`, which stores XBlock Scope.user_state
data in a Django ORM model.
"""
import itertools
from operator import attrgetter
from time import time
try:
import simplejson as json
except ImportError:
import json
import dogstats_wrapper as dog_stats_api
from django.contrib.auth.models import User
from xblock.fields import Scope, ScopeBase
from courseware.models import StudentModule, StudentModuleHistory
from edx_user_state_client.interface import XBlockUserStateClient, XBlockUserState
class DjangoXBlockUserStateClient(XBlockUserStateClient):
"""
An interface that uses the Django ORM StudentModule as a backend.
A note on the format of state storage:
The state for an xblock is stored as a serialized JSON dictionary. The model
field that it is stored in can also take on a value of ``None``. To preserve
existing analytic uses, we will preserve the following semantics:
A state of ``None`` means that the user hasn't ever looked at the xblock.
A state of ``"{}"`` means that the XBlock has at some point stored state for
the current user, but that that state has been deleted.
Otherwise, the dictionary contains all data stored for the user.
None of these conditions should violate the semantics imposed by
XBlockUserStateClient (for instance, once all fields have been deleted from
an XBlock for a user, the state will be listed as ``None`` by :meth:`get_history`,
even though the actual stored state in the database will be ``"{}"``).
"""
# Use this sample rate for DataDog events.
API_DATADOG_SAMPLE_RATE = 0.1
class ServiceUnavailable(XBlockUserStateClient.ServiceUnavailable):
"""
This error is raised if the service backing this client is currently unavailable.
"""
pass
class PermissionDenied(XBlockUserStateClient.PermissionDenied):
"""
This error is raised if the caller is not allowed to access the requested data.
"""
pass
class DoesNotExist(XBlockUserStateClient.DoesNotExist):
"""
This error is raised if the caller has requested data that does not exist.
"""
pass
def __init__(self, user=None):
"""
Arguments:
user (:class:`~User`): An already-loaded django user. If this user matches the username
supplied to `set_many`, then that will reduce the number of queries made to store
the user state.
"""
self.user = user
def _get_student_modules(self, username, block_keys):
"""
Retrieve the :class:`~StudentModule`s for the supplied ``username`` and ``block_keys``.
Arguments:
username (str): The name of the user to load `StudentModule`s for.
block_keys (list of :class:`~UsageKey`): The set of XBlocks to load data for.
"""
course_key_func = attrgetter('course_key')
by_course = itertools.groupby(
sorted(block_keys, key=course_key_func),
course_key_func,
)
for course_key, usage_keys in by_course:
query = StudentModule.objects.chunked_filter(
'module_state_key__in',
usage_keys,
student__username=username,
course_id=course_key,
)
for student_module in query:
usage_key = student_module.module_state_key.map_into_course(student_module.course_id)
yield (student_module, usage_key)
def _ddog_increment(self, evt_time, evt_name):
"""
DataDog increment method.
"""
dog_stats_api.increment(
'DjangoXBlockUserStateClient.{}'.format(evt_name),
timestamp=evt_time,
sample_rate=self.API_DATADOG_SAMPLE_RATE,
)
def _ddog_histogram(self, evt_time, evt_name, value):
"""
DataDog histogram method.
"""
dog_stats_api.histogram(
'DjangoXBlockUserStateClient.{}'.format(evt_name),
value,
timestamp=evt_time,
sample_rate=self.API_DATADOG_SAMPLE_RATE,
)
def get_many(self, username, block_keys, scope=Scope.user_state, fields=None):
"""
Retrieve the stored XBlock state for the specified XBlock usages.
Arguments:
username: The name of the user whose state should be retrieved
block_keys ([UsageKey]): A list of UsageKeys identifying which xblock states to load.
scope (Scope): The scope to load data from
fields: A list of field values to retrieve. If None, retrieve all stored fields.
Yields:
XBlockUserState tuples for each specified UsageKey in block_keys.
field_state is a dict mapping field names to values.
"""
if scope != Scope.user_state:
raise ValueError("Only Scope.user_state is supported, not {}".format(scope))
block_count = state_length = 0
evt_time = time()
self._ddog_histogram(evt_time, 'get_many.blks_requested', len(block_keys))
modules = self._get_student_modules(username, block_keys)
for module, usage_key in modules:
if module.state is None:
self._ddog_increment(evt_time, 'get_many.empty_state')
continue
state = json.loads(module.state)
state_length += len(module.state)
self._ddog_histogram(evt_time, 'get_many.block_size', len(module.state))
# If the state is the empty dict, then it has been deleted, and so
# conformant UserStateClients should treat it as if it doesn't exist.
if state == {}:
continue
if fields is not None:
state = {
field: state[field]
for field in fields
if field in state
}
block_count += 1
yield XBlockUserState(username, usage_key, state, module.modified, scope)
# The rest of this method exists only to submit DataDog events.
# Remove it once we're no longer interested in the data.
finish_time = time()
self._ddog_histogram(evt_time, 'get_many.blks_out', block_count)
self._ddog_histogram(evt_time, 'get_many.response_time', (finish_time - evt_time) * 1000)
def set_many(self, username, block_keys_to_state, scope=Scope.user_state):
"""
Set fields for a particular XBlock.
Arguments:
username: The name of the user whose state should be retrieved
block_keys_to_state (dict): A dict mapping UsageKeys to state dicts.
Each state dict maps field names to values. These state dicts
are overlaid over the stored state. To delete fields, use
:meth:`delete` or :meth:`delete_many`.
scope (Scope): The scope to load data from
"""
if scope != Scope.user_state:
raise ValueError("Only Scope.user_state is supported")
# We do a find_or_create for every block (rather than re-using field objects
# that were queried in get_many) so that if the score has
# been changed by some other piece of the code, we don't overwrite
# that score.
if self.user is not None and self.user.username == username:
user = self.user
else:
user = User.objects.get(username=username)
evt_time = time()
for usage_key, state in block_keys_to_state.items():
student_module, created = StudentModule.objects.get_or_create(
student=user,
course_id=usage_key.course_key,
module_state_key=usage_key,
defaults={
'state': json.dumps(state),
'module_type': usage_key.block_type,
},
)
num_fields_before = num_fields_after = num_new_fields_set = len(state)
num_fields_updated = 0
if not created:
if student_module.state is None:
current_state = {}
else:
current_state = json.loads(student_module.state)
num_fields_before = len(current_state)
current_state.update(state)
num_fields_after = len(current_state)
student_module.state = json.dumps(current_state)
# We just read this object, so we know that we can do an update
student_module.save(force_update=True)
# The rest of this method exists only to submit DataDog events.
# Remove it once we're no longer interested in the data.
#
# Record whether a state row has been created or updated.
if created:
self._ddog_increment(evt_time, 'set_many.state_created')
else:
self._ddog_increment(evt_time, 'set_many.state_updated')
# Event to record number of fields sent in to set/set_many.
self._ddog_histogram(evt_time, 'set_many.fields_in', len(state))
# Event to record number of new fields set in set/set_many.
num_new_fields_set = num_fields_after - num_fields_before
self._ddog_histogram(evt_time, 'set_many.fields_set', num_new_fields_set)
# Event to record number of existing fields updated in set/set_many.
num_fields_updated = max(0, len(state) - num_new_fields_set)
self._ddog_histogram(evt_time, 'set_many.fields_updated', num_fields_updated)
# Events for the entire set_many call.
finish_time = time()
self._ddog_histogram(evt_time, 'set_many.blks_updated', len(block_keys_to_state))
self._ddog_histogram(evt_time, 'set_many.response_time', (finish_time - evt_time) * 1000)
def delete_many(self, username, block_keys, scope=Scope.user_state, fields=None):
"""
Delete the stored XBlock state for a many xblock usages.
Arguments:
username: The name of the user whose state should be deleted
block_keys (list): The UsageKey identifying which xblock state to delete.
scope (Scope): The scope to delete data from
fields: A list of fields to delete. If None, delete all stored fields.
"""
if scope != Scope.user_state:
raise ValueError("Only Scope.user_state is supported")
evt_time = time()
if fields is None:
self._ddog_increment(evt_time, 'delete_many.empty_state')
else:
self._ddog_histogram(evt_time, 'delete_many.field_count', len(fields))
self._ddog_histogram(evt_time, 'delete_many.block_count', len(block_keys))
student_modules = self._get_student_modules(username, block_keys)
for student_module, _ in student_modules:
if fields is None:
student_module.state = "{}"
else:
current_state = json.loads(student_module.state)
for field in fields:
if field in current_state:
del current_state[field]
student_module.state = json.dumps(current_state)
# We just read this object, so we know that we can do an update
student_module.save(force_update=True)
# Event for the entire delete_many call.
finish_time = time()
self._ddog_histogram(evt_time, 'delete_many.response_time', (finish_time - evt_time) * 1000)
def get_history(self, username, block_key, scope=Scope.user_state):
"""
Retrieve history of state changes for a given block for a given
student. We don't guarantee that history for many blocks will be fast.
If the specified block doesn't exist, raise :class:`~DoesNotExist`.
Arguments:
username: The name of the user whose history should be retrieved.
block_key: The key identifying which xblock history to retrieve.
scope (Scope): The scope to load data from.
Yields:
XBlockUserState entries for each modification to the specified XBlock, from latest
to earliest.
"""
if scope != Scope.user_state:
raise ValueError("Only Scope.user_state is supported")
student_modules = list(
student_module
for student_module, usage_id
in self._get_student_modules(username, [block_key])
)
if len(student_modules) == 0:
raise self.DoesNotExist()
history_entries = StudentModuleHistory.objects.prefetch_related('student_module').filter(
student_module__in=student_modules
).order_by('-id')
# If no history records exist, raise an error
if not history_entries:
raise self.DoesNotExist()
for history_entry in history_entries:
state = history_entry.state
# If the state is serialized json, then load it
if state is not None:
state = json.loads(state)
# If the state is empty, then for the purposes of `get_history`, it has been
# deleted, and so we list that entry as `None`.
if state == {}:
state = None
block_key = history_entry.student_module.module_state_key
block_key = block_key.map_into_course(
history_entry.student_module.course_id
)
yield XBlockUserState(username, block_key, state, history_entry.created, scope)
def iter_all_for_block(self, block_key, scope=Scope.user_state, batch_size=None):
"""
You get no ordering guarantees. Fetching will happen in batch_size
increments. If you're using this method, you should be running in an
async task.
"""
if scope != Scope.user_state:
raise ValueError("Only Scope.user_state is supported")
raise NotImplementedError()
def iter_all_for_course(self, course_key, block_type=None, scope=Scope.user_state, batch_size=None):
"""
You get no ordering guarantees. Fetching will happen in batch_size
increments. If you're using this method, you should be running in an
async task.
"""
if scope != Scope.user_state:
raise ValueError("Only Scope.user_state is supported")
raise NotImplementedError()
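# --- Editor's note: illustrative sketch only, not part of the original module. ---
# A typical round trip through the API defined above: write one state dict for a
# usage key with set_many(), then read it back with get_many().  The function is not
# called here; `client`, `username` and `usage_key` come from the caller and the
# field name 'attempts' is hypothetical.
def _example_round_trip(client, username, usage_key):
    client.set_many(username, {usage_key: {'attempts': 3}})
    return list(client.get_many(username, [usage_key], fields=['attempts']))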
|
agpl-3.0
| 2,447,833,767,377,206,300 | 1,147,693,618,892,892,900 | 39.747222 | 104 | 0.605904 | false |
tdickers/mitmproxy
|
pathod/utils.py
|
4
|
1080
|
import os
import sys
import netlib.utils
class MemBool(object):
"""
Truth-checking with a memory, for use in chained if statements.
"""
def __init__(self):
self.v = None
def __call__(self, v):
self.v = v
return bool(v)
data = netlib.utils.Data(__name__)
def daemonize(stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'): # pragma: no cover
try:
pid = os.fork()
if pid > 0:
sys.exit(0)
except OSError as e:
sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
sys.exit(1)
os.chdir("/")
os.umask(0)
os.setsid()
try:
pid = os.fork()
if pid > 0:
sys.exit(0)
except OSError as e:
sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
sys.exit(1)
si = open(stdin, 'rb')
so = open(stdout, 'a+b')
se = open(stderr, 'a+b', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
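# --- Editor's note: usage sketch only, not part of the original pathod module. ---
# MemBool (defined above) remembers the last value it was called with, so a chain of
# elif tests can reuse the matched object without re-evaluating the condition.
if __name__ == "__main__":
    import re
    v = MemBool()
    token = "42"
    if v(re.match(r"\d+", token)):
        print("numeric token: %s" % v.v.group(0))
    elif v(re.match(r"[a-z]+", token)):
        print("word token: %s" % v.v.group(0))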
|
mit
| 2,596,136,794,464,760,000 | -9,006,443,659,176,294,000 | 22.478261 | 93 | 0.532407 | false |
Nirlendu/Dummy-Search-Engine
|
tornado-3.2/tornado/test/import_test.py
|
42
|
1477
|
from __future__ import absolute_import, division, print_function, with_statement
from tornado.test.util import unittest
class ImportTest(unittest.TestCase):
def test_import_everything(self):
# Some of our modules are not otherwise tested. Import them
# all (unless they have external dependencies) here to at
# least ensure that there are no syntax errors.
import tornado.auth
import tornado.autoreload
import tornado.concurrent
# import tornado.curl_httpclient # depends on pycurl
import tornado.escape
import tornado.gen
import tornado.httpclient
import tornado.httpserver
import tornado.httputil
import tornado.ioloop
import tornado.iostream
import tornado.locale
import tornado.log
import tornado.netutil
import tornado.options
import tornado.process
import tornado.simple_httpclient
import tornado.stack_context
import tornado.tcpserver
import tornado.template
import tornado.testing
import tornado.util
import tornado.web
import tornado.websocket
import tornado.wsgi
# for modules with dependencies, if those dependencies can be loaded,
# load them too.
def test_import_pycurl(self):
try:
import pycurl
except ImportError:
pass
else:
import tornado.curl_httpclient
|
mit
| 4,916,682,496,994,817,000 | -2,702,251,227,194,627,600 | 31.822222 | 80 | 0.658091 | false |
zhukaixy/kbengine
|
kbe/res/scripts/common/Lib/test/test_dbm_ndbm.py
|
91
|
1622
|
from test import support
support.import_module("dbm.ndbm") #skip if not supported
import unittest
import os
import random
import dbm.ndbm
from dbm.ndbm import error
class DbmTestCase(unittest.TestCase):
def setUp(self):
self.filename = support.TESTFN
self.d = dbm.ndbm.open(self.filename, 'c')
self.d.close()
def tearDown(self):
for suffix in ['', '.pag', '.dir', '.db']:
support.unlink(self.filename + suffix)
def test_keys(self):
self.d = dbm.ndbm.open(self.filename, 'c')
self.assertTrue(self.d.keys() == [])
self.d['a'] = 'b'
self.d[b'bytes'] = b'data'
self.d['12345678910'] = '019237410982340912840198242'
self.d.keys()
self.assertIn('a', self.d)
self.assertIn(b'a', self.d)
self.assertEqual(self.d[b'bytes'], b'data')
self.d.close()
def test_modes(self):
for mode in ['r', 'rw', 'w', 'n']:
try:
self.d = dbm.ndbm.open(self.filename, mode)
self.d.close()
except error:
self.fail()
def test_context_manager(self):
with dbm.ndbm.open(self.filename, 'c') as db:
db["ndbm context manager"] = "context manager"
with dbm.ndbm.open(self.filename, 'r') as db:
self.assertEqual(list(db.keys()), [b"ndbm context manager"])
with self.assertRaises(dbm.ndbm.error) as cm:
db.keys()
self.assertEqual(str(cm.exception),
"DBM object has already been closed")
if __name__ == '__main__':
unittest.main()
|
lgpl-3.0
| -3,991,098,907,176,344,600 | 5,452,817,721,260,299,000 | 29.037037 | 72 | 0.561036 | false |
matijapretnar/projekt-tomo
|
web/problems/models.py
|
2
|
8264
|
from copy import deepcopy
import json
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.template.defaultfilters import slugify
from django.template.loader import render_to_string
from rest_framework.authtoken.models import Token
from simple_history.models import HistoricalRecords
from utils import is_json_string_list, truncate
from utils.models import OrderWithRespectToMixin
from taggit.managers import TaggableManager
from django.core import signing
class Problem(OrderWithRespectToMixin, models.Model):
title = models.CharField(max_length=70)
description = models.TextField(blank=True)
problem_set = models.ForeignKey('courses.ProblemSet', related_name='problems')
history = HistoricalRecords()
tags = TaggableManager(blank=True)
language = models.CharField(max_length=8, choices=(
('python', 'Python 3'),
('octave', 'Octave/Matlab'),
('r', 'R')), default='python')
EXTENSIONS = {'python': 'py', 'octave': 'm', 'r': 'r'}
MIMETYPES = {'python': 'text/x-python',
'octave': 'text/x-octave',
'r': 'text/x-R'}
class Meta:
order_with_respect_to = 'problem_set'
def __str__(self):
return self.title
@property
def guarded_description(self):
return 'Navodila so napisana na listu' if self.problem_set.solution_visibility == self.problem_set.PROBLEM_HIDDEN else self.description
def get_absolute_url(self):
return '{}#{}'.format(self.problem_set.get_absolute_url(), self.anchor())
def anchor(self):
return 'problem-{}'.format(self.pk)
def user_attempts(self, user):
return user.attempts.filter(part__problem=self)
def user_solutions(self, user):
return {attempt.part.id: attempt.solution for attempt in self.user_attempts(user)}
@property
def slug(self):
return slugify(self.title).replace("-", "_")
def attempt_file(self, user):
authentication_token = Token.objects.get(user=user)
solutions = self.user_solutions(user)
parts = [(part, solutions.get(part.id, part.template), part.attempt_token(user)) for part in self.parts.all()]
url = settings.SUBMISSION_URL + reverse('attempts-submit')
problem_slug = slugify(self.title).replace("-", "_")
extension = self.EXTENSIONS[self.language]
filename = "{0}.{1}".format(problem_slug, extension)
contents = render_to_string("{0}/attempt.{1}".format(self.language, extension), {
"problem": self,
"parts": parts,
"submission_url": url,
"authentication_token": authentication_token
})
return filename, contents
def marking_file(self, user):
attempts = {attempt.part.id: attempt for attempt in self.user_attempts(user)}
parts = [(part, attempts.get(part.id)) for part in self.parts.all()]
username = user.get_full_name() or user.username
problem_slug = slugify(username).replace("-", "_")
extension = self.EXTENSIONS[self.language]
filename = "{0}.{1}".format(problem_slug, extension)
contents = render_to_string("{0}/marking.{1}".format(self.language, extension), {
"problem": self,
"parts": parts,
"user": user,
})
return filename, contents
def bare_file(self, user):
attempts = {attempt.part.id: attempt for attempt in self.user_attempts(user)}
parts = [(part, attempts.get(part.id)) for part in self.parts.all()]
username = user.get_full_name() or user.username
problem_slug = slugify(username).replace("-", "_")
extension = self.EXTENSIONS[self.language]
filename = "{0}.{1}".format(problem_slug, extension)
contents = render_to_string("{0}/bare.{1}".format(self.language, extension), {
"problem": self,
"parts": parts,
"user": user,
})
return filename, contents
def edit_file(self, user):
authentication_token = Token.objects.get(user=user)
url = settings.SUBMISSION_URL + reverse('problems-submit')
problem_slug = slugify(self.title).replace("-", "_")
filename = "{0}_edit.{1}".format(problem_slug, self.EXTENSIONS[self.language])
contents = render_to_string("{0}/edit.{1}".format(self.language, self.EXTENSIONS[self.language]), {
"problem": self,
"submission_url": url,
"authentication_token": authentication_token
})
return filename, contents
def attempts_by_user(self, active_only=True):
attempts = {}
for part in self.parts.all().prefetch_related('attempts', 'attempts__user'):
for attempt in part.attempts.all():
if attempt.user in attempts:
attempts[attempt.user][part] = attempt
else:
attempts[attempt.user] = {part: attempt}
for student in self.problem_set.course.students.all():
if student not in attempts:
attempts[student] = {}
observed_students = self.problem_set.course.observed_students()
if active_only:
observed_students = observed_students.filter(attempts__part__problem=self).distinct()
observed_students = list(observed_students)
for user in observed_students:
user.valid = user.invalid = user.empty = 0
user.these_attempts = [attempts[user].get(part) for part in self.parts.all()]
for attempt in user.these_attempts:
if attempt is None:
user.empty += 1
elif attempt.valid:
user.valid += 1
else:
user.invalid += 1
return observed_students
def attempts_by_user_all(self):
return self.attempts_by_user(active_only=False)
def copy_to(self, problem_set):
new_problem = deepcopy(self)
new_problem.pk = None
new_problem.problem_set = problem_set
new_problem.save()
for part in self.parts.all():
part.copy_to(new_problem)
return new_problem
def content_type(self):
return self.MIMETYPES[self.language]
class Part(OrderWithRespectToMixin, models.Model):
problem = models.ForeignKey(Problem, related_name='parts')
description = models.TextField(blank=True)
template = models.TextField(blank=True)
solution = models.TextField(blank=True)
validation = models.TextField(blank=True)
secret = models.TextField(default="[]", validators=[is_json_string_list])
history = HistoricalRecords()
class Meta:
order_with_respect_to = 'problem'
def __str__(self):
return '@{0:06d} ({1})'.format(self.pk, truncate(self.description))
@property
def guarded_description(self):
return 'Navodila so napisana na listu' if self.problem.problem_set.solution_visibility == self.problem.problem_set.PROBLEM_HIDDEN else self.description
def get_absolute_url(self):
        return '{}#{}'.format(self.problem.problem_set.get_absolute_url(), self.anchor())
def anchor(self):
return 'part-{}'.format(self.pk)
def check_secret(self, secret):
'''
Checks whether a submitted secret corresponds to the official one.
The function accepts a secret (list of strings) and returns the pair:
True, None -- if secret matches the official one
False, None -- if secret has an incorrect length
False, i -- if secret first differs from the official one at index i
'''
official_secret = json.loads(self.secret)
if len(official_secret) != len(secret):
return False, None
for i in range(len(secret)):
if secret[i] != official_secret[i]:
return False, i
return True, None
def copy_to(self, problem):
new_part = deepcopy(self)
new_part.pk = None
new_part.problem = problem
new_part.save()
return new_part
def attempt_token(self, user):
return signing.dumps({
'part': self.pk,
'user': user.pk,
})
|
agpl-3.0
| -6,074,392,959,662,998,000 | 7,656,502,912,196,227,000 | 37.798122 | 159 | 0.620281 | false |
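A framework-free sketch of the comparison performed by Part.check_secret above; the sample secrets are made up for illustration.
# Standalone re-statement of the check_secret logic, outside Django.
import json

official_secret_json = json.dumps(["7", "3.14", "ok"])   # as stored in Part.secret

def check_secret(official_json, submitted):
    official = json.loads(official_json)
    if len(official) != len(submitted):
        return False, None              # wrong length
    for i, value in enumerate(submitted):
        if value != official[i]:
            return False, i             # first differing index
    return True, None

print(check_secret(official_secret_json, ["7", "3.14", "ok"]))   # (True, None)
print(check_secret(official_secret_json, ["7", "2.71", "ok"]))   # (False, 1)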
davidzchen/tensorflow
|
tensorflow/python/summary/writer/event_file_writer_v2.py
|
19
|
5699
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Writes events to disk in a logdir."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.platform import gfile
class EventFileWriterV2(object):
"""Writes `Event` protocol buffers to an event file via the graph.
The `EventFileWriterV2` class is backed by the summary file writer in the v2
summary API (currently in tf.contrib.summary), so it uses a shared summary
writer resource and graph ops to write events.
As with the original EventFileWriter, this class will asynchronously write
Event protocol buffers to the backing file. The Event file is encoded using
the tfrecord format, which is similar to RecordIO.
"""
def __init__(self, session, logdir, max_queue=10, flush_secs=120,
filename_suffix=''):
"""Creates an `EventFileWriterV2` and an event file to write to.
On construction, this calls `tf.contrib.summary.create_file_writer` within
the graph from `session.graph` to look up a shared summary writer resource
for `logdir` if one exists, and create one if not. Creating the summary
writer resource in turn creates a new event file in `logdir` to be filled
with `Event` protocol buffers passed to `add_event`. Graph ops to control
this writer resource are added to `session.graph` during this init call;
stateful methods on this class will call `session.run()` on these ops.
Note that because the underlying resource is shared, it is possible that
other parts of the code using the same session may interact independently
with the resource, e.g. by flushing or even closing it. It is the caller's
responsibility to avoid any undesirable sharing in this regard.
The remaining arguments to the constructor (`flush_secs`, `max_queue`, and
`filename_suffix`) control the construction of the shared writer resource
if one is created. If an existing resource is reused, these arguments have
no effect. See `tf.contrib.summary.create_file_writer` for details.
Args:
session: A `tf.compat.v1.Session`. Session that will hold shared writer
resource. The writer ops will be added to session.graph during this
init call.
logdir: A string. Directory where event file will be written.
max_queue: Integer. Size of the queue for pending events and summaries.
flush_secs: Number. How often, in seconds, to flush the
pending events and summaries to disk.
filename_suffix: A string. Every event file's name is suffixed with
`filename_suffix`.
"""
self._session = session
self._logdir = logdir
self._closed = False
if not gfile.IsDirectory(self._logdir):
gfile.MakeDirs(self._logdir)
with self._session.graph.as_default():
with ops.name_scope('filewriter'):
file_writer = summary_ops_v2.create_file_writer(
logdir=self._logdir,
max_queue=max_queue,
flush_millis=flush_secs * 1000,
filename_suffix=filename_suffix)
with summary_ops_v2.always_record_summaries(), file_writer.as_default():
self._event_placeholder = array_ops.placeholder_with_default(
constant_op.constant('unused', dtypes.string),
shape=[])
self._add_event_op = summary_ops_v2.import_event(
self._event_placeholder)
self._init_op = file_writer.init()
self._flush_op = file_writer.flush()
self._close_op = file_writer.close()
self._session.run(self._init_op)
def get_logdir(self):
"""Returns the directory where event file will be written."""
return self._logdir
def reopen(self):
"""Reopens the EventFileWriter.
Can be called after `close()` to add more events in the same directory.
The events will go into a new events file.
Does nothing if the EventFileWriter was not closed.
"""
if self._closed:
self._closed = False
self._session.run(self._init_op)
def add_event(self, event):
"""Adds an event to the event file.
Args:
event: An `Event` protocol buffer.
"""
if not self._closed:
event_pb = event.SerializeToString()
self._session.run(
self._add_event_op, feed_dict={self._event_placeholder: event_pb})
def flush(self):
"""Flushes the event file to disk.
Call this method to make sure that all pending events have been written to
disk.
"""
self._session.run(self._flush_op)
def close(self):
"""Flushes the event file to disk and close the file.
Call this method when you do not need the summary writer anymore.
"""
if not self._closed:
self.flush()
self._session.run(self._close_op)
self._closed = True
|
apache-2.0
| -98,534,188,182,254,200 | 782,535,447,006,322,800 | 39.41844 | 80 | 0.688542 | false |
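A hedged usage sketch for EventFileWriterV2 above. It assumes a TensorFlow build where the 1.x graph/session API and this module path are still importable; the log directory and event contents are arbitrary.
# Illustrative sketch only; availability of these imports depends on the TF version.
import tensorflow.compat.v1 as tf
from tensorflow.core.util import event_pb2
from tensorflow.python.summary.writer.event_file_writer_v2 import EventFileWriterV2

tf.disable_eager_execution()
with tf.Session() as sess:
    writer = EventFileWriterV2(sess, logdir='/tmp/example_logs')  # arbitrary logdir
    event = event_pb2.Event(wall_time=0.0, step=1, file_version='brain.Event:2')
    writer.add_event(event)   # serialized and fed to the import_event op built in __init__
    writer.flush()
    writer.close()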
neerajvashistha/pa-dude
|
lib/python2.7/site-packages/nltk/tag/util.py
|
3
|
2281
|
# Natural Language Toolkit: Tagger Utilities
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Edward Loper <[email protected]>
# Steven Bird <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
def str2tuple(s, sep='/'):
"""
Given the string representation of a tagged token, return the
corresponding tuple representation. The rightmost occurrence of
*sep* in *s* will be used to divide *s* into a word string and
a tag string. If *sep* does not occur in *s*, return (s, None).
>>> from nltk.tag.util import str2tuple
>>> str2tuple('fly/NN')
('fly', 'NN')
:type s: str
:param s: The string representation of a tagged token.
:type sep: str
:param sep: The separator string used to separate word strings
from tags.
"""
loc = s.rfind(sep)
if loc >= 0:
return (s[:loc], s[loc+len(sep):].upper())
else:
return (s, None)
def tuple2str(tagged_token, sep='/'):
"""
Given the tuple representation of a tagged token, return the
corresponding string representation. This representation is
formed by concatenating the token's word string, followed by the
separator, followed by the token's tag. (If the tag is None,
then just return the bare word string.)
>>> from nltk.tag.util import tuple2str
>>> tagged_token = ('fly', 'NN')
>>> tuple2str(tagged_token)
'fly/NN'
:type tagged_token: tuple(str, str)
:param tagged_token: The tuple representation of a tagged token.
:type sep: str
:param sep: The separator string used to separate word strings
from tags.
"""
word, tag = tagged_token
if tag is None:
return word
else:
assert sep not in tag, 'tag may not contain sep!'
return '%s%s%s' % (word, sep, tag)
def untag(tagged_sentence):
"""
Given a tagged sentence, return an untagged version of that
sentence. I.e., return a list containing the first element
of each tuple in *tagged_sentence*.
>>> from nltk.tag.util import untag
>>> untag([('John', 'NNP'), ('saw', 'VBD'), ('Mary', 'NNP')])
['John', 'saw', 'Mary']
"""
return [w for (w, t) in tagged_sentence]
|
mit
| 1,316,622,171,173,089,800 | 8,092,368,532,971,295,000 | 30.680556 | 69 | 0.621219 | false |
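The docstrings above already carry doctest examples; here is a compact round-trip sketch combining the three helpers, assuming NLTK is installed.
# Round-trip: string -> tuples -> words -> string.
from nltk.tag.util import str2tuple, tuple2str, untag

tagged = [str2tuple(tok) for tok in 'John/NNP saw/VBD Mary/NNP'.split()]
print(tagged)                                    # [('John', 'NNP'), ('saw', 'VBD'), ('Mary', 'NNP')]
print(untag(tagged))                             # ['John', 'saw', 'Mary']
print(' '.join(tuple2str(t) for t in tagged))    # 'John/NNP saw/VBD Mary/NNP'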
kmp3325/linguine-python
|
linguine/ops/remove_caps.py
|
4
|
1485
|
#!/usr/bin/env python
"""
Removes all non-proper-noun capitals from a given text.
Removes capital letters from text, even for Bill Clinton.
Accepts as input a non-tokenized string.
There are multiple types of cap-removal to do.
greedy: removes all caps. GOAL -> goal, Mr. -> mr., Cook -> cook
preserve_nnp: removes capitalization that isn't a proper noun.
"""
from textblob import TextBlob
class RemoveCapsGreedy:
def run(self, data):
results = []
for corpus in data:
corpus.contents = corpus.contents.lower()
results.append(corpus)
return results
class RemoveCapsPreserveNNP:
def run(self, data):
results = []
for corpus in data:
blob = TextBlob(corpus.contents)
tags = blob.tags
words = list()
wordCount = 0
tokenCount = 0
while(tokenCount < len(blob.tokens)):
if blob.tokens[tokenCount][0].isalpha():
if tags[wordCount][1] != 'NNP':
words.append(blob.words[wordCount].lower())
else:
words.append(blob.words[wordCount])
wordCount += 1
else:
words[len(words)-1] = ''.join(
[words[len(words)-1],blob.tokens[tokenCount]])
tokenCount += 1
corpus.contents = (' '.join(words))
results.append(corpus)
return results
|
mit
| 3,673,808,324,904,703,500 | 3,293,441,037,757,736,000 | 34.357143 | 67 | 0.553535 | false |
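A usage sketch for the greedy variant above. The Corpus class here is a stand-in defined only for illustration, since this file relies on a corpus object's .contents attribute without defining the type.
# Stand-in corpus type; assumes the class above is importable from the path of this file.
from linguine.ops.remove_caps import RemoveCapsGreedy

class Corpus:
    def __init__(self, contents):
        self.contents = contents

corpora = [Corpus('The GOAL of Mr. Cook'), Corpus('Another EXAMPLE Text')]
lowered = RemoveCapsGreedy().run(corpora)
print([c.contents for c in lowered])   # ['the goal of mr. cook', 'another example text']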
leiferikb/bitpop
|
build/third_party/twisted_10_2/twisted/mail/pb.py
|
57
|
3847
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.spread import pb
from twisted.spread import banana
import os
import types
class Maildir(pb.Referenceable):
def __init__(self, directory, rootDirectory):
self.virtualDirectory = directory
self.rootDirectory = rootDirectory
self.directory = os.path.join(rootDirectory, directory)
def getFolderMessage(self, folder, name):
if '/' in name:
raise IOError("can only open files in '%s' directory'" % folder)
        fp = open(os.path.join(self.directory, folder, name))
try:
return fp.read()
finally:
fp.close()
def deleteFolderMessage(self, folder, name):
if '/' in name:
raise IOError("can only delete files in '%s' directory'" % folder)
os.rename(os.path.join(self.directory, folder, name),
os.path.join(self.rootDirectory, '.Trash', folder, name))
def deleteNewMessage(self, name):
return self.deleteFolderMessage('new', name)
remote_deleteNewMessage = deleteNewMessage
def deleteCurMessage(self, name):
return self.deleteFolderMessage('cur', name)
remote_deleteCurMessage = deleteCurMessage
def getNewMessages(self):
return os.listdir(os.path.join(self.directory, 'new'))
remote_getNewMessages = getNewMessages
def getCurMessages(self):
return os.listdir(os.path.join(self.directory, 'cur'))
remote_getCurMessages = getCurMessages
def getNewMessage(self, name):
return self.getFolderMessage('new', name)
remote_getNewMessage = getNewMessage
def getCurMessage(self, name):
return self.getFolderMessage('cur', name)
remote_getCurMessage = getCurMessage
def getSubFolder(self, name):
if name[0] == '.':
raise IOError("subfolder name cannot begin with a '.'")
name = name.replace('/', ':')
        if self.virtualDirectory == '.':
name = '.'+name
else:
name = self.virtualDirectory+':'+name
if not self._isSubFolder(name):
raise IOError("not a subfolder")
return Maildir(name, self.rootDirectory)
remote_getSubFolder = getSubFolder
def _isSubFolder(self, name):
        return (os.path.isdir(os.path.join(self.rootDirectory, name)) and
                os.path.isfile(os.path.join(self.rootDirectory, name,
                                            'maildirfolder')))
class MaildirCollection(pb.Referenceable):
def __init__(self, root):
self.root = root
def getSubFolders(self):
return os.listdir(self.getRoot())
remote_getSubFolders = getSubFolders
def getSubFolder(self, name):
if '/' in name or name[0] == '.':
raise IOError("invalid name")
return Maildir('.', os.path.join(self.getRoot(), name))
remote_getSubFolder = getSubFolder
class MaildirBroker(pb.Broker):
def proto_getCollection(self, requestID, name, domain, password):
collection = self._getCollection()
if collection is None:
self.sendError(requestID, "permission denied")
else:
self.sendAnswer(requestID, collection)
def getCollection(self, name, domain, password):
if not self.domains.has_key(domain):
return
domain = self.domains[domain]
if (domain.dbm.has_key(name) and
domain.dbm[name] == password):
return MaildirCollection(domain.userDirectory(name))
class MaildirClient(pb.Broker):
def getCollection(self, name, domain, password, callback, errback):
requestID = self.newRequestID()
self.waitingForAnswers[requestID] = callback, errback
self.sendCall("getCollection", requestID, name, domain, password)
|
gpl-3.0
| -1,445,357,387,170,181,400 | -5,579,381,284,750,375,000 | 32.452174 | 78 | 0.6366 | false |
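A local, non-networked sketch of the Maildir wrapper above; the root path is hypothetical and is given the new/ and cur/ subdirectories the class expects before use.
# Exercises only the file-system side of Maildir; no Perspective Broker involved.
import os

root = '/tmp/example-maildir'              # hypothetical maildir root
for sub in ('new', 'cur'):
    subdir = os.path.join(root, sub)
    if not os.path.isdir(subdir):
        os.makedirs(subdir)

mailbox = Maildir('.', root)               # Maildir as defined above
print(mailbox.getNewMessages())            # file names under <root>/new, likely []
print(mailbox.getCurMessages())            # file names under <root>/cur, likely []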
exelearning/iteexe
|
nevow/stan.py
|
14
|
16657
|
# Copyright (c) 2004 Divmod.
# See LICENSE for details.
"""An s-expression-like syntax for expressing xml in pure python.
Stan tags allow you to build XML documents using Python. Stan tags
have special attributes that enable the developer to insert hooks in
the document for locating data and custom rendering.
Stan is a DOM, or Document Object Model, implemented using
basic Python types and functions called "flatteners". A flattener is
a function that knows how to turn an object of a specific type
into something that is closer to an HTML string. Stan differs
from the W3C DOM by not being as cumbersome and heavy
weight. Since the object model is built using simple python types
such as lists, strings, and dictionaries, the API is simpler and
constructing a DOM less cumbersome.
Stan also makes it convenient to build trees of XML in pure python
code. See nevow.stan.Tag for details, and nevow.tags for tag
prototypes for all of the XHTML element types.
"""
from __future__ import generators
import warnings
import sys
from nevow import inevow
class Proto(str):
"""Proto is a string subclass. Instances of Proto, which are constructed
with a string, will construct Tag instances in response to __call__
and __getitem__, delegating responsibility to the tag.
"""
__slots__ = []
def __call__(self, **kw):
return Tag(self)(**kw)
def __getitem__(self, children):
return Tag(self)[children]
def fillSlots(self, slotName, slotValue):
return Tag(self).fillSlots(slotName, slotValue)
def clone(self, deep=True):
return self
class xml(object):
"""XML content marker.
xml contains content that is already correct XML and should not be escaped
to make it XML-safe. xml can contain unicode content and will be encoded to
utf-8 when flattened.
"""
__slots__ = ['content']
def __init__(self, content):
self.content = content
def __repr__(self):
return '<xml %r>' % self.content
class raw(str):
"""Raw content marker.
Raw content is never altered in any way. It is a sequence of bytes that will
be passed through unchanged to the XML output.
You probably don't want this - look at xml first.
"""
__slots__ = []
def cdata(data):
"""CDATA section. data must be a string
"""
return xml('<![CDATA[%s]]>' % data)
class directive(object):
"""Marker for a directive in a template
"""
__slots__ = ['name']
def __init__(self, name):
self.name = name
def __repr__(self):
return "directive('%s')" % self.name
class slot(object):
"""Marker for slot insertion in a template
"""
__slots__ = ['name', 'children', 'default']
def __init__(self, name, default=None):
self.name = name
self.children = []
self.default = default
def __repr__(self):
return "slot('%s')" % self.name
def __getitem__(self, children):
"""Allow slots to have children. These children will not show up in the
output, but they will be searched for patterns.
"""
if not isinstance(children, (list, tuple)):
children = [children]
self.children.extend(children)
return self
def __iter__(self):
"""Prevent an infinite loop if someone tries to do
for x in slot('foo'):
"""
raise NotImplementedError, "Stan slot instances are not iterable."
class Tag(object):
"""Tag instances represent XML tags with a tag name, attributes,
and children. Tag instances can be constructed using the Prototype
tags in the 'tags' module, or may be constructed directly with a tag
name. Tags have two special methods, __call__ and __getitem__,
which make representing trees of XML natural using pure python
syntax. See the docstrings for these methods for more details.
"""
__implements__ = inevow.IQ,
specials = ['data', 'render', 'remember', 'pattern', 'key', 'macro']
slotData = None
def __init__(self, tag, attributes=None, children=None, specials=None):
self.tagName = tag
if attributes is None:
self.attributes = {}
else:
self.attributes = attributes
if children is None:
self.children = []
else:
self.children = children
if specials is None:
self._specials = {}
else:
self._specials = specials
def fillSlots(self, slotName, slotValue):
"""Remember the stan 'slotValue' with the name 'slotName' at this position
in the DOM. During the rendering of children of this node, slots with
the name 'slotName' will render themselves as 'slotValue'.
"""
if self.slotData is None:
self.slotData = {}
self.slotData[slotName] = slotValue
return self
def patternGenerator(self, pattern, default=None):
"""Returns a psudeo-Tag which will generate clones of matching
pattern tags forever, looping around to the beginning when running
out of unique matches.
If no matches are found, and default is None, raise an exception,
otherwise, generate clones of default forever.
You can use the normal stan syntax on the return value.
Useful to find repeating pattern elements. Example rendering function:
>>> def simpleSequence(context, data):
... pattern = context.patternCloner('item')
... return [pattern(data=element) for element in data]
"""
patterner = _locatePatterns(self, pattern, default)
return PatternTag(patterner)
def allPatterns(self, pattern):
"""Return a list of all matching pattern tags, cloned.
Useful if you just want to insert them in the output in one
place.
E.g. the sequence renderer's header and footer are found with this.
"""
return [tag.clone(deep=False, clearPattern=True) for tag in
specialMatches(self, 'pattern', pattern)]
def onePattern(self, pattern):
"""Return a single matching pattern, cloned.
If there is more than one matching pattern or no matching patterns,
raise an exception.
Useful in the case where you want to locate one and only one
sub-tag and do something with it.
"""
return _locateOne(pattern,
lambda pattern: specialMatches(
self, 'pattern', pattern),
'pattern').clone(deep=False, clearPattern=True)
def __call__(self, **kw):
"""Change attributes of this tag. This is implemented using
__call__ because it then allows the natural syntax::
table(width="100%", height="50%", border="1")
Attributes may be 'invisible' tag instances (so that
C{a(href=invisible(data="foo", render=myhrefrenderer))} works),
strings, functions, or any other object which has a registered
flattener.
If the attribute is a python keyword, such as 'class', you can
add an underscore to the name, like 'class_'.
A few magic attributes have values other than these, as they
are not serialized for output but rather have special purposes
of their own:
- data: The value is saved on the context stack and passed to
render functions.
- render: A function to call that may modify the tag in any
way desired.
- remember: Remember the value on the context stack with
context.remember(value) for later lookup with
context.locate()
- pattern: Value should be a key that can later be used to
locate this tag with context.patternGenerator() or
context.allPatterns()
- key: A string used to give the node a unique label. This
is automatically namespaced, so in C{span(key="foo")[span(key="bar")]}
the inner span actually has a key of 'foo.bar'. The key is
            intended for use as e.g. an html 'id' attribute, but is
            not automatically output.
- macro - A function which will be called once in the lifetime
of the template, when the template is loaded. The return
result from this function will replace this Tag in the template.
"""
if not kw:
return self
for name in self.specials:
if kw.has_key(name):
setattr(self, name, kw[name])
del kw[name]
for k, v in kw.iteritems():
if k[-1] == '_':
k = k[:-1]
elif k[0] == '_':
k = k[1:]
self.attributes[k] = v
return self
def __getitem__(self, children):
"""Add children to this tag. Multiple children may be added by
passing a tuple or a list. Children may be other tag instances,
strings, functions, or any other object which has a registered
flatten.
This is implemented using __getitem__ because it then allows
the natural syntax::
html[
head[
title["Hello World!"]
],
body[
"This is a page",
h3["How are you!"],
div(style="color: blue")["I hope you are fine."]
]
]
"""
if not isinstance(children, (list, tuple)):
children = [children]
self.children.extend(children)
return self
def __iter__(self):
"""Prevent an infinite loop if someone tries to do
for x in stantaginstance:
"""
raise NotImplementedError, "Stan tag instances are not iterable."
def _clearSpecials(self):
"""Clears all the specials in this tag. For use by flatstan.
"""
self._specials = {}
# FIXME: make this function actually be used.
def precompilable(self):
"""Is this tag precompilable?
Tags are precompilable if they will not be modified by a user
render function.
Currently, the following attributes prevent the tag from being
precompiled:
- render (because the function can modify its own tag)
- pattern (because it is locatable and thus modifiable by an
enclosing renderer)
"""
return self.render is Unset and self.pattern is Unset
def _clone(self, obj, deep):
if hasattr(obj, 'clone'):
return obj.clone(deep)
elif isinstance(obj, (list, tuple)):
return [self._clone(x, deep)
for x in obj]
else:
return obj
def clone(self, deep=True, clearPattern=False):
"""Return a clone of this tag. If deep is True, clone all of this
tag's children. Otherwise, just shallow copy the children list
without copying the children themselves.
"""
if deep:
newchildren = [self._clone(x, True) for x in self.children]
else:
newchildren = self.children[:]
newattrs = self.attributes.copy()
for key in newattrs:
newattrs[key]=self._clone(newattrs[key], True)
newslotdata = None
if self.slotData:
newslotdata = self.slotData.copy()
for key in newslotdata:
newslotdata[key] = self._clone(newslotdata[key], True)
newtag = Tag(
self.tagName,
attributes=newattrs,
children=newchildren,
specials=self._specials.copy()
)
newtag.slotData = newslotdata
if clearPattern:
newtag.pattern = None
return newtag
def clear(self):
"""Clear any existing children from this tag.
"""
self._specials = {}
self.children = []
return self
def __repr__(self):
rstr = ''
if self.attributes:
rstr += ', attributes=%r' % self.attributes
if self._specials:
rstr += ', specials=%r' % self._specials
if self.children:
rstr += ', children=%r' % self.children
return "Tag(%r%s)" % (self.tagName, rstr)
def freeze(self):
"""Freeze this tag so that making future calls to __call__ or __getitem__ on the
return value will result in clones of this tag.
"""
def forever():
while True:
yield self.clone()
return PatternTag(forever())
class UnsetClass:
def __nonzero__(self):
return False
def __repr__(self):
return "Unset"
Unset=UnsetClass()
def makeAccessors(special):
def getSpecial(self):
return self._specials.get(special, Unset)
def setSpecial(self, data):
self._specials[special] = data
return getSpecial, setSpecial
for name in Tag.specials:
setattr(Tag, name, property(*makeAccessors(name)))
del name
### Pattern machinery
class NodeNotFound(KeyError):
def __str__(self):
return "The %s named %r wasn't found in the template." % tuple(self.args[:2])
class TooManyNodes(Exception):
def __str__(self):
return "More than one %r with the name %r was found." % tuple(self.args[:2])
class PatternTag(object):
'''A pseudotag created by Tag.patternGenerator() which loops
through a sequence of matching patterns.'''
def __init__(self, patterner):
self.pat = patterner.next()
self.patterner = patterner
def next(self):
if self.pat:
p, self.pat = self.pat, None
return p
return self.patterner.next()
def makeForwarder(name):
return lambda self, *args, **kw: getattr(self.next(), name)(*args, **kw)
for forward in ['__call__', '__getitem__', 'fillSlots']:
setattr(PatternTag, forward, makeForwarder(forward))
def _locatePatterns(tag, pattern, default, loop=True):
gen = specialMatches(tag, 'pattern', pattern)
produced = []
for x in gen:
produced.append(x)
cloned = x.clone(deep=False, clearPattern=True)
yield cloned
gen=None
if produced:
if not loop:
return
while True:
for x in produced:
cloned = x.clone(deep=False, clearPattern=True)
yield cloned
if default is None:
raise NodeNotFound, ("pattern", pattern)
if hasattr(default, 'clone'):
while True: yield default.clone(deep=False)
else:
while True: yield default
Tag._locatePatterns = staticmethod(_locatePatterns)
def _locateOne(name, locator, descr):
found = False
for node in locator(name):
if found:
raise TooManyNodes(descr, name)
found = node
if not found:
raise NodeNotFound(descr, name)
return found
def specials(tag, special):
"""Generate tags with special attributes regardless of attribute value.
"""
for childOrContext in getattr(tag, 'children', []):
child = getattr(childOrContext, 'tag', childOrContext)
if getattr(child, special, Unset) is not Unset:
yield child
else:
for match in specials(child, special):
yield match
def specialMatches(tag, special, pattern):
"""Generate special attribute matches starting with the given tag;
if a tag has special, do not look any deeper below that tag, whether
it matches pattern or not. Returns an iterable.
"""
for childOrContext in getattr(tag, 'children', []):
child = getattr(childOrContext, 'tag', childOrContext)
data = getattr(child, special, Unset)
if data == pattern:
yield child
elif data is Unset:
for match in specialMatches(child, special, pattern):
yield match
## End pattern machinery
class CommentProto(Proto):
__slots__ = []
def __call__(self, **kw):
return Comment(self)(**kw)
def __getitem__(self, children):
return Comment(self)[children]
class Comment(Tag):
def __call__(self, **kw):
raise NotImplementedError('comments are not callable')
invisible = Proto('')
class Entity(object):
def __init__(self, name, num, description):
self.name = name
self.num = num
self.description = description
def __repr__(self):
return "Entity(%r, %r, %r)" % (self.name, self.num, self.description)
|
gpl-2.0
| -5,789,473,079,424,382,000 | 1,090,939,248,971,203,800 | 30.6673 | 88 | 0.600768 | false |
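A brief sketch of building a tree with the Proto/Tag machinery above (Python 2, matching the module). Proto instances are constructed by hand here so the example does not depend on the ready-made prototypes in nevow.tags.
# Assumes Proto/Tag from the module above (nevow.stan) are in scope.
html, body, p = Proto('html'), Proto('body'), Proto('p')

page = html[
    body(style="color: blue")[
        p["Hello, world!"],
        p(class_="footnote")["Built with stan-style syntax."],
    ]
]
print(page)   # the Tag repr shows the nested structure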
keto/askbot-devel
|
askbot/migrations/0058_transplant_answer_count_field_2.py
|
17
|
26835
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Question.answer_count'
db.delete_column(u'question', 'answer_count')
def backwards(self, orm):
# Adding field 'Question.answer_count'
db.add_column(u'question', 'answer_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0), keep_default=False)
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'symmetrical': 'False', 'through': "orm['askbot.ActivityAuditStatus']", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousanswer': {
'Meta': {'object_name': 'AnonymousAnswer'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_answers'", 'to': "orm['askbot.Question']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.anonymousquestion': {
'Meta': {'object_name': 'AnonymousQuestion'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.answer': {
'Meta': {'object_name': 'Answer', 'db_table': "u'answer'"},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['askbot.Question']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badgedata': {
'Meta': {'ordering': "('slug',)", 'object_name': 'BadgeData'},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'symmetrical': 'False', 'through': "orm['askbot.Award']", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'askbot.comment': {
'Meta': {'ordering': "('-added_at',)", 'object_name': 'Comment', 'db_table': "u'comment'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'html': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2048'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'offensive_flag_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['auth.User']"})
},
'askbot.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'askbot.favoritequestion': {
'Meta': {'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_questions'", 'to': "orm['auth.User']"})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.postrevision': {
'Meta': {'ordering': "('-revision',)", 'unique_together': "(('answer', 'revision'), ('question', 'revision'))", 'object_name': 'PostRevision'},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Answer']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'postrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Question']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'revision_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '125', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'})
},
'askbot.question': {
'Meta': {'object_name': 'Question', 'db_table': "u'question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'answer_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'to': "orm['auth.User']"}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'closed_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'favorite_questions'", 'symmetrical': 'False', 'through': "orm['askbot.FavoriteQuestion']", 'to': "orm['auth.User']"}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_questions'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_active_in_questions'", 'to': "orm['auth.User']"}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'questions'", 'symmetrical': 'False', 'to': "orm['askbot.Tag']"}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'unique': 'True', 'to': "orm['askbot.Thread']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.questionview': {
'Meta': {'object_name': 'QuestionView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Question']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_views'", 'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']", 'null': 'True', 'blank': 'True'}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'ordering': "('-used_count', 'name')", 'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.thread': {
'Meta': {'object_name': 'Thread'},
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'askbot.vote': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askbot']
|
gpl-3.0
| 2,226,495,234,986,908,700 | -4,500,034,822,156,717,600 | 86.126623 | 221 | 0.557295 | false |
flabby/rocksdb
|
build_tools/amalgamate.py
|
45
|
4700
|
#!/usr/bin/python
# amalgamate.py creates an amalgamation from a unity build.
# It can be run with either Python 2 or 3.
# An amalgamation consists of a header that includes the contents of all public
# headers and a source file that includes the contents of all source files and
# private headers.
#
# This script works by starting with the unity build file and recursively expanding
# #include directives. If the #include is found in a public include directory,
# that header is expanded into the amalgamation header.
#
# A particular header is only expanded once, so this script will
# break if there are multiple inclusions of the same header that are expected to
# expand differently. Similarly, this type of code causes issues:
#
# #ifdef FOO
# #include "bar.h"
# // code here
# #else
# #include "bar.h" // oops, doesn't get expanded
# // different code here
# #endif
#
# The solution is to move the include out of the #ifdef.
from __future__ import print_function
import argparse
from os import path
import re
import sys
include_re = re.compile('^[ \t]*#include[ \t]+"(.*)"[ \t]*$')
included = set()
excluded = set()
def find_header(name, abs_path, include_paths):
samedir = path.join(path.dirname(abs_path), name)
if path.exists(samedir):
return samedir
for include_path in include_paths:
include_path = path.join(include_path, name)
if path.exists(include_path):
return include_path
return None
def expand_include(include_path, f, abs_path, source_out, header_out, include_paths, public_include_paths):
if include_path in included:
return False
included.add(include_path)
with open(include_path) as f:
print('#line 1 "{}"'.format(include_path), file=source_out)
process_file(f, include_path, source_out, header_out, include_paths, public_include_paths)
return True
def process_file(f, abs_path, source_out, header_out, include_paths, public_include_paths):
for (line, text) in enumerate(f):
m = include_re.match(text)
if m:
filename = m.groups()[0]
# first check private headers
include_path = find_header(filename, abs_path, include_paths)
if include_path:
if include_path in excluded:
source_out.write(text)
expanded = False
else:
expanded = expand_include(include_path, f, abs_path, source_out, header_out, include_paths, public_include_paths)
else:
# now try public headers
include_path = find_header(filename, abs_path, public_include_paths)
if include_path:
# found public header
expanded = False
if include_path in excluded:
source_out.write(text)
else:
expand_include(include_path, f, abs_path, header_out, None, public_include_paths, [])
else:
sys.exit("unable to find {}, included in {} on line {}".format(filename, abs_path, line))
if expanded:
print('#line {} "{}"'.format(line+1, abs_path), file=source_out)
elif text != "#pragma once\n":
source_out.write(text)
def main():
parser = argparse.ArgumentParser(description="Transform a unity build into an amalgamation")
parser.add_argument("source", help="source file")
parser.add_argument("-I", action="append", dest="include_paths", help="include paths for private headers")
parser.add_argument("-i", action="append", dest="public_include_paths", help="include paths for public headers")
parser.add_argument("-x", action="append", dest="excluded", help="excluded header files")
parser.add_argument("-o", dest="source_out", help="output C++ file", required=True)
parser.add_argument("-H", dest="header_out", help="output C++ header file", required=True)
args = parser.parse_args()
include_paths = list(map(path.abspath, args.include_paths or []))
public_include_paths = list(map(path.abspath, args.public_include_paths or []))
excluded.update(map(path.abspath, args.excluded or []))
filename = args.source
abs_path = path.abspath(filename)
with open(filename) as f, open(args.source_out, 'w') as source_out, open(args.header_out, 'w') as header_out:
print('#line 1 "{}"'.format(filename), file=source_out)
print('#include "{}"'.format(header_out.name), file=source_out)
process_file(f, abs_path, source_out, header_out, include_paths, public_include_paths)
if __name__ == "__main__":
main()
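# Example invocation (a sketch; the file names and include directories below
# are hypothetical, not the ones used by the actual build):
#
#   python amalgamate.py unity.cc -I . -i include \
#       -o amalgamation.cc -H amalgamation.h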
|
bsd-3-clause
| -3,382,505,217,065,519,600 | 1,529,627,222,524,163,800 | 41.727273 | 133 | 0.636383 | false |
xiangel/hue
|
desktop/core/ext-py/Django-1.6.10/django/db/backends/oracle/base.py
|
17
|
40746
|
"""
Oracle database backend for Django.
Requires cx_Oracle: http://cx-oracle.sourceforge.net/
"""
from __future__ import unicode_literals
import decimal
import re
import sys
import warnings
def _setup_environment(environ):
import platform
# Cygwin requires some special voodoo to set the environment variables
# properly so that Oracle will see them.
if platform.system().upper().startswith('CYGWIN'):
try:
import ctypes
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading ctypes: %s; "
"the Oracle backend requires ctypes to "
"operate correctly under Cygwin." % e)
kernel32 = ctypes.CDLL('kernel32')
for name, value in environ:
kernel32.SetEnvironmentVariableA(name, value)
else:
import os
os.environ.update(environ)
_setup_environment([
# Oracle takes client-side character set encoding from the environment.
('NLS_LANG', '.UTF8'),
# This prevents unicode from getting mangled by getting encoded into the
# potentially non-unicode database character set.
('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'),
])
try:
import cx_Oracle as Database
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e)
try:
import pytz
except ImportError:
pytz = None
from django.db import utils
from django.db.backends import *
from django.db.backends.oracle.client import DatabaseClient
from django.db.backends.oracle.creation import DatabaseCreation
from django.db.backends.oracle.introspection import DatabaseIntrospection
from django.utils.encoding import force_bytes, force_text
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# Check whether cx_Oracle was compiled with the WITH_UNICODE option if cx_Oracle is pre-5.1. This will
# also be True for cx_Oracle 5.1 and in Python 3.0. See #19606
if int(Database.version.split('.', 1)[0]) >= 5 and \
(int(Database.version.split('.', 2)[1]) >= 1 or
not hasattr(Database, 'UNICODE')):
convert_unicode = force_text
else:
convert_unicode = force_bytes
class Oracle_datetime(datetime.datetime):
"""
A datetime object, with an additional class attribute
to tell cx_Oracle to save the microseconds too.
"""
input_size = Database.TIMESTAMP
@classmethod
def from_datetime(cls, dt):
return Oracle_datetime(dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second, dt.microsecond)
class DatabaseFeatures(BaseDatabaseFeatures):
empty_fetchmany_value = ()
needs_datetime_string_cast = False
interprets_empty_strings_as_nulls = True
uses_savepoints = True
has_select_for_update = True
has_select_for_update_nowait = True
can_return_id_from_insert = True
allow_sliced_subqueries = False
supports_subqueries_in_group_by = False
supports_transactions = True
supports_timezones = False
has_zoneinfo_database = pytz is not None
supports_bitwise_or = False
can_defer_constraint_checks = True
ignores_nulls_in_unique_constraints = False
has_bulk_insert = True
supports_tablespaces = True
supports_sequence_reset = False
atomic_transactions = False
class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django.db.backends.oracle.compiler"
def autoinc_sql(self, table, column):
# To simulate auto-incrementing primary keys in Oracle, we have to
# create a sequence and a trigger.
sq_name = self._get_sequence_name(table)
tr_name = self._get_trigger_name(table)
tbl_name = self.quote_name(table)
col_name = self.quote_name(column)
sequence_sql = """
DECLARE
i INTEGER;
BEGIN
SELECT COUNT(*) INTO i FROM USER_CATALOG
WHERE TABLE_NAME = '%(sq_name)s' AND TABLE_TYPE = 'SEQUENCE';
IF i = 0 THEN
EXECUTE IMMEDIATE 'CREATE SEQUENCE "%(sq_name)s"';
END IF;
END;
/""" % locals()
trigger_sql = """
CREATE OR REPLACE TRIGGER "%(tr_name)s"
BEFORE INSERT ON %(tbl_name)s
FOR EACH ROW
WHEN (new.%(col_name)s IS NULL)
BEGIN
SELECT "%(sq_name)s".nextval
INTO :new.%(col_name)s FROM dual;
END;
/""" % locals()
return sequence_sql, trigger_sql
def cache_key_culling_sql(self):
return """
SELECT cache_key
FROM (SELECT cache_key, rank() OVER (ORDER BY cache_key) AS rank FROM %s)
WHERE rank = %%s + 1
"""
def date_extract_sql(self, lookup_type, field_name):
if lookup_type == 'week_day':
# TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday.
return "TO_CHAR(%s, 'D')" % field_name
else:
# http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions050.htm
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_interval_sql(self, sql, connector, timedelta):
"""
Implements the interval functionality for expressions
format for Oracle:
(datefield + INTERVAL '3 00:03:20.000000' DAY(1) TO SECOND(6))
"""
minutes, seconds = divmod(timedelta.seconds, 60)
hours, minutes = divmod(minutes, 60)
days = str(timedelta.days)
day_precision = len(days)
fmt = "(%s %s INTERVAL '%s %02d:%02d:%02d.%06d' DAY(%d) TO SECOND(6))"
return fmt % (sql, connector, days, hours, minutes, seconds,
timedelta.microseconds, day_precision)
def date_trunc_sql(self, lookup_type, field_name):
# http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions230.htm#i1002084
if lookup_type in ('year', 'month'):
return "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
else:
return "TRUNC(%s)" % field_name
# Oracle crashes with "ORA-03113: end-of-file on communication channel"
# if the time zone name is passed in parameter. Use interpolation instead.
# https://groups.google.com/forum/#!msg/django-developers/zwQju7hbG78/9l934yelwfsJ
# This regexp matches all time zone names from the zoneinfo database.
_tzname_re = re.compile(r'^[\w/:+-]+$')
def _convert_field_to_tz(self, field_name, tzname):
if not self._tzname_re.match(tzname):
raise ValueError("Invalid time zone name: %s" % tzname)
# Convert from UTC to local time, returning TIMESTAMP WITH TIME ZONE.
result = "(FROM_TZ(%s, '0:00') AT TIME ZONE '%s')" % (field_name, tzname)
        # Extracting from a TIMESTAMP WITH TIME ZONE ignores the time zone.
# Convert to a DATETIME, which is called DATE by Oracle. There's no
# built-in function to do that; the easiest is to go through a string.
result = "TO_CHAR(%s, 'YYYY-MM-DD HH24:MI:SS')" % result
result = "TO_DATE(%s, 'YYYY-MM-DD HH24:MI:SS')" % result
# Re-convert to a TIMESTAMP because EXTRACT only handles the date part
# on DATE values, even though they actually store the time part.
return "CAST(%s AS TIMESTAMP)" % result
def datetime_extract_sql(self, lookup_type, field_name, tzname):
if settings.USE_TZ:
field_name = self._convert_field_to_tz(field_name, tzname)
if lookup_type == 'week_day':
# TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday.
sql = "TO_CHAR(%s, 'D')" % field_name
else:
# http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions050.htm
sql = "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
return sql, []
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
if settings.USE_TZ:
field_name = self._convert_field_to_tz(field_name, tzname)
# http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions230.htm#i1002084
if lookup_type in ('year', 'month'):
sql = "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
elif lookup_type == 'day':
sql = "TRUNC(%s)" % field_name
elif lookup_type == 'hour':
sql = "TRUNC(%s, 'HH24')" % field_name
elif lookup_type == 'minute':
sql = "TRUNC(%s, 'MI')" % field_name
else:
sql = field_name # Cast to DATE removes sub-second precision.
return sql, []
def convert_values(self, value, field):
if isinstance(value, Database.LOB):
value = value.read()
if field and field.get_internal_type() == 'TextField':
value = force_text(value)
# Oracle stores empty strings as null. We need to undo this in
# order to adhere to the Django convention of using the empty
# string instead of null, but only if the field accepts the
# empty string.
if value is None and field and field.empty_strings_allowed:
value = ''
# Convert 1 or 0 to True or False
elif value in (1, 0) and field and field.get_internal_type() in ('BooleanField', 'NullBooleanField'):
value = bool(value)
# Force floats to the correct type
elif value is not None and field and field.get_internal_type() == 'FloatField':
value = float(value)
# Convert floats to decimals
elif value is not None and field and field.get_internal_type() == 'DecimalField':
value = util.typecast_decimal(field.format_number(value))
# cx_Oracle always returns datetime.datetime objects for
# DATE and TIMESTAMP columns, but Django wants to see a
# python datetime.date, .time, or .datetime. We use the type
# of the Field to determine which to cast to, but it's not
# always available.
# As a workaround, we cast to date if all the time-related
# values are 0, or to time if the date is 1/1/1900.
# This could be cleaned a bit by adding a method to the Field
# classes to normalize values from the database (the to_python
# method is used for validation and isn't what we want here).
elif isinstance(value, Database.Timestamp):
if field and field.get_internal_type() == 'DateTimeField':
pass
elif field and field.get_internal_type() == 'DateField':
value = value.date()
elif field and field.get_internal_type() == 'TimeField' or (value.year == 1900 and value.month == value.day == 1):
value = value.time()
elif value.hour == value.minute == value.second == value.microsecond == 0:
value = value.date()
return value
def deferrable_sql(self):
return " DEFERRABLE INITIALLY DEFERRED"
def drop_sequence_sql(self, table):
return "DROP SEQUENCE %s;" % self.quote_name(self._get_sequence_name(table))
def fetch_returned_insert_id(self, cursor):
return int(cursor._insert_id_var.getvalue())
def field_cast_sql(self, db_type, internal_type):
if db_type and db_type.endswith('LOB'):
return "DBMS_LOB.SUBSTR(%s, 4000)"
else:
return "%s"
def last_executed_query(self, cursor, sql, params):
# http://cx-oracle.sourceforge.net/html/cursor.html#Cursor.statement
# The DB API definition does not define this attribute.
statement = cursor.statement
if statement and six.PY2 and not isinstance(statement, unicode):
statement = statement.decode('utf-8')
        # Unlike Psycopg's `query` and MySQLdb's `_last_executed`, CxOracle's
# `statement` doesn't contain the query parameters. refs #20010.
return super(DatabaseOperations, self).last_executed_query(cursor, statement, params)
def last_insert_id(self, cursor, table_name, pk_name):
sq_name = self._get_sequence_name(table_name)
cursor.execute('SELECT "%s".currval FROM dual' % sq_name)
return cursor.fetchone()[0]
def lookup_cast(self, lookup_type):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
def max_in_list_size(self):
return 1000
def max_name_length(self):
return 30
def prep_for_iexact_query(self, x):
return x
def process_clob(self, value):
if value is None:
return ''
return force_text(value.read())
def quote_name(self, name):
# SQL92 requires delimited (quoted) names to be case-sensitive. When
# not quoted, Oracle has case-insensitive behavior for identifiers, but
# always defaults to uppercase.
# We simplify things by making Oracle identifiers always uppercase.
if not name.startswith('"') and not name.endswith('"'):
name = '"%s"' % util.truncate_name(name.upper(),
self.max_name_length())
# Oracle puts the query text into a (query % args) construct, so % signs
# in names need to be escaped. The '%%' will be collapsed back to '%' at
# that stage so we aren't really making the name longer here.
name = name.replace('%','%%')
return name.upper()
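    # A rough illustration of the behaviour described above (assuming the
    # default 30-character name limit):
    #   quote_name('django_content_type') -> '"DJANGO_CONTENT_TYPE"'
    #   quote_name('100%_table')          -> '"100%%_TABLE"'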
def random_function_sql(self):
return "DBMS_RANDOM.RANDOM"
def regex_lookup_9(self, lookup_type):
raise NotImplementedError("Regexes are not supported in Oracle before version 10g.")
def regex_lookup_10(self, lookup_type):
if lookup_type == 'regex':
match_option = "'c'"
else:
match_option = "'i'"
return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option
def regex_lookup(self, lookup_type):
# If regex_lookup is called before it's been initialized, then create
        # a cursor to initialize it and recurse.
self.connection.cursor()
return self.connection.ops.regex_lookup(lookup_type)
def return_insert_id(self):
return "RETURNING %s INTO %%s", (InsertIdVar(),)
def savepoint_create_sql(self, sid):
return convert_unicode("SAVEPOINT " + self.quote_name(sid))
def savepoint_rollback_sql(self, sid):
return convert_unicode("ROLLBACK TO SAVEPOINT " + self.quote_name(sid))
def sql_flush(self, style, tables, sequences, allow_cascade=False):
# Return a list of 'TRUNCATE x;', 'TRUNCATE y;',
# 'TRUNCATE z;'... style SQL statements
if tables:
# Oracle does support TRUNCATE, but it seems to get us into
# FK referential trouble, whereas DELETE FROM table works.
sql = ['%s %s %s;' % (
style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# Since we've just deleted all the rows, running our sequence
# ALTER code will reset the sequence to 0.
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
def sequence_reset_by_name_sql(self, style, sequences):
sql = []
for sequence_info in sequences:
sequence_name = self._get_sequence_name(sequence_info['table'])
table_name = self.quote_name(sequence_info['table'])
column_name = self.quote_name(sequence_info['column'] or 'id')
query = _get_sequence_reset_sql() % {'sequence': sequence_name,
'table': table_name,
'column': column_name}
sql.append(query)
return sql
def sequence_reset_sql(self, style, model_list):
from django.db import models
output = []
query = _get_sequence_reset_sql()
for model in model_list:
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
table_name = self.quote_name(model._meta.db_table)
sequence_name = self._get_sequence_name(model._meta.db_table)
column_name = self.quote_name(f.column)
output.append(query % {'sequence': sequence_name,
'table': table_name,
'column': column_name})
# Only one AutoField is allowed per model, so don't
# continue to loop
break
for f in model._meta.many_to_many:
if not f.rel.through:
table_name = self.quote_name(f.m2m_db_table())
sequence_name = self._get_sequence_name(f.m2m_db_table())
column_name = self.quote_name('id')
output.append(query % {'sequence': sequence_name,
'table': table_name,
'column': column_name})
return output
def start_transaction_sql(self):
return ''
def tablespace_sql(self, tablespace, inline=False):
if inline:
return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
else:
return "TABLESPACE %s" % self.quote_name(tablespace)
def value_to_db_date(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
The default implementation transforms the date to text, but that is not
necessary for Oracle.
"""
return value
def value_to_db_datetime(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
        If a naive datetime is passed, it is assumed to be in UTC. Normally
        Django's models.DateTimeField makes sure that, if USE_TZ is True, the
        datetime passed in is timezone aware.
"""
if value is None:
return None
# cx_Oracle doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = value.astimezone(timezone.utc).replace(tzinfo=None)
else:
raise ValueError("Oracle backend does not support timezone-aware datetimes when USE_TZ is False.")
return Oracle_datetime.from_datetime(value)
def value_to_db_time(self, value):
if value is None:
return None
if isinstance(value, six.string_types):
return datetime.datetime.strptime(value, '%H:%M:%S')
# Oracle doesn't support tz-aware times
if timezone.is_aware(value):
raise ValueError("Oracle backend does not support timezone-aware times.")
return Oracle_datetime(1900, 1, 1, value.hour, value.minute,
value.second, value.microsecond)
def year_lookup_bounds_for_date_field(self, value):
# Create bounds as real date values
first = datetime.date(value, 1, 1)
last = datetime.date(value, 12, 31)
return [first, last]
def year_lookup_bounds_for_datetime_field(self, value):
# cx_Oracle doesn't support tz-aware datetimes
bounds = super(DatabaseOperations, self).year_lookup_bounds_for_datetime_field(value)
if settings.USE_TZ:
bounds = [b.astimezone(timezone.utc) for b in bounds]
return [Oracle_datetime.from_datetime(b) for b in bounds]
def combine_expression(self, connector, sub_expressions):
"Oracle requires special cases for %% and & operators in query expressions"
if connector == '%%':
return 'MOD(%s)' % ','.join(sub_expressions)
elif connector == '&':
return 'BITAND(%s)' % ','.join(sub_expressions)
elif connector == '|':
raise NotImplementedError("Bit-wise or is not supported in Oracle.")
return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
def _get_sequence_name(self, table):
name_length = self.max_name_length() - 3
return '%s_SQ' % util.truncate_name(table, name_length).upper()
def _get_trigger_name(self, table):
name_length = self.max_name_length() - 3
return '%s_TR' % util.truncate_name(table, name_length).upper()
def bulk_insert_sql(self, fields, num_values):
items_sql = "SELECT %s FROM DUAL" % ", ".join(["%s"] * len(fields))
return " UNION ALL ".join([items_sql] * num_values)
class _UninitializedOperatorsDescriptor(object):
def __get__(self, instance, owner):
# If connection.operators is looked up before a connection has been
# created, transparently initialize connection.operators to avert an
# AttributeError.
if instance is None:
raise AttributeError("operators not available as class attribute")
# Creating a cursor will initialize the operators.
instance.cursor().close()
return instance.__dict__['operators']
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'oracle'
operators = _UninitializedOperatorsDescriptor()
_standard_operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
}
_likec_operators = _standard_operators.copy()
_likec_operators.update({
'contains': "LIKEC %s ESCAPE '\\'",
'icontains': "LIKEC UPPER(%s) ESCAPE '\\'",
'startswith': "LIKEC %s ESCAPE '\\'",
'endswith': "LIKEC %s ESCAPE '\\'",
'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'",
'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'",
})
Database = Database
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
use_returning_into = self.settings_dict["OPTIONS"].get('use_returning_into', True)
self.features.can_return_id_from_insert = use_returning_into
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def _connect_string(self):
settings_dict = self.settings_dict
if not settings_dict['HOST'].strip():
settings_dict['HOST'] = 'localhost'
if settings_dict['PORT'].strip():
dsn = Database.makedsn(settings_dict['HOST'],
int(settings_dict['PORT']),
settings_dict['NAME'])
else:
dsn = settings_dict['NAME']
return "%s/%s@%s" % (settings_dict['USER'],
settings_dict['PASSWORD'], dsn)
def get_connection_params(self):
conn_params = self.settings_dict['OPTIONS'].copy()
if 'use_returning_into' in conn_params:
del conn_params['use_returning_into']
return conn_params
def get_new_connection(self, conn_params):
conn_string = convert_unicode(self._connect_string())
return Database.connect(conn_string, **conn_params)
def init_connection_state(self):
cursor = self.create_cursor()
# Set the territory first. The territory overrides NLS_DATE_FORMAT
# and NLS_TIMESTAMP_FORMAT to the territory default. When all of
# these are set in single statement it isn't clear what is supposed
# to happen.
cursor.execute("ALTER SESSION SET NLS_TERRITORY = 'AMERICA'")
# Set oracle date to ansi date format. This only needs to execute
# once when we create a new connection. We also set the Territory
# to 'AMERICA' which forces Sunday to evaluate to a '1' in
# TO_CHAR().
cursor.execute(
"ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'"
" NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'"
+ (" TIME_ZONE = 'UTC'" if settings.USE_TZ else ''))
cursor.close()
if 'operators' not in self.__dict__:
# Ticket #14149: Check whether our LIKE implementation will
# work for this connection or we need to fall back on LIKEC.
# This check is performed only once per DatabaseWrapper
# instance per thread, since subsequent connections will use
# the same settings.
cursor = self.create_cursor()
try:
cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s"
% self._standard_operators['contains'],
['X'])
except DatabaseError:
self.operators = self._likec_operators
else:
self.operators = self._standard_operators
cursor.close()
# There's no way for the DatabaseOperations class to know the
# currently active Oracle version, so we do some setups here.
# TODO: Multi-db support will need a better solution (a way to
# communicate the current version).
if self.oracle_version is not None and self.oracle_version <= 9:
self.ops.regex_lookup = self.ops.regex_lookup_9
else:
self.ops.regex_lookup = self.ops.regex_lookup_10
try:
self.connection.stmtcachesize = 20
except:
# Django docs specify cx_Oracle version 4.3.1 or higher, but
# stmtcachesize is available only in 4.3.2 and up.
pass
def create_cursor(self):
return FormatStylePlaceholderCursor(self.connection)
def _commit(self):
if self.connection is not None:
try:
return self.connection.commit()
except Database.DatabaseError as e:
# cx_Oracle 5.0.4 raises a cx_Oracle.DatabaseError exception
# with the following attributes and values:
# code = 2091
# message = 'ORA-02091: transaction rolled back
# 'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS
# _C00102056) violated - parent key not found'
# We convert that particular case to our IntegrityError exception
x = e.args[0]
if hasattr(x, 'code') and hasattr(x, 'message') \
and x.code == 2091 and 'ORA-02291' in x.message:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
# Oracle doesn't support savepoint commits. Ignore them.
def _savepoint_commit(self, sid):
pass
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None):
"""
        To check constraints, we set constraints to immediate. Then, when we're
        done, we must ensure they are returned to deferred.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
try:
if hasattr(self.connection, 'ping'): # Oracle 10g R2 and higher
self.connection.ping()
else:
# Use a cx_Oracle cursor directly, bypassing Django's utilities.
self.connection.cursor().execute("SELECT 1 FROM DUAL")
except Database.Error:
return False
else:
return True
@cached_property
def oracle_version(self):
with self.temporary_connection():
version = self.connection.version
try:
return int(version.split('.')[0])
except ValueError:
return None
class OracleParam(object):
"""
Wrapper object for formatting parameters for Oracle. If the string
representation of the value is large enough (greater than 4000 characters)
the input size needs to be set as CLOB. Alternatively, if the parameter
has an `input_size` attribute, then the value of the `input_size` attribute
will be used instead. Otherwise, no input size will be set for the
parameter when executing the query.
"""
def __init__(self, param, cursor, strings_only=False):
# With raw SQL queries, datetimes can reach this function
# without being converted by DateTimeField.get_db_prep_value.
if settings.USE_TZ and (isinstance(param, datetime.datetime) and
not isinstance(param, Oracle_datetime)):
if timezone.is_naive(param):
warnings.warn("Oracle received a naive datetime (%s)"
" while time zone support is active." % param,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
param = timezone.make_aware(param, default_timezone)
param = Oracle_datetime.from_datetime(param.astimezone(timezone.utc))
# Oracle doesn't recognize True and False correctly in Python 3.
# The conversion done below works both in 2 and 3.
if param is True:
param = "1"
elif param is False:
param = "0"
if hasattr(param, 'bind_parameter'):
self.force_bytes = param.bind_parameter(cursor)
elif isinstance(param, six.memoryview):
self.force_bytes = param
else:
self.force_bytes = convert_unicode(param, cursor.charset,
strings_only)
if hasattr(param, 'input_size'):
# If parameter has `input_size` attribute, use that.
self.input_size = param.input_size
elif isinstance(param, six.string_types) and len(param) > 4000:
# Mark any string param greater than 4000 characters as a CLOB.
self.input_size = Database.CLOB
else:
self.input_size = None
class VariableWrapper(object):
"""
An adapter class for cursor variables that prevents the wrapped object
    from being converted into a string when used to instantiate an OracleParam.
This can be used generally for any other object that should be passed into
Cursor.execute as-is.
"""
def __init__(self, var):
self.var = var
def bind_parameter(self, cursor):
return self.var
def __getattr__(self, key):
return getattr(self.var, key)
def __setattr__(self, key, value):
if key == 'var':
self.__dict__[key] = value
else:
setattr(self.var, key, value)
class InsertIdVar(object):
"""
A late-binding cursor variable that can be passed to Cursor.execute
as a parameter, in order to receive the id of the row created by an
insert statement.
"""
def bind_parameter(self, cursor):
param = cursor.cursor.var(Database.NUMBER)
cursor._insert_id_var = param
return param
class FormatStylePlaceholderCursor(object):
"""
Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var"
style. This fixes it -- but note that if you want to use a literal "%s" in
a query, you'll need to use "%%s".
We also do automatic conversion between Unicode on the Python side and
UTF-8 -- for talking to Oracle -- in here.
"""
charset = 'utf-8'
def __init__(self, connection):
self.cursor = connection.cursor()
# Necessary to retrieve decimal values without rounding error.
self.cursor.numbersAsStrings = True
# Default arraysize of 1 is highly sub-optimal.
self.cursor.arraysize = 100
def _format_params(self, params):
try:
return dict((k,OracleParam(v, self, True)) for k,v in params.items())
except AttributeError:
return tuple([OracleParam(p, self, True) for p in params])
def _guess_input_sizes(self, params_list):
# Try dict handling; if that fails, treat as sequence
if hasattr(params_list[0], 'keys'):
sizes = {}
for params in params_list:
for k, value in params.items():
if value.input_size:
sizes[k] = value.input_size
self.setinputsizes(**sizes)
else:
# It's not a list of dicts; it's a list of sequences
sizes = [None] * len(params_list[0])
for params in params_list:
for i, value in enumerate(params):
if value.input_size:
sizes[i] = value.input_size
self.setinputsizes(*sizes)
def _param_generator(self, params):
# Try dict handling; if that fails, treat as sequence
if hasattr(params, 'items'):
return dict((k, v.force_bytes) for k,v in params.items())
else:
return [p.force_bytes for p in params]
def _fix_for_params(self, query, params):
        # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it
        # does want a trailing ';' but not a trailing '/'. However, these
# characters must be included in the original query in case the query
# is being passed to SQL*Plus.
if query.endswith(';') or query.endswith('/'):
query = query[:-1]
if params is None:
params = []
query = convert_unicode(query, self.charset)
elif hasattr(params, 'keys'):
# Handle params as dict
args = dict((k, ":%s"%k) for k in params.keys())
query = convert_unicode(query % args, self.charset)
else:
# Handle params as sequence
args = [(':arg%d' % i) for i in range(len(params))]
query = convert_unicode(query % tuple(args), self.charset)
return query, self._format_params(params)
def execute(self, query, params=None):
query, params = self._fix_for_params(query, params)
self._guess_input_sizes([params])
try:
return self.cursor.execute(query, self._param_generator(params))
except Database.DatabaseError as e:
# cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def executemany(self, query, params=None):
if not params:
# No params given, nothing to do
return None
# uniform treatment for sequences and iterables
params_iter = iter(params)
query, firstparams = self._fix_for_params(query, next(params_iter))
# we build a list of formatted params; as we're going to traverse it
# more than once, we can't make it lazy by using a generator
formatted = [firstparams]+[self._format_params(p) for p in params_iter]
self._guess_input_sizes(formatted)
try:
return self.cursor.executemany(query,
[self._param_generator(p) for p in formatted])
except Database.DatabaseError as e:
# cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def fetchone(self):
row = self.cursor.fetchone()
if row is None:
return row
return _rowfactory(row, self.cursor)
def fetchmany(self, size=None):
if size is None:
size = self.arraysize
return tuple([_rowfactory(r, self.cursor)
for r in self.cursor.fetchmany(size)])
def fetchall(self):
return tuple([_rowfactory(r, self.cursor)
for r in self.cursor.fetchall()])
def var(self, *args):
return VariableWrapper(self.cursor.var(*args))
def arrayvar(self, *args):
return VariableWrapper(self.cursor.arrayvar(*args))
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return CursorIterator(self.cursor)
class CursorIterator(six.Iterator):
"""Cursor iterator wrapper that invokes our custom row factory."""
def __init__(self, cursor):
self.cursor = cursor
self.iter = iter(cursor)
def __iter__(self):
return self
def __next__(self):
return _rowfactory(next(self.iter), self.cursor)
def _rowfactory(row, cursor):
# Cast numeric values as the appropriate Python type based upon the
# cursor description, and convert strings to unicode.
casted = []
for value, desc in zip(row, cursor.description):
if value is not None and desc[1] is Database.NUMBER:
precision, scale = desc[4:6]
if scale == -127:
if precision == 0:
# NUMBER column: decimal-precision floating point
# This will normally be an integer from a sequence,
# but it could be a decimal value.
if '.' in value:
value = decimal.Decimal(value)
else:
value = int(value)
else:
# FLOAT column: binary-precision floating point.
# This comes from FloatField columns.
value = float(value)
elif precision > 0:
# NUMBER(p,s) column: decimal-precision fixed point.
# This comes from IntField and DecimalField columns.
if scale == 0:
value = int(value)
else:
value = decimal.Decimal(value)
elif '.' in value:
# No type information. This normally comes from a
# mathematical expression in the SELECT list. Guess int
# or Decimal based on whether it has a decimal point.
value = decimal.Decimal(value)
else:
value = int(value)
# datetimes are returned as TIMESTAMP, except the results
# of "dates" queries, which are returned as DATETIME.
elif desc[1] in (Database.TIMESTAMP, Database.DATETIME):
# Confirm that dt is naive before overwriting its tzinfo.
if settings.USE_TZ and value is not None and timezone.is_naive(value):
value = value.replace(tzinfo=timezone.utc)
elif desc[1] in (Database.STRING, Database.FIXED_CHAR,
Database.LONG_STRING):
value = to_unicode(value)
casted.append(value)
return tuple(casted)
def to_unicode(s):
"""
Convert strings to Unicode objects (and return all other data types
unchanged).
"""
if isinstance(s, six.string_types):
return force_text(s)
return s
def _get_sequence_reset_sql():
# TODO: colorize this SQL code with style.SQL_KEYWORD(), etc.
return """
DECLARE
table_value integer;
seq_value integer;
BEGIN
SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s;
SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences
WHERE sequence_name = '%(sequence)s';
WHILE table_value > seq_value LOOP
SELECT "%(sequence)s".nextval INTO seq_value FROM dual;
END LOOP;
END;
/"""
|
apache-2.0
| 4,463,953,310,630,405,600 | 6,763,329,291,090,987,000 | 40.157576 | 126 | 0.598758 | false |
olapaola/olapaola-android-scripting
|
python/src/Lib/bsddb/test/test_basics.py
|
31
|
32840
|
"""
Basic TestCases for BTree and hash DBs, with and without a DBEnv, with
various DB flags, etc.
"""
import os
import errno
import string
from pprint import pprint
import unittest
import time
from test_all import db, test_support, verbose, get_new_environment_path, \
get_new_database_path
DASH = '-'
#----------------------------------------------------------------------
class VersionTestCase(unittest.TestCase):
def test00_version(self):
info = db.version()
if verbose:
print '\n', '-=' * 20
print 'bsddb.db.version(): %s' % (info, )
print db.DB_VERSION_STRING
print '-=' * 20
self.assertEqual(info, (db.DB_VERSION_MAJOR, db.DB_VERSION_MINOR,
db.DB_VERSION_PATCH))
#----------------------------------------------------------------------
class BasicTestCase(unittest.TestCase):
dbtype = db.DB_UNKNOWN # must be set in derived class
dbopenflags = 0
dbsetflags = 0
dbmode = 0660
dbname = None
useEnv = 0
envflags = 0
envsetflags = 0
_numKeys = 1002 # PRIVATE. NOTE: must be an even value
def setUp(self):
if self.useEnv:
self.homeDir=get_new_environment_path()
try:
self.env = db.DBEnv()
self.env.set_lg_max(1024*1024)
self.env.set_tx_max(30)
self.env.set_tx_timestamp(int(time.time()))
self.env.set_flags(self.envsetflags, 1)
self.env.open(self.homeDir, self.envflags | db.DB_CREATE)
self.filename = "test"
# Yes, a bare except is intended, since we're re-raising the exc.
except:
test_support.rmtree(self.homeDir)
raise
else:
self.env = None
self.filename = get_new_database_path()
# create and open the DB
self.d = db.DB(self.env)
self.d.set_flags(self.dbsetflags)
if self.dbname:
self.d.open(self.filename, self.dbname, self.dbtype,
self.dbopenflags|db.DB_CREATE, self.dbmode)
else:
self.d.open(self.filename, # try out keyword args
mode = self.dbmode,
dbtype = self.dbtype,
flags = self.dbopenflags|db.DB_CREATE)
self.populateDB()
def tearDown(self):
self.d.close()
if self.env is not None:
self.env.close()
test_support.rmtree(self.homeDir)
else:
os.remove(self.filename)
def populateDB(self, _txn=None):
d = self.d
for x in range(self._numKeys//2):
key = '%04d' % (self._numKeys - x) # insert keys in reverse order
data = self.makeData(key)
d.put(key, data, _txn)
d.put('empty value', '', _txn)
for x in range(self._numKeys//2-1):
key = '%04d' % x # and now some in forward order
data = self.makeData(key)
d.put(key, data, _txn)
if _txn:
_txn.commit()
num = len(d)
if verbose:
print "created %d records" % num
def makeData(self, key):
return DASH.join([key] * 5)
#----------------------------------------
def test01_GetsAndPuts(self):
d = self.d
if verbose:
print '\n', '-=' * 30
print "Running %s.test01_GetsAndPuts..." % self.__class__.__name__
for key in ['0001', '0100', '0400', '0700', '0999']:
data = d.get(key)
if verbose:
print data
self.assertEqual(d.get('0321'), '0321-0321-0321-0321-0321')
        # By default non-existent keys return None...
self.assertEqual(d.get('abcd'), None)
# ...but they raise exceptions in other situations. Call
# set_get_returns_none() to change it.
try:
d.delete('abcd')
except db.DBNotFoundError, val:
import sys
if sys.version_info[0] < 3 :
self.assertEqual(val[0], db.DB_NOTFOUND)
else :
self.assertEqual(val.args[0], db.DB_NOTFOUND)
if verbose: print val
else:
self.fail("expected exception")
d.put('abcd', 'a new record')
self.assertEqual(d.get('abcd'), 'a new record')
d.put('abcd', 'same key')
if self.dbsetflags & db.DB_DUP:
self.assertEqual(d.get('abcd'), 'a new record')
else:
self.assertEqual(d.get('abcd'), 'same key')
try:
d.put('abcd', 'this should fail', flags=db.DB_NOOVERWRITE)
except db.DBKeyExistError, val:
import sys
if sys.version_info[0] < 3 :
self.assertEqual(val[0], db.DB_KEYEXIST)
else :
self.assertEqual(val.args[0], db.DB_KEYEXIST)
if verbose: print val
else:
self.fail("expected exception")
if self.dbsetflags & db.DB_DUP:
self.assertEqual(d.get('abcd'), 'a new record')
else:
self.assertEqual(d.get('abcd'), 'same key')
d.sync()
d.close()
del d
self.d = db.DB(self.env)
if self.dbname:
self.d.open(self.filename, self.dbname)
else:
self.d.open(self.filename)
d = self.d
self.assertEqual(d.get('0321'), '0321-0321-0321-0321-0321')
if self.dbsetflags & db.DB_DUP:
self.assertEqual(d.get('abcd'), 'a new record')
else:
self.assertEqual(d.get('abcd'), 'same key')
rec = d.get_both('0555', '0555-0555-0555-0555-0555')
if verbose:
print rec
self.assertEqual(d.get_both('0555', 'bad data'), None)
# test default value
data = d.get('bad key', 'bad data')
self.assertEqual(data, 'bad data')
# any object can pass through
data = d.get('bad key', self)
self.assertEqual(data, self)
s = d.stat()
self.assertEqual(type(s), type({}))
if verbose:
print 'd.stat() returned this dictionary:'
pprint(s)
#----------------------------------------
def test02_DictionaryMethods(self):
d = self.d
if verbose:
print '\n', '-=' * 30
print "Running %s.test02_DictionaryMethods..." % \
self.__class__.__name__
for key in ['0002', '0101', '0401', '0701', '0998']:
data = d[key]
self.assertEqual(data, self.makeData(key))
if verbose:
print data
self.assertEqual(len(d), self._numKeys)
keys = d.keys()
self.assertEqual(len(keys), self._numKeys)
self.assertEqual(type(keys), type([]))
d['new record'] = 'a new record'
self.assertEqual(len(d), self._numKeys+1)
keys = d.keys()
self.assertEqual(len(keys), self._numKeys+1)
d['new record'] = 'a replacement record'
self.assertEqual(len(d), self._numKeys+1)
keys = d.keys()
self.assertEqual(len(keys), self._numKeys+1)
if verbose:
print "the first 10 keys are:"
pprint(keys[:10])
self.assertEqual(d['new record'], 'a replacement record')
        # We also check the positional parameter
self.assertEqual(d.has_key('0001', None), 1)
        # We also check the keyword parameter
self.assertEqual(d.has_key('spam', txn=None), 0)
items = d.items()
self.assertEqual(len(items), self._numKeys+1)
self.assertEqual(type(items), type([]))
self.assertEqual(type(items[0]), type(()))
self.assertEqual(len(items[0]), 2)
if verbose:
print "the first 10 items are:"
pprint(items[:10])
values = d.values()
self.assertEqual(len(values), self._numKeys+1)
self.assertEqual(type(values), type([]))
if verbose:
print "the first 10 values are:"
pprint(values[:10])
#----------------------------------------
def test03_SimpleCursorStuff(self, get_raises_error=0, set_raises_error=0):
if verbose:
print '\n', '-=' * 30
print "Running %s.test03_SimpleCursorStuff (get_error %s, set_error %s)..." % \
(self.__class__.__name__, get_raises_error, set_raises_error)
if self.env and self.dbopenflags & db.DB_AUTO_COMMIT:
txn = self.env.txn_begin()
else:
txn = None
c = self.d.cursor(txn=txn)
rec = c.first()
count = 0
while rec is not None:
count = count + 1
if verbose and count % 100 == 0:
print rec
try:
rec = c.next()
except db.DBNotFoundError, val:
if get_raises_error:
import sys
if sys.version_info[0] < 3 :
self.assertEqual(val[0], db.DB_NOTFOUND)
else :
self.assertEqual(val.args[0], db.DB_NOTFOUND)
if verbose: print val
rec = None
else:
self.fail("unexpected DBNotFoundError")
self.assertEqual(c.get_current_size(), len(c.current()[1]),
"%s != len(%r)" % (c.get_current_size(), c.current()[1]))
self.assertEqual(count, self._numKeys)
rec = c.last()
count = 0
while rec is not None:
count = count + 1
if verbose and count % 100 == 0:
print rec
try:
rec = c.prev()
except db.DBNotFoundError, val:
if get_raises_error:
import sys
if sys.version_info[0] < 3 :
self.assertEqual(val[0], db.DB_NOTFOUND)
else :
self.assertEqual(val.args[0], db.DB_NOTFOUND)
if verbose: print val
rec = None
else:
self.fail("unexpected DBNotFoundError")
self.assertEqual(count, self._numKeys)
rec = c.set('0505')
rec2 = c.current()
self.assertEqual(rec, rec2)
self.assertEqual(rec[0], '0505')
self.assertEqual(rec[1], self.makeData('0505'))
self.assertEqual(c.get_current_size(), len(rec[1]))
# make sure we get empty values properly
rec = c.set('empty value')
self.assertEqual(rec[1], '')
self.assertEqual(c.get_current_size(), 0)
try:
n = c.set('bad key')
except db.DBNotFoundError, val:
import sys
if sys.version_info[0] < 3 :
self.assertEqual(val[0], db.DB_NOTFOUND)
else :
self.assertEqual(val.args[0], db.DB_NOTFOUND)
if verbose: print val
else:
if set_raises_error:
self.fail("expected exception")
if n != None:
self.fail("expected None: %r" % (n,))
rec = c.get_both('0404', self.makeData('0404'))
self.assertEqual(rec, ('0404', self.makeData('0404')))
try:
n = c.get_both('0404', 'bad data')
except db.DBNotFoundError, val:
import sys
if sys.version_info[0] < 3 :
self.assertEqual(val[0], db.DB_NOTFOUND)
else :
self.assertEqual(val.args[0], db.DB_NOTFOUND)
if verbose: print val
else:
if get_raises_error:
self.fail("expected exception")
if n != None:
self.fail("expected None: %r" % (n,))
if self.d.get_type() == db.DB_BTREE:
rec = c.set_range('011')
if verbose:
print "searched for '011', found: ", rec
rec = c.set_range('011',dlen=0,doff=0)
if verbose:
print "searched (partial) for '011', found: ", rec
if rec[1] != '': self.fail('expected empty data portion')
ev = c.set_range('empty value')
if verbose:
print "search for 'empty value' returned", ev
if ev[1] != '': self.fail('empty value lookup failed')
c.set('0499')
c.delete()
try:
rec = c.current()
except db.DBKeyEmptyError, val:
if get_raises_error:
import sys
if sys.version_info[0] < 3 :
self.assertEqual(val[0], db.DB_KEYEMPTY)
else :
self.assertEqual(val.args[0], db.DB_KEYEMPTY)
if verbose: print val
else:
self.fail("unexpected DBKeyEmptyError")
else:
if get_raises_error:
self.fail('DBKeyEmptyError exception expected')
c.next()
c2 = c.dup(db.DB_POSITION)
self.assertEqual(c.current(), c2.current())
c2.put('', 'a new value', db.DB_CURRENT)
self.assertEqual(c.current(), c2.current())
self.assertEqual(c.current()[1], 'a new value')
c2.put('', 'er', db.DB_CURRENT, dlen=0, doff=5)
self.assertEqual(c2.current()[1], 'a newer value')
c.close()
c2.close()
if txn:
txn.commit()
# time to abuse the closed cursors and hope we don't crash
methods_to_test = {
'current': (),
'delete': (),
'dup': (db.DB_POSITION,),
'first': (),
'get': (0,),
'next': (),
'prev': (),
'last': (),
'put':('', 'spam', db.DB_CURRENT),
'set': ("0505",),
}
for method, args in methods_to_test.items():
try:
if verbose:
print "attempting to use a closed cursor's %s method" % \
method
# a bug may cause a NULL pointer dereference...
apply(getattr(c, method), args)
except db.DBError, val:
import sys
if sys.version_info[0] < 3 :
self.assertEqual(val[0], 0)
else :
self.assertEqual(val.args[0], 0)
if verbose: print val
else:
self.fail("no exception raised when using a buggy cursor's"
"%s method" % method)
#
# free cursor referencing a closed database, it should not barf:
#
oldcursor = self.d.cursor(txn=txn)
self.d.close()
# this would originally cause a segfault when the cursor for a
# closed database was cleaned up. it should not anymore.
# SF pybsddb bug id 667343
del oldcursor
def test03b_SimpleCursorWithoutGetReturnsNone0(self):
# same test but raise exceptions instead of returning None
if verbose:
print '\n', '-=' * 30
print "Running %s.test03b_SimpleCursorStuffWithoutGetReturnsNone..." % \
self.__class__.__name__
old = self.d.set_get_returns_none(0)
self.assertEqual(old, 2)
self.test03_SimpleCursorStuff(get_raises_error=1, set_raises_error=1)
def test03b_SimpleCursorWithGetReturnsNone1(self):
# same test but raise exceptions instead of returning None
if verbose:
print '\n', '-=' * 30
print "Running %s.test03b_SimpleCursorStuffWithoutGetReturnsNone..." % \
self.__class__.__name__
old = self.d.set_get_returns_none(1)
self.test03_SimpleCursorStuff(get_raises_error=0, set_raises_error=1)
def test03c_SimpleCursorGetReturnsNone2(self):
# same test but raise exceptions instead of returning None
if verbose:
print '\n', '-=' * 30
print "Running %s.test03c_SimpleCursorStuffWithoutSetReturnsNone..." % \
self.__class__.__name__
old = self.d.set_get_returns_none(1)
self.assertEqual(old, 2)
old = self.d.set_get_returns_none(2)
self.assertEqual(old, 1)
self.test03_SimpleCursorStuff(get_raises_error=0, set_raises_error=0)
#----------------------------------------
def test04_PartialGetAndPut(self):
d = self.d
if verbose:
print '\n', '-=' * 30
print "Running %s.test04_PartialGetAndPut..." % \
self.__class__.__name__
key = "partialTest"
data = "1" * 1000 + "2" * 1000
d.put(key, data)
self.assertEqual(d.get(key), data)
self.assertEqual(d.get(key, dlen=20, doff=990),
("1" * 10) + ("2" * 10))
d.put("partialtest2", ("1" * 30000) + "robin" )
self.assertEqual(d.get("partialtest2", dlen=5, doff=30000), "robin")
# There seems to be a bug in DB here... Commented out the test for
# now.
##self.assertEqual(d.get("partialtest2", dlen=5, doff=30010), "")
if self.dbsetflags != db.DB_DUP:
# Partial put with duplicate records requires a cursor
d.put(key, "0000", dlen=2000, doff=0)
self.assertEqual(d.get(key), "0000")
d.put(key, "1111", dlen=1, doff=2)
self.assertEqual(d.get(key), "0011110")
#----------------------------------------
def test05_GetSize(self):
d = self.d
if verbose:
print '\n', '-=' * 30
print "Running %s.test05_GetSize..." % self.__class__.__name__
for i in range(1, 50000, 500):
key = "size%s" % i
#print "before ", i,
d.put(key, "1" * i)
#print "after",
self.assertEqual(d.get_size(key), i)
#print "done"
#----------------------------------------
def test06_Truncate(self):
d = self.d
if verbose:
print '\n', '-=' * 30
print "Running %s.test99_Truncate..." % self.__class__.__name__
d.put("abcde", "ABCDE");
num = d.truncate()
self.assert_(num >= 1, "truncate returned <= 0 on non-empty database")
num = d.truncate()
self.assertEqual(num, 0,
"truncate on empty DB returned nonzero (%r)" % (num,))
#----------------------------------------
def test07_verify(self):
# Verify bug solved in 4.7.3pre8
self.d.close()
d = db.DB(self.env)
d.verify(self.filename)
#----------------------------------------
#----------------------------------------------------------------------
class BasicBTreeTestCase(BasicTestCase):
dbtype = db.DB_BTREE
class BasicHashTestCase(BasicTestCase):
dbtype = db.DB_HASH
class BasicBTreeWithThreadFlagTestCase(BasicTestCase):
dbtype = db.DB_BTREE
dbopenflags = db.DB_THREAD
class BasicHashWithThreadFlagTestCase(BasicTestCase):
dbtype = db.DB_HASH
dbopenflags = db.DB_THREAD
class BasicWithEnvTestCase(BasicTestCase):
dbopenflags = db.DB_THREAD
useEnv = 1
envflags = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK
#----------------------------------------
def test08_EnvRemoveAndRename(self):
if not self.env:
return
if verbose:
print '\n', '-=' * 30
print "Running %s.test08_EnvRemoveAndRename..." % self.__class__.__name__
# can't rename or remove an open DB
self.d.close()
newname = self.filename + '.renamed'
self.env.dbrename(self.filename, None, newname)
self.env.dbremove(newname)
# dbremove and dbrename are in 4.1 and later
if db.version() < (4,1):
del test08_EnvRemoveAndRename
#----------------------------------------
class BasicBTreeWithEnvTestCase(BasicWithEnvTestCase):
dbtype = db.DB_BTREE
class BasicHashWithEnvTestCase(BasicWithEnvTestCase):
dbtype = db.DB_HASH
#----------------------------------------------------------------------
class BasicTransactionTestCase(BasicTestCase):
import sys
if sys.version_info[:3] < (2, 4, 0):
def assertTrue(self, expr, msg=None):
self.failUnless(expr,msg=msg)
dbopenflags = db.DB_THREAD | db.DB_AUTO_COMMIT
useEnv = 1
envflags = (db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
db.DB_INIT_TXN)
envsetflags = db.DB_AUTO_COMMIT
def tearDown(self):
self.txn.commit()
BasicTestCase.tearDown(self)
def populateDB(self):
txn = self.env.txn_begin()
BasicTestCase.populateDB(self, _txn=txn)
self.txn = self.env.txn_begin()
def test06_Transactions(self):
d = self.d
if verbose:
print '\n', '-=' * 30
print "Running %s.test06_Transactions..." % self.__class__.__name__
self.assertEqual(d.get('new rec', txn=self.txn), None)
d.put('new rec', 'this is a new record', self.txn)
self.assertEqual(d.get('new rec', txn=self.txn),
'this is a new record')
self.txn.abort()
self.assertEqual(d.get('new rec'), None)
self.txn = self.env.txn_begin()
self.assertEqual(d.get('new rec', txn=self.txn), None)
d.put('new rec', 'this is a new record', self.txn)
self.assertEqual(d.get('new rec', txn=self.txn),
'this is a new record')
self.txn.commit()
self.assertEqual(d.get('new rec'), 'this is a new record')
self.txn = self.env.txn_begin()
c = d.cursor(self.txn)
rec = c.first()
count = 0
while rec is not None:
count = count + 1
if verbose and count % 100 == 0:
print rec
rec = c.next()
self.assertEqual(count, self._numKeys+1)
c.close() # Cursors *MUST* be closed before commit!
self.txn.commit()
# flush pending updates
try:
self.env.txn_checkpoint (0, 0, 0)
except db.DBIncompleteError:
pass
statDict = self.env.log_stat(0);
self.assert_(statDict.has_key('magic'))
self.assert_(statDict.has_key('version'))
self.assert_(statDict.has_key('cur_file'))
self.assert_(statDict.has_key('region_nowait'))
# must have at least one log file present:
logs = self.env.log_archive(db.DB_ARCH_ABS | db.DB_ARCH_LOG)
self.assertNotEqual(logs, None)
for log in logs:
if verbose:
print 'log file: ' + log
if db.version() >= (4,2):
logs = self.env.log_archive(db.DB_ARCH_REMOVE)
self.assertTrue(not logs)
self.txn = self.env.txn_begin()
#----------------------------------------
def test08_TxnTruncate(self):
d = self.d
if verbose:
print '\n', '-=' * 30
print "Running %s.test08_TxnTruncate..." % self.__class__.__name__
d.put("abcde", "ABCDE");
txn = self.env.txn_begin()
num = d.truncate(txn)
self.assert_(num >= 1, "truncate returned <= 0 on non-empty database")
num = d.truncate(txn)
self.assertEqual(num, 0,
"truncate on empty DB returned nonzero (%r)" % (num,))
txn.commit()
#----------------------------------------
def test09_TxnLateUse(self):
txn = self.env.txn_begin()
txn.abort()
try:
txn.abort()
except db.DBError, e:
pass
else:
raise RuntimeError, "DBTxn.abort() called after DB_TXN no longer valid w/o an exception"
txn = self.env.txn_begin()
txn.commit()
try:
txn.commit()
except db.DBError, e:
pass
else:
raise RuntimeError, "DBTxn.commit() called after DB_TXN no longer valid w/o an exception"
class BTreeTransactionTestCase(BasicTransactionTestCase):
dbtype = db.DB_BTREE
class HashTransactionTestCase(BasicTransactionTestCase):
dbtype = db.DB_HASH
#----------------------------------------------------------------------
class BTreeRecnoTestCase(BasicTestCase):
dbtype = db.DB_BTREE
dbsetflags = db.DB_RECNUM
def test08_RecnoInBTree(self):
d = self.d
if verbose:
print '\n', '-=' * 30
print "Running %s.test08_RecnoInBTree..." % self.__class__.__name__
rec = d.get(200)
self.assertEqual(type(rec), type(()))
self.assertEqual(len(rec), 2)
if verbose:
print "Record #200 is ", rec
c = d.cursor()
c.set('0200')
num = c.get_recno()
self.assertEqual(type(num), type(1))
if verbose:
print "recno of d['0200'] is ", num
rec = c.current()
self.assertEqual(c.set_recno(num), rec)
c.close()
class BTreeRecnoWithThreadFlagTestCase(BTreeRecnoTestCase):
dbopenflags = db.DB_THREAD
#----------------------------------------------------------------------
class BasicDUPTestCase(BasicTestCase):
dbsetflags = db.DB_DUP
def test09_DuplicateKeys(self):
d = self.d
if verbose:
print '\n', '-=' * 30
print "Running %s.test09_DuplicateKeys..." % \
self.__class__.__name__
d.put("dup0", "before")
for x in "The quick brown fox jumped over the lazy dog.".split():
d.put("dup1", x)
d.put("dup2", "after")
data = d.get("dup1")
self.assertEqual(data, "The")
if verbose:
print data
c = d.cursor()
rec = c.set("dup1")
self.assertEqual(rec, ('dup1', 'The'))
next_reg = c.next()
self.assertEqual(next_reg, ('dup1', 'quick'))
rec = c.set("dup1")
count = c.count()
self.assertEqual(count, 9)
next_dup = c.next_dup()
self.assertEqual(next_dup, ('dup1', 'quick'))
rec = c.set('dup1')
while rec is not None:
if verbose:
print rec
rec = c.next_dup()
c.set('dup1')
rec = c.next_nodup()
self.assertNotEqual(rec[0], 'dup1')
if verbose:
print rec
c.close()
class BTreeDUPTestCase(BasicDUPTestCase):
dbtype = db.DB_BTREE
class HashDUPTestCase(BasicDUPTestCase):
dbtype = db.DB_HASH
class BTreeDUPWithThreadTestCase(BasicDUPTestCase):
dbtype = db.DB_BTREE
dbopenflags = db.DB_THREAD
class HashDUPWithThreadTestCase(BasicDUPTestCase):
dbtype = db.DB_HASH
dbopenflags = db.DB_THREAD
#----------------------------------------------------------------------
class BasicMultiDBTestCase(BasicTestCase):
dbname = 'first'
def otherType(self):
if self.dbtype == db.DB_BTREE:
return db.DB_HASH
else:
return db.DB_BTREE
def test10_MultiDB(self):
d1 = self.d
if verbose:
print '\n', '-=' * 30
print "Running %s.test10_MultiDB..." % self.__class__.__name__
d2 = db.DB(self.env)
d2.open(self.filename, "second", self.dbtype,
self.dbopenflags|db.DB_CREATE)
d3 = db.DB(self.env)
d3.open(self.filename, "third", self.otherType(),
self.dbopenflags|db.DB_CREATE)
for x in "The quick brown fox jumped over the lazy dog".split():
d2.put(x, self.makeData(x))
for x in string.letters:
d3.put(x, x*70)
d1.sync()
d2.sync()
d3.sync()
d1.close()
d2.close()
d3.close()
self.d = d1 = d2 = d3 = None
self.d = d1 = db.DB(self.env)
d1.open(self.filename, self.dbname, flags = self.dbopenflags)
d2 = db.DB(self.env)
d2.open(self.filename, "second", flags = self.dbopenflags)
d3 = db.DB(self.env)
d3.open(self.filename, "third", flags = self.dbopenflags)
c1 = d1.cursor()
c2 = d2.cursor()
c3 = d3.cursor()
count = 0
rec = c1.first()
while rec is not None:
count = count + 1
if verbose and (count % 50) == 0:
print rec
rec = c1.next()
self.assertEqual(count, self._numKeys)
count = 0
rec = c2.first()
while rec is not None:
count = count + 1
if verbose:
print rec
rec = c2.next()
self.assertEqual(count, 9)
count = 0
rec = c3.first()
while rec is not None:
count = count + 1
if verbose:
print rec
rec = c3.next()
self.assertEqual(count, len(string.letters))
c1.close()
c2.close()
c3.close()
d2.close()
d3.close()
# Multiple DBs per file misbehave unless they are opened through a DBEnv with
# the MPOOL and locking subsystems enabled, so the following cases always use one.
class BTreeMultiDBTestCase(BasicMultiDBTestCase):
dbtype = db.DB_BTREE
dbopenflags = db.DB_THREAD
useEnv = 1
envflags = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK
class HashMultiDBTestCase(BasicMultiDBTestCase):
dbtype = db.DB_HASH
dbopenflags = db.DB_THREAD
useEnv = 1
envflags = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK
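# Hedged aside (editor addition, not part of the upstream bsddb test suite):
# the useEnv/envflags settings above are the practical answer to the comment
# about multiple DBs per file. A minimal sketch of that setup, assuming the
# same `db` module and a writable home directory named 'envhome':
def _multi_db_env_sketch(home='envhome'):
    # One environment providing a shared memory pool and a locking subsystem...
    env = db.DBEnv()
    env.open(home, db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOCK)
    # ...lets several named databases share a single physical file safely.
    first = db.DB(env)
    first.open('multi.db', 'first', db.DB_BTREE, db.DB_CREATE)
    second = db.DB(env)
    second.open('multi.db', 'second', db.DB_HASH, db.DB_CREATE)
    first.put('key', 'stored by first')
    second.put('key', 'stored by second')
    first.close()
    second.close()
    env.close()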
class PrivateObject(unittest.TestCase) :
import sys
if sys.version_info[:3] < (2, 4, 0):
def assertTrue(self, expr, msg=None):
self.failUnless(expr,msg=msg)
def tearDown(self) :
del self.obj
def test01_DefaultIsNone(self) :
self.assertEqual(self.obj.get_private(), None)
def test02_assignment(self) :
a = "example of private object"
self.obj.set_private(a)
b = self.obj.get_private()
self.assertTrue(a is b) # Object identity
def test03_leak_assignment(self) :
import sys
a = "example of private object"
refcount = sys.getrefcount(a)
self.obj.set_private(a)
self.assertEqual(refcount+1, sys.getrefcount(a))
self.obj.set_private(None)
self.assertEqual(refcount, sys.getrefcount(a))
def test04_leak_GC(self) :
import sys
a = "example of private object"
refcount = sys.getrefcount(a)
self.obj.set_private(a)
self.obj = None
self.assertEqual(refcount, sys.getrefcount(a))
class DBEnvPrivateObject(PrivateObject) :
def setUp(self) :
self.obj = db.DBEnv()
class DBPrivateObject(PrivateObject) :
def setUp(self) :
self.obj = db.DB()
class CrashAndBurn(unittest.TestCase) :
import sys
if sys.version_info[:3] < (2, 4, 0):
def assertTrue(self, expr, msg=None):
self.failUnless(expr,msg=msg)
#def test01_OpenCrash(self) :
# # See http://bugs.python.org/issue3307
# self.assertRaises(db.DBInvalidArgError, db.DB, None, 65535)
def test02_DBEnv_dealloc(self):
# http://bugs.python.org/issue3885
import gc
self.assertRaises(db.DBInvalidArgError, db.DBEnv, ~db.DB_RPCCLIENT)
gc.collect()
#----------------------------------------------------------------------
#----------------------------------------------------------------------
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(VersionTestCase))
suite.addTest(unittest.makeSuite(BasicBTreeTestCase))
suite.addTest(unittest.makeSuite(BasicHashTestCase))
suite.addTest(unittest.makeSuite(BasicBTreeWithThreadFlagTestCase))
suite.addTest(unittest.makeSuite(BasicHashWithThreadFlagTestCase))
suite.addTest(unittest.makeSuite(BasicBTreeWithEnvTestCase))
suite.addTest(unittest.makeSuite(BasicHashWithEnvTestCase))
suite.addTest(unittest.makeSuite(BTreeTransactionTestCase))
suite.addTest(unittest.makeSuite(HashTransactionTestCase))
suite.addTest(unittest.makeSuite(BTreeRecnoTestCase))
suite.addTest(unittest.makeSuite(BTreeRecnoWithThreadFlagTestCase))
suite.addTest(unittest.makeSuite(BTreeDUPTestCase))
suite.addTest(unittest.makeSuite(HashDUPTestCase))
suite.addTest(unittest.makeSuite(BTreeDUPWithThreadTestCase))
suite.addTest(unittest.makeSuite(HashDUPWithThreadTestCase))
suite.addTest(unittest.makeSuite(BTreeMultiDBTestCase))
suite.addTest(unittest.makeSuite(HashMultiDBTestCase))
suite.addTest(unittest.makeSuite(DBEnvPrivateObject))
suite.addTest(unittest.makeSuite(DBPrivateObject))
suite.addTest(unittest.makeSuite(CrashAndBurn))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
apache-2.0
| -757,311,094,528,680,300 | 8,857,814,030,829,367,000 | 29.605778 | 101 | 0.523051 | false |
underlost/GamerNews
|
gamernews/apps/threadedcomments/management/commands/migrate_threaded_comments.py
|
5
|
3452
|
from django.core.management.base import NoArgsCommand
from django.contrib.sites.models import Site
from django.db import transaction, connection
from django.conf import settings
from threadedcomments.models import ThreadedComment
USER_SQL = """
SELECT
content_type_id,
object_id,
parent_id,
user_id,
date_submitted,
date_modified,
date_approved,
comment,
markup,
is_public,
is_approved,
ip_address
FROM threadedcomments_threadedcomment
"""
FREE_SQL = """
SELECT
content_type_id,
object_id,
parent_id,
name,
website,
email,
date_submitted,
date_modified,
date_approved,
comment,
markup,
is_public,
is_approved,
ip_address
FROM threadedcomments_freethreadedcomment
"""
PATH_SEPARATOR = getattr(settings, 'COMMENT_PATH_SEPARATOR', '/')
PATH_DIGITS = getattr(settings, 'COMMENT_PATH_DIGITS', 10)
class Command(NoArgsCommand):
help = "Migrates django-threadedcomments <= 0.5 to the new model structure"
def handle(self, *args, **options):
transaction.commit_unless_managed()
transaction.enter_transaction_management()
transaction.managed(True)
site = Site.objects.all()[0]
cursor = connection.cursor()
cursor.execute(FREE_SQL)
for row in cursor:
(content_type_id, object_id, parent_id, name, website, email,
date_submitted, date_modified, date_approved, comment, markup,
is_public, is_approved, ip_address) = row
tc = ThreadedComment(
content_type_id=content_type_id,
object_pk=object_id,
user_name=name,
user_email=email,
user_url=website,
comment=comment,
submit_date=date_submitted,
ip_address=ip_address,
is_public=is_public,
is_removed=not is_approved,
parent_id=parent_id,
site=site,
)
tc.save(skip_tree_path=True)
cursor = connection.cursor()
cursor.execute(USER_SQL)
for row in cursor:
(content_type_id, object_id, parent_id, user_id, date_submitted,
date_modified, date_approved, comment, markup, is_public,
is_approved, ip_address) = row
tc = ThreadedComment(
content_type_id=content_type_id,
object_pk=object_id,
user_id=user_id,
comment=comment,
submit_date=date_submitted,
ip_address=ip_address,
is_public=is_public,
is_removed=not is_approved,
parent_id=parent_id,
site=site,
)
tc.save(skip_tree_path=True)
for comment in ThreadedComment.objects.all():
path = [str(comment.id).zfill(PATH_DIGITS)]
current = comment
while current.parent:
current = current.parent
path.append(str(current.id).zfill(PATH_DIGITS))
comment.tree_path = PATH_SEPARATOR.join(reversed(path))
comment.save(skip_tree_path=True)
if comment.parent:
ThreadedComment.objects.filter(pk=comment.parent.pk).update(
last_child=comment)
transaction.commit()
transaction.leave_transaction_management()
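# Hedged aside (editor addition, not part of the migration command): the final
# loop above rebuilds each comment's materialized path as zero-padded ids
# joined by COMMENT_PATH_SEPARATOR, root first. For example, a comment with
# id 7 whose parent has id 3 would (with the default separator and digits) end
# up with:
#
#     tree_path = "0000000003/0000000007"
#
# which is what lets a whole thread be returned in display order by a simple
# sort on tree_path.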
|
mit
| -5,780,238,010,957,534,000 | -9,135,423,366,619,208,000 | 29.821429 | 79 | 0.580533 | false |
cloudera/hue
|
desktop/core/ext-py/Paste-2.0.1/paste/config.py
|
78
|
4312
|
# (c) 2006 Ian Bicking, Philip Jenvey and contributors
# Written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""Paste Configuration Middleware and Objects"""
from paste.registry import RegistryManager, StackedObjectProxy
__all__ = ['DispatchingConfig', 'CONFIG', 'ConfigMiddleware']
class DispatchingConfig(StackedObjectProxy):
"""
    This is a configuration object that can be used globally,
    imported, and have references to it held onto. The configuration it
    resolves to may differ by thread (or may not).
Specific configurations are registered (and deregistered) either
for the process or for threads.
"""
# @@: What should happen when someone tries to add this
# configuration to itself? Probably the conf should become
# resolved, and get rid of this delegation wrapper
def __init__(self, name='DispatchingConfig'):
super(DispatchingConfig, self).__init__(name=name)
self.__dict__['_process_configs'] = []
def push_thread_config(self, conf):
"""
Make ``conf`` the active configuration for this thread.
Thread-local configuration always overrides process-wide
configuration.
This should be used like::
conf = make_conf()
dispatching_config.push_thread_config(conf)
try:
... do stuff ...
finally:
dispatching_config.pop_thread_config(conf)
"""
self._push_object(conf)
def pop_thread_config(self, conf=None):
"""
Remove a thread-local configuration. If ``conf`` is given,
it is checked against the popped configuration and an error
is emitted if they don't match.
"""
self._pop_object(conf)
def push_process_config(self, conf):
"""
Like push_thread_config, but applies the configuration to
the entire process.
"""
self._process_configs.append(conf)
def pop_process_config(self, conf=None):
self._pop_from(self._process_configs, conf)
def _pop_from(self, lst, conf):
popped = lst.pop()
if conf is not None and popped is not conf:
raise AssertionError(
"The config popped (%s) is not the same as the config "
"expected (%s)"
% (popped, conf))
def _current_obj(self):
try:
return super(DispatchingConfig, self)._current_obj()
except TypeError:
if self._process_configs:
return self._process_configs[-1]
raise AttributeError(
"No configuration has been registered for this process "
"or thread")
current = current_conf = _current_obj
CONFIG = DispatchingConfig()
no_config = object()
class ConfigMiddleware(RegistryManager):
"""
A WSGI middleware that adds a ``paste.config`` key (by default)
to the request environment, as well as registering the
configuration temporarily (for the length of the request) with
``paste.config.CONFIG`` (or any other ``DispatchingConfig``
object).
"""
def __init__(self, application, config, dispatching_config=CONFIG,
environ_key='paste.config'):
"""
This delegates all requests to `application`, adding a *copy*
of the configuration `config`.
"""
def register_config(environ, start_response):
popped_config = environ.get(environ_key, no_config)
current_config = environ[environ_key] = config.copy()
environ['paste.registry'].register(dispatching_config,
current_config)
try:
app_iter = application(environ, start_response)
finally:
if popped_config is no_config:
environ.pop(environ_key, None)
else:
environ[environ_key] = popped_config
return app_iter
super(self.__class__, self).__init__(register_config)
def make_config_filter(app, global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
return ConfigMiddleware(app, conf)
make_config_middleware = ConfigMiddleware.__doc__
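# Hedged usage sketch (editor addition, not part of paste.config): the
# docstrings above describe the intended flow -- wrap a WSGI application, then
# read the per-request configuration either from the environ key or through
# the global CONFIG proxy. The application and the settings dict below are
# illustrative only.
def _config_middleware_sketch():
    def demo_app(environ, start_response):
        # Both lookups see the same per-request copy of the configuration.
        conf = environ['paste.config']
        assert CONFIG['greeting'] == conf['greeting']
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [conf['greeting'].encode('ascii')]
    return ConfigMiddleware(demo_app, {'greeting': 'hello'})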
|
apache-2.0
| -1,583,181,195,346,589,000 | -3,532,940,549,102,638,600 | 34.933333 | 84 | 0.611317 | false |
cedriclaunay/gaffer
|
apps/license/license-1.py
|
7
|
3146
|
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import sys
import os
import IECore
import Gaffer
class license( Gaffer.Application ) :
def __init__( self ) :
Gaffer.Application.__init__( self )
self.parameters().addParameter(
IECore.BoolParameter(
name = "withDependencies",
description = "Display the copyright and licensing information for the dependencies.",
defaultValue = True
)
)
def _run( self, args ) :
sys.stderr.write( Gaffer.About.name() + " " + Gaffer.About.versionString() + "\n" )
sys.stderr.write( Gaffer.About.copyright() + "\n" )
sys.stderr.write( Gaffer.About.url() + "\n" )
if args["withDependencies"].value :
sys.stderr.write( "\n" + Gaffer.About.dependenciesPreamble() + "\n" )
for d in Gaffer.About.dependencies() :
sys.stderr.write( "\n" + d["name"] + "\n" )
sys.stderr.write( "-" * len( d["name"] ) + "\n\n" )
if "credit" in d :
sys.stderr.write( d["credit"] + "\n" )
if "url" in d :
sys.stderr.write( "Project URL : " + d["url"] + "\n" )
if "license" in d :
sys.stderr.write( "License : %s\n" % os.path.expandvars( d["license"] ) )
if "source" in d :
sys.stderr.write( "Source : %s\n" % os.path.expandvars( d["source"] ) )
return 0
IECore.registerRunTimeTyped( license )
|
bsd-3-clause
| 3,243,465,882,675,332,000 | -3,477,123,181,636,916,000 | 36.011765 | 90 | 0.647171 | false |
ReganBell/QReview
|
networkx/utils/tests/test_heaps.py
|
64
|
3979
|
from nose.tools import *
import networkx as nx
from networkx.utils import *
class X(object):
def __eq__(self, other):
raise self is other
def __ne__(self, other):
raise self is not other
def __lt__(self, other):
raise TypeError('cannot compare')
def __le__(self, other):
raise TypeError('cannot compare')
def __ge__(self, other):
raise TypeError('cannot compare')
def __gt__(self, other):
raise TypeError('cannot compare')
def __hash__(self):
return hash(id(self))
x = X()
data = [# min should not invent an element.
('min', nx.NetworkXError),
# Popping an empty heap should fail.
('pop', nx.NetworkXError),
# Getting nonexisting elements should return None.
('get', 0, None),
('get', x, None),
('get', None, None),
# Inserting a new key should succeed.
('insert', x, 1, True),
('get', x, 1),
('min', (x, 1)),
# min should not pop the top element.
('min', (x, 1)),
# Inserting a new key of different type should succeed.
('insert', 1, -2.0, True),
# int and float values should interop.
('min', (1, -2.0)),
# pop removes minimum-valued element.
('insert', 3, -10 ** 100, True),
('insert', 4, 5, True),
('pop', (3, -10 ** 100)),
('pop', (1, -2.0)),
# Decrease-insert should succeed.
('insert', 4, -50, True),
('insert', 4, -60, False, True),
# Decrease-insert should not create duplicate keys.
('pop', (4, -60)),
('pop', (x, 1)),
# Popping all elements should empty the heap.
('min', nx.NetworkXError),
('pop', nx.NetworkXError),
# Non-value-changing insert should fail.
('insert', x, 0, True),
('insert', x, 0, False, False),
('min', (x, 0)),
('insert', x, 0, True, False),
('min', (x, 0)),
# Failed insert should not create duplicate keys.
('pop', (x, 0)),
('pop', nx.NetworkXError),
# Increase-insert should succeed when allowed.
('insert', None, 0, True),
('insert', 2, -1, True),
('min', (2, -1)),
('insert', 2, 1, True, False),
('min', (None, 0)),
# Increase-insert should fail when disallowed.
('insert', None, 2, False, False),
('min', (None, 0)),
# Failed increase-insert should not create duplicate keys.
('pop', (None, 0)),
('pop', (2, 1)),
('min', nx.NetworkXError),
('pop', nx.NetworkXError)]
def _test_heap_class(cls, *args, **kwargs):
heap = cls(*args, **kwargs)
# Basic behavioral test
for op in data:
if op[-1] is not nx.NetworkXError:
assert_equal(op[-1], getattr(heap, op[0])(*op[1:-1]))
else:
assert_raises(op[-1], getattr(heap, op[0]), *op[1:-1])
# Coverage test.
for i in range(99, -1, -1):
assert_true(heap.insert(i, i))
for i in range(50):
assert_equal(heap.pop(), (i, i))
for i in range(100):
assert_equal(heap.insert(i, i), i < 50)
for i in range(100):
assert_false(heap.insert(i, i + 1))
for i in range(50):
assert_equal(heap.pop(), (i, i))
for i in range(100):
assert_equal(heap.insert(i, i + 1), i < 50)
for i in range(49):
assert_equal(heap.pop(), (i, i + 1))
assert_equal(sorted([heap.pop(), heap.pop()]), [(49, 50), (50, 50)])
for i in range(51, 100):
assert_false(heap.insert(i, i + 1, True))
for i in range(51, 70):
assert_equal(heap.pop(), (i, i + 1))
for i in range(100):
assert_true(heap.insert(i, i))
for i in range(100):
assert_equal(heap.pop(), (i, i))
assert_raises(nx.NetworkXError, heap.pop)
def test_PairingHeap():
_test_heap_class(PairingHeap)
def test_BinaryHeap():
_test_heap_class(BinaryHeap)
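# Hedged illustration (editor addition, not part of the upstream test file):
# the comments in `data` above spell out the heap contract this suite checks --
# insert() reports True for a new key or an accepted decrease, min() peeks
# without removing, and pop() removes the minimum-valued item.
def _binary_heap_sketch():
    heap = BinaryHeap()
    assert heap.insert('a', 3)        # new key -> True
    assert heap.insert('b', 1)        # new key -> True
    assert heap.min() == ('b', 1)     # peek only; nothing is removed
    assert heap.insert('a', 2)        # decrease of an existing key -> True
    assert not heap.insert('a', 2)    # no change -> False
    assert heap.pop() == ('b', 1)     # pop removes the minimum item
    assert heap.pop() == ('a', 2)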
|
bsd-3-clause
| -4,969,613,331,149,948,000 | -4,484,696,332,301,202,000 | 29.607692 | 72 | 0.522493 | false |
ogajduse/spacewalk
|
backend/server/action/kickstart_guest.py
|
10
|
4459
|
#
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import sys
from spacewalk.common.usix import raise_with_tb
from spacewalk.common.rhnLog import log_debug
from spacewalk.server import rhnSQL
from spacewalk.server.rhnLib import InvalidAction, ShadowAction
from spacewalk.server.action.utils import SubscribedChannel, \
ChannelPackage, \
PackageInstallScheduler, \
NoActionInfo, \
PackageNotFound
from spacewalk.server.rhnChannel import subscribe_to_tools_channel
__rhnexport__ = ['initiate', 'schedule_virt_guest_pkg_install', 'add_tools_channel']
_query_initiate_guest = rhnSQL.Statement("""
select ksd.label as profile_name, akg.kickstart_host, kvt.label as virt_type,
akg.mem_kb, akg.vcpus, akg.disk_path, akg.virt_bridge, akg.cobbler_system_name,
akg.disk_gb, akg.append_string,
akg.guest_name, akg.ks_session_id from rhnActionKickstartGuest akg,
rhnKSData ksd, rhnKickstartSession ksess,
rhnKickstartDefaults ksdef, rhnKickstartVirtualizationType kvt
where akg.action_id = :action_id
and ksess.kickstart_id = ksd.id
and ksess.id = akg.ks_session_id
and ksdef.kickstart_id = ksd.id
and ksdef.virtualization_type = kvt.id
""")
def schedule_virt_guest_pkg_install(server_id, action_id, dry_run=0):
"""
ShadowAction that schedules a package installation action for the
rhn-virtualization-guest package.
"""
log_debug(3)
virt_host_package_name = "rhn-virtualization-guest"
tools_channel = SubscribedChannel(server_id, "rhn-tools")
found_tools_channel = tools_channel.is_subscribed_to_channel()
if not found_tools_channel:
raise InvalidAction("System not subscribed to the RHN Tools channel.")
rhn_v12n_package = ChannelPackage(server_id, virt_host_package_name)
if not rhn_v12n_package.exists():
raise InvalidAction("Could not find the rhn-virtualization-guest package.")
try:
install_scheduler = PackageInstallScheduler(server_id, action_id, rhn_v12n_package)
if (not dry_run):
install_scheduler.schedule_package_install()
else:
log_debug(4, "dry run requested")
except NoActionInfo:
nai = sys.exc_info()[1]
raise_with_tb(InvalidAction(str(nai)), sys.exc_info()[2])
except PackageNotFound:
pnf = sys.exc_info()[1]
raise_with_tb(InvalidAction(str(pnf)), sys.exc_info()[2])
except Exception:
e = sys.exc_info()[1]
raise_with_tb(InvalidAction(str(e)), sys.exc_info()[2])
log_debug(3, "Completed scheduling install of rhn-virtualization-guest!")
raise ShadowAction("Scheduled installation of RHN Virtualization Guest packages.")
def initiate(server_id, action_id, dry_run=0):
log_debug(3)
h = rhnSQL.prepare(_query_initiate_guest)
h.execute(action_id=action_id)
row = h.fetchone_dict()
if not row:
raise InvalidAction("Kickstart action without an associated kickstart")
kickstart_host = row['kickstart_host']
virt_type = row['virt_type']
name = row['guest_name']
boot_image = "spacewalk-koan"
append_string = row['append_string']
vcpus = row['vcpus']
disk_gb = row['disk_gb']
mem_kb = row['mem_kb']
ks_session_id = row['ks_session_id']
virt_bridge = row['virt_bridge']
disk_path = row['disk_path']
cobbler_system_name = row['cobbler_system_name']
if not boot_image:
raise InvalidAction("Boot image missing")
return (kickstart_host, cobbler_system_name, virt_type, ks_session_id, name,
mem_kb, vcpus, disk_gb, virt_bridge, disk_path, append_string)
def add_tools_channel(server_id, action_id, dry_run=0):
log_debug(3)
if (not dry_run):
subscribe_to_tools_channel(server_id)
else:
log_debug(4, "dry run requested")
raise ShadowAction("Subscribed guest to tools channel.")
|
gpl-2.0
| -6,390,841,404,919,844,000 | -5,241,002,344,162,811,000 | 36.470588 | 91 | 0.693205 | false |
ArneBab/pypyjs
|
website/demo/home/rfk/repos/pypy/lib-python/2.7/plat-mac/lib-scriptpackages/CodeWarrior/Required.py
|
81
|
1664
|
"""Suite Required: Terms that every application should support
Level 1, version 1
Generated from /Volumes/Sap/Applications (Mac OS 9)/Metrowerks CodeWarrior 7.0/Metrowerks CodeWarrior/CodeWarrior IDE 4.2.5
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'reqd'
from StdSuites.Required_Suite import *
class Required_Events(Required_Suite_Events):
_argmap_open = {
'converting' : 'Conv',
}
def open(self, _object, _attributes={}, **_arguments):
"""open: Open the specified object(s)
Required argument: list of objects to open
Keyword argument converting: Whether to convert project to latest version (yes/no; default is ask).
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'aevt'
_subcode = 'odoc'
aetools.keysubst(_arguments, self._argmap_open)
_arguments['----'] = _object
aetools.enumsubst(_arguments, 'Conv', _Enum_Conv)
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_Enum_Conv = {
'yes' : 'yes ', # Convert the project if necessary on open
'no' : 'no ', # Do not convert the project if needed on open
}
#
# Indices of types declared in this module
#
_classdeclarations = {
}
_propdeclarations = {
}
_compdeclarations = {
}
_enumdeclarations = {
'Conv' : _Enum_Conv,
}
|
mit
| -8,490,519,503,802,921,000 | 5,557,254,727,208,365,000 | 25.83871 | 123 | 0.635216 | false |
mixedpuppy/offlineimap
|
setup.py
|
16
|
1493
|
#!/usr/bin/env python
# $Id: setup.py,v 1.1 2002/06/21 18:10:49 jgoerzen Exp $
# IMAP synchronization
# Module: installer
# COPYRIGHT #
# Copyright (C) 2002 - 2006 John Goerzen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# END OF COPYRIGHT #
from distutils.core import setup
import offlineimap
setup(name = "offlineimap",
version = offlineimap.__version__,
description = offlineimap.__description__,
author = offlineimap.__author__,
author_email = offlineimap.__author_email__,
url = offlineimap.__homepage__,
packages = ['offlineimap', 'offlineimap.folder',
'offlineimap.repository', 'offlineimap.ui'],
scripts = ['bin/offlineimap'],
license = offlineimap.__copyright__ + \
", Licensed under the GPL version 2"
)
|
gpl-2.0
| -1,914,266,127,557,150,000 | 4,390,447,646,257,528,000 | 35.414634 | 78 | 0.684528 | false |
izapolsk/integration_tests
|
cfme/tests/infrastructure/test_host_drift_analysis.py
|
1
|
4959
|
import pytest
from cfme import test_requirements
from cfme.common.host_views import HostDriftAnalysis
from cfme.infrastructure.host import Host
from cfme.infrastructure.provider import InfraProvider
from cfme.utils import testgen
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.blockers import BZ
from cfme.utils.wait import wait_for
pytestmark = [
test_requirements.drift,
pytest.mark.tier(3),
pytest.mark.meta(blockers=[BZ(1635126, forced_streams=['5.10'])]),
]
def pytest_generate_tests(metafunc):
argnames, argvalues, idlist = testgen.providers_by_class(
metafunc, [InfraProvider], required_fields=['hosts'])
argnames += ['host']
new_idlist = []
new_argvalues = []
for i, argvalue_tuple in enumerate(argvalues):
args = dict(list(zip(argnames, argvalue_tuple)))
for test_host in args['provider'].data['hosts']:
if not test_host.get('test_fleece', False):
continue
argvs = argvalues[i][:]
new_argvalues.append(argvs + [test_host])
test_id = '{}-{}'.format(args['provider'].key, test_host['type'])
new_idlist.append(test_id)
testgen.parametrize(metafunc, argnames, new_argvalues, ids=new_idlist, scope="module")
@pytest.fixture(scope='module')
def a_host(host, appliance, provider):
host_collection = appliance.collections.hosts
return host_collection.instantiate(name=host.name, provider=provider)
@pytest.fixture(scope='module')
def set_host_credentials(provider, a_host, setup_provider_modscope):
try:
host_data, = [data for data in provider.data['hosts'] if data['name'] == a_host.name]
except ValueError:
pytest.skip('Multiple hosts with the same name found, only expecting one')
a_host.update_credentials_rest(credentials=host_data['credentials'])
yield
a_host.update_credentials_rest(
credentials={'default': Host.Credential(principal='', secret='')})
@pytest.mark.rhv3
def test_host_drift_analysis(appliance, request, a_host, soft_assert, set_host_credentials):
"""Tests host drift analysis
Metadata:
test_flag: host_drift_analysis
Polarion:
assignee: sbulage
casecomponent: SmartState
initialEstimate: 1/3h
"""
# get drift history num
view = navigate_to(a_host, 'Details')
drift_num_orig = int(view.entities.summary('Relationships').get_text_of('Drift History'))
# clear table
col = appliance.collections.tasks.filter({'tab': 'AllTasks'})
col.delete_all()
# initiate 1st analysis
a_host.run_smartstate_analysis(wait_for_task_result=True)
    # wait for drift history num+1
navigate_to(a_host, 'Details')
wait_for(
lambda: (view.entities.summary('Relationships').get_text_of('Drift History') ==
str(drift_num_orig + 1)),
delay=10,
num_sec=360,
message="Waiting for Drift History count to increase",
fail_func=appliance.server.browser.refresh
)
# add a tag and a finalizer to remove it
added_tag = appliance.collections.categories.instantiate(
display_name='Department').collections.tags.instantiate(
display_name='Accounting')
a_host.add_tag(added_tag)
request.addfinalizer(lambda: a_host.remove_tag(added_tag))
# initiate 2nd analysis
a_host.run_smartstate_analysis(wait_for_task_result=True)
    # wait for drift history num+2
navigate_to(a_host, 'Details')
wait_for(
lambda: (view.entities.summary('Relationships').get_text_of('Drift History') ==
str(drift_num_orig + 2)),
delay=10,
num_sec=360,
message="Waiting for Drift History count to increase",
fail_func=appliance.server.browser.refresh
)
# check drift difference
soft_assert(
a_host.equal_drift_results(
'{} (1)'.format(added_tag.category.display_name),
'My Company Tags',
0,
1
),
"Drift analysis results are equal when they shouldn't be"
)
# Test UI features that modify the drift grid
drift_analysis_view = appliance.browser.create_view(HostDriftAnalysis)
# Accounting tag should not be displayed, because it was changed to True
drift_analysis_view.toolbar.same_values_attributes.click()
soft_assert(
not drift_analysis_view.drift_analysis.check_section_attribute_availability(
'{}'.format(added_tag.category.display_name)),
"{} row should be hidden, but not".format(added_tag.display_name))
# Accounting tag should be displayed now
drift_analysis_view.toolbar.different_values_attributes.click()
soft_assert(
drift_analysis_view.drift_analysis.check_section_attribute_availability(
'{} (1)'.format(added_tag.category.display_name)),
"{} row should be visible, but not".format(added_tag.display_name))
|
gpl-2.0
| -8,433,924,845,734,762,000 | -2,889,257,489,190,329,000 | 34.170213 | 93 | 0.667272 | false |
Semanticle/Semanticle
|
sm-mt-devel/src/metabulate/tests/test26case-004d.py
|
1
|
18359
|
'''
Copyright 2009, 2010 Anthony John Machin. All rights reserved.
Supplied subject to The GNU General Public License v3.0
Created on 28 Jan 2009
Last Updated on 10 July 2010
As test20 with tests of:
rules instantiation and query inference
Related:
single dict TS recursion rule plus generic rule + minimal data:
test20simple-001d - unmerged recursive rule EQ order correct QL order correct
test20simple-002d - unmerged recursive rule EQ order correct QL order incorrect
test20simple-003d - merged recursive rule EQ order correct QL order correct variables consistent
test20simple-004d - merged recursive rule EQ order correct QL order correct variables inconsistent (03)
test20simple-005d - merged recursive rule EQ order correct QL order incorrect variables consistent
test20simple-006d - merged recursive rule EQ order correct QL order incorrect variables inconsistent (04)
test20simple-007d - unmerged recursive rule EQ order incorrect QL order correct (05)
test20simple-008d - unmerged recursive rule EQ order incorrect QL order incorrect (06)
test20simple-009d - merged recursive rule EQ order incorrect QL order correct variables consistent
test20simple-010d - merged recursive rule EQ order incorrect QL order correct variables inconsistent (07)
test20simple-011d - merged recursive rule EQ order incorrect QL order incorrect variables consistent
test20simple-012d - merged recursive rule EQ order incorrect QL order incorrect variables inconsistent (08)
single rbtree TS recursion rule plus generic rule + minimal data:
test20simple-001r - unmerged recursive rule EQ order correct QL order correct
test20simple-002r - unmerged recursive rule EQ order correct QL order incorrect
test20simple-003r - merged recursive rule EQ order correct QL order correct variables consistent
test20simple-004r - merged recursive rule EQ order correct QL order correct variables inconsistent (03)
test20simple-005r - merged recursive rule EQ order correct QL order incorrect variables consistent
test20simple-006r - merged recursive rule EQ order correct QL order incorrect variables inconsistent (04)
test20simple-007r - unmerged recursive rule EQ order incorrect QL order correct (05)
test20simple-008r - unmerged recursive rule EQ order incorrect QL order incorrect (06)
test20simple-009r - merged recursive rule EQ order incorrect QL order correct variables consistent
test20simple-010r - merged recursive rule EQ order incorrect QL order correct variables inconsistent (07)
test20simple-011r - merged recursive rule EQ order incorrect QL order incorrect variables consistent
test20simple-012r - merged recursive rule EQ order incorrect QL order incorrect variables inconsistent (08)
@author: Administrator
'''
import metabulate.stores.stores as mtstores
import metabulate.facades.facade as mtfacade
import metabulate.utils.utils as mtutils
import metabulate.utils.debug as mtdebug
import metabulate.renderers.render as mtrender
import metabulate.rules.rules as mtrules
import metabulate.singletons.singleton as mtsingleton
if __name__ == "__main__":
# get default file paths and types
mtconfig = mtsingleton.Settings()._getItem('config')
debug_path = mtconfig._getItem('debugfile_path','%configfilesbase%Debug\\',mtconfig)
debug_type = mtconfig._getItem('debugfile_type','txt',mtconfig)
result_path = mtconfig._getItem('resultsfile_path','%configfilesbase%Results\\',mtconfig)
result_type = mtconfig._getItem('resultsfile_type','txt',mtconfig)
unload_path = mtconfig._getItem('stores_unloadfile_path','%configfilesbase%Unloads\\',mtconfig)
unload_type = mtconfig._getItem('stores_unloadfile_type','pyo',mtconfig)
# set debug criteria
dc22f = mtdebug.Criteria(methods=['_actionPredicate','_actionTriple','_processTriple','_addTriple'],
targets=[mtutils.Flatfile(path=debug_path,
name='DebugOutput_dc22',
type=debug_type)])
dc28 = mtdebug.Criteria(classes=['Query'],methods=['_solve'],notes=['trace'])
# set debug
# d = mtdebug.Debug()
# assign it the criteria
# d._update(criteria=[dc8f,dc12f,dc7f,dc13f,dc10f,dc14f,dc15f])
# d._update(criteria=[dc6,dc20f_dup,dc20f_ok])
# d._update(criteria=[dc11f])
# d._update(criteria=[dc21f])
# d._update(criteria=[dc6,dc20f])
# files
fu = mtutils.Flatfile(path=unload_path,
name='test20r-30_unload_s1',
type=unload_type)
f1 = mtutils.Flatfile(path=result_path,
name='genealogy_test1',
type=result_type)
f3 = mtutils.Flatfile(path=result_path,
name='test20r-30_triples',
type=result_type)
f4 = mtutils.Flatfile(path=result_path,
name='test20r-30_rules',
type=result_type)
f5 = mtutils.Flatfile(path=result_path,
name='test20r-30_queries',
type=result_type)
f6 = mtutils.Flatfile(path=result_path,
name='test20r-30_results',
type=result_type)
# stores
sa = mtstores.TripleStore(structure='dict') # TS sa dict
sr = mtstores.TripleStore() # TS sr
s2 = mtstores.TripleStore()
s3 = mtstores.TripleStore()
s4 = mtstores.TripleStore()
# add namespaces in source stores
sa._addNamespace('mytriples', 'http://www.semanticle.org/triples/')
sa._addNamespace('comtriples', 'http://www.semanticle.com/triples/')
# triples for recursion test
sa._actionTriple("add [('mytriples#bob', 'child_of', 'alice'),('http://www.semanticle.com/triples/#dan', 'child_of', 'cev')]")
sa._actionTriple("add", [('cev', 'child_of', 'http://www.semanticle.org/triples/#bob'),"('http://www.semanticle.com/triples/#dan', 'child_of', 'cev')"])
sa._actionTriple("add", 'eve', 'child_of', 'comtriples#dan')
# sa._actionTriple("add",{('?desc', 'desc_of', '?ancs'):
# [
# [[('?desc', 'child_of', '?ancs')]]
# ,[[('?child', 'child_of', '?ancs')],[('?desc', 'desc_of', '?child')]]
# ,[[('?desc', 'desc_of', '?child')],[('?child', 'child_of', '?ancs')]]
# ]})
sa._actionTriple("add ('?desc', 'desc_of', '?ancs') :- [[[('?desc', 'child_of', '?ancs')]]]") # add rule clause 1 to DTS._queryStore b (or change to DTS s1)
sa._actionTriple("add",{('?obj', '?inv', '?sub'):
[
[[('?inv', 'rev_of', '?forw'),('?forw', 'rev_of', '?inv')]
,[('?sub', "?forw", '?obj')]]
,[[('?inv', 'syn_of', '?inv1'),('?inv1', 'syn_of', '?inv')]
,[('?obj', "?inv1", '?sub')]]
]}) # add rule to DTS._queryStore a (or change to DTS s1)
sa._actionTriple("add","{('?desc1', 'desc_of', '?ancs'):[[[('?child', 'child_of', '?ancs')],[('?desc1', 'desc_of', '?child')]]]}") # add rule clause 2 to DTS._queryStore b (or change to DTS s1)
# sa._actionTriple("add","{('?desc1', 'desc_of', '?ancs'):[[[('?desc1', 'desc_of', '?child')],[('?child', 'child_of', '?ancs')]]]}") # add rule clause 2 to DTS._queryStore b (or change to DTS s1)
sa._actionTriple("add", 'ancs_of', 'rev_of', 'desc_of') # ant
# s1._actionTriple("add", 'desc_of', 'rev_of', 'ancsr_of') # rev ant
sa._actionTriple("add", 'des_of', 'syn_of', 'desc_of') # syn
# s1._actionTriple("add", 'desc_of', 'syn_of', 'descr_of') # rev syn
sa._actionTriple("add", 'anc_of', 'rev_of', 'des_of') # ant of syn
# s1._actionTriple("add", 'ancestor1_of', 'syn_of', 'ancs_of') # syn of ant
sa._actionTriple("add", 'ancestor2_of', 'syn_of', 'anc_of') # syn of ant of syn
# s1._actionTriple("add", 'ancestor3_of', 'syn_of', 'ancestor2_of') # syn of syn of ant of syn
# triples for nested rules test
# s1._actionTriple("add", 'bob', 'is_sex', 'male')
# s1._actionTriple("add", 'cev', 'is_sex', 'male')
# s1._actionTriple("add", 'dan', 'is_sex', 'male')
# s1._actionTriple("add", 'eve', 'is_sex', 'female')
# s1._actionTriple("add", 'nancy', 'mother_of', 'mike')
# s1._actionTriple("add", 'niel', 'father_of', 'mike')
# s1._actionTriple("add", 'mike', 'is_sex', 'male')
# s1._actionPredicate(action="add",
# fact=('?child', 'son_of', '?parent'),
# rule=[[[('?child', 'child_of', '?parent')],
# [('?child', "'is_sex'", "'male'")]]])
# s1._actionPredicate(action="add",
# fact=('?child', 'child_of', '?parent'),
# rule=[[[('?parent', 'father_of', '?child')]],
# [[('?parent', "'mother_of'", '?child')]]])
# Test Load/Unload
# s1._unload(file=f1)
# s0 = s1._load(file=f1)
# print (s0._toString())
# print
print (sa._toString())
# print
# print ('unloading DSTS s1 to fu')
# sa._unload()
# print ('reloading DSTS from fu as sr')
# sr = sr._load()
# print
# print (sr._toString())
# print
# print (s0._toString())
# d = mtdebug.Debug()
# assign it the criteria
# d._update(criteria=[dc19f])
# set Result requests
# rlt04 = mtrules.Result(request=[[('?sub=eve','?pred=child_of','?obj=dan')]]) # pass
# rlt04 = mtrules.Result(request=[[("?sub='*'","?pred='*'","?obj='*'")]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub="*"','?pred="*"','?obj="*"')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub="?"','?pred','?obj="?"')]]) # FAIL - NO RULES RETURNED (MAYBE OK?)
# rlt04 = mtrules.Result(request=[[("?sub='?'","?pred","?obj='?'")]]) # FAIL - NO RULES RETURNED (MAYBE OK?)
# rlt04 = mtrules.Result(request=[[('?sub=eve', "?pred=desc_of", '?obj=alice')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub=eve', "?pred=des_of", '?obj=alice')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub', "?pred=desc_of", '?obj')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub', "?pred=ancs_of", '?obj')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub', "?pred=des_of", '?obj')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub=?','?pred','?obj')
# ,('?sub','?pred=?','?obj')
# ,('?sub','?pred','?obj=?')]]) # pass - all inferences
# rlt04 = mtrules.Result(request=[[('?sub == ?','?pred','?obj')
# ,('?sub','?pred = =?','?obj')
# ,('?sub','?pred','?obj==?')]]) # pass - all rules
rlt04 = mtrules.Result(request=[[('?sub','?pred','?obj')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub','?pred','?obj')],[('?sub','child_of','dan')]]) # FAIL
# rlt04 = mtrules.Result(request=[[('?sub','?pred','?obj')],['not',('?sub','child_of','dan')]]) # pass
# rlt04 = mtrules.Result(request=[['not',('?sub','child_of','comtriples#dan')],[('?sub','?pred','?obj')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub','?pred','?obj')],['not',('?sub','child_of','dan')]
# ,['not',('?sub','from','London')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub','?pred=ancestor3_of','?obj')]]) # pass
rlt05 = mtrules.Result(request=[[("?s","?r=?r1='child_of'","?o")]]) # pass
# rlt02 = mtrules.Result(request=[[('eve', "desc_of", '?person2')]]) # pass
# rlt02 = mtrules.Result(request=[[(('ender', 'from', 'Truro'), "desc_of", '?person2')]]) # pass
# rlt02 = mtrules.Result(request=[[(('ender|eddy', 'from', 'Truro'), "desc_of", '?person2')]]) # pass
# rlt02 = mtrules.Result(request=[[(('?person1', 'from', 'Truro'), "desc_of", '?person2')]]) # pass
# rlt02 = mtrules.Result(request=[[('eve', "desc_of", '?person2')]
# ,[('?person2', "desc_of", 'alice')]]) # pass
# rlt02 = mtrules.Result(request=[[('eve', "des_of", '?person2')]
# ,[('?person2', "des_of", 'alice')]]) # pass - syn of recursed rule
# rlt02 = mtrules.Result(request=[[('eve', "descr_of", '?person2')]
# ,[('?person2', "descr_of", 'alice')]]) # pass - reversed syn of recursed rule
# rlt02 = mtrules.Result(request=[[('alice', "ancs_of", '?person2')]
# ,[('?person2', "ancs_of", 'eve')]]) # pass - ant of recursed rule
# rlt02 = mtrules.Result(request=[[('alice', "ancsr_of", '?person2')]
# ,[('?person2', "ancsr_of", 'eve')]]) # pass - reversed ant of recursed rule
# rlt02 = mtrules.Result(request=[[('alice', "anc_of", '?person2')]
# ,[('?person2', "anc_of", 'eve')]]) # pass - ant of syn of recursed rule
# rlt02 = mtrules.Result(request=[[('alice', "ancestor1_of", '?person2')]
# ,[('?person2', "ancestor1_of", 'eve')]]) # pass - syn of ant of recursed rule
rlt02 = mtrules.Result(request=[[('alice', "ancestor2_of", '?person2')]
,[('?person2', "ancestor2_of", 'eve')]]) # pass - syn of ant of syn of recursed rule
# rlt02 = mtrules.Result(request=[[('alice', "ancestor3_of", '?person2')]
# ,[('?person2', "ancestor3_of", 'eve')]]) # pass - syn of syn of ant of syn of recursed rule
print ('queries defined')
# rendering submission
p0t = mtrender.Sequence(pattern=['?!triples'], # via variable notation
targets=[s3,f3],
render='py')
p0r = mtrender.Sequence(pattern=['?!rules'], # via variable notation
targets=[s4,f4],
render='py')
p0q = mtrender.Sequence(pattern=['?!queries'], # via variable notation
targets=[f5],
render='py')
    p1 = mtrender.Sequence(pattern=[('?son', 'son_of', '?person')], # triple propagation
targets=[s2,'display'],
render='csv')
    p2 = mtrender.Sequence(pattern=[('?person1', 'desc_of', '?person2')], # triple propagation
targets=[s2,'display'],
render='csv')
p3 = mtrender.Sequence(pattern=['?person2'],
targets=['display'],
render='csv')
p4 = mtrender.Sequence(pattern=[('?sub', '?pred', '?obj')],
targets=[s2,'display'],
render='csv',
URImode='nativealias')
p4a = mtrender.Sequence(pattern=[('?sub', '?pred', '?obj'),('results', 'contain', ('?sub', '?pred', '?obj'))],
targets=[s2,'display'],
render='csv',
URImode='nativealias')
p6 = mtrender.Transformation(pattern=['!!og!!','/^(.)(.*?)(.)$/$3$2$1/'],id='?p6')
#p5 = mtrender.Sequence(pattern=[({('np2',p2):{'og':'?o'}},'is known by','?s')])
p5 = mtrender.Sequence(pattern=[({('np6','!p6'):{'?og':'?o'}},'is known by','?s')],
targets=['display'],
render='csv')
print ('Renders defined')
# d = mtdebug.Debug()
# assign it the criteria
# d._update(criteria=[dc16f])
# set query
rlt02._update(outputs=[p3])
face02 = mtfacade.Facade(store=sa,
results=[rlt02])
# rlt04._update(outputs=[p4,p0t,p0r,p0q])
rlt04._update(outputs=[p4])
face04 = mtfacade.Facade(store=sa,
results=[rlt04])
rlt05._update(outputs=[p5])
face05 = mtfacade.Facade(store=sa,
results=[rlt05])
print ('results and facades defined')
# reset dubug criteria
# execute the query
# s1._update(infer=False)
face04._generate()
print
# check output channelled to a store
print ('results instantiated')
print (s2._toString())
print ('should be 60 results')
print
print ('contributory triples instantiated')
print (s3._toString())
print ('contributory rules instantiated')
print (s4._toString())
# print ('source Store again')
# print (sr._toString())
|
gpl-2.0
| -334,937,377,422,382,200 | 6,879,962,192,656,685,000 | 66.00365 | 216 | 0.503513 | false |
40223119/2015cda
|
static/Brython3.1.1-20150328-091302/Lib/codecs.py
|
739
|
35436
|
""" codecs -- Python Codec Registry, API and helpers.
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import builtins, sys
### Registry and builtin stateless codec functions
try:
from _codecs import *
except ImportError as why:
raise SystemError('Failed to load the builtin codecs: %s' % why)
__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE",
"BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE",
"BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE",
"BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE",
"strict_errors", "ignore_errors", "replace_errors",
"xmlcharrefreplace_errors",
"register_error", "lookup_error"]
### Constants
#
# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF)
# and its possible byte string values
# for UTF8/UTF16/UTF32 output and little/big endian machines
#
# UTF-8
BOM_UTF8 = b'\xef\xbb\xbf'
# UTF-16, little endian
BOM_LE = BOM_UTF16_LE = b'\xff\xfe'
# UTF-16, big endian
BOM_BE = BOM_UTF16_BE = b'\xfe\xff'
# UTF-32, little endian
BOM_UTF32_LE = b'\xff\xfe\x00\x00'
# UTF-32, big endian
BOM_UTF32_BE = b'\x00\x00\xfe\xff'
if sys.byteorder == 'little':
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_LE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_LE
else:
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_BE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_BE
# Old broken names (don't use in new code)
BOM32_LE = BOM_UTF16_LE
BOM32_BE = BOM_UTF16_BE
BOM64_LE = BOM_UTF32_LE
BOM64_BE = BOM_UTF32_BE
### Codec base classes (defining the API)
class CodecInfo(tuple):
def __new__(cls, encode, decode, streamreader=None, streamwriter=None,
incrementalencoder=None, incrementaldecoder=None, name=None):
self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter))
self.name = name
self.encode = encode
self.decode = decode
self.incrementalencoder = incrementalencoder
self.incrementaldecoder = incrementaldecoder
self.streamwriter = streamwriter
self.streamreader = streamreader
return self
def __repr__(self):
return "<%s.%s object for encoding %s at 0x%x>" % \
(self.__class__.__module__, self.__class__.__name__,
self.name, id(self))
class Codec:
""" Defines the interface for stateless encoders/decoders.
The .encode()/.decode() methods may use different error
handling schemes by providing the errors argument. These
string values are predefined:
'strict' - raise a ValueError error (or a subclass)
'ignore' - ignore the character and continue with the next
'replace' - replace with a suitable replacement character;
Python will use the official U+FFFD REPLACEMENT
CHARACTER for the builtin Unicode codecs on
decoding and '?' on encoding.
'surrogateescape' - replace with private codepoints U+DCnn.
'xmlcharrefreplace' - Replace with the appropriate XML
character reference (only for encoding).
'backslashreplace' - Replace with backslashed escape sequences
(only for encoding).
The set of allowed values can be extended via register_error.
"""
def encode(self, input, errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamCodec for codecs which have to keep state in order to
make encoding/decoding efficient.
The encoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
def decode(self, input, errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamCodec for codecs which have to keep state in order to
make encoding/decoding efficient.
The decoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
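# Hedged illustration (editor addition, not part of this module): the error
# handling names listed above are the same strings accepted by the stateless
# codec functions returned from lookup(), assuming the usual built-in codecs
# are available.
def _error_handling_sketch():
    ascii_codec = lookup('ascii')
    # 'replace' substitutes '?' on encoding, 'ignore' drops the character;
    # both report the full input length as consumed.
    assert ascii_codec.encode('h\xe9llo', 'replace') == (b'h?llo', 5)
    assert ascii_codec.encode('h\xe9llo', 'ignore') == (b'hllo', 5)
    assert ascii_codec.decode(b'hello', 'strict') == ('hello', 5)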
class IncrementalEncoder(object):
"""
An IncrementalEncoder encodes an input in multiple steps. The input can
be passed piece by piece to the encode() method. The IncrementalEncoder
remembers the state of the encoding process between calls to encode().
"""
def __init__(self, errors='strict'):
"""
Creates an IncrementalEncoder instance.
The IncrementalEncoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
self.buffer = ""
def encode(self, input, final=False):
"""
Encodes input and returns the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Resets the encoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the encoder.
"""
return 0
def setstate(self, state):
"""
Set the current state of the encoder. state must have been
returned by getstate().
"""
class BufferedIncrementalEncoder(IncrementalEncoder):
"""
This subclass of IncrementalEncoder can be used as the baseclass for an
incremental encoder if the encoder must keep some of the output in a
buffer between calls to encode().
"""
def __init__(self, errors='strict'):
IncrementalEncoder.__init__(self, errors)
# unencoded input that is kept between calls to encode()
self.buffer = ""
def _buffer_encode(self, input, errors, final):
# Overwrite this method in subclasses: It must encode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def encode(self, input, final=False):
# encode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_encode(data, self.errors, final)
# keep unencoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalEncoder.reset(self)
self.buffer = ""
def getstate(self):
return self.buffer or 0
def setstate(self, state):
self.buffer = state or ""
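# Hedged illustration (editor addition, not part of this module): a subclass
# only has to supply _buffer_encode(), which returns (output, consumed); any
# unconsumed tail is kept in self.buffer and retried on the next call. A toy
# example that holds back a trailing backslash until more input (or
# final=True) arrives:
class _EscapeAwareEncoderSketch(BufferedIncrementalEncoder):
    def _buffer_encode(self, input, errors, final):
        if not final and input.endswith('\\'):
            # Keep the lone backslash buffered; report one character less consumed.
            return (input[:-1].encode('ascii', errors), len(input) - 1)
        return (input.encode('ascii', errors), len(input))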
class IncrementalDecoder(object):
"""
An IncrementalDecoder decodes an input in multiple steps. The input can
be passed piece by piece to the decode() method. The IncrementalDecoder
remembers the state of the decoding process between calls to decode().
"""
def __init__(self, errors='strict'):
"""
Create a IncrementalDecoder instance.
The IncrementalDecoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
def decode(self, input, final=False):
"""
Decode input and returns the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Reset the decoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the decoder.
This must be a (buffered_input, additional_state_info) tuple.
buffered_input must be a bytes object containing bytes that
were passed to decode() that have not yet been converted.
additional_state_info must be a non-negative integer
representing the state of the decoder WITHOUT yet having
processed the contents of buffered_input. In the initial state
and after reset(), getstate() must return (b"", 0).
"""
return (b"", 0)
def setstate(self, state):
"""
Set the current state of the decoder.
state must have been returned by getstate(). The effect of
setstate((b"", 0)) must be equivalent to reset().
"""
class BufferedIncrementalDecoder(IncrementalDecoder):
"""
This subclass of IncrementalDecoder can be used as the baseclass for an
incremental decoder if the decoder must be able to handle incomplete
byte sequences.
"""
def __init__(self, errors='strict'):
IncrementalDecoder.__init__(self, errors)
# undecoded input that is kept between calls to decode()
self.buffer = b""
def _buffer_decode(self, input, errors, final):
# Overwrite this method in subclasses: It must decode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def decode(self, input, final=False):
# decode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_decode(data, self.errors, final)
# keep undecoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalDecoder.reset(self)
self.buffer = b""
def getstate(self):
# additional state info is always 0
return (self.buffer, 0)
def setstate(self, state):
# ignore additional state info
self.buffer = state[0]
#
# The StreamWriter and StreamReader class provide generic working
# interfaces which can be used to implement new encoding submodules
# very easily. See encodings/utf_8.py for an example on how this is
# done.
#
class StreamWriter(Codec):
def __init__(self, stream, errors='strict'):
""" Creates a StreamWriter instance.
stream must be a file-like object open for writing
(binary) data.
The StreamWriter may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
'replace'- replace with a suitable replacement character
'xmlcharrefreplace' - Replace with the appropriate XML
character reference.
'backslashreplace' - Replace with backslashed escape
sequences (only for encoding).
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
def write(self, object):
""" Writes the object's contents encoded to self.stream.
"""
data, consumed = self.encode(object, self.errors)
self.stream.write(data)
def writelines(self, list):
""" Writes the concatenated list of strings to the stream
using .write().
"""
self.write(''.join(list))
def reset(self):
""" Flushes and resets the codec buffers used for keeping state.
Calling this method should ensure that the data on the
output is put into a clean state, that allows appending
of new fresh data without having to rescan the whole
stream to recover state.
"""
pass
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
if whence == 0 and offset == 0:
self.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamReader(Codec):
charbuffertype = str
def __init__(self, stream, errors='strict'):
""" Creates a StreamReader instance.
stream must be a file-like object open for reading
(binary) data.
The StreamReader may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
'replace'- replace with a suitable replacement character;
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
self.bytebuffer = b""
self._empty_charbuffer = self.charbuffertype()
self.charbuffer = self._empty_charbuffer
self.linebuffer = None
def decode(self, input, errors='strict'):
raise NotImplementedError
def read(self, size=-1, chars=-1, firstline=False):
""" Decodes data from the stream self.stream and returns the
resulting object.
chars indicates the number of characters to read from the
stream. read() will never return more than chars
characters, but it might return less, if there are not enough
characters available.
size indicates the approximate maximum number of bytes to
read from the stream for decoding purposes. The decoder
can modify this setting as appropriate. The default value
-1 indicates to read and decode as much as possible. size
is intended to prevent having to decode huge files in one
step.
If firstline is true, and a UnicodeDecodeError happens
after the first line terminator in the input only the first line
will be returned, the rest of the input will be kept until the
next call to read().
The method should use a greedy read strategy meaning that
it should read as much data as is allowed within the
definition of the encoding and the given size, e.g. if
optional encoding endings or state markers are available
on the stream, these should be read too.
"""
# If we have lines cached, first merge them back into characters
if self.linebuffer:
self.charbuffer = self._empty_charbuffer.join(self.linebuffer)
self.linebuffer = None
# read until we get the required number of characters (if available)
while True:
# can the request be satisfied from the character buffer?
if chars < 0:
if size < 0:
if self.charbuffer:
break
elif len(self.charbuffer) >= size:
break
else:
if len(self.charbuffer) >= chars:
break
# we need more data
if size < 0:
newdata = self.stream.read()
else:
newdata = self.stream.read(size)
# decode bytes (those remaining from the last call included)
data = self.bytebuffer + newdata
try:
newchars, decodedbytes = self.decode(data, self.errors)
except UnicodeDecodeError as exc:
if firstline:
newchars, decodedbytes = \
self.decode(data[:exc.start], self.errors)
lines = newchars.splitlines(keepends=True)
if len(lines)<=1:
raise
else:
raise
# keep undecoded bytes until the next call
self.bytebuffer = data[decodedbytes:]
# put new characters in the character buffer
self.charbuffer += newchars
# there was no data available
if not newdata:
break
if chars < 0:
# Return everything we've got
result = self.charbuffer
self.charbuffer = self._empty_charbuffer
else:
# Return the first chars characters
result = self.charbuffer[:chars]
self.charbuffer = self.charbuffer[chars:]
return result
def readline(self, size=None, keepends=True):
""" Read one line from the input stream and return the
decoded data.
size, if given, is passed as size argument to the
read() method.
"""
# If we have lines cached from an earlier read, return
# them unconditionally
if self.linebuffer:
line = self.linebuffer[0]
del self.linebuffer[0]
if len(self.linebuffer) == 1:
# revert to charbuffer mode; we might need more data
# next time
self.charbuffer = self.linebuffer[0]
self.linebuffer = None
if not keepends:
line = line.splitlines(keepends=False)[0]
return line
readsize = size or 72
line = self._empty_charbuffer
# If size is given, we call read() only once
while True:
data = self.read(readsize, firstline=True)
if data:
# If we're at a "\r" read one extra character (which might
# be a "\n") to get a proper line ending. If the stream is
# temporarily exhausted we return the wrong line ending.
if (isinstance(data, str) and data.endswith("\r")) or \
(isinstance(data, bytes) and data.endswith(b"\r")):
data += self.read(size=1, chars=1)
line += data
lines = line.splitlines(keepends=True)
if lines:
if len(lines) > 1:
# More than one line result; the first line is a full line
# to return
line = lines[0]
del lines[0]
if len(lines) > 1:
# cache the remaining lines
lines[-1] += self.charbuffer
self.linebuffer = lines
self.charbuffer = None
else:
# only one remaining line, put it back into charbuffer
self.charbuffer = lines[0] + self.charbuffer
if not keepends:
line = line.splitlines(keepends=False)[0]
break
line0withend = lines[0]
line0withoutend = lines[0].splitlines(keepends=False)[0]
if line0withend != line0withoutend: # We really have a line end
# Put the rest back together and keep it until the next call
self.charbuffer = self._empty_charbuffer.join(lines[1:]) + \
self.charbuffer
if keepends:
line = line0withend
else:
line = line0withoutend
break
# we didn't get anything or this was our only try
if not data or size is not None:
if line and not keepends:
line = line.splitlines(keepends=False)[0]
break
if readsize < 8000:
readsize *= 2
return line
def readlines(self, sizehint=None, keepends=True):
""" Read all lines available on the input stream
and return them as list of lines.
Line breaks are implemented using the codec's decoder
method and are included in the list entries.
sizehint, if given, is ignored since there is no efficient
way to finding the true end-of-line.
"""
data = self.read()
return data.splitlines(keepends)
def reset(self):
""" Resets the codec buffers used for keeping state.
Note that no stream repositioning should take place.
This method is primarily intended to be able to recover
from decoding errors.
"""
self.bytebuffer = b""
self.charbuffer = self._empty_charbuffer
self.linebuffer = None
def seek(self, offset, whence=0):
""" Set the input stream's current position.
Resets the codec buffers used for keeping state.
"""
self.stream.seek(offset, whence)
self.reset()
def __next__(self):
""" Return the next decoded line from the input stream."""
line = self.readline()
if line:
return line
raise StopIteration
def __iter__(self):
return self
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
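# Editor's illustrative sketch (not part of the original module): a concrete
# StreamReader subclass is normally obtained via getreader() (defined below)
# and wrapped around any binary file-like object.
def _demo_stream_reader():
    import io
    reader_factory = getreader('utf-8')             # StreamReader subclass
    reader = reader_factory(io.BytesIO(b'hello\nworld\n'))
    return reader.readlines()                       # ['hello\n', 'world\n']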
class StreamReaderWriter:
""" StreamReaderWriter instances allow wrapping streams which
work in both read and write modes.
The design is such that one can use the factory functions
returned by the codec.lookup() function to construct the
instance.
"""
# Optional attributes set by the file wrappers below
encoding = 'unknown'
def __init__(self, stream, Reader, Writer, errors='strict'):
""" Creates a StreamReaderWriter instance.
stream must be a Stream-like object.
Reader, Writer must be factory functions or classes
providing the StreamReader, StreamWriter interface resp.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
return self.reader.read(size)
def readline(self, size=None):
return self.reader.readline(size)
def readlines(self, sizehint=None):
return self.reader.readlines(sizehint)
def __next__(self):
""" Return the next decoded line from the input stream."""
return next(self.reader)
def __iter__(self):
return self
def write(self, data):
return self.writer.write(data)
def writelines(self, list):
return self.writer.writelines(list)
def reset(self):
self.reader.reset()
self.writer.reset()
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
self.reader.reset()
if whence == 0 and offset == 0:
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
# these are needed to make "with codecs.open(...)" work properly
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
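# Editor's illustrative sketch (not part of the original module): wiring a
# StreamReaderWriter by hand; normally the open() shortcut below does this.
def _demo_stream_reader_writer():
    import io
    info = lookup('utf-8')
    srw = StreamReaderWriter(io.BytesIO(), info.streamreader, info.streamwriter)
    srw.write('round trip\n')                       # encoded on the way out
    srw.seek(0)
    return srw.read()                               # 'round trip\n'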
class StreamRecoder:
""" StreamRecoder instances provide a frontend - backend
view of encoding data.
They use the complete set of APIs returned by the
codecs.lookup() function to implement their task.
Data written to the stream is first decoded into an
intermediate format (which is dependent on the given codec
combination) and then written to the stream using an instance
of the provided Writer class.
        In the other direction, data is read from the stream using a
        Reader instance, then encoded and returned to the caller.
"""
# Optional attributes set by the file wrappers below
data_encoding = 'unknown'
file_encoding = 'unknown'
def __init__(self, stream, encode, decode, Reader, Writer,
errors='strict'):
""" Creates a StreamRecoder instance which implements a two-way
conversion: encode and decode work on the frontend (the
input to .read() and output of .write()) while
Reader and Writer work on the backend (reading and
writing to the stream).
You can use these objects to do transparent direct
recodings from e.g. latin-1 to utf-8 and back.
stream must be a file-like object.
encode, decode must adhere to the Codec interface, Reader,
Writer must be factory functions or classes providing the
StreamReader, StreamWriter interface resp.
encode and decode are needed for the frontend translation,
Reader and Writer for the backend translation. Unicode is
used as intermediate encoding.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.encode = encode
self.decode = decode
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
data = self.reader.read(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readline(self, size=None):
if size is None:
data = self.reader.readline()
else:
data = self.reader.readline(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readlines(self, sizehint=None):
data = self.reader.read()
data, bytesencoded = self.encode(data, self.errors)
return data.splitlines(keepends=True)
def __next__(self):
""" Return the next decoded line from the input stream."""
data = next(self.reader)
data, bytesencoded = self.encode(data, self.errors)
return data
def __iter__(self):
return self
def write(self, data):
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def writelines(self, list):
data = ''.join(list)
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def reset(self):
self.reader.reset()
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
### Shortcuts
def open(filename, mode='rb', encoding=None, errors='strict', buffering=1):
""" Open an encoded file using the given mode and return
a wrapped version providing transparent encoding/decoding.
Note: The wrapped version will only accept the object format
defined by the codecs, i.e. Unicode objects for most builtin
codecs. Output is also codec dependent and will usually be
Unicode as well.
Files are always opened in binary mode, even if no binary mode
was specified. This is done to avoid data loss due to encodings
using 8-bit values. The default file mode is 'rb' meaning to
open the file in binary read mode.
encoding specifies the encoding which is to be used for the
file.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
buffering has the same meaning as for the builtin open() API.
It defaults to line buffered.
The returned wrapped file object provides an extra attribute
.encoding which allows querying the used encoding. This
attribute is only available if an encoding was specified as
parameter.
"""
if encoding is not None and \
'b' not in mode:
# Force opening of the file in binary mode
mode = mode + 'b'
file = builtins.open(filename, mode, buffering)
if encoding is None:
return file
info = lookup(encoding)
srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors)
# Add attributes to simplify introspection
srw.encoding = encoding
return srw
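# Editor's illustrative sketch (not part of the original module): a typical
# round trip through the open() shortcut above; the file name is hypothetical.
def _demo_open():
    with open('demo-utf8.txt', 'w', encoding='utf-8') as f:
        f.write('line one\nline two\n')
    with open('demo-utf8.txt', encoding='utf-8') as f:
        return f.read()                             # 'line one\nline two\n'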
def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'):
""" Return a wrapped version of file which provides transparent
encoding translation.
Strings written to the wrapped file are interpreted according
to the given data_encoding and then written to the original
file as string using file_encoding. The intermediate encoding
will usually be Unicode but depends on the specified codecs.
Strings are read from the file using file_encoding and then
passed back to the caller as string using data_encoding.
If file_encoding is not given, it defaults to data_encoding.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
The returned wrapped file object provides two extra attributes
.data_encoding and .file_encoding which reflect the given
parameters of the same name. The attributes can be used for
introspection by Python programs.
"""
if file_encoding is None:
file_encoding = data_encoding
data_info = lookup(data_encoding)
file_info = lookup(file_encoding)
sr = StreamRecoder(file, data_info.encode, data_info.decode,
file_info.streamreader, file_info.streamwriter, errors)
# Add attributes to simplify introspection
sr.data_encoding = data_encoding
sr.file_encoding = file_encoding
return sr
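# Editor's illustrative sketch (not part of the original module): recoding
# UTF-8 input to Latin-1 output on the fly with EncodedFile().
def _demo_encoded_file():
    import io
    backing = io.BytesIO()
    wrapped = EncodedFile(backing, data_encoding='utf-8', file_encoding='latin-1')
    wrapped.write('caf\xe9'.encode('utf-8'))        # caller supplies UTF-8 bytes
    return backing.getvalue()                       # b'caf\xe9', i.e. Latin-1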
### Helpers for codec lookup
def getencoder(encoding):
""" Lookup up the codec for the given encoding and return
its encoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).encode
def getdecoder(encoding):
""" Lookup up the codec for the given encoding and return
its decoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).decode
def getincrementalencoder(encoding):
""" Lookup up the codec for the given encoding and return
its IncrementalEncoder class or factory function.
Raises a LookupError in case the encoding cannot be found
        or the codec doesn't provide an incremental encoder.
"""
encoder = lookup(encoding).incrementalencoder
if encoder is None:
raise LookupError(encoding)
return encoder
def getincrementaldecoder(encoding):
""" Lookup up the codec for the given encoding and return
its IncrementalDecoder class or factory function.
Raises a LookupError in case the encoding cannot be found
        or the codec doesn't provide an incremental decoder.
"""
decoder = lookup(encoding).incrementaldecoder
if decoder is None:
raise LookupError(encoding)
return decoder
def getreader(encoding):
""" Lookup up the codec for the given encoding and return
its StreamReader class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamreader
def getwriter(encoding):
""" Lookup up the codec for the given encoding and return
its StreamWriter class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamwriter
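# Editor's illustrative sketch (not part of the original module): the helpers
# above all resolve through lookup() and raise LookupError for unknown names.
def _demo_lookup_helpers():
    encode = getencoder('utf-8')
    decode = getdecoder('utf-8')
    data, chars_consumed = encode('snowman \u2603') # (bytes, input length)
    text, bytes_consumed = decode(data)             # (str, number of bytes used)
    return text, chars_consumed, bytes_consumed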
def iterencode(iterator, encoding, errors='strict', **kwargs):
"""
Encoding iterator.
    Encodes the input strings from the iterator using an IncrementalEncoder.
errors and kwargs are passed through to the IncrementalEncoder
constructor.
"""
encoder = getincrementalencoder(encoding)(errors, **kwargs)
for input in iterator:
output = encoder.encode(input)
if output:
yield output
output = encoder.encode("", True)
if output:
yield output
def iterdecode(iterator, encoding, errors='strict', **kwargs):
"""
Decoding iterator.
    Decodes the input strings from the iterator using an IncrementalDecoder.
errors and kwargs are passed through to the IncrementalDecoder
constructor.
"""
decoder = getincrementaldecoder(encoding)(errors, **kwargs)
for input in iterator:
output = decoder.decode(input)
if output:
yield output
output = decoder.decode(b"", True)
if output:
yield output
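# Editor's illustrative sketch (not part of the original module): streaming a
# sequence of strings through iterencode() and back through iterdecode().
def _demo_iter_roundtrip():
    chunks = ['stream', 'ing ', 'codecs']
    encoded = list(iterencode(chunks, 'utf-8'))     # list of bytes chunks
    return ''.join(iterdecode(encoded, 'utf-8'))    # 'streaming codecs'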
### Helpers for charmap-based codecs
def make_identity_dict(rng):
""" make_identity_dict(rng) -> dict
Return a dictionary where elements of the rng sequence are
mapped to themselves.
"""
return {i:i for i in rng}
def make_encoding_map(decoding_map):
""" Creates an encoding map from a decoding map.
If a target mapping in the decoding map occurs multiple
times, then that target is mapped to None (undefined mapping),
causing an exception when encountered by the charmap codec
during translation.
One example where this happens is cp875.py which decodes
        multiple characters to \u001a.
"""
m = {}
for k,v in decoding_map.items():
if not v in m:
m[v] = k
else:
m[v] = None
return m
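# Editor's illustrative sketch (not part of the original module): building a
# tiny charmap; 0x41 and 0x42 both decode to 'A' below, so the reverse map
# marks 'A' as an undefined (None) encoding target.
def _demo_charmaps():
    decoding_map = make_identity_dict(range(3))     # {0: 0, 1: 1, 2: 2}
    decoding_map.update({0x41: 'A', 0x42: 'A'})
    encoding_map = make_encoding_map(decoding_map)
    return encoding_map['A'] is None                # True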
### error handlers
try:
strict_errors = lookup_error("strict")
ignore_errors = lookup_error("ignore")
replace_errors = lookup_error("replace")
xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace")
backslashreplace_errors = lookup_error("backslashreplace")
except LookupError:
    # In --disable-unicode builds, these error handlers are missing
strict_errors = None
ignore_errors = None
replace_errors = None
xmlcharrefreplace_errors = None
backslashreplace_errors = None
# Tell modulefinder that using codecs probably needs the encodings
# package
_false = 0
if _false:
import encodings
### Tests
if __name__ == '__main__':
# Make stdout translate Latin-1 output into UTF-8 output
sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8')
# Have stdin translate Latin-1 input into UTF-8 input
sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')
|
gpl-3.0
| 2,767,250,795,188,994,000 | 9,083,503,117,729,755,000 | 31.243858 | 80 | 0.61288 | false |
boada/vpCluster
|
data/boada/may_2012/analysis/c260p61+32p13/stack_dithers.py
|
6
|
2253
|
import pyfits as pyf
from glob import glob
import os
from astLib.astStats import clippedMedianStdev
from numpy import average, delete
import numpy as np
def skySubtract(data):
sums = [sum(data[i, :]) for i in range(data.shape[0])]
med = clippedMedianStdev(sums)
med = med['clippedMedian']
skyfibers = [i for i in range(data.shape[0]) if sum(data[i, :]) <= med]
skydata = data.take(skyfibers, axis=0)
skyflux = [average(skydata[:, i]) for i in range(skydata.shape[1])]
return skyflux
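# Editor's illustrative sketch (not part of the original script): skySubtract
# keeps the fibers whose summed flux is at or below the clipped median of all
# fiber sums and returns their column-wise average as a 1-D sky spectrum.
def _demo_sky_subtract():
    fake = np.vstack([np.ones((8, 100)), 10 * np.ones((2, 100))])
    sky = skySubtract(fake)             # length-100 list, ~1.0 everywhere
    return fake - np.asarray(sky)       # crude sky-subtracted frame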
files = glob('bcs*.fits')
for f in files:
oimg = pyf.open(f)
obj = oimg[1].header['object'].split('_')
print f, obj
field, dither, num = obj[1].split()
# Correct for a typo in the naming.
if obj[0] == 'c205p08+46p7':
obj[0] = 'c250p08+46p7'
# load data and skysubtract
data = oimg[1].data
dataHDR = oimg[1].header
sky = skySubtract(data)
if not os.path.isfile(obj[0] + '_' + field + '_' + num + '.fits'):
# rewriting the whole file because that is easy to update
oimg.writeto(obj[0] + '_' + field + '_' + num + '.fits')
# update with sky subtraction
pyf.update(obj[0] + '_' + field + '_' + num + '.fits', data, dataHDR,
1)
else:
# Here's the data we are going to add to
img = pyf.open(obj[0] + '_' + field + '_' + num + '.fits')
data1 = img[1].data
dataHDR1 = img[1].header
try:
pyf.update(obj[0] + '_' + field + '_' + num + '.fits',
data1 + data, dataHDR, 1)
except ValueError:
print 'Different lengths'
# Make sure all of the arrays are the same length
if data.shape[1] > data1.shape[1]:
sky.pop(-1 * (data.shape[1] - data1.shape[1]))
data = delete(data, -1 * (data.shape[1] - data1.shape[1]), 1)
elif data.shape[1] < data1.shape[1]:
data1 = delete(data1, -1 * (data1.shape[1] - data.shape[1]), 1)
else:
print "I don't know what to do!"
# UPDATE!!!
pyf.update(obj[0] + '_' + field + '_' + num + '.fits',
data1 + data, dataHDR, 1)
img.close()
oimg.close()
|
mit
| -8,820,437,071,594,307,000 | -6,905,920,317,403,745,000 | 32.132353 | 79 | 0.532623 | false |
Microsoft/PTVS
|
Python/Templates/Django/ProjectTemplates/Python/Web/DjangoProject/settings.py
|
4
|
3444
|
"""
Django settings for $safeprojectname$ project.
Based on 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
import posixpath
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$guid2$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application references
# https://docs.djangoproject.com/en/2.1/ref/settings/#std:setting-INSTALLED_APPS
INSTALLED_APPS = [
# Add your apps here to enable them
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
# Middleware framework
# https://docs.djangoproject.com/en/2.1/topics/http/middleware/
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = '$safeprojectname$.urls'
# Template configuration
# https://docs.djangoproject.com/en/2.1/topics/templates/
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = '$safeprojectname$.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
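# Editor's illustrative sketch (not part of the template): swapping SQLite for
# PostgreSQL; the credentials below are placeholders only.
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.postgresql',
#         'NAME': 'mydatabase',
#         'USER': 'myuser',
#         'PASSWORD': 'mypassword',
#         'HOST': 'localhost',
#         'PORT': '5432',
#     }
# }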
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = posixpath.join(*(BASE_DIR.split(os.path.sep) + ['static']))
|
apache-2.0
| -5,725,653,498,355,290,000 | 2,675,797,260,026,082,000 | 29.477876 | 91 | 0.696574 | false |
cypod/arsenalsuite
|
cpp/lib/PyQt4/examples/dialogs/findfiles.py
|
20
|
7982
|
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2010 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt4 import QtCore, QtGui
class Window(QtGui.QDialog):
def __init__(self, parent=None):
super(Window, self).__init__(parent)
browseButton = self.createButton("&Browse...", self.browse)
findButton = self.createButton("&Find", self.find)
self.fileComboBox = self.createComboBox("*")
self.textComboBox = self.createComboBox()
self.directoryComboBox = self.createComboBox(QtCore.QDir.currentPath())
fileLabel = QtGui.QLabel("Named:")
textLabel = QtGui.QLabel("Containing text:")
directoryLabel = QtGui.QLabel("In directory:")
self.filesFoundLabel = QtGui.QLabel()
self.createFilesTable()
buttonsLayout = QtGui.QHBoxLayout()
buttonsLayout.addStretch()
buttonsLayout.addWidget(findButton)
mainLayout = QtGui.QGridLayout()
mainLayout.addWidget(fileLabel, 0, 0)
mainLayout.addWidget(self.fileComboBox, 0, 1, 1, 2)
mainLayout.addWidget(textLabel, 1, 0)
mainLayout.addWidget(self.textComboBox, 1, 1, 1, 2)
mainLayout.addWidget(directoryLabel, 2, 0)
mainLayout.addWidget(self.directoryComboBox, 2, 1)
mainLayout.addWidget(browseButton, 2, 2)
mainLayout.addWidget(self.filesTable, 3, 0, 1, 3)
mainLayout.addWidget(self.filesFoundLabel, 4, 0)
mainLayout.addLayout(buttonsLayout, 5, 0, 1, 3)
self.setLayout(mainLayout)
self.setWindowTitle("Find Files")
self.resize(700, 300)
def browse(self):
directory = QtGui.QFileDialog.getExistingDirectory(self, "Find Files",
QtCore.QDir.currentPath())
if directory:
if self.directoryComboBox.findText(directory) == -1:
self.directoryComboBox.addItem(directory)
self.directoryComboBox.setCurrentIndex(self.directoryComboBox.findText(directory))
@staticmethod
def updateComboBox(comboBox):
if comboBox.findText(comboBox.currentText()) == -1:
comboBox.addItem(comboBox.currentText())
def find(self):
self.filesTable.setRowCount(0)
fileName = self.fileComboBox.currentText()
text = self.textComboBox.currentText()
path = self.directoryComboBox.currentText()
self.updateComboBox(self.fileComboBox)
self.updateComboBox(self.textComboBox)
self.updateComboBox(self.directoryComboBox)
self.currentDir = QtCore.QDir(path)
if not fileName:
fileName = "*"
files = self.currentDir.entryList([fileName],
QtCore.QDir.Files | QtCore.QDir.NoSymLinks)
if text:
files = self.findFiles(files, text)
self.showFiles(files)
def findFiles(self, files, text):
progressDialog = QtGui.QProgressDialog(self)
progressDialog.setCancelButtonText("&Cancel")
progressDialog.setRange(0, files.count())
progressDialog.setWindowTitle("Find Files")
foundFiles = []
for i in range(files.count()):
progressDialog.setValue(i)
progressDialog.setLabelText("Searching file number %d of %d..." % (i, files.count()))
QtGui.qApp.processEvents()
if progressDialog.wasCanceled():
break
inFile = QtCore.QFile(self.currentDir.absoluteFilePath(files[i]))
if inFile.open(QtCore.QIODevice.ReadOnly):
stream = QtCore.QTextStream(inFile)
while not stream.atEnd():
if progressDialog.wasCanceled():
break
line = stream.readLine()
if text in line:
foundFiles.append(files[i])
break
progressDialog.close()
return foundFiles
def showFiles(self, files):
for fn in files:
file = QtCore.QFile(self.currentDir.absoluteFilePath(fn))
size = QtCore.QFileInfo(file).size()
fileNameItem = QtGui.QTableWidgetItem(fn)
fileNameItem.setFlags(fileNameItem.flags() ^ QtCore.Qt.ItemIsEditable)
sizeItem = QtGui.QTableWidgetItem("%d KB" % (int((size + 1023) / 1024)))
sizeItem.setTextAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignRight)
sizeItem.setFlags(sizeItem.flags() ^ QtCore.Qt.ItemIsEditable)
row = self.filesTable.rowCount()
self.filesTable.insertRow(row)
self.filesTable.setItem(row, 0, fileNameItem)
self.filesTable.setItem(row, 1, sizeItem)
self.filesFoundLabel.setText("%d file(s) found (Double click on a file to open it)" % len(files))
def createButton(self, text, member):
button = QtGui.QPushButton(text)
button.clicked.connect(member)
return button
def createComboBox(self, text=""):
comboBox = QtGui.QComboBox()
comboBox.setEditable(True)
comboBox.addItem(text)
comboBox.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Preferred)
return comboBox
def createFilesTable(self):
self.filesTable = QtGui.QTableWidget(0, 2)
self.filesTable.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.filesTable.setHorizontalHeaderLabels(("File Name", "Size"))
self.filesTable.horizontalHeader().setResizeMode(0, QtGui.QHeaderView.Stretch)
self.filesTable.verticalHeader().hide()
self.filesTable.setShowGrid(False)
self.filesTable.cellActivated.connect(self.openFileOfItem)
def openFileOfItem(self, row, column):
item = self.filesTable.item(row, 0)
QtGui.QDesktopServices.openUrl(QtCore.QUrl(self.currentDir.absoluteFilePath(item.text())))
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec_())
|
gpl-2.0
| -5,877,463,892,688,587,000 | 6,950,030,664,961,699,000 | 37.009524 | 105 | 0.648584 | false |
gadybadger/mixpanel-celery
|
mixpanel/conf/settings.py
|
1
|
2592
|
"""Default configuration values and documentation"""
from django.conf import settings
"""
.. data:: MIXPANEL_API_TOKEN
API token for your Mixpanel account. This configures the Mixpanel account
where all of the action will be.
You can find this on the ``API Information`` tab on your
`mixpanel account page`_
.. _`mixpanel account page`: http://mixpanel.com/user/account/
"""
MIXPANEL_API_TOKEN = getattr(settings, 'MIXPANEL_API_TOKEN', None)
"""
.. data:: MIXPANEL_RETRY_DELAY
Number of seconds to wait before retrying an event-tracking request that
failed because of an invalid server response. These failed responses are
usually 502's or 504's because Mixpanel is under increased load.
Defaults to 5 minutes.
"""
MIXPANEL_RETRY_DELAY = getattr(settings, 'MIXPANEL_RETRY_DELAY', 60*5)
"""
.. data:: MIXPANEL_MAX_RETRIES
Number of retry attempts to make before raising an exception.
Defaults to 5 attempts.
"""
MIXPANEL_MAX_RETRIES = getattr(settings, 'MIXPANEL_MAX_RETRIES', 5)
"""
.. data:: MIXPANEL_API_TIMEOUT
    Number of seconds to wait before timing out a request to the mixpanel api
server. The default 30-second timeout can cause your job queue to become
swamped.
Defaults to 5 seconds.
"""
MIXPANEL_API_TIMEOUT = getattr(settings, 'MIXPANEL_API_TIMEOUT', 5)
"""
.. data:: MIXPANEL_API_SERVER
URL for the mixpanel api server. This probably shouldn't change.
"""
MIXPANEL_API_SERVER = getattr(settings, 'MIXPANEL_API_SERVER',
'api.mixpanel.com')
"""
.. data:: MIXPANEL_TRACKING_ENDPOINT
    URL endpoint for registering events. Defaults to ``/track/``
Mind the slashes.
"""
MIXPANEL_TRACKING_ENDPOINT = getattr(settings, 'MIXPANEL_TRACKING_ENDPOINT',
'/track/')
"""
.. data:: MIXPANEL_PEOPLE_TRACKING_ENDPOINT
    URL endpoint for registering people data. Defaults to ``/engage/``
Mind the slashes.
"""
MIXPANEL_PEOPLE_TRACKING_ENDPOINT = getattr(settings, 'MIXPANEL_PEOPLE_TRACKING_ENDPOINT',
'/engage/')
"""
.. data:: MIXPANEL_DATA_VARIABLE
Name of the http GET variable used for transferring property information
when registering events.
"""
MIXPANEL_DATA_VARIABLE = getattr(settings, 'MIXPANEL_DATA_VARIABLE',
'data')
"""
.. data:: MIXPANEL_FUNNEL_EVENT_ID
The event identifier that indicates that a funnel is being tracked and not
just a normal event.
"""
MIXPANEL_FUNNEL_EVENT_ID = getattr(settings, 'MIXPANEL_FUNNEL_EVENT_ID',
'mp_funnel')
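# Editor's illustrative sketch (not part of the original module): a project
# overrides any of these defaults in its own Django settings module, and the
# getattr() calls above pick the overrides up at import time, e.g.
#
# MIXPANEL_API_TOKEN = '0123456789abcdef' # placeholder token
# MIXPANEL_RETRY_DELAY = 60 * 2
# MIXPANEL_MAX_RETRIES = 3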
|
bsd-3-clause
| -274,113,717,889,416,900 | 3,321,208,616,821,100,500 | 26.574468 | 90 | 0.682099 | false |
gsehub/edx-platform
|
common/djangoapps/student/tests/test_reset_password.py
|
8
|
17950
|
"""
Test the various password reset flows
"""
import json
import re
import unittest
import ddt
from django.conf import settings
from django.contrib.auth.hashers import UNUSABLE_PASSWORD_PREFIX
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.core.cache import cache
from django.core import mail
from django.urls import reverse
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.utils.http import int_to_base36
from edx_oauth2_provider.tests.factories import AccessTokenFactory, ClientFactory, RefreshTokenFactory
from mock import Mock, patch
from oauth2_provider import models as dot_models
from provider.oauth2 import models as dop_models
from openedx.core.djangoapps.oauth_dispatch.tests import factories as dot_factories
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.user_api.models import UserRetirementRequest
from openedx.core.djangoapps.user_api.config.waffle import PREVENT_AUTH_USER_WRITES, SYSTEM_MAINTENANCE_MSG, waffle
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from student.tests.factories import UserFactory
from student.tests.test_email import mock_render_to_string
from student.views import SETTING_CHANGE_INITIATED, password_reset, password_reset_confirm_wrapper
from util.testing import EventTestMixin
from .test_configuration_overrides import fake_get_value
@unittest.skipUnless(
settings.ROOT_URLCONF == "lms.urls",
"reset password tests should only run in LMS"
)
@ddt.ddt
class ResetPasswordTests(EventTestMixin, CacheIsolationTestCase):
"""
Tests that clicking reset password sends email, and doesn't activate the user
"""
request_factory = RequestFactory()
ENABLED_CACHES = ['default']
def setUp(self):
super(ResetPasswordTests, self).setUp('student.views.management.tracker')
self.user = UserFactory.create()
self.user.is_active = False
self.user.save()
self.token = default_token_generator.make_token(self.user)
self.uidb36 = int_to_base36(self.user.id)
self.user_bad_passwd = UserFactory.create()
self.user_bad_passwd.is_active = False
self.user_bad_passwd.password = UNUSABLE_PASSWORD_PREFIX
self.user_bad_passwd.save()
@patch('student.views.management.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_user_bad_password_reset(self):
"""
        Tests password reset behavior for a user whose password is marked UNUSABLE_PASSWORD_PREFIX
"""
bad_pwd_req = self.request_factory.post('/password_reset/', {'email': self.user_bad_passwd.email})
bad_pwd_resp = password_reset(bad_pwd_req)
# If they've got an unusable password, we return a successful response code
self.assertEquals(bad_pwd_resp.status_code, 200)
obj = json.loads(bad_pwd_resp.content)
self.assertEquals(obj, {
'success': True,
'value': "('registration/password_reset_done.html', [])",
})
self.assert_no_events_were_emitted()
@patch('student.views.management.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_nonexist_email_password_reset(self):
"""
        Now test the exception cases of reset_password called with an invalid email.
"""
bad_email_req = self.request_factory.post('/password_reset/', {'email': self.user.email + "makeItFail"})
bad_email_resp = password_reset(bad_email_req)
# Note: even if the email is bad, we return a successful response code
# This prevents someone potentially trying to "brute-force" find out which
# emails are and aren't registered with edX
self.assertEquals(bad_email_resp.status_code, 200)
obj = json.loads(bad_email_resp.content)
self.assertEquals(obj, {
'success': True,
'value': "('registration/password_reset_done.html', [])",
})
self.assert_no_events_were_emitted()
@patch('student.views.management.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_password_reset_ratelimited(self):
"""
        Try (and fail) to reset the password 30 times in a row on a non-existent email address
"""
cache.clear()
for i in xrange(30):
good_req = self.request_factory.post('/password_reset/', {
'email': 'thisdoesnotexist{0}@foo.com'.format(i)
})
good_resp = password_reset(good_req)
self.assertEquals(good_resp.status_code, 200)
# then the rate limiter should kick in and give a HttpForbidden response
bad_req = self.request_factory.post('/password_reset/', {'email': '[email protected]'})
bad_resp = password_reset(bad_req)
self.assertEquals(bad_resp.status_code, 403)
self.assert_no_events_were_emitted()
cache.clear()
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@ddt.data('plain_text', 'html')
def test_reset_password_email(self, body_type):
"""Tests contents of reset password email, and that user is not active"""
good_req = self.request_factory.post('/password_reset/', {'email': self.user.email})
good_req.user = self.user
good_req.site = Mock(domain='example.com')
dop_client = ClientFactory()
dop_access_token = AccessTokenFactory(user=self.user, client=dop_client)
RefreshTokenFactory(user=self.user, client=dop_client, access_token=dop_access_token)
dot_application = dot_factories.ApplicationFactory(user=self.user)
dot_access_token = dot_factories.AccessTokenFactory(user=self.user, application=dot_application)
dot_factories.RefreshTokenFactory(user=self.user, application=dot_application, access_token=dot_access_token)
good_resp = password_reset(good_req)
self.assertEquals(good_resp.status_code, 200)
self.assertFalse(dop_models.AccessToken.objects.filter(user=self.user).exists())
self.assertFalse(dop_models.RefreshToken.objects.filter(user=self.user).exists())
self.assertFalse(dot_models.AccessToken.objects.filter(user=self.user).exists())
self.assertFalse(dot_models.RefreshToken.objects.filter(user=self.user).exists())
obj = json.loads(good_resp.content)
self.assertTrue(obj['success'])
self.assertIn('e-mailed you instructions for setting your password', obj['value'])
from_email = configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
sent_message = mail.outbox[0]
bodies = {
'plain_text': sent_message.body,
'html': sent_message.alternatives[0][0],
}
body = bodies[body_type]
self.assertIn("Password reset", sent_message.subject)
self.assertIn("You're receiving this e-mail because you requested a password reset", body)
self.assertEquals(sent_message.from_email, from_email)
self.assertEquals(len(sent_message.to), 1)
self.assertIn(self.user.email, sent_message.to)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None,
)
# Test that the user is not active
self.user = User.objects.get(pk=self.user.pk)
self.assertFalse(self.user.is_active)
self.assertIn('password_reset_confirm/', body)
re.search(r'password_reset_confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/', body).groupdict()
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch('django.core.mail.send_mail')
@ddt.data((False, 'http://'), (True, 'https://'))
@ddt.unpack
def test_reset_password_email_https(self, is_secure, protocol, send_email):
"""
Tests that the right url protocol is included in the reset password link
"""
req = self.request_factory.post(
'/password_reset/', {'email': self.user.email}
)
req.site = Mock(domain='example.com')
req.is_secure = Mock(return_value=is_secure)
req.user = self.user
password_reset(req)
_, msg, _, _ = send_email.call_args[0]
expected_msg = "Please go to the following page and choose a new password:\n\n" + protocol
self.assertIn(expected_msg, msg)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None
)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch('django.core.mail.send_mail')
@ddt.data(('Crazy Awesome Site', 'Crazy Awesome Site'), ('edX', 'edX'))
@ddt.unpack
def test_reset_password_email_site(self, site_name, platform_name, send_email):
"""
Tests that the right url domain and platform name is included in
the reset password email
"""
with patch("django.conf.settings.PLATFORM_NAME", platform_name):
with patch("django.conf.settings.SITE_NAME", site_name):
req = self.request_factory.post(
'/password_reset/', {'email': self.user.email}
)
req.user = self.user
req.site = Mock(domain='example.com')
password_reset(req)
_, msg, _, _ = send_email.call_args[0]
reset_msg = "you requested a password reset for your user account at {}"
reset_msg = reset_msg.format(site_name)
self.assertIn(reset_msg, msg)
sign_off = "The {} Team".format(platform_name)
self.assertIn(sign_off, msg)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None
)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch("openedx.core.djangoapps.site_configuration.helpers.get_value", fake_get_value)
@ddt.data('plain_text', 'html')
def test_reset_password_email_configuration_override(self, body_type):
"""
Tests that the right url domain and platform name is included in
the reset password email
"""
req = self.request_factory.post(
'/password_reset/', {'email': self.user.email}
)
req.get_host = Mock(return_value=None)
req.site = Mock(domain='example.com')
req.user = self.user
with patch('crum.get_current_request', return_value=req):
password_reset(req)
sent_message = mail.outbox[0]
bodies = {
'plain_text': sent_message.body,
'html': sent_message.alternatives[0][0],
}
body = bodies[body_type]
reset_msg = "you requested a password reset for your user account at {}".format(fake_get_value('PLATFORM_NAME'))
self.assertIn(reset_msg, body)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None
)
self.assertEqual(sent_message.from_email, "[email protected]")
@ddt.data(
('invalidUid', 'invalid_token'),
(None, 'invalid_token'),
('invalidUid', None),
)
@ddt.unpack
def test_reset_password_bad_token(self, uidb36, token):
"""
Tests bad token and uidb36 in password reset
"""
if uidb36 is None:
uidb36 = self.uidb36
if token is None:
token = self.token
bad_request = self.request_factory.get(
reverse(
"password_reset_confirm",
kwargs={"uidb36": uidb36, "token": token}
)
)
password_reset_confirm_wrapper(bad_request, uidb36, token)
self.user = User.objects.get(pk=self.user.pk)
self.assertFalse(self.user.is_active)
def test_reset_password_good_token(self):
"""
Tests good token and uidb36 in password reset
"""
url = reverse(
"password_reset_confirm",
kwargs={"uidb36": self.uidb36, "token": self.token}
)
good_reset_req = self.request_factory.get(url)
password_reset_confirm_wrapper(good_reset_req, self.uidb36, self.token)
self.user = User.objects.get(pk=self.user.pk)
self.assertTrue(self.user.is_active)
def test_password_reset_fail(self):
"""
Tests that if we provide mismatched passwords, user is not marked as active.
"""
self.assertFalse(self.user.is_active)
url = reverse(
'password_reset_confirm',
kwargs={'uidb36': self.uidb36, 'token': self.token}
)
request_params = {'new_password1': 'password1', 'new_password2': 'password2'}
confirm_request = self.request_factory.post(url, data=request_params)
# Make a password reset request with mismatching passwords.
resp = password_reset_confirm_wrapper(confirm_request, self.uidb36, self.token)
# Verify the response status code is: 200 with password reset fail and also verify that
# the user is not marked as active.
self.assertEqual(resp.status_code, 200)
self.assertFalse(User.objects.get(pk=self.user.pk).is_active)
def test_password_reset_retired_user_fail(self):
"""
Tests that if a retired user attempts to reset their password, it fails.
"""
self.assertFalse(self.user.is_active)
# Retire the user.
UserRetirementRequest.create_retirement_request(self.user)
url = reverse(
'password_reset_confirm',
kwargs={'uidb36': self.uidb36, 'token': self.token}
)
reset_req = self.request_factory.get(url)
resp = password_reset_confirm_wrapper(reset_req, self.uidb36, self.token)
# Verify the response status code is: 200 with password reset fail and also verify that
# the user is not marked as active.
self.assertEqual(resp.status_code, 200)
self.assertFalse(User.objects.get(pk=self.user.pk).is_active)
def test_password_reset_prevent_auth_user_writes(self):
with waffle().override(PREVENT_AUTH_USER_WRITES, True):
url = reverse(
"password_reset_confirm",
kwargs={"uidb36": self.uidb36, "token": self.token}
)
for request in [self.request_factory.get(url), self.request_factory.post(url)]:
response = password_reset_confirm_wrapper(request, self.uidb36, self.token)
assert response.context_data['err_msg'] == SYSTEM_MAINTENANCE_MSG
self.user.refresh_from_db()
assert not self.user.is_active
@override_settings(PASSWORD_MIN_LENGTH=2)
@override_settings(PASSWORD_MAX_LENGTH=10)
@ddt.data(
{
'password': '1',
'error_message': 'Enter a password with at least 2 characters.',
},
{
'password': '01234567891',
'error_message': 'Enter a password with at most 10 characters.',
}
)
def test_password_reset_with_invalid_length(self, password_dict):
"""
Tests that if we provide password characters less then PASSWORD_MIN_LENGTH,
or more than PASSWORD_MAX_LENGTH, password reset will fail with error message.
"""
url = reverse(
'password_reset_confirm',
kwargs={'uidb36': self.uidb36, 'token': self.token}
)
request_params = {'new_password1': password_dict['password'], 'new_password2': password_dict['password']}
confirm_request = self.request_factory.post(url, data=request_params)
# Make a password reset request with minimum/maximum passwords characters.
response = password_reset_confirm_wrapper(confirm_request, self.uidb36, self.token)
self.assertEqual(response.context_data['err_msg'], password_dict['error_message'])
@patch('student.views.management.password_reset_confirm')
@patch("openedx.core.djangoapps.site_configuration.helpers.get_value", fake_get_value)
def test_reset_password_good_token_configuration_override(self, reset_confirm):
"""
Tests password reset confirmation page for site configuration override.
"""
url = reverse(
"password_reset_confirm",
kwargs={"uidb36": self.uidb36, "token": self.token}
)
good_reset_req = self.request_factory.get(url)
password_reset_confirm_wrapper(good_reset_req, self.uidb36, self.token)
confirm_kwargs = reset_confirm.call_args[1]
self.assertEquals(confirm_kwargs['extra_context']['platform_name'], 'Fake University')
self.user = User.objects.get(pk=self.user.pk)
self.assertTrue(self.user.is_active)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch('django.core.mail.send_mail')
@ddt.data('Crazy Awesome Site', 'edX')
def test_reset_password_email_subject(self, platform_name, send_email):
"""
Tests that the right platform name is included in
the reset password email subject
"""
with patch("django.conf.settings.PLATFORM_NAME", platform_name):
req = self.request_factory.post(
'/password_reset/', {'email': self.user.email}
)
req.user = self.user
req.site = Mock(domain='example.com')
password_reset(req)
subj, _, _, _ = send_email.call_args[0]
self.assertIn(platform_name, subj)
|
agpl-3.0
| -3,813,667,415,754,479,000 | 1,168,716,964,056,514,600 | 42.045564 | 120 | 0.643343 | false |
mxamin/youtube-dl
|
youtube_dl/extractor/fivemin.py
|
79
|
1917
|
from __future__ import unicode_literals
from .common import InfoExtractor
class FiveMinIE(InfoExtractor):
IE_NAME = '5min'
_VALID_URL = r'(?:5min:|https?://(?:[^/]*?5min\.com/|delivery\.vidible\.tv/aol)(?:(?:Scripts/PlayerSeed\.js|playerseed/?)?\?.*?playList=)?)(?P<id>\d+)'
_TESTS = [
{
# From http://www.engadget.com/2013/11/15/ipad-mini-retina-display-review/
'url': 'http://pshared.5min.com/Scripts/PlayerSeed.js?sid=281&width=560&height=345&playList=518013791',
'md5': '4f7b0b79bf1a470e5004f7112385941d',
'info_dict': {
'id': '518013791',
'ext': 'mp4',
'title': 'iPad Mini with Retina Display Review',
'description': 'iPad mini with Retina Display review',
'duration': 177,
'uploader': 'engadget',
'upload_date': '20131115',
'timestamp': 1384515288,
},
'params': {
# m3u8 download
'skip_download': True,
}
},
{
# From http://on.aol.com/video/how-to-make-a-next-level-fruit-salad-518086247
'url': '5min:518086247',
'md5': 'e539a9dd682c288ef5a498898009f69e',
'info_dict': {
'id': '518086247',
'ext': 'mp4',
'title': 'How to Make a Next-Level Fruit Salad',
'duration': 184,
},
'skip': 'no longer available',
},
{
'url': 'http://embed.5min.com/518726732/',
'only_matching': True,
},
{
'url': 'http://delivery.vidible.tv/aol?playList=518013791',
'only_matching': True,
}
]
def _real_extract(self, url):
video_id = self._match_id(url)
return self.url_result('aol-video:%s' % video_id)
|
unlicense
| -2,171,718,483,590,350,800 | 8,334,218,842,241,404,000 | 34.5 | 155 | 0.491914 | false |
thomasrogers03/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checkers/xml.py
|
187
|
2044
|
# Copyright (C) 2010 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Checks WebKit style for XML files."""
from __future__ import absolute_import
from xml.parsers import expat
class XMLChecker(object):
"""Processes XML lines for checking style."""
def __init__(self, file_path, handle_style_error):
self._handle_style_error = handle_style_error
self._handle_style_error.turn_off_line_filtering()
def check(self, lines):
parser = expat.ParserCreate()
try:
for line in lines:
parser.Parse(line)
parser.Parse('\n')
parser.Parse('', True)
except expat.ExpatError, error:
self._handle_style_error(error.lineno, 'xml/syntax', 5, expat.ErrorString(error.code))
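# Editor's illustrative sketch (not part of the original checker): collecting
# the style errors reported for a small malformed document.
def _demo_xml_checker():
    errors = []
    def handle_error(line_number, category, confidence, message):
        errors.append((line_number, category, message))
    handle_error.turn_off_line_filtering = lambda: None
    checker = XMLChecker('demo.xml', handle_error)  # 'demo.xml' is made up
    checker.check(['<root>', '<unclosed>', '</root>'])
    return errors                                   # one xml/syntax entry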
|
bsd-3-clause
| -2,694,662,881,963,628,000 | -5,738,437,660,844,200,000 | 44.422222 | 98 | 0.721624 | false |
Maksimall89/houmpack
|
lcdmy.py
|
1
|
4270
|
#!/usr/bin/python
# --------------------------------------
# ___ ___ _ ____
# / _ \/ _ \(_) __/__ __ __
# / , _/ ___/ /\ \/ _ \/ // /
# /_/|_/_/ /_/___/ .__/\_, /
# /_/ /___/
#
# lcd_16x2.py
# 20x4 LCD Test Script with
# backlight control and text justification
#
# Author : Matt Hawkins
# Date : 06/04/2015
#
# http://www.raspberrypi-spy.co.uk/
#
# --------------------------------------
# The wiring for the LCD is as follows:
# 1 : GND
# 2 : 5V
# 3 : Contrast (0-5V)*
# 4 : RS (Register Select)
# 5 : R/W (Read Write) - GROUND THIS PIN
# 6 : Enable or Strobe
# 7 : Data Bit 0 - NOT USED
# 8 : Data Bit 1 - NOT USED
# 9 : Data Bit 2 - NOT USED
# 10: Data Bit 3 - NOT USED
# 11: Data Bit 4
# 12: Data Bit 5
# 13: Data Bit 6
# 14: Data Bit 7
# 15: LCD Backlight +5V**
# 16: LCD Backlight GND
# import
import RPi.GPIO as GPIO
import time
# Define GPIO to LCD mapping
LCD_RS = 7
LCD_E = 8
LCD_D4 = 25
LCD_D5 = 24
LCD_D6 = 23
LCD_D7 = 18
LED_ON = 15
# Define some device constants
LCD_WIDTH = 20 # Maximum characters per line
LCD_CHR = True
LCD_CMD = False
LCD_LINE_1 = 0x80 # LCD RAM address for the 1st line
LCD_LINE_2 = 0xC0 # LCD RAM address for the 2nd line
LCD_LINE_3 = 0x94 # LCD RAM address for the 3rd line
LCD_LINE_4 = 0xD4 # LCD RAM address for the 4th line
# Timing constants
E_PULSE = 0.0005
E_DELAY = 0.0005
def work(messange, line, style):
if line == 1:
string(messange, LCD_LINE_1, style)
elif line == 2:
string(messange, LCD_LINE_2, style)
elif line == 3:
string(messange, LCD_LINE_3, style)
elif line == 4:
string(messange, LCD_LINE_4, style)
def init():
# Main program block
GPIO.setmode(GPIO.BCM) # Use BCM GPIO numbers
GPIO.setup(LCD_E, GPIO.OUT) # E
GPIO.setup(LCD_RS, GPIO.OUT) # RS
GPIO.setup(LCD_D4, GPIO.OUT) # DB4
GPIO.setup(LCD_D5, GPIO.OUT) # DB5
GPIO.setup(LCD_D6, GPIO.OUT) # DB6
GPIO.setup(LCD_D7, GPIO.OUT) # DB7
GPIO.setup(LED_ON, GPIO.OUT) # Backlight enable
# Initialise display
byte(0x33, LCD_CMD) # 110011 Initialise
byte(0x32, LCD_CMD) # 110010 Initialise
byte(0x06, LCD_CMD) # 000110 Cursor move direction
byte(0x0C, LCD_CMD) # 001100 Display On,Cursor Off, Blink Off
byte(0x28, LCD_CMD) # 101000 Data length, number of lines, font size
byte(0x01, LCD_CMD) # 000001 Clear display
time.sleep(E_DELAY)
def byte(bits, mode):
# Send byte to data pins
# bits = data
# mode = True for character
# False for command
GPIO.output(LCD_RS, mode) # RS
# High bits
GPIO.output(LCD_D4, False)
GPIO.output(LCD_D5, False)
GPIO.output(LCD_D6, False)
GPIO.output(LCD_D7, False)
if bits & 0x10 == 0x10:
GPIO.output(LCD_D4, True)
if bits & 0x20 == 0x20:
GPIO.output(LCD_D5, True)
if bits & 0x40 == 0x40:
GPIO.output(LCD_D6, True)
if bits & 0x80 == 0x80:
GPIO.output(LCD_D7, True)
# Toggle 'Enable' pin
toggle_enable()
# Low bits
GPIO.output(LCD_D4, False)
GPIO.output(LCD_D5, False)
GPIO.output(LCD_D6, False)
GPIO.output(LCD_D7, False)
if bits & 0x01 == 0x01:
GPIO.output(LCD_D4, True)
if bits & 0x02 == 0x02:
GPIO.output(LCD_D5, True)
if bits & 0x04 == 0x04:
GPIO.output(LCD_D6, True)
if bits & 0x08 == 0x08:
GPIO.output(LCD_D7, True)
# Toggle 'Enable' pin
toggle_enable()
def toggle_enable():
# Toggle enable
time.sleep(E_DELAY)
GPIO.output(LCD_E, True)
time.sleep(E_PULSE)
GPIO.output(LCD_E, False)
time.sleep(E_DELAY)
def string(message, line, style):
# Send string to display
# style=1 Left justified
# style=2 Centred
# style=3 Right justified
if style == "left":
message = message.ljust(LCD_WIDTH, " ")
elif style == "center":
message = message.center(LCD_WIDTH, " ")
elif style == "right":
message = message.rjust(LCD_WIDTH, " ")
else:
message = message.ljust(LCD_WIDTH, " ")
byte(line, LCD_CMD)
for i in range(LCD_WIDTH):
byte(ord(message[i]), LCD_CHR)
def clear():
# Blank display
byte(0x01, LCD_CMD)
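# Editor's illustrative sketch (not part of the original script): bring the
# display up and write a short banner; only meaningful on a Raspberry Pi
# wired as described in the header above.
if __name__ == '__main__':
    init()
    work("houmpack", 1, "center")
    work("LCD ready", 2, "left")
    time.sleep(3)
    clear()
    GPIO.cleanup()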
|
gpl-3.0
| -8,707,150,332,237,235,000 | -1,303,474,173,749,116,400 | 23.682081 | 73 | 0.572131 | false |
talbrecht/pism_pik06
|
doc/site-packages/sphinxcontrib/bibtex/latex_codec.py
|
2
|
36564
|
# -*- coding: utf-8 -*-
"""
Character translation utilities for LaTeX-formatted text
========================================================
Usage:
- unicode(string,'latex')
- ustring.decode('latex')
are both available just by letting "import latex" find this file.
- unicode(string,'latex+latin1')
- ustring.decode('latex+latin1')
where latin1 can be replaced by any other known encoding, also
become available by calling latex.register().
Copyright (c) 2003, 2008 David Eppstein
Copyright (c) 2011 Matthias C. M. Troffaes
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function
import codecs
import collections
import re
from sphinxcontrib.bibtex import latex_lexer
def register():
"""Enable encodings of the form 'latex+x' where x describes another encoding.
Unicode characters are translated to or from x when possible, otherwise
expanded to latex.
"""
codecs.register(find_latex)
# returns the codec search function
# this is used if latex_codec.py were to be placed in stdlib
def getregentry():
"""Encodings module API."""
return find_latex('latex')
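# Editor's illustrative sketch (not part of the original module), mirroring
# the module docstring above (Python 2 usage; the exact translation depends
# on the tables built below):
#
# register()
# print("Caf\\'e --- tr\\`es bien".decode('latex+latin1'))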
class LatexUnicodeTable:
"""Tabulates a translation between latex and unicode."""
def __init__(self, lexer):
self.lexer = lexer
self.unicode_map = {}
self.max_length = 0
self.latex_map = {}
self.register_all()
def register_all(self):
# TODO complete this list
# register special symbols
self.register(u'\N{EN DASH}', b'--')
self.register(u'\N{EN DASH}', b'\\textendash')
self.register(u'\N{EM DASH}', b'---')
self.register(u'\N{EM DASH}', b'\\textemdash')
self.register(u'\N{LEFT SINGLE QUOTATION MARK}', b'`', decode=False)
self.register(u'\N{RIGHT SINGLE QUOTATION MARK}', b"'", decode=False)
self.register(u'\N{LEFT DOUBLE QUOTATION MARK}', b'``')
self.register(u'\N{RIGHT DOUBLE QUOTATION MARK}', b"''")
self.register(u'\N{DAGGER}', b'\\dag')
self.register(u'\N{DOUBLE DAGGER}', b'\\ddag')
self.register(u'\N{BULLET}', b'\\bullet', mode='math')
self.register(u'\N{BULLET}', b'\\textbullet', package='textcomp')
self.register(u'\N{NUMBER SIGN}', b'\\#')
self.register(u'\N{AMPERSAND}', b'\\&')
self.register(u'\N{NO-BREAK SPACE}', b'~')
self.register(u'\N{INVERTED EXCLAMATION MARK}', b'!`')
self.register(u'\N{CENT SIGN}', b'\\not{c}')
self.register(u'\N{POUND SIGN}', b'\\pounds')
self.register(u'\N{POUND SIGN}', b'\\textsterling', package='textcomp')
self.register(u'\N{SECTION SIGN}', b'\\S')
self.register(u'\N{DIAERESIS}', b'\\"{}')
self.register(u'\N{NOT SIGN}', b'\\neg')
self.register(u'\N{SOFT HYPHEN}', b'\\-')
self.register(u'\N{MACRON}', b'\\={}')
self.register(u'\N{DEGREE SIGN}', b'^\\circ', mode='math')
self.register(u'\N{DEGREE SIGN}', b'\\textdegree', package='textcomp')
self.register(u'\N{PLUS-MINUS SIGN}', b'\\pm', mode='math')
self.register(u'\N{PLUS-MINUS SIGN}', b'\\textpm', package='textcomp')
self.register(u'\N{SUPERSCRIPT TWO}', b'^2', mode='math')
self.register(u'\N{SUPERSCRIPT TWO}', b'\\texttwosuperior', package='textcomp')
self.register(u'\N{SUPERSCRIPT THREE}', b'^3', mode='math')
self.register(u'\N{SUPERSCRIPT THREE}', b'\\textthreesuperior', package='textcomp')
self.register(u'\N{ACUTE ACCENT}', b"\\'{}")
self.register(u'\N{MICRO SIGN}', b'\\mu', mode='math')
self.register(u'\N{MICRO SIGN}', b'\\micro', package='gensymb')
self.register(u'\N{PILCROW SIGN}', b'\\P')
self.register(u'\N{MIDDLE DOT}', b'\\cdot', mode='math')
self.register(u'\N{MIDDLE DOT}', b'\\textperiodcentered', package='textcomp')
self.register(u'\N{CEDILLA}', b'\\c{}')
self.register(u'\N{SUPERSCRIPT ONE}', b'^1', mode='math')
self.register(u'\N{SUPERSCRIPT ONE}', b'\\textonesuperior', package='textcomp')
self.register(u'\N{INVERTED QUESTION MARK}', b'?`')
self.register(u'\N{LATIN CAPITAL LETTER A WITH GRAVE}', b'\\`A')
self.register(u'\N{LATIN CAPITAL LETTER A WITH CIRCUMFLEX}', b'\\^A')
self.register(u'\N{LATIN CAPITAL LETTER A WITH TILDE}', b'\\~A')
self.register(u'\N{LATIN CAPITAL LETTER A WITH DIAERESIS}', b'\\"A')
self.register(u'\N{LATIN CAPITAL LETTER A WITH RING ABOVE}', b'\\AA')
self.register(u'\N{LATIN CAPITAL LETTER AE}', b'\\AE')
self.register(u'\N{LATIN CAPITAL LETTER C WITH CEDILLA}', b'\\c C')
self.register(u'\N{LATIN CAPITAL LETTER E WITH GRAVE}', b'\\`E')
self.register(u'\N{LATIN CAPITAL LETTER E WITH ACUTE}', b"\\'E")
self.register(u'\N{LATIN CAPITAL LETTER E WITH CIRCUMFLEX}', b'\\^E')
self.register(u'\N{LATIN CAPITAL LETTER E WITH DIAERESIS}', b'\\"E')
self.register(u'\N{LATIN CAPITAL LETTER I WITH GRAVE}', b'\\`I')
self.register(u'\N{LATIN CAPITAL LETTER I WITH CIRCUMFLEX}', b'\\^I')
self.register(u'\N{LATIN CAPITAL LETTER I WITH DIAERESIS}', b'\\"I')
self.register(u'\N{LATIN CAPITAL LETTER N WITH TILDE}', b'\\~N')
self.register(u'\N{LATIN CAPITAL LETTER O WITH GRAVE}', b'\\`O')
self.register(u'\N{LATIN CAPITAL LETTER O WITH ACUTE}', b"\\'O")
self.register(u'\N{LATIN CAPITAL LETTER O WITH CIRCUMFLEX}', b'\\^O')
self.register(u'\N{LATIN CAPITAL LETTER O WITH TILDE}', b'\\~O')
self.register(u'\N{LATIN CAPITAL LETTER O WITH DIAERESIS}', b'\\"O')
self.register(u'\N{MULTIPLICATION SIGN}', b'\\times', mode='math')
self.register(u'\N{LATIN CAPITAL LETTER O WITH STROKE}', b'\\O')
self.register(u'\N{LATIN CAPITAL LETTER U WITH GRAVE}', b'\\`U')
self.register(u'\N{LATIN CAPITAL LETTER U WITH ACUTE}', b"\\'U")
self.register(u'\N{LATIN CAPITAL LETTER U WITH CIRCUMFLEX}', b'\\^U')
self.register(u'\N{LATIN CAPITAL LETTER U WITH DIAERESIS}', b'\\"U')
self.register(u'\N{LATIN CAPITAL LETTER Y WITH ACUTE}', b"\\'Y")
self.register(u'\N{LATIN SMALL LETTER SHARP S}', b'\\ss')
self.register(u'\N{LATIN SMALL LETTER A WITH GRAVE}', b'\\`a')
self.register(u'\N{LATIN SMALL LETTER A WITH ACUTE}', b"\\'a")
self.register(u'\N{LATIN SMALL LETTER A WITH CIRCUMFLEX}', b'\\^a')
self.register(u'\N{LATIN SMALL LETTER A WITH TILDE}', b'\\~a')
self.register(u'\N{LATIN SMALL LETTER A WITH DIAERESIS}', b'\\"a')
self.register(u'\N{LATIN SMALL LETTER A WITH RING ABOVE}', b'\\aa')
self.register(u'\N{LATIN SMALL LETTER AE}', b'\\ae')
self.register(u'\N{LATIN SMALL LETTER C WITH CEDILLA}', b'\\c c')
self.register(u'\N{LATIN SMALL LETTER E WITH GRAVE}', b'\\`e')
self.register(u'\N{LATIN SMALL LETTER E WITH ACUTE}', b"\\'e")
self.register(u'\N{LATIN SMALL LETTER E WITH CIRCUMFLEX}', b'\\^e')
self.register(u'\N{LATIN SMALL LETTER E WITH DIAERESIS}', b'\\"e')
self.register(u'\N{LATIN SMALL LETTER I WITH GRAVE}', b'\\`\\i')
self.register(u'\N{LATIN SMALL LETTER I WITH GRAVE}', b'\\`i')
self.register(u'\N{LATIN SMALL LETTER I WITH ACUTE}', b"\\'\\i")
self.register(u'\N{LATIN SMALL LETTER I WITH ACUTE}', b"\\'i")
self.register(u'\N{LATIN SMALL LETTER I WITH CIRCUMFLEX}', b'\\^\\i')
self.register(u'\N{LATIN SMALL LETTER I WITH CIRCUMFLEX}', b'\\^i')
self.register(u'\N{LATIN SMALL LETTER I WITH DIAERESIS}', b'\\"\\i')
self.register(u'\N{LATIN SMALL LETTER I WITH DIAERESIS}', b'\\"i')
self.register(u'\N{LATIN SMALL LETTER N WITH TILDE}', b'\\~n')
self.register(u'\N{LATIN SMALL LETTER O WITH GRAVE}', b'\\`o')
self.register(u'\N{LATIN SMALL LETTER O WITH ACUTE}', b"\\'o")
self.register(u'\N{LATIN SMALL LETTER O WITH CIRCUMFLEX}', b'\\^o')
self.register(u'\N{LATIN SMALL LETTER O WITH TILDE}', b'\\~o')
self.register(u'\N{LATIN SMALL LETTER O WITH DIAERESIS}', b'\\"o')
self.register(u'\N{DIVISION SIGN}', b'\\div', mode='math')
self.register(u'\N{LATIN SMALL LETTER O WITH STROKE}', b'\\o')
self.register(u'\N{LATIN SMALL LETTER U WITH GRAVE}', b'\\`u')
self.register(u'\N{LATIN SMALL LETTER U WITH ACUTE}', b"\\'u")
self.register(u'\N{LATIN SMALL LETTER U WITH CIRCUMFLEX}', b'\\^u')
self.register(u'\N{LATIN SMALL LETTER U WITH DIAERESIS}', b'\\"u')
self.register(u'\N{LATIN SMALL LETTER Y WITH ACUTE}', b"\\'y")
self.register(u'\N{LATIN SMALL LETTER Y WITH DIAERESIS}', b'\\"y')
self.register(u'\N{LATIN CAPITAL LETTER A WITH MACRON}', b'\\=A')
self.register(u'\N{LATIN SMALL LETTER A WITH MACRON}', b'\\=a')
self.register(u'\N{LATIN CAPITAL LETTER A WITH BREVE}', b'\\u A')
self.register(u'\N{LATIN SMALL LETTER A WITH BREVE}', b'\\u a')
self.register(u'\N{LATIN CAPITAL LETTER A WITH OGONEK}', b'\\c A')
self.register(u'\N{LATIN SMALL LETTER A WITH OGONEK}', b'\\c a')
self.register(u'\N{LATIN CAPITAL LETTER C WITH ACUTE}', b"\\'C")
self.register(u'\N{LATIN SMALL LETTER C WITH ACUTE}', b"\\'c")
self.register(u'\N{LATIN CAPITAL LETTER C WITH CIRCUMFLEX}', b'\\^C')
self.register(u'\N{LATIN SMALL LETTER C WITH CIRCUMFLEX}', b'\\^c')
self.register(u'\N{LATIN CAPITAL LETTER C WITH DOT ABOVE}', b'\\.C')
self.register(u'\N{LATIN SMALL LETTER C WITH DOT ABOVE}', b'\\.c')
self.register(u'\N{LATIN CAPITAL LETTER C WITH CARON}', b'\\v C')
self.register(u'\N{LATIN SMALL LETTER C WITH CARON}', b'\\v c')
self.register(u'\N{LATIN CAPITAL LETTER D WITH CARON}', b'\\v D')
self.register(u'\N{LATIN SMALL LETTER D WITH CARON}', b'\\v d')
self.register(u'\N{LATIN CAPITAL LETTER E WITH MACRON}', b'\\=E')
self.register(u'\N{LATIN SMALL LETTER E WITH MACRON}', b'\\=e')
self.register(u'\N{LATIN CAPITAL LETTER E WITH BREVE}', b'\\u E')
self.register(u'\N{LATIN SMALL LETTER E WITH BREVE}', b'\\u e')
self.register(u'\N{LATIN CAPITAL LETTER E WITH DOT ABOVE}', b'\\.E')
self.register(u'\N{LATIN SMALL LETTER E WITH DOT ABOVE}', b'\\.e')
self.register(u'\N{LATIN CAPITAL LETTER E WITH OGONEK}', b'\\c E')
self.register(u'\N{LATIN SMALL LETTER E WITH OGONEK}', b'\\c e')
self.register(u'\N{LATIN CAPITAL LETTER E WITH CARON}', b'\\v E')
self.register(u'\N{LATIN SMALL LETTER E WITH CARON}', b'\\v e')
self.register(u'\N{LATIN CAPITAL LETTER G WITH CIRCUMFLEX}', b'\\^G')
self.register(u'\N{LATIN SMALL LETTER G WITH CIRCUMFLEX}', b'\\^g')
self.register(u'\N{LATIN CAPITAL LETTER G WITH BREVE}', b'\\u G')
self.register(u'\N{LATIN SMALL LETTER G WITH BREVE}', b'\\u g')
self.register(u'\N{LATIN CAPITAL LETTER G WITH DOT ABOVE}', b'\\.G')
self.register(u'\N{LATIN SMALL LETTER G WITH DOT ABOVE}', b'\\.g')
self.register(u'\N{LATIN CAPITAL LETTER G WITH CEDILLA}', b'\\c G')
self.register(u'\N{LATIN SMALL LETTER G WITH CEDILLA}', b'\\c g')
self.register(u'\N{LATIN CAPITAL LETTER H WITH CIRCUMFLEX}', b'\\^H')
self.register(u'\N{LATIN SMALL LETTER H WITH CIRCUMFLEX}', b'\\^h')
self.register(u'\N{LATIN CAPITAL LETTER I WITH TILDE}', b'\\~I')
self.register(u'\N{LATIN SMALL LETTER I WITH TILDE}', b'\\~\\i')
self.register(u'\N{LATIN SMALL LETTER I WITH TILDE}', b'\\~i')
self.register(u'\N{LATIN CAPITAL LETTER I WITH MACRON}', b'\\=I')
self.register(u'\N{LATIN SMALL LETTER I WITH MACRON}', b'\\=\\i')
self.register(u'\N{LATIN SMALL LETTER I WITH MACRON}', b'\\=i')
self.register(u'\N{LATIN CAPITAL LETTER I WITH BREVE}', b'\\u I')
self.register(u'\N{LATIN SMALL LETTER I WITH BREVE}', b'\\u\\i')
self.register(u'\N{LATIN SMALL LETTER I WITH BREVE}', b'\\u i')
self.register(u'\N{LATIN CAPITAL LETTER I WITH OGONEK}', b'\\c I')
self.register(u'\N{LATIN SMALL LETTER I WITH OGONEK}', b'\\c i')
self.register(u'\N{LATIN CAPITAL LETTER I WITH DOT ABOVE}', b'\\.I')
self.register(u'\N{LATIN SMALL LETTER DOTLESS I}', b'\\i')
self.register(u'\N{LATIN CAPITAL LIGATURE IJ}', b'IJ', decode=False)
self.register(u'\N{LATIN SMALL LIGATURE IJ}', b'ij', decode=False)
self.register(u'\N{LATIN CAPITAL LETTER J WITH CIRCUMFLEX}', b'\\^J')
self.register(u'\N{LATIN SMALL LETTER J WITH CIRCUMFLEX}', b'\\^\\j')
self.register(u'\N{LATIN SMALL LETTER J WITH CIRCUMFLEX}', b'\\^j')
self.register(u'\N{LATIN CAPITAL LETTER K WITH CEDILLA}', b'\\c K')
self.register(u'\N{LATIN SMALL LETTER K WITH CEDILLA}', b'\\c k')
self.register(u'\N{LATIN CAPITAL LETTER L WITH ACUTE}', b"\\'L")
self.register(u'\N{LATIN SMALL LETTER L WITH ACUTE}', b"\\'l")
self.register(u'\N{LATIN CAPITAL LETTER L WITH CEDILLA}', b'\\c L')
self.register(u'\N{LATIN SMALL LETTER L WITH CEDILLA}', b'\\c l')
self.register(u'\N{LATIN CAPITAL LETTER L WITH CARON}', b'\\v L')
self.register(u'\N{LATIN SMALL LETTER L WITH CARON}', b'\\v l')
self.register(u'\N{LATIN CAPITAL LETTER L WITH STROKE}', b'\\L')
self.register(u'\N{LATIN SMALL LETTER L WITH STROKE}', b'\\l')
self.register(u'\N{LATIN CAPITAL LETTER N WITH ACUTE}', b"\\'N")
self.register(u'\N{LATIN SMALL LETTER N WITH ACUTE}', b"\\'n")
self.register(u'\N{LATIN CAPITAL LETTER N WITH CEDILLA}', b'\\c N')
self.register(u'\N{LATIN SMALL LETTER N WITH CEDILLA}', b'\\c n')
self.register(u'\N{LATIN CAPITAL LETTER N WITH CARON}', b'\\v N')
self.register(u'\N{LATIN SMALL LETTER N WITH CARON}', b'\\v n')
self.register(u'\N{LATIN CAPITAL LETTER O WITH MACRON}', b'\\=O')
self.register(u'\N{LATIN SMALL LETTER O WITH MACRON}', b'\\=o')
self.register(u'\N{LATIN CAPITAL LETTER O WITH BREVE}', b'\\u O')
self.register(u'\N{LATIN SMALL LETTER O WITH BREVE}', b'\\u o')
self.register(u'\N{LATIN CAPITAL LETTER O WITH DOUBLE ACUTE}', b'\\H O')
self.register(u'\N{LATIN SMALL LETTER O WITH DOUBLE ACUTE}', b'\\H o')
self.register(u'\N{LATIN CAPITAL LIGATURE OE}', b'\\OE')
self.register(u'\N{LATIN SMALL LIGATURE OE}', b'\\oe')
self.register(u'\N{LATIN CAPITAL LETTER R WITH ACUTE}', b"\\'R")
self.register(u'\N{LATIN SMALL LETTER R WITH ACUTE}', b"\\'r")
self.register(u'\N{LATIN CAPITAL LETTER R WITH CEDILLA}', b'\\c R')
self.register(u'\N{LATIN SMALL LETTER R WITH CEDILLA}', b'\\c r')
self.register(u'\N{LATIN CAPITAL LETTER R WITH CARON}', b'\\v R')
self.register(u'\N{LATIN SMALL LETTER R WITH CARON}', b'\\v r')
self.register(u'\N{LATIN CAPITAL LETTER S WITH ACUTE}', b"\\'S")
self.register(u'\N{LATIN SMALL LETTER S WITH ACUTE}', b"\\'s")
self.register(u'\N{LATIN CAPITAL LETTER S WITH CIRCUMFLEX}', b'\\^S')
self.register(u'\N{LATIN SMALL LETTER S WITH CIRCUMFLEX}', b'\\^s')
self.register(u'\N{LATIN CAPITAL LETTER S WITH CEDILLA}', b'\\c S')
self.register(u'\N{LATIN SMALL LETTER S WITH CEDILLA}', b'\\c s')
self.register(u'\N{LATIN CAPITAL LETTER S WITH CARON}', b'\\v S')
self.register(u'\N{LATIN SMALL LETTER S WITH CARON}', b'\\v s')
self.register(u'\N{LATIN CAPITAL LETTER T WITH CEDILLA}', b'\\c T')
self.register(u'\N{LATIN SMALL LETTER T WITH CEDILLA}', b'\\c t')
self.register(u'\N{LATIN CAPITAL LETTER T WITH CARON}', b'\\v T')
self.register(u'\N{LATIN SMALL LETTER T WITH CARON}', b'\\v t')
self.register(u'\N{LATIN CAPITAL LETTER U WITH TILDE}', b'\\~U')
self.register(u'\N{LATIN SMALL LETTER U WITH TILDE}', b'\\~u')
self.register(u'\N{LATIN CAPITAL LETTER U WITH MACRON}', b'\\=U')
self.register(u'\N{LATIN SMALL LETTER U WITH MACRON}', b'\\=u')
self.register(u'\N{LATIN CAPITAL LETTER U WITH BREVE}', b'\\u U')
self.register(u'\N{LATIN SMALL LETTER U WITH BREVE}', b'\\u u')
self.register(u'\N{LATIN CAPITAL LETTER U WITH RING ABOVE}', b'\\r U')
self.register(u'\N{LATIN SMALL LETTER U WITH RING ABOVE}', b'\\r u')
self.register(u'\N{LATIN CAPITAL LETTER U WITH DOUBLE ACUTE}', b'\\H U')
self.register(u'\N{LATIN SMALL LETTER U WITH DOUBLE ACUTE}', b'\\H u')
self.register(u'\N{LATIN CAPITAL LETTER U WITH OGONEK}', b'\\c U')
self.register(u'\N{LATIN SMALL LETTER U WITH OGONEK}', b'\\c u')
self.register(u'\N{LATIN CAPITAL LETTER W WITH CIRCUMFLEX}', b'\\^W')
self.register(u'\N{LATIN SMALL LETTER W WITH CIRCUMFLEX}', b'\\^w')
self.register(u'\N{LATIN CAPITAL LETTER Y WITH CIRCUMFLEX}', b'\\^Y')
self.register(u'\N{LATIN SMALL LETTER Y WITH CIRCUMFLEX}', b'\\^y')
self.register(u'\N{LATIN CAPITAL LETTER Y WITH DIAERESIS}', b'\\"Y')
self.register(u'\N{LATIN CAPITAL LETTER Z WITH ACUTE}', b"\\'Z")
        self.register(u'\N{LATIN SMALL LETTER Z WITH ACUTE}', b"\\'z")
self.register(u'\N{LATIN CAPITAL LETTER Z WITH DOT ABOVE}', b'\\.Z')
        self.register(u'\N{LATIN SMALL LETTER Z WITH DOT ABOVE}', b'\\.z')
self.register(u'\N{LATIN CAPITAL LETTER Z WITH CARON}', b'\\v Z')
self.register(u'\N{LATIN SMALL LETTER Z WITH CARON}', b'\\v z')
self.register(u'\N{LATIN CAPITAL LETTER DZ WITH CARON}', b'D\\v Z')
self.register(u'\N{LATIN CAPITAL LETTER D WITH SMALL LETTER Z WITH CARON}', b'D\\v z')
self.register(u'\N{LATIN SMALL LETTER DZ WITH CARON}', b'd\\v z')
self.register(u'\N{LATIN CAPITAL LETTER LJ}', b'LJ', decode=False)
self.register(u'\N{LATIN CAPITAL LETTER L WITH SMALL LETTER J}', b'Lj', decode=False)
self.register(u'\N{LATIN SMALL LETTER LJ}', b'lj', decode=False)
self.register(u'\N{LATIN CAPITAL LETTER NJ}', b'NJ', decode=False)
self.register(u'\N{LATIN CAPITAL LETTER N WITH SMALL LETTER J}', b'Nj', decode=False)
self.register(u'\N{LATIN SMALL LETTER NJ}', b'nj', decode=False)
self.register(u'\N{LATIN CAPITAL LETTER A WITH CARON}', b'\\v A')
self.register(u'\N{LATIN SMALL LETTER A WITH CARON}', b'\\v a')
self.register(u'\N{LATIN CAPITAL LETTER I WITH CARON}', b'\\v I')
self.register(u'\N{LATIN SMALL LETTER I WITH CARON}', b'\\v\\i')
self.register(u'\N{LATIN CAPITAL LETTER O WITH CARON}', b'\\v O')
self.register(u'\N{LATIN SMALL LETTER O WITH CARON}', b'\\v o')
self.register(u'\N{LATIN CAPITAL LETTER U WITH CARON}', b'\\v U')
self.register(u'\N{LATIN SMALL LETTER U WITH CARON}', b'\\v u')
self.register(u'\N{LATIN CAPITAL LETTER G WITH CARON}', b'\\v G')
self.register(u'\N{LATIN SMALL LETTER G WITH CARON}', b'\\v g')
self.register(u'\N{LATIN CAPITAL LETTER K WITH CARON}', b'\\v K')
self.register(u'\N{LATIN SMALL LETTER K WITH CARON}', b'\\v k')
self.register(u'\N{LATIN CAPITAL LETTER O WITH OGONEK}', b'\\c O')
self.register(u'\N{LATIN SMALL LETTER O WITH OGONEK}', b'\\c o')
self.register(u'\N{LATIN SMALL LETTER J WITH CARON}', b'\\v\\j')
        self.register(u'\N{LATIN CAPITAL LETTER DZ}', b'DZ', decode=False)
self.register(u'\N{LATIN CAPITAL LETTER D WITH SMALL LETTER Z}', b'Dz', decode=False)
self.register(u'\N{LATIN SMALL LETTER DZ}', b'dz', decode=False)
self.register(u'\N{LATIN CAPITAL LETTER G WITH ACUTE}', b"\\'G")
self.register(u'\N{LATIN SMALL LETTER G WITH ACUTE}', b"\\'g")
self.register(u'\N{LATIN CAPITAL LETTER AE WITH ACUTE}', b"\\'\\AE")
self.register(u'\N{LATIN SMALL LETTER AE WITH ACUTE}', b"\\'\\ae")
self.register(u'\N{LATIN CAPITAL LETTER O WITH STROKE AND ACUTE}', b"\\'\\O")
self.register(u'\N{LATIN SMALL LETTER O WITH STROKE AND ACUTE}', b"\\'\\o")
self.register(u'\N{PARTIAL DIFFERENTIAL}', b'\\partial', mode='math')
self.register(u'\N{N-ARY PRODUCT}', b'\\prod', mode='math')
self.register(u'\N{N-ARY SUMMATION}', b'\\sum', mode='math')
self.register(u'\N{SQUARE ROOT}', b'\\surd', mode='math')
self.register(u'\N{INFINITY}', b'\\infty', mode='math')
self.register(u'\N{INTEGRAL}', b'\\int', mode='math')
self.register(u'\N{INTERSECTION}', b'\\cap', mode='math')
self.register(u'\N{UNION}', b'\\cup', mode='math')
self.register(u'\N{RIGHTWARDS ARROW}', b'\\rightarrow', mode='math')
self.register(u'\N{RIGHTWARDS DOUBLE ARROW}', b'\\Rightarrow', mode='math')
self.register(u'\N{LEFTWARDS ARROW}', b'\\leftarrow', mode='math')
self.register(u'\N{LEFTWARDS DOUBLE ARROW}', b'\\Leftarrow', mode='math')
self.register(u'\N{LOGICAL OR}', b'\\vee', mode='math')
self.register(u'\N{LOGICAL AND}', b'\\wedge', mode='math')
self.register(u'\N{ALMOST EQUAL TO}', b'\\approx', mode='math')
self.register(u'\N{NOT EQUAL TO}', b'\\neq', mode='math')
self.register(u'\N{LESS-THAN OR EQUAL TO}', b'\\leq', mode='math')
self.register(u'\N{GREATER-THAN OR EQUAL TO}', b'\\geq', mode='math')
self.register(u'\N{MODIFIER LETTER CIRCUMFLEX ACCENT}', b'\\^{}')
self.register(u'\N{CARON}', b'\\v{}')
self.register(u'\N{BREVE}', b'\\u{}')
self.register(u'\N{DOT ABOVE}', b'\\.{}')
self.register(u'\N{RING ABOVE}', b'\\r{}')
self.register(u'\N{OGONEK}', b'\\c{}')
self.register(u'\N{SMALL TILDE}', b'\\~{}')
self.register(u'\N{DOUBLE ACUTE ACCENT}', b'\\H{}')
self.register(u'\N{LATIN SMALL LIGATURE FI}', b'fi', decode=False)
self.register(u'\N{LATIN SMALL LIGATURE FL}', b'fl', decode=False)
self.register(u'\N{LATIN SMALL LIGATURE FF}', b'ff', decode=False)
self.register(u'\N{GREEK SMALL LETTER ALPHA}', b'\\alpha', mode='math')
self.register(u'\N{GREEK SMALL LETTER BETA}', b'\\beta', mode='math')
self.register(u'\N{GREEK SMALL LETTER GAMMA}', b'\\gamma', mode='math')
self.register(u'\N{GREEK SMALL LETTER DELTA}', b'\\delta', mode='math')
self.register(u'\N{GREEK SMALL LETTER EPSILON}', b'\\epsilon', mode='math')
self.register(u'\N{GREEK SMALL LETTER ZETA}', b'\\zeta', mode='math')
self.register(u'\N{GREEK SMALL LETTER ETA}', b'\\eta', mode='math')
self.register(u'\N{GREEK SMALL LETTER THETA}', b'\\theta', mode='math')
self.register(u'\N{GREEK SMALL LETTER IOTA}', b'\\iota', mode='math')
self.register(u'\N{GREEK SMALL LETTER KAPPA}', b'\\kappa', mode='math')
self.register(u'\N{GREEK SMALL LETTER LAMDA}', b'\\lambda', mode='math') # LAMDA not LAMBDA
self.register(u'\N{GREEK SMALL LETTER MU}', b'\\mu', mode='math')
self.register(u'\N{GREEK SMALL LETTER NU}', b'\\nu', mode='math')
self.register(u'\N{GREEK SMALL LETTER XI}', b'\\xi', mode='math')
self.register(u'\N{GREEK SMALL LETTER OMICRON}', b'\\omicron', mode='math')
self.register(u'\N{GREEK SMALL LETTER PI}', b'\\pi', mode='math')
self.register(u'\N{GREEK SMALL LETTER RHO}', b'\\rho', mode='math')
self.register(u'\N{GREEK SMALL LETTER SIGMA}', b'\\sigma', mode='math')
self.register(u'\N{GREEK SMALL LETTER TAU}', b'\\tau', mode='math')
self.register(u'\N{GREEK SMALL LETTER UPSILON}', b'\\upsilon', mode='math')
self.register(u'\N{GREEK SMALL LETTER PHI}', b'\\phi', mode='math')
self.register(u'\N{GREEK SMALL LETTER CHI}', b'\\chi', mode='math')
self.register(u'\N{GREEK SMALL LETTER PSI}', b'\\psi', mode='math')
self.register(u'\N{GREEK SMALL LETTER OMEGA}', b'\\omega', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER ALPHA}', b'\\Alpha', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER BETA}', b'\\Beta', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER GAMMA}', b'\\Gamma', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER DELTA}', b'\\Delta', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER EPSILON}', b'\\Epsilon', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER ZETA}', b'\\Zeta', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER ETA}', b'\\Eta', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER THETA}', b'\\Theta', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER IOTA}', b'\\Iota', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER KAPPA}', b'\\Kappa', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER LAMDA}', b'\\Lambda', mode='math') # LAMDA not LAMBDA
self.register(u'\N{GREEK CAPITAL LETTER MU}', b'\\Mu', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER NU}', b'\\Nu', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER XI}', b'\\Xi', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER OMICRON}', b'\\Omicron', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER PI}', b'\\Pi', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER RHO}', b'\\Rho', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER SIGMA}', b'\\Sigma', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER TAU}', b'\\Tau', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER UPSILON}', b'\\Upsilon', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER PHI}', b'\\Phi', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER CHI}', b'\\Chi', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER PSI}', b'\\Psi', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER OMEGA}', b'\\Omega', mode='math')
self.register(u'\N{COPYRIGHT SIGN}', b'\\copyright')
self.register(u'\N{COPYRIGHT SIGN}', b'\\textcopyright')
self.register(u'\N{LATIN CAPITAL LETTER A WITH ACUTE}', b"\\'A")
self.register(u'\N{LATIN CAPITAL LETTER I WITH ACUTE}', b"\\'I")
self.register(u'\N{HORIZONTAL ELLIPSIS}', b'\\ldots')
self.register(u'\N{TRADE MARK SIGN}', b'^{TM}', mode='math')
self.register(u'\N{TRADE MARK SIGN}', b'\\texttrademark', package='textcomp')
def register(self, unicode_text, latex_text, mode='text', package=None,
decode=True, encode=True):
if package is not None:
# TODO implement packages
pass
if mode == 'math':
# also register text version
self.register(unicode_text, b'$' + latex_text + b'$', mode='text',
package=package, decode=decode, encode=encode)
# XXX for the time being, we do not perform in-math substitutions
return
# tokenize, and register unicode translation
tokens = tuple(self.lexer.get_tokens(latex_text, final=True))
if decode:
self.max_length = max(self.max_length, len(tokens))
if not tokens in self.unicode_map:
self.unicode_map[tokens] = unicode_text
# also register token variant with brackets, if appropriate
# for instance, "\'{e}" for "\'e", "\c{c}" for "\c c", etc.
# note: we do not remove brackets (they sometimes matter,
# e.g. bibtex uses them to prevent lower case transformation)
if (len(tokens) == 2
and tokens[0].name.startswith('control')
and tokens[1].name == 'chars'):
alt_tokens = (
tokens[0], latex_lexer.Token('chars', b'{'),
tokens[1], latex_lexer.Token('chars', b'}'),
)
if not alt_tokens in self.unicode_map:
self.unicode_map[alt_tokens] = u"{" + unicode_text + u"}"
if encode and unicode_text not in self.latex_map:
self.latex_map[unicode_text] = (latex_text, tokens)
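# Illustrative effect of LatexUnicodeTable.register() (added note, not part of
# the original module): after register(u'\N{LATIN SMALL LETTER C WITH CEDILLA}',
# b'\\c c'), both the token sequence for b'\\c c' and the bracketed variant
# b'\\c{c}' decode to the cedilla character, while encoding it emits b'\\c c'
# (the first registered form wins, because latex_map entries are never
# overwritten).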
_LATEX_UNICODE_TABLE = LatexUnicodeTable(latex_lexer.LatexIncrementalDecoder())
# incremental encoder does not need a buffer
# but decoder does
class LatexIncrementalEncoder(latex_lexer.LatexIncrementalEncoder):
"""Translating incremental encoder for latex. Maintains a state to
determine whether control spaces etc. need to be inserted.
"""
table = _LATEX_UNICODE_TABLE
"""Translation table."""
def __init__(self, errors='strict'):
latex_lexer.LatexIncrementalEncoder.__init__(self, errors=errors)
self.reset()
def reset(self):
self.state = 'M'
def get_space_bytes(self, bytes_):
"""Inserts space bytes in space eating mode."""
if self.state == 'S':
# in space eating mode
# control space needed?
if bytes_.startswith(b' '):
# replace by control space
return b'\\ ', bytes_[1:]
else:
# insert space (it is eaten, but needed for separation)
return b' ', bytes_
else:
return b'', bytes_
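    # Illustrative example (added note): after emitting b'\\ss' for
    # u'\N{LATIN SMALL LETTER SHARP S}' the encoder is in state 'S'
    # (space eating), so encoding u'\N{LATIN SMALL LETTER SHARP S}e'
    # yields b'\\ss e' rather than b'\\sse', which latex would read as
    # an unknown control word.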
def get_latex_bytes(self, unicode_, final=False):
""":meth:`encode` calls this function to produce the final
sequence of latex bytes. This implementation simply
encodes every sequence in *inputenc* encoding. Override to
process the bytes in some other way (for example, for token
translation).
"""
if not isinstance(unicode_, basestring):
raise TypeError(
"expected unicode for encode input, but got {0} instead"
.format(unicode_.__class__.__name__))
# convert character by character
for pos, c in enumerate(unicode_):
# attempt input encoding first
# if this succeeds, then we don't need a latex representation
try:
bytes_ = c.encode(self.inputenc, 'strict')
except UnicodeEncodeError:
pass
else:
space, bytes_ = self.get_space_bytes(bytes_)
self.state = 'M'
if space:
yield space
yield bytes_
continue
# inputenc failed; let's try the latex equivalents
# of common unicode characters
try:
bytes_, tokens = self.table.latex_map[c]
except KeyError:
# translation failed
                if self.errors == 'strict':
raise UnicodeEncodeError(
"latex", # codec
unicode_, # problematic input
pos, pos + 1, # location of problematic character
"don't know how to translate {1} ({0}) into latex"
.format(c, repr(c)))
                elif self.errors == 'ignore':
pass
                elif self.errors == 'replace':
# use the \\char command
# this assumes
# \usepackage[T1]{fontenc}
# \usepackage[utf8]{inputenc}
yield b'{\\char'
yield str(ord(c)).encode("ascii")
yield b'}'
self.state = 'M'
else:
                    raise ValueError(
                        "latex codec does not support {0} errors"
                        .format(self.errors))
else:
# translation succeeded
space, bytes_ = self.get_space_bytes(bytes_)
# update state
if tokens[-1].name == 'control_word':
# we're eating spaces
self.state = 'S'
else:
self.state = 'M'
if space:
yield space
yield bytes_
class LatexIncrementalDecoder(latex_lexer.LatexIncrementalDecoder):
"""Translating incremental decoder for latex."""
table = _LATEX_UNICODE_TABLE
"""Translation table."""
def __init__(self, errors='strict'):
latex_lexer.LatexIncrementalDecoder.__init__(self)
self.max_length = 0
def reset(self):
latex_lexer.LatexIncrementalDecoder.reset(self)
self.token_buffer = []
# python codecs API does not support multibuffer incremental decoders
def getstate(self):
raise NotImplementedError
def setstate(self, state):
raise NotImplementedError
def get_unicode_tokens(self, bytes_, final=False):
for token in self.get_tokens(bytes_, final=final):
# at this point, token_buffer does not match anything
self.token_buffer.append(token)
# new token appended at the end, see if we have a match now
# note: match is only possible at the *end* of the buffer
# because all other positions have already been checked in
# earlier iterations
for i in xrange(1, len(self.token_buffer) + 1):
last_tokens = tuple(self.token_buffer[-i:]) # last i tokens
try:
unicode_text = self.table.unicode_map[last_tokens]
except KeyError:
# no match: continue
continue
else:
# match!! flush buffer, and translate last bit
for token in self.token_buffer[:-i]: # exclude last i tokens
yield token.decode(self.inputenc)
yield unicode_text
self.token_buffer = []
break
# flush tokens that can no longer match
while len(self.token_buffer) >= self.table.max_length:
yield self.token_buffer.pop(0).decode(self.inputenc)
# also flush the buffer at the end
if final:
for token in self.token_buffer:
yield token.decode(self.inputenc)
self.token_buffer = []
class LatexCodec(codecs.Codec):
IncrementalEncoder = None
IncrementalDecoder = None
def encode(self, unicode_, errors='strict'):
"""Convert unicode string to latex bytes."""
return (
self.IncrementalEncoder(errors=errors).encode(unicode_, final=True),
len(unicode_),
)
def decode(self, bytes_, errors='strict'):
"""Convert latex bytes to unicode string."""
return (
self.IncrementalDecoder(errors=errors).decode(bytes_, final=True),
len(bytes_),
)
def find_latex(encoding):
# check if requested codec info is for latex encoding
if not encoding.startswith('latex'):
return None
# set up all classes with correct latex input encoding
inputenc_ = encoding[6:] if encoding.startswith('latex+') else 'ascii'
class IncrementalEncoder_(LatexIncrementalEncoder):
inputenc = inputenc_
class IncrementalDecoder_(LatexIncrementalDecoder):
inputenc = inputenc_
class Codec(LatexCodec):
IncrementalEncoder = IncrementalEncoder_
IncrementalDecoder = IncrementalDecoder_
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
return codecs.CodecInfo(
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder_,
incrementaldecoder=IncrementalDecoder_,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
codecs.register(find_latex)
|
gpl-3.0
| 4,882,661,946,667,891,000 | -4,932,868,037,708,670,000 | 54.066265 | 101 | 0.596871 | false |
unseenlaser/python-for-android
|
python3-alpha/python3-src/Tools/gdb/libpython.py
|
88
|
56224
|
#!/usr/bin/python
'''
From gdb 7 onwards, gdb's build can be configured --with-python, allowing gdb
to be extended with Python code e.g. for library-specific data visualizations,
such as for the C++ STL types. Documentation on this API can be seen at:
http://sourceware.org/gdb/current/onlinedocs/gdb/Python-API.html
This python module deals with the case when the process being debugged (the
"inferior process" in gdb parlance) is itself python, or more specifically,
linked against libpython. In this situation, almost every item of data is a
(PyObject*), and having the debugger merely print their addresses is not very
enlightening.
This module embeds knowledge about the implementation details of libpython so
that we can emit useful visualizations e.g. a string, a list, a dict, a frame
giving file/line information and the state of local variables
In particular, given a gdb.Value corresponding to a PyObject* in the inferior
process, we can generate a "proxy value" within the gdb process. For example,
given a PyObject* in the inferior process that is in fact a PyListObject*
holding three PyObject* that turn out to be PyBytesObject* instances, we can
generate a proxy value within the gdb process that is a list of bytes
instances:
[b"foo", b"bar", b"baz"]
Doing so can be expensive for complicated graphs of objects, and could take
some time, so we also have a "write_repr" method that writes a representation
of the data to a file-like object. This allows us to stop the traversal by
having the file-like object raise an exception if it gets too much data.
With both "proxyval" and "write_repr" we keep track of the set of all addresses
visited so far in the traversal, to avoid infinite recursion due to cycles in
the graph of object references.
We try to defer gdb.lookup_type() invocations for python types until as late as
possible: for a dynamically linked python binary, when the process starts in
the debugger, the libpython.so hasn't been dynamically loaded yet, so none of
the type names are known to the debugger
The module also extends gdb with some python-specific commands.
'''
from __future__ import with_statement
import gdb
import locale
import sys
# Look up the gdb.Type for some standard types:
_type_char_ptr = gdb.lookup_type('char').pointer() # char*
_type_unsigned_char_ptr = gdb.lookup_type('unsigned char').pointer() # unsigned char*
_type_void_ptr = gdb.lookup_type('void').pointer() # void*
_type_size_t = gdb.lookup_type('size_t')
SIZEOF_VOID_P = _type_void_ptr.sizeof
Py_TPFLAGS_HEAPTYPE = (1L << 9)
Py_TPFLAGS_LONG_SUBCLASS = (1L << 24)
Py_TPFLAGS_LIST_SUBCLASS = (1L << 25)
Py_TPFLAGS_TUPLE_SUBCLASS = (1L << 26)
Py_TPFLAGS_BYTES_SUBCLASS = (1L << 27)
Py_TPFLAGS_UNICODE_SUBCLASS = (1L << 28)
Py_TPFLAGS_DICT_SUBCLASS = (1L << 29)
Py_TPFLAGS_BASE_EXC_SUBCLASS = (1L << 30)
Py_TPFLAGS_TYPE_SUBCLASS = (1L << 31)
MAX_OUTPUT_LEN=1024
hexdigits = "0123456789abcdef"
ENCODING = locale.getpreferredencoding()
class NullPyObjectPtr(RuntimeError):
pass
def safety_limit(val):
    # Given an integer value from the process being debugged, limit it to some
# safety threshold so that arbitrary breakage within said process doesn't
# break the gdb process too much (e.g. sizes of iterations, sizes of lists)
return min(val, 1000)
def safe_range(val):
# As per range, but don't trust the value too much: cap it to a safety
# threshold in case the data was corrupted
return xrange(safety_limit(val))
def write_unicode(file, text):
# Write a byte or unicode string to file. Unicode strings are encoded to
# ENCODING encoding with 'backslashreplace' error handler to avoid
# UnicodeEncodeError.
if isinstance(text, unicode):
text = text.encode(ENCODING, 'backslashreplace')
file.write(text)
def os_fsencode(filename):
if not isinstance(filename, unicode):
return filename
encoding = sys.getfilesystemencoding()
if encoding == 'mbcs':
# mbcs doesn't support surrogateescape
return filename.encode(encoding)
encoded = []
for char in filename:
# surrogateescape error handler
if 0xDC80 <= ord(char) <= 0xDCFF:
byte = chr(ord(char) - 0xDC00)
else:
byte = char.encode(encoding)
encoded.append(byte)
return ''.join(encoded)
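# Illustrative example (added note, assumes a UTF-8 filesystem encoding):
#   os_fsencode(u'caf\xe9.log')    -> 'caf\xc3\xa9.log'
#   os_fsencode(u'bad\udcff.log')  -> 'bad\xff.log'   # lone surrogate maps back to the raw byte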
class StringTruncated(RuntimeError):
pass
class TruncatedStringIO(object):
'''Similar to cStringIO, but can truncate the output by raising a
StringTruncated exception'''
def __init__(self, maxlen=None):
self._val = ''
self.maxlen = maxlen
def write(self, data):
if self.maxlen:
if len(data) + len(self._val) > self.maxlen:
# Truncation:
self._val += data[0:self.maxlen - len(self._val)]
raise StringTruncated()
self._val += data
def getvalue(self):
return self._val
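# Illustrative usage (added note): out = TruncatedStringIO(maxlen=8);
# out.write('0123456789') keeps the first 8 characters and raises
# StringTruncated, so out.getvalue() == '01234567'; callers such as
# get_truncated_repr() then append '...(truncated)'.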
class PyObjectPtr(object):
"""
    Class wrapping a gdb.Value that's either a (PyObject*) within the
inferior process, or some subclass pointer e.g. (PyBytesObject*)
There will be a subclass for every refined PyObject type that we care
about.
Note that at every stage the underlying pointer could be NULL, point
to corrupt data, etc; this is the debugger, after all.
"""
_typename = 'PyObject'
def __init__(self, gdbval, cast_to=None):
if cast_to:
self._gdbval = gdbval.cast(cast_to)
else:
self._gdbval = gdbval
def field(self, name):
'''
Get the gdb.Value for the given field within the PyObject, coping with
some python 2 versus python 3 differences.
Various libpython types are defined using the "PyObject_HEAD" and
"PyObject_VAR_HEAD" macros.
        In Python 2, these are defined so that "ob_type" and (for a var
object) "ob_size" are fields of the type in question.
In Python 3, this is defined as an embedded PyVarObject type thus:
PyVarObject ob_base;
so that the "ob_size" field is located insize the "ob_base" field, and
the "ob_type" is most easily accessed by casting back to a (PyObject*).
'''
if self.is_null():
raise NullPyObjectPtr(self)
if name == 'ob_type':
pyo_ptr = self._gdbval.cast(PyObjectPtr.get_gdb_type())
return pyo_ptr.dereference()[name]
if name == 'ob_size':
pyo_ptr = self._gdbval.cast(PyVarObjectPtr.get_gdb_type())
return pyo_ptr.dereference()[name]
# General case: look it up inside the object:
return self._gdbval.dereference()[name]
def pyop_field(self, name):
'''
Get a PyObjectPtr for the given PyObject* field within this PyObject,
coping with some python 2 versus python 3 differences.
'''
return PyObjectPtr.from_pyobject_ptr(self.field(name))
def write_field_repr(self, name, out, visited):
'''
Extract the PyObject* field named "name", and write its representation
to file-like object "out"
'''
field_obj = self.pyop_field(name)
field_obj.write_repr(out, visited)
def get_truncated_repr(self, maxlen):
'''
Get a repr-like string for the data, but truncate it at "maxlen" bytes
(ending the object graph traversal as soon as you do)
'''
out = TruncatedStringIO(maxlen)
try:
self.write_repr(out, set())
except StringTruncated:
# Truncation occurred:
return out.getvalue() + '...(truncated)'
# No truncation occurred:
return out.getvalue()
def type(self):
return PyTypeObjectPtr(self.field('ob_type'))
def is_null(self):
return 0 == long(self._gdbval)
def is_optimized_out(self):
'''
Is the value of the underlying PyObject* visible to the debugger?
This can vary with the precise version of the compiler used to build
Python, and the precise version of gdb.
See e.g. https://bugzilla.redhat.com/show_bug.cgi?id=556975 with
PyEval_EvalFrameEx's "f"
'''
return self._gdbval.is_optimized_out
def safe_tp_name(self):
try:
return self.type().field('tp_name').string()
except NullPyObjectPtr:
# NULL tp_name?
return 'unknown'
except RuntimeError:
# Can't even read the object at all?
return 'unknown'
def proxyval(self, visited):
'''
Scrape a value from the inferior process, and try to represent it
within the gdb process, whilst (hopefully) avoiding crashes when
the remote data is corrupt.
Derived classes will override this.
For example, a PyIntObject* with ob_ival 42 in the inferior process
should result in an int(42) in this process.
visited: a set of all gdb.Value pyobject pointers already visited
whilst generating this value (to guard against infinite recursion when
visiting object graphs with loops). Analogous to Py_ReprEnter and
Py_ReprLeave
'''
class FakeRepr(object):
"""
Class representing a non-descript PyObject* value in the inferior
process for when we don't have a custom scraper, intended to have
a sane repr().
"""
def __init__(self, tp_name, address):
self.tp_name = tp_name
self.address = address
def __repr__(self):
# For the NULL pointer, we have no way of knowing a type, so
# special-case it as per
# http://bugs.python.org/issue8032#msg100882
if self.address == 0:
return '0x0'
return '<%s at remote 0x%x>' % (self.tp_name, self.address)
return FakeRepr(self.safe_tp_name(),
long(self._gdbval))
def write_repr(self, out, visited):
'''
Write a string representation of the value scraped from the inferior
process to "out", a file-like object.
'''
# Default implementation: generate a proxy value and write its repr
# However, this could involve a lot of work for complicated objects,
# so for derived classes we specialize this
return out.write(repr(self.proxyval(visited)))
@classmethod
def subclass_from_type(cls, t):
'''
Given a PyTypeObjectPtr instance wrapping a gdb.Value that's a
(PyTypeObject*), determine the corresponding subclass of PyObjectPtr
to use
Ideally, we would look up the symbols for the global types, but that
isn't working yet:
(gdb) python print gdb.lookup_symbol('PyList_Type')[0].value
Traceback (most recent call last):
File "<string>", line 1, in <module>
NotImplementedError: Symbol type not yet supported in Python scripts.
Error while executing Python code.
For now, we use tp_flags, after doing some string comparisons on the
tp_name for some special-cases that don't seem to be visible through
flags
'''
try:
tp_name = t.field('tp_name').string()
tp_flags = int(t.field('tp_flags'))
except RuntimeError:
# Handle any kind of error e.g. NULL ptrs by simply using the base
# class
return cls
#print 'tp_flags = 0x%08x' % tp_flags
#print 'tp_name = %r' % tp_name
name_map = {'bool': PyBoolObjectPtr,
'classobj': PyClassObjectPtr,
'instance': PyInstanceObjectPtr,
'NoneType': PyNoneStructPtr,
'frame': PyFrameObjectPtr,
'set' : PySetObjectPtr,
'frozenset' : PySetObjectPtr,
'builtin_function_or_method' : PyCFunctionObjectPtr,
}
if tp_name in name_map:
return name_map[tp_name]
if tp_flags & Py_TPFLAGS_HEAPTYPE:
return HeapTypeObjectPtr
if tp_flags & Py_TPFLAGS_LONG_SUBCLASS:
return PyLongObjectPtr
if tp_flags & Py_TPFLAGS_LIST_SUBCLASS:
return PyListObjectPtr
if tp_flags & Py_TPFLAGS_TUPLE_SUBCLASS:
return PyTupleObjectPtr
if tp_flags & Py_TPFLAGS_BYTES_SUBCLASS:
return PyBytesObjectPtr
if tp_flags & Py_TPFLAGS_UNICODE_SUBCLASS:
return PyUnicodeObjectPtr
if tp_flags & Py_TPFLAGS_DICT_SUBCLASS:
return PyDictObjectPtr
if tp_flags & Py_TPFLAGS_BASE_EXC_SUBCLASS:
return PyBaseExceptionObjectPtr
#if tp_flags & Py_TPFLAGS_TYPE_SUBCLASS:
# return PyTypeObjectPtr
# Use the base class:
return cls
@classmethod
def from_pyobject_ptr(cls, gdbval):
'''
Try to locate the appropriate derived class dynamically, and cast
the pointer accordingly.
'''
try:
p = PyObjectPtr(gdbval)
cls = cls.subclass_from_type(p.type())
return cls(gdbval, cast_to=cls.get_gdb_type())
except RuntimeError:
# Handle any kind of error e.g. NULL ptrs by simply using the base
# class
pass
return cls(gdbval)
@classmethod
def get_gdb_type(cls):
return gdb.lookup_type(cls._typename).pointer()
def as_address(self):
return long(self._gdbval)
class PyVarObjectPtr(PyObjectPtr):
_typename = 'PyVarObject'
class ProxyAlreadyVisited(object):
'''
Placeholder proxy to use when protecting against infinite recursion due to
loops in the object graph.
Analogous to the values emitted by the users of Py_ReprEnter and Py_ReprLeave
'''
def __init__(self, rep):
self._rep = rep
def __repr__(self):
return self._rep
def _write_instance_repr(out, visited, name, pyop_attrdict, address):
'''Shared code for use by old-style and new-style classes:
write a representation to file-like object "out"'''
out.write('<')
out.write(name)
# Write dictionary of instance attributes:
if isinstance(pyop_attrdict, PyDictObjectPtr):
out.write('(')
first = True
for pyop_arg, pyop_val in pyop_attrdict.iteritems():
if not first:
out.write(', ')
first = False
out.write(pyop_arg.proxyval(visited))
out.write('=')
pyop_val.write_repr(out, visited)
out.write(')')
out.write(' at remote 0x%x>' % address)
class InstanceProxy(object):
def __init__(self, cl_name, attrdict, address):
self.cl_name = cl_name
self.attrdict = attrdict
self.address = address
def __repr__(self):
if isinstance(self.attrdict, dict):
kwargs = ', '.join(["%s=%r" % (arg, val)
for arg, val in self.attrdict.iteritems()])
return '<%s(%s) at remote 0x%x>' % (self.cl_name,
kwargs, self.address)
else:
return '<%s at remote 0x%x>' % (self.cl_name,
self.address)
def _PyObject_VAR_SIZE(typeobj, nitems):
return ( ( typeobj.field('tp_basicsize') +
nitems * typeobj.field('tp_itemsize') +
(SIZEOF_VOID_P - 1)
) & ~(SIZEOF_VOID_P - 1)
).cast(_type_size_t)
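# Worked example (added note): with tp_basicsize == 16, tp_itemsize == 4,
# nitems == 3 and SIZEOF_VOID_P == 8, the raw size 16 + 3*4 == 28 is rounded
# up to the next multiple of 8, giving 32.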
class HeapTypeObjectPtr(PyObjectPtr):
_typename = 'PyObject'
def get_attr_dict(self):
'''
Get the PyDictObject ptr representing the attribute dictionary
(or None if there's a problem)
'''
try:
typeobj = self.type()
dictoffset = int_from_int(typeobj.field('tp_dictoffset'))
if dictoffset != 0:
if dictoffset < 0:
type_PyVarObject_ptr = gdb.lookup_type('PyVarObject').pointer()
tsize = int_from_int(self._gdbval.cast(type_PyVarObject_ptr)['ob_size'])
if tsize < 0:
tsize = -tsize
size = _PyObject_VAR_SIZE(typeobj, tsize)
dictoffset += size
assert dictoffset > 0
assert dictoffset % SIZEOF_VOID_P == 0
dictptr = self._gdbval.cast(_type_char_ptr) + dictoffset
PyObjectPtrPtr = PyObjectPtr.get_gdb_type().pointer()
dictptr = dictptr.cast(PyObjectPtrPtr)
return PyObjectPtr.from_pyobject_ptr(dictptr.dereference())
except RuntimeError:
# Corrupt data somewhere; fail safe
pass
# Not found, or some kind of error:
return None
def proxyval(self, visited):
'''
Support for new-style classes.
Currently we just locate the dictionary using a transliteration to
python of _PyObject_GetDictPtr, ignoring descriptors
'''
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('<...>')
visited.add(self.as_address())
pyop_attr_dict = self.get_attr_dict()
if pyop_attr_dict:
attr_dict = pyop_attr_dict.proxyval(visited)
else:
attr_dict = {}
tp_name = self.safe_tp_name()
# New-style class:
return InstanceProxy(tp_name, attr_dict, long(self._gdbval))
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('<...>')
return
visited.add(self.as_address())
pyop_attrdict = self.get_attr_dict()
_write_instance_repr(out, visited,
self.safe_tp_name(), pyop_attrdict, self.as_address())
class ProxyException(Exception):
def __init__(self, tp_name, args):
self.tp_name = tp_name
self.args = args
def __repr__(self):
return '%s%r' % (self.tp_name, self.args)
class PyBaseExceptionObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyBaseExceptionObject* i.e. an exception
within the process being debugged.
"""
_typename = 'PyBaseExceptionObject'
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('(...)')
visited.add(self.as_address())
arg_proxy = self.pyop_field('args').proxyval(visited)
return ProxyException(self.safe_tp_name(),
arg_proxy)
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('(...)')
return
visited.add(self.as_address())
out.write(self.safe_tp_name())
self.write_field_repr('args', out, visited)
class PyClassObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyClassObject* i.e. a <classobj>
instance within the process being debugged.
"""
_typename = 'PyClassObject'
class BuiltInFunctionProxy(object):
def __init__(self, ml_name):
self.ml_name = ml_name
def __repr__(self):
return "<built-in function %s>" % self.ml_name
class BuiltInMethodProxy(object):
def __init__(self, ml_name, pyop_m_self):
self.ml_name = ml_name
self.pyop_m_self = pyop_m_self
def __repr__(self):
return ('<built-in method %s of %s object at remote 0x%x>'
% (self.ml_name,
self.pyop_m_self.safe_tp_name(),
self.pyop_m_self.as_address())
)
class PyCFunctionObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyCFunctionObject*
(see Include/methodobject.h and Objects/methodobject.c)
"""
_typename = 'PyCFunctionObject'
def proxyval(self, visited):
m_ml = self.field('m_ml') # m_ml is a (PyMethodDef*)
ml_name = m_ml['ml_name'].string()
pyop_m_self = self.pyop_field('m_self')
if pyop_m_self.is_null():
return BuiltInFunctionProxy(ml_name)
else:
return BuiltInMethodProxy(ml_name, pyop_m_self)
class PyCodeObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyCodeObject* i.e. a <code> instance
within the process being debugged.
"""
_typename = 'PyCodeObject'
def addr2line(self, addrq):
'''
Get the line number for a given bytecode offset
Analogous to PyCode_Addr2Line; translated from pseudocode in
Objects/lnotab_notes.txt
'''
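        # Worked example (added note, hypothetical values): with
        # co_firstlineno == 10 and co_lnotab == '\x06\x01\x08\x02',
        # bytecode offset 7 falls in the second range, so addr2line(7)
        # returns 11, while addr2line(20) returns 13.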
co_lnotab = self.pyop_field('co_lnotab').proxyval(set())
# Initialize lineno to co_firstlineno as per PyCode_Addr2Line
# not 0, as lnotab_notes.txt has it:
lineno = int_from_int(self.field('co_firstlineno'))
addr = 0
for addr_incr, line_incr in zip(co_lnotab[::2], co_lnotab[1::2]):
addr += ord(addr_incr)
if addr > addrq:
return lineno
lineno += ord(line_incr)
return lineno
class PyDictObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyDictObject* i.e. a dict instance
within the process being debugged.
"""
_typename = 'PyDictObject'
def iteritems(self):
'''
Yields a sequence of (PyObjectPtr key, PyObjectPtr value) pairs,
        analogous to dict.iteritems()
'''
for i in safe_range(self.field('ma_mask') + 1):
ep = self.field('ma_table') + i
pyop_value = PyObjectPtr.from_pyobject_ptr(ep['me_value'])
if not pyop_value.is_null():
pyop_key = PyObjectPtr.from_pyobject_ptr(ep['me_key'])
yield (pyop_key, pyop_value)
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('{...}')
visited.add(self.as_address())
result = {}
for pyop_key, pyop_value in self.iteritems():
proxy_key = pyop_key.proxyval(visited)
proxy_value = pyop_value.proxyval(visited)
result[proxy_key] = proxy_value
return result
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('{...}')
return
visited.add(self.as_address())
out.write('{')
first = True
for pyop_key, pyop_value in self.iteritems():
if not first:
out.write(', ')
first = False
pyop_key.write_repr(out, visited)
out.write(': ')
pyop_value.write_repr(out, visited)
out.write('}')
class PyInstanceObjectPtr(PyObjectPtr):
_typename = 'PyInstanceObject'
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('<...>')
visited.add(self.as_address())
# Get name of class:
in_class = self.pyop_field('in_class')
cl_name = in_class.pyop_field('cl_name').proxyval(visited)
# Get dictionary of instance attributes:
in_dict = self.pyop_field('in_dict').proxyval(visited)
# Old-style class:
return InstanceProxy(cl_name, in_dict, long(self._gdbval))
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('<...>')
return
visited.add(self.as_address())
# Old-style class:
# Get name of class:
in_class = self.pyop_field('in_class')
cl_name = in_class.pyop_field('cl_name').proxyval(visited)
# Get dictionary of instance attributes:
pyop_in_dict = self.pyop_field('in_dict')
_write_instance_repr(out, visited,
cl_name, pyop_in_dict, self.as_address())
class PyListObjectPtr(PyObjectPtr):
_typename = 'PyListObject'
def __getitem__(self, i):
# Get the gdb.Value for the (PyObject*) with the given index:
field_ob_item = self.field('ob_item')
return field_ob_item[i]
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('[...]')
visited.add(self.as_address())
result = [PyObjectPtr.from_pyobject_ptr(self[i]).proxyval(visited)
for i in safe_range(int_from_int(self.field('ob_size')))]
return result
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('[...]')
return
visited.add(self.as_address())
out.write('[')
for i in safe_range(int_from_int(self.field('ob_size'))):
if i > 0:
out.write(', ')
element = PyObjectPtr.from_pyobject_ptr(self[i])
element.write_repr(out, visited)
out.write(']')
class PyLongObjectPtr(PyObjectPtr):
_typename = 'PyLongObject'
def proxyval(self, visited):
'''
        Python's Include/longintrepr.h has this declaration:
struct _longobject {
PyObject_VAR_HEAD
digit ob_digit[1];
};
with this description:
The absolute value of a number is equal to
SUM(for i=0 through abs(ob_size)-1) ob_digit[i] * 2**(SHIFT*i)
Negative numbers are represented with ob_size < 0;
zero is represented by ob_size == 0.
where SHIFT can be either:
#define PyLong_SHIFT 30
#define PyLong_SHIFT 15
'''
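        # Worked example (added note, hypothetical values): with
        # PyLong_SHIFT == 30, ob_size == 2 and ob_digit == [5, 2], the
        # value is 5 * 2**0 + 2 * 2**30 == 2147483653; ob_size == -2
        # with the same digits would give -2147483653.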
ob_size = long(self.field('ob_size'))
if ob_size == 0:
return 0L
ob_digit = self.field('ob_digit')
if gdb.lookup_type('digit').sizeof == 2:
SHIFT = 15L
else:
SHIFT = 30L
digits = [long(ob_digit[i]) * 2**(SHIFT*i)
for i in safe_range(abs(ob_size))]
result = sum(digits)
if ob_size < 0:
result = -result
return result
def write_repr(self, out, visited):
# Write this out as a Python 3 int literal, i.e. without the "L" suffix
proxy = self.proxyval(visited)
out.write("%s" % proxy)
class PyBoolObjectPtr(PyLongObjectPtr):
"""
Class wrapping a gdb.Value that's a PyBoolObject* i.e. one of the two
<bool> instances (Py_True/Py_False) within the process being debugged.
"""
def proxyval(self, visited):
if PyLongObjectPtr.proxyval(self, visited):
return True
else:
return False
class PyNoneStructPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyObject* pointing to the
singleton (we hope) _Py_NoneStruct with ob_type PyNone_Type
"""
_typename = 'PyObject'
def proxyval(self, visited):
return None
class PyFrameObjectPtr(PyObjectPtr):
_typename = 'PyFrameObject'
def __init__(self, gdbval, cast_to):
PyObjectPtr.__init__(self, gdbval, cast_to)
if not self.is_optimized_out():
self.co = PyCodeObjectPtr.from_pyobject_ptr(self.field('f_code'))
self.co_name = self.co.pyop_field('co_name')
self.co_filename = self.co.pyop_field('co_filename')
self.f_lineno = int_from_int(self.field('f_lineno'))
self.f_lasti = int_from_int(self.field('f_lasti'))
self.co_nlocals = int_from_int(self.co.field('co_nlocals'))
self.co_varnames = PyTupleObjectPtr.from_pyobject_ptr(self.co.field('co_varnames'))
def iter_locals(self):
'''
Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
the local variables of this frame
'''
if self.is_optimized_out():
return
f_localsplus = self.field('f_localsplus')
for i in safe_range(self.co_nlocals):
pyop_value = PyObjectPtr.from_pyobject_ptr(f_localsplus[i])
if not pyop_value.is_null():
pyop_name = PyObjectPtr.from_pyobject_ptr(self.co_varnames[i])
yield (pyop_name, pyop_value)
def iter_globals(self):
'''
Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
the global variables of this frame
'''
if self.is_optimized_out():
return
pyop_globals = self.pyop_field('f_globals')
return pyop_globals.iteritems()
def iter_builtins(self):
'''
Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
the builtin variables
'''
if self.is_optimized_out():
return
pyop_builtins = self.pyop_field('f_builtins')
return pyop_builtins.iteritems()
def get_var_by_name(self, name):
'''
Look for the named local variable, returning a (PyObjectPtr, scope) pair
where scope is a string 'local', 'global', 'builtin'
If not found, return (None, None)
'''
for pyop_name, pyop_value in self.iter_locals():
if name == pyop_name.proxyval(set()):
return pyop_value, 'local'
for pyop_name, pyop_value in self.iter_globals():
if name == pyop_name.proxyval(set()):
return pyop_value, 'global'
for pyop_name, pyop_value in self.iter_builtins():
if name == pyop_name.proxyval(set()):
return pyop_value, 'builtin'
return None, None
def filename(self):
'''Get the path of the current Python source file, as a string'''
if self.is_optimized_out():
return '(frame information optimized out)'
return self.co_filename.proxyval(set())
def current_line_num(self):
'''Get current line number as an integer (1-based)
Translated from PyFrame_GetLineNumber and PyCode_Addr2Line
See Objects/lnotab_notes.txt
'''
if self.is_optimized_out():
return None
f_trace = self.field('f_trace')
if long(f_trace) != 0:
# we have a non-NULL f_trace:
return self.f_lineno
else:
#try:
return self.co.addr2line(self.f_lasti)
#except ValueError:
# return self.f_lineno
def current_line(self):
'''Get the text of the current source line as a string, with a trailing
newline character'''
if self.is_optimized_out():
return '(frame information optimized out)'
filename = self.filename()
try:
f = open(os_fsencode(filename), 'r')
except IOError:
return None
with f:
all_lines = f.readlines()
# Convert from 1-based current_line_num to 0-based list offset:
return all_lines[self.current_line_num()-1]
def write_repr(self, out, visited):
if self.is_optimized_out():
out.write('(frame information optimized out)')
return
out.write('Frame 0x%x, for file %s, line %i, in %s ('
% (self.as_address(),
self.co_filename.proxyval(visited),
self.current_line_num(),
self.co_name.proxyval(visited)))
first = True
for pyop_name, pyop_value in self.iter_locals():
if not first:
out.write(', ')
first = False
out.write(pyop_name.proxyval(visited))
out.write('=')
pyop_value.write_repr(out, visited)
out.write(')')
def print_traceback(self):
if self.is_optimized_out():
            sys.stdout.write(' (frame information optimized out)\n')
            return
visited = set()
sys.stdout.write(' File "%s", line %i, in %s\n'
% (self.co_filename.proxyval(visited),
self.current_line_num(),
self.co_name.proxyval(visited)))
class PySetObjectPtr(PyObjectPtr):
_typename = 'PySetObject'
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('%s(...)' % self.safe_tp_name())
visited.add(self.as_address())
members = []
table = self.field('table')
for i in safe_range(self.field('mask')+1):
setentry = table[i]
key = setentry['key']
if key != 0:
key_proxy = PyObjectPtr.from_pyobject_ptr(key).proxyval(visited)
if key_proxy != '<dummy key>':
members.append(key_proxy)
if self.safe_tp_name() == 'frozenset':
return frozenset(members)
else:
return set(members)
def write_repr(self, out, visited):
# Emulate Python 3's set_repr
tp_name = self.safe_tp_name()
# Guard against infinite loops:
if self.as_address() in visited:
out.write('(...)')
return
visited.add(self.as_address())
# Python 3's set_repr special-cases the empty set:
if not self.field('used'):
out.write(tp_name)
out.write('()')
return
# Python 3 uses {} for set literals:
if tp_name != 'set':
out.write(tp_name)
out.write('(')
out.write('{')
first = True
table = self.field('table')
for i in safe_range(self.field('mask')+1):
setentry = table[i]
key = setentry['key']
if key != 0:
pyop_key = PyObjectPtr.from_pyobject_ptr(key)
key_proxy = pyop_key.proxyval(visited) # FIXME!
if key_proxy != '<dummy key>':
if not first:
out.write(', ')
first = False
pyop_key.write_repr(out, visited)
out.write('}')
if tp_name != 'set':
out.write(')')
class PyBytesObjectPtr(PyObjectPtr):
_typename = 'PyBytesObject'
def __str__(self):
field_ob_size = self.field('ob_size')
field_ob_sval = self.field('ob_sval')
char_ptr = field_ob_sval.address.cast(_type_unsigned_char_ptr)
return ''.join([chr(char_ptr[i]) for i in safe_range(field_ob_size)])
def proxyval(self, visited):
return str(self)
def write_repr(self, out, visited):
# Write this out as a Python 3 bytes literal, i.e. with a "b" prefix
# Get a PyStringObject* within the Python 2 gdb process:
proxy = self.proxyval(visited)
# Transliteration of Python 3's Objects/bytesobject.c:PyBytes_Repr
# to Python 2 code:
quote = "'"
if "'" in proxy and not '"' in proxy:
quote = '"'
out.write('b')
out.write(quote)
for byte in proxy:
if byte == quote or byte == '\\':
out.write('\\')
out.write(byte)
elif byte == '\t':
out.write('\\t')
elif byte == '\n':
out.write('\\n')
elif byte == '\r':
out.write('\\r')
elif byte < ' ' or ord(byte) >= 0x7f:
out.write('\\x')
out.write(hexdigits[(ord(byte) & 0xf0) >> 4])
out.write(hexdigits[ord(byte) & 0xf])
else:
out.write(byte)
out.write(quote)
class PyTupleObjectPtr(PyObjectPtr):
_typename = 'PyTupleObject'
def __getitem__(self, i):
# Get the gdb.Value for the (PyObject*) with the given index:
field_ob_item = self.field('ob_item')
return field_ob_item[i]
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('(...)')
visited.add(self.as_address())
result = tuple([PyObjectPtr.from_pyobject_ptr(self[i]).proxyval(visited)
for i in safe_range(int_from_int(self.field('ob_size')))])
return result
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('(...)')
return
visited.add(self.as_address())
out.write('(')
for i in safe_range(int_from_int(self.field('ob_size'))):
if i > 0:
out.write(', ')
element = PyObjectPtr.from_pyobject_ptr(self[i])
element.write_repr(out, visited)
if self.field('ob_size') == 1:
out.write(',)')
else:
out.write(')')
class PyTypeObjectPtr(PyObjectPtr):
_typename = 'PyTypeObject'
def _unichr_is_printable(char):
# Logic adapted from Python 3's Tools/unicode/makeunicodedata.py
if char == u" ":
return True
import unicodedata
return unicodedata.category(char) not in ("C", "Z")
if sys.maxunicode >= 0x10000:
_unichr = unichr
else:
# Needed for proper surrogate support if sizeof(Py_UNICODE) is 2 in gdb
def _unichr(x):
if x < 0x10000:
return unichr(x)
x -= 0x10000
ch1 = 0xD800 | (x >> 10)
ch2 = 0xDC00 | (x & 0x3FF)
return unichr(ch1) + unichr(ch2)
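# Illustrative check of the arithmetic above (added note, not in the original file):
# for U+1F600, x = 0x1F600 - 0x10000 = 0xF600, so
#   ch1 = 0xD800 | (0xF600 >> 10) = 0xD83D
#   ch2 = 0xDC00 | (0xF600 & 0x3FF) = 0xDE00
# i.e. _unichr(0x1F600) returns the surrogate pair u'\ud83d\ude00' on a narrow build.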
class PyUnicodeObjectPtr(PyObjectPtr):
_typename = 'PyUnicodeObject'
def char_width(self):
_type_Py_UNICODE = gdb.lookup_type('Py_UNICODE')
return _type_Py_UNICODE.sizeof
def proxyval(self, visited):
# From unicodeobject.h:
# Py_ssize_t length; /* Length of raw Unicode data in buffer */
# Py_UNICODE *str; /* Raw Unicode buffer */
field_length = long(self.field('length'))
field_str = self.field('str')
# Gather a list of ints from the Py_UNICODE array; these are either
# UCS-2 or UCS-4 code points:
if self.char_width() > 2:
Py_UNICODEs = [int(field_str[i]) for i in safe_range(field_length)]
else:
# A more elaborate routine if sizeof(Py_UNICODE) is 2 in the
# inferior process: we must join surrogate pairs.
Py_UNICODEs = []
i = 0
limit = safety_limit(field_length)
while i < limit:
ucs = int(field_str[i])
i += 1
if ucs < 0xD800 or ucs >= 0xDC00 or i == field_length:
Py_UNICODEs.append(ucs)
continue
# This could be a surrogate pair.
ucs2 = int(field_str[i])
if ucs2 < 0xDC00 or ucs2 > 0xDFFF:
continue
code = (ucs & 0x03FF) << 10
code |= ucs2 & 0x03FF
code += 0x00010000
Py_UNICODEs.append(code)
i += 1
# Convert the int code points to unicode characters, and generate a
# local unicode instance.
# This splits surrogate pairs if sizeof(Py_UNICODE) is 2 here (in gdb).
result = u''.join([_unichr(ucs) for ucs in Py_UNICODEs])
return result
def write_repr(self, out, visited):
# Write this out as a Python 3 str literal, i.e. without a "u" prefix
# Get a PyUnicodeObject* within the Python 2 gdb process:
proxy = self.proxyval(visited)
# Transliteration of Python 3's Object/unicodeobject.c:unicode_repr
# to Python 2:
if "'" in proxy and '"' not in proxy:
quote = '"'
else:
quote = "'"
out.write(quote)
i = 0
while i < len(proxy):
ch = proxy[i]
i += 1
# Escape quotes and backslashes
if ch == quote or ch == '\\':
out.write('\\')
out.write(ch)
# Map special whitespace to '\t', \n', '\r'
elif ch == '\t':
out.write('\\t')
elif ch == '\n':
out.write('\\n')
elif ch == '\r':
out.write('\\r')
# Map non-printable US ASCII to '\xhh' */
elif ch < ' ' or ch == 0x7F:
out.write('\\x')
out.write(hexdigits[(ord(ch) >> 4) & 0x000F])
out.write(hexdigits[ord(ch) & 0x000F])
# Copy ASCII characters as-is
elif ord(ch) < 0x7F:
out.write(ch)
# Non-ASCII characters
else:
ucs = ch
ch2 = None
if sys.maxunicode < 0x10000:
# If sizeof(Py_UNICODE) is 2 here (in gdb), join
# surrogate pairs before calling _unichr_is_printable.
if (i < len(proxy)
                        and 0xD800 <= ord(ch) < 0xDC00
and 0xDC00 <= ord(proxy[i]) <= 0xDFFF):
ch2 = proxy[i]
ucs = ch + ch2
i += 1
                # Unfortunately, Python 2's unicode type doesn't seem
# to expose the "isprintable" method
printable = _unichr_is_printable(ucs)
if printable:
try:
ucs.encode(ENCODING)
except UnicodeEncodeError:
printable = False
# Map Unicode whitespace and control characters
# (categories Z* and C* except ASCII space)
if not printable:
if ch2 is not None:
# Match Python 3's representation of non-printable
# wide characters.
code = (ord(ch) & 0x03FF) << 10
code |= ord(ch2) & 0x03FF
code += 0x00010000
else:
code = ord(ucs)
# Map 8-bit characters to '\\xhh'
if code <= 0xff:
out.write('\\x')
out.write(hexdigits[(code >> 4) & 0x000F])
out.write(hexdigits[code & 0x000F])
# Map 21-bit characters to '\U00xxxxxx'
elif code >= 0x10000:
out.write('\\U')
out.write(hexdigits[(code >> 28) & 0x0000000F])
out.write(hexdigits[(code >> 24) & 0x0000000F])
out.write(hexdigits[(code >> 20) & 0x0000000F])
out.write(hexdigits[(code >> 16) & 0x0000000F])
out.write(hexdigits[(code >> 12) & 0x0000000F])
out.write(hexdigits[(code >> 8) & 0x0000000F])
out.write(hexdigits[(code >> 4) & 0x0000000F])
out.write(hexdigits[code & 0x0000000F])
# Map 16-bit characters to '\uxxxx'
else:
out.write('\\u')
out.write(hexdigits[(code >> 12) & 0x000F])
out.write(hexdigits[(code >> 8) & 0x000F])
out.write(hexdigits[(code >> 4) & 0x000F])
out.write(hexdigits[code & 0x000F])
else:
# Copy characters as-is
out.write(ch)
if ch2 is not None:
out.write(ch2)
out.write(quote)
def int_from_int(gdbval):
return int(str(gdbval))
def stringify(val):
# TODO: repr() puts everything on one line; pformat can be nicer, but
    # can lead to very long results; this function isolates the choice
if True:
return repr(val)
else:
from pprint import pformat
return pformat(val)
class PyObjectPtrPrinter:
"Prints a (PyObject*)"
def __init__ (self, gdbval):
self.gdbval = gdbval
def to_string (self):
pyop = PyObjectPtr.from_pyobject_ptr(self.gdbval)
if True:
return pyop.get_truncated_repr(MAX_OUTPUT_LEN)
else:
# Generate full proxy value then stringify it.
# Doing so could be expensive
proxyval = pyop.proxyval(set())
return stringify(proxyval)
def pretty_printer_lookup(gdbval):
type = gdbval.type.unqualified()
if type.code == gdb.TYPE_CODE_PTR:
type = type.target().unqualified()
t = str(type)
if t in ("PyObject", "PyFrameObject", "PyUnicodeObject"):
return PyObjectPtrPrinter(gdbval)
"""
During development, I've been manually invoking the code in this way:
(gdb) python
import sys
sys.path.append('/home/david/coding/python-gdb')
import libpython
end
then reloading it after each edit like this:
(gdb) python reload(libpython)
The following code should ensure that the prettyprinter is registered
if the code is autoloaded by gdb when visiting libpython.so, provided
that this python file is installed to the same path as the library (or its
.debug file) plus a "-gdb.py" suffix, e.g:
/usr/lib/libpython2.6.so.1.0-gdb.py
/usr/lib/debug/usr/lib/libpython2.6.so.1.0.debug-gdb.py
"""
def register (obj):
if obj == None:
obj = gdb
# Wire up the pretty-printer
obj.pretty_printers.append(pretty_printer_lookup)
register (gdb.current_objfile ())
# Unfortunately, the exact API exposed by the gdb module varies somewhat
# from build to build
# See http://bugs.python.org/issue8279?#msg102276
class Frame(object):
'''
Wrapper for gdb.Frame, adding various methods
'''
def __init__(self, gdbframe):
self._gdbframe = gdbframe
def older(self):
older = self._gdbframe.older()
if older:
return Frame(older)
else:
return None
def newer(self):
newer = self._gdbframe.newer()
if newer:
return Frame(newer)
else:
return None
def select(self):
'''If supported, select this frame and return True; return False if unsupported
Not all builds have a gdb.Frame.select method; seems to be present on Fedora 12
onwards, but absent on Ubuntu buildbot'''
if not hasattr(self._gdbframe, 'select'):
print ('Unable to select frame: '
'this build of gdb does not expose a gdb.Frame.select method')
return False
self._gdbframe.select()
return True
def get_index(self):
'''Calculate index of frame, starting at 0 for the newest frame within
this thread'''
index = 0
# Go down until you reach the newest frame:
iter_frame = self
while iter_frame.newer():
index += 1
iter_frame = iter_frame.newer()
return index
def is_evalframeex(self):
'''Is this a PyEval_EvalFrameEx frame?'''
if self._gdbframe.name() == 'PyEval_EvalFrameEx':
'''
I believe we also need to filter on the inline
struct frame_id.inline_depth, only regarding frames with
            an inline depth of 0 as actually being this function.
            So we reject those with type gdb.INLINE_FRAME.
'''
if self._gdbframe.type() == gdb.NORMAL_FRAME:
# We have a PyEval_EvalFrameEx frame:
return True
return False
def get_pyop(self):
try:
f = self._gdbframe.read_var('f')
return PyFrameObjectPtr.from_pyobject_ptr(f)
except ValueError:
return None
@classmethod
def get_selected_frame(cls):
_gdbframe = gdb.selected_frame()
if _gdbframe:
return Frame(_gdbframe)
return None
@classmethod
def get_selected_python_frame(cls):
'''Try to obtain the Frame for the python code in the selected frame,
or None'''
frame = cls.get_selected_frame()
while frame:
if frame.is_evalframeex():
return frame
frame = frame.older()
# Not found:
return None
def print_summary(self):
if self.is_evalframeex():
pyop = self.get_pyop()
if pyop:
line = pyop.get_truncated_repr(MAX_OUTPUT_LEN)
write_unicode(sys.stdout, '#%i %s\n' % (self.get_index(), line))
line = pyop.current_line()
if line is not None:
sys.stdout.write(line)
else:
sys.stdout.write('#%i (unable to read python frame information)\n' % self.get_index())
else:
sys.stdout.write('#%i\n' % self.get_index())
def print_traceback(self):
if self.is_evalframeex():
pyop = self.get_pyop()
if pyop:
pyop.print_traceback()
line = pyop.current_line()
if line is not None:
sys.stdout.write(' %s\n' % line.strip())
else:
sys.stdout.write(' (unable to read python frame information)\n')
else:
sys.stdout.write(' (not a python frame)\n')
class PyList(gdb.Command):
'''List the current Python source code, if any
Use
py-list START
to list at a different line number within the python source.
Use
py-list START, END
to list a specific range of lines within the python source.
'''
def __init__(self):
gdb.Command.__init__ (self,
"py-list",
gdb.COMMAND_FILES,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
import re
start = None
end = None
m = re.match(r'\s*(\d+)\s*', args)
if m:
start = int(m.group(0))
end = start + 10
m = re.match(r'\s*(\d+)\s*,\s*(\d+)\s*', args)
if m:
start, end = map(int, m.groups())
frame = Frame.get_selected_python_frame()
if not frame:
print 'Unable to locate python frame'
return
pyop = frame.get_pyop()
if not pyop:
print 'Unable to read information on python frame'
return
filename = pyop.filename()
lineno = pyop.current_line_num()
if start is None:
start = lineno - 5
end = lineno + 5
if start<1:
start = 1
try:
f = open(os_fsencode(filename), 'r')
except IOError as err:
sys.stdout.write('Unable to open %s: %s\n'
% (filename, err))
return
with f:
all_lines = f.readlines()
# start and end are 1-based, all_lines is 0-based;
# so [start-1:end] as a python slice gives us [start, end] as a
# closed interval
for i, line in enumerate(all_lines[start-1:end]):
linestr = str(i+start)
# Highlight current line:
if i + start == lineno:
linestr = '>' + linestr
sys.stdout.write('%4s %s' % (linestr, line))
# ...and register the command:
PyList()
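# Illustrative usage from the gdb prompt once this file has been loaded (sketch):
#   (gdb) py-list           # list around the current line of the selected Python frame
#   (gdb) py-list 10        # list starting at line 10
#   (gdb) py-list 10, 20    # list lines 10 through 20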
def move_in_stack(move_up):
'''Move up or down the stack (for the py-up/py-down command)'''
frame = Frame.get_selected_python_frame()
while frame:
if move_up:
iter_frame = frame.older()
else:
iter_frame = frame.newer()
if not iter_frame:
break
if iter_frame.is_evalframeex():
# Result:
if iter_frame.select():
iter_frame.print_summary()
return
frame = iter_frame
if move_up:
print 'Unable to find an older python frame'
else:
print 'Unable to find a newer python frame'
class PyUp(gdb.Command):
'Select and print the python stack frame that called this one (if any)'
def __init__(self):
gdb.Command.__init__ (self,
"py-up",
gdb.COMMAND_STACK,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
move_in_stack(move_up=True)
class PyDown(gdb.Command):
'Select and print the python stack frame called by this one (if any)'
def __init__(self):
gdb.Command.__init__ (self,
"py-down",
gdb.COMMAND_STACK,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
move_in_stack(move_up=False)
# Not all builds of gdb have gdb.Frame.select
if hasattr(gdb.Frame, 'select'):
PyUp()
PyDown()
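# Illustrative usage (sketch): `py-up` selects and prints the Python frame that
# called the current one, `py-down` the frame it called; each prints
# "Unable to find an older/newer python frame" when no such frame exists.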
class PyBacktraceFull(gdb.Command):
'Display the current python frame and all the frames within its call stack (if any)'
def __init__(self):
gdb.Command.__init__ (self,
"py-bt-full",
gdb.COMMAND_STACK,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
frame = Frame.get_selected_python_frame()
while frame:
if frame.is_evalframeex():
frame.print_summary()
frame = frame.older()
PyBacktraceFull()
class PyBacktrace(gdb.Command):
'Display the current python frame and all the frames within its call stack (if any)'
def __init__(self):
gdb.Command.__init__ (self,
"py-bt",
gdb.COMMAND_STACK,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
sys.stdout.write('Traceback (most recent call first):\n')
frame = Frame.get_selected_python_frame()
while frame:
if frame.is_evalframeex():
frame.print_traceback()
frame = frame.older()
PyBacktrace()
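# Illustrative usage (sketch): at the gdb prompt, `py-bt` prints
#   Traceback (most recent call first):
# followed by one line per PyEval_EvalFrameEx frame on the selected thread's stack,
# while `py-bt-full` prints the fuller per-frame summary from print_summary().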
class PyPrint(gdb.Command):
'Look up the given python variable name, and print it'
def __init__(self):
gdb.Command.__init__ (self,
"py-print",
gdb.COMMAND_DATA,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
name = str(args)
frame = Frame.get_selected_python_frame()
if not frame:
print 'Unable to locate python frame'
return
pyop_frame = frame.get_pyop()
if not pyop_frame:
print 'Unable to read information on python frame'
return
pyop_var, scope = pyop_frame.get_var_by_name(name)
if pyop_var:
print ('%s %r = %s'
% (scope,
name,
pyop_var.get_truncated_repr(MAX_OUTPUT_LEN)))
else:
print '%r not found' % name
PyPrint()
class PyLocals(gdb.Command):
    'Print the local variables of the selected python frame'
def __init__(self):
gdb.Command.__init__ (self,
"py-locals",
gdb.COMMAND_DATA,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
name = str(args)
frame = Frame.get_selected_python_frame()
if not frame:
print 'Unable to locate python frame'
return
pyop_frame = frame.get_pyop()
if not pyop_frame:
print 'Unable to read information on python frame'
return
for pyop_name, pyop_value in pyop_frame.iter_locals():
print ('%s = %s'
% (pyop_name.proxyval(set()),
pyop_value.get_truncated_repr(MAX_OUTPUT_LEN)))
PyLocals()
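# Illustrative usage (sketch):
#   (gdb) py-print some_name    # look the name up in the selected Python frame
#   (gdb) py-locals             # dump every local of the selected Python frame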
|
apache-2.0
| 1,063,766,112,629,001,700 | 7,955,566,924,474,481,000 | 32.347568 | 102 | 0.558498 | false |
CasparLi/calibre
|
src/calibre/ebooks/textile/functions.py
|
14
|
39331
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
PyTextile
A Humane Web Text Generator
"""
# Last upstream version basis
# __version__ = '2.1.4'
#__date__ = '2009/12/04'
__copyright__ = """
Copyright (c) 2011, Leigh Parry <[email protected]>
Copyright (c) 2011, John Schember <[email protected]>
Copyright (c) 2009, Jason Samsa, http://jsamsa.com/
Copyright (c) 2004, Roberto A. F. De Almeida, http://dealmeida.net/
Copyright (c) 2003, Mark Pilgrim, http://diveintomark.org/
Original PHP Version:
Copyright (c) 2003-2004, Dean Allen <[email protected]>
All rights reserved.
Thanks to Carlo Zottmann <[email protected]> for refactoring
Textile's procedural code into a class framework
Additions and fixes Copyright (c) 2006 Alex Shiels http://thresholdstate.com/
"""
__license__ = """
L I C E N S E
=============
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name Textile nor the names of its contributors may be used to
endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import re
import uuid
from urlparse import urlparse
from calibre.utils.smartypants import smartyPants
def _normalize_newlines(string):
out = re.sub(r'\r\n', '\n', string)
out = re.sub(r'\n{3,}', '\n\n', out)
out = re.sub(r'\n\s*\n', '\n\n', out)
out = re.sub(r'"$', '" ', out)
return out
def getimagesize(url):
"""
Attempts to determine an image's width and height, and returns a string
suitable for use in an <img> tag, or None in case of failure.
Requires that PIL is installed.
>>> getimagesize("http://www.google.com/intl/en_ALL/images/logo.gif")
... #doctest: +ELLIPSIS, +SKIP
'width="..." height="..."'
"""
try:
from PIL import ImageFile
except ImportError:
try:
import ImageFile
except ImportError:
return None
try:
import urllib2
except ImportError:
return None
try:
p = ImageFile.Parser()
f = urllib2.urlopen(url)
while True:
s = f.read(1024)
if not s:
break
p.feed(s)
if p.image:
return 'width="%i" height="%i"' % p.image.size
except (IOError, ValueError):
return None
class Textile(object):
hlgn = r'(?:\<(?!>)|(?<!<)\>|\<\>|\=|[()]+(?! ))'
vlgn = r'[\-^~]'
clas = r'(?:\([^)]+\))'
lnge = r'(?:\[[^\]]+\])'
styl = r'(?:\{[^}]+\})'
cspn = r'(?:\\\d+)'
rspn = r'(?:\/\d+)'
a = r'(?:%s|%s)*' % (hlgn, vlgn)
s = r'(?:%s|%s)*' % (cspn, rspn)
c = r'(?:%s)*' % '|'.join([clas, styl, lnge, hlgn])
pnct = r'[-!"#$%&()*+,/:;<=>?@\'\[\\\]\.^_`{|}~]'
# urlch = r'[\w"$\-_.+!*\'(),";/?:@=&%#{}|\\^~\[\]`]'
urlch = '[\w"$\-_.+*\'(),";\/?:@=&%#{}|\\^~\[\]`]'
url_schemes = ('http', 'https', 'ftp', 'mailto')
btag = ('bq', 'bc', 'notextile', 'pre', 'h[1-6]', 'fn\d+', 'p')
btag_lite = ('bq', 'bc', 'p')
macro_defaults = [
(re.compile(r'{(c\||\|c)}'), r'¢'), # cent
(re.compile(r'{(L-|-L)}'), r'£'), # pound
(re.compile(r'{(Y=|=Y)}'), r'¥'), # yen
(re.compile(r'{\(c\)}'), r'©'), # copyright
(re.compile(r'{\(r\)}'), r'®'), # registered
(re.compile(r'{(\+_|_\+)}'), r'±'), # plus-minus
(re.compile(r'{1/4}'), r'¼'), # quarter
(re.compile(r'{1/2}'), r'½'), # half
(re.compile(r'{3/4}'), r'¾'), # three-quarter
(re.compile(r'{(A`|`A)}'), r'À'), # A-acute
(re.compile(r'{(A\'|\'A)}'), r'Á'), # A-grave
(re.compile(r'{(A\^|\^A)}'), r'Â'), # A-circumflex
(re.compile(r'{(A~|~A)}'), r'Ã'), # A-tilde
(re.compile(r'{(A\"|\"A)}'), r'Ä'), # A-diaeresis
(re.compile(r'{(Ao|oA)}'), r'Å'), # A-ring
(re.compile(r'{(AE)}'), r'Æ'), # AE
(re.compile(r'{(C,|,C)}'), r'Ç'), # C-cedilla
(re.compile(r'{(E`|`E)}'), r'È'), # E-acute
(re.compile(r'{(E\'|\'E)}'), r'É'), # E-grave
(re.compile(r'{(E\^|\^E)}'), r'Ê'), # E-circumflex
(re.compile(r'{(E\"|\"E)}'), r'Ë'), # E-diaeresis
(re.compile(r'{(I`|`I)}'), r'Ì'), # I-acute
(re.compile(r'{(I\'|\'I)}'), r'Í'), # I-grave
(re.compile(r'{(I\^|\^I)}'), r'Î'), # I-circumflex
(re.compile(r'{(I\"|\"I)}'), r'Ï'), # I-diaeresis
(re.compile(r'{(D-|-D)}'), r'Ð'), # ETH
(re.compile(r'{(N~|~N)}'), r'Ñ'), # N-tilde
(re.compile(r'{(O`|`O)}'), r'Ò'), # O-acute
(re.compile(r'{(O\'|\'O)}'), r'Ó'), # O-grave
(re.compile(r'{(O\^|\^O)}'), r'Ô'), # O-circumflex
(re.compile(r'{(O~|~O)}'), r'Õ'), # O-tilde
(re.compile(r'{(O\"|\"O)}'), r'Ö'), # O-diaeresis
(re.compile(r'{x}'), r'×'), # dimension
(re.compile(r'{(O\/|\/O)}'), r'Ø'), # O-slash
(re.compile(r'{(U`|`U)}'), r'Ù'), # U-acute
(re.compile(r'{(U\'|\'U)}'), r'Ú'), # U-grave
(re.compile(r'{(U\^|\^U)}'), r'Û'), # U-circumflex
(re.compile(r'{(U\"|\"U)}'), r'Ü'), # U-diaeresis
(re.compile(r'{(Y\'|\'Y)}'), r'Ý'), # Y-grave
(re.compile(r'{sz}'), r'ß'), # sharp-s
(re.compile(r'{(a`|`a)}'), r'à'), # a-grave
(re.compile(r'{(a\'|\'a)}'), r'á'), # a-acute
(re.compile(r'{(a\^|\^a)}'), r'â'), # a-circumflex
(re.compile(r'{(a~|~a)}'), r'ã'), # a-tilde
(re.compile(r'{(a\"|\"a)}'), r'ä'), # a-diaeresis
(re.compile(r'{(ao|oa)}'), r'å'), # a-ring
(re.compile(r'{ae}'), r'æ'), # ae
(re.compile(r'{(c,|,c)}'), r'ç'), # c-cedilla
(re.compile(r'{(e`|`e)}'), r'è'), # e-grave
(re.compile(r'{(e\'|\'e)}'), r'é'), # e-acute
(re.compile(r'{(e\^|\^e)}'), r'ê'), # e-circumflex
(re.compile(r'{(e\"|\"e)}'), r'ë'), # e-diaeresis
(re.compile(r'{(i`|`i)}'), r'ì'), # i-grave
(re.compile(r'{(i\'|\'i)}'), r'í'), # i-acute
(re.compile(r'{(i\^|\^i)}'), r'î'), # i-circumflex
(re.compile(r'{(i\"|\"i)}'), r'ï'), # i-diaeresis
(re.compile(r'{(d-|-d)}'), r'ð'), # eth
(re.compile(r'{(n~|~n)}'), r'ñ'), # n-tilde
(re.compile(r'{(o`|`o)}'), r'ò'), # o-grave
(re.compile(r'{(o\'|\'o)}'), r'ó'), # o-acute
(re.compile(r'{(o\^|\^o)}'), r'ô'), # o-circumflex
(re.compile(r'{(o~|~o)}'), r'õ'), # o-tilde
(re.compile(r'{(o\"|\"o)}'), r'ö'), # o-diaeresis
(re.compile(r'{(o\/|\/o)}'), r'ø'), # o-stroke
(re.compile(r'{(u`|`u)}'), r'ù'), # u-grave
(re.compile(r'{(u\'|\'u)}'), r'ú'), # u-acute
(re.compile(r'{(u\^|\^u)}'), r'û'), # u-circumflex
(re.compile(r'{(u\"|\"u)}'), r'ü'), # u-diaeresis
(re.compile(r'{(y\'|\'y)}'), r'ý'), # y-acute
(re.compile(r'{(y\"|\"y)}'), r'ÿ'), # y-diaeresis
(re.compile(r'{(C\ˇ|\ˇC)}'), r'Č'), # C-caron
(re.compile(r'{(c\ˇ|\ˇc)}'), r'č'), # c-caron
(re.compile(r'{(D\ˇ|\ˇD)}'), r'Ď'), # D-caron
(re.compile(r'{(d\ˇ|\ˇd)}'), r'ď'), # d-caron
(re.compile(r'{(E\ˇ|\ˇE)}'), r'Ě'), # E-caron
(re.compile(r'{(e\ˇ|\ˇe)}'), r'ě'), # e-caron
(re.compile(r'{(L\'|\'L)}'), r'Ĺ'), # L-acute
(re.compile(r'{(l\'|\'l)}'), r'ĺ'), # l-acute
(re.compile(r'{(L\ˇ|\ˇL)}'), r'Ľ'), # L-caron
(re.compile(r'{(l\ˇ|\ˇl)}'), r'ľ'), # l-caron
(re.compile(r'{(N\ˇ|\ˇN)}'), r'Ň'), # N-caron
(re.compile(r'{(n\ˇ|\ˇn)}'), r'ň'), # n-caron
(re.compile(r'{OE}'), r'Œ'), # OE
(re.compile(r'{oe}'), r'œ'), # oe
(re.compile(r'{(R\'|\'R)}'), r'Ŕ'), # R-acute
(re.compile(r'{(r\'|\'r)}'), r'ŕ'), # r-acute
(re.compile(r'{(R\ˇ|\ˇR)}'), r'Ř'), # R-caron
(re.compile(r'{(r\ˇ|\ˇr)}'), r'ř'), # r-caron
(re.compile(r'{(S\^|\^S)}'), r'Ŝ'), # S-circumflex
(re.compile(r'{(s\^|\^s)}'), r'ŝ'), # s-circumflex
(re.compile(r'{(S\ˇ|\ˇS)}'), r'Š'), # S-caron
(re.compile(r'{(s\ˇ|\ˇs)}'), r'š'), # s-caron
(re.compile(r'{(T\ˇ|\ˇT)}'), r'Ť'), # T-caron
(re.compile(r'{(t\ˇ|\ˇt)}'), r'ť'), # t-caron
(re.compile(r'{(U\°|\°U)}'), r'Ů'), # U-ring
(re.compile(r'{(u\°|\°u)}'), r'ů'), # u-ring
(re.compile(r'{(Z\ˇ|\ˇZ)}'), r'Ž'), # Z-caron
(re.compile(r'{(z\ˇ|\ˇz)}'), r'ž'), # z-caron
(re.compile(r'{\*}'), r'•'), # bullet
(re.compile(r'{Fr}'), r'₣'), # Franc
(re.compile(r'{(L=|=L)}'), r'₤'), # Lira
(re.compile(r'{Rs}'), r'₨'), # Rupee
(re.compile(r'{(C=|=C)}'), r'€'), # euro
(re.compile(r'{tm}'), r'™'), # trademark
(re.compile(r'{spades?}'), r'♠'), # spade
(re.compile(r'{clubs?}'), r'♣'), # club
(re.compile(r'{hearts?}'), r'♥'), # heart
(re.compile(r'{diam(onds?|s)}'), r'♦'), # diamond
(re.compile(r'{"}'), r'"'), # double-quote
(re.compile(r"{'}"), r'''), # single-quote
(re.compile(r"{(’|'/|/')}"), r'’'), # closing-single-quote - apostrophe
(re.compile(r"{(‘|\\'|'\\)}"), r'‘'), # opening-single-quote
(re.compile(r'{(”|"/|/")}'), r'”'), # closing-double-quote
(re.compile(r'{(“|\\"|"\\)}'), r'“'), # opening-double-quote
]
glyph_defaults = [
(re.compile(r'(\d+\'?\"?)( ?)x( ?)(?=\d+)'), r'\1\2×\3'), # dimension sign
(re.compile(r'(\d+)\'(\s)', re.I), r'\1′\2'), # prime
(re.compile(r'(\d+)\"(\s)', re.I), r'\1″\2'), # prime-double
(re.compile(r'\b([A-Z][A-Z0-9]{2,})\b(?:[(]([^)]*)[)])'), r'<acronym title="\2">\1</acronym>'), # 3+ uppercase acronym
(re.compile(r'\b([A-Z][A-Z\'\-]+[A-Z])(?=[\s.,\)>])'), r'<span class="caps">\1</span>'), # 3+ uppercase
(re.compile(r'\b(\s{0,1})?\.{3}'), r'\1…'), # ellipsis
(re.compile(r'^[\*_-]{3,}$', re.M), r'<hr />'), # <hr> scene-break
(re.compile(r'(^|[^-])--([^-]|$)'), r'\1—\2'), # em dash
(re.compile(r'\s-(?:\s|$)'), r' – '), # en dash
(re.compile(r'\b( ?)[([]TM[])]', re.I), r'\1™'), # trademark
(re.compile(r'\b( ?)[([]R[])]', re.I), r'\1®'), # registered
(re.compile(r'\b( ?)[([]C[])]', re.I), r'\1©'), # copyright
]
def __init__(self, restricted=False, lite=False, noimage=False):
"""docstring for __init__"""
self.restricted = restricted
self.lite = lite
self.noimage = noimage
self.get_sizes = False
self.fn = {}
self.urlrefs = {}
self.shelf = {}
self.rel = ''
self.html_type = 'xhtml'
def textile(self, text, rel=None, head_offset=0, html_type='xhtml'):
"""
>>> import textile
>>> textile.textile('some textile')
u'\\t<p>some textile</p>'
"""
self.html_type = html_type
# text = unicode(text)
text = _normalize_newlines(text)
if self.restricted:
text = self.encode_html(text, quotes=False)
if rel:
self.rel = ' rel="%s"' % rel
text = self.getRefs(text)
text = self.block(text, int(head_offset))
text = self.retrieve(text)
text = smartyPants(text, 'q')
return text
def pba(self, input, element=None):
"""
Parse block attributes.
>>> t = Textile()
>>> t.pba(r'\3')
''
>>> t.pba(r'\\3', element='td')
' colspan="3"'
>>> t.pba(r'/4', element='td')
' rowspan="4"'
>>> t.pba(r'\\3/4', element='td')
' colspan="3" rowspan="4"'
>>> t.vAlign('^')
'top'
>>> t.pba('^', element='td')
' style="vertical-align:top;"'
>>> t.pba('{line-height:18px}')
' style="line-height:18px;"'
>>> t.pba('(foo-bar)')
' class="foo-bar"'
>>> t.pba('(#myid)')
' id="myid"'
>>> t.pba('(foo-bar#myid)')
' class="foo-bar" id="myid"'
>>> t.pba('((((')
' style="padding-left:4em;"'
>>> t.pba(')))')
' style="padding-right:3em;"'
>>> t.pba('[fr]')
' lang="fr"'
"""
style = []
aclass = ''
lang = ''
colspan = ''
rowspan = ''
id = ''
if not input:
return ''
matched = input
if element == 'td':
m = re.search(r'\\(\d+)', matched)
if m:
colspan = m.group(1)
m = re.search(r'/(\d+)', matched)
if m:
rowspan = m.group(1)
if element == 'td' or element == 'tr':
m = re.search(r'(%s)' % self.vlgn, matched)
if m:
style.append("vertical-align:%s;" % self.vAlign(m.group(1)))
m = re.search(r'\{([^}]*)\}', matched)
if m:
style.append(m.group(1).rstrip(';') + ';')
matched = matched.replace(m.group(0), '')
m = re.search(r'\[([^\]]+)\]', matched, re.U)
if m:
lang = m.group(1)
matched = matched.replace(m.group(0), '')
m = re.search(r'\(([^()]+)\)', matched, re.U)
if m:
aclass = m.group(1)
matched = matched.replace(m.group(0), '')
m = re.search(r'([(]+)', matched)
if m:
style.append("padding-left:%sem;" % len(m.group(1)))
matched = matched.replace(m.group(0), '')
m = re.search(r'([)]+)', matched)
if m:
style.append("padding-right:%sem;" % len(m.group(1)))
matched = matched.replace(m.group(0), '')
m = re.search(r'(%s)' % self.hlgn, matched)
if m:
style.append("text-align:%s;" % self.hAlign(m.group(1)))
m = re.search(r'^(.*)#(.*)$', aclass)
if m:
id = m.group(2)
aclass = m.group(1)
if self.restricted:
if lang:
                return ' lang="%s"' % lang
else:
return ''
result = []
if style:
result.append(' style="%s"' % "".join(style))
if aclass:
result.append(' class="%s"' % aclass)
if lang:
result.append(' lang="%s"' % lang)
if id:
result.append(' id="%s"' % id)
if colspan:
result.append(' colspan="%s"' % colspan)
if rowspan:
result.append(' rowspan="%s"' % rowspan)
return ''.join(result)
def hasRawText(self, text):
"""
checks whether the text has text not already enclosed by a block tag
>>> t = Textile()
>>> t.hasRawText('<p>foo bar biz baz</p>')
False
>>> t.hasRawText(' why yes, yes it does')
True
"""
r = re.compile(r'<(p|blockquote|div|form|table|ul|ol|pre|h\d)[^>]*?>.*</\1>', re.S).sub('', text.strip()).strip()
r = re.compile(r'<(hr|br)[^>]*?/>').sub('', r)
return '' != r
def table(self, text):
r"""
>>> t = Textile()
>>> t.table('|one|two|three|\n|a|b|c|')
'\t<table>\n\t\t<tr>\n\t\t\t<td>one</td>\n\t\t\t<td>two</td>\n\t\t\t<td>three</td>\n\t\t</tr>\n\t\t<tr>\n\t\t\t<td>a</td>\n\t\t\t<td>b</td>\n\t\t\t<td>c</td>\n\t\t</tr>\n\t</table>\n\n'
"""
text = text + "\n\n"
pattern = re.compile(r'^(?:table(_?%(s)s%(a)s%(c)s)\. ?\n)?^(%(a)s%(c)s\.? ?\|.*\|)\n\n' % {'s':self.s, 'a':self.a, 'c':self.c}, re.S|re.M|re.U)
return pattern.sub(self.fTable, text)
def fTable(self, match):
tatts = self.pba(match.group(1), 'table')
rows = []
for row in [ x for x in match.group(2).split('\n') if x]:
rmtch = re.search(r'^(%s%s\. )(.*)' % (self.a, self.c), row.lstrip())
if rmtch:
ratts = self.pba(rmtch.group(1), 'tr')
row = rmtch.group(2)
else:
ratts = ''
cells = []
for cell in row.split('|')[1:-1]:
ctyp = 'd'
if re.search(r'^_', cell):
ctyp = "h"
cmtch = re.search(r'^(_?%s%s%s\. )(.*)' % (self.s, self.a, self.c), cell)
if cmtch:
catts = self.pba(cmtch.group(1), 'td')
cell = cmtch.group(2)
else:
catts = ''
cell = self.graf(self.span(cell))
cells.append('\t\t\t<t%s%s>%s</t%s>' % (ctyp, catts, cell, ctyp))
rows.append("\t\t<tr%s>\n%s\n\t\t</tr>" % (ratts, '\n'.join(cells)))
cells = []
catts = None
return "\t<table%s>\n%s\n\t</table>\n\n" % (tatts, '\n'.join(rows))
def lists(self, text):
"""
>>> t = Textile()
>>> t.lists("* one\\n* two\\n* three")
'\\t<ul>\\n\\t\\t<li>one</li>\\n\\t\\t<li>two</li>\\n\\t\\t<li>three</li>\\n\\t</ul>'
"""
pattern = re.compile(r'^([#*]+%s .*)$(?![^#*])' % self.c, re.U|re.M|re.S)
return pattern.sub(self.fList, text)
def fList(self, match):
text = match.group(0).split("\n")
result = []
lists = []
for i, line in enumerate(text):
try:
nextline = text[i+1]
except IndexError:
nextline = ''
m = re.search(r"^([#*]+)(%s%s) (.*)$" % (self.a, self.c), line, re.S)
if m:
tl, atts, content = m.groups()
nl = ''
nm = re.search(r'^([#*]+)\s.*', nextline)
if nm:
nl = nm.group(1)
if tl not in lists:
lists.append(tl)
atts = self.pba(atts)
line = "\t<%sl%s>\n\t\t<li>%s" % (self.lT(tl), atts, self.graf(content))
else:
line = "\t\t<li>" + self.graf(content)
if len(nl) <= len(tl):
line = line + "</li>"
for k in reversed(lists):
if len(k) > len(nl):
line = line + "\n\t</%sl>" % self.lT(k)
if len(k) > 1:
line = line + "</li>"
lists.remove(k)
result.append(line)
return "\n".join(result)
def lT(self, input):
if re.search(r'^#+', input):
return 'o'
else:
return 'u'
def doPBr(self, in_):
return re.compile(r'<(p)([^>]*?)>(.*)(</\1>)', re.S).sub(self.doBr, in_)
def doBr(self, match):
if self.html_type == 'html':
content = re.sub(r'(.+)(?:(?<!<br>)|(?<!<br />))\n(?![#*\s|])', '\\1<br>', match.group(3))
else:
content = re.sub(r'(.+)(?:(?<!<br>)|(?<!<br />))\n(?![#*\s|])', '\\1<br />', match.group(3))
return '<%s%s>%s%s' % (match.group(1), match.group(2), content, match.group(4))
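    # Illustrative effect of doPBr/doBr (sketch): with xhtml output,
    #   doPBr('<p>line one\nline two</p>')  ->  '<p>line one<br />line two</p>'
    # (with html_type == 'html' the inserted tag is <br> instead).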
def block(self, text, head_offset = 0):
"""
>>> t = Textile()
>>> t.block('h1. foobar baby')
'\\t<h1>foobar baby</h1>'
"""
if not self.lite:
tre = '|'.join(self.btag)
else:
tre = '|'.join(self.btag_lite)
text = text.split('\n\n')
tag = 'p'
atts = cite = graf = ext = c1 = ''
out = []
anon = False
for line in text:
pattern = r'^(%s)(%s%s)\.(\.?)(?::(\S+))? (.*)$' % (tre, self.a, self.c)
match = re.search(pattern, line, re.S)
if match:
if ext:
out.append(out.pop() + c1)
tag, atts, ext, cite, graf = match.groups()
h_match = re.search(r'h([1-6])', tag)
if h_match:
head_level, = h_match.groups()
tag = 'h%i' % max(1,
min(int(head_level) + head_offset,
6))
o1, o2, content, c2, c1 = self.fBlock(tag, atts, ext,
cite, graf)
# leave off c1 if this block is extended,
# we'll close it at the start of the next block
if ext:
line = "%s%s%s%s" % (o1, o2, content, c2)
else:
line = "%s%s%s%s%s" % (o1, o2, content, c2, c1)
else:
anon = True
if ext or not re.search(r'^\s', line):
o1, o2, content, c2, c1 = self.fBlock(tag, atts, ext,
cite, line)
# skip $o1/$c1 because this is part of a continuing
# extended block
if tag == 'p' and not self.hasRawText(content):
line = content
else:
line = "%s%s%s" % (o2, content, c2)
else:
line = self.graf(line)
line = self.doPBr(line)
if self.html_type == 'xhtml':
line = re.sub(r'<br>', '<br />', line)
if ext and anon:
out.append(out.pop() + "\n" + line)
else:
out.append(line)
if not ext:
tag = 'p'
atts = ''
cite = ''
graf = ''
if ext:
out.append(out.pop() + c1)
return '\n\n'.join(out)
def fBlock(self, tag, atts, ext, cite, content):
"""
>>> t = Textile()
>>> t.fBlock("bq", "", None, "", "Hello BlockQuote")
('\\t<blockquote>\\n', '\\t\\t<p>', 'Hello BlockQuote', '</p>', '\\n\\t</blockquote>')
>>> t.fBlock("bq", "", None, "http://google.com", "Hello BlockQuote")
('\\t<blockquote cite="http://google.com">\\n', '\\t\\t<p>', 'Hello BlockQuote', '</p>', '\\n\\t</blockquote>')
>>> t.fBlock("bc", "", None, "", 'printf "Hello, World";') # doctest: +ELLIPSIS
('<pre>', '<code>', ..., '</code>', '</pre>')
>>> t.fBlock("h1", "", None, "", "foobar")
('', '\\t<h1>', 'foobar', '</h1>', '')
"""
atts = self.pba(atts)
o1 = o2 = c2 = c1 = ''
m = re.search(r'fn(\d+)', tag)
if m:
tag = 'p'
if m.group(1) in self.fn:
fnid = self.fn[m.group(1)]
else:
fnid = m.group(1)
atts = atts + ' id="fn%s"' % fnid
if atts.find('class=') < 0:
atts = atts + ' class="footnote"'
content = ('<sup>%s</sup>' % m.group(1)) + content
if tag == 'bq':
cite = self.checkRefs(cite)
if cite:
cite = ' cite="%s"' % cite
else:
cite = ''
o1 = "\t<blockquote%s%s>\n" % (cite, atts)
o2 = "\t\t<p%s>" % atts
c2 = "</p>"
c1 = "\n\t</blockquote>"
elif tag == 'bc':
o1 = "<pre%s>" % atts
o2 = "<code%s>" % atts
c2 = "</code>"
c1 = "</pre>"
content = self.shelve(self.encode_html(content.rstrip("\n") + "\n"))
elif tag == 'notextile':
content = self.shelve(content)
o1 = o2 = ''
c1 = c2 = ''
elif tag == 'pre':
content = self.shelve(self.encode_html(content.rstrip("\n") + "\n"))
o1 = "<pre%s>" % atts
o2 = c2 = ''
c1 = '</pre>'
else:
o2 = "\t<%s%s>" % (tag, atts)
c2 = "</%s>" % tag
content = self.graf(content)
return o1, o2, content, c2, c1
def footnoteRef(self, text):
"""
>>> t = Textile()
>>> t.footnoteRef('foo[1] ') # doctest: +ELLIPSIS
'foo<sup class="footnote"><a href="#fn...">1</a></sup> '
"""
return re.sub(r'\b\[([0-9]+)\](\s)?', self.footnoteID, text)
def footnoteID(self, match):
id, t = match.groups()
if id not in self.fn:
self.fn[id] = str(uuid.uuid4())
fnid = self.fn[id]
if not t:
t = ''
return '<sup class="footnote"><a href="#fn%s">%s</a></sup>%s' % (fnid, id, t)
def glyphs(self, text):
"""
>>> t = Textile()
>>> t.glyphs("apostrophe's")
'apostrophe’s'
>>> t.glyphs("back in '88")
'back in ’88'
>>> t.glyphs('foo ...')
'foo …'
>>> t.glyphs('--')
'—'
>>> t.glyphs('FooBar[tm]')
'FooBar™'
>>> t.glyphs("<p><cite>Cat's Cradle</cite> by Vonnegut</p>")
'<p><cite>Cat’s Cradle</cite> by Vonnegut</p>'
"""
# fix: hackish
text = re.sub(r'"\Z', '\" ', text)
result = []
for line in re.compile(r'(<.*?>)', re.U).split(text):
if not re.search(r'<.*>', line):
rules = []
if re.search(r'{.+?}', line):
rules = self.macro_defaults + self.glyph_defaults
else:
rules = self.glyph_defaults
for s, r in rules:
line = s.sub(r, line)
result.append(line)
return ''.join(result)
def macros_only(self, text):
# fix: hackish
text = re.sub(r'"\Z', '\" ', text)
result = []
for line in re.compile(r'(<.*?>)', re.U).split(text):
if not re.search(r'<.*>', line):
rules = []
if re.search(r'{.+?}', line):
rules = self.macro_defaults
for s, r in rules:
line = s.sub(r, line)
result.append(line)
return ''.join(result)
def vAlign(self, input):
d = {'^':'top', '-':'middle', '~':'bottom'}
return d.get(input, '')
def hAlign(self, input):
d = {'<':'left', '=':'center', '>':'right', '<>': 'justify'}
return d.get(input, '')
def getRefs(self, text):
"""
what is this for?
"""
pattern = re.compile(r'(?:(?<=^)|(?<=\s))\[(.+)\]((?:http(?:s?):\/\/|\/)\S+)(?=\s|$)', re.U)
text = pattern.sub(self.refs, text)
return text
def refs(self, match):
flag, url = match.groups()
self.urlrefs[flag] = url
return ''
def checkRefs(self, url):
return self.urlrefs.get(url, url)
def isRelURL(self, url):
"""
Identify relative urls.
>>> t = Textile()
>>> t.isRelURL("http://www.google.com/")
False
>>> t.isRelURL("/foo")
True
"""
(scheme, netloc) = urlparse(url)[0:2]
return not scheme and not netloc
def relURL(self, url):
scheme = urlparse(url)[0]
if self.restricted and scheme and scheme not in self.url_schemes:
return '#'
return url
def shelve(self, text):
id = str(uuid.uuid4()) + 'c'
self.shelf[id] = text
return id
def retrieve(self, text):
"""
>>> t = Textile()
>>> id = t.shelve("foobar")
>>> t.retrieve(id)
'foobar'
"""
while True:
old = text
for k, v in self.shelf.items():
text = text.replace(k, v)
if text == old:
break
return text
def encode_html(self, text, quotes=True):
a = (
('&', '&'),
('<', '<'),
('>', '>')
)
if quotes:
a = a + (
("'", '''),
('"', '"')
)
for k, v in a:
text = text.replace(k, v)
return text
def graf(self, text):
if not self.lite:
text = self.noTextile(text)
text = self.code(text)
text = self.links(text)
if not self.noimage:
text = self.image(text)
if not self.lite:
text = self.lists(text)
text = self.table(text)
text = self.span(text)
text = self.footnoteRef(text)
text = self.glyphs(text)
return text.rstrip('\n')
def links(self, text):
"""
>>> t = Textile()
>>> t.links('fooobar "Google":http://google.com/foobar/ and hello world "flickr":http://flickr.com/photos/jsamsa/ ') # doctest: +ELLIPSIS
'fooobar ... and hello world ...'
"""
text = self.macros_only(text)
punct = '!"#$%&\'*+,-./:;=?@\\^_`|~'
pattern = r'''
(?P<pre> [\s\[{(]|[%s] )?
" # start
(?P<atts> %s )
(?P<text> [^"]+? )
\s?
(?: \(([^)]+?)\)(?=") )? # $title
":
(?P<url> (?:ftp|https?)? (?: :// )? [-A-Za-z0-9+&@#/?=~_()|!:,.;]*[-A-Za-z0-9+&@#/=~_()|] )
(?P<post> [^\w\/;]*? )
(?=<|\s|$)
''' % (re.escape(punct), self.c)
text = re.compile(pattern, re.X).sub(self.fLink, text)
return text
def fLink(self, match):
pre, atts, text, title, url, post = match.groups()
if pre == None:
pre = ''
# assume ) at the end of the url is not actually part of the url
# unless the url also contains a (
if url.endswith(')') and not url.find('(') > -1:
post = url[-1] + post
url = url[:-1]
url = self.checkRefs(url)
atts = self.pba(atts)
if title:
atts = atts + ' title="%s"' % self.encode_html(title)
if not self.noimage:
text = self.image(text)
text = self.span(text)
text = self.glyphs(text)
url = self.relURL(url)
out = '<a href="%s"%s%s>%s</a>' % (self.encode_html(url), atts, self.rel, text)
out = self.shelve(out)
return ''.join([pre, out, post])
def span(self, text):
"""
>>> t = Textile()
>>> t.span(r"hello %(bob)span *strong* and **bold**% goodbye")
'hello <span class="bob">span <strong>strong</strong> and <b>bold</b></span> goodbye'
"""
qtags = (r'\*\*', r'\*', r'\?\?', r'\-', r'__', r'_', r'%', r'\+', r'~', r'\^')
pnct = ".,\"'?!;:"
for qtag in qtags:
pattern = re.compile(r"""
(?:^|(?<=[\s>%(pnct)s\(])|\[|([\]}]))
(%(qtag)s)(?!%(qtag)s)
(%(c)s)
(?::(\S+))?
([^\s%(qtag)s]+|\S[^%(qtag)s\n]*[^\s%(qtag)s\n])
([%(pnct)s]*)
%(qtag)s
(?:$|([\]}])|(?=%(selfpnct)s{1,2}|\s))
""" % {'qtag':qtag, 'c':self.c, 'pnct':pnct,
'selfpnct':self.pnct}, re.X)
text = pattern.sub(self.fSpan, text)
return text
def fSpan(self, match):
_, tag, atts, cite, content, end, _ = match.groups()
qtags = {
'*': 'strong',
'**': 'b',
'??': 'cite',
'_' : 'em',
'__': 'i',
'-' : 'del',
'%' : 'span',
'+' : 'ins',
'~' : 'sub',
'^' : 'sup'
}
tag = qtags[tag]
atts = self.pba(atts)
if cite:
            atts = atts + ' cite="%s"' % cite
content = self.span(content)
out = "<%s%s>%s%s</%s>" % (tag, atts, content, end, tag)
return out
def image(self, text):
"""
>>> t = Textile()
>>> t.image('!/imgs/myphoto.jpg!:http://jsamsa.com')
'<a href="http://jsamsa.com"><img src="/imgs/myphoto.jpg" alt="" /></a>'
"""
pattern = re.compile(r"""
(?:[\[{])? # pre
\! # opening !
(%s) # optional style,class atts
(?:\. )? # optional dot-space
([^\s(!]+) # presume this is the src
\s? # optional space
(?:\(([^\)]+)\))? # optional title
\! # closing
(?::(\S+))? # optional href
(?:[\]}]|(?=\s|$)) # lookahead: space or end of string
""" % self.c, re.U|re.X)
return pattern.sub(self.fImage, text)
def fImage(self, match):
# (None, '', '/imgs/myphoto.jpg', None, None)
atts, url, title, href = match.groups()
atts = self.pba(atts)
if title:
atts = atts + ' title="%s" alt="%s"' % (title, title)
else:
atts = atts + ' alt=""'
if not self.isRelURL(url) and self.get_sizes:
size = getimagesize(url)
if (size):
atts += " %s" % size
if href:
href = self.checkRefs(href)
url = self.checkRefs(url)
url = self.relURL(url)
out = []
if href:
out.append('<a href="%s" class="img">' % href)
if self.html_type == 'html':
out.append('<img src="%s"%s>' % (url, atts))
else:
out.append('<img src="%s"%s />' % (url, atts))
if href:
out.append('</a>')
return ''.join(out)
def code(self, text):
text = self.doSpecial(text, '<code>', '</code>', self.fCode)
text = self.doSpecial(text, '@', '@', self.fCode)
text = self.doSpecial(text, '<pre>', '</pre>', self.fPre)
return text
def fCode(self, match):
before, text, after = match.groups()
if after == None:
after = ''
# text needs to be escaped
if not self.restricted:
text = self.encode_html(text)
return ''.join([before, self.shelve('<code>%s</code>' % text), after])
def fPre(self, match):
before, text, after = match.groups()
if after == None:
after = ''
        # text needs to be escaped
if not self.restricted:
text = self.encode_html(text)
return ''.join([before, '<pre>', self.shelve(text), '</pre>', after])
def doSpecial(self, text, start, end, method=None):
if method == None:
method = self.fSpecial
pattern = re.compile(r'(^|\s|[\[({>])%s(.*?)%s(\s|$|[\])}])?' % (re.escape(start), re.escape(end)), re.M|re.S)
return pattern.sub(method, text)
def fSpecial(self, match):
"""
special blocks like notextile or code
"""
before, text, after = match.groups()
if after == None:
after = ''
return ''.join([before, self.shelve(self.encode_html(text)), after])
def noTextile(self, text):
text = self.doSpecial(text, '<notextile>', '</notextile>', self.fTextile)
return self.doSpecial(text, '==', '==', self.fTextile)
def fTextile(self, match):
before, notextile, after = match.groups()
if after == None:
after = ''
return ''.join([before, self.shelve(notextile), after])
def textile(text, head_offset=0, html_type='xhtml', encoding=None, output=None):
"""
this function takes additional parameters:
head_offset - offset to apply to heading levels (default: 0)
html_type - 'xhtml' or 'html' style tags (default: 'xhtml')
"""
return Textile().textile(text, head_offset=head_offset,
html_type=html_type)
def textile_restricted(text, lite=True, noimage=True, html_type='xhtml'):
"""
Restricted version of Textile designed for weblog comments and other
untrusted input.
Raw HTML is escaped.
Style attributes are disabled.
rel='nofollow' is added to external links.
When lite=True is set (the default):
Block tags are restricted to p, bq, and bc.
Lists and tables are disabled.
When noimage=True is set (the default):
Image tags are disabled.
"""
return Textile(restricted=True, lite=lite,
noimage=noimage).textile(text, rel='nofollow',
html_type=html_type)
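# Illustrative usage (sketch; exact output also depends on the smartyPants pass):
#   textile('h1. Hello')                          # -> roughly '\t<h1>Hello</h1>'
#   textile_restricted('"x":http://example.com')  # escapes raw HTML, link gets rel="nofollow"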
|
gpl-3.0
| 4,155,345,643,385,993,700 | 5,343,938,463,846,575,000 | 34.776867 | 193 | 0.422931 | false |
kaichogami/scikit-learn
|
sklearn/utils/multiclass.py
|
40
|
12966
|
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
    - mix of label indicator matrix and anything else
      (because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a multilabel format, else ```False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def check_classification_targets(y):
"""Ensure that target y is of a non-regression type.
Only the following target types (as defined in type_of_target) are allowed:
'binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences'
Parameters
----------
y : array-like
"""
y_type = type_of_target(y)
if y_type not in ['binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences']:
raise ValueError("Unknown label type: %r" % y_type)
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
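# Illustrative calling pattern (sketch, not part of this module): an estimator's
# partial_fit would typically begin with
#   def partial_fit(self, X, y, classes=None):
#       if _check_partial_fit_first_call(self, classes):
#           # first call: self.classes_ has just been set, so allocate per-class state here
#           ...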
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
# An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
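# Illustrative example for the dense branch (sketch):
#   y = np.array([[1], [2], [2]])
#   classes, n_classes, priors = class_distribution(y)
#   # classes  -> [array([1, 2])], n_classes -> [2],
#   # priors   -> [array([0.333..., 0.666...])]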
|
bsd-3-clause
| -4,523,469,250,931,309,000 | 9,163,613,711,347,660,000 | 32.417526 | 79 | 0.57188 | false |
h2oai/h2o
|
py/testdir_single_jvm/test_GLM2_gaussian_rand2.py
|
9
|
1837
|
import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_import as h2i
def define_params():
paramDict = {
'standardize': [None, 0,1],
'beta_epsilon': [None, 0.0001],
'ignored_cols': [None, 0, 1, 15, 33, 34],
'family': ['gaussian'],
'n_folds': [2, 3, 4, 9],
'lambda': [None, 0, 1e-8, 1e-4],
'alpha': [None, 0, 0.5, 0.75],
'beta_epsilon': [None, 0.0001],
'max_iter': [None, 10],
}
return paramDict
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_gaussian_rand2(self):
csvPathname = 'standard/covtype.data'
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='put')
paramDict = define_params()
for trial in range(20):
# params is mutable. This is default.
params = {
'response': 54,
'n_folds': 3,
'family': "gaussian",
'alpha': 0.5,
'lambda': 1e-4,
'max_iter': 30
}
colX = h2o_glm.pickRandGlmParams(paramDict, params)
kwargs = params.copy()
start = time.time()
glm = h2o_cmd.runGLM(timeoutSecs=300, parseResult=parseResult, **kwargs)
print "glm end on ", csvPathname, 'took', time.time() - start, 'seconds'
h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
print "Trial #", trial, "completed\n"
if __name__ == '__main__':
h2o.unit_main()
|
apache-2.0
| 7,596,977,274,197,445,000 | -8,356,473,391,746,987,000 | 30.135593 | 101 | 0.522591 | false |
ONEcampaign/humanitarian-data-service
|
displacement_tracker_data.py
|
1
|
27157
|
import requests
import pandas as pd
import os.path
import resources.constants
import json
from pandas.io.json import json_normalize
from utils.data_utils import get_ordinal_number
"""
This script aggregates data from multiple endpoints and returns a single .json file containing all data
used in the displacement tracker project.
Scheduling this script would mean that the /displacement_tracker endpoint always returned the latest data
contained within the Humanitarian Data Service API.
"""
# For development
#ROOT = 'http://localhost:5000'
# For live
ROOT = 'http://ec2-34-200-18-111.compute-1.amazonaws.com'
# Set year for country-level funding data
FUNDING_YEAR = 2016
# Define all endpoints
URL_POPULATIONS_REFUGEELIKE_ASYLUM = '/populations/refugeelike/asylum/index'
URL_POPULATIONS_REFUGEELIKE_ORIGIN = '/populations/refugeelike/origin/index'
URL_INDICATORS_GNI = '/indicators/gni/index'
URL_PLANS_PROGRESS = '/funding/plans/progress/index'
URL_POPULATION = '/populations/totals/index'
URL_FRAGILE_STATE = '/fragility/fragile-state-index/index'
URL_NEEDS = '/needs/plans/index'
URL_FUNDING_DEST_COUNTRY = '/funding/countries/destination/index/{}'.format(FUNDING_YEAR)
URL_FUNDING_DEST_DONORS = '/funding/countries/donors/index'
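# For illustration (added note): with the live ROOT above, URL_FUNDING_DEST_COUNTRY
# resolves to
#   http://ec2-34-200-18-111.compute-1.amazonaws.com/funding/countries/destination/index/2016
# and each endpoint is fetched below with requests.get(...).json().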
# Define path for raw country names data
country_names_path = os.path.join(resources.constants.EXAMPLE_RAW_DATA_PATH, 'UNSD Methodology.csv')
# Define path for relatable geography populations data
relatable_population_path = os.path.join(resources.constants.EXAMPLE_DERIVED_DATA_PATH, '2017_relatable_population_rankings.csv')
# Define path for stories of displacement
displacement_stories_path = os.path.join(resources.constants.EXAMPLE_DERIVED_DATA_PATH, 'stories_of_displacement_links.csv')
# Create a blank dictionary to store metadata for each field
metadata_dict = {}
def merge_data(
funding_year = FUNDING_YEAR,
country_names_path=country_names_path,
relatable_population_path=relatable_population_path,
displacement_stories_path=displacement_stories_path,
url_populations_refugeelike_asylum=(ROOT + URL_POPULATIONS_REFUGEELIKE_ASYLUM),
url_populations_refugeelike_origin=(ROOT + URL_POPULATIONS_REFUGEELIKE_ORIGIN),
url_indicators_gni=(ROOT + URL_INDICATORS_GNI),
url_plans_progress=(ROOT + URL_PLANS_PROGRESS),
url_population=(ROOT + URL_POPULATION),
url_fragile_state=(ROOT + URL_FRAGILE_STATE),
url_needs=(ROOT + URL_NEEDS),
url_funding_dest_country=(ROOT + URL_FUNDING_DEST_COUNTRY),
url_funding_dest_donors=(ROOT + URL_FUNDING_DEST_DONORS)
):
#################### COUNTRY NAMES ####################
# Get the data from .csv
df_country_names = pd.read_csv(country_names_path, encoding='utf-8')
# Select relevant fields
df_country_names = df_country_names[[
'Country or Area',
'ISO-alpha3 Code'
]]
# Add Taiwan
df_country_names.loc[-1] = ["Taiwan", "TWN"]
# Drop null values
df_country_names = df_country_names.dropna()
# Set country code to be the index
df_country_names = df_country_names.set_index('ISO-alpha3 Code')
# Rename fields
df_country_names.rename(columns={'Country or Area': 'Country'}, inplace=True)
#################### DISPLACEMENT STORIES ####################
# Get the data from .csv
df_displacement_stories = pd.read_csv(displacement_stories_path, encoding='utf-8')
# Set country code to be the index
df_displacement_stories = df_displacement_stories.set_index('countryCode')
# Select relevant fields
df_displacement_stories = df_displacement_stories[[
'storyTitle', 'storySource',
'storyTagLine', 'storyURL'
]]
# Drop null values
df_displacement_stories = df_displacement_stories.dropna()
# Add metadata for each field to overall metadata dictionary
for column in df_displacement_stories.columns:
metadata_dict[column] = {}
#################### POPULATIONS ####################
# Get the data from the API
population_data = requests.get(url_population).json()
# Extract metadata
if 'metadata' in population_data:
population_metadata = population_data['metadata']
else:
population_metadata = {}
# Build dataframe
df_population = pd.DataFrame(population_data['data']).T
# Select relevant fields
df_population = df_population[[
'PopTotal'
]]
# Rename fields
df_population.rename(columns={'PopTotal': 'Population'}, inplace=True)
# Drop null values
df_population = df_population.dropna()
# Add metadata for each field to overall metadata dictionary
for column in df_population.columns:
metadata_dict[column] = population_metadata
#################### FRAGILE STATE ####################
# Get the data from the API
fragile_state_data = requests.get(url_fragile_state).json()
# Extract metadata
if 'metadata' in fragile_state_data:
fragile_state_metadata = fragile_state_data['metadata']
else:
fragile_state_metadata = {}
# Build a dataframe
df_fragile_state = pd.DataFrame(fragile_state_data['data']).T
# Select relevant fields
df_fragile_state = df_fragile_state[[
'Total', 'Rank'
]]
# Rename fields
df_fragile_state.rename(columns={'Total': 'Fragile State Index Score',
'Rank': 'Fragile State Index Rank'}, inplace=True)
# Drop null values
df_fragile_state = df_fragile_state.dropna()
# Add metadata for each field to overall metadata dictionary
for column in df_fragile_state.columns:
metadata_dict[column] = fragile_state_metadata
#################### POPULATIONS_REFUGEELIKE_ASYLUM ####################
# Get the data from the API
populations_refugeelike_asylum_data = requests.get(url_populations_refugeelike_asylum).json()
# Extract metadata
if 'metadata' in populations_refugeelike_asylum_data:
populations_refugeelike_asylum_metadata = populations_refugeelike_asylum_data['metadata']
else:
populations_refugeelike_asylum_metadata = {}
# Build a dataframe
df_populations_refugeelike_asylum = pd.DataFrame(populations_refugeelike_asylum_data['data']).T
# Select relevant fields
df_populations_refugeelike_asylum = df_populations_refugeelike_asylum[[
'Total population of concern', 'Total Refugee and people in refugee-like situations',
'IDPs protected/assisted by UNHCR, incl. people in IDP-like situations','Asylum-seekers'
]]
# Rename fields
df_populations_refugeelike_asylum.rename(columns={
'IDPs protected/assisted by UNHCR, incl. people in IDP-like situations': 'IDPs protected/assisted by UNHCR',
'Asylum-seekers': 'Asylum-seekers (asylum)'
}, inplace=True)
    # Add field to rank total population of concern
df_populations_refugeelike_asylum['Rank of total population of concern'] = df_populations_refugeelike_asylum[
'Total population of concern'].rank(ascending=False, method='min').astype(int)
# Add field to add refugees and asylum-seekers
df_populations_refugeelike_asylum['Total refugees and asylum-seekers (asylum)'] = df_populations_refugeelike_asylum[
'Total Refugee and people in refugee-like situations'] + df_populations_refugeelike_asylum['Asylum-seekers (asylum)']
# Drop null values
df_populations_refugeelike_asylum = df_populations_refugeelike_asylum.dropna()
# Add metadata for each field to overall metadata dictionary
for column in df_populations_refugeelike_asylum.columns:
metadata_dict[column] = populations_refugeelike_asylum_metadata
#################### POPULATIONS_REFUGEELIKE_ORIGIN ####################
# Get the data from the API
populations_refugeelike_origin_data = requests.get(url_populations_refugeelike_origin).json()
# Extract metadata
if 'metadata' in populations_refugeelike_origin_data:
populations_refugeelike_origin_metadata = populations_refugeelike_origin_data['metadata']
else:
populations_refugeelike_origin_metadata = {}
# Build a dataframe
df_populations_refugeelike_origin = pd.DataFrame(populations_refugeelike_origin_data['data']).T
# Select relevant fields
df_populations_refugeelike_origin = df_populations_refugeelike_origin[[
'Total Refugee and people in refugee-like situations', 'Asylum-seekers'
]]
# Rename fields
df_populations_refugeelike_origin.rename(columns={
'Total Refugee and people in refugee-like situations': 'Total refugees who have fled from country',
'Asylum-seekers': 'Asylum-seekers (origin)'
}, inplace=True)
# Add field to add refugees and asylum-seekers
df_populations_refugeelike_origin['Total refugees and asylum-seekers (origin)'] = df_populations_refugeelike_origin[
'Total refugees who have fled from country'] + df_populations_refugeelike_origin['Asylum-seekers (origin)']
# Drop null values
df_populations_refugeelike_origin = df_populations_refugeelike_origin.dropna()
# Add metadata for each field to overall metadata dictionary
for column in df_populations_refugeelike_origin.columns:
metadata_dict[column] = populations_refugeelike_origin_metadata
#################### INDICATORS GNI ####################
# Get the data from the API
indicators_gni_data = requests.get(url_indicators_gni).json()
# Extract metadata
if 'metadata' in indicators_gni_data:
indicators_gni_metadata = indicators_gni_data['metadata']
else:
indicators_gni_metadata = {}
# Build a dataframe
df_indicators_gni = pd.DataFrame(indicators_gni_data['data']).T
# Select relevant fields
df_indicators_gni = df_indicators_gni[[
'2015'
]]
# Rename fields
df_indicators_gni.rename(columns={'2015': 'GDP Per Capita'}, inplace=True)
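    # Note: the value pulled from the GNI endpoint is exposed downstream under the
    # label 'GDP Per Capita'.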
# Drop null values
df_indicators_gni = df_indicators_gni.dropna()
# Add metadata for each field to overall metadata dictionary
for column in df_indicators_gni.columns:
metadata_dict[column] = indicators_gni_metadata
#################### PLANS PROGRESS ####################
# Get the data from the API
plans_progress_data = requests.get(url_plans_progress).json()
# Extract metadata
if 'metadata' in plans_progress_data:
plans_progress_metadata = plans_progress_data['metadata']
else:
plans_progress_metadata = {}
# Build a dataframe
df_plans_progress = pd.DataFrame(plans_progress_data['data']).T
# Select relevant fields
df_plans_progress = df_plans_progress[[
'appealFunded', 'revisedRequirements', 'neededFunding'
]]
# Rename fields
df_plans_progress.rename(columns={'appealFunded': 'Appeal funds committed to date',
'revisedRequirements': 'Appeal funds requested',
'neededFunding': 'Appeal funds still needed'}, inplace=True)
df_plans_progress['Appeal percent funded'] = df_plans_progress['Appeal funds committed to date']/df_plans_progress['Appeal funds requested']
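    # Note: 'Appeal percent funded' is stored as a fraction (committed / requested),
    # not multiplied by 100.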
# Drop null values
df_plans_progress = df_plans_progress.dropna()
# Add metadata for each field to overall metadata dictionary
for column in df_plans_progress.columns:
metadata_dict[column] = plans_progress_metadata
# Add an FTS data as-of date so it can be included in the .csv data dump
df_plans_progress['FTS funding data as-of date'] = plans_progress_data['metadata']['source_data']
######## FUNDING BY DESTINATION COUNTRY ############
#Get the data from the API
funding_dest_country_data = requests.get(url_funding_dest_country).json()
# Extract metadata
if 'metadata' in funding_dest_country_data:
funding_dest_country_metadata = funding_dest_country_data['metadata']
else:
funding_dest_country_metadata = {}
# Build a dataframe
df_funding_dest_country = pd.DataFrame(funding_dest_country_data['data']).T
# Select relevant fields
df_funding_dest_country = df_funding_dest_country[[
'totalFunding'
]]
# Keep only records where totalFunding > 0
df_funding_dest_country = df_funding_dest_country[df_funding_dest_country['totalFunding'] > 0]
# Rename fields
df_funding_dest_country.rename(columns={'totalFunding': 'Humanitarian aid received'},
inplace=True)
    # Add field to rank humanitarian aid received
df_funding_dest_country['Rank of humanitarian aid received'] = df_funding_dest_country[
'Humanitarian aid received'].rank(ascending=False, method='min').astype(int)
# Drop null values
df_funding_dest_country = df_funding_dest_country.dropna()
# Add metadata for each field to overall metadata dictionary
for column in df_funding_dest_country.columns:
metadata_dict[column] = funding_dest_country_metadata
################## TOP 5 DONORS TO EACH DESTINATION COUNTRY ###################
#Get the data from the API
funding_dest_donors_data = requests.get(url_funding_dest_donors).json()
# Extract metadata
if 'metadata' in funding_dest_donors_data:
funding_dest_donors_metadata = funding_dest_donors_data['metadata']
else:
funding_dest_donors_metadata = {}
# Build a dataframe
df_funding_dest_donors = json_normalize(funding_dest_donors_data['data']).T
#df_funding_dest_donors = pd.DataFrame(funding_dest_donors_data['data']).T
df_funding_dest_donors.columns = (['Top 5 Donors'])
# Add metadata for each field to overall metadata dictionary
for column in df_funding_dest_donors.columns:
metadata_dict[column] = funding_dest_donors_metadata
#################### NEEDS ####################
# Get the data from the API
needs_data = requests.get(url_needs).json()
# Extract metadata
if 'metadata' in needs_data:
needs_metadata = needs_data['metadata']
else:
needs_metadata = {}
# Build a dataframe
df_needs = pd.DataFrame(needs_data['data']).T
# Exclude rows where country code is missing
df_needs = df_needs.drop('null')
# Select relevant fields
df_needs = df_needs[[
'inNeedTotal', 'inNeedHealth', 'inNeedEducation',
'inNeedFoodSecurity', 'inNeedProtection', 'sourceURL',
'inNeedShelter-CCCM-NFI', 'inNeedWASH', 'sourceType'
]]
# Rename fields
df_needs.rename(columns={'inNeedTotal': 'Total people in need',
'inNeedHealth': 'People in need of health support',
'inNeedEducation': 'Children in need of education',
'inNeedFoodSecurity': 'People who are food insecure',
'inNeedProtection': 'People in need of protection',
'inNeedShelter-CCCM-NFI': 'People in need of shelter',
'inNeedWASH': 'People in need of water, sanitization & hygiene',
'sourceURL': 'Source of needs data',
'sourceType': 'Source type of needs data'
}, inplace=True)
# Add metadata for each field to overall metadata dictionary
for column in df_needs.columns:
metadata_dict[column] = needs_metadata
######## FIND PLACES WITH SIMILAR POPULATIONS TO PEOPLE IN NEED ########
# Get the relateable populations data from .csv
df_relatable_populations = pd.read_csv(relatable_population_path)
df_relatable_populations['Population'] = df_relatable_populations[[
'Population - World Bank (2015)','Population - UNFPA (2016)'
]].max(axis=1)
df_relatable_populations = df_relatable_populations[['City, State, Country','Population']].dropna()
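    # The two helpers below pick the relatable place whose population is closest to
    # a reference value by sorting the absolute differences and taking the first
    # (smallest) entry.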
def find_nearest_place_population(reference_value):
if reference_value:
nearest_row = df_relatable_populations.iloc[(df_relatable_populations['Population']- reference_value).abs().argsort()[0]]
nearest_population = nearest_row['Population']
else:
nearest_population = 0.00
return nearest_population
def find_nearest_place(reference_value):
if reference_value:
nearest_row = df_relatable_populations.iloc[(df_relatable_populations['Population']- reference_value).abs().argsort()[0]]
nearest_place = nearest_row['City, State, Country']
else:
nearest_place = ''
return nearest_place
df_needs['Place with similar population as people in need'] = df_needs['Total people in need'].apply(
find_nearest_place)
# Add metadata
metadata_dict['Place with similar population as people in need'] = {}
df_needs['Population of place with similar population'] = df_needs['Total people in need'].apply(
find_nearest_place_population)
# Add metadata
metadata_dict['Population of place with similar population'] = {}
#################### SAMPLE CLUSTERS ####################
# Build a dataframe
# df_clusters = pd.read_json('sample_clusters.json').T
# df_clusters = df_clusters[['clusters']]
################# COMBINE ALL DATA ##############
# Make a list of all dataframes
all_dataframes = [
df_country_names,
df_populations_refugeelike_asylum,
df_indicators_gni,
df_plans_progress,
df_population,
df_fragile_state,
df_needs,
df_funding_dest_country,
df_funding_dest_donors,
df_displacement_stories,
df_populations_refugeelike_origin
# df_clusters
]
df_final = pd.concat(all_dataframes, axis=1)
# Add calculation for displaced people as a ratio of total population
df_final['Population of concern per 1000 population'] = (df_final['Total population of concern'] / df_final[
'Population'])*1000
# And metadata
metadata_dict['Population of concern per 1000 population'] = {}
metadata_dict['Population of concern per 1000 population']['Calculation'] = '(Total population of concern / Population) * 1000'
# Add calculation for displaced people per million GDP
df_final['Population of concern per million GDP'] = ((df_final['Total population of concern'] * 1000000) / (df_final[
'GDP Per Capita'] * df_final['Population']))
# And metadata
metadata_dict['Population of concern per million GDP'] = {}
    metadata_dict['Population of concern per million GDP']['Calculation'] = '(Total population of concern * 1000000) / (GDP Per Capita * Population)'
# Add field to specify whether country has current humanitarian appeal in FTS
df_final['Country has current appeal'] = df_final['Appeal funds requested'].notnull()
# And metadata
metadata_dict['Country has current appeal'] = {}
metadata_dict['Country has current appeal']['Calculation'] = 'Is Appeal funds requested not null'
# Make the ranked variables ordinal
def get_ordinal_number(value):
try:
value = int(value)
except ValueError:
return value
if value % 100 // 10 != 1:
if value % 10 == 1:
ordval = u"%d%s" % (value, "st")
elif value % 10 == 2:
ordval = u"%d%s" % (value, "nd")
elif value % 10 == 3:
ordval = u"%d%s" % (value, "rd")
else:
ordval = u"%d%s" % (value, "th")
else:
ordval = u"%d%s" % (value, "th")
return ordval
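    # Illustrative behaviour: 1 -> '1st', 2 -> '2nd', 3 -> '3rd', 11-13 -> '11th'-'13th'.
    # Note that this local definition shadows the get_ordinal_number imported from
    # utils.data_utils at the top of the module.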
df_final['Rank of total population of concern'] = df_final['Rank of total population of concern'].apply(
get_ordinal_number)
df_final['Rank of humanitarian aid received'] = df_final['Rank of humanitarian aid received'].apply(
get_ordinal_number)
################## STRUCTURE DICTIONARY ##################
# Clean up NaN values
df_final = df_final.fillna('')
# Transform dataframe to dictionary
df_as_dict = df_final.to_dict(orient='index')
# Define field names for each strand
strand_01_fields = ['Appeal funds still needed', 'Appeal funds requested', 'Appeal funds committed to date',
'Appeal percent funded', 'Source of needs data', 'Source type of needs data',
'Total people in need', 'Place with similar population as people in need',
'Population of place with similar population']
strand_02_fields = ['Population of concern per 1000 population', 'Fragile State Index Score',
'Total population of concern',
'IDPs protected/assisted by UNHCR',
'GDP Per Capita',
'Total refugees and asylum-seekers (asylum)',
'Total refugees and asylum-seekers (origin)']
strand_03_fields = ['Humanitarian aid received', 'Appeal funds requested', 'Appeal percent funded',
'Rank of total population of concern', 'Rank of humanitarian aid received']
needs_fields = ['People in need of health support','Children in need of education',
'People who are food insecure','People in need of protection','People in need of shelter',
'People in need of water, sanitization & hygiene']
story_fields = ['storyTitle', 'storySource', 'storyTagLine', 'storyURL']
# For every object, get / group the values by strand
data = {}
for x in df_as_dict.keys():
# Create an empty dict
country_dict = {}
        # Populate the dict with those values that don't require nesting
country_dict['Country'] = df_as_dict[x]['Country']
country_dict['Fragile State Index Rank'] = df_as_dict[x]['Fragile State Index Rank']
country_dict['Country has current appeal'] = df_as_dict[x]['Country has current appeal']
# Populate the dict with story fields
story_fields_dict = {}
if df_as_dict[x]['storyURL']:
for field in story_fields:
story_fields_dict[field] = (df_as_dict[x][field])
country_dict['Displacement_story'] = story_fields_dict
# Populate the dict with strand 1 data if the country has a current appeal
strand_01_dict = {}
if df_as_dict[x]['Country has current appeal']:
strand_01_dict['Needs_Data'] = {}
for names_01 in strand_01_fields:
strand_01_dict[names_01] = (df_as_dict[x][names_01])
for name in needs_fields:
if df_as_dict[x][name] != '':
strand_01_dict['Needs_Data'][name] = (df_as_dict[x][name])
country_dict['Strand_01_Needs'] = strand_01_dict
# Populate the dict with strand 2 data
strand_02_dict = {}
for names_02 in strand_02_fields:
strand_02_dict[names_02] = (df_as_dict[x][names_02])
country_dict['Strand_02_People'] = strand_02_dict
# Populate the dict with strand 3 data
strand_03_dict = {}
strand_03_dict['Top 5 donors of humanitarian aid'] = []
for names_03 in strand_03_fields:
strand_03_dict[names_03] = (df_as_dict[x][names_03])
if df_as_dict[x]['Top 5 Donors']:
strand_03_dict['Top 5 donors of humanitarian aid'] = df_as_dict[x]['Top 5 Donors']
country_dict['Strand_03_Aid'] = strand_03_dict
# Add the country dict to the data dict
data[x] = country_dict
# Add World totals
# Create an empty dict
world_dict = {}
# Populate the dict with aggregated strand 1 data
strand_01_dict = {}
strand_01_dict['Needs_Data'] = {}
strand_01_dict['Total people in need'] = df_needs['Total people in need'].sum()
strand_01_dict['Count of current crises with people in need'] = df_needs['Total people in need'].count()
strand_01_dict['Place with similar population as people in need'] = find_nearest_place(
df_needs['Total people in need'].sum()
)
strand_01_dict['Population of place with similar population'] = find_nearest_place_population(
df_needs['Total people in need'].sum()
)
for name in needs_fields:
strand_01_dict['Needs_Data'][name] = df_needs[name].sum()
world_dict['Strand_01_Needs'] = strand_01_dict
# Add the world dict to the data dict
data['WORLD'] = world_dict
# Create the metadata dict
metadata = {}
    # Populate the dict with those values that don't require nesting
#metadata['Country'] = metadata_dict['Country']
metadata['Fragile State Index Rank'] = metadata_dict['Fragile State Index Rank']
metadata['Country has current appeal'] = metadata_dict['Country has current appeal']
# Populate the dict with story fields
story_fields_dict = {}
if metadata_dict['storyURL']:
for field in story_fields:
story_fields_dict[field] = (metadata_dict[field])
metadata['Displacement_story'] = story_fields_dict
    # Populate the dict with strand 1 metadata
strand_01_dict = {}
strand_01_dict['Needs_Data'] = {}
for names_01 in strand_01_fields:
strand_01_dict[names_01] = (metadata_dict[names_01])
metadata['Strand_01_Needs'] = strand_01_dict
# Populate the dict with strand 2 data
strand_02_dict = {}
for names_02 in strand_02_fields:
strand_02_dict[names_02] = (metadata_dict[names_02])
metadata['Strand_02_People'] = strand_02_dict
# Populate the dict with strand 3 data
strand_03_dict = {}
strand_03_dict['Top 5 donors of humanitarian aid'] = []
for names_03 in strand_03_fields:
strand_03_dict[names_03] = (metadata_dict[names_03])
if metadata_dict['Top 5 Donors']:
strand_03_dict['Top 5 donors of humanitarian aid'] = metadata_dict['Top 5 Donors']
metadata['Strand_03_Aid'] = strand_03_dict
# At the higher level, structure the json with 'data' and 'metadata'
final_json = {
'data': data,
'metadata': metadata
}
return final_json, metadata, df_final
def run():
print 'Pulling and merging data'
final_json, metadata, final_csv = merge_data()
print 'Writing Combined JSON file'
with open(os.path.join(resources.constants.EXAMPLE_DERIVED_DATA_PATH, 'displacement_tracker.json'), 'w') as outfile:
json.dump(final_json, outfile, indent=4, separators=(',', ': '), ensure_ascii=True, sort_keys=True)
print 'Writing Combined JSON metadata file'
with open(os.path.join(resources.constants.EXAMPLE_DERIVED_DATA_PATH, 'displacement_tracker_metadata.json'), 'w') as outfile:
json.dump(metadata, outfile, indent=4, separators=(',', ': '), ensure_ascii=True, sort_keys=True)
print 'Writing Combined CSV file'
final_csv.to_csv(os.path.join(resources.constants.EXAMPLE_DERIVED_DATA_PATH, 'displacement_tracker.csv'), index_label='CountryCode', encoding='utf-8')
if __name__ == "__main__":
run()
|
mit
| -8,832,100,329,281,189,000 | -191,655,108,420,183,740 | 38.07482 | 154 | 0.647347 | false |
anthonyalmarza/trex
|
tests/test_pipelining.py
|
1
|
4322
|
# import sys
from twisted.trial import unittest
from twisted.internet import defer
# from twisted.python import log
import trex
from trex import redis
from .mixins import REDIS_HOST, REDIS_PORT
# log.startLogging(sys.stdout)
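# InspectableTransport wraps a Twisted transport, recording every write() payload
# in write_history while delegating all other attribute access to the wrapped
# transport. Illustrative usage (not part of the original tests):
#   transport = InspectableTransport(real_transport)
#   transport.write("PING\r\n")
#   assert transport.write_history == ["PING\r\n"]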
class InspectableTransport(object):
def __init__(self, transport):
self.original_transport = transport
self.write_history = []
def __getattr__(self, method):
if method == "write":
def write(data, *args, **kwargs):
self.write_history.append(data)
return self.original_transport.write(data, *args, **kwargs)
return write
return getattr(self.original_transport, method)
class TestRedisConnections(unittest.TestCase):
@defer.inlineCallbacks
def _assert_simple_sets_on_pipeline(self, db):
pipeline = yield db.pipeline()
self.assertTrue(pipeline.pipelining)
# Hook into the transport so we can inspect what is happening
# at the protocol level.
pipeline.transport = InspectableTransport(pipeline.transport)
pipeline.set("trex:test_pipeline", "foo")
pipeline.set("trex:test_pipeline", "bar")
pipeline.set("trex:test_pipeline2", "zip")
yield pipeline.execute_pipeline()
self.assertFalse(pipeline.pipelining)
result = yield db.get("trex:test_pipeline")
self.assertEqual(result, "bar")
result = yield db.get("trex:test_pipeline2")
self.assertEqual(result, "zip")
# Make sure that all SET commands were sent in a single pipelined write.
write_history = pipeline.transport.write_history
lines_in_first_write = write_history[0].split("\n")
sets_in_first_write = sum([1 for w in lines_in_first_write if "SET" in w])
self.assertEqual(sets_in_first_write, 3)
@defer.inlineCallbacks
def _wait_for_lazy_connection(self, db):
# For lazy connections, wait for the internal deferred to indicate
# that the connection is established.
yield db._connected
@defer.inlineCallbacks
def test_Connection(self):
db = yield redis.Connection(REDIS_HOST, REDIS_PORT, reconnect=False)
yield self._assert_simple_sets_on_pipeline(db=db)
yield db.disconnect()
@defer.inlineCallbacks
def test_ConnectionDB1(self):
db = yield redis.Connection(REDIS_HOST, REDIS_PORT, dbid=1,
reconnect=False)
yield self._assert_simple_sets_on_pipeline(db=db)
yield db.disconnect()
@defer.inlineCallbacks
def test_ConnectionPool(self):
db = yield redis.ConnectionPool(REDIS_HOST, REDIS_PORT, poolsize=2,
reconnect=False)
yield self._assert_simple_sets_on_pipeline(db=db)
yield db.disconnect()
@defer.inlineCallbacks
def test_lazyConnection(self):
db = redis.lazyConnection(REDIS_HOST, REDIS_PORT, reconnect=False)
yield self._wait_for_lazy_connection(db)
yield self._assert_simple_sets_on_pipeline(db=db)
yield db.disconnect()
@defer.inlineCallbacks
def test_lazyConnectionPool(self):
db = redis.lazyConnectionPool(REDIS_HOST, REDIS_PORT, reconnect=False)
yield self._wait_for_lazy_connection(db)
yield self._assert_simple_sets_on_pipeline(db=db)
yield db.disconnect()
@defer.inlineCallbacks
def test_ShardedConnection(self):
hosts = ["%s:%s" % (REDIS_HOST, REDIS_PORT)]
db = yield redis.ShardedConnection(hosts, reconnect=False)
try:
yield db.pipeline()
raise self.failureException("Expected sharding to disallow pipelining")
except NotImplementedError, e:
self.assertTrue("not supported" in str(e).lower())
yield db.disconnect()
@defer.inlineCallbacks
def test_ShardedConnectionPool(self):
hosts = ["%s:%s" % (REDIS_HOST, REDIS_PORT)]
db = yield redis.ShardedConnectionPool(hosts, reconnect=False)
try:
yield db.pipeline()
raise self.failureException("Expected sharding to disallow pipelining")
except NotImplementedError, e:
self.assertTrue("not supported" in str(e).lower())
yield db.disconnect()
|
mit
| 7,847,216,167,272,910,000 | -2,177,217,913,688,288,300 | 32.246154 | 83 | 0.64484 | false |
adedayo/intellij-community
|
python/lib/Lib/site-packages/django/contrib/staticfiles/management/commands/collectstatic.py
|
71
|
8207
|
import os
import sys
import shutil
from optparse import make_option
from django.conf import settings
from django.core.files.storage import get_storage_class
from django.core.management.base import CommandError, NoArgsCommand
from django.contrib.staticfiles import finders
class Command(NoArgsCommand):
"""
    Command that copies or symlinks static files from different locations to
    settings.STATIC_ROOT.
"""
option_list = NoArgsCommand.option_list + (
make_option('--noinput', action='store_false', dest='interactive',
default=True, help="Do NOT prompt the user for input of any "
"kind."),
make_option('-i', '--ignore', action='append', default=[],
dest='ignore_patterns', metavar='PATTERN',
help="Ignore files or directories matching this glob-style "
"pattern. Use multiple times to ignore more."),
make_option('-n', '--dry-run', action='store_true', dest='dry_run',
default=False, help="Do everything except modify the filesystem."),
make_option('-l', '--link', action='store_true', dest='link',
default=False, help="Create a symbolic link to each file instead of copying."),
make_option('--no-default-ignore', action='store_false',
dest='use_default_ignore_patterns', default=True,
help="Don't ignore the common private glob-style patterns 'CVS', "
"'.*' and '*~'."),
)
help = "Collect static files from apps and other locations in a single location."
def handle_noargs(self, **options):
symlink = options['link']
ignore_patterns = options['ignore_patterns']
if options['use_default_ignore_patterns']:
ignore_patterns += ['CVS', '.*', '*~']
ignore_patterns = list(set(ignore_patterns))
self.copied_files = set()
self.symlinked_files = set()
self.unmodified_files = set()
self.destination_storage = get_storage_class(settings.STATICFILES_STORAGE)()
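        # Probe the storage backend: path('') raises NotImplementedError for
        # non-local storages, which is how a remote destination is detected here.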
try:
self.destination_storage.path('')
except NotImplementedError:
self.destination_local = False
else:
self.destination_local = True
if symlink:
if sys.platform == 'win32':
raise CommandError("Symlinking is not supported by this "
"platform (%s)." % sys.platform)
if not self.destination_local:
raise CommandError("Can't symlink to a remote destination.")
# Warn before doing anything more.
if options.get('interactive'):
confirm = raw_input("""
You have requested to collect static files at the destination
location as specified in your settings file.
This will overwrite existing files.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """)
if confirm != 'yes':
raise CommandError("Static files build cancelled.")
# Use ints for file times (ticket #14665)
os.stat_float_times(False)
for finder in finders.get_finders():
for source, prefix, storage in finder.list(ignore_patterns):
self.copy_file(source, prefix, storage, **options)
verbosity = int(options.get('verbosity', 1))
actual_count = len(self.copied_files) + len(self.symlinked_files)
unmodified_count = len(self.unmodified_files)
if verbosity >= 1:
self.stdout.write("\n%s static file%s %s to '%s'%s.\n"
% (actual_count, actual_count != 1 and 's' or '',
symlink and 'symlinked' or 'copied',
settings.STATIC_ROOT,
unmodified_count and ' (%s unmodified)'
% unmodified_count or ''))
def copy_file(self, source, prefix, source_storage, **options):
"""
Attempt to copy (or symlink) ``source`` to ``destination``,
returning True if successful.
"""
source_path = source_storage.path(source)
try:
source_last_modified = source_storage.modified_time(source)
except (OSError, NotImplementedError):
source_last_modified = None
if prefix:
destination = '/'.join([prefix, source])
else:
destination = source
symlink = options['link']
dry_run = options['dry_run']
verbosity = int(options.get('verbosity', 1))
if destination in self.copied_files:
if verbosity >= 2:
self.stdout.write("Skipping '%s' (already copied earlier)\n"
% destination)
return False
if destination in self.symlinked_files:
if verbosity >= 2:
self.stdout.write("Skipping '%s' (already linked earlier)\n"
% destination)
return False
if self.destination_storage.exists(destination):
try:
destination_last_modified = \
self.destination_storage.modified_time(destination)
except (OSError, NotImplementedError):
# storage doesn't support ``modified_time`` or failed.
pass
else:
destination_is_link = os.path.islink(
self.destination_storage.path(destination))
if destination_last_modified >= source_last_modified:
if (not symlink and not destination_is_link):
if verbosity >= 2:
self.stdout.write("Skipping '%s' (not modified)\n"
% destination)
self.unmodified_files.add(destination)
return False
if dry_run:
if verbosity >= 2:
self.stdout.write("Pretending to delete '%s'\n"
% destination)
else:
if verbosity >= 2:
self.stdout.write("Deleting '%s'\n" % destination)
self.destination_storage.delete(destination)
if symlink:
destination_path = self.destination_storage.path(destination)
if dry_run:
if verbosity >= 1:
self.stdout.write("Pretending to symlink '%s' to '%s'\n"
% (source_path, destination_path))
else:
if verbosity >= 1:
self.stdout.write("Symlinking '%s' to '%s'\n"
% (source_path, destination_path))
try:
os.makedirs(os.path.dirname(destination_path))
except OSError:
pass
os.symlink(source_path, destination_path)
self.symlinked_files.add(destination)
else:
if dry_run:
if verbosity >= 1:
self.stdout.write("Pretending to copy '%s' to '%s'\n"
% (source_path, destination))
else:
if self.destination_local:
destination_path = self.destination_storage.path(destination)
try:
os.makedirs(os.path.dirname(destination_path))
except OSError:
pass
shutil.copy2(source_path, destination_path)
if verbosity >= 1:
self.stdout.write("Copying '%s' to '%s'\n"
% (source_path, destination_path))
else:
source_file = source_storage.open(source)
self.destination_storage.save(destination, source_file)
if verbosity >= 1:
self.stdout.write("Copying %s to %s\n"
% (source_path, destination))
self.copied_files.add(destination)
return True
|
apache-2.0
| 3,720,365,188,949,467,600 | 4,265,319,630,085,376,500 | 42.887701 | 91 | 0.532472 | false |
ganeshgore/myremolab
|
server/launch/sample_balanced2_concurrent_experiments/main_machine/lab_and_experiment/experiment16/server_config.py
|
242
|
1525
|
#!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
weblab_xilinx_experiment_xilinx_device = 'FPGA'
weblab_xilinx_experiment_port_number = 1
# This should be something like this:
# import os as _os
# xilinx_home = _os.getenv('XILINX_HOME')
# if xilinx_home == None:
# if _os.name == 'nt':
# xilinx_home = r'C:\Program Files\Xilinx'
# elif _os.name == 'posix':
# xilinx_home = r"/home/nctrun/Xilinx"
#
# if _os.name == 'nt':
# xilinx_impact_full_path = [xilinx_home + r'\bin\nt\impact']
# elif _os.name == 'posix':
# xilinx_impact_full_path = [xilinx_home + r'/bin/lin/impact']
# But for testing we are going to fake it:
xilinx_home = "."
xilinx_impact_full_path = ["python","./tests/unit/weblab/experiment/devices/xilinx_impact/fake_impact.py" ]
xilinx_device_to_program = 'XilinxImpact' # 'JTagBlazer', 'DigilentAdept'
xilinx_device_to_send_commands = 'SerialPort' # 'HttpDevice'
digilent_adept_full_path = ["python","./test/unit/weblab/experiment/devices/digilent_adept/fake_digilent_adept.py" ]
digilent_adept_batch_content = """something with the variable $FILE"""
xilinx_http_device_ip_FPGA = "192.168.50.138"
xilinx_http_device_port_FPGA = 80
xilinx_http_device_app_FPGA = ""
xilinx_batch_content_FPGA = """setMode -bs
setCable -port auto
addDevice -position 1 -file $FILE
Program -p 1
exit
"""
# Though it is not really an FPGA, the webcam url var name depends on the device,
# specified above.
fpga_webcam_url = '''https://www.weblab.deusto.es/webcam/fpga0/image.jpg'''
|
bsd-2-clause
| 1,161,479,225,985,893,400 | -6,655,863,340,427,296,000 | 32.911111 | 116 | 0.681967 | false |
stshine/servo
|
tests/wpt/web-platform-tests/tools/html5lib/html5lib/trie/py.py
|
817
|
1763
|
from __future__ import absolute_import, division, unicode_literals
from six import text_type
from bisect import bisect_left
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
def __init__(self, data):
if not all(isinstance(x, text_type) for x in data.keys()):
raise TypeError("All keys must be strings")
self._data = data
self._keys = sorted(data.keys())
self._cachestr = ""
self._cachepoints = (0, len(data))
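        # _cachestr / _cachepoints memoize the bisect window of the most recent
        # prefix lookup, so repeated queries sharing a prefix scan a narrower slice
        # of the sorted key list.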
def __contains__(self, key):
return key in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
def __getitem__(self, key):
return self._data[key]
def keys(self, prefix=None):
if prefix is None or prefix == "" or not self._keys:
return set(self._keys)
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
start = i = bisect_left(self._keys, prefix, lo, hi)
else:
start = i = bisect_left(self._keys, prefix)
keys = set()
if start == len(self._keys):
return keys
while self._keys[i].startswith(prefix):
keys.add(self._keys[i])
i += 1
self._cachestr = prefix
self._cachepoints = (start, i)
return keys
def has_keys_with_prefix(self, prefix):
if prefix in self._data:
return True
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
i = bisect_left(self._keys, prefix, lo, hi)
else:
i = bisect_left(self._keys, prefix)
if i == len(self._keys):
return False
return self._keys[i].startswith(prefix)
|
mpl-2.0
| 2,235,072,754,526,293,000 | -5,213,545,445,697,407,000 | 25.313433 | 66 | 0.551333 | false |
rhertzog/django
|
django/contrib/admin/models.py
|
72
|
5618
|
from __future__ import unicode_literals
import json
from django.conf import settings
from django.contrib.admin.utils import quote
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.urls import NoReverseMatch, reverse
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible, smart_text
from django.utils.text import get_text_list
from django.utils.translation import ugettext, ugettext_lazy as _
ADDITION = 1
CHANGE = 2
DELETION = 3
class LogEntryManager(models.Manager):
use_in_migrations = True
def log_action(self, user_id, content_type_id, object_id, object_repr, action_flag, change_message=''):
if isinstance(change_message, list):
change_message = json.dumps(change_message)
self.model.objects.create(
user_id=user_id,
content_type_id=content_type_id,
object_id=smart_text(object_id),
object_repr=object_repr[:200],
action_flag=action_flag,
change_message=change_message,
)
@python_2_unicode_compatible
class LogEntry(models.Model):
action_time = models.DateTimeField(
_('action time'),
default=timezone.now,
editable=False,
)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
models.CASCADE,
verbose_name=_('user'),
)
content_type = models.ForeignKey(
ContentType,
models.SET_NULL,
verbose_name=_('content type'),
blank=True, null=True,
)
object_id = models.TextField(_('object id'), blank=True, null=True)
# Translators: 'repr' means representation (https://docs.python.org/3/library/functions.html#repr)
object_repr = models.CharField(_('object repr'), max_length=200)
action_flag = models.PositiveSmallIntegerField(_('action flag'))
# change_message is either a string or a JSON structure
change_message = models.TextField(_('change message'), blank=True)
objects = LogEntryManager()
class Meta:
verbose_name = _('log entry')
verbose_name_plural = _('log entries')
db_table = 'django_admin_log'
ordering = ('-action_time',)
def __repr__(self):
return smart_text(self.action_time)
def __str__(self):
if self.is_addition():
return ugettext('Added "%(object)s".') % {'object': self.object_repr}
elif self.is_change():
return ugettext('Changed "%(object)s" - %(changes)s') % {
'object': self.object_repr,
'changes': self.get_change_message(),
}
elif self.is_deletion():
return ugettext('Deleted "%(object)s."') % {'object': self.object_repr}
return ugettext('LogEntry Object')
def is_addition(self):
return self.action_flag == ADDITION
def is_change(self):
return self.action_flag == CHANGE
def is_deletion(self):
return self.action_flag == DELETION
def get_change_message(self):
"""
If self.change_message is a JSON structure, interpret it as a change
string, properly translated.
"""
if self.change_message and self.change_message[0] == '[':
try:
change_message = json.loads(self.change_message)
except ValueError:
return self.change_message
messages = []
for sub_message in change_message:
if 'added' in sub_message:
if sub_message['added']:
sub_message['added']['name'] = ugettext(sub_message['added']['name'])
messages.append(ugettext('Added {name} "{object}".').format(**sub_message['added']))
else:
messages.append(ugettext('Added.'))
elif 'changed' in sub_message:
sub_message['changed']['fields'] = get_text_list(
sub_message['changed']['fields'], ugettext('and')
)
if 'name' in sub_message['changed']:
sub_message['changed']['name'] = ugettext(sub_message['changed']['name'])
messages.append(ugettext('Changed {fields} for {name} "{object}".').format(
**sub_message['changed']
))
else:
messages.append(ugettext('Changed {fields}.').format(**sub_message['changed']))
elif 'deleted' in sub_message:
sub_message['deleted']['name'] = ugettext(sub_message['deleted']['name'])
messages.append(ugettext('Deleted {name} "{object}".').format(**sub_message['deleted']))
change_message = ' '.join(msg[0].upper() + msg[1:] for msg in messages)
return change_message or ugettext('No fields changed.')
else:
return self.change_message
def get_edited_object(self):
"Returns the edited object represented by this log entry"
return self.content_type.get_object_for_this_type(pk=self.object_id)
def get_admin_url(self):
"""
Returns the admin URL to edit the object represented by this log entry.
"""
if self.content_type and self.object_id:
url_name = 'admin:%s_%s_change' % (self.content_type.app_label, self.content_type.model)
try:
return reverse(url_name, args=(quote(self.object_id),))
except NoReverseMatch:
pass
return None
|
bsd-3-clause
| 1,979,075,996,683,884,000 | -3,648,475,473,741,753,000 | 36.959459 | 108 | 0.583126 | false |
silenceli/nova
|
nova/objects/network.py
|
6
|
10160
|
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo.config import cfg
from nova import db
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
from nova import utils
network_opts = [
cfg.BoolOpt('share_dhcp_address',
default=False,
help='DEPRECATED: THIS VALUE SHOULD BE SET WHEN CREATING THE '
'NETWORK. If True in multi_host mode, all compute hosts '
'share the same dhcp address. The same IP address used '
'for DHCP will be added on each nova-network node which '
'is only visible to the vms on the same host.'),
cfg.IntOpt('network_device_mtu',
help='DEPRECATED: THIS VALUE SHOULD BE SET WHEN CREATING THE '
'NETWORK. MTU setting for network interface.'),
]
CONF = cfg.CONF
CONF.register_opts(network_opts)
# TODO(berrange): Remove NovaObjectDictCompat
class Network(obj_base.NovaPersistentObject, obj_base.NovaObject,
obj_base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Added in_use_on_host()
# Version 1.2: Added mtu, dhcp_server, enable_dhcp, share_address
VERSION = '1.2'
fields = {
'id': fields.IntegerField(),
'label': fields.StringField(),
'injected': fields.BooleanField(),
'cidr': fields.IPV4NetworkField(nullable=True),
'cidr_v6': fields.IPV6NetworkField(nullable=True),
'multi_host': fields.BooleanField(),
'netmask': fields.IPV4AddressField(nullable=True),
'gateway': fields.IPV4AddressField(nullable=True),
'broadcast': fields.IPV4AddressField(nullable=True),
'netmask_v6': fields.IPV6AddressField(nullable=True),
'gateway_v6': fields.IPV6AddressField(nullable=True),
'bridge': fields.StringField(nullable=True),
'bridge_interface': fields.StringField(nullable=True),
'dns1': fields.IPAddressField(nullable=True),
'dns2': fields.IPAddressField(nullable=True),
'vlan': fields.IntegerField(nullable=True),
'vpn_public_address': fields.IPAddressField(nullable=True),
'vpn_public_port': fields.IntegerField(nullable=True),
'vpn_private_address': fields.IPAddressField(nullable=True),
'dhcp_start': fields.IPV4AddressField(nullable=True),
'rxtx_base': fields.IntegerField(nullable=True),
'project_id': fields.UUIDField(nullable=True),
'priority': fields.IntegerField(nullable=True),
'host': fields.StringField(nullable=True),
'uuid': fields.UUIDField(),
'mtu': fields.IntegerField(nullable=True),
'dhcp_server': fields.IPAddressField(nullable=True),
'enable_dhcp': fields.BooleanField(),
'share_address': fields.BooleanField(),
}
@staticmethod
def _convert_legacy_ipv6_netmask(netmask):
"""Handle netmask_v6 possibilities from the database.
Historically, this was stored as just an integral CIDR prefix,
but in the future it should be stored as an actual netmask.
Be tolerant of either here.
"""
try:
prefix = int(netmask)
return netaddr.IPNetwork('1::/%i' % prefix).netmask
except ValueError:
pass
try:
return netaddr.IPNetwork(netmask).netmask
except netaddr.AddrFormatError:
raise ValueError(_('IPv6 netmask "%s" must be a netmask '
'or integral prefix') % netmask)
def obj_make_compatible(self, primitive, target_version):
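        # Strip the fields introduced in object version 1.2 when serializing for an
        # older consumer.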
target_version = utils.convert_version_to_tuple(target_version)
if target_version < (1, 2):
if 'mtu' in primitive:
del primitive['mtu']
if 'enable_dhcp' in primitive:
del primitive['enable_dhcp']
if 'dhcp_server' in primitive:
del primitive['dhcp_server']
if 'share_address' in primitive:
del primitive['share_address']
@staticmethod
def _from_db_object(context, network, db_network):
for field in network.fields:
db_value = db_network[field]
            if field == 'netmask_v6' and db_value is not None:
                db_value = network._convert_legacy_ipv6_netmask(db_value)
            if field == 'mtu' and db_value is None:
                db_value = CONF.network_device_mtu
            if field == 'dhcp_server' and db_value is None:
                db_value = db_network['gateway']
            if field == 'share_address' and CONF.share_dhcp_address:
db_value = CONF.share_dhcp_address
network[field] = db_value
network._context = context
network.obj_reset_changes()
return network
@obj_base.remotable_classmethod
def get_by_id(cls, context, network_id, project_only='allow_none'):
db_network = db.network_get(context, network_id,
project_only=project_only)
return cls._from_db_object(context, cls(), db_network)
@obj_base.remotable_classmethod
def get_by_uuid(cls, context, network_uuid):
db_network = db.network_get_by_uuid(context, network_uuid)
return cls._from_db_object(context, cls(), db_network)
@obj_base.remotable_classmethod
def get_by_cidr(cls, context, cidr):
db_network = db.network_get_by_cidr(context, cidr)
return cls._from_db_object(context, cls(), db_network)
@obj_base.remotable_classmethod
def associate(cls, context, project_id, network_id=None, force=False):
db.network_associate(context, project_id, network_id=network_id,
force=force)
@obj_base.remotable_classmethod
def disassociate(cls, context, network_id, host=False, project=False):
db.network_disassociate(context, network_id, host, project)
@obj_base.remotable_classmethod
def in_use_on_host(cls, context, network_id, host):
return db.network_in_use_on_host(context, network_id, host)
def _get_primitive_changes(self):
changes = {}
for key, value in self.obj_get_changes().items():
if isinstance(value, netaddr.IPAddress):
changes[key] = str(value)
else:
changes[key] = value
return changes
@obj_base.remotable
def create(self, context):
updates = self._get_primitive_changes()
if 'id' in updates:
raise exception.ObjectActionError(action='create',
reason='already created')
db_network = db.network_create_safe(context, updates)
self._from_db_object(context, self, db_network)
@obj_base.remotable
def destroy(self, context):
db.network_delete_safe(context, self.id)
self.deleted = True
self.obj_reset_changes(['deleted'])
@obj_base.remotable
def save(self, context):
updates = self._get_primitive_changes()
if 'netmask_v6' in updates:
# NOTE(danms): For some reason, historical code stores the
# IPv6 netmask as just the CIDR mask length, so convert that
# back here before saving for now.
updates['netmask_v6'] = netaddr.IPNetwork(
updates['netmask_v6']).netmask
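        # 'host' is persisted through the dedicated db.network_set_host() call rather
        # than the generic network_update(), so pop it out of the updates first.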
set_host = 'host' in updates
if set_host:
db.network_set_host(context, self.id, updates.pop('host'))
if updates:
db_network = db.network_update(context, self.id, updates)
elif set_host:
db_network = db.network_get(context, self.id)
else:
db_network = None
if db_network is not None:
self._from_db_object(context, self, db_network)
class NetworkList(obj_base.ObjectListBase, obj_base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added get_by_project()
# Version 1.2: Network <= version 1.2
VERSION = '1.2'
fields = {
'objects': fields.ListOfObjectsField('Network'),
}
child_versions = {
'1.0': '1.0',
'1.1': '1.1',
'1.2': '1.2',
}
@obj_base.remotable_classmethod
def get_all(cls, context, project_only='allow_none'):
db_networks = db.network_get_all(context, project_only)
return obj_base.obj_make_list(context, cls(context), objects.Network,
db_networks)
@obj_base.remotable_classmethod
def get_by_uuids(cls, context, network_uuids, project_only='allow_none'):
db_networks = db.network_get_all_by_uuids(context, network_uuids,
project_only)
return obj_base.obj_make_list(context, cls(context), objects.Network,
db_networks)
@obj_base.remotable_classmethod
def get_by_host(cls, context, host):
db_networks = db.network_get_all_by_host(context, host)
return obj_base.obj_make_list(context, cls(context), objects.Network,
db_networks)
@obj_base.remotable_classmethod
def get_by_project(cls, context, project_id, associate=True):
db_networks = db.project_get_networks(context, project_id,
associate=associate)
return obj_base.obj_make_list(context, cls(context), objects.Network,
db_networks)
|
apache-2.0
| 436,595,354,953,930,200 | -5,489,369,235,244,943,000 | 40.133603 | 78 | 0.612894 | false |
MCGarvey/django-calaccess-raw-data
|
calaccess_raw/models/campaign.py
|
15
|
138941
|
from __future__ import unicode_literals
from calaccess_raw import fields
from django.utils.encoding import python_2_unicode_compatible
from .base import CalAccessBaseModel
@python_2_unicode_compatible
class CvrSoCd(CalAccessBaseModel):
"""
Cover page for a statement of organization creation or termination
form filed by a slate-mailer organization or recipient committee.
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"LINE_ITEM",
"REC_TYPE",
"FORM_TYPE",
)
acct_opendt = fields.DateTimeField(
db_column="ACCT_OPENDT",
null=True,
help_text='This field is undocumented',
)
ACTIVITY_LEVEL_CHOICES = (
("CI", "City"),
("CO", "County"),
("ST", "State"),
("", "Unknown"),
)
actvty_lvl = fields.CharField(
max_length=2,
db_column="ACTVTY_LVL",
blank=True,
choices=ACTIVITY_LEVEL_CHOICES,
verbose_name="Activity level",
help_text="Organization's level of activity"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
bank_adr1 = fields.CharField(
max_length=55,
db_column="BANK_ADR1",
blank=True,
help_text='This field is undocumented',
)
bank_adr2 = fields.CharField(
max_length=55,
db_column="BANK_ADR2",
blank=True,
help_text='This field is undocumented',
)
bank_city = fields.CharField(
max_length=30,
db_column="BANK_CITY",
blank=True,
help_text='This field is undocumented',
)
bank_nam = fields.CharField(
max_length=200,
db_column="BANK_NAM",
blank=True,
help_text='This field is undocumented',
)
bank_phon = fields.CharField(
max_length=20,
db_column="BANK_PHON",
blank=True,
help_text='This field is undocumented',
)
bank_st = fields.CharField(
max_length=2,
db_column="BANK_ST",
blank=True,
help_text='This field is undocumented',
)
bank_zip4 = fields.CharField(
max_length=10,
db_column="BANK_ZIP4",
blank=True,
help_text='This field is undocumented',
)
brdbase_cb = fields.CharField(
max_length=1,
db_column="BRDBASE_CB",
blank=True,
help_text='This field is undocumented',
)
city = fields.CharField(
max_length=30,
db_column="CITY",
blank=True,
help_text='This field is undocumented',
)
cmte_email = fields.CharField(
max_length=60,
db_column="CMTE_EMAIL",
blank=True,
help_text='This field is undocumented',
)
cmte_fax = fields.CharField(
max_length=20,
db_column="CMTE_FAX",
blank=True,
help_text='This field is undocumented',
)
com82013id = fields.CharField(
max_length=9,
db_column="COM82013ID",
blank=True,
help_text='This field is undocumented',
)
com82013nm = fields.CharField(
max_length=200,
db_column="COM82013NM",
blank=True,
help_text='This field is undocumented',
)
com82013yn = fields.CharField(
max_length=1,
db_column="COM82013YN",
blank=True,
help_text='This field is undocumented',
)
control_cb = fields.CharField(
max_length=1,
db_column="CONTROL_CB",
blank=True,
help_text='This field is undocumented',
)
county_act = fields.CharField(
max_length=20,
db_column="COUNTY_ACT",
blank=True,
help_text='This field is undocumented',
)
county_res = fields.CharField(
max_length=20,
db_column="COUNTY_RES",
blank=True,
help_text='This field is undocumented',
)
ENTITY_CODE_CHOICES = (
# Defined here:
# http://www.documentcloud.org/documents/1308003-cal-access-cal-\
# format.html#document/p9
('', 'Unknown'),
('BMC', 'Ballot measure committee'),
('CAO', 'Candidate/officeholder'),
('COM', 'Committee'),
('CTL', 'Controlled committee'),
('RCP', 'Recipient committee'),
('SMO', 'Slate-mailer organization'),
)
entity_cd = fields.CharField(
max_length=3,
db_column="ENTITY_CD",
blank=True,
choices=ENTITY_CODE_CHOICES,
verbose_name="Entity code"
)
filer_id = fields.CharField(
verbose_name='filer ID',
db_column='FILER_ID',
max_length=9,
blank=True,
db_index=True,
help_text="Filer's unique identification number",
)
filer_namf = fields.CharField(
max_length=45,
db_column="FILER_NAMF",
blank=True,
verbose_name="Filer first name"
)
filer_naml = fields.CharField(
max_length=200,
db_column="FILER_NAML",
blank=True,
verbose_name="Filer last name"
)
filer_nams = fields.CharField(
max_length=10,
db_column="FILER_NAMS",
blank=True,
verbose_name="Filer name suffix"
)
filer_namt = fields.CharField(
max_length=10,
db_column="FILER_NAMT",
blank=True,
verbose_name="Filer name title"
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
FORM_TYPE_CHOICES = (
('F400', 'Form 400 (Statement of organization, \
slate mailer organization)'),
('F402', 'Form 402 (Statement of termination, \
slate mailer organization'),
('F410', 'Form 410 (Statement of organization, recipient committee)'),
)
form_type = fields.CharField(
max_length=4,
db_column="FORM_TYPE",
choices=FORM_TYPE_CHOICES,
help_text='Name of the source filing form or schedule'
)
genpurp_cb = fields.CharField(
max_length=1,
db_column="GENPURP_CB",
blank=True,
help_text='This field is undocumented',
)
gpc_descr = fields.CharField(
max_length=300,
db_column="GPC_DESCR",
blank=True,
help_text='This field is undocumented',
)
mail_city = fields.CharField(
max_length=30,
db_column="MAIL_CITY",
blank=True,
help_text='This field is undocumented',
)
mail_st = fields.CharField(
max_length=2,
db_column="MAIL_ST",
blank=True,
help_text='This field is undocumented',
)
mail_zip4 = fields.CharField(
max_length=10,
db_column="MAIL_ZIP4",
blank=True,
help_text='This field is undocumented',
)
phone = fields.CharField(
max_length=20,
db_column="PHONE",
blank=True,
help_text='This field is undocumented',
)
primfc_cb = fields.CharField(
max_length=1,
db_column="PRIMFC_CB",
blank=True,
help_text='This field is undocumented',
)
qualfy_dt = fields.DateTimeField(
db_column="QUALFY_DT",
null=True,
verbose_name="Date qualified",
help_text="Date qualified as an organization"
)
qual_cb = fields.CharField(
max_length=1,
db_column="QUAL_CB",
blank=True,
help_text='This field is undocumented',
)
REC_TYPE_CHOICES = (
("CVR", "CVR"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
report_num = fields.CharField(
max_length=3,
db_column="REPORT_NUM",
blank=True,
help_text='This field is undocumented',
)
rpt_date = fields.DateTimeField(
db_column="RPT_DATE",
null=True,
help_text='This field is undocumented',
)
smcont_qualdt = fields.DateTimeField(
db_column="SMCONT_QUALDT",
null=True,
help_text='This field is undocumented',
)
sponsor_cb = fields.CharField(
max_length=1,
db_column="SPONSOR_CB",
blank=True,
help_text='This field is undocumented',
)
st = fields.CharField(
max_length=2,
db_column="ST",
blank=True,
help_text='This field is undocumented',
)
surplusdsp = fields.CharField(
max_length=90,
db_column="SURPLUSDSP",
blank=True,
help_text='This field is undocumented',
)
term_date = fields.DateTimeField(
db_column="TERM_DATE",
null=True,
help_text='This field is undocumented',
)
tres_city = fields.CharField(
max_length=30,
db_column="TRES_CITY",
blank=True,
verbose_name="Treasurer's city"
)
tres_namf = fields.CharField(
max_length=45,
db_column="TRES_NAMF",
blank=True,
verbose_name="Treasurer's first name"
)
tres_naml = fields.CharField(
max_length=200,
db_column="TRES_NAML",
blank=True,
verbose_name="Treasurer's last name"
)
tres_nams = fields.CharField(
max_length=10,
db_column="TRES_NAMS",
blank=True,
verbose_name="Treasurer's name suffix"
)
tres_namt = fields.CharField(
max_length=10,
db_column="TRES_NAMT",
blank=True,
verbose_name="Treasurer's name title"
)
tres_phon = fields.CharField(
max_length=20,
db_column="TRES_PHON",
blank=True,
verbose_name="Treasurer's phone number"
)
tres_st = fields.CharField(
max_length=2,
db_column="TRES_ST",
blank=True,
verbose_name="Treasurer's street",
)
tres_zip4 = fields.CharField(
max_length=10,
db_column="TRES_ZIP4",
blank=True,
help_text="Treasurer's ZIP Code"
)
zip4 = fields.CharField(
max_length=10,
db_column="ZIP4",
blank=True,
help_text='This field is undocumented',
)
class Meta:
app_label = 'calaccess_raw'
db_table = "CVR_SO_CD"
verbose_name = 'CVR_SO_CD'
verbose_name_plural = 'CVR_SO_CD'
def __str__(self):
return str(self.filing_id)
@python_2_unicode_compatible
class Cvr2SoCd(CalAccessBaseModel):
"""
Additional names and committees information included on the second page
of a statement of organization creation form filed
by a slate-mailer organization or recipient committee.
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"LINE_ITEM",
"REC_TYPE",
"FORM_TYPE"
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
REC_TYPE_CHOICES = (
("CVR2", "CVR2"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
FORM_TYPE_CHOICES = (
('F400', 'Form 400 (Statement of organization, \
slate mailer organization)'),
('F410', 'Form 410 (Statement of organization, recipient committee)'),
)
form_type = fields.CharField(
choices=FORM_TYPE_CHOICES,
db_column='FORM_TYPE',
max_length=4,
help_text='Name of the source filing form or schedule'
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
ENTITY_CODE_CHOICES = (
# Defined here:
# http://www.documentcloud.org/documents/1308003-cal-access-cal-\
# format.html#document/p9
('', 'Unknown'),
('ATH', 'Authorizing individual'),
('ATR', 'Assistant treasurer'),
('BMN', 'BMN (Unknown)'),
('BNM', 'Ballot measure\'s name/title'),
('CAO', 'Candidate/officeholder'),
('COM', 'Committee'),
('CTL', 'Controlled committee'),
('OFF', 'Officer'),
('POF', 'Principal officer'),
('PRO', 'Proponent'),
('SPO', 'Sponsor'),
)
entity_cd = fields.CharField(
db_column='ENTITY_CD',
max_length=3,
blank=True,
verbose_name='entity code',
choices=ENTITY_CODE_CHOICES,
)
enty_naml = fields.CharField(
db_column='ENTY_NAML',
max_length=194,
blank=True,
help_text="Entity's business name or last name if the entity is an \
individual"
)
enty_namf = fields.CharField(
db_column='ENTY_NAMF',
max_length=34,
blank=True,
help_text="Entity's first name if the entity is an individual"
)
enty_namt = fields.CharField(
db_column='ENTY_NAMT',
max_length=9,
blank=True,
help_text="Entity's name prefix or title if the entity is an \
individual"
)
enty_nams = fields.CharField(
db_column='ENTY_NAMS',
max_length=10,
blank=True,
help_text="Entity's name suffix if the entity is an individual"
)
item_cd = fields.CharField(
db_column='ITEM_CD',
max_length=4,
blank=True,
help_text="Section of the Statement of Organization this \
itemization relates to. See CAL document for the definition \
of legal values for this column."
)
mail_city = fields.CharField(
db_column='MAIL_CITY',
max_length=25,
blank=True,
help_text="City portion of the entity's mailing address"
)
mail_st = fields.CharField(
db_column='MAIL_ST',
max_length=4,
blank=True,
help_text="State portion of the entity's mailing address"
)
mail_zip4 = fields.CharField(
db_column='MAIL_ZIP4',
max_length=10,
blank=True,
help_text="Zipcode portion of the entity's mailing address"
)
day_phone = fields.CharField(
db_column='DAY_PHONE',
max_length=20,
blank=True,
help_text="Entity's daytime phone number"
)
fax_phone = fields.CharField(
db_column='FAX_PHONE',
max_length=20,
blank=True,
help_text="Entity's fax number"
)
email_adr = fields.CharField(
db_column='EMAIL_ADR',
max_length=40,
blank=True,
help_text="Email address. Not contained in current forms."
)
cmte_id = fields.IntegerField(
db_column='CMTE_ID',
blank=True,
null=True,
verbose_name="Committee ID",
help_text="Entity's identification number"
)
ind_group = fields.CharField(
db_column='IND_GROUP',
max_length=87,
blank=True,
help_text="Industry group/affiliation description"
)
office_cd = fields.CharField(
db_column='OFFICE_CD',
max_length=4,
blank=True,
help_text="Code that identifies the office being sought. See \
CAL document for a list of valid codes."
)
offic_dscr = fields.CharField(
db_column='OFFIC_DSCR',
max_length=40,
blank=True,
help_text="Office sought description used if the office sought code \
(OFFICE_CD) equals other (OTH)."
)
juris_cd = fields.CharField(
db_column='JURIS_CD',
max_length=4,
blank=True,
help_text="Office jurisdiction code. See CAL document for a \
list of legal values."
)
juris_dscr = fields.CharField(
db_column='JURIS_DSCR',
max_length=40,
blank=True,
help_text="Office jurisdiction description provided if the \
jurisdiction code (JURIS_CD) equals other (OTH)."
)
dist_no = fields.CharField(
db_column='DIST_NO',
max_length=4,
blank=True,
help_text="Office district number for Senate, Assembly, and Board \
of Equalization districts."
)
off_s_h_cd = fields.CharField(
db_column='OFF_S_H_CD',
max_length=4,
blank=True,
help_text="Office sought/held code. Legal values are 'S' for sought \
and 'H' for held."
)
non_pty_cb = fields.CharField(
db_column='NON_PTY_CB',
max_length=4,
blank=True,
help_text="Non-partisan check-box. Legal values are 'X' and null."
)
party_name = fields.CharField(
db_column='PARTY_NAME',
max_length=63,
blank=True,
help_text="Name of party (if partisan)"
)
bal_num = fields.CharField(
db_column='BAL_NUM',
max_length=7,
blank=True,
help_text="Ballot measure number or letter"
)
bal_juris = fields.CharField(
db_column='BAL_JURIS',
max_length=40,
blank=True,
help_text="Jurisdiction of ballot measure"
)
sup_opp_cd = fields.CharField(
db_column='SUP_OPP_CD',
max_length=4,
blank=True,
help_text="Support/oppose code (S/O). Legal values are 'S' for \
support and 'O' for oppose."
)
year_elect = fields.CharField(
db_column='YEAR_ELECT',
max_length=4,
blank=True,
help_text="Year of election"
)
pof_title = fields.CharField(
db_column='POF_TITLE',
max_length=44,
blank=True,
help_text="Position/title of the principal officer"
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'CVR2_SO_CD'
verbose_name = 'CVR2_SO_CD'
verbose_name_plural = 'CVR2_SO_CD'
def __str__(self):
return str(self.filing_id)
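# Usage sketch: listing the additional names attached to a Form 400/410 filing
# via this model. Assumes the model is importable as
# calaccess_raw.models.Cvr2SoCd; the filing_id is a made-up example.
#
#   from calaccess_raw.models import Cvr2SoCd
#   names = Cvr2SoCd.objects.filter(filing_id=123456, amend_id=0).values_list(
#       'entity_cd', 'enty_naml', 'enty_namf'
#   )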
@python_2_unicode_compatible
class CvrCampaignDisclosureCd(CalAccessBaseModel):
"""
Cover page information from campaign disclosure forms
"""
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
amendexp_1 = fields.CharField(
max_length=100,
db_column='AMENDEXP_1',
blank=True,
help_text='Amendment explanation line 1'
)
amendexp_2 = fields.CharField(
max_length=100,
db_column='AMENDEXP_2',
blank=True,
help_text="Amendment explanation line 2"
)
amendexp_3 = fields.CharField(
max_length=100,
db_column='AMENDEXP_3',
blank=True,
help_text="Amendment explanation line 3"
)
assoc_cb = fields.CharField(
max_length=4,
db_column='ASSOC_CB',
blank=True,
help_text="Association Interests info included check-box. Legal \
values are 'X' and null."
)
assoc_int = fields.CharField(
max_length=90,
db_column='ASSOC_INT',
blank=True,
help_text="Description of association interests"
)
bal_id = fields.CharField(
max_length=9,
db_column='BAL_ID',
blank=True,
help_text="This field is undocumented"
)
bal_juris = fields.CharField(
max_length=40,
db_column='BAL_JURIS',
blank=True,
help_text="Ballot measure jurisdiction"
)
bal_name = fields.CharField(
max_length=200,
db_column='BAL_NAME',
blank=True,
help_text="Ballot measure name"
)
bal_num = fields.CharField(
max_length=4,
db_column='BAL_NUM',
blank=True,
help_text="Ballot measure number or letter"
)
brdbase_yn = fields.CharField(
max_length=1,
db_column='BRDBASE_YN',
blank=True,
help_text="Broad Base Committee (yes/no) check box. Legal \
values are 'Y' or 'N'."
)
# bus_adr1 = fields.CharField(
# max_length=55, db_column='BUS_ADR1', blank=True
# )
# bus_adr2 = fields.CharField(
# max_length=55, db_column='BUS_ADR2', blank=True
# )
bus_city = fields.CharField(
max_length=30,
db_column='BUS_CITY',
blank=True,
help_text="Employer/business address city"
)
bus_inter = fields.CharField(
max_length=40,
db_column='BUS_INTER',
blank=True,
help_text="Employer/business interest description"
)
bus_name = fields.CharField(
max_length=200,
db_column='BUS_NAME',
blank=True,
help_text="Name of employer/business. Applies to the form 461."
)
bus_st = fields.CharField(
max_length=2,
db_column='BUS_ST',
blank=True,
help_text="Employer/business address state"
)
bus_zip4 = fields.CharField(
max_length=10,
db_column='BUS_ZIP4',
blank=True,
help_text="Employer/business address ZIP Code"
)
busact_cb = fields.CharField(
max_length=10,
db_column='BUSACT_CB',
blank=True,
help_text="Business activity info included check-box. Valid values \
are 'X' and null"
)
busactvity = fields.CharField(
max_length=90,
db_column='BUSACTVITY',
blank=True,
help_text="Business activity description"
)
# cand_adr1 = fields.CharField(
# max_length=55, db_column='CAND_ADR1', blank=True
# )
# cand_adr2 = fields.CharField(
# max_length=55, db_column='CAND_ADR2', blank=True
# )
cand_city = fields.CharField(
max_length=30,
db_column='CAND_CITY',
blank=True,
help_text='Candidate/officeholder city'
)
cand_email = fields.CharField(
max_length=60,
db_column='CAND_EMAIL',
blank=True,
help_text='Candidate/officeholder email. This field \
is not contained on the forms.'
)
cand_fax = fields.CharField(
max_length=20,
db_column='CAND_FAX',
blank=True,
help_text='Candidate/officeholder fax. This field \
is not contained on the forms.'
)
cand_id = fields.CharField(
max_length=9,
db_column='CAND_ID',
blank=True,
help_text="This field is not documented"
)
cand_namf = fields.CharField(
max_length=45,
db_column='CAND_NAMF',
blank=True,
help_text='Candidate/officeholder first name'
)
cand_naml = fields.CharField(
max_length=200,
db_column='CAND_NAML',
blank=True,
help_text="Candidate/officeholder's last name. Applies to forms \
460, 465, and 496."
)
cand_nams = fields.CharField(
max_length=10,
db_column='CAND_NAMS',
blank=True,
help_text="Candidate/officeholder's name suffix"
)
cand_namt = fields.CharField(
max_length=10,
db_column='CAND_NAMT',
blank=True,
help_text="Candidate/officeholder's prefix or title"
)
cand_phon = fields.CharField(
max_length=20,
db_column='CAND_PHON',
blank=True,
help_text='Candidate/officeholder phone'
)
cand_st = fields.CharField(
max_length=4,
db_column='CAND_ST',
blank=True,
help_text="Candidate/officeholder's state"
)
cand_zip4 = fields.CharField(
max_length=10,
db_column='CAND_ZIP4',
blank=True,
help_text="Candidate/officeholder's ZIP Code"
)
cmtte_id = fields.CharField(
max_length=9,
db_column='CMTTE_ID',
blank=True,
verbose_name="Committee ID",
help_text="Committee ID (Filer_id) of recipient Committee who's \
campaign statement is attached. This field applies to the form 401."
)
cmtte_type = fields.CharField(
max_length=1,
db_column='CMTTE_TYPE',
blank=True,
verbose_name="Committee type",
help_text="Type of Recipient Committee. Applies to the 450/460."
)
control_yn = fields.IntegerField(
null=True,
db_column='CONTROL_YN',
blank=True,
help_text="Controlled Committee (yes/no) check box. Legal values \
are 'Y' or 'N'."
)
dist_no = fields.CharField(
max_length=4,
db_column='DIST_NO',
blank=True,
help_text="District number for the office being sought. Populated \
for Senate, Assembly, or Board of Equalization races."
)
elect_date = fields.DateTimeField(
null=True,
db_column='ELECT_DATE',
blank=True,
help_text="Date of the General Election"
)
emplbus_cb = fields.CharField(
max_length=4,
db_column='EMPLBUS_CB',
blank=True,
help_text="Employer/Business Info included check-box. Legal \
values are 'X' or null. Applies to the Form 461."
)
employer = fields.CharField(
max_length=200,
db_column='EMPLOYER',
blank=True,
help_text="Employer. This field is most likely unused."
)
ENTITY_CODE_CHOICES = (
# Defined here:
# http://www.documentcloud.org/documents/1308003-cal-access-cal-\
# format.html#document/p9
('', 'Unknown'),
('BMC', 'Ballot measure committee'),
('CAO', 'Candidate/officeholder'),
('COM', 'Committee'),
('CTL', 'Controlled committee'),
('IND', 'Person (Spending > $5,000)'),
('MDI', 'Major donor/independent expenditure'),
('OTH', 'Other'),
('PTY', 'Political party'),
('RCP', 'Recipient committee'),
('SCC', 'Small contributor committee'),
('SMO', 'Slate mailer organization'),
)
entity_cd = fields.CharField(
max_length=4,
db_column='ENTITY_CD',
blank=True,
choices=ENTITY_CODE_CHOICES,
verbose_name='entity code'
)
file_email = fields.CharField(
max_length=60,
db_column='FILE_EMAIL',
blank=True,
help_text="Filer's email address"
)
# filer_adr1 = fields.CharField(
# max_length=55, db_column='FILER_ADR1', blank=True
# )
# filer_adr2 = fields.CharField(
# max_length=55, db_column='FILER_ADR2', blank=True
# )
filer_city = fields.CharField(
max_length=30,
db_column='FILER_CITY',
blank=True,
help_text="Filer's city"
)
filer_fax = fields.CharField(
max_length=20,
db_column='FILER_FAX',
blank=True,
help_text="Filer's fax"
)
filer_id = fields.CharField(
verbose_name='filer ID',
db_column='FILER_ID',
max_length=15,
blank=True,
db_index=True,
help_text="Filer's unique identification number",
)
filer_namf = fields.CharField(
max_length=45,
db_column='FILER_NAMF',
blank=True,
help_text="Filer's first name, if an individual"
)
filer_naml = fields.CharField(
max_length=200,
db_column='FILER_NAML',
help_text="The committee's or organization's name or if an \
individual the filer's last name."
)
filer_nams = fields.CharField(
max_length=10,
db_column='FILER_NAMS',
blank=True,
help_text="Filer's suffix, if an individual"
)
filer_namt = fields.CharField(
max_length=10,
db_column='FILER_NAMT',
blank=True,
help_text="Filer's title or prefix, if an individual"
)
filer_phon = fields.CharField(
max_length=20,
db_column='FILER_PHON',
blank=True,
help_text="Filer phone number"
)
filer_st = fields.CharField(
max_length=4,
db_column='FILER_ST',
blank=True,
help_text="Filer state"
)
filer_zip4 = fields.CharField(
max_length=10,
db_column='FILER_ZIP4',
blank=True,
help_text="Filer ZIP Code"
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
FORM_TYPE_CHOICES = (
('F511', 'Form 511 (Paid spokesman report)'),
('F900', 'Form 900 (Public employee\'s retirement board, \
candidate campaign statement)'),
('F425', 'Form 425 (Semi-annual statement of no activity, \
non-controlled recipient committee)'),
('F450', 'Form 450 (Recipient committee campaign statement, \
short form)'),
('F401', 'Form 401 (Slate mailer organization campaign statement)'),
        ('F498', 'Form 498 (Late payment report, slate mailer organizations)'),
        ('F465', 'Form 465 (Supplemental independent expenditure report)'),
('F496', 'Form 496 (Late independent expenditure report)'),
('F461', 'Form 461 (Independent expenditure committee \
and major donor committee campaign statement)'),
('F460', 'Form 460 (Recipient committee campaign statement)'),
('F497', 'Form 497 (Late contribution report)')
)
form_type = fields.CharField(
choices=FORM_TYPE_CHOICES,
max_length=4,
db_column='FORM_TYPE',
help_text='Name of the source filing form or schedule'
)
from_date = fields.DateTimeField(
null=True,
db_column='FROM_DATE',
blank=True,
help_text="Reporting period from date"
)
juris_cd = fields.CharField(
max_length=3,
db_column='JURIS_CD',
blank=True,
help_text="Office jurisdiction code"
)
juris_dscr = fields.CharField(
max_length=40,
db_column='JURIS_DSCR',
blank=True,
help_text="Office Jurisdiction description if the field JURIS_CD is \
set to city (CIT), county (CTY), local (LOC), or other \
(OTH)."
)
late_rptno = fields.CharField(
max_length=30,
db_column='LATE_RPTNO',
blank=True,
help_text="Identifying Report Number used to distinguish multiple \
reports filed during the same filing period. For example, \
this field allows for multiple form 497s to be filed on the \
same day."
)
# mail_adr1 = fields.CharField(
# max_length=55, db_column='MAIL_ADR1', blank=True
# )
# mail_adr2 = fields.CharField(
# max_length=55, db_column='MAIL_ADR2', blank=True
# )
mail_city = fields.CharField(
max_length=30,
db_column='MAIL_CITY',
blank=True,
help_text="Filer mailing address city"
)
mail_st = fields.CharField(
max_length=4,
db_column='MAIL_ST',
blank=True,
help_text="Filer mailing address state"
)
mail_zip4 = fields.CharField(
max_length=10,
db_column='MAIL_ZIP4',
blank=True,
help_text="Filer mailing address ZIP Code"
)
occupation = fields.CharField(
max_length=60,
db_column='OCCUPATION',
blank=True,
help_text="Occupation. This field is most likely unused."
)
off_s_h_cd = fields.CharField(
max_length=1,
db_column='OFF_S_H_CD',
blank=True,
help_text='Office Sought/Held Code. Legal values are "S" for \
sought and "H" for held.'
)
offic_dscr = fields.CharField(
max_length=40,
db_column='OFFIC_DSCR',
blank=True,
help_text="Office sought description if the field OFFICE_CD is set \
to other (OTH)"
)
office_cd = fields.CharField(
max_length=3,
db_column='OFFICE_CD',
blank=True,
verbose_name="Office code",
help_text="Code that identifies the office being sought"
)
other_cb = fields.CharField(
max_length=1,
db_column='OTHER_CB',
blank=True,
help_text="Other entity interests info included check-box. Legal \
values are 'X' and null."
)
other_int = fields.CharField(
max_length=90,
db_column='OTHER_INT',
blank=True,
help_text="Other entity interests description"
)
primfrm_yn = fields.CharField(
max_length=1,
db_column='PRIMFRM_YN',
blank=True,
help_text="Primarily Formed Committee (yes/no) checkbox. Legal \
values are 'Y' or 'N'."
)
REC_TYPE_CHOICES = (
("CVR", "Cover"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
report_num = fields.CharField(
max_length=3,
db_column='REPORT_NUM',
help_text="Amendment number, as reported by the filer \
Report Number 000 represents an original filing. 001-999 are amendments."
)
reportname = fields.CharField(
max_length=3,
db_column='REPORTNAME',
blank=True,
help_text="Attached campaign disclosure statement type. Legal \
values are 450, 460, and 461."
)
rpt_att_cb = fields.CharField(
max_length=4,
db_column='RPT_ATT_CB',
blank=True,
help_text="Committee Report Attached check-box. Legal values \
are 'X' or null. This field applies to the form 401."
)
rpt_date = fields.DateTimeField(
db_column='RPT_DATE',
null=True,
help_text="Date this report was filed, according to the filer"
)
rptfromdt = fields.DateTimeField(
null=True,
db_column='RPTFROMDT',
blank=True,
help_text="Attached campaign disclosure statement - Period from \
date."
)
rptthrudt = fields.DateTimeField(
null=True,
db_column='RPTTHRUDT',
blank=True,
help_text="Attached campaign disclosure statement - Period \
through date."
)
selfemp_cb = fields.CharField(
max_length=1,
db_column='SELFEMP_CB',
blank=True,
help_text="Self employed check-box"
)
sponsor_yn = fields.IntegerField(
null=True,
db_column='SPONSOR_YN',
blank=True,
help_text="Sponsored Committee (yes/no) checkbox. Legal values \
are 'Y' or 'N'."
)
stmt_type = fields.CharField(
max_length=2,
db_column='STMT_TYPE',
blank=True,
help_text='Type of statement'
)
sup_opp_cd = fields.CharField(
max_length=1,
db_column='SUP_OPP_CD',
blank=True,
help_text='Support/oppose code. Legal values are "S" for support \
or "O" for oppose.'
)
thru_date = fields.DateTimeField(
null=True,
db_column='THRU_DATE',
blank=True,
help_text='Reporting period through date'
)
# tres_adr1 = fields.CharField(
# max_length=55, db_column='TRES_ADR1', blank=True
# )
# tres_adr2 = fields.CharField(
# max_length=55, db_column='TRES_ADR2', blank=True
# )
tres_city = fields.CharField(
max_length=30,
db_column='TRES_CITY',
blank=True,
help_text="City portion of the treasurer or responsible \
officer's street address."
)
tres_email = fields.CharField(
max_length=60,
db_column='TRES_EMAIL',
blank=True,
help_text="Treasurer or responsible officer's email"
)
tres_fax = fields.CharField(
max_length=20,
db_column='TRES_FAX',
blank=True,
help_text="Treasurer or responsible officer's fax number"
)
tres_namf = fields.CharField(
max_length=45,
db_column='TRES_NAMF',
blank=True,
help_text="Treasurer or responsible officer's first name"
)
tres_naml = fields.CharField(
max_length=200,
db_column='TRES_NAML',
blank=True,
help_text="Treasurer or responsible officer's last name"
)
tres_nams = fields.CharField(
max_length=10,
db_column='TRES_NAMS',
blank=True,
help_text="Treasurer or responsible officer's suffix"
)
tres_namt = fields.CharField(
max_length=10,
db_column='TRES_NAMT',
blank=True,
help_text="Treasurer or responsible officer's prefix or title"
)
tres_phon = fields.CharField(
max_length=20,
db_column='TRES_PHON',
blank=True,
help_text="Treasurer or responsible officer's phone number"
)
tres_st = fields.CharField(
max_length=2,
db_column='TRES_ST',
blank=True,
help_text="Treasurer or responsible officer's state"
)
tres_zip4 = fields.CharField(
max_length=10,
db_column='TRES_ZIP4',
blank=True,
help_text="Treasurer or responsible officer's ZIP Code"
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'CVR_CAMPAIGN_DISCLOSURE_CD'
verbose_name = 'CVR_CAMPAIGN_DISCLOSURE_CD'
verbose_name_plural = 'CVR_CAMPAIGN_DISCLOSURE_CD'
def __str__(self):
return str(self.filing_id)
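# Usage sketch: selecting Form 460 cover pages filed for a given reporting
# year, using the FORM_TYPE_CHOICES and date fields defined above. The import
# path and the year are illustrative assumptions.
#
#   from calaccess_raw.models import CvrCampaignDisclosureCd
#   f460s = CvrCampaignDisclosureCd.objects.filter(
#       form_type='F460',
#       from_date__year=2014,
#   ).order_by('-rpt_date')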
@python_2_unicode_compatible
class Cvr2CampaignDisclosureCd(CalAccessBaseModel):
"""
Record used to carry additional names for the campaign
disclosure forms below.
"""
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
bal_juris = fields.CharField(
max_length=40,
db_column='BAL_JURIS',
blank=True,
help_text="Ballot measure jurisdiction"
)
bal_name = fields.CharField(
max_length=200,
db_column='BAL_NAME',
blank=True,
help_text="Ballot measure name"
)
bal_num = fields.CharField(
max_length=7,
db_column='BAL_NUM',
blank=True,
help_text="Ballot measure number or letter"
)
cmte_id = fields.CharField(
max_length=9,
db_column='CMTE_ID',
blank=True,
help_text="Commitee identification number, when the entity \
is a committee"
)
control_yn = fields.IntegerField(
null=True,
db_column='CONTROL_YN',
blank=True,
help_text='Controlled Committee (yes/no) checkbox. Legal values \
are "Y" or "N".'
)
dist_no = fields.CharField(
max_length=3,
db_column='DIST_NO',
blank=True,
help_text="District number for the office being sought. Populated \
for Senate, Assembly, or Board of Equalization races."
)
ENTITY_CODE_CHOICES = (
# Defined here:
# http://www.documentcloud.org/documents/1308003-cal-access-cal-\
# format.html#document/p9
('', 'Unknown'),
('ATR', 'Assistant treasurer'),
('BNM', 'Ballot measure\'s name/title'),
('CAO', 'Candidate/officeholder'),
('CTL', 'Controlled committee'),
('COM', 'Committee'),
('FIL', 'Candidate filing/ballot fees'),
('OFF', 'Officer (Responsible)'),
('PEX', 'PEX (Unknown)'),
('POF', 'Principal officer'),
('PRO', 'Proponent'),
('RCP', 'Recipient committee'),
('RDP', 'RDP (Unknown)'),
)
entity_cd = fields.CharField(
max_length=3,
db_column='ENTITY_CD',
blank=True,
verbose_name='entity code',
choices=ENTITY_CODE_CHOICES,
)
# enty_adr1 = fields.CharField(
# max_length=55, db_column='ENTY_ADR1', blank=True
# )
# enty_adr2 = fields.CharField(
# max_length=55, db_column='ENTY_ADR2', blank=True
# )
enty_city = fields.CharField(
max_length=30,
db_column='ENTY_CITY',
blank=True,
help_text="Entity city"
)
enty_email = fields.CharField(
max_length=60,
db_column='ENTY_EMAIL',
blank=True,
help_text="Entity email address"
)
enty_fax = fields.CharField(
max_length=20,
db_column='ENTY_FAX',
blank=True,
help_text="Entity fax number"
)
enty_namf = fields.CharField(
max_length=45,
db_column='ENTY_NAMF',
blank=True,
help_text="Entity first name, if an individual"
)
enty_naml = fields.CharField(
max_length=200,
db_column='ENTY_NAML',
blank=True,
help_text="Entity name, or last name if an individual"
)
enty_nams = fields.CharField(
max_length=10,
db_column='ENTY_NAMS',
blank=True,
help_text="Entity suffix, if an individual"
)
enty_namt = fields.CharField(
max_length=10,
db_column='ENTY_NAMT',
blank=True,
help_text="Entity prefix or title, if an individual"
)
enty_phon = fields.CharField(
max_length=20,
db_column='ENTY_PHON',
blank=True,
help_text="Entity phone number"
)
enty_st = fields.CharField(
max_length=2,
db_column='ENTY_ST',
blank=True,
help_text="Entity state"
)
enty_zip4 = fields.CharField(
max_length=10,
db_column='ENTY_ZIP4',
blank=True,
help_text="Entity ZIP code"
)
f460_part = fields.CharField(
max_length=2,
db_column='F460_PART',
blank=True,
help_text="Part of 460 cover page coded on ths cvr2 record. Legal \
values are 3, 4a, 4b, 5a, 5b, or 6."
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
FORM_TYPE_CHOICES = (
('F425', 'Form 425 (Semi-annual statement of no activity, \
non-controlled committees)'),
('F450', 'Form 450 (Recipient committee campaign statement, \
short form)'),
('F460', 'Form 460 (Recipient committee campaign statement)'),
('F465', 'Form 465 (Supplemental independent expenditure report)'),
)
form_type = fields.CharField(
choices=FORM_TYPE_CHOICES,
max_length=4,
db_column='FORM_TYPE',
help_text='Name of the source filing form or schedule'
)
juris_cd = fields.CharField(
max_length=3,
db_column='JURIS_CD',
blank=True,
help_text="Office jurisdiction code"
)
juris_dscr = fields.CharField(
max_length=40,
db_column='JURIS_DSCR',
blank=True,
help_text="Office jurisdiction description"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
# mail_adr1 = fields.CharField(
# max_length=55, db_column='MAIL_ADR1', blank=True
# )
# mail_adr2 = fields.CharField(
# max_length=55, db_column='MAIL_ADR2', blank=True
# )
mail_city = fields.CharField(
max_length=30,
db_column='MAIL_CITY',
blank=True,
help_text="Filer's mailing city"
)
mail_st = fields.CharField(
max_length=2,
db_column='MAIL_ST',
blank=True,
help_text="Filer's mailing state"
)
mail_zip4 = fields.CharField(
max_length=10,
db_column='MAIL_ZIP4',
blank=True,
help_text="Filer's mailing ZIP Code"
)
off_s_h_cd = fields.CharField(
max_length=1,
db_column='OFF_S_H_CD',
blank=True,
help_text='Office sought/held code. Indicates if the candidate is an \
incumbent. Legal values are "S" for sought and "H" for held.'
)
offic_dscr = fields.CharField(
max_length=40,
db_column='OFFIC_DSCR',
blank=True,
help_text="Office sought description"
)
office_cd = fields.CharField(
max_length=3,
db_column='OFFICE_CD',
blank=True,
verbose_name="Office code",
help_text="Code that identifies the office being sought"
)
REC_TYPE_CHOICES = (
("CVR2", "Cover, Page 2"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
sup_opp_cd = fields.CharField(
max_length=1,
db_column='SUP_OPP_CD',
blank=True,
help_text='Support/Oppose (S/O) code for the ballot measure. \
Legal values are "S" for support or "O" for oppose.'
)
title = fields.CharField(
max_length=90,
db_column='TITLE',
blank=True,
help_text="Official title of filing officer. Applies to the form 465."
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
tres_namf = fields.CharField(
max_length=45,
db_column='TRES_NAMF',
blank=True,
help_text="Treasurer or responsible officer's first name"
)
tres_naml = fields.CharField(
max_length=200,
db_column='TRES_NAML',
blank=True,
help_text="Treasurer or responsible officer's last name"
)
tres_nams = fields.CharField(
max_length=10,
db_column='TRES_NAMS',
blank=True,
help_text="Treasurer or responsible officer's suffix"
)
tres_namt = fields.CharField(
max_length=10,
db_column='TRES_NAMT',
blank=True,
help_text="Treasurer or responsible officer's prefix or title"
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'CVR2_CAMPAIGN_DISCLOSURE_CD'
verbose_name = 'CVR2_CAMPAIGN_DISCLOSURE_CD'
verbose_name_plural = 'CVR2_CAMPAIGN_DISCLOSURE_CD'
def __str__(self):
return str(self.filing_id)
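# Usage sketch: pulling the page-two entities (e.g. controlled committees or
# principal officers) tied to one campaign disclosure filing. The model path
# and filing_id are assumptions for illustration.
#
#   from calaccess_raw.models import Cvr2CampaignDisclosureCd
#   entities = Cvr2CampaignDisclosureCd.objects.filter(
#       filing_id=123456
#   ).values('entity_cd', 'enty_naml', 'enty_namf', 'f460_part')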
@python_2_unicode_compatible
class RcptCd(CalAccessBaseModel):
"""
Receipts schedules for the following forms.
Form 460 (Recipient Committee Campaign Statement)
Schedules A, C, I, and A-1.
Form 401 (Slate Mailer Organization Campaign Statement) Schedule A.
"""
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
amount = fields.DecimalField(
decimal_places=2,
max_digits=14,
db_column='AMOUNT',
help_text="Amount Received (Monetary, Inkkind, Promise)"
)
bakref_tid = fields.CharField(
max_length=20,
db_column='BAKREF_TID',
blank=True,
help_text="Back Reference to a transaction identifier of a parent \
record"
)
bal_juris = fields.CharField(
max_length=40,
db_column='BAL_JURIS',
blank=True,
help_text="Jurisdiction of ballot measure. Used on the Form 401 \
Schedule A"
)
bal_name = fields.CharField(
max_length=200,
db_column='BAL_NAME',
blank=True,
help_text="Ballot measure name. Used on the Form 401 Schedule A"
)
bal_num = fields.CharField(
max_length=7,
db_column='BAL_NUM',
blank=True,
help_text="Ballot measure number or letter. Used on the Form 401 \
Schedule A"
)
cand_namf = fields.CharField(
max_length=45,
db_column='CAND_NAMF',
blank=True,
help_text="Candidate/officeholder's first name. Used on the Form \
401 Schedule A"
)
cand_naml = fields.CharField(
max_length=200,
db_column='CAND_NAML',
blank=True,
help_text="Candidate/officeholder's last name. Used on the Form \
401 Schedule A"
)
cand_nams = fields.CharField(
max_length=10,
db_column='CAND_NAMS',
blank=True,
help_text="Candidate/officeholder's name suffix. Used on the Form \
401 Schedule A"
)
cand_namt = fields.CharField(
max_length=10,
db_column='CAND_NAMT',
blank=True,
help_text="Candidate/officeholder's name prefix or title. Used on \
the Form 401 Schedule A"
)
cmte_id = fields.CharField(
max_length=9,
db_column='CMTE_ID',
blank=True,
help_text="Committee Identification number"
)
# ctrib_adr1 = fields.CharField(
# max_length=55,
# db_column='CTRIB_ADR1',
# blank=True,
# default="",
# help_text="First line of the contributor's street address"
# )
# ctrib_adr2 = fields.CharField(
# max_length=55,
# db_column='CTRIB_ADR2',
# blank=True,
# help_text="Second line of the contributor's street address"
# )
ctrib_city = fields.CharField(
max_length=30,
db_column='CTRIB_CITY',
blank=True,
help_text="Contributor's City"
)
ctrib_dscr = fields.CharField(
max_length=90,
db_column='CTRIB_DSCR',
blank=True,
help_text="Description of goods/services received"
)
ctrib_emp = fields.CharField(
max_length=200,
db_column='CTRIB_EMP',
blank=True,
help_text="Employer"
)
ctrib_namf = fields.CharField(
max_length=45,
db_column='CTRIB_NAMF',
blank=True,
help_text="Contributor's First Name"
)
ctrib_naml = fields.CharField(
max_length=200,
db_column='CTRIB_NAML',
help_text="Contributor's last name or business name"
)
ctrib_nams = fields.CharField(
max_length=10,
db_column='CTRIB_NAMS',
blank=True,
help_text="Contributor's Suffix"
)
ctrib_namt = fields.CharField(
max_length=10,
db_column='CTRIB_NAMT',
blank=True,
help_text="Contributor's Prefix or Title"
)
ctrib_occ = fields.CharField(
max_length=60,
db_column='CTRIB_OCC',
blank=True,
help_text="Occupation"
)
ctrib_self = fields.CharField(
max_length=1,
db_column='CTRIB_SELF',
blank=True,
help_text="Self Employed Check-box"
)
ctrib_st = fields.CharField(
max_length=2,
db_column='CTRIB_ST',
blank=True,
help_text="Contributor's State"
)
ctrib_zip4 = fields.CharField(
max_length=10,
db_column='CTRIB_ZIP4',
blank=True,
help_text="Contributor's ZIP+4"
)
cum_oth = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='CUM_OTH',
blank=True,
help_text="Cumulative Other (Sched A, A-1)"
)
cum_ytd = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='CUM_YTD',
blank=True,
help_text="Cumulative year to date amount (Form 460 Schedule A \
and Form 401 Schedule A, A-1)"
)
date_thru = fields.DateField(
null=True,
db_column='DATE_THRU',
blank=True,
help_text="End of date range for items received"
)
dist_no = fields.CharField(
max_length=3,
db_column='DIST_NO',
blank=True,
help_text="Office District Number (used on F401A)"
)
ENTITY_CODE_CHOICES = (
# Defined here:
# http://www.documentcloud.org/documents/1308003-cal-access-cal-\
# format.html#document/p9
("", "None"),
("0", "0 (Unknown)"),
("BNM", "Ballot measure\'s name/title"),
("COM", "Committee"),
("IND", "Individual"),
("OFF", "Officer (Responsible)"),
("OTH", "Other"),
("PTY", "Political party"),
("RCP", "Recipient commmittee"),
("SCC", "Small contributor committee"),
)
entity_cd = fields.CharField(
max_length=3,
db_column='ENTITY_CD',
blank=True,
help_text="Entity code: Values [CMO|RCP|IND|OTH]",
choices=ENTITY_CODE_CHOICES
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
FORM_TYPE_CHOICES = (
('F900', 'Form 900 (Public employee\'s retirement board, \
candidate campaign statement): Schedule A'),
('A-1', 'Form 460: Schedule A-1, contributions transferred \
to special election committees'),
('E530', 'Form E530 (Issue advocacy receipts)'),
('F496P3', 'Form 496 (Late independent expenditure): \
Part 3, contributions > $100 received'),
('F401A', 'Form 401 (Slate mailer organization): Schedule A, \
payments received'),
('I', 'Form 460 (Recipient committee campaign statement): \
Schedule I, miscellaneous increases to cash'),
('C', 'Form 460 (Recipient committee campaign statement): \
Schedule C, non-monetary contributions received'),
('A', 'Form 460 (Recipient committee campaign statement): \
Schedule A, monetary contributions received')
)
form_type = fields.CharField(
choices=FORM_TYPE_CHOICES,
max_length=9,
db_column='FORM_TYPE',
help_text='Name of the source filing form or schedule'
)
int_rate = fields.CharField(
max_length=9,
db_column='INT_RATE',
blank=True,
help_text="This field is undocumented"
)
# intr_adr1 = fields.CharField(
# max_length=55,
# db_column='INTR_ADR1',
# blank=True,
# help_text="First line of the intermediary's street address."
# )
# intr_adr2 = fields.CharField(
# max_length=55,
# db_column='INTR_ADR2',
# blank=True,
# help_text="Second line of the Intermediary's street address."
# )
intr_city = fields.CharField(
max_length=30,
db_column='INTR_CITY',
blank=True,
help_text="Intermediary's City"
)
intr_cmteid = fields.CharField(
max_length=9,
db_column='INTR_CMTEID',
blank=True,
help_text="This field is undocumented"
)
intr_emp = fields.CharField(
max_length=200,
db_column='INTR_EMP',
blank=True,
help_text="Intermediary's Employer"
)
intr_namf = fields.CharField(
max_length=45,
db_column='INTR_NAMF',
blank=True,
help_text="Intermediary's First Name"
)
intr_naml = fields.CharField(
max_length=200,
db_column='INTR_NAML',
blank=True,
help_text="Intermediary's Last Name"
)
intr_nams = fields.CharField(
max_length=10,
db_column='INTR_NAMS',
blank=True,
help_text="Intermediary's Suffix"
)
intr_namt = fields.CharField(
max_length=10,
db_column='INTR_NAMT',
blank=True,
help_text="Intermediary's Prefix or Title"
)
intr_occ = fields.CharField(
max_length=60,
db_column='INTR_OCC',
blank=True,
help_text="Intermediary's Occupation"
)
intr_self = fields.CharField(
max_length=1,
db_column='INTR_SELF',
blank=True,
help_text="Intermediary's self employed check box"
)
intr_st = fields.CharField(
max_length=2,
db_column='INTR_ST',
blank=True,
help_text="Intermediary's state"
)
intr_zip4 = fields.CharField(
max_length=10,
db_column='INTR_ZIP4',
blank=True,
help_text="Intermediary's zip code"
)
juris_cd = fields.CharField(
max_length=3,
db_column='JURIS_CD',
blank=True,
help_text="Office jurisdiction code. See the CAL document for the \
list of legal values. Used on Form 401 Schedule A"
)
juris_dscr = fields.CharField(
max_length=40,
db_column='JURIS_DSCR',
blank=True,
help_text="Office Jurisdiction Description (used on F401A)"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
memo_code = fields.CharField(
max_length=1,
db_column='MEMO_CODE',
blank=True,
help_text="Memo amount flag (Date/Amount are informational only)"
)
memo_refno = fields.CharField(
max_length=20,
db_column='MEMO_REFNO',
blank=True,
help_text="Reference to text contained in a TEXT record"
)
off_s_h_cd = fields.CharField(
max_length=1,
db_column='OFF_S_H_CD',
blank=True,
help_text="Office Sought/Held Code. Used on the Form 401 \
Schedule A. Legal values are 'S' for sought and 'H' for \
held"
)
offic_dscr = fields.CharField(
max_length=40,
db_column='OFFIC_DSCR',
blank=True,
help_text="Office Sought Description (used on F401A)"
)
office_cd = fields.CharField(
max_length=3,
db_column='OFFICE_CD',
blank=True,
help_text="Code that identifies the office being sought. See the \
CAL document for a list of valid codes. Used on the \
Form 401 Schedule A)"
)
rcpt_date = fields.DateField(
db_column='RCPT_DATE',
null=True,
help_text="Date item received"
)
REC_TYPE_CHOICES = (
("E530", "E530"),
("RCPT", "RCPT"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
sup_opp_cd = fields.CharField(
max_length=1,
db_column='SUP_OPP_CD',
blank=True,
help_text="Support/oppose code. Legal values are 'S' for support \
or 'O' for oppose. Used on Form 401 Sechedule A. \
Transaction identifier - permanent value unique to this item"
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
tran_type = fields.CharField(
max_length=1,
db_column='TRAN_TYPE',
blank=True,
help_text="Transaction Type: Values T- third party | F Forgiven \
loan | R Returned (Negative amount)"
)
# tres_adr1 = fields.CharField(
# max_length=55,
# db_column='TRES_ADR1',
# blank=True,
# help_text="First line of the treasurer or responsible officer's \
# street address"
# )
# tres_adr2 = fields.CharField(
# max_length=55,
# db_column='TRES_ADR2',
# blank=True,
# help_text="Second line of the treasurer or responsible officer's \
# street address"
# )
tres_city = fields.CharField(
max_length=30,
db_column='TRES_CITY',
blank=True,
help_text="City portion of the treasurer or responsible officer's \
street address"
)
tres_namf = fields.CharField(
max_length=45,
db_column='TRES_NAMF',
blank=True,
help_text="Treasurer or responsible officer's first name"
)
tres_naml = fields.CharField(
max_length=200,
db_column='TRES_NAML',
blank=True,
help_text="Treasurer or responsible officer's last name"
)
tres_nams = fields.CharField(
max_length=10,
db_column='TRES_NAMS',
blank=True,
help_text="Treasurer or responsible officer's suffix"
)
tres_namt = fields.CharField(
max_length=10,
db_column='TRES_NAMT',
blank=True,
help_text="Treasurer or responsible officer's prefix or title"
)
tres_st = fields.CharField(
max_length=2,
db_column='TRES_ST',
blank=True,
help_text="State portion of the treasurer or responsible officer's \
address"
)
tres_zip4 = fields.CharField(
null=True,
max_length=10,
blank=True,
db_column='TRES_ZIP4',
help_text="Zip code portion of the treasurer or responsible officer's \
address"
)
xref_match = fields.CharField(
max_length=1,
db_column='XREF_MATCH',
blank=True,
help_text="Related item on other schedule has same transaction \
identifier. 'X' indicates this condition is true"
)
xref_schnm = fields.CharField(
max_length=2,
db_column='XREF_SCHNM',
blank=True,
help_text="Related record is included on Sched 'B2' or 'F'"
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'RCPT_CD'
verbose_name = 'RCPT_CD'
verbose_name_plural = 'RCPT_CD'
def __str__(self):
return str(self.filing_id)
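# Usage sketch: totaling Schedule A monetary contributions reported on a single
# Form 460 filing. django.db.models.Sum is part of the standard ORM; the
# import path and filing_id are assumptions.
#
#   from django.db.models import Sum
#   from calaccess_raw.models import RcptCd
#   total = RcptCd.objects.filter(
#       filing_id=123456, form_type='A'
#   ).aggregate(total=Sum('amount'))['total']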
@python_2_unicode_compatible
class Cvr3VerificationInfoCd(CalAccessBaseModel):
"""
Cover page verification information from campaign disclosure forms
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"LINE_ITEM",
"REC_TYPE",
"FORM_TYPE"
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
REC_TYPE_CHOICES = (
("CVR3", "CVR3"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
FORM_TYPE_CHOICES = (
('F400', 'Form 400 (Statement of organization, \
slate mailer organization)'),
('F401', 'Form 401 (Slate mailer organization campaign statement)'),
('F402', 'Form 402 (Statement of termination, \
slate mailer organization)'),
('F410', 'Form 410 (Statement of organization, recipient committee)'),
('F425', 'Form 425 (Semi-annual statement of no activity, \
non-controlled committees)'),
('F450', 'Form 450 (Recipient committee campaign statement, \
short form)'),
('F460', 'Form 460 (Recipient committee campaign statement)'),
('F461', 'Form 461 (Independent expenditure and major donor \
committee campaign statement)'),
('F465', 'Form 465 (Supplemental independent expenditure report)'),
('F511', 'Form 511 (Paid spokesman report)'),
('F900', 'Form 900 (Public employee\'s retirement board, \
candidate campaign statement)'),
)
form_type = fields.CharField(
db_column='FORM_TYPE',
max_length=4,
help_text='Name of the source filing form or schedule',
db_index=True,
choices=FORM_TYPE_CHOICES,
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
ENTITY_CODE_CHOICES = (
# Defined here:
# http://www.documentcloud.org/documents/1308003-cal-access-cal-\
# format.html#document/p9
('', 'Unknown'),
('0', '0 (Unknown)'),
('ATR', 'Assistant treasurer'),
('BBB', 'BBB (Unknown)'),
('COA', 'COA (Unknown)'),
('CAO', 'Candidate/officeholder'),
('CON', 'State controller'),
('MAI', 'MAI (Unknown)'),
('MDI', 'Major donor/independent expenditure'),
('OFF', 'Officer (Responsible)'),
('POF', 'Principal officer'),
('PRO', 'Proponent'),
('RCP', 'Recipient committee'),
('SPO', 'Sponsor'),
('TRE', 'Treasurer'),
)
entity_cd = fields.CharField(
db_column='ENTITY_CD',
max_length=3,
blank=True,
verbose_name='entity code',
choices=ENTITY_CODE_CHOICES,
)
sig_date = fields.DateField(
verbose_name='signed date',
db_column='SIG_DATE',
blank=True,
null=True,
help_text='date when signed',
)
sig_loc = fields.CharField(
verbose_name='signed location',
db_column='SIG_LOC',
max_length=39,
blank=True,
help_text='city and state where signed',
)
sig_naml = fields.CharField(
verbose_name='last name',
db_column='SIG_NAML',
max_length=56,
blank=True,
help_text='last name of the signer',
)
sig_namf = fields.CharField(
verbose_name='first name',
db_column='SIG_NAMF',
max_length=45,
blank=True,
help_text='first name of the signer',
)
sig_namt = fields.CharField(
verbose_name='title',
db_column='SIG_NAMT',
max_length=10,
blank=True,
help_text='title of the signer',
)
sig_nams = fields.CharField(
verbose_name='suffix',
db_column='SIG_NAMS',
max_length=8,
blank=True,
help_text='suffix of the signer',
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'CVR3_VERIFICATION_INFO_CD'
verbose_name = 'CVR3_VERIFICATION_INFO_CD'
verbose_name_plural = 'CVR3_VERIFICATION_INFO_CD'
def __str__(self):
return str(self.filing_id)
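# Usage sketch: listing who signed (verified) a given filing. The import path
# and filing_id are illustrative assumptions.
#
#   from calaccess_raw.models import Cvr3VerificationInfoCd
#   signers = Cvr3VerificationInfoCd.objects.filter(filing_id=123456).values(
#       'entity_cd', 'sig_namf', 'sig_naml', 'sig_date'
#   )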
@python_2_unicode_compatible
class LoanCd(CalAccessBaseModel):
"""
Loans received and made
"""
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
bakref_tid = fields.CharField(
max_length=20,
db_column='BAKREF_TID',
blank=True,
help_text="Back Reference to transaction identifier of parent record"
)
cmte_id = fields.CharField(
max_length=9,
db_column='CMTE_ID',
blank=True,
verbose_name="Committee ID",
help_text="Committee identification number"
)
ENTITY_CODE_CHOICES = (
# Defined here:
# http://www.documentcloud.org/documents/1308003-cal-access-cal-\
# format.html#document/p9
('', 'Unknown'),
('COM', "Committee"),
("IND", "Person (spending > $5,000)"),
("OTH", "Other"),
("PTY", "Political party"),
('RCP', 'Recipient committee'),
('SCC', 'Small contributor committee'),
)
entity_cd = fields.CharField(
max_length=3,
db_column='ENTITY_CD',
blank=True,
verbose_name="entity code",
choices=ENTITY_CODE_CHOICES,
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
FORM_TYPE_CHOICES = (
('B1', 'Form 460 (Recipient committee campaign statement): \
Schedule B1'),
('B2', 'Form 460 (Recipient committee campaign statement): \
Schedule B2'),
('B3', 'Form 460 (Recipient committee campaign statement): \
Schedule B3'),
('H', 'Form 460 (Recipient committee campaign statement): \
Schedule H'),
('H1', 'Form 460 (Recipient committee campaign statement): \
Schedule H1'),
('H2', 'Form 460 (Recipient committee campaign statement): \
Schedule H2'),
('H3', 'Form 460 (Recipient committee campaign statement): \
Schedule H3'),
)
form_type = fields.CharField(
max_length=2,
db_column='FORM_TYPE',
choices=FORM_TYPE_CHOICES,
help_text='Name of the source filing form or schedule'
)
# intr_adr1 = fields.CharField(
# max_length=55, db_column='INTR_ADR1', blank=True
# )
# intr_adr2 = fields.CharField(
# max_length=55, db_column='INTR_ADR2', blank=True
# )
intr_city = fields.CharField(
max_length=30,
db_column='INTR_CITY',
blank=True,
help_text="Intermediary's city"
)
intr_namf = fields.CharField(
max_length=45,
db_column='INTR_NAMF',
blank=True,
help_text="Intermediary's first name"
)
intr_naml = fields.CharField(
max_length=200,
db_column='INTR_NAML',
blank=True,
help_text="Intermediary's last name"
)
intr_nams = fields.CharField(
max_length=10,
db_column='INTR_NAMS',
blank=True,
help_text="Intermediary's suffix"
)
intr_namt = fields.CharField(
max_length=10,
db_column='INTR_NAMT',
blank=True,
help_text="Intermediary's title or prefix"
)
intr_st = fields.CharField(
max_length=2,
db_column='INTR_ST',
blank=True,
help_text="Intermediary's state"
)
intr_zip4 = fields.CharField(
max_length=10,
db_column='INTR_ZIP4',
blank=True,
help_text="Intermediary's ZIP Code"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
lndr_namf = fields.CharField(
max_length=45,
db_column='LNDR_NAMF',
blank=True,
help_text="Lender's first name"
)
lndr_naml = fields.CharField(
max_length=200,
db_column='LNDR_NAML',
help_text="Lender's last name or business name"
)
lndr_nams = fields.CharField(
max_length=10,
db_column='LNDR_NAMS',
blank=True,
help_text="Lender's suffix"
)
lndr_namt = fields.CharField(
max_length=10,
db_column='LNDR_NAMT',
blank=True,
help_text="Lender's title or prefix"
)
# loan_adr1 = fields.CharField(
# max_length=55, db_column='LOAN_ADR1', blank=True
# )
# loan_adr2 = fields.CharField(
# max_length=55, db_column='LOAN_ADR2', blank=True
# )
loan_amt1 = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='LOAN_AMT1',
blank=True,
help_text="Repaid or forgiven amount; Original loan amount. The \
content of this column varies based on the \
schedule/part that the record applies to. See the CAL \
document for a description of the value of this field."
)
loan_amt2 = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='LOAN_AMT2',
blank=True,
help_text="Outstanding Principal; unpaid balance. The content of \
this column varies based on the schedule/part that the \
record applies to. See the CAL document for a \
description of the value of this field."
)
loan_amt3 = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='LOAN_AMT3',
blank=True,
help_text="Interest Paid; Unpaid interest; Interest received. The \
content of this column varies based on the \
schedule/part that the record applies to. See the CAL \
document for a description of the value of this field."
)
loan_amt4 = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='LOAN_AMT4',
blank=True,
help_text="Cumulative Amount/Other. The content of this column \
varies based on the schedule/part that the record \
applies to. See the CAL document for a description of the \
value of this field."
)
loan_amt5 = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='LOAN_AMT5',
blank=True,
help_text="This field is undocumented"
)
loan_amt6 = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='LOAN_AMT6',
blank=True,
help_text="This field is undocumented"
)
loan_amt7 = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='LOAN_AMT7',
blank=True,
help_text="This field is undocumented"
)
loan_amt8 = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='LOAN_AMT8',
blank=True,
help_text="This field is undocumented"
)
loan_city = fields.CharField(
max_length=30,
db_column='LOAN_CITY',
blank=True,
help_text="Lender's city"
)
loan_date1 = fields.DateField(
db_column='LOAN_DATE1',
null=True,
help_text="Date the loan was made or recieved. The content of this \
column varies based on the schedule/part that the \
record applies to. See the CAL document for a description of the value."
)
loan_date2 = fields.DateField(
null=True,
db_column='LOAN_DATE2',
blank=True,
help_text="Date repaid/forgiven; date loan due. The content of this \
column varies based on the schedule/part that the \
record applies to. See the CAL document for a \
description of the value of this field."
)
loan_emp = fields.CharField(
max_length=200,
db_column='LOAN_EMP',
blank=True,
help_text="Loan employer. Applies to the Form 460 Schedule B \
Part 1."
)
loan_occ = fields.CharField(
max_length=60,
db_column='LOAN_OCC',
blank=True,
help_text="Loan occupation. Applies to the Form 460 Schedule B \
Part 1."
)
loan_rate = fields.CharField(
max_length=30,
db_column='LOAN_RATE',
blank=True,
help_text="Interest Rate. The content of this column varies based \
on the schedule/part that the record applies to. See the \
CAL document for a description of the value of this field."
)
loan_self = fields.CharField(
max_length=1,
db_column='LOAN_SELF',
blank=True,
help_text="Self-employed checkbox"
)
loan_st = fields.CharField(
max_length=2,
db_column='LOAN_ST',
blank=True,
help_text="Lender's state"
)
loan_type = fields.CharField(
max_length=3,
db_column='LOAN_TYPE',
blank=True,
help_text="Type of loan"
)
loan_zip4 = fields.CharField(
max_length=10,
db_column='LOAN_ZIP4',
blank=True,
help_text="Lender's ZIP Code"
)
memo_code = fields.CharField(
max_length=1,
db_column='MEMO_CODE',
blank=True,
help_text="Memo amount flag"
)
memo_refno = fields.CharField(
max_length=20,
db_column='MEMO_REFNO',
blank=True,
help_text="Reference to text contained in a TEXT record"
)
REC_TYPE_CHOICES = (
("LOAN", "LOAN"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
# tres_adr1 = fields.CharField(
# max_length=55, db_column='TRES_ADR1', blank=True
# )
# tres_adr2 = fields.CharField(
# max_length=55, db_column='TRES_ADR2', blank=True
# )
tres_city = fields.CharField(
max_length=30,
db_column='TRES_CITY',
blank=True,
help_text="Treasurer or responsible officer's city"
)
tres_namf = fields.CharField(
max_length=45,
db_column='TRES_NAMF',
blank=True,
help_text="Treasurer or responsible officer's first name"
)
tres_naml = fields.CharField(
max_length=200,
db_column='TRES_NAML',
blank=True,
help_text="Treasurer or responsible officer's last name"
)
tres_nams = fields.CharField(
max_length=10,
db_column='TRES_NAMS',
blank=True,
help_text="Treasurer or responsible officer's suffix"
)
tres_namt = fields.CharField(
max_length=10,
db_column='TRES_NAMT',
blank=True,
help_text="Treasurer or responsible officer's title or prefix"
)
tres_st = fields.CharField(
max_length=2,
db_column='TRES_ST',
blank=True,
help_text="Treasurer or responsible officer's street address"
)
tres_zip4 = fields.CharField(
max_length=10,
db_column='TRES_ZIP4',
blank=True,
help_text="Treasurer or responsible officer's ZIP Code"
)
xref_match = fields.CharField(
max_length=1,
db_column='XREF_MATCH',
blank=True,
help_text='Related item on other schedule has same transaction \
identifier. "X" indicates this condition is true.'
)
xref_schnm = fields.CharField(
max_length=2,
db_column='XREF_SCHNM',
blank=True,
help_text="Related record is included on Form 460 Schedule 'A' or 'E'"
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'LOAN_CD'
verbose_name = 'LOAN_CD'
verbose_name_plural = 'LOAN_CD'
def __str__(self):
return str(self.filing_id)
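# Usage sketch: summing the outstanding principal (LOAN_AMT2) reported on
# Schedule B1 of a filing. Field semantics follow the help_text above; the
# import path and filing_id are assumptions.
#
#   from django.db.models import Sum
#   from calaccess_raw.models import LoanCd
#   outstanding = LoanCd.objects.filter(
#       filing_id=123456, form_type='B1'
#   ).aggregate(total=Sum('loan_amt2'))['total']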
@python_2_unicode_compatible
class S401Cd(CalAccessBaseModel):
"""
This table contains Form 401 (Slate Mailer Organization) payment and other
disclosure schedule (F401B, F401B-1, F401C, F401D) information.
"""
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
REC_TYPE_CHOICES = (
("S401", "S401"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
FORM_TYPE_CHOICES = (
('F401B', 'Form 401 (Slate mailer organization campaign statement): \
Schedule B, payments made'),
('F401B-1', 'Form 401 (Slate mailer organization campaign statement): \
Schedule B-1, payments made by agent or independent contractor'),
('F401C', 'Form 401 (Slate mailer organization campaign statement): \
Schedule C, persons receiving $1,000 or more'),
('F401D', 'Form 401 (Slate mailer organization campaign statement): \
Schedule D, candidates or measures supported or opposed with < $100 payment'),
)
form_type = fields.CharField(
max_length=7,
db_column='FORM_TYPE',
blank=True,
choices=FORM_TYPE_CHOICES,
help_text='Name of the source filing form or schedule'
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
agent_naml = fields.CharField(
max_length=200,
db_column='AGENT_NAML',
blank=True,
help_text="Agent or independent contractor's last name"
)
agent_namf = fields.CharField(
max_length=45,
db_column='AGENT_NAMF',
blank=True,
help_text="Agent or independent contractor's first name"
)
agent_namt = fields.CharField(
max_length=200,
db_column='AGENT_NAMT',
blank=True,
help_text="Agent or independent contractor's title or prefix"
)
agent_nams = fields.CharField(
max_length=10,
db_column='AGENT_NAMS',
blank=True,
help_text="Agent or independent contractor's suffix"
)
payee_naml = fields.CharField(
max_length=200,
db_column='PAYEE_NAML',
blank=True,
help_text="Payee's business name or last name if the payee is an \
individual"
)
payee_namf = fields.CharField(
max_length=45,
db_column='PAYEE_NAMF',
blank=True,
help_text="Payee's first name if the payee is an individual"
)
payee_namt = fields.CharField(
max_length=10,
db_column='PAYEE_NAMT',
blank=True,
help_text="Payee's title or prefix if the payee is an individual"
)
payee_nams = fields.CharField(
max_length=10,
db_column='PAYEE_NAMS',
blank=True,
help_text="Payee's suffix if the payee is an individual"
)
payee_city = fields.CharField(
max_length=30,
db_column='PAYEE_CITY',
blank=True,
help_text="Payee's city address"
)
payee_st = fields.CharField(
max_length=2,
db_column='PAYEE_ST',
blank=True,
help_text="Payee state address"
)
payee_zip4 = fields.CharField(
max_length=10,
db_column='PAYEE_ZIP4',
blank=True,
help_text="Payee ZIP Code"
)
amount = fields.DecimalField(
max_digits=16,
decimal_places=2,
db_column='AMOUNT',
help_text="Amount (Sched F401B, 401B-1, 401C)"
)
aggregate = fields.DecimalField(
max_digits=16,
decimal_places=2,
db_column='AGGREGATE',
help_text="Aggregate year-to-date amount (Sched 401C)"
)
expn_dscr = fields.CharField(
max_length=90,
db_column='EXPN_DSCR',
blank=True,
help_text="Purpose of expense and/or description/explanation"
)
cand_naml = fields.CharField(
max_length=200,
db_column='CAND_NAML',
blank=True,
help_text="Candidate/officeholder last name"
)
cand_namf = fields.CharField(
max_length=45,
db_column='CAND_NAMF',
blank=True,
help_text="Candidate/officeholder first name"
)
cand_namt = fields.CharField(
max_length=10,
db_column='CAND_NAMT',
blank=True,
help_text="Candidate/officeholder title or prefix"
)
cand_nams = fields.CharField(
max_length=10,
db_column='CAND_NAMS',
blank=True,
help_text="Candidate/officeholder suffix"
)
office_cd = fields.CharField(
max_length=3,
db_column='OFFICE_CD',
blank=True,
verbose_name="Office code",
help_text="Code that identifies the office being sought"
)
offic_dscr = fields.CharField(
max_length=40,
db_column='OFFIC_DSCR',
blank=True,
help_text="Office sought description"
)
juris_cd = fields.CharField(
max_length=3,
db_column='JURIS_CD',
blank=True,
help_text="Office jurisdiction code"
)
juris_dscr = fields.CharField(
max_length=40,
db_column='JURIS_DSCR',
blank=True,
help_text="Office jurisdiction description"
)
dist_no = fields.CharField(
max_length=3,
db_column='DIST_NO',
blank=True,
help_text="District number for the office being sought. Populated \
for Senate, Assembly, or Board of Equalization races."
)
off_s_h_cd = fields.CharField(
max_length=1,
db_column='OFF_S_H_CD',
blank=True,
help_text="Office sought/held code"
)
bal_name = fields.CharField(
max_length=200,
db_column='BAL_NAME',
blank=True,
help_text="Ballot measure name"
)
bal_num = fields.CharField(
max_length=7,
db_column='BAL_NUM',
blank=True,
help_text="Ballot measure number or letter"
)
bal_juris = fields.CharField(
max_length=40,
db_column='BAL_JURIS',
blank=True,
help_text="Ballot measure jurisdiction"
)
sup_opp_cd = fields.CharField(
max_length=1,
db_column='SUP_OPP_CD',
blank=True,
help_text='Support/oppose code. Legal values are "S" for support \
or "O" for oppose. Used on Form 401.'
)
memo_code = fields.CharField(
max_length=1,
db_column='MEMO_CODE',
blank=True,
help_text="Memo amount flag"
)
memo_refno = fields.CharField(
max_length=20,
db_column='MEMO_REFNO',
blank=True,
help_text="Reference to text contained in the TEXT record"
)
bakref_tid = fields.CharField(
max_length=20,
db_column='BAKREF_TID',
blank=True,
help_text="Back reference to transaction identifier of parent record"
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'S401_CD'
verbose_name = 'S401_CD'
verbose_name_plural = 'S401_CD'
def __str__(self):
return str(self.filing_id)
@python_2_unicode_compatible
class ExpnCd(CalAccessBaseModel):
"""
Campaign expenditures from a variety of forms
"""
agent_namf = fields.CharField(
max_length=45,
db_column='AGENT_NAMF',
blank=True,
help_text="Agent of Ind. Contractor's First name"
)
agent_naml = fields.CharField(
max_length=200,
db_column='AGENT_NAML',
blank=True,
help_text="Agent of Ind. Contractor's Last name (Sched G)"
)
agent_nams = fields.CharField(
max_length=10,
db_column='AGENT_NAMS',
blank=True,
help_text="Agent of Ind. Contractor's Suffix"
)
agent_namt = fields.CharField(
max_length=10,
db_column='AGENT_NAMT',
blank=True,
help_text="Agent of Ind. Contractor's Prefix or Title"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
amount = fields.DecimalField(
decimal_places=2,
max_digits=14,
db_column='AMOUNT',
help_text="Amount of Payment"
)
bakref_tid = fields.CharField(
max_length=20,
db_column='BAKREF_TID',
blank=True,
help_text="Back Reference to a Tran_ID of a 'parent' record"
)
bal_juris = fields.CharField(
max_length=40,
db_column='BAL_JURIS',
blank=True,
help_text="Jurisdiction"
)
bal_name = fields.CharField(
max_length=200,
db_column='BAL_NAME',
blank=True,
help_text="Ballot Measure Name"
)
bal_num = fields.CharField(
max_length=7,
db_column='BAL_NUM',
blank=True,
help_text="Ballot Number or Letter"
)
cand_namf = fields.CharField(
max_length=45,
db_column='CAND_NAMF',
blank=True,
help_text="Candidate's First name"
)
cand_naml = fields.CharField(
max_length=200,
db_column='CAND_NAML',
blank=True,
help_text="Candidate's Last name"
)
cand_nams = fields.CharField(
max_length=10,
db_column='CAND_NAMS',
blank=True,
help_text="Candidate's Suffix"
)
cand_namt = fields.CharField(
max_length=10,
db_column='CAND_NAMT',
blank=True,
help_text="Candidate's Prefix or Title"
)
cmte_id = fields.CharField(
max_length=9,
db_column='CMTE_ID',
blank=True,
help_text="Committee ID (If [COM|RCP] & no ID#, Treas info Req.)"
)
cum_oth = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='CUM_OTH',
blank=True,
help_text="Cumulative / 'Other' (No Cumulative on Sched E & G)"
)
cum_ytd = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='CUM_YTD',
blank=True,
help_text="Cumulative / Year-to-date amount \
(No Cumulative on Sched E & G)"
)
dist_no = fields.CharField(
max_length=3,
db_column='DIST_NO',
blank=True,
help_text="Office District Number (Req. if Juris_Cd=[SEN|ASM|BOE]"
)
ENTITY_CODE_CHOICES = (
# Defined here:
# http://www.documentcloud.org/documents/1308003-cal-access-cal-\
# format.html#document/p9
('', 'Unknown'),
('0', '0 (Unknown)'),
('COM', 'Committee'),
('RCP', 'Recipient Committee'),
('IND', 'Person (spending > $5,000)'),
('OTH', 'Other'),
('PTY', 'Political party'),
('SCC', 'Small contributor committee'),
('BNM', 'Ballot measure\'s name/title'),
('CAO', 'Candidate/officeholder'),
('OFF', 'Officer'),
('PTH', 'PTH (Unknown)'),
('RFD', 'RFD (Unknown)'),
('MBR', 'MBR (Unknown)'),
)
entity_cd = fields.CharField(
choices=ENTITY_CODE_CHOICES,
max_length=3,
db_column='ENTITY_CD',
blank=True,
verbose_name='entity code',
)
expn_chkno = fields.CharField(
max_length=20,
db_column='EXPN_CHKNO',
blank=True,
help_text="Check Number (Optional)"
)
expn_code = fields.CharField(
max_length=3,
db_column='EXPN_CODE',
blank=True,
help_text="Expense Code - Values: (Refer to list in Overview) \
Note: CTB & IND need explanation & listing on Sched D; TRC & TRS require \
explanation."
)
expn_date = fields.DateField(
null=True,
db_column='EXPN_DATE',
blank=True,
help_text="Date of Expenditure (Note: Date not on Sched E & G)"
)
expn_dscr = fields.CharField(
max_length=400,
db_column='EXPN_DSCR',
blank=True,
help_text="Purpose of Expense and/or Description/explanation"
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
FORM_TYPE_CHOICES = (
('D', 'Form 460 (Recipient committee campaign statement): \
Schedule D, summary of expenditure supporting/opposing other candidates, \
measures and committees'),
('E', 'Form 460 (Recipient committee campaign statement): \
Schedule E, payments made'),
('G', 'Form 460 (Recipient committee campaign statement): \
Schedule G, payments made by agent of independent contractor'),
('F450P5', 'Form 450 (Recipient Committee Campaign Statement \
Short Form): Part 5, payments made'),
('F461P5', 'Form 461 (Independent expenditure and major donor \
committee campaign statement): Part 5, contributions and expenditures made'),
('F465P3', 'Form 465 (Supplemental independent expenditure \
report): Part 3, independent expenditures made'),
('F900', 'Form 900 (Public Employee\'s Retirement Board Candidate \
Campaign Statement), Schedule B, expenditures made'),
)
form_type = fields.CharField(
choices=FORM_TYPE_CHOICES,
max_length=6,
db_column='FORM_TYPE',
help_text='Name of the source filing form or schedule'
)
g_from_e_f = fields.CharField(
max_length=1,
db_column='G_FROM_E_F',
blank=True,
help_text="Back Reference from Sched G to Sched 'E' or 'F'?"
)
juris_cd = fields.CharField(
max_length=3,
db_column='JURIS_CD',
blank=True,
help_text="Office Jurisdiction Code Values: STW=Statewide; \
SEN=Senate District; ASM=Assembly District; \
BOE=Board of Equalization District; \
CIT=City; CTY=County; LOC=Local; OTH=Other"
)
juris_dscr = fields.CharField(
max_length=40,
db_column='JURIS_DSCR',
blank=True,
help_text="Office Jurisdiction Description \
(Req. if Juris_Cd=[CIT|CTY|LOC|OTH])"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
memo_code = fields.CharField(
max_length=1,
db_column='MEMO_CODE',
blank=True,
help_text="Memo Amount? (Date/Amount are informational only)"
)
memo_refno = fields.CharField(
max_length=20,
db_column='MEMO_REFNO',
blank=True,
help_text="Reference to text contained in a TEXT record."
)
OFF_S_H_CD_CHOICES = (
('H', 'Office Held'),
('S', 'Office Sought'),
('A', 'A - Unknown'),
('8', '8 - Unknown'),
('O', 'O - Unknown'),
)
off_s_h_cd = fields.CharField(
choices=OFF_S_H_CD_CHOICES,
max_length=1,
db_column='OFF_S_H_CD',
blank=True,
help_text="Office Sought/Held Code: H=Held; S=Sought"
)
offic_dscr = fields.CharField(
max_length=40,
db_column='OFFIC_DSCR',
blank=True,
help_text="Office Sought Description (Req. if Office_Cd=OTH)"
)
office_cd = fields.CharField(
max_length=3,
db_column='OFFICE_CD',
blank=True,
help_text="Office Sought (See table of code in Overview)"
)
# payee_adr1 = fields.CharField(
# max_length=55,
# db_column='PAYEE_ADR1',
# blank=True,
# help_text="Address of Payee"
# )
# payee_adr2 = fields.CharField(
# max_length=55,
# db_column='PAYEE_ADR2',
# blank=True,
# help_text="Optional 2nd line of Address"
# )
payee_city = fields.CharField(
max_length=30,
db_column='PAYEE_CITY',
blank=True,
help_text="Payee City"
)
payee_namf = fields.CharField(
max_length=45,
db_column='PAYEE_NAMF',
blank=True,
help_text="Payee's First name"
)
payee_naml = fields.CharField(
max_length=200,
db_column='PAYEE_NAML',
blank=True,
help_text="Payee's Last name"
)
payee_nams = fields.CharField(
max_length=10,
db_column='PAYEE_NAMS',
blank=True,
help_text="Payee's Suffix"
)
payee_namt = fields.CharField(
max_length=10,
db_column='PAYEE_NAMT',
blank=True,
help_text="Payee's Prefix or Title"
)
payee_st = fields.CharField(
max_length=2,
db_column='PAYEE_ST',
blank=True,
help_text="State code"
)
payee_zip4 = fields.CharField(
max_length=10,
db_column='PAYEE_ZIP4',
blank=True,
help_text="Zip+4"
)
REC_TYPE_CHOICES = (
("EXPN", "EXPN"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
sup_opp_cd = fields.CharField(
max_length=1,
db_column='SUP_OPP_CD',
blank=True,
help_text="Support/Oppose? Values: S; O (F450, F461)"
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
# tres_adr1 = fields.CharField(
# max_length=55,
# db_column='TRES_ADR1',
# blank=True,
# help_text="Treasurer Street 1(Req if [COM|RCP] & no ID#)"
# )
# tres_adr2 = fields.CharField(
# max_length=55,
# db_column='TRES_ADR2',
# blank=True,
# help_text="Treasurer Street 2"
# )
tres_city = fields.CharField(
max_length=30,
db_column='TRES_CITY',
blank=True,
help_text="Treasurer City"
)
tres_namf = fields.CharField(
max_length=45,
db_column='TRES_NAMF',
blank=True,
help_text="Treasurer's First name (Req if [COM|RCP] & no ID#)"
)
tres_naml = fields.CharField(
max_length=200,
db_column='TRES_NAML',
blank=True,
help_text="Treasurer's Last name (Req if [COM|RCP] & no ID#)"
)
tres_nams = fields.CharField(
max_length=10,
db_column='TRES_NAMS',
blank=True,
help_text="Treasurer's Suffix"
)
tres_namt = fields.CharField(
max_length=10,
db_column='TRES_NAMT',
blank=True,
help_text="Treasurer's Prefix or Title"
)
tres_st = fields.CharField(
max_length=2,
db_column='TRES_ST',
blank=True,
help_text="Treasurer State"
)
tres_zip4 = fields.CharField(
max_length=10,
db_column='TRES_ZIP4',
blank=True,
help_text="Treasurer ZIP+4"
)
xref_match = fields.CharField(
max_length=1,
db_column='XREF_MATCH',
blank=True,
help_text="X = Related item on other Sched has same Tran_ID"
)
xref_schnm = fields.CharField(
max_length=2,
db_column='XREF_SCHNM',
blank=True,
help_text="Related item is included on Sched 'C' or 'H2'"
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'EXPN_CD'
verbose_name = 'EXPN_CD'
verbose_name_plural = 'EXPN_CD'
def __str__(self):
return str(self.filing_id)
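# Illustrative sketch (not part of the original CAL-ACCESS schema): ExpnCd is a
# plain Django model, so Schedule E payments could, for example, be totaled per
# payee with the standard ORM. The helper below is hypothetical, is not called
# anywhere in this module, and only shows the assumed usage.
def _example_schedule_e_totals():
    """Hypothetical example: sum Schedule E payment amounts by payee last name."""
    from django.db.models import Sum
    return (
        ExpnCd.objects
        .filter(form_type='E')
        .values('payee_naml')
        .annotate(total=Sum('amount'))
        .order_by('-total')
    )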
@python_2_unicode_compatible
class F495P2Cd(CalAccessBaseModel):
"""
F495 Supplemental Preelection Campaign Statement
It is an attachment to the forms below:
F450 Recipient Committee Campaign Statement Short Form
F460 Recipient Committee Campaign Statement
Form 495 is for use by a recipient committee that
makes contributions totaling $10,000 or more in
connection with an election for which the committee
is not required to file regular preelection reports.
Form 495 is filed as an attachment to a campaign
disclosure statement (Form 450 or 460). On the
Form 450 or 460, the committee will report all
contributions received and expenditures made since
its last report.
"""
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
REC_TYPE_CHOICES = (
('F495', 'F495'),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
FORM_TYPE_CHOICES = (
('F450', 'Form 450 (Recipient committee campaign statement, \
short form)'),
('F460', 'Form 460 (Recipient committee campaign statement)'),
)
form_type = fields.CharField(
db_column='FORM_TYPE',
max_length=4,
choices=FORM_TYPE_CHOICES,
help_text='Name of the source filing form or schedule'
)
elect_date = fields.DateField(
db_column='ELECT_DATE',
blank=True,
null=True,
help_text="Date of the General Election This date will be the same \
as on the filing's cover (CVR) record."
)
electjuris = fields.CharField(
db_column='ELECTJURIS',
max_length=40,
help_text="Jurisdiction of the election"
)
contribamt = fields.FloatField(
db_column='CONTRIBAMT',
help_text="Contribution amount (For the period of 6 months prior to \
17 days before the election)"
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'F495P2_CD'
verbose_name = 'F495P2_CD'
verbose_name_plural = 'F495P2_CD'
def __str__(self):
return str(self.filing_id)
@python_2_unicode_compatible
class DebtCd(CalAccessBaseModel):
"""
Form 460 (Recipient Committee Campaign Statement)
Schedule (F) Accrued Expenses (Unpaid Bills) records
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"LINE_ITEM",
"REC_TYPE",
"FORM_TYPE"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
amt_incur = fields.DecimalField(
decimal_places=2,
max_digits=14,
db_column='AMT_INCUR',
help_text='Amount incurred this period',
)
amt_paid = fields.DecimalField(
decimal_places=2,
max_digits=14,
db_column='AMT_PAID',
help_text='Amount paid this period.'
)
bakref_tid = fields.CharField(
max_length=20,
db_column='BAKREF_TID',
blank=True,
help_text='Back reference to a transaction identifier \
of a parent record.'
)
beg_bal = fields.DecimalField(
decimal_places=2,
max_digits=14,
db_column='BEG_BAL',
help_text='Outstanding balance at beginning of period',
)
cmte_id = fields.CharField(
max_length=9,
db_column='CMTE_ID',
blank=True,
help_text='Committee identification number',
)
end_bal = fields.DecimalField(
decimal_places=2,
max_digits=14,
db_column='END_BAL',
help_text='Outstanding balance at close of this period',
)
ENTITY_CODE_CHOICES = (
# Defined here:
# http://www.documentcloud.org/documents/1308003-cal-access-cal-\
# format.html#document/p9
('', 'Unknown'),
('BNM', 'Ballot measure\'s name/title'),
('COM', 'Committee'),
('IND', 'Person (spending > $5,000)'),
('OTH', 'Other'),
('PTY', 'Political party'),
('RCP', 'Recipient Committee'),
('SCC', 'Small contributor committee'),
)
entity_cd = fields.CharField(
max_length=3,
db_column='ENTITY_CD',
blank=True,
verbose_name='entity code',
choices=ENTITY_CODE_CHOICES,
help_text='Entity code of the payee',
)
expn_code = fields.CharField(
max_length=3,
db_column='EXPN_CODE',
blank=True,
help_text='Expense code',
)
expn_dscr = fields.CharField(
max_length=400,
db_column='EXPN_DSCR',
blank=True,
help_text='Purpose of expense and/or description/explanation',
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number of the parent filing",
)
FORM_TYPE_CHOICES = (
('F', 'Form 460 (Recipient committee campaign statement): \
Schedule F, accrued expenses (unpaid bills)'),
)
form_type = fields.CharField(
max_length=1,
db_column='FORM_TYPE',
choices=FORM_TYPE_CHOICES,
help_text='Schedule Name/ID: (F - Sched F / Accrued Expenses)'
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Record line item number",
db_index=True,
)
memo_code = fields.CharField(
max_length=1, db_column='MEMO_CODE', blank=True,
help_text='Memo amount flag',
)
memo_refno = fields.CharField(
max_length=20,
db_column='MEMO_REFNO',
blank=True,
help_text='Reference to text contained in a TEXT record.'
)
# payee_adr1 = fields.CharField(
# max_length=55, db_column='PAYEE_ADR1', blank=True
# )
# payee_adr2 = fields.CharField(
# max_length=55, db_column='PAYEE_ADR2', blank=True
# )
payee_city = fields.CharField(
max_length=30,
db_column='PAYEE_CITY',
blank=True,
help_text='First line of the payee\'s street address',
)
payee_namf = fields.CharField(
max_length=45,
db_column='PAYEE_NAMF',
blank=True,
help_text='Payee\'s first name if the payee is an individual',
)
payee_naml = fields.CharField(
max_length=200,
db_column='PAYEE_NAML',
help_text="Payee's business name or last name if the payee is an \
individual."
)
payee_nams = fields.CharField(
max_length=10,
db_column='PAYEE_NAMS',
blank=True,
help_text='Payee\'s name suffix if the payee is an individual',
)
payee_namt = fields.CharField(
max_length=100,
db_column='PAYEE_NAMT',
blank=True,
help_text='Payee\'s prefix or title if the payee is an individual',
)
payee_st = fields.CharField(
max_length=2,
db_column='PAYEE_ST',
blank=True,
help_text='Payee\'s state',
)
payee_zip4 = fields.CharField(
max_length=10,
db_column='PAYEE_ZIP4',
blank=True,
help_text='Payee\'s ZIP Code',
)
REC_TYPE_CHOICES = (
("DEBT", "DEBT"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
help_text='Record type value: DEBT',
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Transaction identifier - permanent value unique to \
this item',
)
# tres_adr1 = fields.CharField(
# max_length=55, db_column='TRES_ADR1', blank=True
# )
# tres_adr2 = fields.CharField(
# max_length=55, db_column='TRES_ADR2', blank=True
# )
tres_city = fields.CharField(
max_length=30,
db_column='TRES_CITY',
blank=True,
help_text='City portion of the treasurer or responsible \
officer\'s street address',
)
tres_namf = fields.CharField(
max_length=45,
db_column='TRES_NAMF',
blank=True,
help_text='Treasurer or responsible officer\'s first name'
)
tres_naml = fields.CharField(
max_length=200,
db_column='TRES_NAML',
blank=True,
help_text='Treasurer or responsible officer\'s last name'
)
tres_nams = fields.CharField(
max_length=10,
db_column='TRES_NAMS',
blank=True,
help_text='Treasurer or responsible officer\'s suffix',
)
tres_namt = fields.CharField(
max_length=100,
db_column='TRES_NAMT',
blank=True,
help_text='Treasurer or responsible officer\'s prefix or title',
)
tres_st = fields.CharField(
max_length=2,
db_column='TRES_ST',
blank=True,
help_text='State portion of the treasurer or responsible \
officer\'s address',
)
tres_zip4 = fields.CharField(
max_length=10,
db_column='TRES_ZIP4',
blank=True,
help_text='ZIP Code portion of the treasurer or responsible \
officer\'s address',
)
xref_match = fields.CharField(
max_length=1,
db_column='XREF_MATCH',
blank=True,
help_text='Related item on other schedule has same \
transaction identifier. /"X/" indicates this condition is true'
)
xref_schnm = fields.CharField(
max_length=2, db_column='XREF_SCHNM', blank=True,
help_text='Related record is included on Schedule C.'
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'DEBT_CD'
verbose_name = 'DEBT_CD'
verbose_name_plural = 'DEBT_CD'
def __str__(self):
return str(self.filing_id)
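# Illustrative sketch (assumption, not documented in the source data): for a
# Schedule F row the closing balance is expected to reconcile as beginning
# balance + amount incurred - amount paid. The hypothetical helper below only
# demonstrates that check; it is not used elsewhere in this module.
def _example_debt_balance_reconciles(debt):
    """Hypothetical example: check END_BAL against BEG_BAL + AMT_INCUR - AMT_PAID."""
    return debt.end_bal == debt.beg_bal + debt.amt_incur - debt.amt_paid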
@python_2_unicode_compatible
class S496Cd(CalAccessBaseModel):
"""
Form 496 Late Independent Expenditures
"""
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
REC_TYPE_CHOICES = (
('S496', 'S496'),
)
rec_type = fields.CharField(
verbose_name='record type',
max_length=4,
db_column='REC_TYPE',
db_index=True,
choices=REC_TYPE_CHOICES,
)
FORM_TYPE_CHOICES = (
('F496', 'F496 (Late independent expenditure report)'),
)
form_type = fields.CharField(
max_length=4, db_column='FORM_TYPE', blank=True,
choices=FORM_TYPE_CHOICES,
help_text='Name of the source filing form or schedule'
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
amount = fields.DecimalField(
max_digits=16,
decimal_places=2,
db_column='AMOUNT',
help_text="Expenditure amount"
)
exp_date = fields.DateField(
db_column='EXP_DATE',
null=True,
help_text="Expenditure dates"
)
expn_dscr = fields.CharField(
max_length=90,
db_column='EXPN_DSCR',
blank=True,
help_text="Purpose of expense and/or description/explanation"
)
memo_code = fields.CharField(
max_length=1,
db_column='MEMO_CODE',
blank=True,
help_text="Memo amount flag"
)
memo_refno = fields.CharField(
max_length=20,
db_column='MEMO_REFNO',
blank=True,
help_text="Reference to text contained in a TEXT record"
)
date_thru = fields.DateField(
db_column='DATE_THRU',
null=True,
help_text="End of date range for items paid"
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'S496_CD'
verbose_name = 'S496_CD'
verbose_name_plural = 'S496_CD'
def __str__(self):
return "{} Filing {}, Amendment {}".format(
self.form_type,
self.filing_id,
self.amend_id
)
@python_2_unicode_compatible
class SpltCd(CalAccessBaseModel):
"""
Split records
"""
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
elec_amount = fields.DecimalField(
max_digits=16,
decimal_places=2,
db_column='ELEC_AMOUNT',
help_text="This field is undocumented"
)
elec_code = fields.CharField(
max_length=2,
db_column='ELEC_CODE',
blank=True,
help_text='This field is undocumented',
)
elec_date = fields.DateField(
db_column='ELEC_DATE',
null=True,
help_text="This field is undocumented"
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
PFORM_TYPE_CHOICES = (
('A', ''),
('B1', ''),
('B2', ''),
('C', ''),
('D', ''),
('F450P5', ''),
('H', ''),
)
pform_type = fields.CharField(
max_length=7,
db_column='PFORM_TYPE',
db_index=True,
choices=PFORM_TYPE_CHOICES,
help_text='This field is undocumented',
)
ptran_id = fields.CharField(
verbose_name='transaction ID',
max_length=32,
db_column='PTRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'SPLT_CD'
verbose_name = 'SPLT_CD'
verbose_name_plural = 'SPLT_CD'
def __str__(self):
return str(self.filing_id)
@python_2_unicode_compatible
class S497Cd(CalAccessBaseModel):
"""
Form 497: Late Contributions Received/Made
"""
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
REC_TYPE_CHOICES = (
("S497", "S497"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
FORM_TYPE_CHOICES = (
('F497P1', 'Form 497 (Late contribution report): \
Part 1, late contributions received'),
('F497P2', 'Form 497 (Late contribution report): \
Part 2, late contributions made')
)
form_type = fields.CharField(
max_length=6,
db_column='FORM_TYPE',
choices=FORM_TYPE_CHOICES,
help_text='Name of the source filing form or schedule'
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
ENTITY_CODE_CHOICES = (
# Defined here:
# http://www.documentcloud.org/documents/1308003-cal-access-cal-\
# format.html#document/p9
('', 'Unknown'),
('0', '0 (Unknown)'),
('BNM', 'Ballot measure\'s name/title'),
('CAO', 'Candidate/officeholder'),
('CTL', 'Controlled committee'),
('COM', 'Committee'),
('IND', 'Person (spending > $5,000)'),
('OFF', 'Officer'),
('OTH', 'Other'),
('PTY', 'Political party'),
('RCP', 'Recipient Committee'),
('SCC', 'Small contributor committee'),
)
entity_cd = fields.CharField(
max_length=3,
db_column='ENTITY_CD',
blank=True,
verbose_name='entity code',
choices=ENTITY_CODE_CHOICES,
)
enty_naml = fields.CharField(
max_length=200,
db_column='ENTY_NAML',
blank=True,
help_text="Entity's last name or business name"
)
enty_namf = fields.CharField(
max_length=45,
db_column='ENTY_NAMF',
blank=True,
help_text="Entity's first name"
)
enty_namt = fields.CharField(
max_length=10,
db_column='ENTY_NAMT',
blank=True,
help_text="Entity's title or prefix"
)
enty_nams = fields.CharField(
max_length=10,
db_column='ENTY_NAMS',
blank=True,
help_text="Entity's suffix"
)
enty_city = fields.CharField(
max_length=30,
db_column='ENTY_CITY',
blank=True,
help_text="Filing committee's city address"
)
enty_st = fields.CharField(
max_length=2,
db_column='ENTY_ST',
blank=True,
help_text="Filing committee's state address"
)
enty_zip4 = fields.CharField(
max_length=10,
db_column='ENTY_ZIP4',
blank=True,
help_text="Filing committee's ZIP Code"
)
ctrib_emp = fields.CharField(
max_length=200,
db_column='CTRIB_EMP',
blank=True,
help_text="Employer"
)
ctrib_occ = fields.CharField(
max_length=60,
db_column='CTRIB_OCC',
blank=True,
help_text="Occupation"
)
ctrib_self = fields.CharField(
max_length=1,
db_column='CTRIB_SELF',
blank=True,
help_text='Self employed checkbox. "X" indicates the contributor is \
self-employed.'
)
elec_date = fields.DateField(
db_column='ELEC_DATE',
null=True,
help_text="Date of election"
)
ctrib_date = fields.DateField(
db_column='CTRIB_DATE',
null=True,
help_text="Date item received/made"
)
date_thru = fields.DateField(
db_column='DATE_THRU',
null=True,
help_text="End of date range for items received"
)
amount = fields.DecimalField(
max_digits=16,
decimal_places=2,
db_column='AMOUNT',
help_text="Amount received/made"
)
cmte_id = fields.CharField(
max_length=9,
db_column='CMTE_ID',
blank=True,
verbose_name="Committee ID",
help_text="Committee identification number"
)
cand_naml = fields.CharField(
max_length=200,
db_column='CAND_NAML',
blank=True,
help_text="Candidate/officeholder's last name"
)
cand_namf = fields.CharField(
max_length=45,
db_column='CAND_NAMF',
blank=True,
help_text="Candidate/officeholder's first name"
)
cand_namt = fields.CharField(
max_length=10,
db_column='CAND_NAMT',
blank=True,
help_text="Candidate/officeholder's title or prefix"
)
cand_nams = fields.CharField(
max_length=10,
db_column='CAND_NAMS',
blank=True,
help_text="Candidate/officeholder's suffix"
)
office_cd = fields.CharField(
max_length=3,
db_column='OFFICE_CD',
blank=True,
verbose_name="Office code",
help_text="Office sought code"
)
offic_dscr = fields.CharField(
max_length=40,
db_column='OFFIC_DSCR',
blank=True,
help_text="Office sought description"
)
juris_cd = fields.CharField(
max_length=3,
db_column='JURIS_CD',
blank=True,
verbose_name="Jurisdiction code"
)
juris_dscr = fields.CharField(
max_length=40,
db_column='JURIS_DSCR',
blank=True,
help_text="Office jurisdiction description"
)
dist_no = fields.CharField(
max_length=3,
db_column='DIST_NO',
blank=True,
help_text="District number for the office being sought. Populated \
for Senate, Assembly, or Board of Equalization races."
)
off_s_h_cd = fields.CharField(
max_length=1,
db_column='OFF_S_H_CD',
blank=True,
help_text='Office Sought/Held Code. Legal values are "S" for \
sought and "H" for held.'
)
bal_name = fields.CharField(
max_length=200,
db_column='BAL_NAME',
blank=True,
help_text="Ballot measure name"
)
bal_num = fields.CharField(
max_length=7,
db_column='BAL_NUM',
blank=True,
help_text="Ballot measure number"
)
bal_juris = fields.CharField(
max_length=40,
db_column='BAL_JURIS',
blank=True,
help_text="Ballot measure jurisdiction"
)
memo_code = fields.CharField(
max_length=1,
db_column='MEMO_CODE',
blank=True,
help_text="Memo amount flag"
)
memo_refno = fields.CharField(
max_length=20,
db_column='MEMO_REFNO',
blank=True,
help_text="Reference to text contained in TEXT code"
)
bal_id = fields.CharField(
max_length=9,
db_column='BAL_ID',
blank=True,
help_text="This field is undocumented"
)
cand_id = fields.CharField(
max_length=9,
db_column='CAND_ID',
blank=True,
help_text="This field is undocumented"
)
sup_off_cd = fields.CharField(
max_length=1,
db_column='SUP_OFF_CD',
blank=True,
help_text="This field is undocumented"
)
sup_opp_cd = fields.CharField(
max_length=1,
db_column='SUP_OPP_CD',
blank=True,
help_text="This field is undocumented"
)
def __str__(self):
return "{} Filing {}, Amendment {}".format(
self.get_form_type_display(),
self.filing_id,
self.amend_id
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'S497_CD'
verbose_name = 'S497_CD'
verbose_name_plural = 'S497_CD'
@python_2_unicode_compatible
class F501502Cd(CalAccessBaseModel):
"""
Candidate intention statement
"""
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
REC_TYPE_CHOICES = (
("CVR", "CVR"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
FORM_TYPE_CHOICES = (
('F501', 'Form 501 (Candidate intention statement)'),
('F502', 'Form 502 (Campaign bank account statement)')
)
form_type = fields.CharField(
db_column='FORM_TYPE',
max_length=4,
choices=FORM_TYPE_CHOICES,
help_text='Name of the source filing form or schedule'
)
filer_id = fields.CharField(
verbose_name='filer ID',
db_column='FILER_ID',
max_length=9,
blank=True,
db_index=True,
help_text="Filer's unique identification number",
)
committee_id = fields.CharField(
db_column='COMMITTEE_ID',
max_length=8,
blank=True,
verbose_name="Committee ID",
help_text='Committee identification number'
)
entity_cd = fields.CharField(
db_column='ENTITY_CD',
blank=True,
max_length=3,
help_text='Entity code'
)
report_num = fields.IntegerField(
db_column='REPORT_NUM',
blank=True,
null=True,
help_text='Report Number; 000 Original; 001-999 Amended'
)
rpt_date = fields.DateTimeField(
db_column='RPT_DATE',
blank=True,
null=True,
help_text='date this report is filed'
)
stmt_type = fields.IntegerField(
db_column='STMT_TYPE',
help_text="Type of statement"
)
from_date = fields.CharField(
db_column='FROM_DATE',
max_length=32,
blank=True,
help_text='Reporting period from date'
)
thru_date = fields.CharField(
db_column='THRU_DATE',
max_length=32,
blank=True,
help_text="Reporting period through date"
)
elect_date = fields.CharField(
db_column='ELECT_DATE',
max_length=32,
blank=True,
help_text='Date of election'
)
cand_naml = fields.CharField(
db_column='CAND_NAML',
max_length=81,
blank=True,
help_text="Candidate/officerholder last name"
)
cand_namf = fields.CharField(
db_column='CAND_NAMF',
max_length=25,
blank=True,
help_text="Candidate/officerholder first name"
)
can_namm = fields.CharField(
db_column='CAN_NAMM',
max_length=10,
blank=True,
help_text='Candidate/officeholder middle name'
)
cand_namt = fields.CharField(
db_column='CAND_NAMT',
max_length=7,
blank=True,
help_text="Candidate/officerholder title or prefix"
)
cand_nams = fields.CharField(
db_column='CAND_NAMS',
max_length=7,
blank=True,
help_text="Candidate/officeholder suffix"
)
moniker_pos = fields.CharField(
db_column='MONIKER_POS',
max_length=32,
blank=True,
help_text="Location of the candidate/officeholder's moniker"
)
moniker = fields.CharField(
db_column='MONIKER',
max_length=4,
blank=True,
help_text="Candidate/officeholder's moniker"
)
cand_city = fields.CharField(
db_column='CAND_CITY',
max_length=22,
blank=True,
help_text="Candidate/officerholder city"
)
cand_st = fields.CharField(
db_column='CAND_ST',
max_length=4,
blank=True,
help_text='Candidate/officeholder state'
)
cand_zip4 = fields.CharField(
db_column='CAND_ZIP4',
max_length=10,
blank=True,
help_text='Candidate/officeholder zip +4'
)
cand_phon = fields.CharField(
db_column='CAND_PHON',
max_length=14,
blank=True,
help_text='Candidate/officeholder phone number'
)
cand_fax = fields.CharField(
db_column='CAND_FAX',
max_length=14,
blank=True,
help_text="Candidate/officerholder fax"
)
cand_email = fields.CharField(
db_column='CAND_EMAIL',
max_length=37,
blank=True,
help_text='Candidate/officeholder email address'
)
fin_naml = fields.CharField(
db_column='FIN_NAML',
max_length=53,
blank=True,
help_text="Financial institution's business name"
)
fin_namf = fields.CharField(
db_column='FIN_NAMF',
max_length=32,
blank=True,
help_text="Unused. Financial institution's first name."
)
fin_namt = fields.CharField(
db_column='FIN_NAMT',
max_length=32,
blank=True,
help_text="Unused. Financial institution's title."
)
fin_nams = fields.CharField(
db_column='FIN_NAMS',
max_length=32,
blank=True,
help_text="Unused. Financial institution's suffix."
)
fin_city = fields.CharField(
db_column='FIN_CITY',
max_length=20,
blank=True,
help_text="Financial institution's city."
)
fin_st = fields.CharField(
db_column='FIN_ST',
max_length=4,
blank=True,
help_text="Financial institution's state."
)
fin_zip4 = fields.CharField(
db_column='FIN_ZIP4',
max_length=9,
blank=True,
help_text="Financial institution's zip code."
)
fin_phon = fields.CharField(
db_column='FIN_PHON',
max_length=14,
blank=True,
help_text="Financial institution's phone number."
)
fin_fax = fields.CharField(
db_column='FIN_FAX',
max_length=10,
blank=True,
help_text="Financial institution's FAX Number."
)
fin_email = fields.CharField(
db_column='FIN_EMAIL',
max_length=15,
blank=True,
help_text="Financial institution's e-mail address."
)
office_cd = fields.IntegerField(
db_column='OFFICE_CD',
help_text="Office sought code"
)
offic_dscr = fields.CharField(
db_column='OFFIC_DSCR',
max_length=50,
blank=True,
help_text="Office sought description"
)
agency_nam = fields.CharField(
db_column='AGENCY_NAM',
max_length=63,
blank=True,
help_text="Agency name"
)
juris_cd = fields.IntegerField(
db_column='JURIS_CD',
blank=True,
null=True,
help_text='Office jurisdiction code'
)
juris_dscr = fields.CharField(
db_column='JURIS_DSCR',
max_length=14,
blank=True,
help_text='office jurisdiction description'
)
dist_no = fields.CharField(
db_column='DIST_NO',
max_length=4,
blank=True,
help_text='District number for the office being sought. \
Populated for Senate, Assembly or Board of Equalization races.'
)
party = fields.CharField(
db_column='PARTY',
max_length=20,
blank=True,
help_text="Political party"
)
yr_of_elec = fields.IntegerField(
db_column='YR_OF_ELEC',
blank=True,
null=True,
help_text='Year of election'
)
elec_type = fields.IntegerField(
db_column='ELEC_TYPE',
blank=True,
null=True,
verbose_name="Election type"
)
execute_dt = fields.DateTimeField(
db_column='EXECUTE_DT',
blank=True,
null=True,
help_text='Execution date'
)
can_sig = fields.CharField(
db_column='CAN_SIG',
max_length=13,
blank=True,
help_text='Candidate signature'
)
account_no = fields.CharField(
db_column='ACCOUNT_NO',
max_length=22,
blank=True,
help_text='Account number'
)
acct_op_dt = fields.DateField(
db_column='ACCT_OP_DT',
blank=True,
null=True,
help_text='Account open date'
)
party_cd = fields.IntegerField(
db_column='PARTY_CD',
blank=True,
null=True,
help_text="Party code"
)
district_cd = fields.IntegerField(
db_column='DISTRICT_CD',
blank=True,
null=True,
help_text='District number for the office being sought. \
Populated for Senate, Assembly, or Board of Equalization races.'
)
accept_limit_yn = fields.IntegerField(
db_column='ACCEPT_LIMIT_YN',
blank=True,
null=True,
help_text='This field is undocumented'
)
did_exceed_dt = fields.DateField(
db_column='DID_EXCEED_DT',
blank=True,
null=True,
help_text='This field is undocumented'
)
cntrb_prsnl_fnds_dt = fields.DateField(
db_column='CNTRB_PRSNL_FNDS_DT',
blank=True,
null=True,
help_text="This field is undocumented"
)
def __str__(self):
return str(self.filing_id)
class Meta:
app_label = 'calaccess_raw'
db_table = 'F501_502_CD'
verbose_name = 'F501_502_CD'
verbose_name_plural = 'F501_502_CD'
@python_2_unicode_compatible
class S498Cd(CalAccessBaseModel):
"""
Form 498: Slate Mailer Late Independent Expenditures Made
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"LINE_ITEM",
"REC_TYPE",
"FORM_TYPE",
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
REC_TYPE_CHOICES = (
("S498", "S498"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
FORM_TYPE_CHOICES = (
('F498-A', 'Form 498 (Slate mailer late payment report): \
Part A: late payments attributed to'),
('F498-R', 'Form 498 (Slate mailer late payment report): \
Part R: late payments received from')
)
form_type = fields.CharField(
max_length=9,
db_column='FORM_TYPE',
blank=True,
choices=FORM_TYPE_CHOICES,
help_text='Name of the source filing form or schedule'
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
ENTITY_CODE_CHOICES = (
# Defined here:
# http://www.documentcloud.org/documents/1308003-cal-access-cal-\
# format.html#document/p9
('', 'Unknown'),
('CAO', 'Candidate/officeholder'),
('COM', 'Committee'),
('IND', 'Person (spending > $5,000)'),
('OTH', 'Other'),
('RCP', 'Recipient Committee'),
)
entity_cd = fields.CharField(
max_length=3,
db_column='ENTITY_CD',
blank=True,
verbose_name='entity code',
choices=ENTITY_CODE_CHOICES,
)
cmte_id = fields.CharField(
max_length=9,
db_column='CMTE_ID',
blank=True,
verbose_name="Committee ID",
help_text="Committee identification number"
)
payor_naml = fields.CharField(
max_length=200,
db_column='PAYOR_NAML',
blank=True,
help_text="Payor's last name or business name"
)
payor_namf = fields.CharField(
max_length=45,
db_column='PAYOR_NAMF',
blank=True,
help_text="Payor's first name."
)
payor_namt = fields.CharField(
max_length=10,
db_column='PAYOR_NAMT',
blank=True,
help_text="Payor's Prefix or title."
)
payor_nams = fields.CharField(
max_length=10,
db_column='PAYOR_NAMS',
blank=True,
help_text="Payor's suffix."
)
payor_city = fields.CharField(
max_length=30,
db_column='PAYOR_CITY',
blank=True,
help_text="Payor's city."
)
payor_st = fields.CharField(
max_length=2,
db_column='PAYOR_ST',
blank=True,
help_text="Payor's State."
)
payor_zip4 = fields.CharField(
max_length=10,
db_column='PAYOR_ZIP4',
blank=True,
help_text="Payor's zip code"
)
date_rcvd = fields.DateField(
db_column='DATE_RCVD',
null=True,
help_text="Date received"
)
amt_rcvd = fields.DecimalField(
max_digits=16,
decimal_places=2,
db_column='AMT_RCVD',
help_text="Amount received"
)
cand_naml = fields.CharField(
max_length=200,
db_column='CAND_NAML',
blank=True,
help_text="Candidate/officerholder last name"
)
cand_namf = fields.CharField(
max_length=45,
db_column='CAND_NAMF',
blank=True,
help_text="Candidate/officerholder first name"
)
cand_namt = fields.CharField(
max_length=10,
db_column='CAND_NAMT',
blank=True,
help_text="Candidate/officerholder title or prefix"
)
cand_nams = fields.CharField(
max_length=10,
db_column='CAND_NAMS',
blank=True,
help_text="Candidate/officerholder suffix"
)
office_cd = fields.CharField(
max_length=3,
db_column='OFFICE_CD',
blank=True,
verbose_name='Office code',
help_text="Code that identifies the office being sought"
)
offic_dscr = fields.CharField(
max_length=40,
db_column='OFFIC_DSCR',
blank=True,
help_text="Description of office sought"
)
juris_cd = fields.CharField(
max_length=3,
db_column='JURIS_CD',
blank=True,
help_text="Office jurisdiction code"
)
juris_dscr = fields.CharField(
max_length=40,
db_column='JURIS_DSCR',
blank=True,
help_text="Office jurisdiction description"
)
dist_no = fields.CharField(
max_length=3,
db_column='DIST_NO',
blank=True,
help_text="District number for the office being sought. \
Populated for Senate, Assembly, or Board of Equalization races."
)
off_s_h_cd = fields.CharField(
max_length=1,
db_column='OFF_S_H_CD',
blank=True,
help_text='Office Sought/Held Code. Legal values are "S" for \
sought and "H" for held'
)
bal_name = fields.CharField(
max_length=200,
db_column='BAL_NAME',
blank=True,
help_text="Ballot measure name"
)
bal_num = fields.CharField(
max_length=7,
db_column='BAL_NUM',
blank=True,
help_text="Ballot measure number or letter."
)
bal_juris = fields.CharField(
max_length=40,
db_column='BAL_JURIS',
blank=True,
help_text="Jurisdiction of ballot measure"
)
sup_opp_cd = fields.CharField(
max_length=1,
db_column='SUP_OPP_CD',
blank=True,
help_text='Support/oppose code. Legal values are "S" for support \
or "O" for oppose.'
)
amt_attrib = fields.DecimalField(
max_digits=16,
decimal_places=2,
db_column='AMT_ATTRIB',
help_text="Amount attributed (only if Form_type = 'F498-A')"
)
memo_code = fields.CharField(
max_length=1,
db_column='MEMO_CODE',
blank=True,
help_text="Memo amount flat"
)
memo_refno = fields.CharField(
max_length=20,
db_column='MEMO_REFNO',
blank=True,
help_text='Reference text contained in TEXT record'
)
employer = fields.CharField(
max_length=200,
db_column='EMPLOYER',
blank=True,
help_text="This field is undocumented"
)
occupation = fields.CharField(
max_length=60,
db_column='OCCUPATION',
blank=True,
help_text='This field is undocumented'
)
selfemp_cb = fields.CharField(
max_length=1,
db_column='SELFEMP_CB',
blank=True,
help_text='Self-employed checkbox'
)
def __str__(self):
return str(self.filing_id)
class Meta:
app_label = 'calaccess_raw'
db_table = 'S498_CD'
verbose_name = 'S498_CD'
verbose_name_plural = 'S498_CD'
|
mit
| 8,818,675,491,064,973,000 | -1,035,888,777,332,519,600 | 28.293907 | 79 | 0.576021 | false |
QijunPan/ansible
|
lib/ansible/modules/system/osx_defaults.py
|
25
|
14472
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, GeekChimp - Franck Nijhof <[email protected]>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: osx_defaults
author: Franck Nijhof (@frenck)
short_description: osx_defaults allows users to read, write, and delete Mac OS X user defaults from Ansible
description:
- osx_defaults allows users to read, write, and delete Mac OS X user defaults from Ansible scripts.
Mac OS X applications and other programs use the defaults system to record user preferences and other
information that must be maintained when the applications aren't running (such as default font for new
documents, or the position of an Info panel).
version_added: "2.0"
options:
domain:
description:
- The domain is a domain name of the form com.companyname.appname.
required: false
default: NSGlobalDomain
host:
description:
- The host on which the preference should apply. The special value "currentHost" corresponds to the
"-currentHost" switch of the defaults commandline tool.
required: false
default: null
version_added: "2.1"
key:
description:
- The key of the user preference
required: true
type:
description:
- The type of value to write.
required: false
default: string
choices: [ "array", "bool", "boolean", "date", "float", "int", "integer", "string" ]
array_add:
description:
- Add new elements to the array for a key which has an array as its value.
required: false
default: false
choices: [ "true", "false" ]
value:
description:
- The value to write. Only required when state = present.
required: false
default: null
state:
description:
- The state of the user defaults
required: false
default: present
choices: [ "present", "absent" ]
notes:
- Apple Mac caches defaults. You may need to log out and log back in to apply the changes.
'''
EXAMPLES = '''
- osx_defaults:
domain: com.apple.Safari
key: IncludeInternalDebugMenu
type: bool
value: true
state: present
- osx_defaults:
domain: NSGlobalDomain
key: AppleMeasurementUnits
type: string
value: Centimeters
state: present
- osx_defaults:
domain: com.apple.screensaver
host: currentHost
key: showClock
type: int
value: 1
- osx_defaults:
key: AppleMeasurementUnits
type: string
value: Centimeters
- osx_defaults:
key: AppleLanguages
type: array
value:
- en
- nl
- osx_defaults:
domain: com.geekchimp.macable
key: ExampleKeyToRemove
state: absent
'''
import datetime
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
# exceptions --------------------------------------------------------------- {{{
class OSXDefaultsException(Exception):
pass
# /exceptions -------------------------------------------------------------- }}}
# class MacDefaults -------------------------------------------------------- {{{
class OSXDefaults(object):
""" Class to manage Mac OS user defaults """
# init ---------------------------------------------------------------- {{{
""" Initialize this module. Finds 'defaults' executable and preps the parameters """
def __init__(self, **kwargs):
# Initial var for storing current defaults value
self.current_value = None
# Just set all given parameters
for key, val in kwargs.items():
setattr(self, key, val)
# Try to find the defaults executable
self.executable = self.module.get_bin_path(
'defaults',
required=False,
opt_dirs=self.path.split(':'),
)
if not self.executable:
raise OSXDefaultsException("Unable to locate defaults executable.")
# When state is present, we require a parameter
if self.state == "present" and self.value is None:
raise OSXDefaultsException("Missing value parameter")
# Ensure the value is the correct type
self.value = self._convert_type(self.type, self.value)
# /init --------------------------------------------------------------- }}}
# tools --------------------------------------------------------------- {{{
""" Converts value to given type """
def _convert_type(self, type, value):
if type == "string":
return str(value)
elif type in ["bool", "boolean"]:
if isinstance(value, basestring):
value = value.lower()
if value in [True, 1, "true", "1", "yes"]:
return True
elif value in [False, 0, "false", "0", "no"]:
return False
raise OSXDefaultsException("Invalid boolean value: {0}".format(repr(value)))
elif type == "date":
try:
return datetime.datetime.strptime(value.split("+")[0].strip(), "%Y-%m-%d %H:%M:%S")
except ValueError:
raise OSXDefaultsException(
"Invalid date value: {0}. Required format yyy-mm-dd hh:mm:ss.".format(repr(value))
)
elif type in ["int", "integer"]:
if not str(value).isdigit():
raise OSXDefaultsException("Invalid integer value: {0}".format(repr(value)))
return int(value)
elif type == "float":
try:
value = float(value)
except ValueError:
raise OSXDefaultsException("Invalid float value: {0}".format(repr(value)))
return value
elif type == "array":
if not isinstance(value, list):
raise OSXDefaultsException("Invalid value. Expected value to be an array")
return value
raise OSXDefaultsException('Type is not supported: {0}'.format(type))
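# Illustrative examples of the conversions performed above (input values are
# assumed; shown here only as descriptive comments):
#   _convert_type("bool", "Yes")   -> True
#   _convert_type("int", "42")     -> 42
#   _convert_type("float", "1.5")  -> 1.5
#   _convert_type("date", "2014-02-14 10:00:00 +0000")
#       -> datetime.datetime(2014, 2, 14, 10, 0)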
""" Returns a normalized list of commandline arguments based on the "host" attribute """
def _host_args(self):
if self.host is None:
return []
elif self.host == 'currentHost':
return ['-currentHost']
else:
return ['-host', self.host]
""" Returns a list containing the "defaults" executable and any common base arguments """
def _base_command(self):
return [self.executable] + self._host_args()
""" Converts array output from defaults to an list """
@staticmethod
def _convert_defaults_str_to_list(value):
# Split output of defaults. Every line contains a value
value = value.splitlines()
# Remove first and last item, those are not actual values
value.pop(0)
value.pop(-1)
# Remove extra spaces and comma (,) at the end of values
value = [re.sub(',$', '', x.strip(' ')) for x in value]
return value
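# Illustrative example (assumed raw output of `defaults read` for an array key,
# shown only as a comment): input such as
#   (
#       en,
#       nl
#   )
# is reduced by the steps above to ['en', 'nl'].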
# /tools -------------------------------------------------------------- }}}
# commands ------------------------------------------------------------ {{{
""" Reads value of this domain & key from defaults """
def read(self):
# First try to find out the type
rc, out, err = self.module.run_command(self._base_command() + ["read-type", self.domain, self.key])
# If RC is 1, the key does not exist
if rc == 1:
return None
# If the RC is not 0, then something terrible happened! Ooooh nooo!
if rc != 0:
raise OSXDefaultsException("An error occurred while reading key type from defaults: " + out)
# Ok, let's parse the type from the output
type = out.strip().replace('Type is ', '')
# Now get the current value
rc, out, err = self.module.run_command(self._base_command() + ["read", self.domain, self.key])
# Strip output
out = out.strip()
# A non-zero RC at this point is kinda strange...
if rc != 0:
raise OSXDefaultsException("An error occurred while reading key value from defaults: " + out)
# Convert string to list when type is array
if type == "array":
out = self._convert_defaults_str_to_list(out)
# Store the current_value
self.current_value = self._convert_type(type, out)
""" Writes value to this domain & key to defaults """
def write(self):
# We need to convert some values so the defaults commandline understands it
if isinstance(self.value, bool):
if self.value:
value = "TRUE"
else:
value = "FALSE"
elif isinstance(self.value, (int, float)):
value = str(self.value)
elif self.array_add and self.current_value is not None:
value = list(set(self.value) - set(self.current_value))
elif isinstance(self.value, datetime.datetime):
value = self.value.strftime('%Y-%m-%d %H:%M:%S')
else:
value = self.value
# When the type is array and array_add is enabled, morph the type :)
if self.type == "array" and self.array_add:
self.type = "array-add"
# All values should be a list, for easy passing it to the command
if not isinstance(value, list):
value = [value]
rc, out, err = self.module.run_command(self._base_command() + ['write', self.domain, self.key, '-' + self.type] + value)
if rc != 0:
raise OSXDefaultsException('An error occurred while writing value to defaults: ' + out)
""" Deletes defaults key from domain """
def delete(self):
rc, out, err = self.module.run_command(self._base_command() + ['delete', self.domain, self.key])
if rc != 0:
raise OSXDefaultsException("An error occurred while deleting key from defaults: " + out)
# /commands ----------------------------------------------------------- }}}
# run ----------------------------------------------------------------- {{{
""" Does the magic! :) """
def run(self):
# Get the current value from defaults
self.read()
# Handle absent state
if self.state == "absent":
if self.current_value is None:
return False
if self.module.check_mode:
return True
self.delete()
return True
# Check for a type mismatch: the given type must match the type stored in defaults
value_type = type(self.value)
if self.current_value is not None and not isinstance(self.current_value, value_type):
raise OSXDefaultsException("Type mismatch. Type in defaults: " + type(self.current_value).__name__)
# Current value matches the given value. Nothing needs to be done. Arrays need extra care
if self.type == "array" and self.current_value is not None and not self.array_add and \
set(self.current_value) == set(self.value):
return False
elif self.type == "array" and self.current_value is not None and self.array_add and \
len(list(set(self.value) - set(self.current_value))) == 0:
return False
elif self.current_value == self.value:
return False
if self.module.check_mode:
return True
# Change/Create/Set given key/value for domain in defaults
self.write()
return True
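# Illustrative example of the array_add behaviour above (values assumed): with
# current_value == ['en'] and value == ['en', 'nl'], array_add=True leaves only
# the new element, so write() appends just 'nl' via 'array-add'; with
# array_add=False the sets differ, so the full array ['en', 'nl'] is rewritten.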
# /run ---------------------------------------------------------------- }}}
# /class MacDefaults ------------------------------------------------------ }}}
# main -------------------------------------------------------------------- {{{
def main():
module = AnsibleModule(
argument_spec=dict(
domain=dict(
default="NSGlobalDomain",
required=False,
),
host=dict(
default=None,
required=False,
),
key=dict(
default=None,
),
type=dict(
default="string",
required=False,
choices=[
"array",
"bool",
"boolean",
"date",
"float",
"int",
"integer",
"string",
],
),
array_add=dict(
default=False,
required=False,
type='bool',
),
value=dict(
default=None,
required=False,
type='raw'
),
state=dict(
default="present",
required=False,
choices=[
"absent", "present"
],
),
path=dict(
default="/usr/bin:/usr/local/bin",
required=False,
)
),
supports_check_mode=True,
)
domain = module.params['domain']
host = module.params['host']
key = module.params['key']
type = module.params['type']
array_add = module.params['array_add']
value = module.params['value']
state = module.params['state']
path = module.params['path']
try:
defaults = OSXDefaults(module=module, domain=domain, host=host, key=key, type=type,
array_add=array_add, value=value, state=state, path=path)
changed = defaults.run()
module.exit_json(changed=changed)
except OSXDefaultsException:
e = get_exception()
module.fail_json(msg=e.message)
# /main ------------------------------------------------------------------- }}}
if __name__ == '__main__':
main()
|
gpl-3.0
| -2,047,490,247,506,656,500 | 1,591,513,267,675,990,800 | 33.212766 | 128 | 0.543463 | false |
sam17/room-of-requirement
|
alexa_skill/alexa_dumbledore_skill.py
|
1
|
3214
|
import logging
import json
from flask_ask import Ask,request,session, question, statement
from flask import Flask
import requests
import datetime
SERVER_IP = "http://ec2-52-221-204-189.ap-southeast-1.compute.amazonaws.com:3000/"
THIS = "Saturn"
app = Flask(__name__)
ask = Ask(app, "/")
logging.getLogger('flask_ask').setLevel(logging.DEBUG)
@ask.launch
def launch():
speech_text = 'Hi, I am Dumbledore. I can tell you everything about Rooms at App Dynamics'
return question(speech_text).reprompt(speech_text).simple_card('DumbledoreResponse', speech_text)
@ask.intent('BookingIntent', mapping={'room': 'ROOM', 'fromTime': 'FROMTIME', 'toTime':'TOTIME', 'team':'TEAM', 'date':'DATE' }, default={'date': datetime.datetime.now().strftime ("%Y-%m-%d"), 'team': 'Platform' })
def book(room, fromTime, toTime, team, date):
if room == 'this':
room = THIS
startTime = date + "T" + str(fromTime)
endTime = date + "T" + str(toTime)
resp = requests.post(SERVER_IP+'listAvailableRooms', json={"startTime": startTime, "endTime": endTime})
if resp.status_code !=200:
return statement("Node Server Error, Please check node log").simple_card('DumbledoreResponse', speech_text)
available_rooms = json.loads(resp.text)
if(room in available_rooms):
resp = requests.post(SERVER_IP+'bookRoom', json={"organizer": team, "invitees" : "", "room": room, "startTime": startTime , "endTime": endTime })
if resp.status_code !=200:
return statement("Node Server Error, Please check node log").simple_card('DumbledoreResponse', speech_text)
speech_text = "Booking done for " + room + " by " + str(team) + " on " + date + " at " + fromTime
return statement(speech_text).simple_card('DumbledoreResponse', speech_text)
else:
speech_text = "Sorry, Room is already booked."
return statement(speech_text).simple_card('DumbledoreResponse', speech_text)
speech_text = "Sorry, I did not get all information"
return statement(speech_text).simple_card('DumbledoreResponse', speech_text)
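# Illustrative sketch of the JSON bodies the node server endpoints used above are
# assumed to accept (field names taken from the requests built in book(); the
# exact server-side contract is not documented here):
#   POST /listAvailableRooms  {"startTime": "2017-01-01T10:00", "endTime": "2017-01-01T11:00"}
#       -> JSON list of free room names, e.g. ["Saturn", "Jupiter"]
#   POST /bookRoom            {"organizer": "Platform", "invitees": "", "room": "Saturn",
#                              "startTime": "2017-01-01T10:00", "endTime": "2017-01-01T11:00"}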
@ask.intent('EmptyIntent', mapping={'fromTime': 'FROMTIME', 'toTime':'TOTIME','date':'DATE' }, default={'date': datetime.datetime.now().strftime ("%Y-%m-%d") })
def findEmpty(fromTime, toTime, date):
startTime = date + "T" + str(fromTime)
endTime = date + "T" + str(toTime)
print startTime, endTime
resp = requests.post(SERVER_IP+'listAvailableRooms', json={"startTime": startTime, "endTime": endTime})
if resp.status_code !=200:
return statement("Node Server Error, Please check node log").simple_card('DumbledoreResponse', speech_text)
available_rooms = json.loads(resp.text)
print available_rooms
speech_text = "Available Rooms are " + ", ".join([r.encode('utf-8') for r in available_rooms])
return statement(speech_text).simple_card('DumbledoreResponse', speech_text)
@ask.intent('AMAZON.HelpIntent')
def help():
speech_text = 'Ask me about occupancy only now'
return question(speech_text).reprompt(speech_text).simple_card('DumbledoreResponse', speech_text)
@ask.session_ended
def session_ended():
return "", 200
if __name__ == '__main__':
app.run(debug=True)
|
mit
| -847,165,251,930,744,700 | -3,838,891,409,697,982,500 | 45.57971 | 214 | 0.679838 | false |
alrusdi/lettuce
|
tests/integration/lib/Django-1.3/tests/regressiontests/views/tests/debug.py
|
50
|
6467
|
import inspect
import sys
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase, RequestFactory
from django.core.urlresolvers import reverse
from django.template import TemplateSyntaxError
from django.views.debug import ExceptionReporter
from regressiontests.views import BrokenException, except_args
class DebugViewTests(TestCase):
def setUp(self):
self.old_debug = settings.DEBUG
settings.DEBUG = True
self.old_template_debug = settings.TEMPLATE_DEBUG
settings.TEMPLATE_DEBUG = True
def tearDown(self):
settings.DEBUG = self.old_debug
settings.TEMPLATE_DEBUG = self.old_template_debug
def test_files(self):
response = self.client.get('/views/raises/')
self.assertEqual(response.status_code, 500)
data = {
'file_data.txt': SimpleUploadedFile('file_data.txt', 'haha'),
}
response = self.client.post('/views/raises/', data)
self.assertTrue('file_data.txt' in response.content)
self.assertFalse('haha' in response.content)
def test_404(self):
response = self.client.get('/views/raises404/')
self.assertEqual(response.status_code, 404)
def test_view_exceptions(self):
for n in range(len(except_args)):
self.assertRaises(BrokenException, self.client.get,
reverse('view_exception', args=(n,)))
def test_template_exceptions(self):
for n in range(len(except_args)):
try:
self.client.get(reverse('template_exception', args=(n,)))
except TemplateSyntaxError, e:
raising_loc = inspect.trace()[-1][-2][0].strip()
self.assertFalse(raising_loc.find('raise BrokenException') == -1,
"Failed to find 'raise BrokenException' in last frame of traceback, instead found: %s" %
raising_loc)
def test_template_loader_postmortem(self):
response = self.client.get(reverse('raises_template_does_not_exist'))
self.assertContains(response, 'templates/i_dont_exist.html</code> (File does not exist)</li>', status_code=500)
class ExceptionReporterTests(TestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h1>ValueError at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h1>ValueError</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertIn('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">No exception supplied</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertIn('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertIn('<h1>Report</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
|
gpl-3.0
| 952,987,554,876,933,100 | 5,611,890,007,382,413,000 | 44.865248 | 119 | 0.632287 | false |
GcsSloop/PythonNote
|
PythonCode/Python入门/函数/定义可变参数.py
|
1
|
1027
|
#coding=utf-8
#author: sloop
'''
Task
Write an average() function that accepts a variable number of arguments.
'''
#code
def average(*args):
    s = sum(args)*1.0 #sum
    l = len(args) #count
    return 0.0 if l==0 else s/l #mean
print average()
print average(1, 2)
print average(1, 2, 2, 3, 4)
'''
def average(*args):
return 0.0 if len(args)==0 else sum(args)*1.0/len(args)
'''
'''
Defining variadic parameters
If we want a function to accept any number of arguments, we can define a variadic parameter:
def fn(*args):
    print args
The name of a variadic parameter is prefixed with a * sign, and we can pass 0, 1 or more arguments to it:
>>> fn()
()
>>> fn('a')
('a',)
>>> fn('a', 'b')
('a', 'b')
>>> fn('a', 'b', 'c')
('a', 'b', 'c')
There is nothing mysterious about variadic parameters: the Python interpreter packs the arguments passed in into a tuple and hands it to the variadic parameter, so inside the function you can simply treat the variable args as a tuple.
Defining a variadic parameter also simplifies the call site. Suppose we want to compute the average of any number of values; we can define a variadic parameter:
def average(*args):
    ...
Then the function can be called like this:
>>> average()
0
>>> average(1, 2)
1.5
>>> average(1, 2, 2, 3, 4)
2.4
'''
|
apache-2.0
| 4,950,917,752,924,009,000 | -8,179,393,595,831,426,000 | 18.769231 | 140 | 0.571568 | false |
Bostonncity/omaha
|
installers/tagged_installer.py
|
65
|
3236
|
#!/usr/bin/python2.4
# Copyright 2009-2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import os
import re
from installers import tag_meta_installers
def TagOneBundle(env, bundle, untagged_binary_path, output_dir):
tag_str = tag_meta_installers.BuildTagStringForBundle(bundle)
# Need to find relative path to output file under source dir, to allow
# it to be redirected under the output directory.
indx = bundle.output_file_name.find('installers')
relative_filepath = bundle.output_file_name[indx+len('installers')+1:]
tag_exe = '$TESTS_DIR/ApplyTag.exe'
tag_output = env.Command(
target='%s/%s' % (output_dir, relative_filepath),
source=untagged_binary_path,
action='%s $SOURCES $TARGET %s' % (
env.File(tag_exe).abspath, tag_str)
)
# Add extra (hidden) dependency plus a dependency on the tag executable.
env.Depends(tag_output, [bundle.installers_txt_filename, tag_exe])
return tag_output
def _ReadAllBundleInstallerFiles(installers_txt_files_path):
"""Enumerates all the .*_installers.txt files in the installers_txt_files_path
directory, and creates bundles corresponding to the info in each line in
the *_installers.txt file.
Returns:
Returns a dictionary of Bundles with key=lang.
"""
bundles = {}
files = os.listdir(installers_txt_files_path)
for file in files:
regex = re.compile('^(.*)_installers.txt$')
if not regex.match(file):
continue
installer_file = os.path.join(installers_txt_files_path, file)
# Read in the installer file.
read_bundles = tag_meta_installers.ReadBundleInstallerFile(installer_file)
for (key, bundle_list) in read_bundles.items():
if not bundle_list or not key:
continue
if not bundles.has_key(key):
bundles[key] = bundle_list
else:
new_bundles_list = bundles[key] + bundle_list
bundles[key] = new_bundles_list
return bundles
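# The returned mapping is keyed by language, e.g. {'en': [bundle, ...], 'de': [bundle, ...]},
# with bundle lists merged across all *_installers.txt files found in the directory.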
def CreateTaggedInstallers(env, installers_txt_files_path, product_name,
prefix = ''):
"""For each application with an installers.txt file in installer_files_path,
create tagged metainstaller(s).
"""
bundles = _ReadAllBundleInstallerFiles(installers_txt_files_path)
untagged_binary = '%s%sSetup.exe' % (prefix, product_name)
tag_meta_installers.SetOutputFileNames(untagged_binary, bundles, '')
for bundles_lang in bundles.itervalues():
for bundle in bundles_lang:
TagOneBundle(
env=env,
bundle=bundle,
untagged_binary_path='$STAGING_DIR/%s' % (untagged_binary),
output_dir='$TARGET_ROOT/Tagged_Installers',
)
|
apache-2.0
| 4,567,503,387,501,468,700 | 818,414,975,962,251,000 | 33.425532 | 80 | 0.681397 | false |
batxes/4Cin
|
SHH_INV_models/SHH_INV_models_final_output_0.2_-0.1_10000/mtx1_models/SHH_INV_models752.py
|
4
|
17573
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((-207.536, 2706.03, 7917.36), (0.7, 0.7, 0.7), 890.203)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((1684.38, 2907.3, 7418.01), (0.7, 0.7, 0.7), 792.956)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((2671.05, 4365.58, 7351.74), (0.7, 0.7, 0.7), 856.786)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((2186.77, 5810.76, 9108.36), (0.7, 0.7, 0.7), 963.679)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((3329.61, 5579.94, 9302.3), (0.7, 0.7, 0.7), 761.442)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((4653.13, 3971.93, 8239.47), (0.7, 0.7, 0.7), 961.183)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((5619.79, 1960.64, 10117.4), (0.7, 0.7, 0.7), 753.151)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((5968.15, 1845.22, 11491.8), (1, 0.7, 0), 1098.07)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((5530.3, 1584.13, 6937.88), (0.7, 0.7, 0.7), 1010.42)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((5001.09, 801.888, 6351.18), (1, 0.7, 0), 821.043)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((3569.13, 1628.14, 5590.41), (0.7, 0.7, 0.7), 873.876)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((1740.6, 1967.36, 4789.98), (0.7, 0.7, 0.7), 625.532)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((2805.79, 2717.89, 3907.71), (0.7, 0.7, 0.7), 880.474)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((3099.01, 4521.59, 4161.69), (0.7, 0.7, 0.7), 659.161)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((1444.17, 5013.24, 4746.8), (0.7, 0.7, 0.7), 831.745)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((-1046.68, 5460.68, 4552.49), (0.7, 0.7, 0.7), 803.065)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((301.217, 4203.71, 5195.8), (0.7, 0.7, 0.7), 610.262)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((595.756, 2816.11, 5818.69), (0.7, 0.7, 0.7), 741.265)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((108.534, 4103.83, 5643.7), (0.7, 0.7, 0.7), 748.625)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((624.405, 4209.43, 4169.54), (0.7, 0.7, 0.7), 677.181)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((2411.96, 3896.98, 4144.71), (0.7, 0.7, 0.7), 616.015)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((2000.53, 4239.23, 4159.93), (0.7, 0.7, 0.7), 653.154)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((1742.34, 2927.09, 4069.99), (0.7, 0.7, 0.7), 595.33)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((1663.14, 2121.01, 5106.55), (0.7, 0.7, 0.7), 627.901)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((1817.67, 3538.84, 5854.66), (0.7, 0.7, 0.7), 663.941)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((2084.82, 4224.3, 4706.3), (0.7, 0.7, 0.7), 663.899)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((812.209, 3668.45, 4360.17), (0.7, 0.7, 0.7), 644.694)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((2063.61, 3437.53, 3453.95), (0.7, 0.7, 0.7), 896.802)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((2309.34, 4748.94, 3645.29), (0.7, 0.7, 0.7), 576.38)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((2730.54, 5664.55, 3149.44), (0.7, 0.7, 0.7), 635.092)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((2933.81, 4737.39, 4435.11), (0.7, 0.7, 0.7), 651.505)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((3808.47, 4674.35, 5680.5), (0.7, 0.7, 0.7), 718.042)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((3438.26, 3579.11, 4630.85), (0.7, 0.7, 0.7), 726.714)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((2043.07, 3340.69, 4134.85), (0.7, 0.7, 0.7), 673.585)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((1787.54, 2969.07, 3380.98), (0.7, 0.7, 0.7), 598.418)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((1645.76, 1695.63, 2744.43), (0.7, 0.7, 0.7), 697.612)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((4088.18, 3123.64, 3942.17), (0.7, 0.7, 0.7), 799.808)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((5851.9, 2237.99, 5250.2), (0.7, 0.7, 0.7), 1132.58)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((6254.9, 2864.86, 4784.87), (0.7, 0.7, 0.7), 1011.94)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((7842, 3352.29, 4988.8), (0.7, 0.7, 0.7), 782.592)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((8442.78, 1706.41, 5330.33), (0.7, 0.7, 0.7), 856.575)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((8712.35, 721.168, 5440.16), (1, 0.7, 0), 706.579)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((8226.46, 1717.29, 5605.1), (0.7, 0.7, 0.7), 1015.96)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((7544.8, 4487.46, 5711.28), (0.7, 0.7, 0.7), 1205.72)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((7695.9, 6446.19, 4957.51), (0.7, 0.7, 0.7), 841.939)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((7256.93, 6039.91, 6381.91), (1, 0.7, 0), 806.999)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((7497.79, 6638.54, 6421.06), (0.7, 0.7, 0.7), 958.856)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((6413.48, 8122.47, 6844.33), (0.7, 0.7, 0.7), 952.892)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((6629.36, 8875.54, 6493.19), (0.7, 0.7, 0.7), 809.284)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((7435.87, 8371.95, 6510.56), (0.7, 0.7, 0.7), 709.159)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((5989.03, 9002.13, 5702.32), (0.7, 0.7, 0.7), 859.832)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((4391.34, 8324.38, 5529.45), (0.7, 0.7, 0.7), 800.866)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((5378.06, 8405.9, 3980.26), (0.7, 0.7, 0.7), 949.508)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((6200.24, 6878.48, 3151.95), (0.7, 0.7, 0.7), 891.98)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((4450.93, 6621.99, 4060.48), (0.7, 0.7, 0.7), 890.034)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((5338, 7988.48, 4527.63), (0.7, 0.7, 0.7), 804.165)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((5367.09, 8253.16, 2873.56), (0.7, 0.7, 0.7), 826.796)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((5479.75, 8879.3, 1980.47), (0.7, 0.7, 0.7), 1085.8)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((5947.12, 6583.23, 2908.12), (0.7, 0.7, 0.7), 906.997)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((7349.08, 7707.97, 3099.42), (0.7, 0.7, 0.7), 708.694)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((8479.76, 8059.54, 4193.14), (0.7, 0.7, 0.7), 780.223)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((9005.78, 8746.7, 5528.34), (0.7, 0.7, 0.7), 757.424)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((9534.56, 9031.12, 7035.6), (0.7, 0.7, 0.7), 817.574)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((8290.42, 9809.39, 7604.57), (0.7, 0.7, 0.7), 782.423)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((6941.69, 8739.69, 7554.86), (0.7, 0.7, 0.7), 906.404)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((7010.92, 8963.91, 5742.91), (0.7, 0.7, 0.7), 818.463)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((6550.28, 9238.93, 6352.59), (0.7, 0.7, 0.7), 759.539)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((6205.33, 8570.56, 8049.29), (0.7, 0.7, 0.7), 1088.59)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((7021.52, 9898.13, 6690.05), (0.7, 0.7, 0.7), 822.312)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((6856.63, 8957.73, 5793.29), (0.7, 0.7, 0.7), 749.81)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((6189.93, 9564.67, 6718.62), (0.7, 0.7, 0.7), 764.488)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
|
gpl-3.0
| 2,744,589,444,852,783,000 | 458,275,748,327,204,600 | 46.112601 | 75 | 0.699767 | false |
WielderOfMjoelnir/pypeira
|
pypeira/core/time.py
|
1
|
1497
|
# This might not have any use anymore, as the HDU objects are the new thing.
# Possibly change it into a time converter instead of a getter, I suppose.
def hdu_get_time(hdu, time_format='bmjd'):
"""
Will be used as a key function for the list.sort() or sorted() functions.
Example,
        hdus.sort(key=hdu_get_time)
    where hdus is a list of HDU objects, will call hdu_get_time() on each element
    in the list, and then sort the elements according to the value returned from the
    key function, which in this case is hdu_get_time().
Parameters
----------
hdu: HDU object
The HDU object which is an element in the list that is to be sorted.
time_format: str, optional
The time format you want to sort by, even though it should not matter.
Returns
-------
float
It's the header entry BMJD_OBS (Barycentric Julian Date of observation), which
will then be used as the comparison attribute of each element in the list to
sorted. If a different time format is specified using the 'time_format' keyword,
then the returned value will be the corresponding header value.
"""
format_to_kwrd = {
'bmjd': 'BMJD_OBS',
        'hmjd': 'HMJD_OBS',
        'mjd': 'MJD_OBS',
        'utc': 'UTCS_OBS',
'date': 'DATE_OBS',
'dce': 'ET_OBS'
}
if format_to_kwrd.get(time_format):
return hdu.hdr[format_to_kwrd.get(time_format)]
else:
return hdu.timestamp
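# Illustrative usage sketch (not part of the original module): `hdus` below is a
# hypothetical list of HDU objects; hdu_get_time is simply passed as the sort key,
# as described in the docstring above.
#
#     hdus.sort(key=hdu_get_time)                                   # sort by BMJD_OBS
#     first = min(hdus, key=lambda h: hdu_get_time(h, time_format='mjd'))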
|
mit
| 2,081,138,467,051,176,000 | -3,127,769,344,065,873,000 | 34.642857 | 88 | 0.637275 | false |
Ritiek/Spotify-Downloader
|
spotdl/encode/encoders/ffmpeg.py
|
1
|
5171
|
import subprocess
import os
from spotdl.encode import EncoderBase
from spotdl.encode.exceptions import EncoderNotFoundError
from spotdl.encode.exceptions import FFmpegNotFoundError
import logging
logger = logging.getLogger(__name__)
# Key: from format
# Subkey: to format
RULES = {
"m4a": {
"mp3": "-codec:v copy -codec:a libmp3lame",
"opus": "-codec:a libopus",
"m4a": "-acodec copy",
"flac": "-codec:a flac",
"ogg": "-codec:a libvorbis -q:a 5",
},
"opus": {
"mp3": "-codec:a libmp3lame",
"m4a": "-cutoff 20000 -codec:a aac",
"flac": "-codec:a flac",
"ogg": "-codec:a libvorbis -q:a 5",
"opus": "-acodec copy",
},
}
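# The table above is consulted as RULES[input_encoding][target_encoding]; for example,
# RULES["m4a"]["mp3"] yields the libmp3lame arguments used when transcoding an M4A
# input to MP3 (see _generate_encoding_arguments below).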
class EncoderFFmpeg(EncoderBase):
"""
A class for encoding media files using FFmpeg.
Parameters
----------
encoder_path: `str`
Path to FFmpeg.
must_exist: `bool`
Error out immediately if the encoder isn't found in
``encoder_path``.
Examples
--------
+ Re-encode an OPUS stream from STDIN to an MP3:
>>> import os
>>> input_path = "audio.opus"
>>> target_path = "audio.mp3"
>>> input_path_size = os.path.getsize(input_path)
>>>
>>> from spotdl.encode.encoders import EncoderFFmpeg
>>> ffmpeg = EncoderFFmpeg()
>>> process = ffmpeg.re_encode_from_stdin(
... input_encoding="opus",
... target_path=target_path
... )
>>>
>>> chunk_size = 4096
>>> total_chunks = (input_path_size // chunk_size) + 1
>>>
>>> with open(input_path, "rb") as fin:
... for chunk_number in range(1, total_chunks+1):
... chunk = fin.read(chunk_size)
... process.stdin.write(chunk)
... print("chunks encoded: {}/{}".format(
... chunk_number,
... total_chunks,
... ))
>>>
>>> process.stdin.close()
>>> process.wait()
"""
def __init__(self, encoder_path="ffmpeg", must_exist=True):
_loglevel = "-hide_banner -nostats -v warning"
_additional_arguments = ["-b:a", "192k", "-vn"]
try:
super().__init__(encoder_path, must_exist, _loglevel, _additional_arguments)
except EncoderNotFoundError as e:
raise FFmpegNotFoundError(e.args[0])
self._rules = RULES
def set_trim_silence(self):
self.set_argument("-af silenceremove=start_periods=1")
def get_encoding(self, path):
return super().get_encoding(path)
def _generate_encoding_arguments(self, input_encoding, target_encoding):
initial_arguments = self._rules.get(input_encoding)
if initial_arguments is None:
raise TypeError(
'The input format ("{}") is not supported.'.format(
input_encoding,
))
arguments = initial_arguments.get(target_encoding)
if arguments is None:
raise TypeError(
'The output format ("{}") is not supported.'.format(
target_encoding,
))
return arguments
def set_debuglog(self):
self._loglevel = "-loglevel debug"
def _generate_encode_command(self, input_path, target_path,
input_encoding=None, target_encoding=None):
if input_encoding is None:
input_encoding = self.get_encoding(input_path)
if target_encoding is None:
target_encoding = self.get_encoding(target_path)
arguments = self._generate_encoding_arguments(
input_encoding,
target_encoding
)
command = [self.encoder_path] \
+ ["-y", "-nostdin"] \
+ self._loglevel.split() \
+ ["-i", input_path] \
+ arguments.split() \
+ self._additional_arguments \
+ ["-f", self.target_format_from_encoding(target_encoding)] \
+ [target_path]
return command
def re_encode(self, input_path, target_path, target_encoding=None, delete_original=False):
encode_command = self._generate_encode_command(
input_path,
target_path,
target_encoding=target_encoding
)
logger.debug("Calling FFmpeg with:\n{command}".format(
command=encode_command,
))
process = subprocess.Popen(encode_command)
process.wait()
encode_successful = process.returncode == 0
if encode_successful and delete_original:
os.remove(input_path)
return process
def re_encode_from_stdin(self, input_encoding, target_path, target_encoding=None):
encode_command = self._generate_encode_command(
"-",
target_path,
input_encoding=input_encoding,
target_encoding=target_encoding,
)
logger.debug("Calling FFmpeg with:\n{command}".format(
command=encode_command,
))
process = subprocess.Popen(encode_command, stdin=subprocess.PIPE)
return process
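# Minimal file-to-file usage sketch (illustrative, not part of the original module;
# the paths are placeholders and ffmpeg must be available on PATH). The encodings are
# looked up via get_encoding(), which is assumed to derive them from file extensions:
#
#     encoder = EncoderFFmpeg()
#     encoder.re_encode("audio.m4a", "audio.mp3", delete_original=False)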
|
mit
| 7,247,366,997,119,361,000 | 8,521,635,176,446,147,000 | 31.522013 | 94 | 0.553278 | false |
ClusterLabs/pcs
|
pcs_test/tools/command_env/config_http_files.py
|
3
|
4688
|
import base64
import json
from pcs_test.tools.command_env.mock_node_communicator import (
place_multinode_call,
)
class FilesShortcuts:
def __init__(self, calls):
self.__calls = calls
def put_files(
self,
node_labels=None,
pcmk_authkey=None,
corosync_authkey=None,
corosync_conf=None,
pcs_disaster_recovery_conf=None,
pcs_settings_conf=None,
communication_list=None,
name="http.files.put_files",
):
# pylint: disable=too-many-arguments
"""
        Create a call for distributing files to the nodes.
node_labels list -- create success responses from these nodes
pcmk_authkey bytes -- content of pacemaker authkey file
corosync_authkey bytes -- content of corosync authkey file
corosync_conf string -- content of corosync.conf
pcs_disaster_recovery_conf string -- content of pcs DR config
pcs_settings_conf string -- content of pcs_settings.conf
communication_list list -- create custom responses
name string -- the key of this call
"""
input_data = {}
output_data = {}
written_output_dict = dict(
code="written",
message="",
)
if pcmk_authkey:
file_id = "pacemaker_remote authkey"
input_data[file_id] = dict(
data=base64.b64encode(pcmk_authkey).decode("utf-8"),
type="pcmk_remote_authkey",
rewrite_existing=True,
)
output_data[file_id] = written_output_dict
if corosync_authkey:
file_id = "corosync authkey"
input_data[file_id] = dict(
data=base64.b64encode(corosync_authkey).decode("utf-8"),
type="corosync_authkey",
rewrite_existing=True,
)
output_data[file_id] = written_output_dict
if corosync_conf:
file_id = "corosync.conf"
input_data[file_id] = dict(
data=corosync_conf,
type="corosync_conf",
)
output_data[file_id] = written_output_dict
if pcs_disaster_recovery_conf:
file_id = "disaster-recovery config"
input_data[file_id] = dict(
data=base64.b64encode(pcs_disaster_recovery_conf).decode(
"utf-8"
),
type="pcs_disaster_recovery_conf",
rewrite_existing=True,
)
output_data[file_id] = written_output_dict
if pcs_settings_conf:
file_id = "pcs_settings.conf"
input_data[file_id] = dict(
data=pcs_settings_conf,
type="pcs_settings_conf",
rewrite_existing=True,
)
output_data[file_id] = written_output_dict
place_multinode_call(
self.__calls,
name,
node_labels,
communication_list,
action="remote/put_file",
param_list=[("data_json", json.dumps(input_data))],
output=json.dumps(dict(files=output_data)),
)
def remove_files(
self,
node_labels=None,
pcsd_settings=False,
pcs_disaster_recovery_conf=False,
communication_list=None,
name="http.files.remove_files",
):
"""
Create a call for removing the files on the nodes.
node_labels list -- create success responses from these nodes
pcsd_settings bool -- if True, remove file pcsd_settings
pcs_disaster_recovery_conf bool -- if True, remove pcs DR config
communication_list list -- create custom responses
name string -- the key of this call
"""
input_data = {}
output_data = {}
if pcsd_settings:
file_id = "pcsd settings"
input_data[file_id] = dict(type="pcsd_settings")
output_data[file_id] = dict(
code="deleted",
message="",
)
if pcs_disaster_recovery_conf:
file_id = "pcs disaster-recovery config"
input_data[file_id] = dict(type="pcs_disaster_recovery_conf")
output_data[file_id] = dict(
code="deleted",
message="",
)
place_multinode_call(
self.__calls,
name,
node_labels,
communication_list,
action="remote/remove_file",
param_list=[("data_json", json.dumps(input_data))],
output=json.dumps(dict(files=output_data)),
)
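# Illustrative usage sketch (not part of the original file). In a command test the
# shortcut is reached through the test's config fixture; the accessor path below is
# an assumption about that wiring, while the keyword arguments match put_files above:
#
#     self.config.http.files.put_files(
#         node_labels=["node-1", "node-2"],
#         corosync_conf=corosync_conf_text,
#     )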
|
gpl-2.0
| -1,442,039,745,629,232,600 | -7,691,171,866,760,471,000 | 31.555556 | 73 | 0.539036 | false |
mdavid/pledgeservice
|
testlib/setuptools/command/test.py
|
113
|
6526
|
import unittest
from unittest import TestLoader
from setuptools import Command
from distutils.errors import DistutilsOptionError
import sys
from pkg_resources import (resource_listdir, resource_exists,
normalize_path, working_set, _namespace_packages, add_activation_listener,
require, EntryPoint)
from setuptools.py31compat import unittest_main
class ScanningLoader(TestLoader):
def loadTestsFromModule(self, module):
"""Return a suite of all tests cases contained in the given module
If the module is a package, load tests from all the modules in it.
If the module has an ``additional_tests`` function, call it and add
the return value to the tests.
"""
tests = []
if module.__name__ != 'setuptools.tests.doctest': # ugh
tests.append(TestLoader.loadTestsFromModule(self, module))
if hasattr(module, "additional_tests"):
tests.append(module.additional_tests())
if hasattr(module, '__path__'):
for file in resource_listdir(module.__name__, ''):
if file.endswith('.py') and file != '__init__.py':
submodule = module.__name__ + '.' + file[:-3]
else:
if resource_exists(module.__name__, file + '/__init__.py'):
submodule = module.__name__+'.'+file
else:
continue
tests.append(self.loadTestsFromName(submodule))
if len(tests) != 1:
return self.suiteClass(tests)
else:
return tests[0] # don't create a nested suite for only one return
class test(Command):
"""Command to run unit tests after in-place build"""
description = "run unit tests after in-place build"
user_options = [
('test-module=','m', "Run 'test_suite' in specified module"),
('test-suite=','s',
"Test suite to run (e.g. 'some_module.test_suite')"),
('test-runner=', 'r', "Test runner to use"),
]
def initialize_options(self):
self.test_suite = None
self.test_module = None
self.test_loader = None
self.test_runner = None
def finalize_options(self):
if self.test_suite is None:
if self.test_module is None:
self.test_suite = self.distribution.test_suite
else:
self.test_suite = self.test_module + ".test_suite"
elif self.test_module:
raise DistutilsOptionError(
"You may specify a module or a suite, but not both"
)
self.test_args = [self.test_suite]
if self.verbose:
self.test_args.insert(0,'--verbose')
if self.test_loader is None:
self.test_loader = getattr(self.distribution, 'test_loader', None)
if self.test_loader is None:
self.test_loader = "setuptools.command.test:ScanningLoader"
if self.test_runner is None:
self.test_runner = getattr(self.distribution, 'test_runner', None)
def with_project_on_sys_path(self, func):
with_2to3 = (
sys.version_info >= (3,)
and getattr(self.distribution, 'use_2to3', False)
)
if with_2to3:
# If we run 2to3 we can not do this inplace:
# Ensure metadata is up-to-date
self.reinitialize_command('build_py', inplace=0)
self.run_command('build_py')
bpy_cmd = self.get_finalized_command("build_py")
build_path = normalize_path(bpy_cmd.build_lib)
# Build extensions
self.reinitialize_command('egg_info', egg_base=build_path)
self.run_command('egg_info')
self.reinitialize_command('build_ext', inplace=0)
self.run_command('build_ext')
else:
# Without 2to3 inplace works fine:
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
ei_cmd = self.get_finalized_command("egg_info")
old_path = sys.path[:]
old_modules = sys.modules.copy()
try:
sys.path.insert(0, normalize_path(ei_cmd.egg_base))
working_set.__init__()
add_activation_listener(lambda dist: dist.activate())
require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
func()
finally:
sys.path[:] = old_path
sys.modules.clear()
sys.modules.update(old_modules)
working_set.__init__()
def run(self):
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
if self.test_suite:
cmd = ' '.join(self.test_args)
if self.dry_run:
self.announce('skipping "unittest %s" (dry run)' % cmd)
else:
self.announce('running "unittest %s"' % cmd)
self.with_project_on_sys_path(self.run_tests)
def run_tests(self):
# Purge modules under test from sys.modules. The test loader will
# re-import them from the build location. Required when 2to3 is used
# with namespace packages.
if sys.version_info >= (3,) and getattr(self.distribution, 'use_2to3', False):
module = self.test_args[-1].split('.')[0]
if module in _namespace_packages:
del_modules = []
if module in sys.modules:
del_modules.append(module)
module += '.'
for name in sys.modules:
if name.startswith(module):
del_modules.append(name)
list(map(sys.modules.__delitem__, del_modules))
unittest_main(
None, None, [unittest.__file__]+self.test_args,
testLoader=self._resolve_as_ep(self.test_loader),
testRunner=self._resolve_as_ep(self.test_runner),
)
@staticmethod
def _resolve_as_ep(val):
"""
        Load the indicated attribute value, called, as if it were
specified as an entry point.
"""
if val is None:
return
parsed = EntryPoint.parse("x=" + val)
return parsed.load(require=False)()
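# For example, _resolve_as_ep("setuptools.command.test:ScanningLoader") loads the
# ScanningLoader class defined above and returns an instance of it, exactly as if
# the value had been supplied as an entry point (illustrative note, not original code).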
|
agpl-3.0
| -7,241,244,868,911,049,000 | -1,233,445,081,220,242,400 | 35.458101 | 86 | 0.568802 | false |
hfeeki/transifex
|
transifex/projects/search_indexes.py
|
1
|
1115
|
import datetime
from haystack.indexes import *
from haystack import site
from transifex.projects.models import Project
class ProjectIndex(RealTimeSearchIndex):
text = CharField(document=True, use_template=True)
slug = CharField(model_attr='slug', null=False)
name = CharField(model_attr='name', null=False, boost=1.125)
description = CharField(model_attr='description', null=True)
# django-haystack-1.2 needs it along with the custom prepare method
suggestions = CharField()
def prepare(self, obj):
prepared_data = super(ProjectIndex, self).prepare(obj)
prepared_data['suggestions'] = prepared_data['text']
return prepared_data
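    # The 'suggestions' field simply mirrors the rendered document text so that
    # haystack's spelling-suggestion backend has a dedicated field to draw from.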
def index_queryset(self):
"""Used when the entire index for model is updated."""
# Do not index private projects
return Project.objects.exclude(private=True).filter(
modified__lte=datetime.datetime.now())
def get_updated_field(self):
"""Project mode field used to identify new/modified object to index."""
return 'modified'
site.register(Project, ProjectIndex)
|
gpl-2.0
| -2,698,500,773,670,376,000 | 952,386,967,640,039,400 | 31.823529 | 79 | 0.69417 | false |
MiltosD/CEF-ELRC
|
lib/python2.7/site-packages/django/contrib/auth/tests/models.py
|
318
|
1493
|
from django.conf import settings
from django.test import TestCase
from django.contrib.auth.models import User, SiteProfileNotAvailable
class ProfileTestCase(TestCase):
fixtures = ['authtestdata.json']
def setUp(self):
"""Backs up the AUTH_PROFILE_MODULE"""
self.old_AUTH_PROFILE_MODULE = getattr(settings,
'AUTH_PROFILE_MODULE', None)
def tearDown(self):
"""Restores the AUTH_PROFILE_MODULE -- if it was not set it is deleted,
otherwise the old value is restored"""
if self.old_AUTH_PROFILE_MODULE is None and \
hasattr(settings, 'AUTH_PROFILE_MODULE'):
del settings.AUTH_PROFILE_MODULE
if self.old_AUTH_PROFILE_MODULE is not None:
settings.AUTH_PROFILE_MODULE = self.old_AUTH_PROFILE_MODULE
def test_site_profile_not_available(self):
# calling get_profile without AUTH_PROFILE_MODULE set
if hasattr(settings, 'AUTH_PROFILE_MODULE'):
del settings.AUTH_PROFILE_MODULE
user = User.objects.get(username='testclient')
self.assertRaises(SiteProfileNotAvailable, user.get_profile)
# Bad syntax in AUTH_PROFILE_MODULE:
settings.AUTH_PROFILE_MODULE = 'foobar'
self.assertRaises(SiteProfileNotAvailable, user.get_profile)
# module that doesn't exist
settings.AUTH_PROFILE_MODULE = 'foo.bar'
self.assertRaises(SiteProfileNotAvailable, user.get_profile)
|
bsd-3-clause
| 2,560,460,752,852,459,500 | -7,916,789,866,165,109,000 | 41.657143 | 79 | 0.662425 | false |
GheRivero/ansible
|
lib/ansible/modules/storage/netapp/netapp_e_hostgroup.py
|
45
|
13964
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_hostgroup
version_added: "2.2"
short_description: Manage NetApp Storage Array Host Groups
author: Kevin Hulquest (@hulquest)
description:
- Create, update or destroy host groups on a NetApp E-Series storage array.
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
ssid:
required: true
description:
- The ID of the array to manage (as configured on the web services proxy).
state:
required: true
description:
- Whether the specified host group should exist or not.
choices: ['present', 'absent']
name:
required: false
description:
- The name of the host group to manage. Either this or C(id_num) must be supplied.
new_name:
required: false
description:
- specify this when you need to update the name of a host group
id:
required: false
description:
- The id number of the host group to manage. Either this or C(name) must be supplied.
hosts:
required: false
description:
- a list of host names/labels to add to the group
'''
EXAMPLES = '''
- name: Configure Hostgroup
netapp_e_hostgroup:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
    state: present
    name: example_hostgroup
'''
RETURN = '''
clusterRef:
description: The unique identification value for this object. Other objects may use this reference value to refer to the cluster.
returned: always except when state is absent
type: string
sample: "3233343536373839303132333100000000000000"
confirmLUNMappingCreation:
description: If true, indicates that creation of LUN-to-volume mappings should require careful confirmation from the end-user, since such a mapping
will alter the volume access rights of other clusters, in addition to this one.
returned: always
type: boolean
sample: false
hosts:
description: A list of the hosts that are part of the host group after all operations.
returned: always except when state is absent
type: list
sample: ["HostA","HostB"]
id:
description: The id number of the hostgroup
returned: always except when state is absent
type: string
sample: "3233343536373839303132333100000000000000"
isSAControlled:
description: If true, indicates that I/O accesses from this cluster are subject to the storage array's default LUN-to-volume mappings. If false,
indicates that I/O accesses from the cluster are subject to cluster-specific LUN-to-volume mappings.
returned: always except when state is absent
type: boolean
sample: false
label:
description: The user-assigned, descriptive label string for the cluster.
returned: always
type: string
sample: "MyHostGroup"
name:
description: same as label
returned: always except when state is absent
type: string
sample: "MyHostGroup"
protectionInformationCapableAccessMethod:
description: This field is true if the host has a PI capable access method.
returned: always except when state is absent
type: boolean
sample: true
'''
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json"
}
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as e:
r = e.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
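# Thin wrapper around open_url(): returns (status_code, parsed JSON body) and, unless
# ignore_errors is set, raises on HTTP error codes or on bodies that fail to parse.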
def group_exists(module, id_type, ident, ssid, api_url, user, pwd):
rc, data = get_hostgroups(module, ssid, api_url, user, pwd)
for group in data:
if group[id_type] == ident:
return True, data
else:
continue
return False, data
def get_hostgroups(module, ssid, api_url, user, pwd):
groups = "storage-systems/%s/host-groups" % ssid
url = api_url + groups
try:
rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd)
return rc, data
except HTTPError as e:
module.fail_json(msg="Failed to get host groups. Id [%s]. Error [%s]." % (ssid, to_native(e)))
def get_hostref(module, ssid, name, api_url, user, pwd):
all_hosts = 'storage-systems/%s/hosts' % ssid
url = api_url + all_hosts
try:
rc, data = request(url, method='GET', headers=HEADERS, url_username=user, url_password=pwd)
except Exception as e:
module.fail_json(msg="Failed to get hosts. Id [%s]. Error [%s]." % (ssid, to_native(e)))
for host in data:
if host['name'] == name:
return host['hostRef']
else:
continue
module.fail_json(msg="No host with the name %s could be found" % name)
def create_hostgroup(module, ssid, name, api_url, user, pwd, hosts=None):
groups = "storage-systems/%s/host-groups" % ssid
url = api_url + groups
hostrefs = []
if hosts:
for host in hosts:
href = get_hostref(module, ssid, host, api_url, user, pwd)
hostrefs.append(href)
post_data = json.dumps(dict(name=name, hosts=hostrefs))
try:
rc, data = request(url, method='POST', data=post_data, headers=HEADERS, url_username=user, url_password=pwd)
except Exception as e:
module.fail_json(msg="Failed to create host group. Id [%s]. Error [%s]." % (ssid, to_native(e)))
return rc, data
def update_hostgroup(module, ssid, name, api_url, user, pwd, hosts=None, new_name=None):
gid = get_hostgroup_id(module, ssid, name, api_url, user, pwd)
groups = "storage-systems/%s/host-groups/%s" % (ssid, gid)
url = api_url + groups
hostrefs = []
if hosts:
for host in hosts:
href = get_hostref(module, ssid, host, api_url, user, pwd)
hostrefs.append(href)
if new_name:
post_data = json.dumps(dict(name=new_name, hosts=hostrefs))
else:
post_data = json.dumps(dict(hosts=hostrefs))
try:
rc, data = request(url, method='POST', data=post_data, headers=HEADERS, url_username=user, url_password=pwd)
except Exception as e:
module.fail_json(msg="Failed to update host group. Group [%s]. Id [%s]. Error [%s]." % (gid, ssid,
to_native(e)))
return rc, data
def delete_hostgroup(module, ssid, group_id, api_url, user, pwd):
groups = "storage-systems/%s/host-groups/%s" % (ssid, group_id)
url = api_url + groups
# TODO: Loop through hosts, do mapping to href, make new list to pass to data
try:
rc, data = request(url, method='DELETE', headers=HEADERS, url_username=user, url_password=pwd)
except Exception as e:
module.fail_json(msg="Failed to delete host group. Group [%s]. Id [%s]. Error [%s]." % (group_id, ssid, to_native(e)))
return rc, data
def get_hostgroup_id(module, ssid, name, api_url, user, pwd):
all_groups = 'storage-systems/%s/host-groups' % ssid
url = api_url + all_groups
rc, data = request(url, method='GET', headers=HEADERS, url_username=user, url_password=pwd)
for hg in data:
if hg['name'] == name:
return hg['id']
else:
continue
module.fail_json(msg="A hostgroup with the name %s could not be found" % name)
def get_hosts_in_group(module, ssid, group_name, api_url, user, pwd):
all_groups = 'storage-systems/%s/host-groups' % ssid
g_url = api_url + all_groups
try:
g_rc, g_data = request(g_url, method='GET', headers=HEADERS, url_username=user, url_password=pwd)
except Exception as e:
module.fail_json(
msg="Failed in first step getting hosts from group. Group: [%s]. Id [%s]. Error [%s]." % (group_name,
ssid,
to_native(e)))
all_hosts = 'storage-systems/%s/hosts' % ssid
h_url = api_url + all_hosts
try:
h_rc, h_data = request(h_url, method='GET', headers=HEADERS, url_username=user, url_password=pwd)
except Exception as e:
module.fail_json(
msg="Failed in second step getting hosts from group. Group: [%s]. Id [%s]. Error [%s]." % (
group_name,
ssid,
to_native(e)))
hosts_in_group = []
for hg in g_data:
if hg['name'] == group_name:
clusterRef = hg['clusterRef']
for host in h_data:
if host['clusterRef'] == clusterRef:
hosts_in_group.append(host['name'])
return hosts_in_group
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=False),
new_name=dict(required=False),
ssid=dict(required=True),
id=dict(required=False),
state=dict(required=True, choices=['present', 'absent']),
hosts=dict(required=False, type='list'),
api_url=dict(required=True),
api_username=dict(required=True),
validate_certs=dict(required=False, default=True),
api_password=dict(required=True, no_log=True)
),
supports_check_mode=False,
mutually_exclusive=[['name', 'id']],
required_one_of=[['name', 'id']]
)
name = module.params['name']
new_name = module.params['new_name']
ssid = module.params['ssid']
id_num = module.params['id']
state = module.params['state']
hosts = module.params['hosts']
user = module.params['api_username']
pwd = module.params['api_password']
api_url = module.params['api_url']
if not api_url.endswith('/'):
api_url += '/'
if name:
id_type = 'name'
id_key = name
elif id_num:
id_type = 'id'
id_key = id_num
exists, group_data = group_exists(module, id_type, id_key, ssid, api_url, user, pwd)
if state == 'present':
if not exists:
try:
rc, data = create_hostgroup(module, ssid, name, api_url, user, pwd, hosts)
except Exception as e:
module.fail_json(msg="Failed to create a host group. Id [%s]. Error [%s]." % (ssid, to_native(e)))
hosts = get_hosts_in_group(module, ssid, name, api_url, user, pwd)
module.exit_json(changed=True, hosts=hosts, **data)
else:
current_hosts = get_hosts_in_group(module, ssid, name, api_url, user, pwd)
if not current_hosts:
current_hosts = []
if not hosts:
hosts = []
if set(current_hosts) != set(hosts):
try:
rc, data = update_hostgroup(module, ssid, name, api_url, user, pwd, hosts, new_name)
except Exception as e:
module.fail_json(
msg="Failed to update host group. Group: [%s]. Id [%s]. Error [%s]." % (name, ssid, to_native(e)))
module.exit_json(changed=True, hosts=hosts, **data)
else:
for group in group_data:
if group['name'] == name:
module.exit_json(changed=False, hosts=current_hosts, **group)
elif state == 'absent':
if exists:
hg_id = get_hostgroup_id(module, ssid, name, api_url, user, pwd)
try:
rc, data = delete_hostgroup(module, ssid, hg_id, api_url, user, pwd)
except Exception as e:
module.fail_json(
msg="Failed to delete host group. Group: [%s]. Id [%s]. Error [%s]." % (name, ssid, to_native(e)))
module.exit_json(changed=True, msg="Host Group deleted")
else:
module.exit_json(changed=False, msg="Host Group is already absent")
if __name__ == '__main__':
main()
|
gpl-3.0
| -7,302,903,493,152,294,000 | 817,050,013,353,294,000 | 34.351899 | 151 | 0.602048 | false |
fxtentacle/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
|
125
|
5394
|
# Copyright (c) 2009 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
# Copyright (c) 2010 Research In Motion Limited. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
from .attachment import Attachment
class Bug(object):
# FIXME: This class is kinda a hack for now. It exists so we have one
# place to hold bug logic, even if much of the code deals with
# dictionaries still.
def __init__(self, bug_dictionary, bugzilla):
self.bug_dictionary = bug_dictionary
self._bugzilla = bugzilla
def id(self):
return self.bug_dictionary["id"]
def title(self):
# FIXME: Do we need to HTML unescape the title?
return self.bug_dictionary["title"]
def reporter_email(self):
return self.bug_dictionary["reporter_email"]
def assigned_to_email(self):
return self.bug_dictionary["assigned_to_email"]
def cc_emails(self):
return self.bug_dictionary["cc_emails"]
# FIXME: This information should be stored in some sort of webkit_config.py instead of here.
unassigned_emails = frozenset([
"[email protected]",
"[email protected]",
])
def is_unassigned(self):
return self.assigned_to_email() in self.unassigned_emails
def status(self):
return self.bug_dictionary["bug_status"]
# Bugzilla has many status states we don't really use in WebKit:
# https://bugs.webkit.org/page.cgi?id=fields.html#status
_open_states = ["UNCONFIRMED", "NEW", "ASSIGNED", "REOPENED"]
_closed_states = ["RESOLVED", "VERIFIED", "CLOSED"]
def is_open(self):
return self.status() in self._open_states
def is_closed(self):
return not self.is_open()
def duplicate_of(self):
return self.bug_dictionary.get('dup_id', None)
# Rarely do we actually want obsolete attachments
def attachments(self, include_obsolete=False):
attachments = self.bug_dictionary["attachments"]
if not include_obsolete:
attachments = filter(lambda attachment:
not attachment["is_obsolete"], attachments)
return [Attachment(attachment, self) for attachment in attachments]
def patches(self, include_obsolete=False):
return [patch for patch in self.attachments(include_obsolete)
if patch.is_patch()]
def unreviewed_patches(self):
return [patch for patch in self.patches() if patch.review() == "?"]
def reviewed_patches(self, include_invalid=False):
patches = [patch for patch in self.patches() if patch.review() == "+"]
if include_invalid:
return patches
# Checking reviewer() ensures that it was both reviewed and has a valid
# reviewer.
return filter(lambda patch: patch.reviewer(), patches)
def commit_queued_patches(self, include_invalid=False):
patches = [patch for patch in self.patches()
if patch.commit_queue() == "+"]
if include_invalid:
return patches
# Checking committer() ensures that it was both commit-queue+'d and has
# a valid committer.
return filter(lambda patch: patch.committer(), patches)
def comments(self):
return self.bug_dictionary["comments"]
def is_in_comments(self, message):
for comment in self.comments():
if message in comment["text"]:
return True
return False
def commit_revision(self):
# Sort the comments in reverse order as we want the latest committed revision.
        r = re.compile(r"Committed r(?P<svn_revision>\d+)")
for comment in sorted(self.comments(), reverse=True):
rev = r.search(comment['text'])
if rev:
return int(rev.group('svn_revision'))
return None
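# --- Hedged usage sketch; not part of the original file. ---
# Bug normally wraps dictionaries produced by the Bugzilla client, so the
# dictionary below is a hand-built stand-in, and bugzilla=None suffices
# because none of the methods exercised here contact the server.
if __name__ == '__main__':
    example = Bug({
        "id": 12345,
        "title": "Example bug",
        "bug_status": "NEW",
        "attachments": [],
        "comments": [{"text": "Committed r100000"}],
    }, bugzilla=None)
    assert example.is_open()
    assert example.patches() == []
    assert example.commit_revision() == 100000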
|
bsd-3-clause
| 4,168,109,248,269,114,000 | 8,180,560,828,583,236,000 | 38.661765 | 96 | 0.67297 | false |
wireservice/agate
|
agate/csv_py3.py
|
1
|
4555
|
#!/usr/bin/env python
"""
This module contains the Python 3 replacement for :mod:`csv`.
"""
import csv
import six
from agate.exceptions import FieldSizeLimitError
POSSIBLE_DELIMITERS = [',', '\t', ';', ' ', ':', '|']
class Reader(six.Iterator):
"""
A wrapper around Python 3's builtin :func:`csv.reader`.
"""
def __init__(self, f, field_size_limit=None, line_numbers=False, header=True, **kwargs):
self.line_numbers = line_numbers
self.header = header
if field_size_limit:
csv.field_size_limit(field_size_limit)
self.reader = csv.reader(f, **kwargs)
def __iter__(self):
return self
def __next__(self):
try:
row = next(self.reader)
except csv.Error as e:
# Terrible way to test for this exception, but there is no subclass
if 'field larger than field limit' in str(e):
raise FieldSizeLimitError(csv.field_size_limit())
else:
raise e
        if self.line_numbers:
            if self.header and self.line_num == 1:
                row.insert(0, 'line_numbers')
            else:
                row.insert(0, str(self.line_num - 1 if self.header else self.line_num))
        return row
@property
def dialect(self):
return self.reader.dialect
@property
def line_num(self):
return self.reader.line_num
class Writer(object):
"""
A wrapper around Python 3's builtin :func:`csv.writer`.
"""
def __init__(self, f, line_numbers=False, **kwargs):
self.row_count = 0
self.line_numbers = line_numbers
if 'lineterminator' not in kwargs:
kwargs['lineterminator'] = '\n'
self.writer = csv.writer(f, **kwargs)
def _append_line_number(self, row):
if self.row_count == 0:
row.insert(0, 'line_number')
else:
row.insert(0, self.row_count)
self.row_count += 1
def writerow(self, row):
if self.line_numbers:
row = list(row)
self._append_line_number(row)
# Convert embedded Mac line endings to unix style line endings so they get quoted
row = [i.replace('\r', '\n') if isinstance(i, six.string_types) else i for i in row]
self.writer.writerow(row)
def writerows(self, rows):
for row in rows:
self.writerow(row)
class DictReader(csv.DictReader):
"""
A wrapper around Python 3's builtin :class:`csv.DictReader`.
"""
pass
class DictWriter(csv.DictWriter):
"""
A wrapper around Python 3's builtin :class:`csv.DictWriter`.
"""
def __init__(self, f, fieldnames, line_numbers=False, **kwargs):
self.row_count = 0
self.line_numbers = line_numbers
if 'lineterminator' not in kwargs:
kwargs['lineterminator'] = '\n'
if self.line_numbers:
fieldnames.insert(0, 'line_number')
csv.DictWriter.__init__(self, f, fieldnames, **kwargs)
def _append_line_number(self, row):
if self.row_count == 0:
row['line_number'] = 'line_number'
else:
row['line_number'] = self.row_count
self.row_count += 1
def writerow(self, row):
# Convert embedded Mac line endings to unix style line endings so they get quoted
row = dict([(k, v.replace('\r', '\n')) if isinstance(v, six.string_types) else (k, v) for k, v in row.items()])
if self.line_numbers:
self._append_line_number(row)
csv.DictWriter.writerow(self, row)
def writerows(self, rows):
for row in rows:
self.writerow(row)
class Sniffer(object):
"""
A functional wrapper of ``csv.Sniffer()``.
"""
def sniff(self, sample):
"""
A functional version of ``csv.Sniffer().sniff``, that extends the
list of possible delimiters to include some seen in the wild.
"""
try:
dialect = csv.Sniffer().sniff(sample, POSSIBLE_DELIMITERS)
except:
dialect = None
return dialect
def reader(*args, **kwargs):
"""
A replacement for Python's :func:`csv.reader` that uses
:class:`.csv_py3.Reader`.
"""
return Reader(*args, **kwargs)
def writer(*args, **kwargs):
"""
A replacement for Python's :func:`csv.writer` that uses
:class:`.csv_py3.Writer`.
"""
return Writer(*args, **kwargs)
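# --- Hedged usage sketch; not part of the original module. ---
# Writes a tiny table through the Writer wrapper with line numbering switched
# on; io.StringIO stands in for a real file and is an assumption of the sketch.
if __name__ == '__main__':
    import io
    buf = io.StringIO()
    w = writer(buf, line_numbers=True)
    w.writerows([['name', 'value'], ['a', '1'], ['b', '2']])
    print(buf.getvalue())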
|
mit
| 7,669,659,199,516,043,000 | 5,971,017,959,864,027,000 | 25.32948 | 119 | 0.568606 | false |
atpy/atpy
|
atpy/fitstable.py
|
1
|
11686
|
from __future__ import print_function, division
import os
import numpy as np
from astropy.io import fits
from .exceptions import TableException
from .helpers import smart_dtype, smart_mask
from .decorators import auto_download_to_file, auto_fileobj_to_file
standard_keys = ['XTENSION', 'NAXIS', 'NAXIS1', 'NAXIS2', 'TFIELDS', \
'PCOUNT', 'GCOUNT', 'BITPIX', 'EXTNAME']
# Define type conversion dictionary
type_dict = {}
type_dict[np.bool_] = "L"
type_dict[np.int8] = "B"
type_dict[np.uint8] = "B"
type_dict[np.int16] = "I"
type_dict[np.uint16] = "I"
type_dict[np.int32] = "J"
type_dict[np.uint32] = "J"
type_dict[np.int64] = "K"
type_dict[np.uint64] = "K"
type_dict[np.float32] = "E"
type_dict[np.float64] = "D"
type_dict[np.str] = "A"
type_dict[np.string_] = "A"
type_dict[str] = "A"
def _list_tables(filename):
hdulist = fits.open(filename)
tables = {}
for i, hdu in enumerate(hdulist[1:]):
if hdu.header['XTENSION'] in ['BINTABLE', 'ASCIITABLE', 'TABLE']:
tables[i + 1] = hdu.name
hdulist.close()
return tables
# PyFITS can handle compression, so no decompression detection
@auto_download_to_file
@auto_fileobj_to_file
def read(self, filename, hdu=None, memmap=False, verbose=True):
'''
Read a table from a FITS file
Required Arguments:
*filename*: [ string ]
The FITS file to read the table from
Optional Keyword Arguments:
*hdu*: [ integer ]
The HDU to read from the FITS file (this is only required
if there are more than one table in the FITS file)
*memmap*: [ bool ]
Whether PyFITS should use memory mapping
'''
self.reset()
# If no hdu is requested, check that there is only one table
if not hdu:
tables = _list_tables(filename)
if len(tables) == 0:
raise Exception("No tables in file")
elif len(tables) == 1:
hdu = tables.keys()[0]
else:
raise TableException(tables, 'hdu')
hdulist = fits.open(filename, memmap=memmap)
hdu = hdulist[hdu]
table = hdu.data
header = hdu.header
columns = hdu.columns
# Construct dtype for table
dtype = []
for i in range(len(hdu.data.dtype)):
name = hdu.data.dtype.names[i]
type = hdu.data.dtype[name]
if type.subdtype:
type, shape = type.subdtype
else:
shape = ()
# Get actual FITS format and zero-point
format, bzero = hdu.columns[i].format, hdu.columns[i].bzero
# Remove numbers from format, to find just type
format = format.strip("1234567890.")
if type.type is np.string_ and format in ['I', 'F', 'E', 'D']:
if format == 'I':
type = np.int64
elif format in ['F', 'E']:
type = np.float32
elif format == 'D':
type = np.float64
if format == 'X' and type.type == np.uint8:
type = np.bool
if len(shape) == 1:
shape = (shape[0] * 8,)
if format == 'L':
type = np.bool
if bzero and format in ['B', 'I', 'J']:
if format == 'B' and bzero == -128:
dtype.append((name, np.int8, shape))
elif format == 'I' and bzero == - np.iinfo(np.int16).min:
dtype.append((name, np.uint16, shape))
elif format == 'J' and bzero == - np.iinfo(np.int32).min:
dtype.append((name, np.uint32, shape))
else:
dtype.append((name, type, shape))
else:
dtype.append((name, type, shape))
dtype = np.dtype(dtype)
if self._masked:
self._setup_table(len(hdu.data), dtype, units=columns.units)
else:
self._setup_table(len(hdu.data), dtype, units=columns.units, \
nulls=columns.nulls)
# Populate the table
for i, name in enumerate(columns.names):
format, bzero = hdu.columns[i].format[-1], hdu.columns[i].bzero
if bzero and format in ['B', 'I', 'J']:
data = np.rec.recarray.field(hdu.data, i)
if format == 'B' and bzero == -128:
data = (data.astype(np.int16) + bzero).astype(np.int8)
elif format == 'I' and bzero == - np.iinfo(np.int16).min:
data = (data.astype(np.int32) + bzero).astype(np.uint16)
elif format == 'J' and bzero == - np.iinfo(np.int32).min:
data = (data.astype(np.int64) + bzero).astype(np.uint32)
else:
data = table.field(name)
else:
data = table.field(name)
self.data[name][:] = data[:]
if self._masked:
if columns.nulls[i] == 'NAN.0':
null = np.nan
elif columns.nulls[i] == 'INF.0':
null = np.inf
else:
null = columns.nulls[i]
self.data[name].mask = smart_mask(data, null)
self.data[name].set_fill_value(null)
for key in header.keys():
if not key[:4] in ['TFOR', 'TDIS', 'TDIM', 'TTYP', 'TUNI'] and \
not key in standard_keys:
self.add_keyword(key, header[key])
try:
header['COMMENT']
except KeyError:
pass
else:
# PyFITS used to define header['COMMENT'] as the last comment read in
# (which was a string), but now defines it as a _HeaderCommentaryCards
# object
if isinstance(header['COMMENT'], basestring):
for comment in header.get_comment():
if isinstance(comment, fits.Card):
self.add_comment(comment.value)
else:
self.add_comment(comment)
else:
for comment in header['COMMENT']:
if isinstance(comment, fits.Card):
self.add_comment(comment.value)
else:
self.add_comment(comment)
if hdu.name:
self.table_name = str(hdu.name)
hdulist.close()
return
def _to_hdu(self):
'''
Return the current table as a astropy.io.fits HDU object
'''
columns = []
for name in self.names:
if self._masked:
data = self.data[name].filled()
null = self.data[name].fill_value
if data.ndim > 1:
null = null[0]
if type(null) in [np.bool_, np.bool]:
null = bool(null)
else:
data = self.data[name]
null = self.columns[name].null
unit = self.columns[name].unit
dtype = self.columns[name].dtype
elemwidth = None
        if unit is None:
unit = ''
if data.ndim > 1:
elemwidth = str(data.shape[1])
column_type = smart_dtype(dtype)
if column_type == np.string_:
elemwidth = dtype.itemsize
if column_type in type_dict:
if elemwidth:
format = str(elemwidth) + type_dict[column_type]
else:
format = type_dict[column_type]
else:
raise Exception("cannot use numpy type " + str(column_type))
if column_type == np.uint16:
bzero = - np.iinfo(np.int16).min
elif column_type == np.uint32:
bzero = - np.iinfo(np.int32).min
elif column_type == np.uint64:
raise Exception("uint64 unsupported")
elif column_type == np.int8:
bzero = -128
else:
bzero = None
columns.append(fits.Column(name=name, format=format, unit=unit, \
null=null, array=data, bzero=bzero))
hdu = fits.new_table(fits.ColDefs(columns))
try:
hdu.name = self.table_name
except:
hdu.name = ''
for key in self.keywords:
if len(key) > 8:
keyname = "hierarch " + key
else:
keyname = key
try: # PyFITS 3.x
hdu.header[keyname] = self.keywords[key]
except KeyError: # PyFITS 2.x
hdu.header.update(keyname, self.keywords[key])
for comment in self.comments:
hdu.header.add_comment(comment)
return hdu
def write(self, filename, overwrite=False):
'''
Write the table to a FITS file
Required Arguments:
*filename*: [ string ]
The FITS file to write the table to
Optional Keyword Arguments:
*overwrite*: [ True | False ]
Whether to overwrite any existing file without warning
'''
if os.path.exists(filename):
if overwrite:
os.remove(filename)
else:
raise Exception("File exists: %s" % filename)
try:
_to_hdu(self).writeto(filename)
except:
_to_hdu(self).writeto(filename, output_verify='silentfix')
# PyFITS can handle compression, so no decompression detection
@auto_download_to_file
@auto_fileobj_to_file
def read_set(self, filename, memmap=False, verbose=True):
'''
Read all tables from a FITS file
Required Arguments:
*filename*: [ string ]
The FITS file to read the tables from
Optional Keyword Arguments:
*memmap*: [ bool ]
Whether PyFITS should use memory mapping
'''
self.reset()
# Read in primary header
header = fits.getheader(filename, 0)
for key in header.keys():
if not key[:4] in ['TFOR', 'TDIS', 'TDIM', 'TTYP', 'TUNI'] and \
not key in standard_keys:
self.add_keyword(key, header[key])
try:
header['COMMENT']
except KeyError:
pass
else:
# PyFITS used to define header['COMMENT'] as the last comment read in
# (which was a string), but now defines it as a _HeaderCommentaryCards
# object
if isinstance(header['COMMENT'], basestring):
for comment in header.get_comment():
if isinstance(comment, fits.Card):
self.add_comment(comment.value)
else:
self.add_comment(comment)
else:
for comment in header['COMMENT']:
if isinstance(comment, fits.Card):
self.add_comment(comment.value)
else:
self.add_comment(comment)
# Read in tables one by one
from .basetable import Table
for hdu in _list_tables(filename):
table = Table()
read(table, filename, hdu=hdu, memmap=memmap, verbose=verbose)
self.append(table)
def write_set(self, filename, overwrite=False):
'''
Write the tables to a FITS file
Required Arguments:
*filename*: [ string ]
The FITS file to write the tables to
Optional Keyword Arguments:
*overwrite*: [ True | False ]
Whether to overwrite any existing file without warning
'''
if os.path.exists(filename):
if overwrite:
os.remove(filename)
else:
raise Exception("File exists: %s" % filename)
primary = fits.PrimaryHDU()
for key in self.keywords:
if len(key) > 8:
keyname = "hierarch " + key
else:
keyname = key
try: # PyFITS 3.x
primary.header[keyname] = self.keywords[key]
except KeyError: # PyFITS 2.x
primary.header.update(keyname, self.keywords[key])
for comment in self.comments:
primary.header.add_comment(comment)
hdulist = [primary]
for table_key in self.tables:
hdulist.append(_to_hdu(self.tables[table_key]))
hdulist = fits.HDUList(hdulist)
hdulist.writeto(filename)
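# --- Hedged usage sketch; not part of the original module. ---
# read()/write() above are normally bound to atpy's Table front end rather
# than called directly; the file names below are placeholders.
#
# from atpy import Table
# t = Table('catalog.fits')                     # dispatches to read() above
# t.write('catalog_copy.fits', overwrite=True)  # dispatches to write() above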
|
mit
| 1,441,949,267,621,351,000 | 4,985,714,573,346,603,000 | 27.364078 | 78 | 0.552114 | false |
Kraxi/YTplaylist
|
venv/Lib/site-packages/pip/commands/install.py
|
187
|
14659
|
from __future__ import absolute_import
import logging
import operator
import os
import tempfile
import shutil
import warnings
try:
import wheel
except ImportError:
wheel = None
from pip.req import RequirementSet
from pip.basecommand import RequirementCommand
from pip.locations import virtualenv_no_global, distutils_scheme
from pip.index import PackageFinder
from pip.exceptions import (
InstallationError, CommandError, PreviousBuildDirError,
)
from pip import cmdoptions
from pip.utils import ensure_dir
from pip.utils.build import BuildDirectory
from pip.utils.deprecation import RemovedInPip8Warning
from pip.utils.filesystem import check_path_owner
from pip.wheel import WheelCache, WheelBuilder
logger = logging.getLogger(__name__)
class InstallCommand(RequirementCommand):
"""
Install packages from:
- PyPI (and other indexes) using requirement specifiers.
- VCS project urls.
- Local project directories.
- Local or remote source archives.
pip also supports installing from "requirements files", which provide
an easy way to specify a whole environment to be installed.
"""
name = 'install'
usage = """
%prog [options] <requirement specifier> [package-index-options] ...
%prog [options] -r <requirements file> [package-index-options] ...
%prog [options] [-e] <vcs project url> ...
%prog [options] [-e] <local project path> ...
%prog [options] <archive url/path> ..."""
summary = 'Install packages.'
def __init__(self, *args, **kw):
super(InstallCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(cmdoptions.constraints())
cmd_opts.add_option(cmdoptions.editable())
cmd_opts.add_option(cmdoptions.requirements())
cmd_opts.add_option(cmdoptions.build_dir())
cmd_opts.add_option(
'-t', '--target',
dest='target_dir',
metavar='dir',
default=None,
help='Install packages into <dir>. '
'By default this will not replace existing files/folders in '
'<dir>. Use --upgrade to replace existing packages in <dir> '
'with new versions.'
)
cmd_opts.add_option(
'-d', '--download', '--download-dir', '--download-directory',
dest='download_dir',
metavar='dir',
default=None,
help=("Download packages into <dir> instead of installing them, "
"regardless of what's already installed."),
)
cmd_opts.add_option(cmdoptions.download_cache())
cmd_opts.add_option(cmdoptions.src())
cmd_opts.add_option(
'-U', '--upgrade',
dest='upgrade',
action='store_true',
help='Upgrade all specified packages to the newest available '
'version. This process is recursive regardless of whether '
'a dependency is already satisfied.'
)
cmd_opts.add_option(
'--force-reinstall',
dest='force_reinstall',
action='store_true',
help='When upgrading, reinstall all packages even if they are '
'already up-to-date.')
cmd_opts.add_option(
'-I', '--ignore-installed',
dest='ignore_installed',
action='store_true',
help='Ignore the installed packages (reinstalling instead).')
cmd_opts.add_option(cmdoptions.no_deps())
cmd_opts.add_option(cmdoptions.install_options())
cmd_opts.add_option(cmdoptions.global_options())
cmd_opts.add_option(
'--user',
dest='use_user_site',
action='store_true',
help="Install to the Python user install directory for your "
"platform. Typically ~/.local/, or %APPDATA%\Python on "
"Windows. (See the Python documentation for site.USER_BASE "
"for full details.)")
cmd_opts.add_option(
'--egg',
dest='as_egg',
action='store_true',
help="Install packages as eggs, not 'flat', like pip normally "
"does. This option is not about installing *from* eggs. "
"(WARNING: Because this option overrides pip's normal install"
" logic, requirements files may not behave as expected.)")
cmd_opts.add_option(
'--root',
dest='root_path',
metavar='dir',
default=None,
help="Install everything relative to this alternate root "
"directory.")
cmd_opts.add_option(
"--compile",
action="store_true",
dest="compile",
default=True,
help="Compile py files to pyc",
)
cmd_opts.add_option(
"--no-compile",
action="store_false",
dest="compile",
help="Do not compile py files to pyc",
)
cmd_opts.add_option(cmdoptions.use_wheel())
cmd_opts.add_option(cmdoptions.no_use_wheel())
cmd_opts.add_option(cmdoptions.no_binary())
cmd_opts.add_option(cmdoptions.only_binary())
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help="Include pre-release and development versions. By default, "
"pip only finds stable versions.")
cmd_opts.add_option(cmdoptions.no_clean())
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def _build_package_finder(self, options, index_urls, session):
"""
Create a package finder appropriate to this install command.
This method is meant to be overridden by subclasses, not
called directly.
"""
return PackageFinder(
find_links=options.find_links,
format_control=options.format_control,
index_urls=index_urls,
allow_external=options.allow_external,
allow_unverified=options.allow_unverified,
allow_all_external=options.allow_all_external,
trusted_hosts=options.trusted_hosts,
allow_all_prereleases=options.pre,
process_dependency_links=options.process_dependency_links,
session=session,
)
def run(self, options, args):
cmdoptions.resolve_wheel_no_use_binary(options)
cmdoptions.check_install_build_global(options)
if options.download_dir:
options.ignore_installed = True
if options.build_dir:
options.build_dir = os.path.abspath(options.build_dir)
options.src_dir = os.path.abspath(options.src_dir)
install_options = options.install_options or []
if options.use_user_site:
if virtualenv_no_global():
raise InstallationError(
"Can not perform a '--user' install. User site-packages "
"are not visible in this virtualenv."
)
install_options.append('--user')
install_options.append('--prefix=')
temp_target_dir = None
if options.target_dir:
options.ignore_installed = True
temp_target_dir = tempfile.mkdtemp()
options.target_dir = os.path.abspath(options.target_dir)
if (os.path.exists(options.target_dir) and not
os.path.isdir(options.target_dir)):
raise CommandError(
"Target path exists but is not a directory, will not "
"continue."
)
install_options.append('--home=' + temp_target_dir)
global_options = options.global_options or []
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.info('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
if options.download_cache:
warnings.warn(
"--download-cache has been deprecated and will be removed in "
"the future. Pip now automatically uses and configures its "
"cache.",
RemovedInPip8Warning,
)
with self._build_session(options) as session:
finder = self._build_package_finder(options, index_urls, session)
build_delete = (not (options.no_clean or options.build_dir))
wheel_cache = WheelCache(options.cache_dir, options.format_control)
if options.cache_dir and not check_path_owner(options.cache_dir):
logger.warning(
"The directory '%s' or its parent directory is not owned "
"by the current user and caching wheels has been "
"disabled. check the permissions and owner of that "
"directory. If executing pip with sudo, you may want "
"sudo's -H flag.",
options.cache_dir,
)
options.cache_dir = None
with BuildDirectory(options.build_dir,
delete=build_delete) as build_dir:
requirement_set = RequirementSet(
build_dir=build_dir,
src_dir=options.src_dir,
download_dir=options.download_dir,
upgrade=options.upgrade,
as_egg=options.as_egg,
ignore_installed=options.ignore_installed,
ignore_dependencies=options.ignore_dependencies,
force_reinstall=options.force_reinstall,
use_user_site=options.use_user_site,
target_dir=temp_target_dir,
session=session,
pycompile=options.compile,
isolated=options.isolated_mode,
wheel_cache=wheel_cache,
)
self.populate_requirement_set(
requirement_set, args, options, finder, session, self.name,
wheel_cache
)
if not requirement_set.has_requirements:
return
try:
if (options.download_dir or not wheel or not
options.cache_dir):
# on -d don't do complex things like building
# wheels, and don't try to build wheels when wheel is
# not installed.
requirement_set.prepare_files(finder)
else:
# build wheels before install.
wb = WheelBuilder(
requirement_set,
finder,
build_options=[],
global_options=[],
)
# Ignore the result: a failed wheel will be
# installed from the sdist/vcs whatever.
wb.build(autobuilding=True)
if not options.download_dir:
requirement_set.install(
install_options,
global_options,
root=options.root_path,
)
reqs = sorted(
requirement_set.successfully_installed,
key=operator.attrgetter('name'))
items = []
for req in reqs:
item = req.name
try:
if hasattr(req, 'installed_version'):
if req.installed_version:
item += '-' + req.installed_version
except Exception:
pass
items.append(item)
installed = ' '.join(items)
if installed:
logger.info('Successfully installed %s', installed)
else:
downloaded = ' '.join([
req.name
for req in requirement_set.successfully_downloaded
])
if downloaded:
logger.info(
'Successfully downloaded %s', downloaded
)
except PreviousBuildDirError:
options.no_clean = True
raise
finally:
# Clean up
if not options.no_clean:
requirement_set.cleanup_files()
if options.target_dir:
ensure_dir(options.target_dir)
lib_dir = distutils_scheme('', home=temp_target_dir)['purelib']
for item in os.listdir(lib_dir):
target_item_dir = os.path.join(options.target_dir, item)
if os.path.exists(target_item_dir):
if not options.upgrade:
logger.warning(
'Target directory %s already exists. Specify '
'--upgrade to force replacement.',
target_item_dir
)
continue
if os.path.islink(target_item_dir):
logger.warning(
'Target directory %s already exists and is '
'a link. Pip will not automatically replace '
'links, please remove if replacement is '
'desired.',
target_item_dir
)
continue
if os.path.isdir(target_item_dir):
shutil.rmtree(target_item_dir)
else:
os.remove(target_item_dir)
shutil.move(
os.path.join(lib_dir, item),
target_item_dir
)
shutil.rmtree(temp_target_dir)
return requirement_set
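# --- Hedged usage sketch; not part of the original file. ---
# The options registered in InstallCommand.__init__ above surface on the
# command line; package names and paths below are illustrative placeholders.
#
#   pip install requests --upgrade --user
#   pip install -r requirements.txt --target ./vendor --no-compile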
|
gpl-2.0
| -8,894,581,388,813,013,000 | -5,970,225,650,312,014,000 | 37.576316 | 79 | 0.515519 | false |
loulich/Couchpotato
|
libs/pyutil/test/deprecated/test_picklesaver.py
|
106
|
1340
|
#!/usr/bin/env python
# Copyright (c) 2002 Luke 'Artimage' Nelson
# Copyright (c) 2005-2010 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import os
try:
from twisted.trial import unittest
except ImportError, le:
print "Skipping %s since it requires Twisted and Twisted could not be imported: %s" % (__name__, le,)
else:
from pyutil import PickleSaver, fileutil
class Thingie(PickleSaver.PickleSaver):
def __init__(self, fname, delay=30):
PickleSaver.PickleSaver.__init__(self, fname=fname, attrs={'tmp_store':'False'}, DELAY=delay)
class PickleSaverTest(unittest.TestCase):
def _test_save_now(self, fname):
thingie = Thingie(fname, delay=0)
thingie.tmp_store = 'True'
thingie.lazy_save() # Note: it was constructed with default save delay of 0.
def test_save_now(self):
"""
This test should create a lazy save object, save it with no delay and check if the file exists.
"""
tempdir = fileutil.NamedTemporaryDirectory()
fname = os.path.join(tempdir.name, "picklesavertest")
self._test_save_now(fname)
self.failUnless(os.path.isfile(fname), "The file [%s] does not exist." %(fname,))
tempdir.shutdown()
|
gpl-3.0
| -3,026,381,232,433,795,600 | 2,359,552,084,865,899,000 | 36.222222 | 107 | 0.635821 | false |
skriticos/ac
|
src/AniChou/myanimelist.py
|
1
|
14742
|
# =========================================================================== #
# Name: myanimelist.py
# Purpose: Provide an interface to anime data; synchronize with the MyAnimeList
# server;
#
# Copyright (c) 2008 Gareth Latty
# Copyright (c) 2009 Sebastian Bartos
# Copyright (c) 2009 Daniel Anderson - dankles/evilsage4
#
# License: GPL v3, see COPYING file for details
# =========================================================================== #
import urllib
import urllib2
from cookielib import LWPCookieJar
import socket
from lib.beautifulsoup import BeautifulSoup
import re
import urlparse
from datetime import date, datetime
import os, time
from data import mal_anime_data_schema
from database import db as local_database
from globs import ac_log_path, ac_data_path
class anime_data(object):
"""
Anime data module. Reads and writes local anime data to disk, fetches and
syncs with MyanimeList server.
username: login username
password: login password
db_path: path to database
db: local anime database that is a nested dict and has ASCII-fied series
titles as keys and and fields form mal_anime_data_schema as dict data.
"""
def __init__(self, **kw):
"""
Setup credentials, read local data and setup network connection
environment. Optionally sync with MAL on startup.
Does not take positional arguments. Keyword arguments can either be
given individually (username, password, initsync) or as an
ac_config() instance. This will not be retained.
In the latter form we support some additional command line options.
"""
# When the architecture stabilizes, switch to config as the sole
# positional argument, and retain it instead of copying parts.
# That would also enable reconfiguration at runtime.
        # Note: dict.get() evaluates its default eagerly, so the config lookup
        # is guarded here to keep the documented keyword-only form working.
        self.username = kw['username'] if 'username' in kw else kw['config'].get('mal', 'username')
        self.password = kw['password'] if 'password' in kw else kw['config'].get('mal', 'password')
        initsync = kw['initsync'] if 'initsync' in kw else kw['config'].get('startup', 'sync')
try:
self.login = kw['config'].get('mal', 'login')
except KeyError:
# We need a default even if arguments were given individually.
self.login = True
try:
self.mirror = kw['config'].get('mal', 'mirror')
except KeyError:
self.mirror = None
# pull the local DB as a dictionary object
#self.db = {}
self.local_db = local_database()
self.db = self.local_db.get_db()
# setup cookie handler
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(LWPCookieJar()))
urllib2.install_opener(opener)
socket.setdefaulttimeout(40)
if initsync:
self.sync()
def save(self):
""" Only saves the current state to disk w/o network activity.
"""
self.local_db.set_db(self.db)
def sync(self):
"""
        Synchronize local anime database with the MyAnimeList server.
(fetch -> compare -> push -> update local)
Return:
nested dict of remote updates with ASCII-fied series titles as
keys and a list of keys that got deleted on the MyAnimeList server.
"""
# Three way switch: login (un)successfull or don't even try.
login = _login(self.username,self.password) if self.login else None
if login is False:
print 'Login failed..'
return False
remoteAnime_db = _getAnimeList(self.username, self.mirror)
if self.db:
# If local DB is already initialized then filter changes
# and push local updates
(remote_updates, local_updates, deleted_entry_keys) = \
_filter_sync_changes(remoteAnime_db, self.db)
_logchanges(remote_updates, local_updates, deleted_entry_keys)
if login:
_push_list(local_updates)
else:
print 'Warning! Your local data goes ouf of sync'
# update local anime list with changes
for key in deleted_entry_keys:
del self.db[key]
for key, value in remote_updates.items():
self.db[key] = value
# write to local DB
self.local_db.set_db(self.db)
return (remote_updates, deleted_entry_keys)
else:
# initialize local data, as it was empty before
self.db = remoteAnime_db
# write to local DB
self.local_db.set_db(self.db)
return (self.db, {})
def fetch(self):
"""
UNUSED
Only fetch anime data from MyAnimeList server (overwrites local data,
if existent). Useful for initializing and resetting local database.
Returns a copy of the fetched database on success, None on failure.
"""
        self.db = _getAnimeList(self.username, self.mirror)
# write to local DB
self.local_db.set_db(self.db)
return self.db
def _appInfoURL(user, status = 'all', typ = None):
"""
Safely generate a URL to get XML.
Type may be 'manga'.
"""
# Example taken from the site.
template = 'http://myanimelist.net/malappinfo.php?u=Wile&status=all&type=manga'
# Make tuple mutable.
parts = list(urlparse.urlparse(template))
# New parameters.
query = {'u': user}
if status:
query['status'] = status
if typ:
query['type'] = typ
# urlencode would literally output 'None'.
parts[4] = urllib.urlencode(query)
return urlparse.urlunparse(parts)
def _getAnimeList(username, mirror):
"""
Retrieve Anime XML from MyAnimeList server.
Returns: dictionary object.
    Ways in which the output of malAppInfo is *not* XML:
Declared as UTF-8 but contains illegal byte sequences (characters)
Uses entities inside CDATA, which is exactly the wrong way round.
It further disagrees with the Expat C extension behind minidom:
Contains tabs and newlines outside of tags.
"""
# This function should be broken up and partly refactored into
# the class to be better configurable.
fetch_url = _appInfoURL(username)
try:
fetch_response = open(mirror, 'rb')
except:
# TODO whatever error open(None) raises.
fetch_response = urllib2.urlopen(fetch_url)
# BeautifulSoup could do the read() and unicode-conversion, if it
# weren't for the illegal characters, as it internally doesn't
# use 'replace'.
fetch_response = unicode(fetch_response.read(), 'utf-8', 'replace')
xmldata = BeautifulSoup.BeautifulStoneSoup(fetch_response)
# For unknown reasons it doesn't work without recursive.
# Nor does iterating over myanimelist.anime. BS documentation broken?
anime_nodes = xmldata.myanimelist.findAll('anime', recursive = True)
# We have to manually convert after getting them out of the CDATA.
entity = lambda m: BeautifulSoup.Tag.XML_ENTITIES_TO_SPECIAL_CHARS[m.group(1)]
# Walk through all the anime nodes and convert the data to a python
# dictionary.
ac_remote_anime_dict = dict()
for anime in anime_nodes:
# ac_node builds the output of our function. Everything added to it
# must either be made independent of the parse tree by calling
# NavigableString.extract() or, preferrably, be turned into a
# different type like unicode(). This is a side-effect of using
# non-mutators like string.strip()
# Failing to do this will crash cPickle.
ac_node = dict()
for node, typ in mal_anime_data_schema.iteritems():
try:
value = getattr(anime, node).string.strip()
# One would think re.sub directly accepts string subclasses
# like NavigableString. Raises a TypeError, though.
value = re.sub(r'&(\w+);', entity, value)
except AttributeError:
continue
if typ is datetime:
# process my_last_updated unix timestamp
ac_node[node] = datetime.fromtimestamp(int(value))
elif typ is int:
# process integer slots
ac_node[node] = int(value)
elif typ is date and value != '0000-00-00':
                # process date slots
(y,m,d) = value.split('-')
(y,m,d) = int(y), int(m), int(d)
if y and m and d:
ac_node[node] = date(y,m,d)
else:
# process string slots
ac_node[node] = value
# series titles are used as anime identifiers
# the keys for the resulting dictionary are encoded to ASCII, so they
# can be simply put into shelves
key = ac_node['series_title'].encode('utf-8')
# add node entry to the resulting nodelist
ac_remote_anime_dict[key] = ac_node
# the resulting dict is like this:
# {<ASCII-fied key from title>: {<mal_anime_data_schema-fields>: <values>}, ...}
return ac_remote_anime_dict
def _logchanges(remote, local, deleted):
""" Writes changes to logfile.
"""
f = open(ac_log_path, 'a')
now = str(int(time.mktime(datetime.now().timetuple())))
for key, value in remote.items():
f.write(now + ': Fetching "' + key +
'" episode ' + str(value['my_watched_episodes']) + '\n')
for key, value in local.items():
f.write(now + ': Pushing "' + key +
'" episode ' + str(value['my_watched_episodes']) + '\n')
for entry in deleted:
f.write(now + ': Deleted "' + entry + '"\n')
f.close()
def _login(username, password):
"""
Log in to MyAnimeList server.
Returns: True on success, False on failure
"""
# prepare login data
login_base_url = 'http://myanimelist.net/login.php'
headers = {
'User-Agent': 'anichou',
'Content-Type': 'application/x-www-form-urlencoded'}
login_data = urllib.urlencode({
'username': username,
'password': password,
'cookie': 1,
'sublogin': 'Login'})
# phrase login request (to perform a POST request)
login_request = urllib2.Request(login_base_url, login_data, headers)
# try to connect and authenticate with MyAnimeList server
try:
login_response = urllib2.urlopen(login_request).read()
except urllib2.URLError, e:
if hasattr(e, 'reason'):
print 'Failed to reach myanimelist.net.'
print 'Reason: ', e.reason
elif hasattr(e, 'code'):
print 'The server couldn\'t fulfill the request.'
print 'Error code: ', e.code
return False
# check if login was successful
if not login_response.count('<div class="badresult">'):
if login_response == "Couldn't open s-database. Please contact Xinil.":
return False
return True
else:
return False
def _filter_sync_changes(ac_remote_anime_dict, ac_local_anime_dict):
"""
Private Method
Compares the anime entry my_last_updated in both parameters and returns two
dictionaries of changed values of both parameters.
The one for the local dictionary can be used to push changes to the mal
server while the other can be used to update the local display and database.
Returns:
remote_updates: changes that are more up to date on the server
local_updates: changes that are more up to date locally
deleted_enry_keys: keys that are in the local database, but not in the
remote list.
"""
remote_updates = dict()
local_updates = dict()
# search for entirely new enries and deleted entries
remote_keys = ac_remote_anime_dict.keys()
local_keys = ac_local_anime_dict.keys()
deleted_entry_keys = \
filter(lambda x:x not in remote_keys, local_keys)
new_entry_keys = \
filter(lambda x:x not in local_keys, remote_keys)
for key in new_entry_keys:
remote_updates[key] = ac_remote_anime_dict[key]
# search in both dictionaries for differing update keys and append to the
# other's updates depending on which key is newer
common_keys = filter(lambda x:x in local_keys, remote_keys)
for key in common_keys:
remote_timestamp = ac_remote_anime_dict[key]['my_last_updated']
local_timestamp = ac_local_anime_dict[key]['my_last_updated']
if remote_timestamp > local_timestamp:
remote_updates[key] = ac_remote_anime_dict[key]
elif remote_timestamp < local_timestamp:
local_updates[key] = ac_local_anime_dict[key]
return (remote_updates, local_updates, deleted_entry_keys)
def _push_list(local_updates):
"""
    Private Method
Updates every entry in the local updates dictionary to the mal server.
Should be called after the local updates are determined with the
filter_sync_changes function.
Returns:
True on success, False on failure
"""
headers = {
'User-Agent': 'anichou',
'Content-Type': 'application/x-www-form-urlencoded'}
for anime in local_updates.values():
# construct push request for entry update
postdata = urllib.urlencode({
# id entry
'series_animedb_id': str(anime['series_animedb_id']),
'series_title': str(anime['series_animedb_id']),
# set interesting values
'completed_eps': str(anime['my_watched_episodes']),
'status': str(anime['my_status']),
'score': str(anime['my_score']),
# protocol stuff
'close_on_update': 'true',
'submitIt': 2 })
push_base_url = \
'http://myanimelist.net/panel.php?keepThis=true&go=edit&id=' + \
str(anime['my_id']) + '&hidenav=true&TB_iframe=false'
push_request = urllib2.Request(push_base_url, postdata, headers)
# push update request
try:
response = urllib2.urlopen(push_request)
# print response.read() # -- for testing
        except urllib2.URLError, e:
if hasattr(e, 'reason'):
print 'We failed to reach a server.'
print 'Reason: ', e.reason
elif hasattr(e, 'code'):
print 'The server couldn\'t fulfill the request.'
print 'Error code: ', e.code
return False
return True
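# --- Hedged usage sketch; not part of the original module. ---
# Assumes valid MyAnimeList credentials; initsync=False keeps the constructor
# off the network so that sync() can be called explicitly.
#
# ad = anime_data(username='someuser', password='secret', initsync=False)
# result = ad.sync()
# if result:
#     remote_updates, deleted_keys = result
#     for title, fields in remote_updates.items():
#         print title, fields.get('my_watched_episodes')
# ad.save()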
|
gpl-3.0
| 3,001,411,212,907,113,000 | 537,053,940,203,640,700 | 35.580645 | 84 | 0.599647 | false |