repo_name (string, 6-100) | path (string, 4-294) | copies (string, 1-5) | size (string, 4-6) | content (string, 606-896k) | license (15 classes) | var_hash (int64) | doc_hash (int64) | line_mean (float64, 3.5-99.8) | line_max (int64, 13-999) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---|---
zturchan/CMPUT410-Lab6 | v1/lib/python2.7/site-packages/django/forms/fields.py | copies: 34 | size: 46782
"""
Field classes.
"""
from __future__ import unicode_literals
import copy
import datetime
import os
import re
import sys
import warnings
from decimal import Decimal, DecimalException
from io import BytesIO
from django.core import validators
from django.core.exceptions import ValidationError
from django.forms.utils import from_current_timezone, to_current_timezone
from django.forms.widgets import (
TextInput, NumberInput, EmailInput, URLInput, HiddenInput,
MultipleHiddenInput, ClearableFileInput, CheckboxInput, Select,
NullBooleanSelect, SelectMultiple, DateInput, DateTimeInput, TimeInput,
SplitDateTimeWidget, SplitHiddenDateTimeWidget, FILE_INPUT_CONTRADICTION
)
from django.utils import formats
from django.utils.encoding import smart_text, force_str, force_text
from django.utils.ipv6 import clean_ipv6_address
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils import six
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
# Provide this import for backwards compatibility.
from django.core.validators import EMPTY_VALUES # NOQA
__all__ = (
'Field', 'CharField', 'IntegerField',
'DateField', 'TimeField', 'DateTimeField',
'RegexField', 'EmailField', 'FileField', 'ImageField', 'URLField',
'BooleanField', 'NullBooleanField', 'ChoiceField', 'MultipleChoiceField',
'ComboField', 'MultiValueField', 'FloatField', 'DecimalField',
'SplitDateTimeField', 'IPAddressField', 'GenericIPAddressField', 'FilePathField',
'SlugField', 'TypedChoiceField', 'TypedMultipleChoiceField'
)
class Field(object):
widget = TextInput # Default widget to use when rendering this type of Field.
hidden_widget = HiddenInput # Default widget to use when rendering this as "hidden".
default_validators = [] # Default set of validators
# Add an 'invalid' entry to default_error_message if you want a specific
# field error message not raised by the field validators.
default_error_messages = {
'required': _('This field is required.'),
}
empty_values = list(validators.EMPTY_VALUES)
# Tracks each time a Field instance is created. Used to retain order.
creation_counter = 0
def __init__(self, required=True, widget=None, label=None, initial=None,
help_text='', error_messages=None, show_hidden_initial=False,
validators=[], localize=False):
# required -- Boolean that specifies whether the field is required.
# True by default.
# widget -- A Widget class, or instance of a Widget class, that should
# be used for this Field when displaying it. Each Field has a
# default Widget that it'll use if you don't specify this. In
# most cases, the default widget is TextInput.
# label -- A verbose name for this field, for use in displaying this
# field in a form. By default, Django will use a "pretty"
# version of the form field name, if the Field is part of a
# Form.
# initial -- A value to use in this Field's initial display. This value
# is *not* used as a fallback if data isn't given.
# help_text -- An optional string to use as "help text" for this Field.
# error_messages -- An optional dictionary to override the default
# messages that the field will raise.
# show_hidden_initial -- Boolean that specifies if it is needed to render a
# hidden widget with initial value after widget.
# validators -- List of additional validators to use
# localize -- Boolean that specifies if the field should be localized.
self.required, self.label, self.initial = required, label, initial
self.show_hidden_initial = show_hidden_initial
self.help_text = help_text
widget = widget or self.widget
if isinstance(widget, type):
widget = widget()
# Trigger the localization machinery if needed.
self.localize = localize
if self.localize:
widget.is_localized = True
# Let the widget know whether it should display as required.
widget.is_required = self.required
# Hook into self.widget_attrs() for any Field-specific HTML attributes.
extra_attrs = self.widget_attrs(widget)
if extra_attrs:
widget.attrs.update(extra_attrs)
self.widget = widget
# Increase the creation counter, and save our local copy.
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
self.validators = self.default_validators + validators
super(Field, self).__init__()
def prepare_value(self, value):
return value
def to_python(self, value):
return value
def validate(self, value):
if value in self.empty_values and self.required:
raise ValidationError(self.error_messages['required'], code='required')
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise ValidationError(errors)
def clean(self, value):
"""
Validates the given value and returns its "cleaned" value as an
appropriate Python object.
Raises ValidationError for any errors.
"""
value = self.to_python(value)
self.validate(value)
self.run_validators(value)
return value
def bound_data(self, data, initial):
"""
Return the value that should be shown for this field on render of a
bound form, given the submitted POST data for the field and the initial
data, if any.
For most fields, this will simply be data; FileFields need to handle it
a bit differently.
"""
return data
def widget_attrs(self, widget):
"""
Given a Widget instance (*not* a Widget class), returns a dictionary of
any HTML attributes that should be added to the Widget, based on this
Field.
"""
return {}
def _has_changed(self, initial, data):
"""
Return True if data differs from initial.
"""
# For purposes of seeing whether something has changed, None is
# the same as an empty string, if the data or initial value we get
# is None, replace it w/ ''.
initial_value = initial if initial is not None else ''
try:
data = self.to_python(data)
if hasattr(self, '_coerce'):
data = self._coerce(data)
except ValidationError:
return True
data_value = data if data is not None else ''
return initial_value != data_value
def __deepcopy__(self, memo):
result = copy.copy(self)
memo[id(self)] = result
result.widget = copy.deepcopy(self.widget, memo)
result.validators = self.validators[:]
return result
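# Illustrative sketch (doctest style, not part of the module): the hooks
# above compose into the clean() pipeline -- to_python() coerces the raw
# value, validate() enforces `required`, and run_validators() applies the
# validator list. Using CharField (defined below) as a concrete Field:
#
# >>> f = CharField(min_length=3, required=True)
# >>> f.clean('abc')
# 'abc'
# >>> f.clean('')    # empty + required: ValidationError(code='required')
# >>> f.clean('ab')  # MinLengthValidator: ValidationError(code='min_length')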
class CharField(Field):
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
self.max_length, self.min_length = max_length, min_length
super(CharField, self).__init__(*args, **kwargs)
if min_length is not None:
self.validators.append(validators.MinLengthValidator(int(min_length)))
if max_length is not None:
self.validators.append(validators.MaxLengthValidator(int(max_length)))
def to_python(self, value):
"Returns a Unicode object."
if value in self.empty_values:
return ''
return smart_text(value)
def widget_attrs(self, widget):
attrs = super(CharField, self).widget_attrs(widget)
if self.max_length is not None:
# The HTML attribute is maxlength, not max_length.
attrs.update({'maxlength': str(self.max_length)})
return attrs
class IntegerField(Field):
widget = NumberInput
default_error_messages = {
'invalid': _('Enter a whole number.'),
}
def __init__(self, max_value=None, min_value=None, *args, **kwargs):
self.max_value, self.min_value = max_value, min_value
if kwargs.get('localize') and self.widget == NumberInput:
# Localized number input is not well supported on most browsers
kwargs.setdefault('widget', super(IntegerField, self).widget)
super(IntegerField, self).__init__(*args, **kwargs)
if max_value is not None:
self.validators.append(validators.MaxValueValidator(max_value))
if min_value is not None:
self.validators.append(validators.MinValueValidator(min_value))
def to_python(self, value):
"""
Validates that int() can be called on the input. Returns the result
of int(). Returns None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
try:
value = int(str(value))
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def widget_attrs(self, widget):
attrs = super(IntegerField, self).widget_attrs(widget)
if isinstance(widget, NumberInput):
if self.min_value is not None:
attrs['min'] = self.min_value
if self.max_value is not None:
attrs['max'] = self.max_value
return attrs
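# Illustrative sketch (assumed values): the bounds are enforced twice --
# server-side through Min/MaxValueValidator in clean(), and client-side
# by reflecting them into the NumberInput widget's HTML attributes.
#
# >>> f = IntegerField(min_value=1, max_value=10)
# >>> f.clean('5')
# 5
# >>> f.widget_attrs(f.widget)
# {'min': 1, 'max': 10}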
class FloatField(IntegerField):
default_error_messages = {
'invalid': _('Enter a number.'),
}
def to_python(self, value):
"""
Validates that float() can be called on the input. Returns the result
of float(). Returns None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
try:
value = float(value)
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def validate(self, value):
super(FloatField, self).validate(value)
# Check for NaN (which is the only thing not equal to itself) and +/- infinity
if value != value or value in (Decimal('Inf'), Decimal('-Inf')):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def widget_attrs(self, widget):
attrs = super(FloatField, self).widget_attrs(widget)
if isinstance(widget, NumberInput) and 'step' not in widget.attrs:
attrs.setdefault('step', 'any')
return attrs
class DecimalField(IntegerField):
default_error_messages = {
'invalid': _('Enter a number.'),
'max_digits': ungettext_lazy(
'Ensure that there are no more than %(max)s digit in total.',
'Ensure that there are no more than %(max)s digits in total.',
'max'),
'max_decimal_places': ungettext_lazy(
'Ensure that there are no more than %(max)s decimal place.',
'Ensure that there are no more than %(max)s decimal places.',
'max'),
'max_whole_digits': ungettext_lazy(
'Ensure that there are no more than %(max)s digit before the decimal point.',
'Ensure that there are no more than %(max)s digits before the decimal point.',
'max'),
}
def __init__(self, max_value=None, min_value=None, max_digits=None, decimal_places=None, *args, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
super(DecimalField, self).__init__(max_value, min_value, *args, **kwargs)
def to_python(self, value):
"""
Validates that the input is a decimal number. Returns a Decimal
instance. Returns None for empty values. Ensures that there are no more
than max_digits in the number, and no more than decimal_places digits
after the decimal point.
"""
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
value = smart_text(value).strip()
try:
value = Decimal(value)
except DecimalException:
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def validate(self, value):
super(DecimalField, self).validate(value)
if value in self.empty_values:
return
# Check for NaN, Inf and -Inf values. We can't compare directly for NaN,
# since it is never equal to itself. However, NaN is the only value that
# isn't equal to itself, so we can use this to identify NaN
if value != value or value == Decimal("Inf") or value == Decimal("-Inf"):
raise ValidationError(self.error_messages['invalid'], code='invalid')
sign, digittuple, exponent = value.as_tuple()
decimals = abs(exponent)
# digittuple doesn't include any leading zeros.
digits = len(digittuple)
if decimals > digits:
# We have leading zeros up to or past the decimal point. Count
# everything past the decimal point as a digit. We do not count
# 0 before the decimal point as a digit since that would mean
# we would not allow max_digits = decimal_places.
digits = decimals
whole_digits = digits - decimals
if self.max_digits is not None and digits > self.max_digits:
raise ValidationError(
self.error_messages['max_digits'],
code='max_digits',
params={'max': self.max_digits},
)
if self.decimal_places is not None and decimals > self.decimal_places:
raise ValidationError(
self.error_messages['max_decimal_places'],
code='max_decimal_places',
params={'max': self.decimal_places},
)
if (self.max_digits is not None and self.decimal_places is not None
and whole_digits > (self.max_digits - self.decimal_places)):
raise ValidationError(
self.error_messages['max_whole_digits'],
code='max_whole_digits',
params={'max': (self.max_digits - self.decimal_places)},
)
return value
def widget_attrs(self, widget):
attrs = super(DecimalField, self).widget_attrs(widget)
if isinstance(widget, NumberInput) and 'step' not in widget.attrs:
if self.decimal_places is not None:
# Use exponential notation for small values since they might
# be parsed as 0 otherwise. ref #20765
step = str(Decimal('1') / 10 ** self.decimal_places).lower()
else:
step = 'any'
attrs.setdefault('step', step)
return attrs
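# Worked example of the digit bookkeeping in validate() above
# (illustrative): Decimal('0.040').as_tuple() gives digits=(4, 0) and
# exponent=-3, so decimals=3 exceeds len(digits)=2 and digits is bumped
# to 3 with whole_digits=0. A field declared with decimal_places=2 would
# therefore raise 'max_decimal_places', and widget_attrs() would emit
# step='0.01' for its NumberInput.
#
# >>> from decimal import Decimal
# >>> Decimal('0.040').as_tuple()
# DecimalTuple(sign=0, digits=(4, 0), exponent=-3)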
class BaseTemporalField(Field):
def __init__(self, input_formats=None, *args, **kwargs):
super(BaseTemporalField, self).__init__(*args, **kwargs)
if input_formats is not None:
self.input_formats = input_formats
def to_python(self, value):
# Try to coerce the value to unicode.
unicode_value = force_text(value, strings_only=True)
if isinstance(unicode_value, six.text_type):
value = unicode_value.strip()
# If unicode, try to strptime against each input format.
if isinstance(value, six.text_type):
for format in self.input_formats:
try:
return self.strptime(value, format)
except (ValueError, TypeError):
continue
raise ValidationError(self.error_messages['invalid'], code='invalid')
def strptime(self, value, format):
raise NotImplementedError('Subclasses must define this method.')
class DateField(BaseTemporalField):
widget = DateInput
input_formats = formats.get_format_lazy('DATE_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid date.'),
}
def to_python(self, value):
"""
Validates that the input can be converted to a date. Returns a Python
datetime.date object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.datetime):
return value.date()
if isinstance(value, datetime.date):
return value
return super(DateField, self).to_python(value)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format).date()
class TimeField(BaseTemporalField):
widget = TimeInput
input_formats = formats.get_format_lazy('TIME_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid time.')
}
def to_python(self, value):
"""
Validates that the input can be converted to a time. Returns a Python
datetime.time object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.time):
return value
return super(TimeField, self).to_python(value)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format).time()
class DateTimeField(BaseTemporalField):
widget = DateTimeInput
input_formats = formats.get_format_lazy('DATETIME_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid date/time.'),
}
def prepare_value(self, value):
if isinstance(value, datetime.datetime):
value = to_current_timezone(value)
return value
def to_python(self, value):
"""
Validates that the input can be converted to a datetime. Returns a
Python datetime.datetime object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.datetime):
return from_current_timezone(value)
if isinstance(value, datetime.date):
result = datetime.datetime(value.year, value.month, value.day)
return from_current_timezone(result)
if isinstance(value, list):
# Input comes from a SplitDateTimeWidget, for example. So, it's two
# components: date and time.
warnings.warn(
'Using SplitDateTimeWidget with DateTimeField is deprecated. '
'Use SplitDateTimeField instead.',
RemovedInDjango19Warning, stacklevel=2)
if len(value) != 2:
raise ValidationError(self.error_messages['invalid'], code='invalid')
if value[0] in self.empty_values and value[1] in self.empty_values:
return None
value = '%s %s' % tuple(value)
result = super(DateTimeField, self).to_python(value)
return from_current_timezone(result)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format)
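# Illustrative sketch (assuming default settings, USE_TZ=False):
# BaseTemporalField tries each entry of input_formats in order, so the
# same field accepts several spellings of one timestamp.
#
# >>> f = DateTimeField()
# >>> f.clean('2006-10-25 14:30')   # matches '%Y-%m-%d %H:%M'
# datetime.datetime(2006, 10, 25, 14, 30)
# >>> f.clean('10/25/2006 14:30')   # matches '%m/%d/%Y %H:%M'
# datetime.datetime(2006, 10, 25, 14, 30)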
class RegexField(CharField):
def __init__(self, regex, max_length=None, min_length=None, error_message=None, *args, **kwargs):
"""
regex can be either a string or a compiled regular expression object.
error_message is an optional error message to use, if
'Enter a valid value' is too generic for you.
"""
# error_message is just kept for backwards compatibility:
if error_message is not None:
error_messages = kwargs.get('error_messages') or {}
error_messages['invalid'] = error_message
kwargs['error_messages'] = error_messages
super(RegexField, self).__init__(max_length, min_length, *args, **kwargs)
self._set_regex(regex)
def _get_regex(self):
return self._regex
def _set_regex(self, regex):
if isinstance(regex, six.string_types):
regex = re.compile(regex, re.UNICODE)
self._regex = regex
if hasattr(self, '_regex_validator') and self._regex_validator in self.validators:
self.validators.remove(self._regex_validator)
self._regex_validator = validators.RegexValidator(regex=regex)
self.validators.append(self._regex_validator)
regex = property(_get_regex, _set_regex)
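# Illustrative sketch: assigning to the `regex` property swaps the
# RegexValidator in place, so the pattern can change after construction
# without stacking stale validators.
#
# >>> f = RegexField(regex=r'^\d+$')
# >>> f.regex = r'^[a-z]+$'   # old validator removed, new one appended
# >>> sum(isinstance(v, validators.RegexValidator) for v in f.validators)
# 1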
class EmailField(CharField):
widget = EmailInput
default_validators = [validators.validate_email]
def clean(self, value):
value = self.to_python(value).strip()
return super(EmailField, self).clean(value)
class FileField(Field):
widget = ClearableFileInput
default_error_messages = {
'invalid': _("No file was submitted. Check the encoding type on the form."),
'missing': _("No file was submitted."),
'empty': _("The submitted file is empty."),
'max_length': ungettext_lazy(
'Ensure this filename has at most %(max)d character (it has %(length)d).',
'Ensure this filename has at most %(max)d characters (it has %(length)d).',
'max'),
'contradiction': _('Please either submit a file or check the clear checkbox, not both.')
}
def __init__(self, *args, **kwargs):
self.max_length = kwargs.pop('max_length', None)
self.allow_empty_file = kwargs.pop('allow_empty_file', False)
super(FileField, self).__init__(*args, **kwargs)
def to_python(self, data):
if data in self.empty_values:
return None
# UploadedFile objects should have name and size attributes.
try:
file_name = data.name
file_size = data.size
except AttributeError:
raise ValidationError(self.error_messages['invalid'], code='invalid')
if self.max_length is not None and len(file_name) > self.max_length:
params = {'max': self.max_length, 'length': len(file_name)}
raise ValidationError(self.error_messages['max_length'], code='max_length', params=params)
if not file_name:
raise ValidationError(self.error_messages['invalid'], code='invalid')
if not self.allow_empty_file and not file_size:
raise ValidationError(self.error_messages['empty'], code='empty')
return data
def clean(self, data, initial=None):
# If the widget got contradictory inputs, we raise a validation error
if data is FILE_INPUT_CONTRADICTION:
raise ValidationError(self.error_messages['contradiction'], code='contradiction')
# False means the field value should be cleared; further validation is
# not needed.
if data is False:
if not self.required:
return False
# If the field is required, clearing is not possible (the widget
# shouldn't return False data in that case anyway). False is not
# in self.empty_values; if a False value makes it this far
# it should be validated from here on out as None (so it will be
# caught by the required check).
data = None
if not data and initial:
return initial
return super(FileField, self).clean(data)
def bound_data(self, data, initial):
if data in (None, FILE_INPUT_CONTRADICTION):
return initial
return data
def _has_changed(self, initial, data):
if data is None:
return False
return True
class ImageField(FileField):
default_error_messages = {
'invalid_image': _("Upload a valid image. The file you uploaded was either not an image or a corrupted image."),
}
def to_python(self, data):
"""
Checks that the file-upload field data contains a valid image (GIF, JPG,
PNG, possibly others -- whatever the Python Imaging Library supports).
"""
f = super(ImageField, self).to_python(data)
if f is None:
return None
from django.utils.image import Image
# We need to get a file object for Pillow. We might have a path or we might
# have to read the data into memory.
if hasattr(data, 'temporary_file_path'):
file = data.temporary_file_path()
else:
if hasattr(data, 'read'):
file = BytesIO(data.read())
else:
file = BytesIO(data['content'])
try:
# load() could spot a truncated JPEG, but it loads the entire
# image in memory, which is a DoS vector. See #3848 and #18520.
# verify() must be called immediately after the constructor.
Image.open(file).verify()
except Exception:
# Pillow (or PIL) doesn't recognize it as an image.
six.reraise(ValidationError, ValidationError(
self.error_messages['invalid_image'],
code='invalid_image',
), sys.exc_info()[2])
if hasattr(f, 'seek') and callable(f.seek):
f.seek(0)
return f
class URLField(CharField):
widget = URLInput
default_error_messages = {
'invalid': _('Enter a valid URL.'),
}
default_validators = [validators.URLValidator()]
def to_python(self, value):
def split_url(url):
"""
Returns a list of url parts via ``urlparse.urlsplit`` (or raises a
``ValidationError`` exception for certain malformed URLs).
"""
try:
return list(urlsplit(url))
except ValueError:
# urlparse.urlsplit can raise a ValueError with some
# misformatted URLs.
raise ValidationError(self.error_messages['invalid'], code='invalid')
value = super(URLField, self).to_python(value)
if value:
url_fields = split_url(value)
if not url_fields[0]:
# If no URL scheme given, assume http://
url_fields[0] = 'http'
if not url_fields[1]:
# Assume that if no domain is provided, that the path segment
# contains the domain.
url_fields[1] = url_fields[2]
url_fields[2] = ''
# Rebuild the url_fields list, since the domain segment may now
# contain the path too.
url_fields = split_url(urlunsplit(url_fields))
if not url_fields[2]:
# the path portion may need to be added before query params
url_fields[2] = '/'
value = urlunsplit(url_fields)
return value
def clean(self, value):
value = self.to_python(value).strip()
return super(URLField, self).clean(value)
class BooleanField(Field):
widget = CheckboxInput
def to_python(self, value):
"""Returns a Python boolean object."""
# Explicitly check for the string 'False', which is what a hidden field
# will submit for False. Also check for '0', since this is what
# RadioSelect will provide. Because bool("True") == bool('1') == True,
# we don't need to handle that explicitly.
if isinstance(value, six.string_types) and value.lower() in ('false', '0'):
value = False
else:
value = bool(value)
return super(BooleanField, self).to_python(value)
def validate(self, value):
if not value and self.required:
raise ValidationError(self.error_messages['required'], code='required')
def _has_changed(self, initial, data):
# Sometimes data or initial could be None or '' which should be the
# same thing as False.
if initial == 'False':
# show_hidden_initial may have transformed False to 'False'
initial = False
return bool(initial) != bool(data)
class NullBooleanField(BooleanField):
"""
A field whose valid values are None, True and False. Invalid values are
cleaned to None.
"""
widget = NullBooleanSelect
def to_python(self, value):
"""
Explicitly checks for the strings 'True' and 'False', which is what a
hidden field will submit for True and False, and for '1' and '0', which
is what a RadioField will submit. Unlike BooleanField, we need to
explicitly check for True because we are not using the bool() function.
"""
if value in (True, 'True', '1'):
return True
elif value in (False, 'False', '0'):
return False
else:
return None
def validate(self, value):
pass
def _has_changed(self, initial, data):
# None (unknown) and False (No) are not the same
if initial is not None:
initial = bool(initial)
if data is not None:
data = bool(data)
return initial != data
class ChoiceField(Field):
widget = Select
default_error_messages = {
'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
}
def __init__(self, choices=(), required=True, widget=None, label=None,
initial=None, help_text='', *args, **kwargs):
super(ChoiceField, self).__init__(required=required, widget=widget, label=label,
initial=initial, help_text=help_text, *args, **kwargs)
self.choices = choices
def __deepcopy__(self, memo):
result = super(ChoiceField, self).__deepcopy__(memo)
result._choices = copy.deepcopy(self._choices, memo)
return result
def _get_choices(self):
return self._choices
def _set_choices(self, value):
# Setting choices also sets the choices on the widget.
# choices can be any iterable, but we call list() on it because
# it will be consumed more than once.
self._choices = self.widget.choices = list(value)
choices = property(_get_choices, _set_choices)
def to_python(self, value):
"Returns a Unicode object."
if value in self.empty_values:
return ''
return smart_text(value)
def validate(self, value):
"""
Validates that the input is in self.choices.
"""
super(ChoiceField, self).validate(value)
if value and not self.valid_value(value):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
def valid_value(self, value):
"Check to see if the provided value is a valid choice"
text_value = force_text(value)
for k, v in self.choices:
if isinstance(v, (list, tuple)):
# This is an optgroup, so look inside the group for options
for k2, v2 in v:
if value == k2 or text_value == force_text(k2):
return True
else:
if value == k or text_value == force_text(k):
return True
return False
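# Illustrative sketch: valid_value() also descends into optgroups, so
# nested choice lists validate exactly like flat ones -- group labels
# themselves are not valid values.
#
# >>> f = ChoiceField(choices=[('audio', [('mp3', 'MP3'), ('wav', 'WAV')]),
# ...                          ('txt', 'Plain text')])
# >>> f.valid_value('wav'), f.valid_value('audio')
# (True, False)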
class TypedChoiceField(ChoiceField):
def __init__(self, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', '')
super(TypedChoiceField, self).__init__(*args, **kwargs)
def _coerce(self, value):
"""
Validate that the value can be coerced to the right type (if not empty).
"""
if value == self.empty_value or value in self.empty_values:
return self.empty_value
try:
value = self.coerce(value)
except (ValueError, TypeError, ValidationError):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
return value
def clean(self, value):
value = super(TypedChoiceField, self).clean(value)
return self._coerce(value)
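# Illustrative sketch: coercion runs after choice validation, so valid
# values come back in the target type while empty input maps to
# empty_value.
#
# >>> f = TypedChoiceField(choices=[('1', 'One'), ('2', 'Two')],
# ...                      coerce=int, required=False)
# >>> f.clean('1')
# 1
# >>> f.clean('')
# ''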
class MultipleChoiceField(ChoiceField):
hidden_widget = MultipleHiddenInput
widget = SelectMultiple
default_error_messages = {
'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
'invalid_list': _('Enter a list of values.'),
}
def to_python(self, value):
if not value:
return []
elif not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['invalid_list'], code='invalid_list')
return [smart_text(val) for val in value]
def validate(self, value):
"""
Validates that the input is a list or tuple.
"""
if self.required and not value:
raise ValidationError(self.error_messages['required'], code='required')
# Validate that each value in the value list is in self.choices.
for val in value:
if not self.valid_value(val):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': val},
)
def _has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = set(force_text(value) for value in initial)
data_set = set(force_text(value) for value in data)
return data_set != initial_set
class TypedMultipleChoiceField(MultipleChoiceField):
def __init__(self, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', [])
super(TypedMultipleChoiceField, self).__init__(*args, **kwargs)
def _coerce(self, value):
"""
Validates that the values are in self.choices and can be coerced to the
right type.
"""
if value == self.empty_value or value in self.empty_values:
return self.empty_value
new_value = []
for choice in value:
try:
new_value.append(self.coerce(choice))
except (ValueError, TypeError, ValidationError):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': choice},
)
return new_value
def clean(self, value):
value = super(TypedMultipleChoiceField, self).clean(value)
return self._coerce(value)
def validate(self, value):
if value != self.empty_value:
super(TypedMultipleChoiceField, self).validate(value)
elif self.required:
raise ValidationError(self.error_messages['required'], code='required')
class ComboField(Field):
"""
A Field whose clean() method calls multiple Field clean() methods.
"""
def __init__(self, fields=(), *args, **kwargs):
super(ComboField, self).__init__(*args, **kwargs)
# Set 'required' to False on the individual fields, because the
# required validation will be handled by ComboField, not by those
# individual fields.
for f in fields:
f.required = False
self.fields = fields
def clean(self, value):
"""
Validates the given value against all of self.fields, which is a
list of Field instances.
"""
super(ComboField, self).clean(value)
for field in self.fields:
value = field.clean(value)
return value
class MultiValueField(Field):
"""
A Field that aggregates the logic of multiple Fields.
Its clean() method takes a "decompressed" list of values, which are then
cleaned into a single value according to self.fields. Each value in
this list is cleaned by the corresponding field -- the first value is
cleaned by the first field, the second value is cleaned by the second
field, etc. Once all fields are cleaned, the list of clean values is
"compressed" into a single value.
Subclasses should not have to implement clean(). Instead, they must
implement compress(), which takes a list of valid values and returns a
"compressed" version of those values -- a single value.
You'll probably want to use this with MultiWidget.
"""
default_error_messages = {
'invalid': _('Enter a list of values.'),
'incomplete': _('Enter a complete value.'),
}
def __init__(self, fields=(), *args, **kwargs):
self.require_all_fields = kwargs.pop('require_all_fields', True)
super(MultiValueField, self).__init__(*args, **kwargs)
for f in fields:
f.error_messages.setdefault('incomplete',
self.error_messages['incomplete'])
if self.require_all_fields:
# Set 'required' to False on the individual fields, because the
# required validation will be handled by MultiValueField, not
# by those individual fields.
f.required = False
self.fields = fields
def __deepcopy__(self, memo):
result = super(MultiValueField, self).__deepcopy__(memo)
result.fields = tuple([x.__deepcopy__(memo) for x in self.fields])
return result
def validate(self, value):
pass
def clean(self, value):
"""
Validates every value in the given list. A value is validated against
the corresponding Field in self.fields.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), clean() would call
DateField.clean(value[0]) and TimeField.clean(value[1]).
"""
clean_data = []
errors = []
if not value or isinstance(value, (list, tuple)):
if not value or not [v for v in value if v not in self.empty_values]:
if self.required:
raise ValidationError(self.error_messages['required'], code='required')
else:
return self.compress([])
else:
raise ValidationError(self.error_messages['invalid'], code='invalid')
for i, field in enumerate(self.fields):
try:
field_value = value[i]
except IndexError:
field_value = None
if field_value in self.empty_values:
if self.require_all_fields:
# Raise a 'required' error if the MultiValueField is
# required and any field is empty.
if self.required:
raise ValidationError(self.error_messages['required'], code='required')
elif field.required:
# Otherwise, add an 'incomplete' error to the list of
# collected errors and skip field cleaning, if a required
# field is empty.
if field.error_messages['incomplete'] not in errors:
errors.append(field.error_messages['incomplete'])
continue
try:
clean_data.append(field.clean(field_value))
except ValidationError as e:
# Collect all validation errors in a single list, which we'll
# raise at the end of clean(), rather than raising a single
# exception for the first error we encounter. Skip duplicates.
errors.extend(m for m in e.error_list if m not in errors)
if errors:
raise ValidationError(errors)
out = self.compress(clean_data)
self.validate(out)
self.run_validators(out)
return out
def compress(self, data_list):
"""
Returns a single value for the given list of values. The values can be
assumed to be valid.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), this might return a datetime
object created by combining the date and time in data_list.
"""
raise NotImplementedError('Subclasses must implement this method.')
def _has_changed(self, initial, data):
if initial is None:
initial = ['' for x in range(0, len(data))]
else:
if not isinstance(initial, list):
initial = self.widget.decompress(initial)
for field, initial, data in zip(self.fields, initial, data):
try:
initial = field.to_python(initial)
except ValidationError:
return True
if field._has_changed(initial, data):
return True
return False
class FilePathField(ChoiceField):
def __init__(self, path, match=None, recursive=False, allow_files=True,
allow_folders=False, required=True, widget=None, label=None,
initial=None, help_text='', *args, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
super(FilePathField, self).__init__(choices=(), required=required,
widget=widget, label=label, initial=initial, help_text=help_text,
*args, **kwargs)
if self.required:
self.choices = []
else:
self.choices = [("", "---------")]
if self.match is not None:
self.match_re = re.compile(self.match)
if recursive:
for root, dirs, files in sorted(os.walk(self.path)):
if self.allow_files:
for f in files:
if self.match is None or self.match_re.search(f):
f = os.path.join(root, f)
self.choices.append((f, f.replace(path, "", 1)))
if self.allow_folders:
for f in dirs:
if f == '__pycache__':
continue
if self.match is None or self.match_re.search(f):
f = os.path.join(root, f)
self.choices.append((f, f.replace(path, "", 1)))
else:
try:
for f in sorted(os.listdir(self.path)):
if f == '__pycache__':
continue
full_file = os.path.join(self.path, f)
if (((self.allow_files and os.path.isfile(full_file)) or
(self.allow_folders and os.path.isdir(full_file))) and
(self.match is None or self.match_re.search(f))):
self.choices.append((full_file, f))
except OSError:
pass
self.widget.choices = self.choices
class SplitDateTimeField(MultiValueField):
widget = SplitDateTimeWidget
hidden_widget = SplitHiddenDateTimeWidget
default_error_messages = {
'invalid_date': _('Enter a valid date.'),
'invalid_time': _('Enter a valid time.'),
}
def __init__(self, input_date_formats=None, input_time_formats=None, *args, **kwargs):
errors = self.default_error_messages.copy()
if 'error_messages' in kwargs:
errors.update(kwargs['error_messages'])
localize = kwargs.get('localize', False)
fields = (
DateField(input_formats=input_date_formats,
error_messages={'invalid': errors['invalid_date']},
localize=localize),
TimeField(input_formats=input_time_formats,
error_messages={'invalid': errors['invalid_time']},
localize=localize),
)
super(SplitDateTimeField, self).__init__(fields, *args, **kwargs)
def compress(self, data_list):
if data_list:
# Raise a validation error if time or date is empty
# (possible if SplitDateTimeField has required=False).
if data_list[0] in self.empty_values:
raise ValidationError(self.error_messages['invalid_date'], code='invalid_date')
if data_list[1] in self.empty_values:
raise ValidationError(self.error_messages['invalid_time'], code='invalid_time')
result = datetime.datetime.combine(*data_list)
return from_current_timezone(result)
return None
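# Illustrative sketch (assuming default formats and USE_TZ=False): each
# sub-field cleans its half, then compress() combines them into a single
# datetime.
#
# >>> f = SplitDateTimeField()
# >>> f.clean(['2006-01-10', '07:30'])
# datetime.datetime(2006, 1, 10, 7, 30)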
class IPAddressField(CharField):
default_validators = [validators.validate_ipv4_address]
def __init__(self, *args, **kwargs):
warnings.warn("IPAddressField has been deprecated. Use GenericIPAddressField instead.",
RemovedInDjango19Warning)
super(IPAddressField, self).__init__(*args, **kwargs)
def to_python(self, value):
if value in self.empty_values:
return ''
return value.strip()
class GenericIPAddressField(CharField):
def __init__(self, protocol='both', unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.default_validators = validators.ip_address_validators(protocol, unpack_ipv4)[0]
super(GenericIPAddressField, self).__init__(*args, **kwargs)
def to_python(self, value):
if value in self.empty_values:
return ''
value = value.strip()
if value and ':' in value:
return clean_ipv6_address(value, self.unpack_ipv4)
return value
class SlugField(CharField):
default_validators = [validators.validate_slug]
def clean(self, value):
value = self.to_python(value).strip()
return super(SlugField, self).clean(value)
license: apache-2.0
Kiiv/CouchPotatoServer | libs/rsa/key.py | copies: 110 | size: 17087
# -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''RSA key generation code.
Create new keys with the newkeys() function. It will give you a PublicKey and a
PrivateKey object.
Loading and saving keys requires the pyasn1 module. This module is imported
as late as possible, such that other functionality will remain working in
the absence of pyasn1.
'''
import logging
from rsa._compat import b
import rsa.prime
import rsa.pem
import rsa.common
log = logging.getLogger(__name__)
class AbstractKey(object):
'''Abstract superclass for private and public keys.'''
@classmethod
def load_pkcs1(cls, keyfile, format='PEM'):
r'''Loads a key in PKCS#1 DER or PEM format.
:param keyfile: contents of a DER- or PEM-encoded file that contains
the public key.
:param format: the format of the file to load; 'PEM' or 'DER'
:return: a PublicKey object
'''
methods = {
'PEM': cls._load_pkcs1_pem,
'DER': cls._load_pkcs1_der,
}
if format not in methods:
formats = ', '.join(sorted(methods.keys()))
raise ValueError('Unsupported format: %r, try one of %s' % (format,
formats))
method = methods[format]
return method(keyfile)
def save_pkcs1(self, format='PEM'):
'''Saves the public key in PKCS#1 DER or PEM format.
:param format: the format to save; 'PEM' or 'DER'
:returns: the DER- or PEM-encoded public key.
'''
methods = {
'PEM': self._save_pkcs1_pem,
'DER': self._save_pkcs1_der,
}
if format not in methods:
formats = ', '.join(sorted(methods.keys()))
raise ValueError('Unsupported format: %r, try one of %s' % (format,
formats))
method = methods[format]
return method()
class PublicKey(AbstractKey):
'''Represents a public RSA key.
This key is also known as the 'encryption key'. It contains the 'n' and 'e'
values.
Supports attributes as well as dictionary-like access. Attribute access is
faster, though.
>>> PublicKey(5, 3)
PublicKey(5, 3)
>>> key = PublicKey(5, 3)
>>> key.n
5
>>> key['n']
5
>>> key.e
3
>>> key['e']
3
'''
__slots__ = ('n', 'e')
def __init__(self, n, e):
self.n = n
self.e = e
def __getitem__(self, key):
return getattr(self, key)
def __repr__(self):
return 'PublicKey(%i, %i)' % (self.n, self.e)
def __eq__(self, other):
if other is None:
return False
if not isinstance(other, PublicKey):
return False
return self.n == other.n and self.e == other.e
def __ne__(self, other):
return not (self == other)
@classmethod
def _load_pkcs1_der(cls, keyfile):
r'''Loads a key in PKCS#1 DER format.
@param keyfile: contents of a DER-encoded file that contains the public
key.
@return: a PublicKey object
First let's construct a DER encoded key:
>>> import base64
>>> b64der = 'MAwCBQCNGmYtAgMBAAE='
>>> der = base64.decodestring(b64der)
This loads the file:
>>> PublicKey._load_pkcs1_der(der)
PublicKey(2367317549, 65537)
'''
from pyasn1.codec.der import decoder
(priv, _) = decoder.decode(keyfile)
# ASN.1 contents of DER encoded public key:
#
# RSAPublicKey ::= SEQUENCE {
# modulus INTEGER, -- n
# publicExponent INTEGER, -- e
as_ints = tuple(int(x) for x in priv)
return cls(*as_ints)
def _save_pkcs1_der(self):
'''Saves the public key in PKCS#1 DER format.
@returns: the DER-encoded public key.
'''
from pyasn1.type import univ, namedtype
from pyasn1.codec.der import encoder
class AsnPubKey(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('modulus', univ.Integer()),
namedtype.NamedType('publicExponent', univ.Integer()),
)
# Create the ASN object
asn_key = AsnPubKey()
asn_key.setComponentByName('modulus', self.n)
asn_key.setComponentByName('publicExponent', self.e)
return encoder.encode(asn_key)
@classmethod
def _load_pkcs1_pem(cls, keyfile):
'''Loads a PKCS#1 PEM-encoded public key file.
The contents of the file before the "-----BEGIN RSA PUBLIC KEY-----" and
after the "-----END RSA PUBLIC KEY-----" lines is ignored.
@param keyfile: contents of a PEM-encoded file that contains the public
key.
@return: a PublicKey object
'''
der = rsa.pem.load_pem(keyfile, 'RSA PUBLIC KEY')
return cls._load_pkcs1_der(der)
def _save_pkcs1_pem(self):
'''Saves a PKCS#1 PEM-encoded public key file.
@return: contents of a PEM-encoded file that contains the public key.
'''
der = self._save_pkcs1_der()
return rsa.pem.save_pem(der, 'RSA PUBLIC KEY')
class PrivateKey(AbstractKey):
'''Represents a private RSA key.
This key is also known as the 'decryption key'. It contains the 'n', 'e',
'd', 'p', 'q' and other values.
Supports attributes as well as dictionary-like access. Attribute access is
faster, though.
>>> PrivateKey(3247, 65537, 833, 191, 17)
PrivateKey(3247, 65537, 833, 191, 17)
exp1, exp2 and coef don't have to be given, they will be calculated:
>>> pk = PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
>>> pk.exp1
55063
>>> pk.exp2
10095
>>> pk.coef
50797
If you give exp1, exp2 or coef, they will be used as-is:
>>> pk = PrivateKey(1, 2, 3, 4, 5, 6, 7, 8)
>>> pk.exp1
6
>>> pk.exp2
7
>>> pk.coef
8
'''
__slots__ = ('n', 'e', 'd', 'p', 'q', 'exp1', 'exp2', 'coef')
def __init__(self, n, e, d, p, q, exp1=None, exp2=None, coef=None):
self.n = n
self.e = e
self.d = d
self.p = p
self.q = q
# Calculate the other values if they aren't supplied
if exp1 is None:
self.exp1 = int(d % (p - 1))
else:
self.exp1 = exp1
if exp2 is None:
self.exp2 = int(d % (q - 1))
else:
self.exp2 = exp2
if coef is None:
self.coef = rsa.common.inverse(q, p)
else:
self.coef = coef
def __getitem__(self, key):
return getattr(self, key)
def __repr__(self):
return 'PrivateKey(%(n)i, %(e)i, %(d)i, %(p)i, %(q)i)' % self
def __eq__(self, other):
if other is None:
return False
if not isinstance(other, PrivateKey):
return False
return (self.n == other.n and
self.e == other.e and
self.d == other.d and
self.p == other.p and
self.q == other.q and
self.exp1 == other.exp1 and
self.exp2 == other.exp2 and
self.coef == other.coef)
def __ne__(self, other):
return not (self == other)
@classmethod
def _load_pkcs1_der(cls, keyfile):
r'''Loads a key in PKCS#1 DER format.
@param keyfile: contents of a DER-encoded file that contains the private
key.
@return: a PrivateKey object
First let's construct a DER encoded key:
>>> import base64
>>> b64der = 'MC4CAQACBQDeKYlRAgMBAAECBQDHn4npAgMA/icCAwDfxwIDANcXAgInbwIDAMZt'
>>> der = base64.decodestring(b64der)
This loads the file:
>>> PrivateKey._load_pkcs1_der(der)
PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
'''
from pyasn1.codec.der import decoder
(priv, _) = decoder.decode(keyfile)
# ASN.1 contents of DER encoded private key:
#
# RSAPrivateKey ::= SEQUENCE {
# version Version,
# modulus INTEGER, -- n
# publicExponent INTEGER, -- e
# privateExponent INTEGER, -- d
# prime1 INTEGER, -- p
# prime2 INTEGER, -- q
# exponent1 INTEGER, -- d mod (p-1)
# exponent2 INTEGER, -- d mod (q-1)
# coefficient INTEGER, -- (inverse of q) mod p
# otherPrimeInfos OtherPrimeInfos OPTIONAL
# }
if priv[0] != 0:
raise ValueError('Unable to read this file, version %s != 0' % priv[0])
as_ints = tuple(int(x) for x in priv[1:9])
return cls(*as_ints)
def _save_pkcs1_der(self):
'''Saves the private key in PKCS#1 DER format.
@returns: the DER-encoded private key.
'''
from pyasn1.type import univ, namedtype
from pyasn1.codec.der import encoder
class AsnPrivKey(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', univ.Integer()),
namedtype.NamedType('modulus', univ.Integer()),
namedtype.NamedType('publicExponent', univ.Integer()),
namedtype.NamedType('privateExponent', univ.Integer()),
namedtype.NamedType('prime1', univ.Integer()),
namedtype.NamedType('prime2', univ.Integer()),
namedtype.NamedType('exponent1', univ.Integer()),
namedtype.NamedType('exponent2', univ.Integer()),
namedtype.NamedType('coefficient', univ.Integer()),
)
# Create the ASN object
asn_key = AsnPrivKey()
asn_key.setComponentByName('version', 0)
asn_key.setComponentByName('modulus', self.n)
asn_key.setComponentByName('publicExponent', self.e)
asn_key.setComponentByName('privateExponent', self.d)
asn_key.setComponentByName('prime1', self.p)
asn_key.setComponentByName('prime2', self.q)
asn_key.setComponentByName('exponent1', self.exp1)
asn_key.setComponentByName('exponent2', self.exp2)
asn_key.setComponentByName('coefficient', self.coef)
return encoder.encode(asn_key)
@classmethod
def _load_pkcs1_pem(cls, keyfile):
'''Loads a PKCS#1 PEM-encoded private key file.
The contents of the file before the "-----BEGIN RSA PRIVATE KEY-----" and
after the "-----END RSA PRIVATE KEY-----" lines is ignored.
@param keyfile: contents of a PEM-encoded file that contains the private
key.
@return: a PrivateKey object
'''
der = rsa.pem.load_pem(keyfile, b('RSA PRIVATE KEY'))
return cls._load_pkcs1_der(der)
def _save_pkcs1_pem(self):
'''Saves a PKCS#1 PEM-encoded private key file.
@return: contents of a PEM-encoded file that contains the private key.
'''
der = self._save_pkcs1_der()
return rsa.pem.save_pem(der, b('RSA PRIVATE KEY'))
def find_p_q(nbits, getprime_func=rsa.prime.getprime, accurate=True):
'''Returns a tuple of two different primes of nbits bits each.
The resulting p * q has exactly 2 * nbits bits, and the returned p and q
will not be equal.
:param nbits: the number of bits in each of p and q.
:param getprime_func: the getprime function, defaults to
:py:func:`rsa.prime.getprime`.
*Introduced in Python-RSA 3.1*
:param accurate: whether to enable accurate mode or not.
:returns: (p, q), where p > q
>>> (p, q) = find_p_q(128)
>>> from rsa import common
>>> common.bit_size(p * q)
256
When not in accurate mode, the number of bits can be slightly less
>>> (p, q) = find_p_q(128, accurate=False)
>>> from rsa import common
>>> common.bit_size(p * q) <= 256
True
>>> common.bit_size(p * q) > 240
True
'''
total_bits = nbits * 2
# Make sure that p and q aren't too close or the factoring programs can
# factor n.
shift = nbits // 16
pbits = nbits + shift
qbits = nbits - shift
# Choose the two initial primes
log.debug('find_p_q(%i): Finding p', nbits)
p = getprime_func(pbits)
log.debug('find_p_q(%i): Finding q', nbits)
q = getprime_func(qbits)
def is_acceptable(p, q):
'''Returns True iff p and q are acceptable:
- p and q differ
- (p * q) has the right nr of bits (when accurate=True)
'''
if p == q:
return False
if not accurate:
return True
# Make sure we have just the right amount of bits
found_size = rsa.common.bit_size(p * q)
return total_bits == found_size
# Keep choosing other primes until they match our requirements.
change_p = False
while not is_acceptable(p, q):
# Change p on one iteration and q on the other
if change_p:
p = getprime_func(pbits)
else:
q = getprime_func(qbits)
change_p = not change_p
# We want p > q as described on
# http://www.di-mgt.com.au/rsa_alg.html#crt
return (max(p, q), min(p, q))
def calculate_keys(p, q, nbits):
'''Calculates an encryption and a decryption key given p and q, and
returns them as a tuple (e, d)
'''
phi_n = (p - 1) * (q - 1)
# A very common choice for e is 65537
e = 65537
try:
d = rsa.common.inverse(e, phi_n)
except ValueError:
raise ValueError("e (%d) and phi_n (%d) are not relatively prime" %
(e, phi_n))
if (e * d) % phi_n != 1:
raise ValueError("e (%d) and d (%d) are not mult. inv. modulo "
"phi_n (%d)" % (e, d, phi_n))
return (e, d)
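# Worked example (illustrative, textbook-sized numbers): with p=61 and
# q=53, phi_n = 60 * 52 = 3120 and e = 65537 = 17 (mod 3120), so the
# modular inverse yields d = 2753; indeed 65537 * 2753 = 1 (mod 3120).
#
# >>> calculate_keys(61, 53, 16)
# (65537, 2753)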
def gen_keys(nbits, getprime_func, accurate=True):
'''Generate RSA keys of nbits bits. Returns (p, q, e, d).
Note: this can take a long time, depending on the key size.
:param nbits: the total number of bits in ``p`` and ``q``. Both ``p`` and
``q`` will use ``nbits/2`` bits.
:param getprime_func: either :py:func:`rsa.prime.getprime` or a function
with similar signature.
'''
(p, q) = find_p_q(nbits // 2, getprime_func, accurate)
(e, d) = calculate_keys(p, q, nbits // 2)
return (p, q, e, d)
def newkeys(nbits, accurate=True, poolsize=1):
'''Generates public and private keys, and returns them as (pub, priv).
The public key is also known as the 'encryption key', and is a
:py:class:`rsa.PublicKey` object. The private key is also known as the
'decryption key' and is a :py:class:`rsa.PrivateKey` object.
:param nbits: the number of bits required to store ``n = p*q``.
:param accurate: when True, ``n`` will have exactly the number of bits you
asked for. However, this makes key generation much slower. When False,
``n`` may have slightly fewer bits.
:param poolsize: the number of processes to use to generate the prime
numbers. If set to a number > 1, a parallel algorithm will be used.
This requires Python 2.6 or newer.
:returns: a tuple (:py:class:`rsa.PublicKey`, :py:class:`rsa.PrivateKey`)
The ``poolsize`` parameter was added in *Python-RSA 3.1* and requires
Python 2.6 or newer.
'''
if nbits < 16:
raise ValueError('Key too small')
if poolsize < 1:
raise ValueError('Pool size (%i) should be >= 1' % poolsize)
# Determine which getprime function to use
if poolsize > 1:
from rsa import parallel
import functools
getprime_func = functools.partial(parallel.getprime, poolsize=poolsize)
else: getprime_func = rsa.prime.getprime
# Generate the key components
(p, q, e, d) = gen_keys(nbits, getprime_func)
# Create the key objects
n = p * q
return (
PublicKey(n, e),
PrivateKey(n, e, d, p, q)
)
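# Illustrative usage sketch:
#
# >>> (pub, priv) = newkeys(128)
# >>> pub.n == priv.n
# True
# >>> rsa.common.bit_size(pub.n)   # exact because accurate=True
# 128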
__all__ = ['PublicKey', 'PrivateKey', 'newkeys']
if __name__ == '__main__':
import doctest
try:
for count in range(100):
(failures, tests) = doctest.testmod()
if failures:
break
if (count and count % 10 == 0) or count == 1:
print('%i times' % count)
except KeyboardInterrupt:
print('Aborted')
else:
print('Doctests done')
license: gpl-3.0
jazcollins/models | syntaxnet/dragnn/python/composite_optimizer_test.py | copies: 12 | size: 4661
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CompositeOptimizer."""
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from dragnn.python import composite_optimizer
class MockAdamOptimizer(tf.train.AdamOptimizer):
def __init__(self,
learning_rate=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8,
use_locking=False,
name="Adam"):
super(MockAdamOptimizer, self).__init__(learning_rate, beta1, beta2,
epsilon, use_locking, name)
def _create_slots(self, var_list):
super(MockAdamOptimizer, self)._create_slots(var_list)
for v in var_list:
self._zeros_slot(v, "adam_counter", self._name)
def _apply_dense(self, grad, var):
train_op = super(MockAdamOptimizer, self)._apply_dense(grad, var)
counter = self.get_slot(var, "adam_counter")
return tf.group(train_op, tf.assign_add(counter, [1.0]))
class MockMomentumOptimizer(tf.train.MomentumOptimizer):
def __init__(self,
learning_rate,
momentum,
use_locking=False,
name="Momentum",
use_nesterov=False):
super(MockMomentumOptimizer, self).__init__(learning_rate, momentum,
use_locking, name, use_nesterov)
def _create_slots(self, var_list):
super(MockMomentumOptimizer, self)._create_slots(var_list)
for v in var_list:
self._zeros_slot(v, "momentum_counter", self._name)
def _apply_dense(self, grad, var):
train_op = super(MockMomentumOptimizer, self)._apply_dense(grad, var)
counter = self.get_slot(var, "momentum_counter")
return tf.group(train_op, tf.assign_add(counter, [1.0]))
class CompositeOptimizerTest(test_util.TensorFlowTestCase):
def test_switching(self):
with self.test_session() as sess:
# Create 100 phony x, y data points in NumPy, y = x * 0.1 + 0.3
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3
# Try to find values for w and b that compute y_data = w * x_data + b
# (We know that w should be 0.1 and b 0.3, but TensorFlow will
# figure that out for us.)
w = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))
y = w * x_data + b
# Minimize the mean squared errors.
loss = tf.reduce_mean(tf.square(y - y_data))
# Set up optimizers.
step = tf.get_variable(
"step",
shape=[],
initializer=tf.zeros_initializer(),
trainable=False,
dtype=tf.int32)
optimizer1 = MockAdamOptimizer(0.05)
optimizer2 = MockMomentumOptimizer(0.05, 0.5)
switch = tf.less(step, 100)
optimizer = composite_optimizer.CompositeOptimizer(optimizer1, optimizer2,
switch)
train_op = optimizer.minimize(loss)
sess.run(tf.global_variables_initializer())
# Fit the line.:
for iteration in range(201):
self.assertEqual(sess.run(switch), iteration < 100)
sess.run(train_op)
sess.run(tf.assign_add(step, 1))
slot_names = optimizer.get_slot_names()
self.assertItemsEqual(
slot_names,
["m", "v", "momentum", "adam_counter", "momentum_counter"])
adam_counter = sess.run(optimizer.get_slot(w, "adam_counter"))
momentum_counter = sess.run(optimizer.get_slot(w, "momentum_counter"))
self.assertEqual(adam_counter, min(iteration + 1, 100))
self.assertEqual(momentum_counter, max(iteration - 99, 0))
if iteration % 20 == 0:
logging.info("%d %s %d %d", iteration, sess.run([switch, step, w, b]),
adam_counter, momentum_counter)
if __name__ == "__main__":
googletest.main()
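# Illustrative sketch of the pattern under test: the composite delegates
# to its first optimizer while the `switch` tensor evaluates True and to
# the second afterwards, which the mock slot counters make observable.
#
# switch = tf.less(step, 100)  # True for the first 100 steps
# opt = composite_optimizer.CompositeOptimizer(adam, momentum, switch)
# train_op = opt.minimize(loss)  # Adam updates first, then Momentum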
license: apache-2.0
koonsolo/MysticMine | monorail/pickupsview.py | copies: 1 | size: 11068
import random
import pygame
from koon.geo import Vec2D
import koon.geo as geo
from koon.gfx import SpriteFilm, Font, LoopAnimationTimer, PingPongTimer, Timer
from koon.res import resman
import pickups
import event
import tiles
class PickupView:
def __init__( self ):
self.pos = None
self.jump_pos = None
def get_z( self ):
if self.pos is None:
return -999
else:
return self.pos.y + 64
z = property( get_z )
def get_pos( self, frame ):
self.pos = None
if self.model.container is None or not hasattr( self.model.container, "views" ): return None
self.pos = self.model.container.views[0].get_pickup_pos( frame )
if self.model.jump_cnt is not None:
if self.jump_pos is None:
self.jump_pos = self.pos
x = geo.lin_ipol( self.model.jump_cnt, self.jump_pos.x, self.pos.x )
y = geo.lin_ipol( self.model.jump_cnt, self.jump_pos.y, self.pos.y )
height = self.model.jump_cnt
if self.model.jump_cnt > 0.5:
height = 1.0 - self.model.jump_cnt
self.pos = Vec2D( x, y - 30 * height)
else:
self.jump_pos = None
return self.pos
class TorchView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.torch_sprite").clone()
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 20) )
class KeyView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.key_sprite")
self.animTimer = LoopAnimationTimer( 25, 0, 19 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 20) )
class MirrorView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.mirror_sprite").clone()
self.animTimer = LoopAnimationTimer( 25, 0, 9 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 10) )
class OilerView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.oiler_sprite").clone()
def draw( self, frame ):
if self.get_pos( frame ) is not None and self.model.goldcar is None: # only draw on tile
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET) )
class MultiplierView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
def draw( self, frame ):
if self.get_pos( frame ) is None: return
font = Font(size = 28, color = (255,0,0))
pos = self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET)
if self.model.goldcar is not None:
pos += Vec2D(0, 20)
font.draw("x2", frame.surface, pos.get_tuple(), Font.CENTER, Font.MIDDLE)
class BalloonView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.balloon_sprite")
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 20) )
class GhostView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.ghost_sprite").clone()
def draw( self, frame ):
if self.get_pos( frame ) is not None and self.model.goldcar is None: # only draw on tile
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 20) )
class CopperCoinView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.copper_sprite").clone()
self.animTimer = LoopAnimationTimer( 25, 0, self.sprite.max_x )
self.animTimer.set_frame( 0, random.randint(0,self.sprite.max_x-1) )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET) )
class GoldBlockView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.gold_sprite").clone()
self.animTimer = LoopAnimationTimer( 25, 0, 15 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET) )
class RockBlockView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.rock_sprite").clone()
self.animTimer = LoopAnimationTimer( 25, 0, 15 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET + 10) )
class DiamondView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.diamond_sprite").clone()
self.animTimer = LoopAnimationTimer( 25, 0, 4 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET) )
class DynamiteView (PickupView):
class Sparkle:
def __init__( self, pos ):
self.pos = pos
self.life = 10 + int(random.random() * 2)
self.move = Vec2D( random.uniform( -2.5, 2.5 ), random.uniform( -2.5, 0.0 ) )
self.surf = resman.get("game.sparkle_surf")
width, height = self.surf.get_size()
self.center = Vec2D( width/2, height/2 )
def game_tick( self ):
self.life -= 1
self.pos += self.move
self.move.y += 0.1
def is_dead( self ):
return self.life <= 0
def draw( self, frame ):
pos = self.pos + self.center + Vec2D( frame.X_OFFSET, frame.Y_OFFSET )
self.surf.draw( frame.surface, pos )
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.dynamite_sprite").clone()
self.sprite_delta = 1
self.prev_life = 1.0
w, h = self.sprite.get_size()
self.sparkle_offset = Vec2D( 7, -h + 24 )
self.sparkle_line = Vec2D( 0, -22 )
self.sparkles = []
self.sparkle_timer = Timer( 25 )
def draw( self, frame ):
if self.get_pos(frame) is None: return
        # Advance the fuse animation by one ping-pong step each time the
        # remaining life changes, once the dynamite is close to exploding.
if self.model.life < pickups.Dynamite.DEC * 18 and\
self.model.life != self.prev_life:
self.prev_life = self.model.life
self.sprite.nr += self.sprite_delta
if self.sprite.nr < 0:
self.sprite.nr = 0
self.sprite_delta = 1
elif self.sprite.nr >= 4:
self.sprite.nr = 3
self.sprite_delta = -1
event.Event.dynamite_tick()
while self.sparkle_timer.do_tick( frame.time_sec ):
self.sparkle_tick( frame )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D( frame.X_OFFSET, frame.Y_OFFSET ) )
for sparkle in self.sparkles:
sparkle.draw( frame )
def sparkle_tick( self, frame ):
if self.model.life > pickups.Dynamite.DEC * 18:
for i in range(3):
pos = self.get_pos(frame) + self.sparkle_offset + self.sparkle_line * self.model.life
self.sparkles.append( DynamiteView.Sparkle( pos ) )
new_sparkles = []
for sparkle in self.sparkles:
sparkle.game_tick()
if not sparkle.is_dead():
new_sparkles.append( sparkle )
self.sparkles = new_sparkles
class LampView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.lamp_sprite").clone()
#self.animTimer = LoopAnimationTimer( 25, 0, 4 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
#self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET) )
class AxeView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.axe_sprite").clone()
        self.animTimer = PingPongTimer( 25, 0, 8 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET) )
class FlagView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.flag%d_sprite" % (model.goldcar.nr+1))
self.animTimer = LoopAnimationTimer( 20, 0, 8 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 20) )
class LeprechaunView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.leprechaun_sprite").clone()
#self.animTimer = LoopAnimationTimer( 25, 0, 4 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
#self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 20) )
|
mit
| -715,982,806,236,803,000 | -7,886,651,174,294,162,000 | 34.703226 | 111 | 0.58502 | false |
raviflipsyde/servo
|
tests/wpt/css-tests/tools/pywebsocket/src/test/test_handshake.py
|
452
|
7134
|
#!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for handshake._base module."""
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket.common import ExtensionParameter
from mod_pywebsocket.common import ExtensionParsingException
from mod_pywebsocket.common import format_extensions
from mod_pywebsocket.common import parse_extensions
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake._base import validate_subprotocol
class ValidateSubprotocolTest(unittest.TestCase):
"""A unittest for validate_subprotocol method."""
def test_validate_subprotocol(self):
# Should succeed.
validate_subprotocol('sample')
validate_subprotocol('Sample')
validate_subprotocol('sample\x7eprotocol')
# Should fail.
self.assertRaises(HandshakeException,
validate_subprotocol,
'')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x09protocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x19protocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x20protocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x7fprotocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
# "Japan" in Japanese
u'\u65e5\u672c')
_TEST_TOKEN_EXTENSION_DATA = [
('foo', [('foo', [])]),
('foo; bar', [('foo', [('bar', None)])]),
('foo; bar=baz', [('foo', [('bar', 'baz')])]),
('foo; bar=baz; car=cdr', [('foo', [('bar', 'baz'), ('car', 'cdr')])]),
('foo; bar=baz, car; cdr',
[('foo', [('bar', 'baz')]), ('car', [('cdr', None)])]),
('a, b, c, d',
[('a', []), ('b', []), ('c', []), ('d', [])]),
]
_TEST_QUOTED_EXTENSION_DATA = [
('foo; bar=""', [('foo', [('bar', '')])]),
('foo; bar=" baz "', [('foo', [('bar', ' baz ')])]),
('foo; bar=",baz;"', [('foo', [('bar', ',baz;')])]),
('foo; bar="\\\r\\\nbaz"', [('foo', [('bar', '\r\nbaz')])]),
('foo; bar="\\"baz"', [('foo', [('bar', '"baz')])]),
('foo; bar="\xbbbaz"', [('foo', [('bar', '\xbbbaz')])]),
]
_TEST_REDUNDANT_TOKEN_EXTENSION_DATA = [
('foo \t ', [('foo', [])]),
('foo; \r\n bar', [('foo', [('bar', None)])]),
('foo; bar=\r\n \r\n baz', [('foo', [('bar', 'baz')])]),
('foo ;bar = baz ', [('foo', [('bar', 'baz')])]),
('foo,bar,,baz', [('foo', []), ('bar', []), ('baz', [])]),
]
_TEST_REDUNDANT_QUOTED_EXTENSION_DATA = [
('foo; bar="\r\n \r\n baz"', [('foo', [('bar', ' baz')])]),
]
class ExtensionsParserTest(unittest.TestCase):
def _verify_extension_list(self, expected_list, actual_list):
"""Verifies that ExtensionParameter objects in actual_list have the
same members as extension definitions in expected_list. Extension
definition used in this test is a pair of an extension name and a
parameter dictionary.
"""
self.assertEqual(len(expected_list), len(actual_list))
for expected, actual in zip(expected_list, actual_list):
(name, parameters) = expected
self.assertEqual(name, actual._name)
self.assertEqual(parameters, actual._parameters)
def test_parse(self):
for formatted_string, definition in _TEST_TOKEN_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string))
def test_parse_quoted_data(self):
for formatted_string, definition in _TEST_QUOTED_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string))
def test_parse_redundant_data(self):
for (formatted_string,
definition) in _TEST_REDUNDANT_TOKEN_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string))
def test_parse_redundant_quoted_data(self):
for (formatted_string,
definition) in _TEST_REDUNDANT_QUOTED_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string))
def test_parse_bad_data(self):
_TEST_BAD_EXTENSION_DATA = [
('foo; ; '),
('foo; a a'),
('foo foo'),
(',,,'),
('foo; bar='),
('foo; bar="hoge'),
('foo; bar="a\r"'),
('foo; bar="\\\xff"'),
('foo; bar=\ra'),
]
for formatted_string in _TEST_BAD_EXTENSION_DATA:
self.assertRaises(
ExtensionParsingException, parse_extensions, formatted_string)
class FormatExtensionsTest(unittest.TestCase):
def test_format_extensions(self):
for formatted_string, definitions in _TEST_TOKEN_EXTENSION_DATA:
extensions = []
for definition in definitions:
(name, parameters) = definition
extension = ExtensionParameter(name)
extension._parameters = parameters
extensions.append(extension)
self.assertEqual(
formatted_string, format_extensions(extensions))
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
|
mpl-2.0
| -2,159,104,301,209,942,300 | 9,008,040,115,167,891,000 | 36.946809 | 78 | 0.600505 | false |
eayunstack/fuel-ostf
|
fuel_health/common/facts.py
|
2
|
2308
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import yaml
from fuel_health.common import log as logging
LOG = logging.getLogger(__name__)
class Facts:
__default_config_path = '/var/lib/puppet/yaml/facts/'
def __init__(self, config=None):
_config_path = config or self.__default_config_path
self.config = self._read_config(_config_path)
@property
def amqp(self):
_amqp = self._get_rabbit_data() or self._get_qpid_data()
return _amqp
@property
def amqp_user(self):
return 'nova'
@property
def amqp_password(self):
return self.amqp['password']
    def _read_config(self, path):
        _file = None
        for fact_file in os.listdir(path):
            if fact_file.endswith('.yaml'):
                _file = fact_file
                break
        if _file is None:
            raise IOError('no facts .yaml file found in %s' % path)
        _file = open(os.path.join(path, _file))
        self._init_parser()
        data = yaml.load(_file)
        _file.close()
        return data
def _get_rabbit_data(self):
try:
return self.config['values']['rabbit']
except KeyError:
return None
def _get_qpid_data(self):
try:
return self.config['values']['qpid']
except KeyError:
return None
def _init_parser(self):
# Custom YAML constructs for ruby objects for puppet files parsing
def _construct_ruby_object(loader, suffix, node):
return loader.construct_yaml_map(node)
def _construct_ruby_sym(loader, suffix, node):
return loader.construct_yaml_str(node)
yaml.add_multi_constructor(u"!ruby/object:", _construct_ruby_object)
yaml.add_multi_constructor(u"!ruby/sym", _construct_ruby_sym)
|
apache-2.0
| 4,022,788,442,097,678,000 | 976,668,015,615,768,300 | 28.21519 | 76 | 0.630849 | false |
Geeglee/scrapy
|
tests/test_selector_csstranslator.py
|
59
|
6026
|
"""
Selector tests for cssselect backend
"""
from twisted.trial import unittest
from scrapy.http import HtmlResponse
from scrapy.selector.csstranslator import ScrapyHTMLTranslator
from scrapy.selector import Selector
from cssselect.parser import SelectorSyntaxError
from cssselect.xpath import ExpressionError
HTMLBODY = '''
<html>
<body>
<div>
<a id="name-anchor" name="foo"></a>
<a id="tag-anchor" rel="tag" href="http://localhost/foo">link</a>
<a id="nofollow-anchor" rel="nofollow" href="https://example.org"> link</a>
<p id="paragraph">
lorem ipsum text
<b id="p-b">hi</b> <em id="p-em">there</em>
<b id="p-b2">guy</b>
<input type="checkbox" id="checkbox-unchecked" />
<input type="checkbox" id="checkbox-disabled" disabled="" />
<input type="text" id="text-checked" checked="checked" />
<input type="hidden" />
<input type="hidden" disabled="disabled" />
<input type="checkbox" id="checkbox-checked" checked="checked" />
<input type="checkbox" id="checkbox-disabled-checked"
disabled="disabled" checked="checked" />
<fieldset id="fieldset" disabled="disabled">
<input type="checkbox" id="checkbox-fieldset-disabled" />
<input type="hidden" />
</fieldset>
</p>
<map name="dummymap">
<area shape="circle" coords="200,250,25" href="foo.html" id="area-href" />
<area shape="default" id="area-nohref" />
</map>
</div>
<div class="cool-footer" id="foobar-div" foobar="ab bc cde">
<span id="foobar-span">foo ter</span>
</div>
</body></html>
'''
class TranslatorMixinTest(unittest.TestCase):
tr_cls = ScrapyHTMLTranslator
def setUp(self):
self.tr = self.tr_cls()
self.c2x = self.tr.css_to_xpath
def test_attr_function(self):
cases = [
('::attr(name)', u'descendant-or-self::*/@name'),
('a::attr(href)', u'descendant-or-self::a/@href'),
('a ::attr(img)', u'descendant-or-self::a/descendant-or-self::*/@img'),
('a > ::attr(class)', u'descendant-or-self::a/*/@class'),
]
for css, xpath in cases:
self.assertEqual(self.c2x(css), xpath, css)
def test_attr_function_exception(self):
cases = [
('::attr(12)', ExpressionError),
('::attr(34test)', ExpressionError),
('::attr(@href)', SelectorSyntaxError),
]
for css, exc in cases:
self.assertRaises(exc, self.c2x, css)
def test_text_pseudo_element(self):
cases = [
('::text', u'descendant-or-self::text()'),
('p::text', u'descendant-or-self::p/text()'),
('p ::text', u'descendant-or-self::p/descendant-or-self::text()'),
('#id::text', u"descendant-or-self::*[@id = 'id']/text()"),
('p#id::text', u"descendant-or-self::p[@id = 'id']/text()"),
('p#id ::text', u"descendant-or-self::p[@id = 'id']/descendant-or-self::text()"),
('p#id > ::text', u"descendant-or-self::p[@id = 'id']/*/text()"),
('p#id ~ ::text', u"descendant-or-self::p[@id = 'id']/following-sibling::*/text()"),
('a[href]::text', u'descendant-or-self::a[@href]/text()'),
('a[href] ::text', u'descendant-or-self::a[@href]/descendant-or-self::text()'),
('p::text, a::text', u"descendant-or-self::p/text() | descendant-or-self::a/text()"),
]
for css, xpath in cases:
self.assertEqual(self.c2x(css), xpath, css)
def test_pseudo_function_exception(self):
cases = [
('::attribute(12)', ExpressionError),
('::text()', ExpressionError),
('::attr(@href)', SelectorSyntaxError),
]
for css, exc in cases:
self.assertRaises(exc, self.c2x, css)
def test_unknown_pseudo_element(self):
cases = [
('::text-node', ExpressionError),
]
for css, exc in cases:
self.assertRaises(exc, self.c2x, css)
def test_unknown_pseudo_class(self):
cases = [
(':text', ExpressionError),
(':attribute(name)', ExpressionError),
]
for css, exc in cases:
self.assertRaises(exc, self.c2x, css)
class CSSSelectorTest(unittest.TestCase):
sscls = Selector
def setUp(self):
self.htmlresponse = HtmlResponse('http://example.com', body=HTMLBODY)
self.sel = self.sscls(self.htmlresponse)
def x(self, *a, **kw):
return [v.strip() for v in self.sel.css(*a, **kw).extract() if v.strip()]
def test_selector_simple(self):
for x in self.sel.css('input'):
self.assertTrue(isinstance(x, self.sel.__class__), x)
self.assertEqual(self.sel.css('input').extract(),
[x.extract() for x in self.sel.css('input')])
def test_text_pseudo_element(self):
self.assertEqual(self.x('#p-b2'), [u'<b id="p-b2">guy</b>'])
self.assertEqual(self.x('#p-b2::text'), [u'guy'])
self.assertEqual(self.x('#p-b2 ::text'), [u'guy'])
self.assertEqual(self.x('#paragraph::text'), [u'lorem ipsum text'])
self.assertEqual(self.x('#paragraph ::text'), [u'lorem ipsum text', u'hi', u'there', u'guy'])
self.assertEqual(self.x('p::text'), [u'lorem ipsum text'])
self.assertEqual(self.x('p ::text'), [u'lorem ipsum text', u'hi', u'there', u'guy'])
def test_attribute_function(self):
self.assertEqual(self.x('#p-b2::attr(id)'), [u'p-b2'])
self.assertEqual(self.x('.cool-footer::attr(class)'), [u'cool-footer'])
self.assertEqual(self.x('.cool-footer ::attr(id)'), [u'foobar-div', u'foobar-span'])
self.assertEqual(self.x('map[name="dummymap"] ::attr(shape)'), [u'circle', u'default'])
def test_nested_selector(self):
self.assertEqual(self.sel.css('p').css('b::text').extract(),
[u'hi', u'guy'])
self.assertEqual(self.sel.css('div').css('area:last-child').extract(),
[u'<area shape="default" id="area-nohref">'])
|
bsd-3-clause
| 1,883,563,559,471,644,700 | 2,281,182,248,955,631,000 | 38.385621 | 101 | 0.575672 | false |
mitsuhiko/click
|
src/click/types.py
|
1
|
30864
|
import os
import stat
from datetime import datetime
from ._compat import _get_argv_encoding
from ._compat import filename_to_ui
from ._compat import get_filesystem_encoding
from ._compat import get_strerror
from ._compat import open_stream
from .exceptions import BadParameter
from .utils import LazyFile
from .utils import safecall
class ParamType:
"""Represents the type of a parameter. Validates and converts values
from the command line or Python into the correct type.
To implement a custom type, subclass and implement at least the
following:
- The :attr:`name` class attribute must be set.
- Calling an instance of the type with ``None`` must return
``None``. This is already implemented by default.
- :meth:`convert` must convert string values to the correct type.
- :meth:`convert` must accept values that are already the correct
type.
- It must be able to convert a value if the ``ctx`` and ``param``
arguments are ``None``. This can occur when converting prompt
input.
"""
is_composite = False
#: the descriptive name of this type
name = None
#: if a list of this type is expected and the value is pulled from a
#: string environment variable, this is what splits it up. `None`
#: means any whitespace. For all parameters the general rule is that
#: whitespace splits them up. The exception are paths and files which
#: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on
#: Windows).
envvar_list_splitter = None
def to_info_dict(self):
"""Gather information that could be useful for a tool generating
user-facing documentation.
Use :meth:`click.Context.to_info_dict` to traverse the entire
CLI structure.
.. versionadded:: 8.0
"""
# The class name without the "ParamType" suffix.
param_type = type(self).__name__.partition("ParamType")[0]
param_type = param_type.partition("ParameterType")[0]
return {"param_type": param_type, "name": self.name}
def __call__(self, value, param=None, ctx=None):
if value is not None:
return self.convert(value, param, ctx)
def get_metavar(self, param):
"""Returns the metavar default for this param if it provides one."""
def get_missing_message(self, param):
"""Optionally might return extra information about a missing
parameter.
.. versionadded:: 2.0
"""
def convert(self, value, param, ctx):
"""Convert the value to the correct type. This is not called if
the value is ``None`` (the missing value).
This must accept string values from the command line, as well as
values that are already the correct type. It may also convert
other compatible types.
The ``param`` and ``ctx`` arguments may be ``None`` in certain
situations, such as when converting prompt input.
If the value cannot be converted, call :meth:`fail` with a
descriptive message.
:param value: The value to convert.
:param param: The parameter that is using this type to convert
its value. May be ``None``.
:param ctx: The current context that arrived at this value. May
be ``None``.
"""
return value
def split_envvar_value(self, rv):
"""Given a value from an environment variable this splits it up
into small chunks depending on the defined envvar list splitter.
If the splitter is set to `None`, which means that whitespace splits,
then leading and trailing whitespace is ignored. Otherwise, leading
and trailing splitters usually lead to empty items being included.
"""
return (rv or "").split(self.envvar_list_splitter)
def fail(self, message, param=None, ctx=None):
"""Helper method to fail with an invalid value message."""
raise BadParameter(message, ctx=ctx, param=param)
def shell_complete(self, ctx, param, incomplete):
"""Return a list of
:class:`~click.shell_completion.CompletionItem` objects for the
incomplete value. Most types do not provide completions, but
some do, and this allows custom types to provide custom
completions as well.
:param ctx: Invocation context for this command.
:param param: The parameter that is requesting completion.
:param incomplete: Value being completed. May be empty.
.. versionadded:: 8.0
"""
return []
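# A minimal custom type sketch following the contract described above
# (hedged illustration only, not part of click's public type set):
#
#     class CommaSeparated(ParamType):
#         name = "comma-separated"
#
#         def convert(self, value, param, ctx):
#             if isinstance(value, list):
#                 return value  # already the correct type
#             return [item.strip() for item in value.split(",")]
#
#     CommaSeparated()("a, b,c")  # -> ['a', 'b', 'c']
#     CommaSeparated()(None)      # -> None (handled by __call__)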
class CompositeParamType(ParamType):
is_composite = True
@property
def arity(self):
raise NotImplementedError()
class FuncParamType(ParamType):
def __init__(self, func):
self.name = func.__name__
self.func = func
def to_info_dict(self):
info_dict = super().to_info_dict()
info_dict["func"] = self.func
return info_dict
def convert(self, value, param, ctx):
try:
return self.func(value)
except ValueError:
try:
value = str(value)
except UnicodeError:
value = value.decode("utf-8", "replace")
self.fail(value, param, ctx)
class UnprocessedParamType(ParamType):
name = "text"
def convert(self, value, param, ctx):
return value
def __repr__(self):
return "UNPROCESSED"
class StringParamType(ParamType):
name = "text"
def convert(self, value, param, ctx):
if isinstance(value, bytes):
enc = _get_argv_encoding()
try:
value = value.decode(enc)
except UnicodeError:
fs_enc = get_filesystem_encoding()
if fs_enc != enc:
try:
value = value.decode(fs_enc)
except UnicodeError:
value = value.decode("utf-8", "replace")
else:
value = value.decode("utf-8", "replace")
return value
return str(value)
def __repr__(self):
return "STRING"
class Choice(ParamType):
"""The choice type allows a value to be checked against a fixed set
of supported values. All of these values have to be strings.
You should only pass a list or tuple of choices. Other iterables
(like generators) may lead to surprising results.
The resulting value will always be one of the originally passed choices
regardless of ``case_sensitive`` or any ``ctx.token_normalize_func``
being specified.
See :ref:`choice-opts` for an example.
:param case_sensitive: Set to false to make choices case
insensitive. Defaults to true.
"""
name = "choice"
def __init__(self, choices, case_sensitive=True):
self.choices = choices
self.case_sensitive = case_sensitive
def to_info_dict(self):
info_dict = super().to_info_dict()
info_dict["choices"] = self.choices
info_dict["case_sensitive"] = self.case_sensitive
return info_dict
def get_metavar(self, param):
choices_str = "|".join(self.choices)
# Use curly braces to indicate a required argument.
if param.required and param.param_type_name == "argument":
return f"{{{choices_str}}}"
        # Use square brackets to indicate an option or optional argument.
return f"[{choices_str}]"
def get_missing_message(self, param):
choice_str = ",\n\t".join(self.choices)
return f"Choose from:\n\t{choice_str}"
def convert(self, value, param, ctx):
# Match through normalization and case sensitivity
# first do token_normalize_func, then lowercase
# preserve original `value` to produce an accurate message in
# `self.fail`
normed_value = value
normed_choices = {choice: choice for choice in self.choices}
if ctx is not None and ctx.token_normalize_func is not None:
normed_value = ctx.token_normalize_func(value)
normed_choices = {
ctx.token_normalize_func(normed_choice): original
for normed_choice, original in normed_choices.items()
}
if not self.case_sensitive:
normed_value = normed_value.casefold()
normed_choices = {
normed_choice.casefold(): original
for normed_choice, original in normed_choices.items()
}
if normed_value in normed_choices:
return normed_choices[normed_value]
one_of = "one of " if len(self.choices) > 1 else ""
choices_str = ", ".join(repr(c) for c in self.choices)
self.fail(f"{value!r} is not {one_of}{choices_str}.", param, ctx)
def __repr__(self):
return f"Choice({list(self.choices)})"
def shell_complete(self, ctx, param, incomplete):
"""Complete choices that start with the incomplete value.
:param ctx: Invocation context for this command.
:param param: The parameter that is requesting completion.
:param incomplete: Value being completed. May be empty.
.. versionadded:: 8.0
"""
from click.shell_completion import CompletionItem
str_choices = map(str, self.choices)
if self.case_sensitive:
matched = (c for c in str_choices if c.startswith(incomplete))
else:
incomplete = incomplete.lower()
matched = (c for c in str_choices if c.lower().startswith(incomplete))
return [CompletionItem(c) for c in matched]
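# Usage sketch: the returned value is always one of the original choices;
# param and ctx are omitted here, which convert() explicitly allows:
#
#     hash_type = Choice(["md5", "sha1"], case_sensitive=False)
#     hash_type("MD5")    # -> 'md5'
#     hash_type("crc32")  # raises BadParameter via self.fail()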
class DateTime(ParamType):
"""The DateTime type converts date strings into `datetime` objects.
The format strings which are checked are configurable, but default to some
common (non-timezone aware) ISO 8601 formats.
When specifying *DateTime* formats, you should only pass a list or a tuple.
Other iterables, like generators, may lead to surprising results.
The format strings are processed using ``datetime.strptime``, and this
consequently defines the format strings which are allowed.
Parsing is tried using each format, in order, and the first format which
parses successfully is used.
:param formats: A list or tuple of date format strings, in the order in
which they should be tried. Defaults to
``'%Y-%m-%d'``, ``'%Y-%m-%dT%H:%M:%S'``,
``'%Y-%m-%d %H:%M:%S'``.
"""
name = "datetime"
def __init__(self, formats=None):
self.formats = formats or ["%Y-%m-%d", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S"]
def to_info_dict(self):
info_dict = super().to_info_dict()
info_dict["formats"] = self.formats
return info_dict
def get_metavar(self, param):
return f"[{'|'.join(self.formats)}]"
def _try_to_convert_date(self, value, format):
try:
return datetime.strptime(value, format)
except ValueError:
return None
def convert(self, value, param, ctx):
if isinstance(value, datetime):
return value
for format in self.formats:
converted = self._try_to_convert_date(value, format)
if converted is not None:
return converted
plural = "s" if len(self.formats) > 1 else ""
formats_str = ", ".join(repr(f) for f in self.formats)
self.fail(
f"{value!r} does not match the format{plural} {formats_str}.", param, ctx
)
def __repr__(self):
return "DateTime"
class _NumberParamTypeBase(ParamType):
_number_class = None
def convert(self, value, param, ctx):
try:
return self._number_class(value)
except ValueError:
self.fail(f"{value!r} is not a valid {self.name}.", param, ctx)
class _NumberRangeBase(_NumberParamTypeBase):
def __init__(self, min=None, max=None, min_open=False, max_open=False, clamp=False):
self.min = min
self.max = max
self.min_open = min_open
self.max_open = max_open
self.clamp = clamp
def to_info_dict(self):
info_dict = super().to_info_dict()
info_dict.update(
min=self.min,
max=self.max,
min_open=self.min_open,
max_open=self.max_open,
clamp=self.clamp,
)
return info_dict
def convert(self, value, param, ctx):
import operator
rv = super().convert(value, param, ctx)
lt_min = self.min is not None and (
operator.le if self.min_open else operator.lt
)(rv, self.min)
gt_max = self.max is not None and (
operator.ge if self.max_open else operator.gt
)(rv, self.max)
if self.clamp:
if lt_min:
return self._clamp(self.min, 1, self.min_open)
if gt_max:
return self._clamp(self.max, -1, self.max_open)
if lt_min or gt_max:
self.fail(f"{rv} is not in the range {self._describe_range()}.", param, ctx)
return rv
def _clamp(self, bound, dir, open):
"""Find the valid value to clamp to bound in the given
direction.
:param bound: The boundary value.
:param dir: 1 or -1 indicating the direction to move.
:param open: If true, the range does not include the bound.
"""
raise NotImplementedError
def _describe_range(self):
"""Describe the range for use in help text."""
if self.min is None:
op = "<" if self.max_open else "<="
return f"x{op}{self.max}"
if self.max is None:
op = ">" if self.min_open else ">="
return f"x{op}{self.min}"
lop = "<" if self.min_open else "<="
rop = "<" if self.max_open else "<="
return f"{self.min}{lop}x{rop}{self.max}"
def __repr__(self):
clamp = " clamped" if self.clamp else ""
return f"<{type(self).__name__} {self._describe_range()}{clamp}>"
class IntParamType(_NumberParamTypeBase):
name = "integer"
_number_class = int
def __repr__(self):
return "INT"
class IntRange(_NumberRangeBase, IntParamType):
"""Restrict an :data:`click.INT` value to a range of accepted
values. See :ref:`ranges`.
If ``min`` or ``max`` are not passed, any value is accepted in that
direction. If ``min_open`` or ``max_open`` are enabled, the
corresponding boundary is not included in the range.
If ``clamp`` is enabled, a value outside the range is clamped to the
boundary instead of failing.
.. versionchanged:: 8.0
Added the ``min_open`` and ``max_open`` parameters.
"""
name = "integer range"
def _clamp(self, bound, dir, open):
if not open:
return bound
return bound + dir
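# Usage sketch for the clamping and open-bound behaviour:
#
#     IntRange(0, 10, clamp=True)(11)                 # -> 10 (clamped)
#     IntRange(0, 10, max_open=True)(10)              # fails: not in 0<=x<10
#     IntRange(0, 10, max_open=True, clamp=True)(42)  # -> 9 (bound + dir)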
class FloatParamType(_NumberParamTypeBase):
name = "float"
_number_class = float
def __repr__(self):
return "FLOAT"
class FloatRange(_NumberRangeBase, FloatParamType):
"""Restrict a :data:`click.FLOAT` value to a range of accepted
values. See :ref:`ranges`.
If ``min`` or ``max`` are not passed, any value is accepted in that
direction. If ``min_open`` or ``max_open`` are enabled, the
corresponding boundary is not included in the range.
If ``clamp`` is enabled, a value outside the range is clamped to the
boundary instead of failing. This is not supported if either
boundary is marked ``open``.
.. versionchanged:: 8.0
Added the ``min_open`` and ``max_open`` parameters.
"""
name = "float range"
def __init__(self, min=None, max=None, min_open=False, max_open=False, clamp=False):
super().__init__(
min=min, max=max, min_open=min_open, max_open=max_open, clamp=clamp
)
if (min_open or max_open) and clamp:
raise TypeError("Clamping is not supported for open bounds.")
def _clamp(self, bound, dir, open):
if not open:
return bound
# Could use Python 3.9's math.nextafter here, but clamping an
# open float range doesn't seem to be particularly useful. It's
# left up to the user to write a callback to do it if needed.
raise RuntimeError("Clamping is not supported for open bounds.")
class BoolParamType(ParamType):
name = "boolean"
def convert(self, value, param, ctx):
if value in {False, True}:
return bool(value)
norm = value.strip().lower()
if norm in {"1", "true", "t", "yes", "y", "on"}:
return True
if norm in {"0", "false", "f", "no", "n", "off"}:
return False
self.fail(f"{value!r} is not a valid boolean.", param, ctx)
def __repr__(self):
return "BOOL"
class UUIDParameterType(ParamType):
name = "uuid"
def convert(self, value, param, ctx):
import uuid
if isinstance(value, uuid.UUID):
return value
value = value.strip()
try:
return uuid.UUID(value)
except ValueError:
self.fail(f"{value!r} is not a valid UUID.", param, ctx)
def __repr__(self):
return "UUID"
class File(ParamType):
"""Declares a parameter to be a file for reading or writing. The file
is automatically closed once the context tears down (after the command
finished working).
Files can be opened for reading or writing. The special value ``-``
indicates stdin or stdout depending on the mode.
By default, the file is opened for reading text data, but it can also be
opened in binary mode or for writing. The encoding parameter can be used
to force a specific encoding.
The `lazy` flag controls if the file should be opened immediately or upon
first IO. The default is to be non-lazy for standard input and output
streams as well as files opened for reading, `lazy` otherwise. When opening a
file lazily for reading, it is still opened temporarily for validation, but
will not be held open until first IO. lazy is mainly useful when opening
for writing to avoid creating the file until it is needed.
Starting with Click 2.0, files can also be opened atomically in which
case all writes go into a separate file in the same folder and upon
completion the file will be moved over to the original location. This
is useful if a file regularly read by other users is modified.
See :ref:`file-args` for more information.
"""
name = "filename"
envvar_list_splitter = os.path.pathsep
def __init__(
self, mode="r", encoding=None, errors="strict", lazy=None, atomic=False
):
self.mode = mode
self.encoding = encoding
self.errors = errors
self.lazy = lazy
self.atomic = atomic
def to_info_dict(self):
info_dict = super().to_info_dict()
info_dict.update(mode=self.mode, encoding=self.encoding)
return info_dict
def resolve_lazy_flag(self, value):
if self.lazy is not None:
return self.lazy
if value == "-":
return False
elif "w" in self.mode:
return True
return False
def convert(self, value, param, ctx):
try:
if hasattr(value, "read") or hasattr(value, "write"):
return value
lazy = self.resolve_lazy_flag(value)
if lazy:
f = LazyFile(
value, self.mode, self.encoding, self.errors, atomic=self.atomic
)
if ctx is not None:
ctx.call_on_close(f.close_intelligently)
return f
f, should_close = open_stream(
value, self.mode, self.encoding, self.errors, atomic=self.atomic
)
# If a context is provided, we automatically close the file
# at the end of the context execution (or flush out). If a
# context does not exist, it's the caller's responsibility to
# properly close the file. This for instance happens when the
# type is used with prompts.
if ctx is not None:
if should_close:
ctx.call_on_close(safecall(f.close))
else:
ctx.call_on_close(safecall(f.flush))
return f
except OSError as e: # noqa: B014
self.fail(f"{filename_to_ui(value)!r}: {get_strerror(e)}", param, ctx)
def shell_complete(self, ctx, param, incomplete):
"""Return a special completion marker that tells the completion
system to use the shell to provide file path completions.
:param ctx: Invocation context for this command.
:param param: The parameter that is requesting completion.
:param incomplete: Value being completed. May be empty.
.. versionadded:: 8.0
"""
from click.shell_completion import CompletionItem
return [CompletionItem(incomplete, type="file")]
class Path(ParamType):
"""The path type is similar to the :class:`File` type but it performs
different checks. First of all, instead of returning an open file
handle it returns just the filename. Secondly, it can perform various
basic checks about what the file or directory should be.
.. versionchanged:: 6.0
`allow_dash` was added.
:param exists: if set to true, the file or directory needs to exist for
this value to be valid. If this is not required and a
file does indeed not exist, then all further checks are
silently skipped.
:param file_okay: controls if a file is a possible value.
:param dir_okay: controls if a directory is a possible value.
:param writable: if true, a writable check is performed.
:param readable: if true, a readable check is performed.
:param resolve_path: if this is true, then the path is fully resolved
before the value is passed onwards. This means
that it's absolute and symlinks are resolved. It
will not expand a tilde-prefix, as this is
supposed to be done by the shell only.
:param allow_dash: If this is set to `True`, a single dash to indicate
standard streams is permitted.
:param path_type: optionally a string type that should be used to
represent the path. The default is `None` which
means the return value will be either bytes or
unicode depending on what makes most sense given the
input data Click deals with.
"""
envvar_list_splitter = os.path.pathsep
def __init__(
self,
exists=False,
file_okay=True,
dir_okay=True,
writable=False,
readable=True,
resolve_path=False,
allow_dash=False,
path_type=None,
):
self.exists = exists
self.file_okay = file_okay
self.dir_okay = dir_okay
self.writable = writable
self.readable = readable
self.resolve_path = resolve_path
self.allow_dash = allow_dash
self.type = path_type
if self.file_okay and not self.dir_okay:
self.name = "file"
self.path_type = "File"
elif self.dir_okay and not self.file_okay:
self.name = "directory"
self.path_type = "Directory"
else:
self.name = "path"
self.path_type = "Path"
def to_info_dict(self):
info_dict = super().to_info_dict()
info_dict.update(
exists=self.exists,
file_okay=self.file_okay,
dir_okay=self.dir_okay,
writable=self.writable,
readable=self.readable,
allow_dash=self.allow_dash,
)
return info_dict
def coerce_path_result(self, rv):
if self.type is not None and not isinstance(rv, self.type):
if self.type is str:
rv = rv.decode(get_filesystem_encoding())
else:
rv = rv.encode(get_filesystem_encoding())
return rv
def convert(self, value, param, ctx):
rv = value
is_dash = self.file_okay and self.allow_dash and rv in (b"-", "-")
if not is_dash:
if self.resolve_path:
rv = os.path.realpath(rv)
try:
st = os.stat(rv)
except OSError:
if not self.exists:
return self.coerce_path_result(rv)
self.fail(
f"{self.path_type} {filename_to_ui(value)!r} does not exist.",
param,
ctx,
)
if not self.file_okay and stat.S_ISREG(st.st_mode):
self.fail(
f"{self.path_type} {filename_to_ui(value)!r} is a file.",
param,
ctx,
)
if not self.dir_okay and stat.S_ISDIR(st.st_mode):
self.fail(
f"{self.path_type} {filename_to_ui(value)!r} is a directory.",
param,
ctx,
)
if self.writable and not os.access(value, os.W_OK):
self.fail(
f"{self.path_type} {filename_to_ui(value)!r} is not writable.",
param,
ctx,
)
if self.readable and not os.access(value, os.R_OK):
self.fail(
f"{self.path_type} {filename_to_ui(value)!r} is not readable.",
param,
ctx,
)
return self.coerce_path_result(rv)
def shell_complete(self, ctx, param, incomplete):
"""Return a special completion marker that tells the completion
system to use the shell to provide path completions for only
directories or any paths.
:param ctx: Invocation context for this command.
:param param: The parameter that is requesting completion.
:param incomplete: Value being completed. May be empty.
.. versionadded:: 8.0
"""
from click.shell_completion import CompletionItem
type = "dir" if self.dir_okay and not self.file_okay else "file"
return [CompletionItem(incomplete, type=type)]
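# Usage sketch (hedged: paths are illustrative):
#
#     Path(exists=True, dir_okay=False)("setup.py")   # -> "setup.py" if a file
#     Path(exists=True, file_okay=False)("setup.py")  # fails: is a file
#     Path(resolve_path=True)("missing/file")         # ok: exists=False skips checks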
class Tuple(CompositeParamType):
"""The default behavior of Click is to apply a type on a value directly.
This works well in most cases, except for when `nargs` is set to a fixed
count and different types should be used for different items. In this
case the :class:`Tuple` type can be used. This type can only be used
if `nargs` is set to a fixed number.
For more information see :ref:`tuple-type`.
This can be selected by using a Python tuple literal as a type.
:param types: a list of types that should be used for the tuple items.
"""
def __init__(self, types):
self.types = [convert_type(ty) for ty in types]
def to_info_dict(self):
info_dict = super().to_info_dict()
info_dict["types"] = [t.to_info_dict() for t in self.types]
return info_dict
@property
def name(self):
return f"<{' '.join(ty.name for ty in self.types)}>"
@property
def arity(self):
return len(self.types)
def convert(self, value, param, ctx):
if len(value) != len(self.types):
raise TypeError(
"It would appear that nargs is set to conflict with the"
" composite type arity."
)
return tuple(ty(x, param, ctx) for ty, x in zip(self.types, value))
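# Usage sketch: with nargs=2 and type=(str, int) on a parameter, click builds
# Tuple([str, int]) via convert_type() below; each item is converted by its
# own type:
#
#     Tuple([str, int])(("x", "42"))  # -> ('x', 42)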
def convert_type(ty, default=None):
"""Find the most appropriate :class:`ParamType` for the given Python
type. If the type isn't provided, it can be inferred from a default
value.
"""
guessed_type = False
if ty is None and default is not None:
if isinstance(default, (tuple, list)):
# If the default is empty, ty will remain None and will
# return STRING.
if default:
item = default[0]
# A tuple of tuples needs to detect the inner types.
# Can't call convert recursively because that would
# incorrectly unwind the tuple to a single type.
if isinstance(item, (tuple, list)):
ty = tuple(map(type, item))
else:
ty = type(item)
else:
ty = type(default)
guessed_type = True
if isinstance(ty, tuple):
return Tuple(ty)
if isinstance(ty, ParamType):
return ty
if ty is str or ty is None:
return STRING
if ty is int:
return INT
if ty is float:
return FLOAT
# Booleans are only okay if not guessed. For is_flag options with
# flag_value, default=True indicates which flag_value is the
# default.
if ty is bool and not guessed_type:
return BOOL
if guessed_type:
return STRING
if __debug__:
try:
if issubclass(ty, ParamType):
raise AssertionError(
f"Attempted to use an uninstantiated parameter type ({ty})."
)
except TypeError:
# ty is an instance (correct), so issubclass fails.
pass
return FuncParamType(ty)
#: A dummy parameter type that just does nothing. From a user's
#: perspective this appears to just be the same as `STRING` but
#: internally no string conversion takes place if the input was bytes.
#: This is usually useful when working with file paths as they can
#: appear in bytes and unicode.
#:
#: For path related uses the :class:`Path` type is a better choice but
#: there are situations where an unprocessed type is useful which is why
#: it is provided.
#:
#: .. versionadded:: 4.0
UNPROCESSED = UnprocessedParamType()
#: A unicode string parameter type which is the implicit default. This
#: can also be selected by using ``str`` as type.
STRING = StringParamType()
#: An integer parameter. This can also be selected by using ``int`` as
#: type.
INT = IntParamType()
#: A floating point value parameter. This can also be selected by using
#: ``float`` as type.
FLOAT = FloatParamType()
#: A boolean parameter. This is the default for boolean flags. This can
#: also be selected by using ``bool`` as a type.
BOOL = BoolParamType()
#: A UUID parameter.
UUID = UUIDParameterType()
|
bsd-3-clause
| -3,319,765,033,171,836,400 | -8,709,285,369,673,033,000 | 32.402597 | 88 | 0.596099 | false |
nox/servo
|
tests/wpt/web-platform-tests/XMLHttpRequest/resources/authentication.py
|
247
|
1292
|
def main(request, response):
if "logout" in request.GET:
return ((401, "Unauthorized"),
[("WWW-Authenticate", 'Basic realm="test"')],
"Logged out, hopefully")
session_user = request.auth.username
session_pass = request.auth.password
expected_user_name = request.headers.get("X-User", None)
token = expected_user_name
if session_user is None and session_pass is None:
if token is not None and request.server.stash.take(token) is not None:
return 'FAIL (did not authorize)'
else:
if token is not None:
request.server.stash.put(token, "1")
status = (401, 'Unauthorized')
headers = [('WWW-Authenticate', 'Basic realm="test"'),
('XHR-USER', expected_user_name),
('SES-USER', session_user)]
return status, headers, 'FAIL (should be transparent)'
else:
if request.server.stash.take(token) == "1":
challenge = "DID"
else:
challenge = "DID-NOT"
headers = [('XHR-USER', expected_user_name),
('SES-USER', session_user),
("X-challenge", challenge)]
        return headers, session_user + "\n" + session_pass
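# Hedged flow sketch (the wpt test harness drives these requests):
# 1. The XHR under test sends a request carrying an X-User header but no
#    credentials: the token is stashed and a 401 with WWW-Authenticate is
#    returned, triggering the user agent's Basic auth handling.
# 2. The retried request carries Authorization: Basic ...; the stash entry
#    records whether a challenge round-trip happened ("DID" / "DID-NOT") and
#    the body echoes the user name and password actually used.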
|
mpl-2.0
| -1,873,293,436,578,436,000 | -137,674,085,855,750,100 | 39.375 | 78 | 0.547214 | false |
blindFS/powerline
|
powerline/lint/__init__.py
|
9
|
21309
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import logging
from collections import defaultdict
from itertools import chain
from functools import partial
from powerline import generate_config_finder, get_config_paths, load_config
from powerline.segments.vim import vim_modes
from powerline.lib.dict import mergedicts_copy
from powerline.lib.config import ConfigLoader
from powerline.lib.unicode import unicode
from powerline.lib.path import join
from powerline.lint.markedjson import load
from powerline.lint.markedjson.error import echoerr, EchoErr, MarkedError
from powerline.lint.checks import (check_matcher_func, check_ext, check_config, check_top_theme,
check_color, check_translated_group_name, check_group,
check_segment_module, check_exinclude_function, type_keys,
check_segment_function, check_args, get_one_segment_function,
check_highlight_groups, check_highlight_group, check_full_segment_data,
get_all_possible_functions, check_segment_data_key, register_common_name,
highlight_group_spec, check_log_file_level, check_logging_handler)
from powerline.lint.spec import Spec
from powerline.lint.context import Context
def open_file(path):
return open(path, 'rb')
def generate_json_config_loader(lhadproblem):
def load_json_config(config_file_path, load=load, open_file=open_file):
with open_file(config_file_path) as config_file_fp:
r, hadproblem = load(config_file_fp)
if hadproblem:
lhadproblem[0] = True
return r
return load_json_config
function_name_re = r'^(\w+\.)*[a-zA-Z_]\w*$'
divider_spec = Spec().printable().len(
'le', 3, (lambda value: 'Divider {0!r} is too large!'.format(value))).copy
ext_theme_spec = Spec().type(unicode).func(lambda *args: check_config('themes', *args)).copy
top_theme_spec = Spec().type(unicode).func(check_top_theme).copy
ext_spec = Spec(
colorscheme=Spec().type(unicode).func(
(lambda *args: check_config('colorschemes', *args))
),
theme=ext_theme_spec(),
top_theme=top_theme_spec().optional(),
).copy
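# Illustration of the Spec DSL used throughout this module (hedged: the
# validation entry points live in powerline.lint.spec and are not shown
# here). A spec is built by chaining checks, e.g.
#
#     interval_spec = Spec().either(
#         Spec().cmp('gt', 0.0), Spec().type(type(None))
#     ).optional()
#
# accepts either a positive number or None and may be absent; .copy yields
# a factory so the same spec can be instantiated in several places.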
gen_components_spec = (lambda *components: Spec().list(Spec().type(unicode).oneof(set(components))))
log_level_spec = Spec().re('^[A-Z]+$').func(
(lambda value, *args: (True, True, not hasattr(logging, value))),
(lambda value: 'unknown debugging level {0}'.format(value))
).copy
log_format_spec = Spec().type(unicode).copy
main_spec = (Spec(
common=Spec(
default_top_theme=top_theme_spec().optional(),
term_truecolor=Spec().type(bool).optional(),
term_escape_style=Spec().type(unicode).oneof(set(('auto', 'xterm', 'fbterm'))).optional(),
# Python is capable of loading from zip archives. Thus checking path
# only for existence of the path, not for it being a directory
paths=Spec().list(
(lambda value, *args: (True, True, not os.path.exists(os.path.expanduser(value.value)))),
(lambda value: 'path does not exist: {0}'.format(value))
).optional(),
log_file=Spec().either(
Spec().type(unicode).func(
(
lambda value, *args: (
True,
True,
not os.path.isdir(os.path.dirname(os.path.expanduser(value)))
)
),
(lambda value: 'directory does not exist: {0}'.format(os.path.dirname(value)))
),
Spec().list(Spec().either(
Spec().type(unicode, type(None)),
Spec().tuple(
Spec().re(function_name_re).func(check_logging_handler),
Spec().tuple(
Spec().type(list).optional(),
Spec().type(dict).optional(),
),
log_level_spec().func(check_log_file_level).optional(),
log_format_spec().optional(),
),
))
).optional(),
log_level=log_level_spec().optional(),
log_format=log_format_spec().optional(),
interval=Spec().either(Spec().cmp('gt', 0.0), Spec().type(type(None))).optional(),
reload_config=Spec().type(bool).optional(),
watcher=Spec().type(unicode).oneof(set(('auto', 'inotify', 'stat'))).optional(),
).context_message('Error while loading common configuration (key {key})'),
ext=Spec(
vim=ext_spec().update(
components=gen_components_spec('statusline', 'tabline').optional(),
local_themes=Spec(
__tabline__=ext_theme_spec(),
).unknown_spec(
Spec().re(function_name_re).func(partial(check_matcher_func, 'vim')),
ext_theme_spec()
),
).optional(),
ipython=ext_spec().update(
local_themes=Spec(
in2=ext_theme_spec(),
out=ext_theme_spec(),
rewrite=ext_theme_spec(),
),
).optional(),
shell=ext_spec().update(
components=gen_components_spec('tmux', 'prompt').optional(),
local_themes=Spec(
continuation=ext_theme_spec(),
select=ext_theme_spec(),
),
).optional(),
wm=ext_spec().update(
local_themes=Spec().unknown_spec(
Spec().re('^[0-9A-Za-z-]+$'),
ext_theme_spec()
).optional()
).optional(),
).unknown_spec(
check_ext,
ext_spec(),
).context_message('Error while loading extensions configuration (key {key})'),
).context_message('Error while loading main configuration'))
term_color_spec = Spec().unsigned().cmp('le', 255).copy
true_color_spec = Spec().re(
'^[0-9a-fA-F]{6}$',
(lambda value: '"{0}" is not a six-digit hexadecimal unsigned integer written as a string'.format(value))
).copy
colors_spec = (Spec(
colors=Spec().unknown_spec(
Spec().ident(),
Spec().either(
Spec().tuple(term_color_spec(), true_color_spec()),
term_color_spec()
)
).context_message('Error while checking colors (key {key})'),
gradients=Spec().unknown_spec(
Spec().ident(),
Spec().tuple(
Spec().len('gt', 1).list(term_color_spec()),
Spec().len('gt', 1).list(true_color_spec()).optional(),
)
).context_message('Error while checking gradients (key {key})'),
).context_message('Error while loading colors configuration'))
color_spec = Spec().type(unicode).func(check_color).copy
name_spec = Spec().type(unicode).len('gt', 0).optional().copy
group_name_spec = Spec().ident().copy
group_spec = Spec().either(Spec(
fg=color_spec(),
bg=color_spec(),
attrs=Spec().list(Spec().type(unicode).oneof(set(('bold', 'italic', 'underline')))),
), group_name_spec().func(check_group)).copy
groups_spec = Spec().unknown_spec(
group_name_spec(),
group_spec(),
).context_message('Error while loading groups (key {key})').copy
colorscheme_spec = (Spec(
name=name_spec(),
groups=groups_spec(),
).context_message('Error while loading coloscheme'))
mode_translations_value_spec = Spec(
colors=Spec().unknown_spec(
color_spec(),
color_spec(),
).optional(),
groups=Spec().unknown_spec(
group_name_spec().func(check_translated_group_name),
group_spec(),
).optional(),
).copy
top_colorscheme_spec = (Spec(
name=name_spec(),
groups=groups_spec(),
mode_translations=Spec().unknown_spec(
Spec().type(unicode),
mode_translations_value_spec(),
).optional().context_message('Error while loading mode translations (key {key})').optional(),
).context_message('Error while loading top-level coloscheme'))
vim_mode_spec = Spec().oneof(set(list(vim_modes) + ['nc', 'tab_nc', 'buf_nc'])).copy
vim_colorscheme_spec = (Spec(
name=name_spec(),
groups=groups_spec(),
mode_translations=Spec().unknown_spec(
vim_mode_spec(),
mode_translations_value_spec(),
).optional().context_message('Error while loading mode translations (key {key})'),
).context_message('Error while loading vim colorscheme'))
shell_mode_spec = Spec().re(r'^(?:[\w\-]+|\.safe)$').copy
shell_colorscheme_spec = (Spec(
name=name_spec(),
groups=groups_spec(),
mode_translations=Spec().unknown_spec(
shell_mode_spec(),
mode_translations_value_spec(),
).optional().context_message('Error while loading mode translations (key {key})'),
).context_message('Error while loading shell colorscheme'))
args_spec = Spec(
pl=Spec().error('pl object must be set by powerline').optional(),
segment_info=Spec().error('Segment info dictionary must be set by powerline').optional(),
).unknown_spec(Spec(), Spec()).optional().copy
segment_module_spec = Spec().type(unicode).func(check_segment_module).optional().copy
exinclude_spec = Spec().re(function_name_re).func(check_exinclude_function).copy
segment_spec_base = Spec(
	name=Spec().re(r'^[a-zA-Z_]\w*$').optional(),
function=Spec().re(function_name_re).func(check_segment_function).optional(),
exclude_modes=Spec().list(vim_mode_spec()).optional(),
include_modes=Spec().list(vim_mode_spec()).optional(),
exclude_function=exinclude_spec().optional(),
include_function=exinclude_spec().optional(),
draw_hard_divider=Spec().type(bool).optional(),
draw_soft_divider=Spec().type(bool).optional(),
draw_inner_divider=Spec().type(bool).optional(),
display=Spec().type(bool).optional(),
module=segment_module_spec(),
priority=Spec().type(int, float, type(None)).optional(),
after=Spec().printable().optional(),
before=Spec().printable().optional(),
width=Spec().either(Spec().unsigned(), Spec().cmp('eq', 'auto')).optional(),
align=Spec().oneof(set('lr')).optional(),
args=args_spec().func(lambda *args, **kwargs: check_args(get_one_segment_function, *args, **kwargs)),
contents=Spec().printable().optional(),
highlight_groups=Spec().list(
highlight_group_spec().re(
'^(?:(?!:divider$).)+$',
(lambda value: 'it is recommended that only divider highlight group names end with ":divider"')
)
).func(check_highlight_groups).optional(),
divider_highlight_group=highlight_group_spec().func(check_highlight_group).re(
':divider$',
(lambda value: 'it is recommended that divider highlight group names end with ":divider"')
).optional(),
).func(check_full_segment_data).copy
subsegment_spec = segment_spec_base().update(
type=Spec().oneof(set((key for key in type_keys if key != 'segment_list'))).optional(),
)
segment_spec = segment_spec_base().update(
type=Spec().oneof(type_keys).optional(),
segments=Spec().optional().list(subsegment_spec),
)
segments_spec = Spec().optional().list(segment_spec).copy
segdict_spec = Spec(
left=segments_spec().context_message('Error while loading segments from left side (key {key})'),
right=segments_spec().context_message('Error while loading segments from right side (key {key})'),
).func(
(lambda value, *args: (True, True, not (('left' in value) or ('right' in value)))),
(lambda value: 'segments dictionary must contain either left, right or both keys')
).context_message('Error while loading segments (key {key})').copy
divside_spec = Spec(
hard=divider_spec(),
soft=divider_spec(),
).copy
segment_data_value_spec = Spec(
after=Spec().printable().optional(),
before=Spec().printable().optional(),
display=Spec().type(bool).optional(),
args=args_spec().func(lambda *args, **kwargs: check_args(get_all_possible_functions, *args, **kwargs)),
contents=Spec().printable().optional(),
).copy
dividers_spec = Spec(
left=divside_spec(),
right=divside_spec(),
).copy
spaces_spec = Spec().unsigned().cmp(
'le', 2, (lambda value: 'Are you sure you need such a big ({0}) number of spaces?'.format(value))
).copy
common_theme_spec = Spec(
default_module=segment_module_spec().optional(),
cursor_space=Spec().type(int, float).cmp('le', 100).cmp('gt', 0).optional(),
cursor_columns=Spec().type(int).cmp('gt', 0).optional(),
).context_message('Error while loading theme').copy
top_theme_spec = common_theme_spec().update(
dividers=dividers_spec(),
spaces=spaces_spec(),
use_non_breaking_spaces=Spec().type(bool).optional(),
segment_data=Spec().unknown_spec(
Spec().func(check_segment_data_key),
segment_data_value_spec(),
).optional().context_message('Error while loading segment data (key {key})'),
)
main_theme_spec = common_theme_spec().update(
dividers=dividers_spec().optional(),
spaces=spaces_spec().optional(),
segment_data=Spec().unknown_spec(
Spec().func(check_segment_data_key),
segment_data_value_spec(),
).optional().context_message('Error while loading segment data (key {key})'),
)
theme_spec = common_theme_spec().update(
dividers=dividers_spec().optional(),
spaces=spaces_spec().optional(),
segment_data=Spec().unknown_spec(
Spec().func(check_segment_data_key),
segment_data_value_spec(),
).optional().context_message('Error while loading segment data (key {key})'),
segments=segdict_spec().update(above=Spec().list(segdict_spec()).optional()),
)
def register_common_names():
register_common_name('player', 'powerline.segments.common.players', '_player')
def load_json_file(path):
with open_file(path) as F:
try:
config, hadproblem = load(F)
except MarkedError as e:
return True, None, str(e)
else:
return hadproblem, config, None
def updated_with_config(d):
hadproblem, config, error = load_json_file(d['path'])
d.update(
hadproblem=hadproblem,
config=config,
error=error,
)
return d
def find_all_ext_config_files(search_paths, subdir):
for config_root in search_paths:
top_config_subpath = join(config_root, subdir)
if not os.path.isdir(top_config_subpath):
if os.path.exists(top_config_subpath):
yield {
'error': 'Path {0} is not a directory'.format(top_config_subpath),
'path': top_config_subpath,
}
continue
for ext_name in os.listdir(top_config_subpath):
ext_path = os.path.join(top_config_subpath, ext_name)
if not os.path.isdir(ext_path):
if ext_name.endswith('.json') and os.path.isfile(ext_path):
yield updated_with_config({
'error': False,
'path': ext_path,
'name': ext_name[:-5],
'ext': None,
'type': 'top_' + subdir,
})
else:
yield {
'error': 'Path {0} is not a directory or configuration file'.format(ext_path),
'path': ext_path,
}
continue
for config_file_name in os.listdir(ext_path):
config_file_path = os.path.join(ext_path, config_file_name)
if config_file_name.endswith('.json') and os.path.isfile(config_file_path):
yield updated_with_config({
'error': False,
'path': config_file_path,
'name': config_file_name[:-5],
'ext': ext_name,
'type': subdir,
})
else:
yield {
'error': 'Path {0} is not a configuration file'.format(config_file_path),
'path': config_file_path,
}
def dict2(d):
return defaultdict(dict, ((k, dict(v)) for k, v in d.items()))
def check(paths=None, debug=False, echoerr=echoerr, require_ext=None):
'''Check configuration sanity
:param list paths:
Paths from which configuration should be loaded.
	:param bool debug:
		Determines whether some information useful for debugging the linter
		should be output.
:param function echoerr:
Function that will be used to echo the error(s). Should accept four
optional keyword parameters: ``problem`` and ``problem_mark``, and
``context`` and ``context_mark``.
:param str require_ext:
Require configuration for some extension to be present.
:return:
``False`` if user configuration seems to be completely sane and ``True``
if some problems were found.
'''
hadproblem = False
register_common_names()
search_paths = paths or get_config_paths()
find_config_files = generate_config_finder(lambda: search_paths)
logger = logging.getLogger('powerline-lint')
logger.setLevel(logging.DEBUG if debug else logging.ERROR)
logger.addHandler(logging.StreamHandler())
ee = EchoErr(echoerr, logger)
if require_ext:
used_main_spec = main_spec.copy()
try:
used_main_spec['ext'][require_ext].required()
except KeyError:
used_main_spec['ext'][require_ext] = ext_spec()
else:
used_main_spec = main_spec
lhadproblem = [False]
load_json_config = generate_json_config_loader(lhadproblem)
config_loader = ConfigLoader(run_once=True, load=load_json_config)
lists = {
'colorschemes': set(),
'themes': set(),
'exts': set(),
}
found_dir = {
'themes': False,
'colorschemes': False,
}
config_paths = defaultdict(lambda: defaultdict(dict))
loaded_configs = defaultdict(lambda: defaultdict(dict))
for d in chain(
find_all_ext_config_files(search_paths, 'colorschemes'),
find_all_ext_config_files(search_paths, 'themes'),
):
if d['error']:
hadproblem = True
ee(problem=d['error'])
continue
if d['hadproblem']:
hadproblem = True
if d['ext']:
found_dir[d['type']] = True
lists['exts'].add(d['ext'])
if d['name'] == '__main__':
pass
elif d['name'].startswith('__') or d['name'].endswith('__'):
hadproblem = True
ee(problem='File name is not supposed to start or end with “__”: {0}'.format(
d['path']))
else:
lists[d['type']].add(d['name'])
config_paths[d['type']][d['ext']][d['name']] = d['path']
loaded_configs[d['type']][d['ext']][d['name']] = d['config']
else:
config_paths[d['type']][d['name']] = d['path']
loaded_configs[d['type']][d['name']] = d['config']
for typ in ('themes', 'colorschemes'):
if not found_dir[typ]:
hadproblem = True
ee(problem='Subdirectory {0} was not found in paths {1}'.format(typ, ', '.join(search_paths)))
diff = set(config_paths['colorschemes']) - set(config_paths['themes'])
if diff:
hadproblem = True
for ext in diff:
typ = 'colorschemes' if ext in config_paths['themes'] else 'themes'
if not config_paths['top_' + typ] or typ == 'themes':
ee(problem='{0} extension {1} not present in {2}'.format(
ext,
'configuration' if (
ext in loaded_configs['themes'] and ext in loaded_configs['colorschemes']
) else 'directory',
typ,
))
try:
main_config = load_config('config', find_config_files, config_loader)
except IOError:
main_config = {}
ee(problem='Configuration file not found: config.json')
hadproblem = True
except MarkedError as e:
main_config = {}
ee(problem=str(e))
hadproblem = True
else:
if used_main_spec.match(
main_config,
data={'configs': config_paths, 'lists': lists},
context=Context(main_config),
echoerr=ee
)[1]:
hadproblem = True
import_paths = [os.path.expanduser(path) for path in main_config.get('common', {}).get('paths', [])]
try:
colors_config = load_config('colors', find_config_files, config_loader)
except IOError:
colors_config = {}
ee(problem='Configuration file not found: colors.json')
hadproblem = True
except MarkedError as e:
colors_config = {}
ee(problem=str(e))
hadproblem = True
else:
if colors_spec.match(colors_config, context=Context(colors_config), echoerr=ee)[1]:
hadproblem = True
if lhadproblem[0]:
hadproblem = True
top_colorscheme_configs = dict(loaded_configs['top_colorschemes'])
data = {
'ext': None,
'top_colorscheme_configs': top_colorscheme_configs,
'ext_colorscheme_configs': {},
'colors_config': colors_config
}
for colorscheme, config in loaded_configs['top_colorschemes'].items():
data['colorscheme'] = colorscheme
if top_colorscheme_spec.match(config, context=Context(config), data=data, echoerr=ee)[1]:
hadproblem = True
ext_colorscheme_configs = dict2(loaded_configs['colorschemes'])
for ext, econfigs in ext_colorscheme_configs.items():
data = {
'ext': ext,
'top_colorscheme_configs': top_colorscheme_configs,
'ext_colorscheme_configs': ext_colorscheme_configs,
'colors_config': colors_config,
}
for colorscheme, config in econfigs.items():
data['colorscheme'] = colorscheme
if ext == 'vim':
spec = vim_colorscheme_spec
elif ext == 'shell':
spec = shell_colorscheme_spec
else:
spec = colorscheme_spec
if spec.match(config, context=Context(config), data=data, echoerr=ee)[1]:
hadproblem = True
colorscheme_configs = {}
for ext in lists['exts']:
colorscheme_configs[ext] = {}
for colorscheme in lists['colorschemes']:
econfigs = ext_colorscheme_configs[ext]
ecconfigs = econfigs.get(colorscheme)
mconfigs = (
top_colorscheme_configs.get(colorscheme),
econfigs.get('__main__'),
ecconfigs,
)
if not (mconfigs[0] or mconfigs[2]):
continue
config = None
for mconfig in mconfigs:
if not mconfig:
continue
if config:
config = mergedicts_copy(config, mconfig)
else:
config = mconfig
colorscheme_configs[ext][colorscheme] = config
theme_configs = dict2(loaded_configs['themes'])
top_theme_configs = dict(loaded_configs['top_themes'])
for ext, configs in theme_configs.items():
data = {
'ext': ext,
'colorscheme_configs': colorscheme_configs,
'import_paths': import_paths,
'main_config': main_config,
'top_themes': top_theme_configs,
'ext_theme_configs': configs,
'colors_config': colors_config
}
for theme, config in configs.items():
data['theme'] = theme
if theme == '__main__':
data['theme_type'] = 'main'
spec = main_theme_spec
else:
data['theme_type'] = 'regular'
spec = theme_spec
if spec.match(config, context=Context(config), data=data, echoerr=ee)[1]:
hadproblem = True
for top_theme, config in top_theme_configs.items():
data = {
'ext': None,
'colorscheme_configs': colorscheme_configs,
'import_paths': import_paths,
'main_config': main_config,
'theme_configs': theme_configs,
'ext_theme_configs': None,
'colors_config': colors_config
}
data['theme_type'] = 'top'
data['theme'] = top_theme
if top_theme_spec.match(config, context=Context(config), data=data, echoerr=ee)[1]:
hadproblem = True
return hadproblem
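# Minimal usage sketch (added for illustration, not part of the original
# module): run the linter over the default search paths and turn the result
# into a process exit code.
if __name__ == '__main__':
	import sys
	sys.exit(1 if check() else 0)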
|
mit
| 5,603,034,885,208,779,000 | 7,957,658,207,869,762,000 | 33.142628 | 108 | 0.674443 | false |
chenjun0210/tensorflow
|
tensorflow/python/ops/state_ops.py
|
10
|
9619
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variables. See the @{python/state_ops} guide.
@@Variable
@@global_variables
@@local_variables
@@model_variables
@@trainable_variables
@@moving_average_variables
@@global_variables_initializer
@@local_variables_initializer
@@variables_initializer
@@is_variable_initialized
@@report_uninitialized_variables
@@assert_variables_initialized
@@assign
@@assign_add
@@assign_sub
@@Saver
@@latest_checkpoint
@@get_checkpoint_state
@@update_checkpoint_state
@@get_variable
@@get_local_variable
@@VariableScope
@@variable_scope
@@variable_op_scope
@@get_variable_scope
@@make_template
@@no_regularizer
@@constant_initializer
@@random_normal_initializer
@@truncated_normal_initializer
@@random_uniform_initializer
@@uniform_unit_scaling_initializer
@@zeros_initializer
@@ones_initializer
@@orthogonal_initializer
@@fixed_size_partitioner
@@variable_axis_size_partitioner
@@min_max_variable_partitioner
@@scatter_update
@@scatter_add
@@scatter_sub
@@scatter_mul
@@scatter_div
@@scatter_nd_update
@@scatter_nd_add
@@scatter_nd_sub
@@sparse_mask
@@IndexedSlices
@@initialize_all_tables
@@tables_initializer
@@export_meta_graph
@@import_meta_graph
@@all_variables
@@initialize_all_variables
@@initialize_local_variables
@@initialize_variables
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import gen_state_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_state_ops import *
# pylint: enable=wildcard-import
# pylint: disable=protected-access,g-doc-return-or-yield,g-doc-args
def variable_op(shape, dtype, name="Variable", set_shape=True, container="",
shared_name=""):
"""Deprecated. Used variable_op_v2 instead."""
if not set_shape:
shape = tensor_shape.unknown_shape()
ret = gen_state_ops._variable(shape=shape, dtype=dtype, name=name,
container=container, shared_name=shared_name)
# TODO(mrry): Move this to where it is used, so we can get rid of this op
# wrapper?
if set_shape:
ret.set_shape(shape)
return ret
def variable_op_v2(shape, dtype, name="Variable", container="", shared_name=""):
"""Create a variable Operation.
See also variables.Variable.
Args:
shape: The shape of the tensor managed by this variable
dtype: The underlying type of the tensor values.
name: optional name to use for the variable op.
container: An optional string. Defaults to "".
If non-empty, this variable is placed in the given container.
Otherwise, a default container is used.
shared_name: An optional string. Defaults to "".
If non-empty, this variable is named in the given bucket
with this shared_name. Otherwise, the node name is used instead.
Returns:
    A variable tensor.
"""
return gen_state_ops._variable_v2(shape=shape,
dtype=dtype,
name=name,
container=container,
shared_name=shared_name)
def init_variable(v, init, name="init"):
"""Initializes variable with "init".
This op does the following:
if init is a Tensor, v = init
if callable(init): v = init(VariableShape(v), v.dtype)
Args:
v: Variable to initialize
    init: Tensor to assign to v,
      or an object convertible to a Tensor, e.g. an ndarray,
      or an Initializer that generates a tensor given the shape and type of v.
An "Initializer" is a callable that returns a tensor that "v" should be
set to. It will be called as init(shape, dtype).
name: Optional name for the op.
Returns:
The operation that initializes v.
"""
with ops.name_scope(None, v.op.name + "/", [v, init]):
with ops.name_scope(name) as scope:
with ops.colocate_with(v):
if callable(init):
assert v.get_shape().is_fully_defined(), "Variable shape unknown."
# TODO(mrry): Convert to v.shape when the property and
# accessor are reconciled (and all initializers support
# tf.TensorShape objects).
value = init(v.get_shape().as_list(), v.dtype.base_dtype)
value = ops.convert_to_tensor(value, name="value")
return gen_state_ops.assign(v, value, name=scope)
else:
init = ops.convert_to_tensor(init, name="init")
return gen_state_ops.assign(v, init, name=scope)
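# Illustrative sketch (added; not part of the original module) of how the two
# helpers above compose. The `dtypes` import path is an assumption here:
#
#   from tensorflow.python.framework import dtypes
#   v = variable_op_v2([2, 3], dtypes.float32, name="weights")
#   init_op = init_variable(v, lambda shape, dtype: [[0.0] * shape[1]] * shape[0])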
def is_variable_initialized(ref, name=None):
"""Checks whether a tensor has been initialized.
Outputs boolean scalar indicating whether the tensor has been initialized.
Args:
ref: A mutable `Tensor`.
Should be from a `Variable` node. May be uninitialized.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.is_variable_initialized(ref=ref, name=name)
# Handle resource variables.
if ref.op.type == "VarHandleOp":
return gen_resource_variable_ops.var_is_initialized_op(ref.handle,
name=name)
def assign_sub(ref, value, use_locking=None, name=None):
"""Update 'ref' by subtracting 'value' from it.
This operation outputs "ref" after the update is done.
This makes it easier to chain operations that need to use the reset value.
Args:
ref: A mutable `Tensor`. Must be one of the following types:
`float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`,
`int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a `Variable` node.
value: A `Tensor`. Must have the same type as `ref`.
      The value to be subtracted from the variable.
use_locking: An optional `bool`. Defaults to `False`.
If True, the subtraction will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as "ref". Returned as a convenience for operations that want
to use the new value after the variable has been updated.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.assign_sub(
ref, value, use_locking=use_locking, name=name)
return ref.assign_sub(value, name=name)
def assign_add(ref, value, use_locking=None, name=None):
"""Update 'ref' by adding 'value' to it.
This operation outputs "ref" after the update is done.
This makes it easier to chain operations that need to use the reset value.
Args:
ref: A mutable `Tensor`. Must be one of the following types:
`float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`,
`int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a `Variable` node.
value: A `Tensor`. Must have the same type as `ref`.
The value to be added to the variable.
use_locking: An optional `bool`. Defaults to `False`.
If True, the addition will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as "ref". Returned as a convenience for operations that want
to use the new value after the variable has been updated.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.assign_add(
ref, value, use_locking=use_locking, name=name)
return ref.assign_add(value, name=name)
def assign(ref, value, validate_shape=None, use_locking=None, name=None):
"""Update 'ref' by assigning 'value' to it.
This operation outputs a Tensor that holds the new value of 'ref' after
the value has been assigned. This makes it easier to chain operations
that need to use the reset value.
Args:
ref: A mutable `Tensor`.
Should be from a `Variable` node. May be uninitialized.
value: A `Tensor`. Must have the same type as `ref`.
The value to be assigned to the variable.
validate_shape: An optional `bool`. Defaults to `True`.
If true, the operation will validate that the shape
of 'value' matches the shape of the Tensor being assigned to. If false,
'ref' will take on the shape of 'value'.
use_locking: An optional `bool`. Defaults to `True`.
If True, the assignment will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
A `Tensor` that will hold the new value of 'ref' after
the assignment has completed.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.assign(
ref, value, use_locking=use_locking, name=name,
validate_shape=validate_shape)
return ref.assign(value, name=name)
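# Illustrative sketch (added): each op returns the updated "ref", so updates
# can be chained, assuming `dtypes` is imported as in the sketch above:
#
#   v = variable_op_v2([], dtypes.int32, name="counter")
#   a = assign(v, 10)       # v holds 10 once `a` has run
#   b = assign_add(a, 5)    # then 15
#   c = assign_sub(b, 3)    # then 12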
|
apache-2.0
| 6,323,476,951,739,608,000 | 2,119,369,272,020,253,000 | 34.363971 | 80 | 0.68084 | false |
llvm/llvm-test-suite
|
MicroBenchmarks/libs/benchmark/bindings/python/google_benchmark/__init__.py
|
4
|
4370
|
# Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python benchmarking utilities.
Example usage:
import google_benchmark as benchmark
@benchmark.register
def my_benchmark(state):
... # Code executed outside `while` loop is not timed.
while state:
... # Code executed within `while` loop is timed.
if __name__ == '__main__':
benchmark.main()
"""
from absl import app
from google_benchmark import _benchmark
from google_benchmark._benchmark import (
Counter,
kNanosecond,
kMicrosecond,
kMillisecond,
kSecond,
oNone,
o1,
oN,
oNSquared,
oNCubed,
oLogN,
oNLogN,
oAuto,
oLambda,
)
__all__ = [
"register",
"main",
"Counter",
"kNanosecond",
"kMicrosecond",
"kMillisecond",
"kSecond",
"oNone",
"o1",
"oN",
"oNSquared",
"oNCubed",
"oLogN",
"oNLogN",
"oAuto",
"oLambda",
]
__version__ = "0.2.0"
class __OptionMaker:
"""A stateless class to collect benchmark options.
Collect all decorator calls like @option.range(start=0, limit=1<<5).
"""
class Options:
"""Pure data class to store options calls, along with the benchmarked function."""
def __init__(self, func):
self.func = func
self.builder_calls = []
@classmethod
def make(cls, func_or_options):
"""Make Options from Options or the benchmarked function."""
if isinstance(func_or_options, cls.Options):
return func_or_options
return cls.Options(func_or_options)
def __getattr__(self, builder_name):
"""Append option call in the Options."""
        # The function that gets returned on @option.range(start=0, limit=1<<5).
def __builder_method(*args, **kwargs):
            # The decorator that gets called, either with the benchmarked
            # function or the previous Options.
def __decorator(func_or_options):
options = self.make(func_or_options)
options.builder_calls.append((builder_name, args, kwargs))
                # The decorator returns Options, so it is not technically a
                # decorator and needs a final call to @register.
return options
return __decorator
return __builder_method
# Alias for nicer API.
# We have to instantiate an object, even if stateless, to be able to use __getattr__
# on option.range
option = __OptionMaker()
def register(undefined=None, *, name=None):
"""Register function for benchmarking."""
if undefined is None:
# Decorator is called without parenthesis so we return a decorator
return lambda f: register(f, name=name)
# We have either the function to benchmark (simple case) or an instance of Options
# (@option._ case).
options = __OptionMaker.make(undefined)
if name is None:
name = options.func.__name__
# We register the benchmark and reproduce all the @option._ calls onto the
# benchmark builder pattern
benchmark = _benchmark.RegisterBenchmark(name, options.func)
for name, args, kwargs in options.builder_calls[::-1]:
getattr(benchmark, name)(*args, **kwargs)
# return the benchmarked function because the decorator does not modify it
return options.func
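# Illustrative sketch (added): stacking @option builder calls under @register
# as described above; the benchmark body itself is hypothetical.
#
#   @register
#   @option.range(start=8, limit=1 << 12)
#   def sum_benchmark(state):
#       while state:
#           sum(range(1000))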
def _flags_parser(argv):
argv = _benchmark.Initialize(argv)
return app.parse_flags_with_usage(argv)
def _run_benchmarks(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
return _benchmark.RunSpecifiedBenchmarks()
def main(argv=None):
return app.run(_run_benchmarks, argv=argv, flags_parser=_flags_parser)
# Methods for use with custom main function.
initialize = _benchmark.Initialize
run_benchmarks = _benchmark.RunSpecifiedBenchmarks
|
apache-2.0
| 2,101,392,506,189,630,200 | 8,286,722,184,894,880,000 | 26.658228 | 90 | 0.652632 | false |
mwilliammyers/sportsstats
|
tests/test_nba.py
|
1
|
1960
|
#!/usr/bin/env python
"""test_nba
Tests for `nba` module.
"""
from sportsstats import nba
import json
import unittest
class TestNba(unittest.TestCase):
def setUp(self):
from datetime import datetime
april_9 = datetime(2016, 4, 9)
self.nba_stats = nba.Stats(april_9, april_9)
self.expected_query_url = (
"/stats/leaguedashptstats?"
"College=&Conference=&Country=&DateFrom=04%2F09%2F2016&"
"DateTo=04%2F09%2F2016&Division=&DraftPick=&DraftYear=&"
"GameScope=&Height=&LastNGames=0&LeagueID=00&Location=&"
"Month=0&OpponentTeamID=0&Outcome=&PORound=0&PerMode=Totals&"
"PlayerExperience=&PlayerOrTeam=Player&PlayerPosition=&"
"PtMeasureType=SpeedDistance&Season=2015-16&SeasonSegment=&"
"SeasonType=Regular+Season&StarterBench=&TeamID=0&"
"VsConference=&VsDivision=&Weight="
)
pass
def tearDown(self):
del self.nba_stats
pass
def test_build_query_url(self):
actual = self.nba_stats._Stats__build_query_url()
self.assertEqual(actual, self.expected_query_url)
def test_send_get_request(self):
connection = self.nba_stats._Stats__send_get_request(
self.expected_query_url)
actual = connection.getresponse().status
self.assertEqual(actual, 200)
connection.close()
def test_download(self):
data = json.loads(self.nba_stats.download())
expected = [
'PLAYER_ID', 'PLAYER_NAME', 'TEAM_ID', 'TEAM_ABBREVIATION',
'GP', 'W', 'L', 'MIN', 'DIST_FEET', 'DIST_MILES',
'DIST_MILES_OFF', 'DIST_MILES_DEF', 'AVG_SPEED',
'AVG_SPEED_OFF', 'AVG_SPEED_DEF'
]
actual = data['resultSets'][0]['headers']
self.assertEqual(actual, expected)
if __name__ == '__main__':
import sys
sys.exit(unittest.main())
|
gpl-3.0
| -7,755,245,753,244,260,000 | -5,551,641,593,614,931,000 | 32.220339 | 77 | 0.590306 | false |
onitake/ansible
|
lib/ansible/module_utils/oneview.py
|
23
|
18876
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import (absolute_import, division, print_function)
import abc
import collections
import json
import os
import traceback
try:
from hpOneView.oneview_client import OneViewClient
HAS_HPE_ONEVIEW = True
except ImportError:
HAS_HPE_ONEVIEW = False
from ansible.module_utils import six
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.common._collections_compat import Mapping
def transform_list_to_dict(list_):
"""
Transforms a list into a dictionary, putting values as keys.
:arg list list_: List of values
:return: dict: dictionary built
"""
ret = {}
if not list_:
return ret
for value in list_:
if isinstance(value, Mapping):
ret.update(value)
else:
ret[to_native(value, errors='surrogate_or_strict')] = True
return ret
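# Illustrative example (added): plain items become keys mapped to True, while
# mapping items are merged in as-is.
#   transform_list_to_dict(['name', {'uri': '/rest/resource/1'}])
#   # => {'name': True, 'uri': '/rest/resource/1'}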
def merge_list_by_key(original_list, updated_list, key, ignore_when_null=None):
"""
Merge two lists by the key. It basically:
1. Adds the items that are present on updated_list and are absent on original_list.
2. Removes items that are absent on updated_list and are present on original_list.
    3. For all items that are in both lists, overwrites the values of the original item with those of the updated item.
:arg list original_list: original list.
:arg list updated_list: list with changes.
:arg str key: unique identifier.
:arg list ignore_when_null: list with the keys from the updated items that should be ignored in the merge,
        if their values are null.
:return: list: Lists merged.
"""
ignore_when_null = [] if ignore_when_null is None else ignore_when_null
if not original_list:
return updated_list
items_map = collections.OrderedDict([(i[key], i.copy()) for i in original_list])
merged_items = collections.OrderedDict()
for item in updated_list:
item_key = item[key]
if item_key in items_map:
for ignored_key in ignore_when_null:
if ignored_key in item and item[ignored_key] is None:
item.pop(ignored_key)
merged_items[item_key] = items_map[item_key]
merged_items[item_key].update(item)
else:
merged_items[item_key] = item
return list(merged_items.values())
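# Illustrative example (added): merging by 'id' drops entry 1 (absent from the
# updated list), overwrites entry 2 and appends entry 3.
#   merge_list_by_key(
#       [{'id': 1, 'v': 'a'}, {'id': 2, 'v': 'b'}],
#       [{'id': 2, 'v': 'B'}, {'id': 3, 'v': 'c'}],
#       key='id')
#   # => [{'id': 2, 'v': 'B'}, {'id': 3, 'v': 'c'}]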
def _str_sorted(obj):
if isinstance(obj, Mapping):
return json.dumps(obj, sort_keys=True)
else:
return str(obj)
def _standardize_value(value):
"""
Convert value to string to enhance the comparison.
:arg value: Any object type.
:return: str: Converted value.
"""
if isinstance(value, float) and value.is_integer():
# Workaround to avoid erroneous comparison between int and float
# Removes zero from integer floats
value = int(value)
return str(value)
class OneViewModuleException(Exception):
"""
OneView base Exception.
Attributes:
msg (str): Exception message.
oneview_response (dict): OneView rest response.
"""
def __init__(self, data):
self.msg = None
self.oneview_response = None
if isinstance(data, six.string_types):
self.msg = data
else:
self.oneview_response = data
if data and isinstance(data, dict):
self.msg = data.get('message')
if self.oneview_response:
Exception.__init__(self, self.msg, self.oneview_response)
else:
Exception.__init__(self, self.msg)
class OneViewModuleTaskError(OneViewModuleException):
"""
OneView Task Error Exception.
Attributes:
msg (str): Exception message.
error_code (str): A code which uniquely identifies the specific error.
"""
def __init__(self, msg, error_code=None):
super(OneViewModuleTaskError, self).__init__(msg)
self.error_code = error_code
class OneViewModuleValueError(OneViewModuleException):
"""
OneView Value Error.
The exception is raised when the data contains an inappropriate value.
Attributes:
msg (str): Exception message.
"""
pass
class OneViewModuleResourceNotFound(OneViewModuleException):
"""
OneView Resource Not Found Exception.
The exception is raised when an associated resource was not found.
Attributes:
msg (str): Exception message.
"""
pass
@six.add_metaclass(abc.ABCMeta)
class OneViewModuleBase(object):
MSG_CREATED = 'Resource created successfully.'
MSG_UPDATED = 'Resource updated successfully.'
MSG_DELETED = 'Resource deleted successfully.'
MSG_ALREADY_PRESENT = 'Resource is already present.'
MSG_ALREADY_ABSENT = 'Resource is already absent.'
MSG_DIFF_AT_KEY = 'Difference found at key \'{0}\'. '
HPE_ONEVIEW_SDK_REQUIRED = 'HPE OneView Python SDK is required for this module.'
ONEVIEW_COMMON_ARGS = dict(
config=dict(type='path'),
hostname=dict(type='str'),
username=dict(type='str'),
password=dict(type='str', no_log=True),
api_version=dict(type='int'),
image_streamer_hostname=dict(type='str')
)
ONEVIEW_VALIDATE_ETAG_ARGS = dict(validate_etag=dict(type='bool', default=True))
resource_client = None
def __init__(self, additional_arg_spec=None, validate_etag_support=False):
"""
OneViewModuleBase constructor.
:arg dict additional_arg_spec: Additional argument spec definition.
:arg bool validate_etag_support: Enables support to eTag validation.
"""
argument_spec = self._build_argument_spec(additional_arg_spec, validate_etag_support)
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
self._check_hpe_oneview_sdk()
self._create_oneview_client()
self.state = self.module.params.get('state')
self.data = self.module.params.get('data')
# Preload params for get_all - used by facts
self.facts_params = self.module.params.get('params') or {}
# Preload options as dict - used by facts
self.options = transform_list_to_dict(self.module.params.get('options'))
self.validate_etag_support = validate_etag_support
def _build_argument_spec(self, additional_arg_spec, validate_etag_support):
merged_arg_spec = dict()
merged_arg_spec.update(self.ONEVIEW_COMMON_ARGS)
if validate_etag_support:
merged_arg_spec.update(self.ONEVIEW_VALIDATE_ETAG_ARGS)
if additional_arg_spec:
merged_arg_spec.update(additional_arg_spec)
return merged_arg_spec
def _check_hpe_oneview_sdk(self):
if not HAS_HPE_ONEVIEW:
self.module.fail_json(msg=self.HPE_ONEVIEW_SDK_REQUIRED)
def _create_oneview_client(self):
if self.module.params.get('hostname'):
config = dict(ip=self.module.params['hostname'],
credentials=dict(userName=self.module.params['username'], password=self.module.params['password']),
api_version=self.module.params['api_version'],
image_streamer_ip=self.module.params['image_streamer_hostname'])
self.oneview_client = OneViewClient(config)
elif not self.module.params['config']:
self.oneview_client = OneViewClient.from_environment_variables()
else:
self.oneview_client = OneViewClient.from_json_file(self.module.params['config'])
@abc.abstractmethod
def execute_module(self):
"""
Abstract method, must be implemented by the inheritor.
        This method is called from the run method. It should contain the module logic.
:return: dict: It must return a dictionary with the attributes for the module result,
such as ansible_facts, msg and changed.
"""
pass
def run(self):
"""
Common implementation of the OneView run modules.
It calls the inheritor 'execute_module' function and sends the return to the Ansible.
It handles any OneViewModuleException in order to signal a failure to Ansible, with a descriptive error message.
"""
try:
if self.validate_etag_support:
if not self.module.params.get('validate_etag'):
self.oneview_client.connection.disable_etag_validation()
result = self.execute_module()
if "changed" not in result:
result['changed'] = False
self.module.exit_json(**result)
except OneViewModuleException as exception:
error_msg = '; '.join(to_native(e) for e in exception.args)
self.module.fail_json(msg=error_msg, exception=traceback.format_exc())
def resource_absent(self, resource, method='delete'):
"""
Generic implementation of the absent state for the OneView resources.
It checks if the resource needs to be removed.
:arg dict resource: Resource to delete.
:arg str method: Function of the OneView client that will be called for resource deletion.
Usually delete or remove.
:return: A dictionary with the expected arguments for the AnsibleModule.exit_json
"""
if resource:
getattr(self.resource_client, method)(resource)
return {"changed": True, "msg": self.MSG_DELETED}
else:
return {"changed": False, "msg": self.MSG_ALREADY_ABSENT}
def get_by_name(self, name):
"""
Generic get by name implementation.
:arg str name: Resource name to search for.
:return: The resource found or None.
"""
result = self.resource_client.get_by('name', name)
return result[0] if result else None
def resource_present(self, resource, fact_name, create_method='create'):
"""
Generic implementation of the present state for the OneView resources.
It checks if the resource needs to be created or updated.
:arg dict resource: Resource to create or update.
:arg str fact_name: Name of the fact returned to the Ansible.
:arg str create_method: Function of the OneView client that will be called for resource creation.
Usually create or add.
:return: A dictionary with the expected arguments for the AnsibleModule.exit_json
"""
changed = False
if "newName" in self.data:
self.data["name"] = self.data.pop("newName")
if not resource:
resource = getattr(self.resource_client, create_method)(self.data)
msg = self.MSG_CREATED
changed = True
else:
merged_data = resource.copy()
merged_data.update(self.data)
if self.compare(resource, merged_data):
msg = self.MSG_ALREADY_PRESENT
else:
resource = self.resource_client.update(merged_data)
changed = True
msg = self.MSG_UPDATED
return dict(
msg=msg,
changed=changed,
ansible_facts={fact_name: resource}
)
def resource_scopes_set(self, state, fact_name, scope_uris):
"""
Generic implementation of the scopes update PATCH for the OneView resources.
It checks if the resource needs to be updated with the current scopes.
This method is meant to be run after ensuring the present state.
:arg dict state: Dict containing the data from the last state results in the resource.
It needs to have the 'msg', 'changed', and 'ansible_facts' entries.
:arg str fact_name: Name of the fact returned to the Ansible.
:arg list scope_uris: List with all the scope URIs to be added to the resource.
:return: A dictionary with the expected arguments for the AnsibleModule.exit_json
"""
if scope_uris is None:
scope_uris = []
resource = state['ansible_facts'][fact_name]
operation_data = dict(operation='replace', path='/scopeUris', value=scope_uris)
if resource['scopeUris'] is None or set(resource['scopeUris']) != set(scope_uris):
state['ansible_facts'][fact_name] = self.resource_client.patch(resource['uri'], **operation_data)
state['changed'] = True
state['msg'] = self.MSG_UPDATED
return state
def compare(self, first_resource, second_resource):
"""
        Recursively compares dictionary contents for equivalence, ignoring types and element order.
        Particularities of the comparison:
        - A nonexistent key is treated as None.
        - These values are considered equal: None, empty, False.
        - Lists are compared value by value after a sort, if they have the same size.
- Each element is converted to str before the comparison.
:arg dict first_resource: first dictionary
:arg dict second_resource: second dictionary
:return: bool: True when equal, False when different.
"""
resource1 = first_resource
resource2 = second_resource
debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2)
# The first resource is True / Not Null and the second resource is False / Null
if resource1 and not resource2:
self.module.log("resource1 and not resource2. " + debug_resources)
return False
# Checks all keys in first dict against the second dict
for key in resource1:
if key not in resource2:
if resource1[key] is not None:
                    # A nonexistent key is equivalent to existing with value None
self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
return False
# If both values are null, empty or False it will be considered equal.
elif not resource1[key] and not resource2[key]:
continue
elif isinstance(resource1[key], Mapping):
# recursive call
if not self.compare(resource1[key], resource2[key]):
self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
return False
elif isinstance(resource1[key], list):
# change comparison function to compare_list
if not self.compare_list(resource1[key], resource2[key]):
self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
return False
elif _standardize_value(resource1[key]) != _standardize_value(resource2[key]):
self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
return False
# Checks all keys in the second dict, looking for missing elements
for key in resource2.keys():
if key not in resource1:
if resource2[key] is not None:
                    # A nonexistent key is equivalent to existing with value None
self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
return False
return True
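    # Illustrative examples (added): order and types are ignored, and a
    # missing key counts as None.
    #   self.compare({'a': 1, 'b': None}, {'a': '1'})   # => True
    #   self.compare({'a': 1}, {'a': 2})                # => False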
def compare_list(self, first_resource, second_resource):
"""
        Recursively compares list contents for equivalence, ignoring types and element order.
Lists with same size are compared value by value after a sort,
each element is converted to str before the comparison.
:arg list first_resource: first list
:arg list second_resource: second list
:return: True when equal; False when different.
"""
resource1 = first_resource
resource2 = second_resource
debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2)
# The second list is null / empty / False
if not resource2:
self.module.log("resource 2 is null. " + debug_resources)
return False
if len(resource1) != len(resource2):
self.module.log("resources have different length. " + debug_resources)
return False
resource1 = sorted(resource1, key=_str_sorted)
resource2 = sorted(resource2, key=_str_sorted)
for i, val in enumerate(resource1):
if isinstance(val, Mapping):
# change comparison function to compare dictionaries
if not self.compare(val, resource2[i]):
self.module.log("resources are different. " + debug_resources)
return False
elif isinstance(val, list):
# recursive call
if not self.compare_list(val, resource2[i]):
self.module.log("lists are different. " + debug_resources)
return False
elif _standardize_value(val) != _standardize_value(resource2[i]):
self.module.log("values are different. " + debug_resources)
return False
# no differences found
return True
|
gpl-3.0
| 7,667,324,718,714,870,000 | -52,955,700,169,993,700 | 36.676647 | 125 | 0.639807 | false |
Emsu/prophet
|
examples/tutorial/__main__.py
|
3
|
1782
|
import datetime as dt
from prophet import Prophet
from prophet.data import YahooCloseData
from prophet.analyze import default_analyzers
from bollinger import BollingerData
from eventstudy import BollingerEventStudy
from eventstudy import OrderGenerator
# Based on Homework #7 for Computational Investing
# http://wiki.quantsoftware.org/index.php?title=CompInvesti_Homework_7
# Here we use 2 symbols and a benchmark to reduce data pulled
# but you can use the full sp5002012.txt file from QSTK
# You will have to adjust the portfolio analyzers:
# the homework solution's analyzers start the analysis
# when the first trade is conducted, rather than covering
# the entire duration of the backtest.
prophet = Prophet()
symbols = ["AAPL", "XOM", "SPX"]
prophet.set_universe(symbols)
prophet.register_data_generators(YahooCloseData(),
BollingerData(),
BollingerEventStudy())
prophet.set_order_generator(OrderGenerator())
backtest = prophet.run_backtest(start=dt.datetime(2008, 1, 1),
end=dt.datetime(2009, 12, 31), lookback=20)
prophet.register_portfolio_analyzers(default_analyzers)
analysis = prophet.analyze_backtest(backtest)
print(analysis)
# +----------------------------------------+
# | sharpe | -0.851247401074 |
# | average_return | -2.04368321273e-07 |
# | cumulative_return | -0.000103 |
# | volatility | 3.81116761073e-06 |
# +----------------------------------------+
# Generate orders for you to execute today.
# Using Dec 31, 2009 as the date because there might be no data for today's
# date (the market might not be open) and we don't want the examples to fail.
today = dt.datetime(2009, 12, 31)
print(prophet.generate_orders(today, lookback=20))
|
bsd-3-clause
| -3,681,416,441,891,056,600 | 3,142,623,863,392,104,000 | 37.73913 | 75 | 0.680696 | false |
markeTIC/OCB
|
addons/website_mail/tests/__init__.py
|
261
|
1081
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
#    Copyright (c) 2012-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import test_controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| 593,393,362,308,228,100 | 474,634,322,041,084,900 | 44.041667 | 78 | 0.621647 | false |
yrizk/django-blog
|
blogvenv/lib/python3.4/site-packages/django/core/management/commands/createcachetable.py
|
96
|
3927
|
from django.conf import settings
from django.core.cache import caches
from django.core.cache.backends.db import BaseDatabaseCache
from django.core.management.base import BaseCommand, CommandError
from django.db import (
DEFAULT_DB_ALIAS, connections, models, router, transaction,
)
from django.db.utils import DatabaseError
from django.utils.encoding import force_text
class Command(BaseCommand):
help = "Creates the tables needed to use the SQL cache backend."
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('args', metavar='table_name', nargs='*',
help='Optional table names. Otherwise, settings.CACHES is used to '
'find cache tables.')
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS,
help='Nominates a database onto which the cache tables will be '
'installed. Defaults to the "default" database.')
def handle(self, *tablenames, **options):
db = options.get('database')
self.verbosity = int(options.get('verbosity'))
if len(tablenames):
# Legacy behavior, tablename specified as argument
for tablename in tablenames:
self.create_table(db, tablename)
else:
for cache_alias in settings.CACHES:
cache = caches[cache_alias]
if isinstance(cache, BaseDatabaseCache):
self.create_table(db, cache._table)
def create_table(self, database, tablename):
cache = BaseDatabaseCache(tablename, {})
if not router.allow_migrate_model(database, cache.cache_model_class):
return
connection = connections[database]
if tablename in connection.introspection.table_names():
if self.verbosity > 0:
self.stdout.write("Cache table '%s' already exists." % tablename)
return
fields = (
# "key" is a reserved word in MySQL, so use "cache_key" instead.
models.CharField(name='cache_key', max_length=255, unique=True, primary_key=True),
models.TextField(name='value'),
models.DateTimeField(name='expires', db_index=True),
)
table_output = []
index_output = []
qn = connection.ops.quote_name
for f in fields:
field_output = [qn(f.name), f.db_type(connection=connection)]
field_output.append("%sNULL" % ("NOT " if not f.null else ""))
if f.primary_key:
field_output.append("PRIMARY KEY")
elif f.unique:
field_output.append("UNIQUE")
if f.db_index:
unique = "UNIQUE " if f.unique else ""
index_output.append("CREATE %sINDEX %s ON %s (%s);" %
(unique, qn('%s_%s' % (tablename, f.name)), qn(tablename),
qn(f.name)))
table_output.append(" ".join(field_output))
full_statement = ["CREATE TABLE %s (" % qn(tablename)]
for i, line in enumerate(table_output):
full_statement.append(' %s%s' % (line, ',' if i < len(table_output) - 1 else ''))
full_statement.append(');')
with transaction.atomic(using=database,
savepoint=connection.features.can_rollback_ddl):
with connection.cursor() as curs:
try:
curs.execute("\n".join(full_statement))
except DatabaseError as e:
raise CommandError(
"Cache table '%s' could not be created.\nThe error was: %s." %
(tablename, force_text(e)))
for statement in index_output:
curs.execute(statement)
if self.verbosity > 1:
self.stdout.write("Cache table '%s' created." % tablename)
|
apache-2.0
| 5,485,194,304,930,234,000 | -5,178,824,374,366,974,000 | 42.633333 | 96 | 0.581105 | false |
simongoffin/website_version
|
openerp/addons/base/ir/ir_model.py
|
8
|
59779
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2014 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import re
import time
import types
import openerp
import openerp.modules.registry
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import fields, osv
from openerp.osv.orm import BaseModel, Model, MAGIC_COLUMNS, except_orm
from openerp.tools import config
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
MODULE_UNINSTALL_FLAG = '_force_unlink'
def _get_fields_type(self, cr, uid, context=None):
# Avoid too many nested `if`s below, as RedHat's Python 2.6
    # breaks on it. See bug 939653.
return sorted([(k,k) for k,v in fields.__dict__.iteritems()
if type(v) == types.TypeType and \
issubclass(v, fields._column) and \
v != fields._column and \
not v._deprecated and \
not issubclass(v, fields.function)])
def _in_modules(self, cr, uid, ids, field_name, arg, context=None):
#pseudo-method used by fields.function in ir.model/ir.model.fields
module_pool = self.pool["ir.module.module"]
installed_module_ids = module_pool.search(cr, uid, [('state','=','installed')])
installed_module_names = module_pool.read(cr, uid, installed_module_ids, ['name'], context=context)
installed_modules = set(x['name'] for x in installed_module_names)
result = {}
xml_ids = osv.osv._get_xml_ids(self, cr, uid, ids)
for k,v in xml_ids.iteritems():
result[k] = ', '.join(sorted(installed_modules & set(xml_id.split('.')[0] for xml_id in v)))
return result
class ir_model(osv.osv):
_name = 'ir.model'
_description = "Models"
_order = 'model'
def _is_osv_memory(self, cr, uid, ids, field_name, arg, context=None):
models = self.browse(cr, uid, ids, context=context)
res = dict.fromkeys(ids)
for model in models:
if model.model in self.pool:
res[model.id] = self.pool[model.model].is_transient()
else:
_logger.error('Missing model %s' % (model.model, ))
return res
def _search_osv_memory(self, cr, uid, model, name, domain, context=None):
if not domain:
return []
__, operator, value = domain[0]
if operator not in ['=', '!=']:
            raise osv.except_osv(_("Invalid Search Criteria"), _('The osv_memory field can only be compared with the = and != operators.'))
value = bool(value) if operator == '=' else not bool(value)
all_model_ids = self.search(cr, uid, [], context=context)
is_osv_mem = self._is_osv_memory(cr, uid, all_model_ids, 'osv_memory', arg=None, context=context)
return [('id', 'in', [id for id in is_osv_mem if bool(is_osv_mem[id]) == value])]
def _view_ids(self, cr, uid, ids, field_name, arg, context=None):
models = self.browse(cr, uid, ids)
res = {}
for model in models:
res[model.id] = self.pool["ir.ui.view"].search(cr, uid, [('model', '=', model.model)])
return res
_columns = {
'name': fields.char('Model Description', translate=True, required=True),
'model': fields.char('Model', required=True, select=1),
'info': fields.text('Information'),
'field_id': fields.one2many('ir.model.fields', 'model_id', 'Fields', required=True, copy=True),
'state': fields.selection([('manual','Custom Object'),('base','Base Object')],'Type', readonly=True),
'access_ids': fields.one2many('ir.model.access', 'model_id', 'Access'),
'osv_memory': fields.function(_is_osv_memory, string='Transient Model', type='boolean',
fnct_search=_search_osv_memory,
help="This field specifies whether the model is transient or not (i.e. if records are automatically deleted from the database or not)"),
'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the object is defined or inherited'),
'view_ids': fields.function(_view_ids, type='one2many', obj='ir.ui.view', string='Views'),
}
_defaults = {
'model': 'x_',
'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
}
def _check_model_name(self, cr, uid, ids, context=None):
for model in self.browse(cr, uid, ids, context=context):
if model.state=='manual':
if not model.model.startswith('x_'):
return False
if not re.match('^[a-z_A-Z0-9.]+$',model.model):
return False
return True
def _model_name_msg(self, cr, uid, ids, context=None):
return _('The Object name must start with x_ and not contain any special character !')
_constraints = [
(_check_model_name, _model_name_msg, ['model']),
]
_sql_constraints = [
('obj_name_uniq', 'unique (model)', 'Each model must be unique!'),
]
def _search_display_name(self, operator, value):
# overridden to allow searching both on model name (model field) and
# model description (name field)
return ['|', ('model', operator, value), ('name', operator, value)]
def _drop_table(self, cr, uid, ids, context=None):
for model in self.browse(cr, uid, ids, context):
model_pool = self.pool[model.model]
cr.execute('select relkind from pg_class where relname=%s', (model_pool._table,))
result = cr.fetchone()
if result and result[0] == 'v':
cr.execute('DROP view %s' % (model_pool._table,))
elif result and result[0] == 'r':
cr.execute('DROP TABLE %s CASCADE' % (model_pool._table,))
return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module tables
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get(MODULE_UNINSTALL_FLAG):
for model in self.browse(cr, user, ids, context):
if model.state != 'manual':
raise except_orm(_('Error'), _("Model '%s' contains module data and cannot be removed!") % (model.name,))
self._drop_table(cr, user, ids, context)
res = super(ir_model, self).unlink(cr, user, ids, context)
if not context.get(MODULE_UNINSTALL_FLAG):
# only reload pool for normal unlink. For module uninstall the
# reload is done independently in openerp.modules.loading
cr.commit() # must be committed before reloading registry in new cursor
openerp.modules.registry.RegistryManager.new(cr.dbname)
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
def write(self, cr, user, ids, vals, context=None):
if context:
context = dict(context)
context.pop('__last_update', None)
# Filter out operations 4 link from field id, because openerp-web
# always write (4,id,False) even for non dirty items
if 'field_id' in vals:
vals['field_id'] = [op for op in vals['field_id'] if op[0] != 4]
return super(ir_model,self).write(cr, user, ids, vals, context)
def create(self, cr, user, vals, context=None):
if context is None:
context = {}
if context and context.get('manual'):
vals['state']='manual'
res = super(ir_model,self).create(cr, user, vals, context)
if vals.get('state','base')=='manual':
self.instanciate(cr, user, vals['model'], context)
ctx = dict(context,
field_name=vals['name'],
field_state='manual',
select=vals.get('select_level', '0'),
update_custom_fields=True)
self.pool[vals['model']]._auto_init(cr, ctx)
self.pool[vals['model']]._auto_end(cr, ctx) # actually create FKs!
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
def instanciate(self, cr, user, model, context=None):
class x_custom_model(osv.osv):
_custom = True
if isinstance(model, unicode):
model = model.encode('utf-8')
x_custom_model._name = model
x_custom_model._module = False
a = x_custom_model._build_model(self.pool, cr)
if not a._columns:
x_name = 'id'
elif 'x_name' in a._columns.keys():
x_name = 'x_name'
else:
x_name = a._columns.keys()[0]
x_custom_model._rec_name = x_name
a._rec_name = x_name
class ir_model_fields(osv.osv):
_name = 'ir.model.fields'
_description = "Fields"
_rec_name = 'field_description'
_columns = {
'name': fields.char('Name', required=True, select=1),
'complete_name': fields.char('Complete Name', select=1),
'model': fields.char('Object Name', required=True, select=1,
help="The technical name of the model this field belongs to"),
'relation': fields.char('Object Relation',
help="For relationship fields, the technical name of the target model"),
'relation_field': fields.char('Relation Field',
help="For one2many fields, the field on the target model that implement the opposite many2one relationship"),
'model_id': fields.many2one('ir.model', 'Model', required=True, select=True, ondelete='cascade',
help="The model this field belongs to"),
'field_description': fields.char('Field Label', required=True),
'ttype': fields.selection(_get_fields_type, 'Field Type', required=True),
'selection': fields.char('Selection Options', help="List of options for a selection field, "
"specified as a Python expression defining a list of (key, label) pairs. "
"For example: [('blue','Blue'),('yellow','Yellow')]"),
'required': fields.boolean('Required'),
'readonly': fields.boolean('Readonly'),
'select_level': fields.selection([('0','Not Searchable'),('1','Always Searchable'),('2','Advanced Search (deprecated)')],'Searchable', required=True),
'translate': fields.boolean('Translatable', help="Whether values for this field can be translated (enables the translation mechanism for that field)"),
'size': fields.integer('Size'),
'state': fields.selection([('manual','Custom Field'),('base','Base Field')],'Type', required=True, readonly=True, select=1),
'on_delete': fields.selection([('cascade','Cascade'),('set null','Set NULL')], 'On Delete', help='On delete property for many2one fields'),
'domain': fields.char('Domain', help="The optional domain to restrict possible values for relationship fields, "
"specified as a Python expression defining a list of triplets. "
"For example: [('color','=','red')]"),
'groups': fields.many2many('res.groups', 'ir_model_fields_group_rel', 'field_id', 'group_id', 'Groups'),
'selectable': fields.boolean('Selectable'),
'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the field is defined'),
'serialization_field_id': fields.many2one('ir.model.fields', 'Serialization Field', domain = "[('ttype','=','serialized')]",
ondelete='cascade', help="If set, this field will be stored in the sparse "
"structure of the serialization field, instead "
"of having its own database column. This cannot be "
"changed after creation."),
}
_defaults = {
'selection': "",
'domain': "[]",
'name': 'x_',
'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
'on_delete': 'set null',
'select_level': '0',
'field_description': '',
'selectable': 1,
}
_order = "name"
def _check_selection(self, cr, uid, selection, context=None):
try:
selection_list = eval(selection)
except Exception:
_logger.warning('Invalid selection list definition for fields.selection', exc_info=True)
raise except_orm(_('Error'),
_("The Selection Options expression is not a valid Pythonic expression."
"Please provide an expression in the [('key','Label'), ...] format."))
check = True
if not (isinstance(selection_list, list) and selection_list):
check = False
else:
for item in selection_list:
if not (isinstance(item, (tuple,list)) and len(item) == 2):
check = False
break
if not check:
raise except_orm(_('Error'),
_("The Selection Options expression is must be in the [('key','Label'), ...] format!"))
return True
def _size_gt_zero_msg(self, cr, user, ids, context=None):
        return _('Size of the field can never be less than 0!')
_sql_constraints = [
        ('size_gt_zero', 'CHECK (size>=0)', _size_gt_zero_msg),
]
def _drop_column(self, cr, uid, ids, context=None):
for field in self.browse(cr, uid, ids, context):
if field.name in MAGIC_COLUMNS:
continue
model = self.pool[field.model]
cr.execute('select relkind from pg_class where relname=%s', (model._table,))
result = cr.fetchone()
cr.execute("SELECT column_name FROM information_schema.columns WHERE table_name ='%s' and column_name='%s'" %(model._table, field.name))
column_name = cr.fetchone()
if column_name and (result and result[0] == 'r'):
cr.execute('ALTER table "%s" DROP column "%s" cascade' % (model._table, field.name))
model._columns.pop(field.name, None)
# remove m2m relation table for custom fields
# we consider the m2m relation is only one way as it's not possible
# to specify the relation table in the interface for custom fields
# TODO master: maybe use ir.model.relations for custom fields
if field.state == 'manual' and field.ttype == 'many2many':
rel_name = self.pool[field.model]._all_columns[field.name].column._rel
cr.execute('DROP table "%s"' % (rel_name))
return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module columns
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get(MODULE_UNINSTALL_FLAG) and \
any(field.state != 'manual' for field in self.browse(cr, user, ids, context)):
raise except_orm(_('Error'), _("This column contains module data and cannot be removed!"))
self._drop_column(cr, user, ids, context)
res = super(ir_model_fields, self).unlink(cr, user, ids, context)
if not context.get(MODULE_UNINSTALL_FLAG):
cr.commit()
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
def create(self, cr, user, vals, context=None):
if 'model_id' in vals:
model_data = self.pool['ir.model'].browse(cr, user, vals['model_id'])
vals['model'] = model_data.model
if context is None:
context = {}
if context and context.get('manual',False):
vals['state'] = 'manual'
if vals.get('ttype', False) == 'selection':
if not vals.get('selection',False):
raise except_orm(_('Error'), _('For selection fields, the Selection Options must be given!'))
self._check_selection(cr, user, vals['selection'], context=context)
res = super(ir_model_fields,self).create(cr, user, vals, context)
if vals.get('state','base') == 'manual':
if not vals['name'].startswith('x_'):
raise except_orm(_('Error'), _("Custom fields must have a name that starts with 'x_' !"))
if vals.get('relation',False) and not self.pool['ir.model'].search(cr, user, [('model','=',vals['relation'])]):
raise except_orm(_('Error'), _("Model %s does not exist!") % vals['relation'])
if vals['model'] in self.pool:
if vals['model'].startswith('x_') and vals['name'] == 'x_name':
self.pool[vals['model']]._rec_name = 'x_name'
self.pool[vals['model']].__init__(self.pool, cr)
            # Pass an enriched context to _auto_init for special treatment of custom fields (select_level)
ctx = dict(context,
field_name=vals['name'],
field_state='manual',
select=vals.get('select_level', '0'),
update_custom_fields=True)
self.pool[vals['model']]._auto_init(cr, ctx)
self.pool[vals['model']]._auto_end(cr, ctx) # actually create FKs!
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
def write(self, cr, user, ids, vals, context=None):
if context is None:
context = {}
if context and context.get('manual',False):
vals['state'] = 'manual'
#For the moment renaming a sparse field or changing the storing system is not allowed. This may be done later
if 'serialization_field_id' in vals or 'name' in vals:
for field in self.browse(cr, user, ids, context=context):
if 'serialization_field_id' in vals and field.serialization_field_id.id != vals['serialization_field_id']:
raise except_orm(_('Error!'), _('Changing the storing system for field "%s" is not allowed.')%field.name)
if field.serialization_field_id and (field.name != vals['name']):
raise except_orm(_('Error!'), _('Renaming sparse field "%s" is not allowed')%field.name)
column_rename = None # if set, *one* column can be renamed here
models_patch = {} # structs of (obj, [(field, prop, change_to),..])
# data to be updated on the orm model
# static table of properties
model_props = [ # (our-name, fields.prop, set_fn)
('field_description', 'string', tools.ustr),
('required', 'required', bool),
('readonly', 'readonly', bool),
('domain', '_domain', eval),
('size', 'size', int),
('on_delete', 'ondelete', str),
('translate', 'translate', bool),
('selectable', 'selectable', bool),
('select_level', 'select', int),
('selection', 'selection', eval),
]
if vals and ids:
checked_selection = False # need only check it once, so defer
for item in self.browse(cr, user, ids, context=context):
obj = self.pool.get(item.model)
if item.state != 'manual':
raise except_orm(_('Error!'),
_('Properties of base fields cannot be altered in this manner! '
'Please modify them through Python code, '
'preferably through a custom addon!'))
if item.ttype == 'selection' and 'selection' in vals \
and not checked_selection:
self._check_selection(cr, user, vals['selection'], context=context)
checked_selection = True
final_name = item.name
if 'name' in vals and vals['name'] != item.name:
# We need to rename the column
if column_rename:
raise except_orm(_('Error!'), _('Can only rename one column at a time!'))
if vals['name'] in obj._columns:
raise except_orm(_('Error!'), _('Cannot rename column to %s, because that column already exists!') % vals['name'])
if vals.get('state', 'base') == 'manual' and not vals['name'].startswith('x_'):
                        raise except_orm(_('Error!'), _('New column name must still start with x_, because it is a custom field!'))
if '\'' in vals['name'] or '"' in vals['name'] or ';' in vals['name']:
raise ValueError('Invalid character in column name')
column_rename = (obj, (obj._table, item.name, vals['name']))
final_name = vals['name']
if 'model_id' in vals and vals['model_id'] != item.model_id:
raise except_orm(_("Error!"), _("Changing the model of a field is forbidden!"))
if 'ttype' in vals and vals['ttype'] != item.ttype:
raise except_orm(_("Error!"), _("Changing the type of a column is not yet supported. "
"Please drop it and create it again!"))
# We don't check the 'state', because it might come from the context
# (thus be set for multiple fields) and will be ignored anyway.
if obj is not None:
models_patch.setdefault(obj._name, (obj,[]))
# find out which properties (per model) we need to update
for field_name, field_property, set_fn in model_props:
if field_name in vals:
property_value = set_fn(vals[field_name])
if getattr(obj._columns[item.name], field_property) != property_value:
models_patch[obj._name][1].append((final_name, field_property, property_value))
# our dict is ready here, but no properties are changed so far
# These shall never be written (modified)
for column_name in ('model_id', 'model', 'state'):
if column_name in vals:
del vals[column_name]
res = super(ir_model_fields,self).write(cr, user, ids, vals, context=context)
if column_rename:
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % column_rename[1])
# This is VERY risky, but let us have this feature:
# we want to change the key of column in obj._columns dict
col = column_rename[0]._columns.pop(column_rename[1][1]) # take object out, w/o copy
column_rename[0]._columns[column_rename[1][2]] = col
if models_patch:
# We have to update _columns of the model(s) and then call their
# _auto_init to sync the db with the model. Hopefully, since write()
# was called earlier, they will be in-sync before the _auto_init.
# Anything we don't update in _columns now will be reset from
# the model into ir.model.fields (db).
ctx = dict(context, select=vals.get('select_level', '0'),
update_custom_fields=True)
for __, patch_struct in models_patch.items():
obj = patch_struct[0]
for col_name, col_prop, val in patch_struct[1]:
setattr(obj._columns[col_name], col_prop, val)
obj._auto_init(cr, ctx)
obj._auto_end(cr, ctx) # actually create FKs!
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
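# Hedged usage sketch (illustration only): renaming a custom field through
# write() exercises the column_rename path above. The cursor `cr`, the
# registry and the field/model names are hypothetical.
#
#   fields_obj = registry['ir.model.fields']
#   field_ids = fields_obj.search(cr, SUPERUSER_ID,
#       [('model', '=', 'x_books'), ('name', '=', 'x_title')])
#   fields_obj.write(cr, SUPERUSER_ID, field_ids, {'name': 'x_full_title'},
#       context={'manual': True})  # the new name must keep the x_ prefix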
class ir_model_constraint(Model):
"""
This model tracks PostgreSQL foreign keys and constraints used by OpenERP
models.
"""
_name = 'ir.model.constraint'
_columns = {
'name': fields.char('Constraint', required=True, select=1,
help="PostgreSQL constraint or foreign key name."),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
'type': fields.char('Constraint Type', required=True, size=1, select=1,
help="Type of the constraint: `f` for a foreign key, "
"`u` for other constraints."),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)',
'Constraints with the same name are unique per module.'),
]
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL foreign keys and constraints tracked by this model.
"""
if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
context = dict(context or {})
ids_set = set(ids)
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model.model
model_obj = self.pool[model]
name = openerp.tools.ustr(data.name)
typ = data.type
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_constraint where name=%s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
if typ == 'f':
# test if FK exists on this table (it could be on a related m2m table, in which case we ignore it)
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('f', name, model_obj._table))
if cr.fetchone():
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
_logger.info('Dropped FK CONSTRAINT %s@%s', name, model)
if typ == 'u':
# test if constraint exists
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('u', name, model_obj._table))
if cr.fetchone():
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
_logger.info('Dropped CONSTRAINT %s@%s', name, model)
self.unlink(cr, uid, ids, context)
class ir_model_relation(Model):
"""
This model tracks PostgreSQL tables used to implement OpenERP many2many
relations.
"""
_name = 'ir.model.relation'
_columns = {
'name': fields.char('Relation Name', required=True, select=1,
help="PostgreSQL table name implementing a many2many relation."),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL many2many relations tracked by this model.
"""
if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
ids_set = set(ids)
to_drop_table = []
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
name = openerp.tools.ustr(data.name)
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_relation where name = %s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
cr.execute("SELECT 1 FROM information_schema.tables WHERE table_name=%s", (name,))
            if cr.fetchone() and name not in to_drop_table:
to_drop_table.append(name)
self.unlink(cr, uid, ids, context)
# drop m2m relation tables
for table in to_drop_table:
            cr.execute('DROP TABLE %s CASCADE' % table)
_logger.info('Dropped table %s', table)
cr.commit()
class ir_model_access(osv.osv):
_name = 'ir.model.access'
_columns = {
'name': fields.char('Name', required=True, select=True),
        'active': fields.boolean('Active', help='If you uncheck the active field, it will disable the ACL without deleting it (if you delete a native ACL, it will be re-created when you reload the module).'),
'model_id': fields.many2one('ir.model', 'Object', required=True, domain=[('osv_memory','=', False)], select=True, ondelete='cascade'),
'group_id': fields.many2one('res.groups', 'Group', ondelete='cascade', select=True),
'perm_read': fields.boolean('Read Access'),
'perm_write': fields.boolean('Write Access'),
'perm_create': fields.boolean('Create Access'),
'perm_unlink': fields.boolean('Delete Access'),
}
_defaults = {
'active': True,
}
def check_groups(self, cr, uid, group):
grouparr = group.split('.')
if not grouparr:
return False
cr.execute("select 1 from res_groups_users_rel where uid=%s and gid IN (select res_id from ir_model_data where module=%s and name=%s)", (uid, grouparr[0], grouparr[1],))
return bool(cr.fetchone())
def check_group(self, cr, uid, model, mode, group_ids):
""" Check if a specific group has the access mode to the specified model"""
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, BaseModel):
assert model._name == 'ir.model', 'Invalid model object'
model_name = model.name
else:
model_name = model
if isinstance(group_ids, (int, long)):
group_ids = [group_ids]
for group_id in group_ids:
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id = %s", (model_name, group_id)
)
r = cr.fetchone()
if r is None:
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id IS NULL", (model_name, )
)
r = cr.fetchone()
access = bool(r and r[0])
if access:
return True
# pass no groups -> no access
return False
def group_names_with_access(self, cr, model_name, access_mode):
"""Returns the names of visible groups which have been granted ``access_mode`` on
the model ``model_name``.
:rtype: list
"""
assert access_mode in ['read','write','create','unlink'], 'Invalid access mode: %s' % access_mode
cr.execute('''SELECT
c.name, g.name
FROM
ir_model_access a
JOIN ir_model m ON (a.model_id=m.id)
JOIN res_groups g ON (a.group_id=g.id)
LEFT JOIN ir_module_category c ON (c.id=g.category_id)
WHERE
m.model=%s AND
a.active IS True AND
a.perm_''' + access_mode, (model_name,))
return [('%s/%s' % x) if x[0] else x[1] for x in cr.fetchall()]
@tools.ormcache()
def check(self, cr, uid, model, mode='read', raise_exception=True, context=None):
if uid==1:
            # The root user has all accesses
# TODO: exclude xml-rpc requests
return True
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, BaseModel):
assert model._name == 'ir.model', 'Invalid model object'
model_name = model.model
else:
model_name = model
# TransientModel records have no access rights, only an implicit access rule
if model_name not in self.pool:
_logger.error('Missing model %s' % (model_name, ))
elif self.pool[model_name].is_transient():
return True
# We check if a specific rule exists
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' JOIN res_groups_users_rel gu ON (gu.gid = a.group_id) '
' WHERE m.model = %s '
' AND gu.uid = %s '
' AND a.active IS True '
, (model_name, uid,)
)
r = cr.fetchone()[0]
if r is None:
# there is no specific rule. We check the generic rule
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' WHERE a.group_id IS NULL '
' AND m.model = %s '
' AND a.active IS True '
, (model_name,)
)
r = cr.fetchone()[0]
if not r and raise_exception:
groups = '\n\t'.join('- %s' % g for g in self.group_names_with_access(cr, model_name, mode))
msg_heads = {
# Messages are declared in extenso so they are properly exported in translation terms
'read': _("Sorry, you are not allowed to access this document."),
'write': _("Sorry, you are not allowed to modify this document."),
'create': _("Sorry, you are not allowed to create this kind of document."),
'unlink': _("Sorry, you are not allowed to delete this document."),
}
if groups:
msg_tail = _("Only users with the following access level are currently allowed to do that") + ":\n%s\n\n(" + _("Document model") + ": %s)"
msg_params = (groups, model_name)
else:
msg_tail = _("Please contact your system administrator if you think this is an error.") + "\n\n(" + _("Document model") + ": %s)"
msg_params = (model_name,)
_logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s', mode, uid, model_name)
msg = '%s %s' % (msg_heads[mode], msg_tail)
raise openerp.exceptions.AccessError(msg % msg_params)
return r or False
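    # Hedged usage sketch (illustration only): a typical ACL check before an
    # operation; `cr`, `uid` and the registry are hypothetical. With
    # raise_exception=True (the default) an AccessError is raised instead of
    # returning False.
    #
    #   access_obj = registry['ir.model.access']
    #   if access_obj.check(cr, uid, 'res.partner', 'write', raise_exception=False):
    #       pass  # the user may write res.partner records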
__cache_clearing_methods = []
def register_cache_clearing_method(self, model, method):
self.__cache_clearing_methods.append((model, method))
def unregister_cache_clearing_method(self, model, method):
try:
i = self.__cache_clearing_methods.index((model, method))
del self.__cache_clearing_methods[i]
except ValueError:
pass
def call_cache_clearing_methods(self, cr):
self.invalidate_cache(cr, SUPERUSER_ID)
self.check.clear_cache(self) # clear the cache of check function
for model, method in self.__cache_clearing_methods:
if model in self.pool:
getattr(self.pool[model], method)()
#
# Check rights on actions
#
def write(self, cr, uid, ids, values, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).write(cr, uid, ids, values, context=context)
return res
def create(self, cr, uid, values, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).create(cr, uid, values, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).unlink(cr, uid, ids, context=context)
return res
class ir_model_data(osv.osv):
"""Holds external identifier keys for records in the database.
This has two main uses:
* allows easy data integration with third-party systems,
making import/export/sync of data possible, as records
can be uniquely identified across multiple systems
* allows tracking the origin of data installed by OpenERP
modules themselves, thus making it possible to later
update them seamlessly.
"""
_name = 'ir.model.data'
_order = 'module,model,name'
def _display_name_get(self, cr, uid, ids, prop, unknow_none, context=None):
result = {}
result2 = {}
for res in self.browse(cr, uid, ids, context=context):
if res.id:
result.setdefault(res.model, {})
result[res.model][res.res_id] = res.id
result2[res.id] = False
for model in result:
try:
r = dict(self.pool[model].name_get(cr, uid, result[model].keys(), context=context))
for key,val in result[model].items():
result2[val] = r.get(key, False)
            except Exception:
                # some objects have no valid name_get implemented, we accept this
                pass
return result2
def _complete_name_get(self, cr, uid, ids, prop, unknow_none, context=None):
result = {}
for res in self.browse(cr, uid, ids, context=context):
result[res.id] = (res.module and (res.module + '.') or '')+res.name
return result
_columns = {
'name': fields.char('External Identifier', required=True, select=1,
help="External Key/Identifier that can be used for "
"data integration with third-party systems"),
'complete_name': fields.function(_complete_name_get, type='char', string='Complete ID'),
'display_name': fields.function(_display_name_get, type='char', string='Record Name'),
'model': fields.char('Model Name', required=True, select=1),
'module': fields.char('Module', required=True, select=1),
'res_id': fields.integer('Record ID', select=1,
help="ID of the target record in the database"),
'noupdate': fields.boolean('Non Updatable'),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Init Date')
}
_defaults = {
'date_init': fields.datetime.now,
'date_update': fields.datetime.now,
'noupdate': False,
'module': ''
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)', 'You cannot have multiple records with the same external ID in the same module!'),
]
def __init__(self, pool, cr):
osv.osv.__init__(self, pool, cr)
# also stored in pool to avoid being discarded along with this osv instance
if getattr(pool, 'model_data_reference_ids', None) is None:
self.pool.model_data_reference_ids = {}
# put loads on the class, in order to share it among all instances
type(self).loads = self.pool.model_data_reference_ids
def _auto_init(self, cr, context=None):
super(ir_model_data, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_model_data_module_name_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX ir_model_data_module_name_index ON ir_model_data (module, name)')
# NEW V8 API
@tools.ormcache(skiparg=3)
def xmlid_lookup(self, cr, uid, xmlid):
"""Low level xmlid lookup
Return (id, res_model, res_id) or raise ValueError if not found
"""
module, name = xmlid.split('.', 1)
ids = self.search(cr, uid, [('module','=',module), ('name','=', name)])
if not ids:
raise ValueError('External ID not found in the system: %s' % (xmlid))
# the sql constraints ensure us we have only one result
res = self.read(cr, uid, ids[0], ['model', 'res_id'])
if not res['res_id']:
raise ValueError('External ID not found in the system: %s' % (xmlid))
return ids[0], res['model'], res['res_id']
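    # Hedged usage sketch (illustration only): resolving an external
    # identifier; 'base.main_company' is a common xmlid but still an
    # assumption here, and `cr`/`uid` are hypothetical.
    #
    #   imd = registry['ir.model.data']
    #   imd_id, res_model, res_id = imd.xmlid_lookup(cr, uid, 'base.main_company')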
def xmlid_to_res_model_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Return (res_model, res_id)"""
try:
return self.xmlid_lookup(cr, uid, xmlid)[1:3]
except ValueError:
if raise_if_not_found:
raise
return (False, False)
def xmlid_to_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Returns res_id """
return self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)[1]
def xmlid_to_object(self, cr, uid, xmlid, raise_if_not_found=False, context=None):
""" Return a browse_record
if not found and raise_if_not_found is True return None
"""
t = self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)
res_model, res_id = t
if res_model and res_id:
record = self.pool[res_model].browse(cr, uid, res_id, context=context)
if record.exists():
return record
if raise_if_not_found:
            raise ValueError('No record found for unique ID %s. It may have been deleted.' % (xmlid))
return None
# OLD API
def _get_id(self, cr, uid, module, xml_id):
"""Returns the id of the ir.model.data record corresponding to a given module and xml_id (cached) or raise a ValueError if not found"""
return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[0]
def get_object_reference(self, cr, uid, module, xml_id):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached) or raise ValueError if not found"""
return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[1:3]
def check_object_reference(self, cr, uid, module, xml_id, raise_on_access_error=False):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached), if and only if the user has the necessary access rights
to see that object, otherwise raise a ValueError if raise_on_access_error is True or returns a tuple (model found, False)"""
model, res_id = self.get_object_reference(cr, uid, module, xml_id)
#search on id found in result to check if current user has read access right
check_right = self.pool.get(model).search(cr, uid, [('id', '=', res_id)])
if check_right:
return model, res_id
if raise_on_access_error:
raise ValueError('Not enough access rights on the external ID: %s.%s' % (module, xml_id))
return model, False
def get_object(self, cr, uid, module, xml_id, context=None):
""" Returns a browsable record for the given module name and xml_id.
If not found, raise a ValueError or return None, depending
on the value of `raise_exception`.
"""
return self.xmlid_to_object(cr, uid, "%s.%s" % (module, xml_id), raise_if_not_found=True, context=context)
def _update_dummy(self,cr, uid, model, module, xml_id=False, store=True):
if not xml_id:
return False
try:
id = self.read(cr, uid, [self._get_id(cr, uid, module, xml_id)], ['res_id'])[0]['res_id']
self.loads[(module,xml_id)] = (model,id)
        except Exception:
id = False
return id
def clear_caches(self):
""" Clears all orm caches on the object's methods
:returns: itself
"""
self.xmlid_lookup.clear_cache(self)
return self
def unlink(self, cr, uid, ids, context=None):
""" Regular unlink method, but make sure to clear the caches. """
self.clear_caches()
return super(ir_model_data,self).unlink(cr, uid, ids, context=context)
def _update(self,cr, uid, model, module, values, xml_id=False, store=True, noupdate=False, mode='init', res_id=False, context=None):
model_obj = self.pool[model]
if not context:
context = {}
# records created during module install should not display the messages of OpenChatter
context = dict(context, install_mode=True)
if xml_id and ('.' in xml_id):
assert len(xml_id.split('.'))==2, _("'%s' contains too many dots. XML ids should not contain dots ! These are used to refer to other modules data, as in module.reference_id") % xml_id
module, xml_id = xml_id.split('.')
action_id = False
if xml_id:
cr.execute('''SELECT imd.id, imd.res_id, md.id, imd.model, imd.noupdate
FROM ir_model_data imd LEFT JOIN %s md ON (imd.res_id = md.id)
WHERE imd.module=%%s AND imd.name=%%s''' % model_obj._table,
(module, xml_id))
results = cr.fetchall()
for imd_id2,res_id2,real_id2,real_model,noupdate_imd in results:
            # In update mode, do not update a record if its ir.model.data is flagged as noupdate
if mode == 'update' and noupdate_imd:
return res_id2
if not real_id2:
self.clear_caches()
cr.execute('delete from ir_model_data where id=%s', (imd_id2,))
res_id = False
else:
assert model == real_model, "External ID conflict, %s already refers to a `%s` record,"\
" you can't define a `%s` record with this ID." % (xml_id, real_model, model)
res_id,action_id = res_id2,imd_id2
if action_id and res_id:
model_obj.write(cr, uid, [res_id], values, context=context)
self.write(cr, uid, [action_id], {
'date_update': time.strftime('%Y-%m-%d %H:%M:%S'),
},context=context)
elif res_id:
model_obj.write(cr, uid, [res_id], values, context=context)
if xml_id:
if model_obj._inherits:
for table in model_obj._inherits:
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, uid, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
self.create(cr, uid, {
'name': xml_id,
'model': model,
'module':module,
'res_id':res_id,
'noupdate': noupdate,
},context=context)
else:
if mode=='init' or (mode=='update' and xml_id):
res_id = model_obj.create(cr, uid, values, context=context)
if xml_id:
if model_obj._inherits:
for table in model_obj._inherits:
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, uid, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
self.create(cr, uid, {
'name': xml_id,
'model': model,
'module': module,
'res_id': res_id,
'noupdate': noupdate
},context=context)
if xml_id and res_id:
self.loads[(module, xml_id)] = (model, res_id)
for table, inherit_field in model_obj._inherits.iteritems():
inherit_id = model_obj.read(cr, uid, [res_id],
[inherit_field])[0][inherit_field]
self.loads[(module, xml_id + '_' + table.replace('.', '_'))] = (table, inherit_id)
return res_id
def ir_set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=None, xml_id=False):
if isinstance(models[0], (list, tuple)):
model,res_id = models[0]
else:
res_id=None
model = models[0]
        if res_id:
            where = ' and res_id=%s'
            where_params = [res_id]
        else:
            where = ' and (res_id is null)'
            where_params = []
        if key2:
            where += ' and key2=%s'
            where_params.append(key2)
        else:
            where += ' and (key2 is null)'
        cr.execute('select * from ir_values where model=%s and key=%s and name=%s' + where, [model, key, name] + where_params)
        res = cr.fetchone()
        ir_values_obj = openerp.registry(cr.dbname)['ir.values']
        if not res:
            ir_values_obj.set(cr, uid, key, key2, name, models, value, replace, isobject, meta)
        elif xml_id:
            cr.execute('UPDATE ir_values set value=%s WHERE model=%s and key=%s and name=%s' + where, [value, model, key, name] + where_params)
            ir_values_obj.invalidate_cache(cr, uid, ['value'])
return True
def _module_data_uninstall(self, cr, uid, modules_to_remove, context=None):
"""Deletes all the records referenced by the ir.model.data entries
``ids`` along with their corresponding database backed (including
dropping tables, columns, FKs, etc, as long as there is no other
ir.model.data entry holding a reference to them (which indicates that
they are still owned by another module).
Attempts to perform the deletion in an appropriate order to maximize
the chance of gracefully deleting all records.
This step is performed as part of the full uninstallation of a module.
"""
ids = self.search(cr, uid, [('module', 'in', modules_to_remove)])
if uid != 1 and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
context = dict(context or {})
context[MODULE_UNINSTALL_FLAG] = True # enable model/field deletion
ids_set = set(ids)
wkf_todo = []
to_unlink = []
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
res_id = data.res_id
pair_to_unlink = (model, res_id)
if pair_to_unlink not in to_unlink:
to_unlink.append(pair_to_unlink)
if model == 'workflow.activity':
# Special treatment for workflow activities: temporarily revert their
# incoming transition and trigger an update to force all workflow items
# to move out before deleting them
cr.execute('select res_type,res_id from wkf_instance where id IN (select inst_id from wkf_workitem where act_id=%s)', (res_id,))
wkf_todo.extend(cr.fetchall())
cr.execute("update wkf_transition set condition='True', group_id=NULL, signal=NULL,act_to=act_from,act_from=%s where act_to=%s", (res_id,res_id))
self.invalidate_cache(cr, uid, context=context)
for model,res_id in wkf_todo:
try:
openerp.workflow.trg_write(uid, model, res_id, cr)
except Exception:
_logger.info('Unable to force processing of workflow for item %s@%s in order to leave activity to be deleted', res_id, model, exc_info=True)
def unlink_if_refcount(to_unlink):
for model, res_id in to_unlink:
external_ids = self.search(cr, uid, [('model', '=', model),('res_id', '=', res_id)])
if set(external_ids)-ids_set:
# if other modules have defined this record, we must not delete it
continue
if model == 'ir.model.fields':
# Don't remove the LOG_ACCESS_COLUMNS unless _log_access
# has been turned off on the model.
field = self.pool[model].browse(cr, uid, [res_id], context=context)[0]
if not field.exists():
_logger.info('Deleting orphan external_ids %s', external_ids)
self.unlink(cr, uid, external_ids)
continue
if field.name in openerp.models.LOG_ACCESS_COLUMNS and self.pool[field.model]._log_access:
continue
if field.name == 'id':
continue
_logger.info('Deleting %s@%s', res_id, model)
try:
cr.execute('SAVEPOINT record_unlink_save')
self.pool[model].unlink(cr, uid, [res_id], context=context)
except Exception:
_logger.info('Unable to delete %s@%s', res_id, model, exc_info=True)
cr.execute('ROLLBACK TO SAVEPOINT record_unlink_save')
else:
cr.execute('RELEASE SAVEPOINT record_unlink_save')
# Remove non-model records first, then model fields, and finish with models
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model not in ('ir.model','ir.model.fields','ir.model.constraint'))
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model.constraint')
ir_module_module = self.pool['ir.module.module']
ir_model_constraint = self.pool['ir.model.constraint']
modules_to_remove_ids = ir_module_module.search(cr, uid, [('name', 'in', modules_to_remove)], context=context)
constraint_ids = ir_model_constraint.search(cr, uid, [('module', 'in', modules_to_remove_ids)], context=context)
ir_model_constraint._module_data_uninstall(cr, uid, constraint_ids, context)
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model.fields')
ir_model_relation = self.pool['ir.model.relation']
relation_ids = ir_model_relation.search(cr, uid, [('module', 'in', modules_to_remove_ids)])
ir_model_relation._module_data_uninstall(cr, uid, relation_ids, context)
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model')
cr.commit()
self.unlink(cr, uid, ids, context)
def _process_end(self, cr, uid, modules):
""" Clear records removed from updated module data.
This method is called at the end of the module loading process.
        It is meant to remove records that are no longer present in the
        updated data. Such records are recognised as the ones with an xml id
        and a module in ir_model_data, with noupdate set to False, but not
        present in self.loads.
"""
if not modules:
return True
to_unlink = []
cr.execute("""SELECT id,name,model,res_id,module FROM ir_model_data
WHERE module IN %s AND res_id IS NOT NULL AND noupdate=%s ORDER BY id DESC""",
(tuple(modules), False))
for (id, name, model, res_id, module) in cr.fetchall():
if (module,name) not in self.loads:
to_unlink.append((model,res_id))
if not config.get('import_partial'):
for (model, res_id) in to_unlink:
if model in self.pool:
_logger.info('Deleting %s@%s', res_id, model)
self.pool[model].unlink(cr, uid, [res_id])
class wizard_model_menu(osv.osv_memory):
_name = 'wizard.ir.model.menu.create'
_columns = {
'menu_id': fields.many2one('ir.ui.menu', 'Parent Menu', required=True),
'name': fields.char('Menu Name', required=True),
}
def menu_create(self, cr, uid, ids, context=None):
if not context:
context = {}
model_pool = self.pool.get('ir.model')
for menu in self.browse(cr, uid, ids, context):
model = model_pool.browse(cr, uid, context.get('model_id'), context=context)
val = {
'name': menu.name,
'res_model': model.model,
'view_type': 'form',
'view_mode': 'tree,form'
}
action_id = self.pool.get('ir.actions.act_window').create(cr, uid, val)
self.pool.get('ir.ui.menu').create(cr, uid, {
'name': menu.name,
'parent_id': menu.menu_id.id,
'action': 'ir.actions.act_window,%d' % (action_id,),
'icon': 'STOCK_INDENT'
}, context)
return {'type':'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| -6,899,523,867,176,229,000 | -5,518,347,352,462,716,000 | 47.839052 | 207 | 0.557989 | false |
diorcety/intellij-community
|
python/helpers/pydev/pydev_imports.py
|
53
|
2519
|
from pydevd_constants import USE_LIB_COPY, izip
try:
try:
if USE_LIB_COPY:
from _pydev_imps import _pydev_xmlrpclib as xmlrpclib
else:
import xmlrpclib
except ImportError:
import xmlrpc.client as xmlrpclib
except ImportError:
from _pydev_imps import _pydev_xmlrpclib as xmlrpclib
try:
try:
if USE_LIB_COPY:
from _pydev_imps._pydev_SimpleXMLRPCServer import SimpleXMLRPCServer
else:
from SimpleXMLRPCServer import SimpleXMLRPCServer
except ImportError:
from xmlrpc.server import SimpleXMLRPCServer
except ImportError:
from _pydev_imps._pydev_SimpleXMLRPCServer import SimpleXMLRPCServer
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
execfile=execfile #Not in Py3k
except NameError:
from _pydev_imps._pydev_execfile import execfile
try:
if USE_LIB_COPY:
from _pydev_imps import _pydev_Queue as _queue
else:
import Queue as _queue
except:
import queue as _queue #@UnresolvedImport
try:
from pydevd_exec import Exec
except:
from pydevd_exec2 import Exec
try:
from urllib import quote, quote_plus, unquote_plus
except:
from urllib.parse import quote, quote_plus, unquote_plus #@UnresolvedImport
import os
try:
relpath = os.path.relpath
except:
# Only there from 2.6 onwards... let's provide a replacement.
def _split_path(path):
parts = []
loc = path
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = os.path.split(prev)
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def relpath(path, start=None):
if start is None:
start = os.curdir
origin = os.path.abspath(path)
start = os.path.abspath(start)
orig_list = _split_path(os.path.normcase(origin))
dest_list = _split_path(start)
if orig_list[0] != os.path.normcase(dest_list[0]):
return start
i = 0
for start_seg, dest_seg in izip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
segments = [os.pardir] * (len(orig_list) - i)
segments += dest_list[i:]
if len(segments) == 0:
return os.curdir
else:
return os.path.join(*segments)
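# Hedged usage sketch (illustration only), tracing the fallback above:
#
#   relpath('/a/b/c', '/a/d')   # -> os.path.join('..', '..', 'd')
#   relpath('/a/b/c', '/a/b/c') # -> os.curdir, i.e. '.'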
|
apache-2.0
| -6,447,642,774,281,908,000 | -1,788,814,729,735,537,700 | 22.990476 | 80 | 0.61056 | false |
python-ivi/python-ivi
|
ivi/agilent/agilentU2004A.py
|
2
|
1628
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2015-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilentU2000 import *
class agilentU2004A(agilentU2000):
"Agilent U2004A RF power sensor driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'U2004A')
super(agilentU2004A, self).__init__(*args, **kwargs)
self._channel_count = 1
self._frequency_low = 9e3
self._frequency_high = 6e9
self._power_low = -60
self._power_high = 20
self._init_channels()
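# Hedged usage sketch (illustration only); the VISA resource string is
# hypothetical:
#
#   import ivi
#   sensor = ivi.agilent.agilentU2004A("USB0::0x0957::0x2C18::MY12345678::INSTR")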
|
mit
| -3,735,514,370,675,388,400 | 1,212,408,169,713,829,400 | 35.177778 | 77 | 0.724816 | false |
arrabito/DIRAC
|
Core/DISET/private/MessageBroker.py
|
8
|
15445
|
""" Here, we need some documentation...
"""
import threading
import select
import time
import socket
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.DISET.private.TransportPool import getGlobalTransportPool
from DIRAC.Core.Utilities.ThreadPool import getGlobalThreadPool
from DIRAC.Core.Utilities.ReturnValues import isReturnStructure
from DIRAC.Core.DISET.private.MessageFactory import MessageFactory, DummyMessage
class MessageBroker( object ):
def __init__( self, name, transportPool = None, threadPool = None ):
self.__name = name
self.__messageTransports = {}
self.__msgCounter = 0
self.__msgCounterLock = threading.Lock()
self.__responseCallbacks = {}
self.__msgInTransport = {}
self.__listenPersistConn = False
self.__useMessageObjects = True
self.__callbacksLock = threading.Condition()
self.__trInOutLock = threading.Lock()
self.__msgFactory = MessageFactory()
self.__log = gLogger.getSubLogger( "MSGBRK" )
if not transportPool:
transportPool = getGlobalTransportPool()
self.__trPool = transportPool
if not threadPool:
threadPool = getGlobalThreadPool()
self.__threadPool = threadPool
self.__listeningForMessages = False
self.__listenThread = None
def getNumConnections( self ):
return len( self.__messageTransports )
def getMsgFactory( self ):
return self.__msgFactory
def useMessageObjects( self, bD ):
self.__useMessageObjects = bD
# Message id generation
def __generateMsgId( self ):
self.__msgCounterLock.acquire()
try:
msgId = "%s:%d" % ( self.__name, self.__msgCounter )
self.__msgCounter += 1
return msgId
finally:
self.__msgCounterLock.release()
def getTransportPool( self ):
return self.__trPool
# Add and remove transport to/from broker
def addTransport( self, transport, *args, **kwargs ):
trid = self.__trPool.add( transport )
try:
result = self.addTransportId( trid, *args, **kwargs )
except Exception as e:
gLogger.exception( "Cannot add transport id", lException = e )
result = S_ERROR( "Cannot add transport id" )
if not result[ 'OK' ]:
self.__trPool.remove( trid )
return result
return S_OK( trid )
def addTransportId( self, trid, svcName,
receiveMessageCallback = None, disconnectCallback = None,
idleRead = False, listenToConnection = True ):
self.__trInOutLock.acquire()
try:
if trid in self.__messageTransports:
return S_OK()
tr = self.__trPool.get( trid )
if not tr:
return S_ERROR( "No transport with id %s registered" % trid )
self.__messageTransports[ trid ] = { 'transport' : tr,
'svcName' : svcName,
'cbReceiveMessage': receiveMessageCallback,
'cbDisconnect' : disconnectCallback,
'listen' : listenToConnection,
'idleRead' : idleRead }
self.__startListeningThread()
return S_OK()
finally:
self.__trInOutLock.release()
def listenToTransport( self, trid, listen = True ):
self.__trInOutLock.acquire()
try:
if trid in self.__messageTransports:
self.__messageTransports[ trid ][ 'listen' ] = listen
self.__startListeningThread()
finally:
self.__trInOutLock.release()
# Listen to connections
def __startListeningThread( self ):
threadDead = self.__listeningForMessages and self.__listenThread is not None and not self.__listenThread.isAlive()
if not self.__listeningForMessages or threadDead:
self.__listeningForMessages = True
self.__listenThread = threading.Thread( target = self.__listenAutoReceiveConnections )
self.__listenThread.setDaemon( True )
self.__listenThread.start()
def __listenAutoReceiveConnections( self ):
while self.__listeningForMessages:
self.__trInOutLock.acquire()
try:
sIdList = []
for trid in self.__messageTransports:
mt = self.__messageTransports[ trid ]
if not mt[ 'listen' ]:
continue
sIdList.append( ( trid, mt[ 'transport' ].getSocket() ) )
if not sIdList:
self.__listeningForMessages = False
return
finally:
self.__trInOutLock.release()
try:
try:
inList, _outList, _exList = select.select( [ pos[1] for pos in sIdList ] , [], [], 1 )
if len( inList ) == 0:
continue
except socket.error:
time.sleep( 0.001 )
continue
except select.error:
time.sleep( 0.001 )
continue
except Exception as e:
gLogger.exception( "Exception while selecting persistent connections", lException = e )
continue
for sock in inList:
for iPos in range( len( sIdList ) ):
if sock == sIdList[ iPos ][1]:
trid = sIdList[ iPos ][0]
if trid in self.__messageTransports:
result = self.__receiveMsgDataAndQueue( trid )
if not result[ 'OK' ]:
self.removeTransport( trid )
break
#Process received data functions
def __receiveMsgDataAndQueue( self, trid ):
#Receive
result = self.__trPool.receive( trid,
blockAfterKeepAlive = False,
idleReceive = self.__messageTransports[ trid ][ 'idleRead' ] )
self.__log.debug( "[trid %s] Received data: %s" % ( trid, str( result ) ) )
#If error close transport and exit
if not result[ 'OK' ]:
self.__log.debug( "[trid %s] ERROR RCV DATA %s" % ( trid, result[ 'Message' ] ) )
gLogger.warn( "Error while receiving message", "from %s : %s" % ( self.__trPool.get( trid ).getFormattedCredentials(),
result[ 'Message' ] ) )
return self.removeTransport( trid )
self.__threadPool.generateJobAndQueueIt( self.__processIncomingData,
args = ( trid, result ) )
return S_OK()
def __processIncomingData( self, trid, receivedResult ):
#If keep alive, return OK
if 'keepAlive' in receivedResult and receivedResult[ 'keepAlive' ]:
return S_OK()
#If idle read return
self.__trInOutLock.acquire()
try:
idleRead = self.__messageTransports[ trid ][ 'idleRead' ]
except KeyError:
return S_ERROR( "Transport %s unknown" % trid )
finally:
self.__trInOutLock.release()
if idleRead:
if receivedResult[ 'Value' ]:
gLogger.fatal( "OOOops. Idle read has returned data!" )
return S_OK()
if not receivedResult[ 'Value' ]:
self.__log.debug( "Transport %s closed connection" % trid )
return self.removeTransport( trid )
#This is a message req/resp
msg = receivedResult[ 'Value' ]
#Valid message?
if 'request' not in msg:
gLogger.warn( "Received data does not seem to be a message !!!!" )
return self.removeTransport( trid )
#Decide if it's a response or a request
if msg[ 'request' ]:
#If message has Id return ACK to received
if 'id' in msg:
self.__sendResponse( trid, msg[ 'id' ], S_OK() )
#Process msg
result = self.__processIncomingRequest( trid, msg )
else:
result = self.__processIncomingResponse( trid, msg )
#If error close the transport
if not result[ 'OK' ]:
gLogger.info( "Closing transport because of error while processing message", result[ 'Message' ] )
return self.removeTransport( trid )
return S_OK()
def __processIncomingRequest( self, trid, msg ):
self.__trInOutLock.acquire()
try:
rcvCB = self.__messageTransports[ trid ][ 'cbReceiveMessage' ]
except KeyError:
return S_ERROR( "Transport %s unknown" % trid )
finally:
self.__trInOutLock.release()
if not rcvCB:
gLogger.fatal( "Transport %s does not have a callback defined and a message arrived!" % trid )
return S_ERROR( "No message was expected in for this transport" )
    #Check message has a name
for requiredField in [ 'name' ]:
if requiredField not in msg:
gLogger.error( "Message does not have required field", requiredField )
return S_ERROR( "Message does not have %s" % requiredField )
#Load message
if 'attrs' in msg:
attrs = msg[ 'attrs' ]
if not isinstance( attrs, (tuple, list) ):
return S_ERROR( "Message args has to be a tuple or a list, not %s" % type( attrs ) )
else:
attrs = None
#Do we "unpack" or do we send the raw data to the callback?
if self.__useMessageObjects:
result = self.__msgFactory.createMessage( self.__messageTransports[ trid ][ 'svcName' ], msg[ 'name' ], attrs )
if not result[ 'OK' ]:
return result
msgObj = result[ 'Value' ]
else:
msgObj = DummyMessage( msg )
#Is msg ok?
if not msgObj.isOK():
return S_ERROR( "Messsage is invalid" )
try:
#Callback it and return response
result = rcvCB( trid, msgObj )
if not isReturnStructure( result ):
return S_ERROR( "Request function does not return a result structure" )
return result
except Exception as e:
#Whoops. Show exception and return
gLogger.exception( "Exception while processing message %s" % msg[ 'name' ], lException = e )
return S_ERROR( "Exception while processing message %s: %s" % ( msg[ 'name' ], str( e ) ) )
def __processIncomingResponse( self, trid, msg ):
#This is a message response
for requiredField in ( 'id', 'result' ):
if requiredField not in msg:
gLogger.error( "Message does not have required field", requiredField )
return S_ERROR( "Message does not have %s" % requiredField )
if not isReturnStructure( msg[ 'result' ] ):
return S_ERROR( "Message response did not return a result structure" )
return self.__notifyCallback( msg[ 'id' ], msg[ 'result' ] )
#Sending functions
def __sendResponse( self, trid, msgId, msgResult ):
msgResponse = { 'request' : False, 'id' : msgId, 'result' : msgResult }
_result = self.__trPool.send( trid, S_OK( msgResponse ) )
def sendMessage( self, trid, msgObj ):
if not msgObj.isOK():
return S_ERROR( "Message is not ready to be sent" )
result = self.__sendMessage( trid, msgObj )
if not result[ 'OK' ]:
self.removeTransport( trid )
return result
def __sendMessage( self, trid, msgObj ):
if not self.__trPool.exists( trid ):
return S_ERROR( "Not transport with id %s defined for messaging" % trid )
msg = { 'request' : True, 'name' : msgObj.getName() }
attrs = msgObj.dumpAttrs()[ 'Value' ]
msg[ 'attrs' ] = attrs
waitForAck = msgObj.getWaitForAck()
if not waitForAck:
return self.__trPool.send( trid, S_OK( msg ) )
msgId = self.__generateMsgId()
msg[ 'id' ] = msgId
self.__generateMessageResponse( trid, msgId )
result = self.__trPool.send( trid, S_OK( msg ) )
#Lock and generate and wait
self.__callbacksLock.acquire()
try:
if not result[ 'OK' ]:
#Release lock and exit
self.__clearCallback( msgId )
return result
return self.__waitForMessageResponse( msgId )
finally:
self.__callbacksLock.release()
#Callback nightmare
  #Lock needs to have been acquired prior to calling this function
def __generateMessageResponse( self, trid, msgId ):
self.__callbacksLock.acquire()
try:
if msgId in self.__responseCallbacks:
return self.__responseCallbacks[ msgId ]
if trid not in self.__msgInTransport:
self.__msgInTransport[ trid ] = set()
self.__msgInTransport[ trid ].add( msgId )
self.__responseCallbacks[ msgId ] = { 'creationTime' : time.time(),
'trid' : trid
}
return self.__responseCallbacks[ msgId ]
finally:
self.__callbacksLock.release()
  #Lock needs to have been acquired prior to calling this function
def __waitForMessageResponse( self, msgId ):
if msgId not in self.__responseCallbacks:
return S_ERROR( "Invalid msg id" )
respCallback = self.__responseCallbacks[ msgId ]
    while 'result' not in respCallback and time.time() - respCallback[ 'creationTime' ] < 30:
self.__callbacksLock.wait( 30 )
self.__clearCallback( msgId )
if 'result' in respCallback:
return respCallback[ 'result' ]
return S_ERROR( "Timeout while waiting for message ack" )
def __clearCallback( self, msgId ):
if msgId not in self.__responseCallbacks:
return False
trid = self.__responseCallbacks[ msgId ][ 'trid' ]
self.__responseCallbacks.pop( msgId )
try:
self.__msgInTransport[ trid ].remove( msgId )
except KeyError:
pass
return True
  #Lock needs to have been acquired prior to calling this function
def __setCallbackResult( self, msgId, result = False ):
if msgId not in self.__responseCallbacks:
return False
self.__responseCallbacks[ msgId ][ 'result' ] = result
return True
def __notifyCallback( self, msgId, msgResult ):
self.__callbacksLock.acquire()
try:
if self.__setCallbackResult( msgId, msgResult ):
self.__callbacksLock.notifyAll()
finally:
self.__callbacksLock.release()
return S_OK()
def removeTransport( self, trid, closeTransport = True ):
#Delete from the message Transports
self.__trInOutLock.acquire()
try:
if trid not in self.__messageTransports:
return S_OK()
#Save the disconnect callback if it's there
if self.__messageTransports[ trid ][ 'cbDisconnect' ]:
cbDisconnect = self.__messageTransports[ trid ][ 'cbDisconnect' ]
else:
cbDisconnect = False
self.__messageTransports.pop( trid )
if closeTransport:
self.__trPool.close( trid )
finally:
self.__trInOutLock.release()
#Flush remaining messages
self.__callbacksLock.acquire()
try:
msgIds = False
if trid in self.__msgInTransport:
msgIds = set( self.__msgInTransport[ trid ] )
self.__msgInTransport.pop( trid )
for msgId in msgIds:
self.__setCallbackResult( msgId, S_ERROR( "Connection closed by peer" ) )
self.__callbacksLock.notifyAll()
finally:
self.__callbacksLock.release()
#Queue the disconnect CB if it's there
if cbDisconnect:
self.__threadPool.generateJobAndQueueIt( cbDisconnect,
args = ( trid, ) )
return S_OK()
class MessageSender( object ):
def __init__( self, serviceName, msgBroker ):
self.__serviceName = serviceName
self.__msgBroker = msgBroker
def getServiceName( self ):
return self.__serviceName
def sendMessage( self, trid, msgObj ):
return self.__msgBroker.sendMessage( trid, msgObj )
def createMessage( self, msgName ):
return self.__msgBroker.__msgFactory.createMessage( self.__serviceName, msgName )
gMessageBroker = False
def getGlobalMessageBroker():
global gMessageBroker
if not gMessageBroker:
gMessageBroker = MessageBroker( 'GlobalMessageBroker', transportPool = getGlobalTransportPool() )
return gMessageBroker
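# Hedged usage sketch (illustration only): registering a transport with the
# global broker and sending a message. `transport` and `myCallback` are
# hypothetical.
#
#   broker = getGlobalMessageBroker()
#   result = broker.addTransport( transport, 'MyService',
#                                 receiveMessageCallback = myCallback )
#   if result[ 'OK' ]:
#     trid = result[ 'Value' ]
#     msgObj = broker.getMsgFactory().createMessage( 'MyService', 'Ping' )[ 'Value' ]
#     broker.sendMessage( trid, msgObj )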
|
gpl-3.0
| 6,229,193,069,893,956,000 | -1,024,197,006,866,983,800 | 34.835267 | 124 | 0.6191 | false |
muffin/tutorial-rss-reader
|
server/vendor/pyquery/cssselectpatch.py
|
3
|
7695
|
#-*- coding:utf-8 -*-
#
# Copyright (C) 2008 - Olivier Lauzanne <[email protected]>
#
# Distributed under the BSD license, see LICENSE.txt
from cssselect import xpath as cssselect_xpath
from cssselect.xpath import ExpressionError
class JQueryTranslator(cssselect_xpath.HTMLTranslator):
"""This class is used to implement the css pseudo classes
(:first, :last, ...) that are not defined in the css standard,
but are defined in the jquery API.
"""
def xpath_first_pseudo(self, xpath):
"""Matches the first selected element.
"""
xpath.add_post_condition('position() = 1')
return xpath
def xpath_last_pseudo(self, xpath):
"""Matches the last selected element.
"""
xpath.add_post_condition('position() = last()')
return xpath
def xpath_even_pseudo(self, xpath):
"""Matches even elements, zero-indexed.
"""
# the first element is 1 in xpath and 0 in python and js
xpath.add_post_condition('position() mod 2 = 1')
return xpath
def xpath_odd_pseudo(self, xpath):
"""Matches odd elements, zero-indexed.
"""
xpath.add_post_condition('position() mod 2 = 0')
return xpath
def xpath_checked_pseudo(self, xpath):
"""Matches odd elements, zero-indexed.
"""
xpath.add_condition("@checked and name(.) = 'input'")
return xpath
def xpath_selected_pseudo(self, xpath):
"""Matches all elements that are selected.
"""
xpath.add_condition("@selected and name(.) = 'option'")
return xpath
def xpath_disabled_pseudo(self, xpath):
"""Matches all elements that are disabled.
"""
xpath.add_condition("@disabled")
return xpath
def xpath_enabled_pseudo(self, xpath):
"""Matches all elements that are enabled.
"""
xpath.add_condition("not(@disabled) and name(.) = 'input'")
return xpath
def xpath_file_pseudo(self, xpath):
"""Matches all input elements of type file.
"""
xpath.add_condition("@type = 'file' and name(.) = 'input'")
return xpath
def xpath_input_pseudo(self, xpath):
"""Matches all input elements.
"""
xpath.add_condition("(name(.) = 'input' or name(.) = 'select') "
+ "or (name(.) = 'textarea' or name(.) = 'button')")
return xpath
def xpath_button_pseudo(self, xpath):
"""Matches all button input elements and the button element.
"""
xpath.add_condition("(@type = 'button' and name(.) = 'input') "
+ "or name(.) = 'button'")
return xpath
def xpath_radio_pseudo(self, xpath):
"""Matches all radio input elements.
"""
xpath.add_condition("@type = 'radio' and name(.) = 'input'")
return xpath
def xpath_text_pseudo(self, xpath):
"""Matches all text input elements.
"""
xpath.add_condition("@type = 'text' and name(.) = 'input'")
return xpath
def xpath_checkbox_pseudo(self, xpath):
"""Matches all checkbox input elements.
"""
xpath.add_condition("@type = 'checkbox' and name(.) = 'input'")
return xpath
def xpath_password_pseudo(self, xpath):
"""Matches all password input elements.
"""
xpath.add_condition("@type = 'password' and name(.) = 'input'")
return xpath
def xpath_submit_pseudo(self, xpath):
"""Matches all submit input elements.
"""
xpath.add_condition("@type = 'submit' and name(.) = 'input'")
return xpath
def xpath_image_pseudo(self, xpath):
"""Matches all image input elements.
"""
xpath.add_condition("@type = 'image' and name(.) = 'input'")
return xpath
def xpath_reset_pseudo(self, xpath):
"""Matches all reset input elements.
"""
xpath.add_condition("@type = 'reset' and name(.) = 'input'")
return xpath
def xpath_header_pseudo(self, xpath):
"""Matches all header elelements (h1, ..., h6)
"""
# this seems kind of brute-force, is there a better way?
xpath.add_condition(
"(name(.) = 'h1' or name(.) = 'h2' or name (.) = 'h3') "
+ "or (name(.) = 'h4' or name (.) = 'h5' or name(.) = 'h6')")
return xpath
def xpath_parent_pseudo(self, xpath):
"""Match all elements that contain other elements
"""
xpath.add_condition("count(child::*) > 0")
return xpath
def xpath_empty_pseudo(self, xpath):
"""Match all elements that do not contain other elements
"""
xpath.add_condition("count(child::*) = 0")
return xpath
def xpath_eq_function(self, xpath, function):
"""Matches a single element by its index.
"""
if function.argument_types() != ['NUMBER']:
raise ExpressionError(
"Expected a single integer for :eq(), got %r"
% function.arguments
)
value = int(function.arguments[0].value)
xpath.add_post_condition(
'position() = %s' % (value + 1))
return xpath
def xpath_gt_function(self, xpath, function):
"""Matches all elements with an index over the given one.
"""
if function.argument_types() != ['NUMBER']:
raise ExpressionError(
"Expected a single integer for :gt(), got %r"
% function.arguments
)
value = int(function.arguments[0].value)
xpath.add_post_condition(
'position() > %s' % (value + 1))
return xpath
def xpath_lt_function(self, xpath, function):
"""Matches all elements with an index below the given one.
"""
if function.argument_types() != ['NUMBER']:
raise ExpressionError(
"Expected a single integer for :gt(), got %r"
% function.arguments
)
value = int(function.arguments[0].value)
xpath.add_post_condition(
'position() < %s' % (value + 1))
return xpath
def xpath_contains_function(self, xpath, function):
"""Matches all elements that contain the given text
"""
if function.argument_types() != ['STRING']:
raise ExpressionError(
"Expected a single string for :contains(), got %r"
% function.arguments
)
value = str(function.arguments[0].value)
xpath.add_post_condition(
"contains(text(), '%s')" % value)
return xpath
XPathExprOrig = cssselect_xpath.XPathExpr
class XPathExpr(XPathExprOrig):
def __init__(self, path='', element='*', condition='', star_prefix=False):
self.path = path
self.element = element
self.condition = condition
self.post_condition = None
def add_post_condition(self, post_condition):
if self.post_condition:
self.post_condition = '%s and (%s)' % (self.post_condition,
post_condition)
else:
self.post_condition = post_condition
def __str__(self):
path = XPathExprOrig.__str__(self)
if self.post_condition:
path = '%s[%s]' % (path, self.post_condition)
return path
def join(self, combiner, other):
res = XPathExprOrig.join(self, combiner, other)
self.post_condition = other.post_condition
return res
cssselect_xpath.XPathExpr = XPathExpr
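# --- Editor's sketch (illustrative; not part of the original module) ---
# JQueryTranslator inherits css_to_xpath() from cssselect's HTMLTranslator,
# so the jQuery-style pseudo classes above translate like any other selector
# (assumes cssselect is installed).
if __name__ == '__main__':
    translator = JQueryTranslator()
    # emits an XPath expression carrying the "position() = 1" post-condition
    print(translator.css_to_xpath('li:first'))
    # emits an XPath expression testing @checked on input elements
    print(translator.css_to_xpath('input:checked'))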
|
mit
| 2,205,496,257,325,702,400 | 2,265,223,020,921,999,000 | 32.456522 | 78 | 0.558934 | false |
ionomy/ion
|
test/functional/token_test-pt1.py
|
1
|
8466
|
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Ion Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the functionality of all CLI commands.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from time import sleep
from decimal import Decimal
import re
import sys
import os
import subprocess
ION_TX_FEE = 0.001
ION_AUTH_ADDR = "gAQQQjA4DCT2EZDVK6Jae4mFfB217V43Nt"
class TokenTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
#self.extra_args = [["-debug"],["-debug"]]
def run_test(self):
connect_nodes_bi(self.nodes, 0, 1)
tmpdir=self.options.tmpdir
self.log.info("Generating Tokens...")
self.nodes[0].generate(100)
self.nodes[0].importprivkey("cUnScAFQYLW8J8V9bWr57yj2AopudqTd266s6QuWGMMfMix3Hff4")
self.nodes[0].generate(100)
self.nodes[0].generate(100)
self.nodes[0].sendtoaddress(ION_AUTH_ADDR, 10)
self.nodes[0].generate(1)
MagicTok=self.nodes[0].configuremanagementtoken("MAGIC", "MagicToken", "4", "https://github.com/ioncoincore/ATP-descriptions/blob/master/ION-testnet-MAGIC.json", "4f92d91db24bb0b8ca24a2ec86c4b012ccdc4b2e9d659c2079f5cc358413a765", "true")
self.nodes[0].generate(1)
MagicGroup_ID=MagicTok['groupID']
mintaddr=self.nodes[0].getnewaddress()
self.nodes[0].minttoken(MagicGroup_ID, mintaddr, 500)
self.nodes[0].generate(1)
XDMTok=self.nodes[0].configuremanagementtoken("XDM", "DarkMatter", "13", "https://github.com/ioncoincore/ATP-descriptions/blob/master/ION-testnet-XDM.json", "f5125a90bde180ef073ce1109376d977f5cbddb5582643c81424cc6cc842babd", "true")
XDMGroup_ID=XDMTok['groupID']
AtomTok=self.nodes[0].configuremanagementtoken("ATOM", "Atom", "0", "https://github.com/ioncoincore/ATP-descriptions/blob/master/ION-testnet-ATOM.json", "b0425ee4ba234099970c53c28288da749e2a1afc0f49856f4cab82b37f72f6a5", "true")
AtomGroup_ID=AtomTok['groupID']
ELECTok=self.nodes[0].configuremanagementtoken("ELEC", "Electron", "13", "https://github.com/ioncoincore/ATP-descriptions/blob/master/ION-testnet-ELEC.json", "6de2409add060ec4ef03d61c0966dc46508ed3498e202e9459e492a372ddccf5", "true")
ELECGroup_ID=ELECTok['groupID']
self.nodes[0].generate(1)
self.log.info("Token Info %s" % json.dumps(self.nodes[0].tokeninfo("all"), indent=4))
MagicAddr=self.nodes[0].getnewaddress()
XDMAddr=self.nodes[0].getnewaddress()
AtomAddr=self.nodes[0].getnewaddress()
ELECAddr=self.nodes[0].getnewaddress()
HulkAddr=self.nodes[0].getnewaddress()
self.nodes[0].minttoken(MagicGroup_ID, MagicAddr, '4975')
self.nodes[0].generate(1)
self.nodes[0].minttoken(XDMGroup_ID, XDMAddr, '71')
self.nodes[0].generate(1)
self.nodes[0].minttoken(AtomGroup_ID, AtomAddr, '100')
self.nodes[0].generate(1)
self.nodes[0].minttoken(ELECGroup_ID, ELECAddr, '1')
self.nodes[0].generate(1)
HULKTok=self.nodes[0].configuretoken("HULK", "HulkToken", "10", "https://raw.githubusercontent.com/CeForce/hulktoken/master/hulk.json", "367750e31cb276f5218c013473449c9e6a4019fed603d045b51e25f5db29283a", "true")
HulkGroup_ID=HULKTok['groupID']
self.nodes[0].generate(1)
self.nodes[0].minttoken(HulkGroup_ID, HulkAddr, '15')
self.nodes[0].generate(1)
tokenBalance=self.nodes[0].gettokenbalance()
for balance in tokenBalance:
self.log.info("Token Name %s" % balance['name'])
self.log.info("Token Balance %s" % balance['balance'])
self.log.info("XDM Ticker %s" % json.dumps(self.nodes[0].tokeninfo('ticker', 'XDM'), indent=4))
self.log.info("XDM Scan Tokens %s" % self.nodes[0].scantokens('start', XDMGroup_ID))
tokenAuth=self.nodes[0].listtokenauthorities()
for authority in tokenAuth:
self.log.info("Ticker %s" % authority['ticker'])
self.log.info("Authority address %s\n" % authority['address'])
self.log.info("Token Authorities %s" % authority['tokenAuthorities'])
self.log.info("Drop Mint Authoritiy for XDM")
XDMDrop=self.nodes[0].listtokenauthorities(XDMGroup_ID)
self.nodes[0].droptokenauthorities(XDMGroup_ID, XDMDrop[0]['txid'], str(XDMDrop[0]['vout']), 'configure')
self.nodes[0].generate(1)
tokenAuthority=(self.nodes[0].listtokenauthorities(XDMGroup_ID))
tokenXDMAddr=tokenAuthority[0]['address']
self.log.info("Token authorities XDM %s\n" % tokenXDMAddr)
try:
self.log.info("Try minting XDM tokens with mint flag removed")
self.nodes[0].minttoken(XDMGroup_ID, XDMAddr, '100')
except Exception as e:
self.log.info(e)
#self.log.info("Re-Enable mint XDM")
#time.sleep(3600)
#self.nodes[0].createtokenauthorities(XDMGroup_ID, tokenXDMAddr, 'configure')
self.log.info("XDM Scan Tokens %s" % self.nodes[0].scantokens('start', XDMGroup_ID))
tokenBalance=self.nodes[0].gettokenbalance()
for balance in tokenBalance:
self.log.info("Token Name %s" % balance['name'])
self.log.info("Token Balance %s" % balance['balance'])
AtomBalance=self.nodes[0].gettokenbalance(AtomGroup_ID)
self.log.info("Atom Balance %s" % AtomBalance['balance'])
self.log.info("Melt 10 tokens from ATOM Group")
self.nodes[0].melttoken(AtomGroup_ID, '10')
AtomBalance=self.nodes[0].gettokenbalance(AtomGroup_ID)
self.log.info("Atom Balance %s\n" % AtomBalance['balance'])
self.log.info("Token info all (from node1)\n%s\n" % json.dumps(self.nodes[1].tokeninfo('all'), indent=4))
self.log.info("Token info ticker XDM\n%s\n" % json.dumps(self.nodes[0].tokeninfo('ticker', 'XDM'), indent=4))
self.log.info("Token info name DarkMatter\n%s\n" % json.dumps(self.nodes[0].tokeninfo('name', 'darkmatter'), indent=4))
self.log.info("Token info groupid %s\n%s\n" % (XDMGroup_ID, json.dumps(self.nodes[0].tokeninfo('groupid', XDMGroup_ID), indent=4)))
ELEC_Trans=self.nodes[0].listtokentransactions(ELECGroup_ID)
self.log.info("Token Transactions Electron Token\n%s\n" % ELEC_Trans)
ElecTrans=ELEC_Trans[0]['txid']
ELEC_BlockHash=self.nodes[0].getblockhash(200)
self.log.info("Electron Transaction\n%s" % self.nodes[0].gettokentransaction(ElecTrans))
self.log.info("Blockhash block 200 %s" % ELEC_BlockHash)
self.log.info("\nTransaction ID %s" % ElecTrans)
self.log.info("Transaction Details %s" % self.nodes[0].gettokentransaction(ElecTrans, ELEC_BlockHash))
self.log.info("\nList tokens since block 200 Hulk\n%s" % self.nodes[0].listtokenssinceblock(ELECGroup_ID, ELEC_BlockHash))
tokenHulkUnspent=self.nodes[0].listunspenttokens(HulkGroup_ID)
newHulk=self.nodes[0].getnewaddress()
self.log.info("Send tokens to new address %s" % self.nodes[0].sendtoken(HulkGroup_ID, newHulk, 2))
self.nodes[0].generate(1)
self.log.info(self.nodes[1].getaddressbalance)
subgroupID=self.nodes[0].getsubgroupid(HulkGroup_ID,"Bruce_Banner")
self.log.info("Subgroup Info %s " % self.nodes[0].tokeninfo('groupid',subgroupID))
self.log.info("\nUnspent Tokens Hulk Token\n%s\n" % tokenHulkUnspent)
tokenReceiveAddr=self.nodes[1].getnewaddress()
rawTxid=tokenHulkUnspent[0]['txid']
rawVout=tokenHulkUnspent[0]['vout']
rawAddr=tokenReceiveAddr
rawAmount=0.01
self.log.info("txid %s" % rawTxid)
self.log.info("vout %s" % rawVout)
self.log.info("recaddr %s" % rawAddr)
self.log.info("amount %s" % rawAmount )
inputs=[{ "txid" : rawTxid, "vout" : rawVout }]
outputs={ rawAddr : rawAmount }
token={ rawAddr : { "amount" : 0.001, "groupid" : HulkGroup_ID, "token_amount" : 0.1 }}
self.log.info(str(inputs))
self.log.info(outputs)
self.log.info(token)
# ICC 86
#rawtx=self.nodes[0].createrawtokentransaction(inputs, outputs, token)
#self.log.info(rawtx)
#time.sleep(3600)
if __name__ == '__main__':
TokenTest().main()
|
mit
| 5,659,994,111,610,937,000 | -6,372,162,263,945,583,000 | 55.818792 | 245 | 0.66773 | false |
axmachado/simplepos
|
simplepos/objfile/module.py
|
1
|
5296
|
# -*- coding: utf-8 -*-
"""
Copyright © 2017 - Alexandre Machado <[email protected]>
This file is part of Simple POS Compiler.
Simple POS Compiler is free software: you can redistribute it
and/or modify it under the terms of the GNU General Public License
as published by the Free Software Foundation, either version 3
of the License, or (at your option) any later version.
Simple POS Compiler is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Simple POS Compiler. If not, see <http://www.gnu.org/licenses/>.
@author: Alexandre Machado <[email protected]>
Module definition code
A Module is the result of a SimplePOS source file compilation.
Modules can be named (main modules) or
anonymous (linked along with main modules)
"""
from .block import CallableBlock
class Module(CallableBlock):
"""
The main construct of the intermediate representation
of the program, a Module is an image of a source code file
Attributes:
sourceFile: the source file name
objectFile: the object file name
functions: name indexed dictionary of functions defined in this module
    externalFunctions: name indexed dictionary of functions defined in
    other modules that must be linked with this one.
"""
def __init__(self, sourceFile, objFile):
super(Module, self).__init__()
self.sourceFile = sourceFile
self.objFile = objFile
self.functions = {}
self.externalFunctions = {}
self.constants = {}
self.externalConstants = {}
def printStats(self):
print("Module name:", self.name)
print("Source file: ", self.sourceFile)
print("Object file: ", self.objFile)
super(Module, self).printStats()
print("Defined functions:", len(self.functions))
print(" " + ", ".join(self.functions[x].name
for x in self.functions))
def findFunction(self, name):
"""
Find a function on the scope of the module.
This method will find all functions defined inside the module,
and all built in functions.
"""
from .functions import UndefinedFunction
from ..api import findApiFunction
try:
# first, try to find it as an API functions
func = findApiFunction(name)
except KeyError:
if name in self.functions:
# defined here
func = self.functions[name]
elif name in self.externalFunctions:
# already used and defined as external
func = self.externalFunctions[name]
else:
# not found, adding as an external reference
func = UndefinedFunction(name)
self.externalFunctions[name] = func
return func
def canResolveUndefined(self, function):
from .typedefs import UNDEF
theUndef = self.externalFunctions[function.name]
if len(theUndef.arguments) > 0:
if len(function.arguments) != len(theUndef.arguments):
return False
combo = zip(function.arguments, theUndef.arguments)
for (argf, argu) in combo:
if argf.type_ != argu.type_:
return False
if theUndef.returnType != UNDEF:
if theUndef.returnType != function.returnType:
return False
return True
def addFunction(self, function):
fname = function.name
if fname in self.externalFunctions:
if self.canResolveUndefined(function):
del self.externalFunctions[fname]
else:
raise ValueError('Defined function incompatible with '
'previous calls: ' + fname)
if fname in self.functions:
raise ValueError('Duplicated function definition: ' + fname)
self.functions[fname] = function
def addExternalConstant(self, name, value):
self.externalConstants[name] = value
def addLocalConstant(self, name, value):
self.constants[name] = value
def replaceVariableReferences(self, varName, variable):
super(Module, self).replaceVariableReferences(varName, variable)
for function in self.functions.values():
function.replaceGlobalVariableReferences(varName, variable)
def resolveExternalConstant(self, name, value):
if name in self.externalConstants:
super(Module, self).resolveExternalConstant(name, value)
for function in self.functions.values():
function.resolveExternalConstant(name, value)
del self.externalConstants[name]
def __str__(self):
partial = super(Module, self).__str__()
if len(self.functions) > 0:
partial += "\n\n"
for fcn in self.functions:
partial += str(self.functions[fcn])
partial += "\n"
return partial
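# --- Editor's sketch (illustrative; not part of the original module) ---
# Resolution order in Module.findFunction(), per its docstring; the file
# and function names below are hypothetical placeholders:
#   mod = Module('prog.sps', 'prog.spo')
#   mod.findFunction('some_api_fn')    # 1. API built-in, if one matches
#   mod.findFunction('local_fn')       # 2. else a function defined here
#   mod.findFunction('other_mod_fn')   # 3. else recorded in
#                                      #    externalFunctions as undefined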
|
gpl-3.0
| 8,268,632,714,541,786,000 | -349,238,880,933,023,740 | 34.066225 | 79 | 0.622663 | false |
praekelt/molo
|
molo/core/migrations/0020_add-social-media-fields-to-article-page.py
|
1
|
1042
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0010_change_on_delete_behaviour'),
('core', '0019_add_tags_to_article'),
]
operations = [
migrations.AddField(
model_name='articlepage',
name='social_media_description',
field=models.TextField(null=True, verbose_name=b'description', blank=True),
),
migrations.AddField(
model_name='articlepage',
name='social_media_image',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, verbose_name=b'Image', blank=True, to='wagtailimages.Image', null=True),
),
migrations.AddField(
model_name='articlepage',
name='social_media_title',
field=models.TextField(null=True, verbose_name=b'title', blank=True),
),
]
|
bsd-2-clause
| -1,562,984,946,286,097,400 | -8,296,195,656,547,708,000 | 32.612903 | 172 | 0.613244 | false |
chunyisong/shadowsocks
|
shadowsocks/crypto/openssl.py
|
1038
|
5414
|
#!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
from shadowsocks import common
from shadowsocks.crypto import util
__all__ = ['ciphers']
libcrypto = None
loaded = False
buf_size = 2048
def load_openssl():
global loaded, libcrypto, buf
libcrypto = util.find_library(('crypto', 'eay32'),
'EVP_get_cipherbyname',
'libcrypto')
if libcrypto is None:
raise Exception('libcrypto(OpenSSL) not found')
libcrypto.EVP_get_cipherbyname.restype = c_void_p
libcrypto.EVP_CIPHER_CTX_new.restype = c_void_p
libcrypto.EVP_CipherInit_ex.argtypes = (c_void_p, c_void_p, c_char_p,
c_char_p, c_char_p, c_int)
libcrypto.EVP_CipherUpdate.argtypes = (c_void_p, c_void_p, c_void_p,
c_char_p, c_int)
libcrypto.EVP_CIPHER_CTX_cleanup.argtypes = (c_void_p,)
libcrypto.EVP_CIPHER_CTX_free.argtypes = (c_void_p,)
if hasattr(libcrypto, 'OpenSSL_add_all_ciphers'):
libcrypto.OpenSSL_add_all_ciphers()
buf = create_string_buffer(buf_size)
loaded = True
def load_cipher(cipher_name):
func_name = 'EVP_' + cipher_name.replace('-', '_')
if bytes != str:
func_name = str(func_name, 'utf-8')
cipher = getattr(libcrypto, func_name, None)
if cipher:
cipher.restype = c_void_p
return cipher()
return None
class OpenSSLCrypto(object):
def __init__(self, cipher_name, key, iv, op):
self._ctx = None
if not loaded:
load_openssl()
cipher_name = common.to_bytes(cipher_name)
cipher = libcrypto.EVP_get_cipherbyname(cipher_name)
if not cipher:
cipher = load_cipher(cipher_name)
if not cipher:
raise Exception('cipher %s not found in libcrypto' % cipher_name)
key_ptr = c_char_p(key)
iv_ptr = c_char_p(iv)
self._ctx = libcrypto.EVP_CIPHER_CTX_new()
if not self._ctx:
raise Exception('can not create cipher context')
r = libcrypto.EVP_CipherInit_ex(self._ctx, cipher, None,
key_ptr, iv_ptr, c_int(op))
if not r:
self.clean()
raise Exception('can not initialize cipher context')
def update(self, data):
global buf_size, buf
cipher_out_len = c_long(0)
l = len(data)
if buf_size < l:
buf_size = l * 2
buf = create_string_buffer(buf_size)
libcrypto.EVP_CipherUpdate(self._ctx, byref(buf),
byref(cipher_out_len), c_char_p(data), l)
# buf is copied to a str object when we access buf.raw
return buf.raw[:cipher_out_len.value]
def __del__(self):
self.clean()
def clean(self):
if self._ctx:
libcrypto.EVP_CIPHER_CTX_cleanup(self._ctx)
libcrypto.EVP_CIPHER_CTX_free(self._ctx)
ciphers = {
'aes-128-cfb': (16, 16, OpenSSLCrypto),
'aes-192-cfb': (24, 16, OpenSSLCrypto),
'aes-256-cfb': (32, 16, OpenSSLCrypto),
'aes-128-ofb': (16, 16, OpenSSLCrypto),
'aes-192-ofb': (24, 16, OpenSSLCrypto),
'aes-256-ofb': (32, 16, OpenSSLCrypto),
'aes-128-ctr': (16, 16, OpenSSLCrypto),
'aes-192-ctr': (24, 16, OpenSSLCrypto),
'aes-256-ctr': (32, 16, OpenSSLCrypto),
'aes-128-cfb8': (16, 16, OpenSSLCrypto),
'aes-192-cfb8': (24, 16, OpenSSLCrypto),
'aes-256-cfb8': (32, 16, OpenSSLCrypto),
'aes-128-cfb1': (16, 16, OpenSSLCrypto),
'aes-192-cfb1': (24, 16, OpenSSLCrypto),
'aes-256-cfb1': (32, 16, OpenSSLCrypto),
'bf-cfb': (16, 8, OpenSSLCrypto),
'camellia-128-cfb': (16, 16, OpenSSLCrypto),
'camellia-192-cfb': (24, 16, OpenSSLCrypto),
'camellia-256-cfb': (32, 16, OpenSSLCrypto),
'cast5-cfb': (16, 8, OpenSSLCrypto),
'des-cfb': (8, 8, OpenSSLCrypto),
'idea-cfb': (16, 8, OpenSSLCrypto),
'rc2-cfb': (16, 8, OpenSSLCrypto),
'rc4': (16, 0, OpenSSLCrypto),
'seed-cfb': (16, 16, OpenSSLCrypto),
}
def run_method(method):
cipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 1)
decipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 0)
util.run_cipher(cipher, decipher)
def test_aes_128_cfb():
run_method('aes-128-cfb')
def test_aes_256_cfb():
run_method('aes-256-cfb')
def test_aes_128_cfb8():
run_method('aes-128-cfb8')
def test_aes_256_ofb():
run_method('aes-256-ofb')
def test_aes_256_ctr():
run_method('aes-256-ctr')
def test_bf_cfb():
run_method('bf-cfb')
def test_rc4():
run_method('rc4')
if __name__ == '__main__':
test_aes_128_cfb()
|
apache-2.0
| 9,186,750,861,636,387,000 | -4,988,745,238,474,006,000 | 28.911602 | 77 | 0.597525 | false |
befelix/GPy
|
GPy/util/normalizer.py
|
1
|
2996
|
'''
Created on Aug 27, 2014
@author: Max Zwiessele
'''
import numpy as np
class _Norm(object):
def __init__(self):
pass
def scale_by(self, Y):
"""
Use data matrix Y as normalization space to work in.
"""
raise NotImplementedError
def normalize(self, Y):
"""
Project Y into normalized space
"""
if not self.scaled():
raise AttributeError("Norm object not initialized yet, try calling scale_by(data) first.")
def inverse_mean(self, X):
"""
Project the normalized object X into space of Y
"""
raise NotImplementedError
def inverse_variance(self, var):
return var
def inverse_covariance(self, covariance):
"""
Convert scaled covariance to unscaled.
Args:
covariance - numpy array of shape (n, n)
Returns:
covariance - numpy array of shape (n, n, m) where m is number of
outputs
"""
raise NotImplementedError
def scaled(self):
"""
Whether this Norm object has been initialized.
"""
raise NotImplementedError
def to_dict(self):
raise NotImplementedError
def _to_dict(self):
input_dict = {}
return input_dict
@staticmethod
def from_dict(input_dict):
import copy
input_dict = copy.deepcopy(input_dict)
normalizer_class = input_dict.pop('class')
import GPy
normalizer_class = eval(normalizer_class)
return normalizer_class._from_dict(normalizer_class, input_dict)
@staticmethod
def _from_dict(normalizer_class, input_dict):
return normalizer_class(**input_dict)
class Standardize(_Norm):
def __init__(self):
self.mean = None
def scale_by(self, Y):
Y = np.ma.masked_invalid(Y, copy=False)
self.mean = Y.mean(0).view(np.ndarray)
self.std = Y.std(0).view(np.ndarray)
def normalize(self, Y):
super(Standardize, self).normalize(Y)
return (Y-self.mean)/self.std
def inverse_mean(self, X):
return (X*self.std)+self.mean
def inverse_variance(self, var):
return (var*(self.std**2))
def inverse_covariance(self, covariance):
return (covariance[..., np.newaxis]*(self.std**2))
def scaled(self):
return self.mean is not None
def to_dict(self):
input_dict = super(Standardize, self)._to_dict()
input_dict["class"] = "GPy.util.normalizer.Standardize"
if self.mean is not None:
input_dict["mean"] = self.mean.tolist()
input_dict["std"] = self.std.tolist()
return input_dict
@staticmethod
def _from_dict(kernel_class, input_dict):
s = Standardize()
if "mean" in input_dict:
s.mean = np.array(input_dict["mean"])
if "std" in input_dict:
s.std = np.array(input_dict["std"])
return s
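if __name__ == '__main__':
    # Editor's sketch (illustrative; not part of the original module):
    # round-trip a toy data matrix through Standardize; values are made up.
    y = np.random.randn(50, 2) * 4.0 + 10.0
    norm = Standardize()
    norm.scale_by(y)
    y_norm = norm.normalize(y)          # ~zero mean, ~unit std per column
    y_back = norm.inverse_mean(y_norm)  # recovers y up to round-off
    assert np.allclose(y, y_back)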
|
bsd-3-clause
| 146,413,978,136,254,500 | -2,983,139,890,466,080,300 | 25.513274 | 102 | 0.578104 | false |
greenape/gem-module
|
gaussianemulation/uncertainty.py
|
1
|
2386
|
from sympy import *
from mpmath import *
from util import *
def E(r_h, b_hat, r_t, e):
return r_h.T*b_hat + r_t.T*e
def V(sigma, u, r_t, A_inv, r_h, g, w):
    # The original also built an intermediate 'res' matrix and then discarded
    # it; only the returned expression is kept.
    return sigma*(u - r_t.T*A_inv*r_t + (r_h - g.T*r_t).T*w*(r_h - g.T*r_t))
def do_E_var(i_1, i_2, V_, E_):
    # E*{Var} = (I1 - V) + (I2 - E^2); the original read an undefined name
    # 'v' where the variance argument V_ was intended.
    return (i_1 - V_) + (i_2 - power(E_, 2.))
def E_var():
r_tt = R_tt(D, C, B, m, v)
r_hh = R_hh(m, B)
r_ht = R_ht(D, B, C, v, m, h)
i_1 = I_1(s_hat_sq, A_inv, r_tt, w, r_hh, r_ht, g)
i_2 = I_2(b_hat, r_hh, r_ht, e_, r_tt)
return do_E_var(i_1, i_2[0,0], V_[0,0], E_[0,0])
def I_1(sigma, A_inv, r_tt, w, r_hh, r_ht, g):
return sigma*(mpf(1)-Trace(A_inv*r_tt) + Trace(w*(r_hh - 2*r_ht*g + g.T*r_tt*g)))
def I_2(beta, r_hh, r_ht, e_, r_tt):
return beta.T*r_hh*beta + 2*beta.T*r_ht*e_ + e_.T*r_tt*e_
def Q_kl(x, xk, xl, C, B, m):
return 2*(x - xk).T*C*(x - xk) + 2*(x - xl).T*C*(x - xl) + (x - m).T*B*(x - m)
def Q_k(x, xk, m, B, C):
return (2*(x - xk).T*C*(x - xk) + (x-m).T*B*(x-m))[0,0]
def m_kl(xk, xl, C, B, m):
return ((4*C + B)**-1)*(2*C*xk + 2*C*xl + B*m)
def m_k(x, C, B, m):
return ((2*C + B)**-1)*(2*C*x + B*m)
def R_h(m):
return Matrix([1]).col_join(m)
def R_hh(m, B):
#np.vstack((np.hstack(([[1]], m.T)), np.hstack((m, m.dot(m.T) + B.getI()))))
return Matrix([1]).row_join(m.T).col_join(m.row_join(m*m.T + B**-1))
def R_ht(D, B, C, v, m, h):
return reduce(lambda x, y: x.row_join(y),map(lambda k: R_ht_elem(D, k, B, C, v, m, h), range(D.cols))) #matrix
def R_ht_elem(X, k, B, C, v, m, h):
x = X[:,k]
m_prime_k = m_k(x, C, B, m)
return R_t(X, k, B, C, v, m)*Matrix([1]).col_join(m_prime_k)
def R_tt(D, C, B, m, v):
return Matrix(D.cols, D.cols, lambda i, j: R_tt_element(D, i, j, C, B, m, v))
def R_tt_element(x, k, l, C, B, m, v):
xk = x[:,k]
xl = x[:,l]
qkl = Q_kl(m_kl(xk, xl, C, B, m), xk, xl, C, B, m)[0,0]
return power(1-v, 2.)*power(det(B), 0.5)*power(det(4*C + B), -0.5)*exp(- qkl/2.)
def R_t(D, B, C, v, m):
return Matrix(map(lambda k: R_t_elem(D, k, B, C, v, m), range(D.cols)))
def R_t_elem(X, k, B, C, v, m):
X = X[:,k]
m_prime_k = m_k(X, C, B, m)
q_k = Q_k(m_prime_k, X, m, B, C)
return (1-v)*power(det(B), 0.5)*power(det(2*C + B), -0.5)*exp(-q_k/2.)
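if __name__ == '__main__':
    # Editor's sketch (illustrative; not part of the original module):
    # evaluate the posterior-mean helper E() on toy 2x1 column vectors;
    # every value below is made up purely for illustration, and running
    # this assumes the module's own imports (including util) resolve.
    r_h = Matrix([[1.0], [0.5]])
    b_hat = Matrix([[2.0], [3.0]])
    r_t = Matrix([[0.2], [0.1]])
    e = Matrix([[0.5], [1.5]])
    print(E(r_h, b_hat, r_t, e))  # 1x1 Matrix: r_h.T*b_hat + r_t.T*e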
|
mpl-2.0
| 1,959,638,479,073,149,000 | 5,279,150,871,914,311,000 | 30.407895 | 114 | 0.475272 | false |
zorroblue/scikit-learn
|
examples/model_selection/plot_roc.py
|
102
|
5056
|
"""
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`sphx_glr_auto_examples_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
lw = 2
plt.plot(fpr[2], tpr[2], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
|
bsd-3-clause
| 6,666,611,373,114,471,000 | 77,071,120,957,055,330 | 33.162162 | 80 | 0.654668 | false |
jralls/gramps
|
gramps/gen/filters/rules/note/_hasreferencecountof.py
|
6
|
1716
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007 Stephane Charette
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .._hasreferencecountbase import HasReferenceCountBase
#-------------------------------------------------------------------------
# "Notes with a certain reference count"
#-------------------------------------------------------------------------
class HasReferenceCountOf(HasReferenceCountBase):
"""Notes with a reference count of <count>"""
name = _('Notes with a reference count of <count>')
description = _("Matches notes with a certain reference count")
|
gpl-2.0
| -5,319,499,163,612,988,000 | 8,319,015,025,329,274,000 | 38 | 79 | 0.551865 | false |
jjlee3/openthread
|
tools/harness-automation/cases/leader_5_1_1.py
|
16
|
1875
|
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class Leader_5_1_1(HarnessCase):
role = HarnessCase.ROLE_LEADER
case = '5 1 1'
golden_devices_required = 1
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
| 1,605,230,022,392,501,800 | -7,728,431,371,881,209,000 | 42.604651 | 77 | 0.762667 | false |
potsmaster/cinder
|
cinder/tests/unit/test_infortrend_common.py
|
18
|
77180
|
# Copyright (c) 2015 Infortrend Technology, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from cinder import exception
from cinder import test
from cinder.tests.unit import test_infortrend_cli
from cinder.tests.unit import utils
from cinder.volume import configuration
from cinder.volume.drivers.infortrend.eonstor_ds_cli import common_cli
SUCCEED = (0, '')
FAKE_ERROR_RETURN = (-1, '')
class InfortrendTestCass(test.TestCase):
def __init__(self, *args, **kwargs):
super(InfortrendTestCass, self).__init__(*args, **kwargs)
def setUp(self):
super(InfortrendTestCass, self).setUp()
self.cli_data = test_infortrend_cli.InfortrendCLITestData()
self.configuration = configuration.Configuration(None)
self.configuration.append_config_values = mock.Mock(return_value=0)
self.configuration.safe_get = self._fake_safe_get
def _fake_safe_get(self, key):
return getattr(self.configuration, key)
def _driver_setup(self, mock_commands, configuration=None):
if configuration is None:
configuration = self.configuration
self.driver = self._get_driver(configuration)
mock_commands_execute = self._mock_command_execute(mock_commands)
mock_cli = mock.Mock(side_effect=mock_commands_execute)
self.driver._execute_command = mock_cli
def _get_driver(self, conf):
raise NotImplementedError
def _mock_command_execute(self, mock_commands):
def fake_execute_command(cli_type, *args, **kwargs):
if cli_type in mock_commands.keys():
if isinstance(mock_commands[cli_type], list):
ret = mock_commands[cli_type][0]
del mock_commands[cli_type][0]
return ret
elif isinstance(mock_commands[cli_type], tuple):
return mock_commands[cli_type]
else:
return mock_commands[cli_type](*args, **kwargs)
return FAKE_ERROR_RETURN
return fake_execute_command
def _mock_show_lv_for_migrate(self, *args, **kwargs):
if 'tier' in args:
return self.cli_data.get_test_show_lv_tier_for_migration()
return self.cli_data.get_test_show_lv()
def _mock_show_lv(self, *args, **kwargs):
if 'tier' in args:
return self.cli_data.get_test_show_lv_tier()
return self.cli_data.get_test_show_lv()
def _assert_cli_has_calls(self, expect_cli_cmd):
self.driver._execute_command.assert_has_calls(expect_cli_cmd)
class InfortrendFCCommonTestCase(InfortrendTestCass):
def __init__(self, *args, **kwargs):
super(InfortrendFCCommonTestCase, self).__init__(*args, **kwargs)
def setUp(self):
super(InfortrendFCCommonTestCase, self).setUp()
self.configuration.volume_backend_name = 'infortrend_backend_1'
self.configuration.san_ip = self.cli_data.fake_manage_port_ip[0]
self.configuration.san_password = '111111'
self.configuration.infortrend_provisioning = 'full'
self.configuration.infortrend_tiering = '0'
self.configuration.infortrend_pools_name = 'LV-1, LV-2'
self.configuration.infortrend_slots_a_channels_id = '0,5'
self.configuration.infortrend_slots_b_channels_id = '0,5'
self.configuration.infortrend_cli_timeout = 30
def _get_driver(self, conf):
return common_cli.InfortrendCommon('FC', configuration=conf)
def test_normal_channel(self):
test_map_dict = {
'slot_a': {'0': [], '5': []},
'slot_b': {},
}
test_target_dict = {
'slot_a': {'0': '112', '5': '112'},
'slot_b': {},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
}
self._driver_setup(mock_commands)
self.driver._init_map_info(True)
self.assertDictMatch(self.driver.map_dict, test_map_dict)
self.assertDictMatch(self.driver.target_dict, test_target_dict)
def test_normal_channel_with_r_model(self):
test_map_dict = {
'slot_a': {'0': [], '5': []},
'slot_b': {'0': [], '5': []},
}
test_target_dict = {
'slot_a': {'0': '112', '5': '112'},
'slot_b': {'0': '113', '5': '113'},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
}
self._driver_setup(mock_commands)
self.driver._init_map_info(True)
self.assertDictMatch(self.driver.map_dict, test_map_dict)
self.assertDictMatch(self.driver.target_dict, test_target_dict)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_fc
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_without_mcs(),
'ShowMap': self.cli_data.get_test_show_map(),
'CreateMap': SUCCEED,
'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
}
self._driver_setup(mock_commands)
properties = self.driver.initialize_connection(
test_volume, test_connector)
self.assertDictMatch(properties, self.cli_data.test_fc_properties)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_specific_channel(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_fc
configuration = copy.copy(self.configuration)
configuration.infortrend_slots_a_channels_id = '5'
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
'ShowMap': self.cli_data.get_test_show_map(),
'CreateMap': SUCCEED,
'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
}
self._driver_setup(mock_commands, configuration)
properties = self.driver.initialize_connection(
test_volume, test_connector)
self.assertDictMatch(
properties, self.cli_data.test_fc_properties_with_specific_channel)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_with_diff_target_id(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_fc
test_initiator_wwpns = test_connector['wwpns']
test_partition_id = self.cli_data.fake_partition_id[0]
configuration = copy.copy(self.configuration)
configuration.infortrend_slots_a_channels_id = '5'
mock_commands = {
'ShowChannel':
self.cli_data.get_test_show_channel_with_diff_target_id(),
'ShowMap': self.cli_data.get_test_show_map(),
'CreateMap': SUCCEED,
'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
}
self._driver_setup(mock_commands, configuration)
properties = self.driver.initialize_connection(
test_volume, test_connector)
expect_cli_cmd = [
mock.call('ShowChannel'),
mock.call('ShowMap'),
mock.call('ShowWWN'),
mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0',
'wwn=%s' % test_initiator_wwpns[0]),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertDictMatch(
properties, self.cli_data.test_fc_properties_with_specific_channel)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_multipath_with_r_model(self):
test_volume = self.cli_data.test_volume
test_connector = copy.deepcopy(self.cli_data.test_connector_fc)
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
'ShowMap': self.cli_data.get_test_show_map(),
'CreateMap': SUCCEED,
'ShowWWN': self.cli_data.get_test_show_wwn(),
}
self._driver_setup(mock_commands)
properties = self.driver.initialize_connection(
test_volume, test_connector)
self.assertDictMatch(
properties, self.cli_data.test_fc_properties_multipath_r_model)
def test_initialize_connection_with_get_wwn_fail(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_fc
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
'ShowMap': self.cli_data.get_test_show_map(),
'CreateMap': SUCCEED,
'ShowWWN': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.initialize_connection,
test_volume,
test_connector)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_with_zoning(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_fc
test_initiator_wwpns = test_connector['wwpns']
test_partition_id = self.cli_data.fake_partition_id[0]
test_all_target_wwpns = self.cli_data.fake_target_wwpns[0:2]
test_lookup_map = self.cli_data.fake_lookup_map
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
'ShowMap': self.cli_data.get_test_show_map(),
'CreateMap': SUCCEED,
'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
}
self._driver_setup(mock_commands)
self.driver.fc_lookup_service = mock.Mock()
get_device_mapping_from_network = (
self.driver.fc_lookup_service.get_device_mapping_from_network
)
get_device_mapping_from_network.return_value = test_lookup_map
properties = self.driver.initialize_connection(
test_volume, test_connector)
get_device_mapping_from_network.assert_has_calls(
[mock.call(test_connector['wwpns'], test_all_target_wwpns)])
expect_cli_cmd = [
mock.call('ShowChannel'),
mock.call('ShowMap'),
mock.call('ShowWWN'),
mock.call('CreateMap', 'part', test_partition_id, '0', '112', '0',
'wwn=%s' % test_initiator_wwpns[0]),
mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0',
'wwn=%s' % test_initiator_wwpns[0]),
mock.call('CreateMap', 'part', test_partition_id, '0', '112', '0',
'wwn=%s' % test_initiator_wwpns[1]),
mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0',
'wwn=%s' % test_initiator_wwpns[1]),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertDictMatch(
properties, self.cli_data.test_fc_properties_zoning)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_with_zoning_r_model(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_fc
test_initiator_wwpns = test_connector['wwpns']
test_partition_id = self.cli_data.fake_partition_id[0]
test_all_target_wwpns = self.cli_data.fake_target_wwpns[:]
test_all_target_wwpns[1] = self.cli_data.fake_target_wwpns[2]
test_all_target_wwpns[2] = self.cli_data.fake_target_wwpns[1]
test_lookup_map = self.cli_data.fake_lookup_map_r_model
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
'ShowMap': self.cli_data.get_test_show_map(),
'CreateMap': SUCCEED,
'ShowWWN': self.cli_data.get_test_show_wwn(),
}
self._driver_setup(mock_commands)
self.driver.fc_lookup_service = mock.Mock()
get_device_mapping_from_network = (
self.driver.fc_lookup_service.get_device_mapping_from_network
)
get_device_mapping_from_network.return_value = test_lookup_map
properties = self.driver.initialize_connection(
test_volume, test_connector)
get_device_mapping_from_network.assert_has_calls(
[mock.call(test_connector['wwpns'], test_all_target_wwpns)])
expect_cli_cmd = [
mock.call('ShowChannel'),
mock.call('ShowMap'),
mock.call('ShowWWN'),
mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0',
'wwn=%s' % test_initiator_wwpns[0]),
mock.call('CreateMap', 'part', test_partition_id, '0', '113', '0',
'wwn=%s' % test_initiator_wwpns[0]),
mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0',
'wwn=%s' % test_initiator_wwpns[1]),
mock.call('CreateMap', 'part', test_partition_id, '0', '113', '0',
'wwn=%s' % test_initiator_wwpns[1]),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertDictMatch(
properties, self.cli_data.test_fc_properties_zoning_r_model)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_with_zoning_r_model_diff_target_id(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_fc
test_initiator_wwpns = test_connector['wwpns']
test_partition_id = self.cli_data.fake_partition_id[0]
test_all_target_wwpns = self.cli_data.fake_target_wwpns[:]
test_all_target_wwpns[1] = self.cli_data.fake_target_wwpns[2]
test_all_target_wwpns[2] = self.cli_data.fake_target_wwpns[1]
test_lookup_map = self.cli_data.fake_lookup_map_r_model
mock_commands = {
'ShowChannel':
self.cli_data.get_test_show_channel_r_model_diff_target_id(),
'ShowMap': self.cli_data.get_test_show_map(),
'CreateMap': SUCCEED,
'ShowWWN': self.cli_data.get_test_show_wwn_with_diff_target_id(),
}
self._driver_setup(mock_commands)
self.driver.fc_lookup_service = mock.Mock()
get_device_mapping_from_network = (
self.driver.fc_lookup_service.get_device_mapping_from_network
)
get_device_mapping_from_network.return_value = test_lookup_map
properties = self.driver.initialize_connection(
test_volume, test_connector)
get_device_mapping_from_network.assert_has_calls(
[mock.call(test_connector['wwpns'], test_all_target_wwpns)])
expect_cli_cmd = [
mock.call('ShowChannel'),
mock.call('ShowMap'),
mock.call('ShowWWN'),
mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0',
'wwn=%s' % test_initiator_wwpns[0]),
mock.call('CreateMap', 'part', test_partition_id, '0', '33', '0',
'wwn=%s' % test_initiator_wwpns[0]),
mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0',
'wwn=%s' % test_initiator_wwpns[1]),
mock.call('CreateMap', 'part', test_partition_id, '0', '33', '0',
'wwn=%s' % test_initiator_wwpns[1]),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertDictMatch(
properties, self.cli_data.test_fc_properties_zoning_r_model)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_terminate_connection(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_connector = self.cli_data.test_connector_fc
mock_commands = {
'DeleteMap': SUCCEED,
'ShowMap': self.cli_data.get_test_show_map(),
}
self._driver_setup(mock_commands)
self.driver.terminate_connection(test_volume, test_connector)
expect_cli_cmd = [
mock.call('DeleteMap', 'part', test_partition_id, '-y'),
mock.call('ShowMap'),
]
self._assert_cli_has_calls(expect_cli_cmd)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_terminate_connection_with_zoning(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_connector = self.cli_data.test_connector_fc
test_all_target_wwpns = self.cli_data.fake_target_wwpns[0:2]
test_lookup_map = self.cli_data.fake_lookup_map
mock_commands = {
'DeleteMap': SUCCEED,
'ShowMap': self.cli_data.get_test_show_map(),
'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
}
self._driver_setup(mock_commands)
self.driver.map_dict = {
'slot_a': {'0': [], '5': []},
'slot_b': {},
}
self.driver.fc_lookup_service = mock.Mock()
get_device_mapping_from_network = (
self.driver.fc_lookup_service.get_device_mapping_from_network
)
get_device_mapping_from_network.return_value = test_lookup_map
conn_info = self.driver.terminate_connection(
test_volume, test_connector)
get_device_mapping_from_network.assert_has_calls(
[mock.call(test_connector['wwpns'], test_all_target_wwpns)])
expect_cli_cmd = [
mock.call('DeleteMap', 'part', test_partition_id, '-y'),
mock.call('ShowMap'),
mock.call('ShowWWN'),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertDictMatch(
conn_info, self.cli_data.test_fc_terminate_conn_info)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_terminate_connection_with_zoning_and_lun_map_exist(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_connector = self.cli_data.test_connector_fc
mock_commands = {
'DeleteMap': SUCCEED,
'ShowMap': self.cli_data.get_show_map_with_lun_map_on_zoning(),
}
self._driver_setup(mock_commands)
self.driver.map_dict = {
'slot_a': {'0': [], '5': []},
'slot_b': {},
}
self.driver.target_dict = {
'slot_a': {'0': '112', '5': '112'},
'slot_b': {},
}
self.driver.fc_lookup_service = mock.Mock()
conn_info = self.driver.terminate_connection(
test_volume, test_connector)
expect_cli_cmd = [
mock.call('DeleteMap', 'part', test_partition_id, '-y'),
mock.call('ShowMap'),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertEqual(None, conn_info)
class InfortrendiSCSICommonTestCase(InfortrendTestCass):
def __init__(self, *args, **kwargs):
super(InfortrendiSCSICommonTestCase, self).__init__(*args, **kwargs)
def setUp(self):
super(InfortrendiSCSICommonTestCase, self).setUp()
self.configuration.volume_backend_name = 'infortrend_backend_1'
self.configuration.san_ip = self.cli_data.fake_manage_port_ip[0]
self.configuration.san_password = '111111'
self.configuration.infortrend_provisioning = 'full'
self.configuration.infortrend_tiering = '0'
self.configuration.infortrend_pools_name = 'LV-1, LV-2'
self.configuration.infortrend_slots_a_channels_id = '1,2,4'
self.configuration.infortrend_slots_b_channels_id = '1,2,4'
def _get_driver(self, conf):
return common_cli.InfortrendCommon('iSCSI', configuration=conf)
@mock.patch.object(common_cli.LOG, 'warning')
def test_create_map_warning_return_code(self, log_warning):
FAKE_RETURN_CODE = (20, '')
mock_commands = {
'CreateMap': FAKE_RETURN_CODE,
}
self._driver_setup(mock_commands)
self.driver._execute('CreateMap')
self.assertEqual(1, log_warning.call_count)
@mock.patch.object(common_cli.LOG, 'warning')
def test_delete_map_warning_return_code(self, log_warning):
FAKE_RETURN_CODE = (11, '')
mock_commands = {
'DeleteMap': FAKE_RETURN_CODE,
}
self._driver_setup(mock_commands)
self.driver._execute('DeleteMap')
self.assertEqual(1, log_warning.call_count)
@mock.patch.object(common_cli.LOG, 'warning')
def test_create_iqn_warning_return_code(self, log_warning):
FAKE_RETURN_CODE = (20, '')
mock_commands = {
'CreateIQN': FAKE_RETURN_CODE,
}
self._driver_setup(mock_commands)
self.driver._execute('CreateIQN')
self.assertEqual(1, log_warning.call_count)
@mock.patch.object(common_cli.LOG, 'warning')
def test_delete_iqn_warning_return_code_has_map(self, log_warning):
FAKE_RETURN_CODE = (20, '')
mock_commands = {
'DeleteIQN': FAKE_RETURN_CODE,
}
self._driver_setup(mock_commands)
self.driver._execute('DeleteIQN')
self.assertEqual(1, log_warning.call_count)
@mock.patch.object(common_cli.LOG, 'warning')
def test_delete_iqn_warning_return_code_no_such_name(self, log_warning):
FAKE_RETURN_CODE = (11, '')
mock_commands = {
'DeleteIQN': FAKE_RETURN_CODE,
}
self._driver_setup(mock_commands)
self.driver._execute('DeleteIQN')
self.assertEqual(1, log_warning.call_count)
def test_normal_channel(self):
test_map_dict = {
'slot_a': {'1': [], '2': [], '4': []},
'slot_b': {},
}
test_target_dict = {
'slot_a': {'1': '0', '2': '0', '4': '0'},
'slot_b': {},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
}
self._driver_setup(mock_commands)
self.driver._init_map_info()
self.assertDictMatch(self.driver.map_dict, test_map_dict)
self.assertDictMatch(self.driver.target_dict, test_target_dict)
def test_normal_channel_with_multipath(self):
test_map_dict = {
'slot_a': {'1': [], '2': [], '4': []},
'slot_b': {'1': [], '2': [], '4': []},
}
test_target_dict = {
'slot_a': {'1': '0', '2': '0', '4': '0'},
'slot_b': {'1': '1', '2': '1', '4': '1'},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
}
self._driver_setup(mock_commands)
self.driver._init_map_info(multipath=True)
self.assertDictMatch(self.driver.map_dict, test_map_dict)
self.assertDictMatch(self.driver.target_dict, test_target_dict)
def test_specific_channel(self):
configuration = copy.copy(self.configuration)
configuration.infortrend_slots_a_channels_id = '2, 4'
test_map_dict = {
'slot_a': {'2': [], '4': []},
'slot_b': {},
}
test_target_dict = {
'slot_a': {'2': '0', '4': '0'},
'slot_b': {},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
}
self._driver_setup(mock_commands, configuration)
self.driver._init_map_info()
self.assertDictMatch(self.driver.map_dict, test_map_dict)
self.assertDictMatch(self.driver.target_dict, test_target_dict)
def test_update_mcs_dict(self):
configuration = copy.copy(self.configuration)
configuration.use_multipath_for_image_xfer = True
test_mcs_dict = {
'slot_a': {'1': ['1', '2'], '2': ['4']},
'slot_b': {},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_with_mcs(),
}
self._driver_setup(mock_commands, configuration)
self.driver._init_map_info()
self.assertDictMatch(self.driver.mcs_dict, test_mcs_dict)
def test_mapping_info_with_mcs(self):
configuration = copy.copy(self.configuration)
configuration.use_multipath_for_image_xfer = True
fake_mcs_dict = {
'slot_a': {'0': ['1', '2'], '2': ['4']},
'slot_b': {},
}
lun_list = list(range(0, 127))
fake_map_dict = {
'slot_a': {'1': lun_list[2:], '2': lun_list[:], '4': lun_list[1:]},
'slot_b': {},
}
test_map_chl = {
'slot_a': ['1', '2'],
}
test_map_lun = ['2']
test_mcs_id = '0'
self.driver = self._get_driver(configuration)
self.driver.mcs_dict = fake_mcs_dict
self.driver.map_dict = fake_map_dict
map_chl, map_lun, mcs_id = self.driver._get_mapping_info_with_mcs()
self.assertDictMatch(map_chl, test_map_chl)
self.assertEqual(test_map_lun, map_lun)
self.assertEqual(test_mcs_id, mcs_id)
def test_mapping_info_with_mcs_multi_group(self):
configuration = copy.copy(self.configuration)
configuration.use_multipath_for_image_xfer = True
fake_mcs_dict = {
'slot_a': {'0': ['1', '2'], '1': ['3', '4'], '2': ['5']},
'slot_b': {},
}
lun_list = list(range(0, 127))
fake_map_dict = {
'slot_a': {
'1': lun_list[2:],
'2': lun_list[:],
'3': lun_list[:],
'4': lun_list[1:],
'5': lun_list[:],
},
'slot_b': {},
}
test_map_chl = {
'slot_a': ['3', '4'],
}
test_map_lun = ['1']
test_mcs_id = '1'
self.driver = self._get_driver(configuration)
self.driver.mcs_dict = fake_mcs_dict
self.driver.map_dict = fake_map_dict
map_chl, map_lun, mcs_id = self.driver._get_mapping_info_with_mcs()
self.assertDictMatch(map_chl, test_map_chl)
self.assertEqual(test_map_lun, map_lun)
self.assertEqual(test_mcs_id, mcs_id)
def test_specific_channel_with_multipath(self):
configuration = copy.copy(self.configuration)
configuration.infortrend_slots_a_channels_id = '1,2'
test_map_dict = {
'slot_a': {'1': [], '2': []},
'slot_b': {},
}
test_target_dict = {
'slot_a': {'1': '0', '2': '0'},
'slot_b': {},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
}
self._driver_setup(mock_commands, configuration)
self.driver._init_map_info(multipath=True)
self.assertDictMatch(self.driver.map_dict, test_map_dict)
self.assertDictMatch(self.driver.target_dict, test_target_dict)
def test_specific_channel_with_multipath_r_model(self):
configuration = copy.copy(self.configuration)
configuration.infortrend_slots_a_channels_id = '1,2'
configuration.infortrend_slots_b_channels_id = '1'
test_map_dict = {
'slot_a': {'1': [], '2': []},
'slot_b': {'1': []},
}
test_target_dict = {
'slot_a': {'1': '0', '2': '0'},
'slot_b': {'1': '1'},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
}
self._driver_setup(mock_commands, configuration)
self.driver._init_map_info(multipath=True)
self.assertDictMatch(self.driver.map_dict, test_map_dict)
self.assertDictMatch(self.driver.target_dict, test_target_dict)
@mock.patch.object(common_cli.LOG, 'info')
def test_create_volume(self, log_info):
test_volume = self.cli_data.test_volume
test_model_update = {
'provider_location': 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16),
self.cli_data.fake_partition_id[0]),
}
mock_commands = {
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(),
'ShowDevice': self.cli_data.get_test_show_device(),
'ShowLV': self._mock_show_lv,
}
self._driver_setup(mock_commands)
model_update = self.driver.create_volume(test_volume)
self.assertDictMatch(model_update, test_model_update)
self.assertEqual(1, log_info.call_count)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_create_volume_with_create_fail(self):
test_volume = self.cli_data.test_volume
mock_commands = {
'CreatePartition': FAKE_ERROR_RETURN,
'ShowPartition': self.cli_data.get_test_show_partition(),
'ShowDevice': self.cli_data.get_test_show_device(),
'ShowLV': self._mock_show_lv,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.create_volume,
test_volume)
@mock.patch.object(common_cli.LOG, 'info')
def test_delete_volume(self, log_info):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_snapshot_id = self.cli_data.fake_snapshot_id
test_pair_id = self.cli_data.fake_pair_id
mock_commands = {
'ShowPartition':
self.cli_data.get_test_show_partition_detail_for_map(
test_partition_id),
'ShowReplica': self.cli_data.get_test_show_replica_detail(),
'DeleteReplica': SUCCEED,
'ShowSnapshot': self.cli_data.get_test_show_snapshot(),
'DeleteSnapshot': SUCCEED,
'ShowMap': self.cli_data.get_test_show_map(),
'DeleteMap': SUCCEED,
'DeletePartition': SUCCEED,
}
self._driver_setup(mock_commands)
self.driver.delete_volume(test_volume)
expect_cli_cmd = [
mock.call('ShowPartition', '-l'),
mock.call('ShowReplica', '-l'),
mock.call('DeleteReplica', test_pair_id[0], '-y'),
mock.call('ShowSnapshot', 'part=%s' % test_partition_id),
mock.call('DeleteSnapshot', test_snapshot_id[0], '-y'),
mock.call('DeleteSnapshot', test_snapshot_id[1], '-y'),
mock.call('ShowMap', 'part=%s' % test_partition_id),
mock.call('DeleteMap', 'part', test_partition_id, '-y'),
mock.call('DeletePartition', test_partition_id, '-y'),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertEqual(1, log_info.call_count)
@mock.patch.object(common_cli.LOG, 'warning', mock.Mock())
def test_delete_volume_with_sync_pair(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
mock_commands = {
'ShowPartition':
self.cli_data.get_test_show_partition_detail_for_map(
test_partition_id),
'ShowReplica':
self.cli_data.get_test_show_replica_detail_for_sync_pair(),
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.VolumeDriverException,
self.driver.delete_volume,
test_volume)
def test_delete_volume_with_delete_fail(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
mock_commands = {
'ShowPartition':
self.cli_data.get_test_show_partition_detail_for_map(
test_partition_id),
'ShowReplica': self.cli_data.get_test_show_replica_detail(),
'DeleteReplica': SUCCEED,
'ShowSnapshot': self.cli_data.get_test_show_snapshot(),
'DeleteSnapshot': SUCCEED,
'ShowMap': self.cli_data.get_test_show_map(),
'DeleteMap': SUCCEED,
'DeletePartition': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.delete_volume,
test_volume)
@mock.patch.object(common_cli.LOG, 'warning')
    def test_delete_volume_with_partition_not_found(self, log_warning):
test_volume = self.cli_data.test_volume
mock_commands = {
'ShowPartition': self.cli_data.get_test_show_empty_list(),
}
self._driver_setup(mock_commands)
self.driver.delete_volume(test_volume)
self.assertEqual(1, log_warning.call_count)
@mock.patch.object(common_cli.LOG, 'info')
def test_delete_volume_without_provider(self, log_info):
test_system_id = self.cli_data.fake_system_id[0]
test_volume = copy.deepcopy(self.cli_data.test_volume)
test_volume['provider_location'] = 'system_id^%s@partition_id^%s' % (
int(test_system_id, 16), 'None')
test_partition_id = self.cli_data.fake_partition_id[0]
mock_commands = {
'ShowPartition':
self.cli_data.get_test_show_partition_detail_for_map(
test_partition_id),
'ShowReplica': self.cli_data.get_test_show_replica_detail(),
'DeleteReplica': SUCCEED,
'ShowSnapshot': self.cli_data.get_test_show_snapshot(),
'DeleteSnapshot': SUCCEED,
'ShowMap': self.cli_data.get_test_show_map(),
'DeleteMap': SUCCEED,
'DeletePartition': SUCCEED,
}
self._driver_setup(mock_commands)
self.driver.delete_volume(test_volume)
self.assertEqual(1, log_info.call_count)
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new=utils.ZeroIntervalLoopingCall)
@mock.patch.object(common_cli.LOG, 'info')
def test_create_cloned_volume(self, log_info):
fake_partition_id = self.cli_data.fake_partition_id[0]
test_dst_volume = self.cli_data.test_dst_volume
test_dst_volume_id = test_dst_volume['id'].replace('-', '')
test_src_volume = self.cli_data.test_volume
test_dst_part_id = self.cli_data.fake_partition_id[1]
test_model_update = {
'provider_location': 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16),
self.cli_data.fake_partition_id[1]),
}
mock_commands = {
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(),
'ShowDevice': self.cli_data.get_test_show_device(),
'CreateReplica': SUCCEED,
'ShowLV': self._mock_show_lv,
'ShowReplica':
self.cli_data.get_test_show_replica_detail_for_migrate(
fake_partition_id, test_dst_part_id, test_dst_volume_id),
'DeleteReplica': SUCCEED,
}
self._driver_setup(mock_commands)
model_update = self.driver.create_cloned_volume(
test_dst_volume, test_src_volume)
self.assertDictMatch(model_update, test_model_update)
self.assertEqual(1, log_info.call_count)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_create_cloned_volume_with_create_replica_fail(self):
test_dst_volume = self.cli_data.test_dst_volume
test_src_volume = self.cli_data.test_volume
mock_commands = {
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(),
'ShowDevice': self.cli_data.get_test_show_device(),
'CreateReplica': FAKE_ERROR_RETURN,
'ShowLV': self._mock_show_lv,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.create_cloned_volume,
test_dst_volume,
test_src_volume)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_create_export(self):
test_volume = self.cli_data.test_volume
test_model_update = {
'provider_location': test_volume['provider_location'],
}
self.driver = self._get_driver(self.configuration)
model_update = self.driver.create_export(None, test_volume)
self.assertDictMatch(model_update, test_model_update)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_get_volume_stats(self):
test_volume_states = self.cli_data.test_volume_states
mock_commands = {
'ShowLicense': self.cli_data.get_test_show_license(),
'ShowLV': self.cli_data.get_test_show_lv(),
'ShowPartition': self.cli_data.get_test_show_partition_detail(),
}
self._driver_setup(mock_commands)
self.driver.VERSION = '99.99'
volume_states = self.driver.get_volume_stats(True)
self.assertDictMatch(volume_states, test_volume_states)
def test_get_volume_stats_fail(self):
mock_commands = {
'ShowLicense': self.cli_data.get_test_show_license(),
'ShowLV': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.get_volume_stats)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_create_snapshot(self):
fake_partition_id = self.cli_data.fake_partition_id[0]
fake_snapshot_id = self.cli_data.fake_snapshot_id[0]
mock_commands = {
'CreateSnapshot': SUCCEED,
'ShowSnapshot': self.cli_data.get_test_show_snapshot(
partition_id=fake_partition_id,
snapshot_id=fake_snapshot_id),
'ShowPartition': self.cli_data.get_test_show_partition(),
}
self._driver_setup(mock_commands)
model_update = self.driver.create_snapshot(self.cli_data.test_snapshot)
self.assertEqual(fake_snapshot_id, model_update['provider_location'])
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_create_snapshot_without_partition_id(self):
fake_partition_id = self.cli_data.fake_partition_id[0]
fake_snapshot_id = self.cli_data.fake_snapshot_id[0]
test_snapshot = self.cli_data.test_snapshot
mock_commands = {
'CreateSnapshot': SUCCEED,
'ShowSnapshot': self.cli_data.get_test_show_snapshot(
partition_id=fake_partition_id,
snapshot_id=fake_snapshot_id),
'ShowPartition': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.create_snapshot,
test_snapshot)
def test_create_snapshot_with_create_fail(self):
fake_partition_id = self.cli_data.fake_partition_id[0]
fake_snapshot_id = self.cli_data.fake_snapshot_id[0]
test_snapshot = self.cli_data.test_snapshot
mock_commands = {
'CreateSnapshot': FAKE_ERROR_RETURN,
'ShowSnapshot': self.cli_data.get_test_show_snapshot(
partition_id=fake_partition_id,
snapshot_id=fake_snapshot_id),
'ShowPartition': self.cli_data.get_test_show_partition(),
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.create_snapshot,
test_snapshot)
def test_create_snapshot_with_show_fail(self):
test_snapshot = self.cli_data.test_snapshot
mock_commands = {
'CreateSnapshot': SUCCEED,
'ShowSnapshot': FAKE_ERROR_RETURN,
'ShowPartition': self.cli_data.get_test_show_partition(),
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.create_snapshot,
test_snapshot)
@mock.patch.object(common_cli.LOG, 'info')
def test_delete_snapshot(self, log_info):
test_snapshot = self.cli_data.test_snapshot
mock_commands = {
'ShowReplica': self.cli_data.get_test_show_replica_detail(),
'DeleteSnapshot': SUCCEED,
}
self._driver_setup(mock_commands)
self.driver.delete_snapshot(test_snapshot)
self.assertEqual(1, log_info.call_count)
def test_delete_snapshot_without_provider_location(self):
test_snapshot = self.cli_data.test_snapshot
self.driver = self._get_driver(self.configuration)
self.driver._get_raid_snapshot_id = mock.Mock(return_value=None)
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.delete_snapshot,
test_snapshot)
def test_delete_snapshot_with_fail(self):
test_snapshot = self.cli_data.test_snapshot
mock_commands = {
'ShowReplica': self.cli_data.get_test_show_replica_detail(),
'DeleteSnapshot': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.delete_snapshot,
test_snapshot)
@mock.patch.object(common_cli.LOG, 'warning', mock.Mock())
def test_delete_snapshot_with_sync_pair(self):
test_snapshot = self.cli_data.test_snapshot
mock_commands = {
'ShowReplica':
self.cli_data.get_test_show_replica_detail_for_si_sync_pair(),
'DeleteSnapshot': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.VolumeDriverException,
self.driver.delete_snapshot,
test_snapshot)
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new=utils.ZeroIntervalLoopingCall)
@mock.patch.object(common_cli.LOG, 'info')
def test_create_volume_from_snapshot(self, log_info):
test_snapshot = self.cli_data.test_snapshot
test_snapshot_id = self.cli_data.fake_snapshot_id[0]
test_dst_volume = self.cli_data.test_dst_volume
test_dst_volume_id = test_dst_volume['id'].replace('-', '')
test_dst_part_id = self.cli_data.fake_partition_id[1]
test_model_update = {
'provider_location': 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16),
self.cli_data.fake_partition_id[1]),
}
mock_commands = {
'ShowSnapshot':
self.cli_data.get_test_show_snapshot_detail_filled_block(),
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(),
'ShowDevice': self.cli_data.get_test_show_device(),
'CreateReplica': SUCCEED,
'ShowLV': self._mock_show_lv,
'ShowReplica':
self.cli_data.get_test_show_replica_detail_for_migrate(
test_snapshot_id, test_dst_part_id, test_dst_volume_id),
'DeleteReplica': SUCCEED,
}
self._driver_setup(mock_commands)
model_update = self.driver.create_volume_from_snapshot(
test_dst_volume, test_snapshot)
self.assertDictMatch(model_update, test_model_update)
self.assertEqual(1, log_info.call_count)
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new=utils.ZeroIntervalLoopingCall)
@mock.patch.object(common_cli.LOG, 'info')
def test_create_volume_from_snapshot_without_filled_block(self, log_info):
test_snapshot = self.cli_data.test_snapshot
test_snapshot_id = self.cli_data.fake_snapshot_id[0]
test_dst_volume = self.cli_data.test_dst_volume
test_dst_volume_id = test_dst_volume['id'].replace('-', '')
test_dst_part_id = self.cli_data.fake_partition_id[1]
test_src_part_id = self.cli_data.fake_partition_id[0]
test_model_update = {
'provider_location': 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16),
self.cli_data.fake_partition_id[1]),
}
mock_commands = {
'ShowSnapshot': self.cli_data.get_test_show_snapshot_detail(),
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(),
'ShowDevice': self.cli_data.get_test_show_device(),
'CreateReplica': SUCCEED,
'ShowLV': self._mock_show_lv,
'ShowReplica': [
self.cli_data.get_test_show_replica_detail_for_migrate(
test_src_part_id, test_dst_part_id, test_dst_volume_id),
self.cli_data.get_test_show_replica_detail_for_migrate(
test_snapshot_id, test_dst_part_id, test_dst_volume_id),
],
'DeleteReplica': SUCCEED,
}
self._driver_setup(mock_commands)
model_update = self.driver.create_volume_from_snapshot(
test_dst_volume, test_snapshot)
self.assertDictMatch(model_update, test_model_update)
self.assertEqual(1, log_info.call_count)
    def test_create_volume_from_snapshot_without_provider_location(self):
test_snapshot = self.cli_data.test_snapshot
test_dst_volume = self.cli_data.test_dst_volume
self.driver = self._get_driver(self.configuration)
self.driver._get_raid_snapshot_id = mock.Mock(return_value=None)
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
test_dst_volume,
test_snapshot)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi)
test_iscsi_properties = self.cli_data.test_iscsi_properties
        test_target_portal = [test_iscsi_properties['data']['target_portal']]
test_target_iqn = [test_iscsi_properties['data']['target_iqn']]
test_connector['multipath'] = False
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
'ShowMap': self.cli_data.get_test_show_map(),
'ShowIQN': self.cli_data.get_test_show_iqn(),
'CreateMap': SUCCEED,
'ShowNet': self.cli_data.get_test_show_net(),
'ExecuteCommand': self.cli_data.get_fake_discovery(
                test_target_iqn, test_target_portal),
}
self._driver_setup(mock_commands)
properties = self.driver.initialize_connection(
test_volume, test_connector)
self.assertDictMatch(properties, test_iscsi_properties)
expect_cli_cmd = [
mock.call('CreateMap', 'part', test_partition_id, '2', '0', '0',
'iqn=%s' % test_connector['initiator']),
]
self._assert_cli_has_calls(expect_cli_cmd)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_with_iqn_not_exist(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_initiator = copy.deepcopy(self.cli_data.fake_initiator_iqn[1])
test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi)
test_iscsi_properties = self.cli_data.test_iscsi_properties
        test_target_portal = [test_iscsi_properties['data']['target_portal']]
test_target_iqn = [test_iscsi_properties['data']['target_iqn']]
test_connector['multipath'] = False
test_connector['initiator'] = test_initiator
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
'ShowMap': self.cli_data.get_test_show_map(),
'ShowIQN': self.cli_data.get_test_show_iqn(),
'CreateIQN': SUCCEED,
'CreateMap': SUCCEED,
'ShowNet': self.cli_data.get_test_show_net(),
'ExecuteCommand': self.cli_data.get_fake_discovery(
                test_target_iqn, test_target_portal),
}
self._driver_setup(mock_commands)
properties = self.driver.initialize_connection(
test_volume, test_connector)
self.assertDictMatch(properties, test_iscsi_properties)
expect_cli_cmd = [
mock.call('CreateIQN', test_initiator, test_initiator[-16:]),
mock.call('CreateMap', 'part', test_partition_id, '2', '0', '0',
'iqn=%s' % test_connector['initiator']),
]
self._assert_cli_has_calls(expect_cli_cmd)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_with_empty_map(self):
test_volume = self.cli_data.test_volume
test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi)
test_iscsi_properties = self.cli_data.test_iscsi_properties_empty_map
        test_target_portal = [test_iscsi_properties['data']['target_portal']]
test_target_iqn = [test_iscsi_properties['data']['target_iqn']]
test_connector['multipath'] = False
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
'ShowMap': self.cli_data.get_test_show_empty_list(),
'ShowIQN': self.cli_data.get_test_show_iqn(),
'CreateMap': SUCCEED,
'ShowNet': self.cli_data.get_test_show_net(),
'ExecuteCommand': self.cli_data.get_fake_discovery(
                test_target_iqn, test_target_portal),
}
self._driver_setup(mock_commands)
properties = self.driver.initialize_connection(
test_volume, test_connector)
self.assertDictMatch(
properties, self.cli_data.test_iscsi_properties_empty_map)
def test_initialize_connection_with_create_map_fail(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_iscsi
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
'ShowMap': self.cli_data.get_test_show_map(),
'ShowIQN': self.cli_data.get_test_show_iqn(),
'CreateMap': FAKE_ERROR_RETURN,
'ShowNet': SUCCEED,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.initialize_connection,
test_volume,
test_connector)
def test_initialize_connection_with_get_ip_fail(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_iscsi
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
'ShowMap': self.cli_data.get_test_show_map(),
'ShowIQN': self.cli_data.get_test_show_iqn(),
'CreateMap': SUCCEED,
'ShowNet': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.initialize_connection,
test_volume,
test_connector)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_with_mcs(self):
configuration = copy.copy(self.configuration)
configuration.use_multipath_for_image_xfer = True
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi)
test_iscsi_properties = self.cli_data.test_iscsi_properties_with_mcs
        test_target_portal = [test_iscsi_properties['data']['target_portal']]
test_target_iqn = [test_iscsi_properties['data']['target_iqn']]
test_connector['multipath'] = False
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_with_mcs(),
'ShowMap': self.cli_data.get_test_show_map(),
'ShowIQN': self.cli_data.get_test_show_iqn(),
'CreateMap': SUCCEED,
'ShowNet': self.cli_data.get_test_show_net(),
'ExecuteCommand': self.cli_data.get_fake_discovery(
                test_target_iqn, test_target_portal),
}
self._driver_setup(mock_commands, configuration)
properties = self.driver.initialize_connection(
test_volume, test_connector)
self.assertDictMatch(properties, test_iscsi_properties)
expect_cli_cmd = [
mock.call('CreateMap', 'part', test_partition_id, '1', '0', '2',
'iqn=%s' % test_connector['initiator']),
]
self._assert_cli_has_calls(expect_cli_cmd)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_extend_volume(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_new_size = 10
test_expand_size = test_new_size - test_volume['size']
mock_commands = {
'SetPartition': SUCCEED,
}
self._driver_setup(mock_commands)
self.driver.extend_volume(test_volume, test_new_size)
expect_cli_cmd = [
mock.call('SetPartition', 'expand', test_partition_id,
'size=%sGB' % test_expand_size),
]
self._assert_cli_has_calls(expect_cli_cmd)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_extend_volume_mb(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_new_size = 5.5
test_expand_size = round((test_new_size - test_volume['size']) * 1024)
mock_commands = {
'SetPartition': SUCCEED,
}
self._driver_setup(mock_commands)
self.driver.extend_volume(test_volume, test_new_size)
expect_cli_cmd = [
mock.call('SetPartition', 'expand', test_partition_id,
'size=%sMB' % test_expand_size),
]
self._assert_cli_has_calls(expect_cli_cmd)
def test_extend_volume_fail(self):
test_volume = self.cli_data.test_volume
test_new_size = 10
mock_commands = {
'SetPartition': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.extend_volume,
test_volume,
test_new_size)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_terminate_connection(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_connector = self.cli_data.test_connector_iscsi
mock_commands = {
'DeleteMap': SUCCEED,
'DeleteIQN': SUCCEED,
'ShowMap': self.cli_data.get_test_show_map(),
}
self._driver_setup(mock_commands)
self.driver.terminate_connection(test_volume, test_connector)
expect_cli_cmd = [
mock.call('DeleteMap', 'part', test_partition_id, '-y'),
mock.call('DeleteIQN', test_connector['initiator'][-16:]),
mock.call('ShowMap'),
]
self._assert_cli_has_calls(expect_cli_cmd)
def test_terminate_connection_fail(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_iscsi
mock_commands = {
'DeleteMap': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.terminate_connection,
test_volume,
test_connector)
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new=utils.ZeroIntervalLoopingCall)
def test_migrate_volume(self):
test_host = copy.deepcopy(self.cli_data.test_migrate_host)
fake_pool = copy.deepcopy(self.cli_data.fake_pool)
test_volume = self.cli_data.test_volume
test_volume_id = test_volume['id'].replace('-', '')
test_src_part_id = self.cli_data.fake_partition_id[0]
test_dst_part_id = self.cli_data.fake_partition_id[2]
test_pair_id = self.cli_data.fake_pair_id[0]
test_model_update = {
'provider_location': 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16),
test_dst_part_id),
}
mock_commands = {
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(
test_volume_id, fake_pool['pool_id']),
'CreateReplica': SUCCEED,
'ShowLV': self._mock_show_lv_for_migrate,
'ShowReplica':
self.cli_data.get_test_show_replica_detail_for_migrate(
test_src_part_id, test_dst_part_id, test_volume_id),
'DeleteReplica': SUCCEED,
'DeleteMap': SUCCEED,
'DeletePartition': SUCCEED,
}
self._driver_setup(mock_commands)
rc, model_update = self.driver.migrate_volume(test_volume, test_host)
expect_cli_cmd = [
mock.call('CreatePartition',
fake_pool['pool_id'],
test_volume['id'].replace('-', ''),
'size=%s' % (test_volume['size'] * 1024),
''),
mock.call('ShowPartition'),
mock.call('CreateReplica',
'Cinder-Migrate',
'part', test_src_part_id,
'part', test_dst_part_id,
'type=mirror'),
mock.call('ShowReplica', '-l'),
mock.call('DeleteReplica', test_pair_id, '-y'),
mock.call('DeleteMap', 'part', test_src_part_id, '-y'),
mock.call('DeletePartition', test_src_part_id, '-y'),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertTrue(rc)
self.assertDictMatch(model_update, test_model_update)
@mock.patch.object(common_cli.LOG, 'warning')
def test_migrate_volume_with_invalid_storage(self, log_warning):
fake_host = self.cli_data.fake_host
test_volume = self.cli_data.test_volume
mock_commands = {
'ShowLV': self._mock_show_lv_for_migrate,
}
self._driver_setup(mock_commands)
rc, model_update = self.driver.migrate_volume(test_volume, fake_host)
self.assertFalse(rc)
        self.assertIsNone(model_update)
self.assertEqual(1, log_warning.call_count)
def test_migrate_volume_with_get_part_id_fail(self):
test_host = copy.deepcopy(self.cli_data.test_migrate_host)
test_volume = self.cli_data.test_volume
mock_commands = {
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(),
'DeleteMap': SUCCEED,
'CreateReplica': SUCCEED,
'CreateMap': SUCCEED,
'ShowLV': self._mock_show_lv_for_migrate,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.VolumeDriverException,
self.driver.migrate_volume,
test_volume,
test_host)
def test_migrate_volume_with_create_replica_fail(self):
test_host = copy.deepcopy(self.cli_data.test_migrate_host)
fake_pool = copy.deepcopy(self.cli_data.fake_pool)
test_volume = self.cli_data.test_volume
mock_commands = {
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(
test_volume['id'].replace('-', ''), fake_pool['pool_id']),
'DeleteMap': SUCCEED,
'CreateReplica': FAKE_ERROR_RETURN,
'CreateMap': SUCCEED,
'ShowLV': self._mock_show_lv_for_migrate,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.migrate_volume,
test_volume,
test_host)
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new=utils.ZeroIntervalLoopingCall)
def test_migrate_volume_timeout(self):
test_host = copy.deepcopy(self.cli_data.test_migrate_host)
fake_pool = copy.deepcopy(self.cli_data.fake_pool)
test_volume = self.cli_data.test_volume
test_volume_id = test_volume['id'].replace('-', '')
test_src_part_id = self.cli_data.fake_partition_id[0]
test_dst_part_id = self.cli_data.fake_partition_id[2]
configuration = copy.copy(self.configuration)
configuration.infortrend_cli_timeout = 0
mock_commands = {
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(
test_volume_id, fake_pool['pool_id']),
'CreateReplica': SUCCEED,
'ShowLV': self._mock_show_lv_for_migrate,
'ShowReplica':
self.cli_data.get_test_show_replica_detail_for_migrate(
test_src_part_id, test_dst_part_id, test_volume_id,
'Copy'),
}
self._driver_setup(mock_commands, configuration)
self.assertRaises(
exception.VolumeDriverException,
self.driver.migrate_volume,
test_volume,
test_host)
def test_manage_existing_get_size(self):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume
test_pool = self.cli_data.fake_lv_id[0]
test_partition_id = self.cli_data.fake_partition_id[2]
test_ref_volume_id = test_ref_volume['source-id'].replace('-', '')
mock_commands = {
'ShowPartition': self.cli_data.get_test_show_partition_detail(
'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool),
'ShowMap': SUCCEED,
}
self._driver_setup(mock_commands)
size = self.driver.manage_existing_get_size(
test_volume, test_ref_volume)
expect_cli_cmd = [
mock.call('ShowMap', 'part=%s' % test_partition_id),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertEqual(1, size)
def test_manage_existing_get_size_with_import(self):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume_with_import
test_pool = self.cli_data.fake_lv_id[0]
test_partition_id = self.cli_data.fake_partition_id[2]
mock_commands = {
'ShowPartition': self.cli_data.get_test_show_partition_detail(
test_ref_volume['source-name'], test_pool),
'ShowMap': SUCCEED,
}
self._driver_setup(mock_commands)
size = self.driver.manage_existing_get_size(
test_volume, test_ref_volume)
expect_cli_cmd = [
mock.call('ShowMap', 'part=%s' % test_partition_id),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertEqual(1, size)
def test_manage_existing_get_size_in_use(self):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume
test_pool = self.cli_data.fake_lv_id[0]
test_ref_volume_id = test_ref_volume['source-id'].replace('-', '')
mock_commands = {
'ShowPartition': self.cli_data.get_test_show_partition_detail(
'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool),
'ShowMap': self.cli_data.get_test_show_map(),
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.manage_existing_get_size,
test_volume,
test_ref_volume)
def test_manage_existing_get_size_no_source_id(self):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_dst_volume
self.driver = self._get_driver(self.configuration)
self.assertRaises(
exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size,
test_volume,
test_ref_volume)
def test_manage_existing_get_size_show_part_fail(self):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume
mock_commands = {
'ShowPartition': FAKE_ERROR_RETURN,
'ShowMap': SUCCEED,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.manage_existing_get_size,
test_volume,
test_ref_volume)
def test_manage_existing_get_size_show_map_fail(self):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume
test_pool = self.cli_data.fake_lv_id[0]
test_ref_volume_id = test_ref_volume['source-id'].replace('-', '')
mock_commands = {
'ShowPartition': self.cli_data.get_test_show_partition_detail(
'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool),
'ShowMap': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.manage_existing_get_size,
test_volume,
test_ref_volume)
@mock.patch.object(common_cli.LOG, 'info')
def test_manage_existing(self, log_info):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume
test_pool = self.cli_data.fake_lv_id[0]
test_partition_id = self.cli_data.fake_partition_id[2]
test_ref_volume_id = test_ref_volume['source-id'].replace('-', '')
test_model_update = {
'provider_location': 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16),
test_partition_id),
}
mock_commands = {
'ShowPartition': self.cli_data.get_test_show_partition_detail(
'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool),
'SetPartition': SUCCEED,
'ShowDevice': self.cli_data.get_test_show_device(),
}
self._driver_setup(mock_commands)
model_update = self.driver.manage_existing(
test_volume, test_ref_volume)
expect_cli_cmd = [
mock.call('SetPartition', test_partition_id,
'name=%s' % test_volume['id'].replace('-', '')),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertEqual(1, log_info.call_count)
self.assertDictMatch(model_update, test_model_update)
def test_manage_existing_rename_fail(self):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume
test_pool = self.cli_data.fake_lv_id[0]
test_ref_volume_id = test_ref_volume['source-id'].replace('-', '')
mock_commands = {
'ShowPartition': self.cli_data.get_test_show_partition_detail(
'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool),
'SetPartition': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.manage_existing,
test_volume,
test_ref_volume)
def test_manage_existing_with_part_not_found(self):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume
mock_commands = {
'ShowPartition':
self.cli_data.get_test_show_partition_detail(),
'SetPartition': SUCCEED,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.ManageExistingInvalidReference,
self.driver.manage_existing,
test_volume,
test_ref_volume)
@mock.patch.object(common_cli.LOG, 'info')
def test_manage_existing_with_import(self, log_info):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume_with_import
test_pool = self.cli_data.fake_lv_id[0]
test_partition_id = self.cli_data.fake_partition_id[2]
test_model_update = {
'provider_location': 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16),
test_partition_id),
}
mock_commands = {
'ShowPartition': self.cli_data.get_test_show_partition_detail(
test_ref_volume['source-name'], test_pool),
'SetPartition': SUCCEED,
'ShowDevice': self.cli_data.get_test_show_device(),
}
self._driver_setup(mock_commands)
model_update = self.driver.manage_existing(
test_volume, test_ref_volume)
expect_cli_cmd = [
mock.call('SetPartition', test_partition_id,
'name=%s' % test_volume['id'].replace('-', '')),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertEqual(1, log_info.call_count)
self.assertDictMatch(model_update, test_model_update)
@mock.patch.object(common_cli.LOG, 'info')
def test_unmanage(self, log_info):
test_volume = self.cli_data.test_volume
test_volume_id = test_volume['id'].replace('-', '')
test_partition_id = self.cli_data.fake_partition_id[0]
mock_commands = {
'SetPartition': SUCCEED,
}
self._driver_setup(mock_commands)
self.driver.unmanage(test_volume)
expect_cli_cmd = [
mock.call(
'SetPartition',
test_partition_id,
'name=cinder-unmanaged-%s' % test_volume_id[:-17]),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertEqual(1, log_info.call_count)
@mock.patch.object(common_cli.LOG, 'info')
def test_retype_without_change(self, log_info):
test_volume = self.cli_data.test_volume
test_new_type = self.cli_data.test_new_type
test_diff = {'extra_specs': {}}
test_host = self.cli_data.test_migrate_host_2
self.driver = self._get_driver(self.configuration)
rc = self.driver.retype(
None, test_volume, test_new_type, test_diff, test_host)
self.assertTrue(rc)
self.assertEqual(1, log_info.call_count)
@mock.patch.object(common_cli.LOG, 'warning')
def test_retype_with_change_provision(self, log_warning):
test_volume = self.cli_data.test_volume
test_new_type = self.cli_data.test_new_type
test_diff = self.cli_data.test_diff
test_host = self.cli_data.test_migrate_host_2
self.driver = self._get_driver(self.configuration)
rc = self.driver.retype(
None, test_volume, test_new_type, test_diff, test_host)
self.assertFalse(rc)
self.assertEqual(1, log_warning.call_count)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_retype_with_migrate(self):
fake_pool = copy.deepcopy(self.cli_data.fake_pool)
test_host = copy.deepcopy(self.cli_data.test_migrate_host)
test_volume = self.cli_data.test_volume
test_volume_id = test_volume['id'].replace('-', '')
test_new_type = self.cli_data.test_new_type
test_diff = self.cli_data.test_diff
test_src_part_id = self.cli_data.fake_partition_id[0]
test_dst_part_id = self.cli_data.fake_partition_id[2]
test_pair_id = self.cli_data.fake_pair_id[0]
test_model_update = {
'provider_location': 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16),
test_dst_part_id),
}
mock_commands = {
'ShowSnapshot': SUCCEED,
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(
test_volume_id, fake_pool['pool_id']),
'CreateReplica': SUCCEED,
'ShowLV': self._mock_show_lv_for_migrate,
'ShowReplica':
self.cli_data.get_test_show_replica_detail_for_migrate(
test_src_part_id, test_dst_part_id, test_volume_id),
'DeleteReplica': SUCCEED,
'DeleteMap': SUCCEED,
'DeletePartition': SUCCEED,
}
self._driver_setup(mock_commands)
rc, model_update = self.driver.retype(
None, test_volume, test_new_type, test_diff, test_host)
expect_cli_cmd = [
mock.call('ShowSnapshot', 'part=%s' % test_src_part_id),
mock.call(
'CreatePartition',
fake_pool['pool_id'],
test_volume['id'].replace('-', ''),
'size=%s' % (test_volume['size'] * 1024),
'init=disable min=%sMB' % (
int(test_volume['size'] * 1024 * 0.2))
),
mock.call('ShowPartition'),
mock.call(
'CreateReplica',
'Cinder-Migrate',
'part', test_src_part_id,
'part', test_dst_part_id,
'type=mirror'
),
mock.call('ShowReplica', '-l'),
mock.call('DeleteReplica', test_pair_id, '-y'),
mock.call('DeleteMap', 'part', test_src_part_id, '-y'),
mock.call('DeletePartition', test_src_part_id, '-y'),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertTrue(rc)
self.assertDictMatch(model_update, test_model_update)
@mock.patch.object(common_cli.LOG, 'debug', mock.Mock())
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_update_migrated_volume(self):
src_volume = self.cli_data.test_volume
dst_volume = copy.deepcopy(self.cli_data.test_dst_volume)
test_dst_part_id = self.cli_data.fake_partition_id[1]
dst_volume['provider_location'] = 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16), test_dst_part_id)
test_model_update = {
'_name_id': None,
'provider_location': dst_volume['provider_location'],
}
mock_commands = {
'SetPartition': SUCCEED,
}
self._driver_setup(mock_commands)
model_update = self.driver.update_migrated_volume(
None, src_volume, dst_volume, 'available')
expect_cli_cmd = [
mock.call('SetPartition', test_dst_part_id,
'name=%s' % src_volume['id'].replace('-', '')),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertDictMatch(test_model_update, model_update)
@mock.patch.object(common_cli.LOG, 'debug', mock.Mock())
def test_update_migrated_volume_rename_fail(self):
src_volume = self.cli_data.test_volume
        dst_volume = copy.deepcopy(self.cli_data.test_dst_volume)
dst_volume['_name_id'] = 'fake_name_id'
test_dst_part_id = self.cli_data.fake_partition_id[1]
dst_volume['provider_location'] = 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16), test_dst_part_id)
mock_commands = {
'SetPartition': FAKE_ERROR_RETURN
}
self._driver_setup(mock_commands)
model_update = self.driver.update_migrated_volume(
None, src_volume, dst_volume, 'available')
self.assertEqual({'_name_id': 'fake_name_id'}, model_update)
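# ---------------------------------------------------------------------
# Illustrative sketch (not part of the original test module): the
# ``_driver_setup(mock_commands)`` fixture used throughout these tests is
# defined outside this excerpt. A minimal stand-in could wire the mock
# command table into the driver like this, assuming ``_execute`` takes the
# CLI command name first and that each table entry is a canned
# ``(rc, output)`` tuple, a list of tuples consumed in order, or a callable.
def _make_fake_execute(mock_commands):
    def fake_execute(command, *args, **kwargs):
        value = mock_commands[command]
        if callable(value):
            # Dynamic responses, e.g. 'ShowLV': self._mock_show_lv.
            return value(*args, **kwargs)
        if isinstance(value, list):
            # Sequenced responses, consumed one per call.
            return value.pop(0)
        return value
    return fake_execute
# A test would then patch the driver with, for example:
#     driver._execute = mock.Mock(side_effect=_make_fake_execute(cmds))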
|
apache-2.0
| -1,128,946,220,511,858,200 | 3,664,431,218,716,587,000 | 36.285024 | 79 | 0.584581 | false |
slorg1/heroku-buildpack-python
|
vendor/pip-pop/pip/_vendor/progress/helpers.py
|
404
|
2894
|
# Copyright (c) 2012 Giorgos Verigakis <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import print_function
from __future__ import unicode_literals
HIDE_CURSOR = '\x1b[?25l'
SHOW_CURSOR = '\x1b[?25h'
class WriteMixin(object):
hide_cursor = False
def __init__(self, message=None, **kwargs):
super(WriteMixin, self).__init__(**kwargs)
self._width = 0
if message:
self.message = message
if self.file.isatty():
if self.hide_cursor:
print(HIDE_CURSOR, end='', file=self.file)
print(self.message, end='', file=self.file)
self.file.flush()
def write(self, s):
if self.file.isatty():
b = '\b' * self._width
c = s.ljust(self._width)
print(b + c, end='', file=self.file)
self._width = max(self._width, len(s))
self.file.flush()
def finish(self):
if self.file.isatty() and self.hide_cursor:
print(SHOW_CURSOR, end='', file=self.file)
class WritelnMixin(object):
hide_cursor = False
def __init__(self, message=None, **kwargs):
super(WritelnMixin, self).__init__(**kwargs)
if message:
self.message = message
if self.file.isatty() and self.hide_cursor:
print(HIDE_CURSOR, end='', file=self.file)
def clearln(self):
if self.file.isatty():
print('\r\x1b[K', end='', file=self.file)
def writeln(self, line):
if self.file.isatty():
self.clearln()
print(line, end='', file=self.file)
self.file.flush()
def finish(self):
if self.file.isatty():
print(file=self.file)
if self.hide_cursor:
print(SHOW_CURSOR, end='', file=self.file)
from signal import signal, SIGINT
from sys import exit
class SigIntMixin(object):
"""Registers a signal handler that calls finish on SIGINT"""
def __init__(self, *args, **kwargs):
super(SigIntMixin, self).__init__(*args, **kwargs)
signal(SIGINT, self._sigint_handler)
def _sigint_handler(self, signum, frame):
self.finish()
exit(0)
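# Illustrative usage sketch (not part of the vendored module): a concrete
# progress class is expected to provide ``file`` (normally a TTY stream)
# and may set ``message`` before the mixin methods run. ``DemoSpinner`` is
# a hypothetical example class, not the package's real Spinner.
if __name__ == '__main__':
    import sys
    import time
    class DemoSpinner(WritelnMixin):
        file = sys.stderr
        message = 'working '
        def update(self, i):
            # Rewrite the current line with the next spinner frame.
            self.writeln(self.message + '|/-\\'[i % 4])
    spinner = DemoSpinner()
    for i in range(8):
        spinner.update(i)
        time.sleep(0.1)
    spinner.finish()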
|
mit
| 8,256,556,916,306,266,000 | 2,778,635,446,794,361,300 | 30.456522 | 74 | 0.618521 | false |
40223148/2015cda_g5
|
static/Brython3.1.0-20150301-090019/Lib/site-packages/pygame/base.py
|
603
|
4652
|
#!/usr/bin/env python
## https://bitbucket.org/pygame/pygame/raw/2383b8ab0e2273bc83c545ab9c18fee1f3459c64/pygame/base.py
'''Pygame core routines
Contains the core routines that are used by the rest of the
pygame modules. Its routines are merged directly into the pygame
namespace. This mainly includes the auto-initialization `init` and
`quit` routines.
There is a small module named `locals` that also gets merged into
this namespace. This contains all the constants needed by pygame.
Object constructors also get placed into this namespace, you can
call functions like `Rect` and `Surface` to create objects of
that type. As a convenience, you can import the members of
pygame.locals directly into your module's namespace with::
from pygame.locals import *
Most of the pygame examples do this if you'd like to take a look.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import atexit
import sys
#import SDL
_quitfunctions = []
class error(RuntimeError):
pass
def init():
'''Autoinitialize all imported pygame modules.
Initialize all imported pygame modules. Includes pygame modules
that are not part of the base modules (like font and image).
It does not raise exceptions, but instead silently counts which
modules have failed to init. The return argument contains a count
of the number of modules initialized, and the number of modules
that failed to initialize.
You can always initialize the modules you want by hand. The
modules that need it have an `init` and `quit` routine built in,
which you can call directly. They also have a `get_init` routine
    which you can use to double-check the initialization. Note that
the manual `init` routines will raise an exception on error. Be
aware that most platforms require the display module to be
initialized before others. This `init` will handle that for you,
but if you initialize by hand, be aware of this constraint.
    As with the manual `init` routines, it is safe to call this
`init` as often as you like.
:rtype: int, int
:return: (count_passed, count_failed)
'''
success = 0
fail = 0
#SDL.SDL_Init(SDL.SDL_INIT_EVENTTHREAD | SDL.SDL_INIT_TIMER)
if _video_autoinit():
success += 1
else:
fail += 1
for mod in sys.modules.values():
if hasattr(mod, '__PYGAMEinit__') and callable(mod.__PYGAMEinit__):
try:
mod.__PYGAMEinit__()
success += 1
except:
fail += 1
return success, fail
def register_quit(func):
'''Routine to call when pygame quits.
The given callback routine will be called when pygame is
quitting. Quit callbacks are served on a 'last in, first out'
basis.
'''
_quitfunctions.append(func)
def _video_autoquit():
    # SDL is stubbed out in this port (see the commented import above), so
    # the original shutdown call would raise NameError at interpreter exit:
    # if SDL.SDL_WasInit(SDL.SDL_INIT_VIDEO):
    #     SDL.SDL_QuitSubSystem(SDL.SDL_INIT_VIDEO)
    pass
def _video_autoinit():
return 1
#if not SDL.SDL_WasInit(SDL.SDL_INIT_VIDEO):
# SDL.SDL_InitSubSystem(SDL.SDL_INIT_VIDEO)
# SDL.SDL_EnableUNICODE(1)
#return 1
def _atexit_quit():
while _quitfunctions:
func = _quitfunctions.pop()
func()
_video_autoquit()
#SDL.SDL_Quit()
def get_sdl_version():
'''Get the version of the linked SDL runtime.
:rtype: int, int, int
:return: major, minor, patch
'''
#v = SDL.SDL_Linked_Version()
#return v.major, v.minor, v.patch
return None, None, None
def quit():
'''Uninitialize all pygame modules.
Uninitialize all pygame modules that have been initialized. Even
if you initialized the module by hand, this `quit` will
uninitialize it for you.
All the pygame modules are uninitialized automatically when your
    program exits, so you will usually not need this routine. If your
    program plans to keep running after it is done with pygame, this
    would be a good time to make this call.
'''
_atexit_quit()
def get_error():
'''Get current error message.
SDL maintains an internal current error message. This message is
usually given to you when an SDL related exception occurs, but
sometimes you may want to call this directly yourself.
:rtype: str
'''
#return SDL.SDL_GetError()
return ''
def _rgba_from_obj(obj):
if not type(obj) in (tuple, list):
return None
if len(obj) == 1:
return _rgba_from_obj(obj[0])
elif len(obj) == 3:
return (int(obj[0]), int(obj[1]), int(obj[2]), 255)
elif len(obj) == 4:
return obj
else:
return None
atexit.register(_atexit_quit)
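# Illustrative usage sketch (not part of the original module): ``init``
# returns a (passed, failed) tuple instead of raising, and callbacks
# registered with ``register_quit`` run last-in, first-out when ``quit``
# (or the atexit handler) fires.
if __name__ == '__main__':
    def _demo_on_quit():
        print('pygame stub is shutting down')
    register_quit(_demo_on_quit)
    passed, failed = init()
    print('initialized %d modules, %d failed' % (passed, failed))
    quit()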
|
gpl-3.0
| -7,222,614,957,259,555,000 | -3,416,306,650,093,458,000 | 28.443038 | 98 | 0.675193 | false |
nmrao/robotframework
|
src/robot/libdoc.py
|
17
|
8412
|
#!/usr/bin/env python
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module implementing the command line entry point for the `Libdoc` tool.
This module can be executed from the command line using the following
approaches::
python -m robot.libdoc
python path/to/robot/libdoc.py
Instead of ``python`` it is possible to use also other Python interpreters.
This module also provides :func:`libdoc` and :func:`libdoc_cli` functions
that can be used programmatically. Other code is for internal usage.
Libdoc itself is implemented in the :mod:`~robot.libdocpkg` package.
"""
USAGE = """robot.libdoc -- Robot Framework library documentation generator
Version: <VERSION>
Usage: python -m robot.libdoc [options] library output_file
or: python -m robot.libdoc [options] library list|show|version [names]
Libdoc tool can generate keyword documentation in HTML and XML formats both
for test libraries and resource files. HTML format is suitable for humans and
XML specs for RIDE and other tools. Libdoc also has a few special commands to
show library or resource information on the console.
Libdoc supports all library and resource types and also earlier generated XML
specs can be used as input. If a library needs arguments, they must be given
as part of the library name and separated by two colons, for example, like
`LibraryName::arg1::arg2`.
Options
=======
-f --format HTML|XML Specifies whether to generate HTML or XML output.
                          If this option is not used, the format is taken
                          from the extension of the output file.
-F --docformat ROBOT|HTML|TEXT|REST
Specifies the source documentation format. Possible
values are Robot Framework's documentation format,
HTML, plain text, and reStructuredText. The default
value can be specified in test library source code
and the initial default value is `ROBOT`.
New in Robot Framework 2.7.5.
-n --name newname Sets the name of the documented library or resource.
-v --version newversion Sets the version of the documented library or
resource.
-P --pythonpath path * Additional locations where to search for libraries
and resources.
-E --escape what:with * Escapes characters which are problematic in console.
'what' is the name of the character to escape and
'with' is the string to escape it with.
<-------------------ESCAPES------------------------>
-h -? --help Print this help.
Creating documentation
======================
When creating documentation in HTML or XML format, the output file must
be specified as a second argument after the library/resource name or path.
The output format is detected automatically from the extension but can also
be set with the `--format` option.
Examples:
python -m robot.libdoc src/MyLib.py doc/MyLib.html
jython -m robot.libdoc MyJavaLibrary.java MyJavaLibrary.html
python -m robot.libdoc --name MyLib Remote::10.0.0.42:8270 MyLib.xml
Viewing information on console
==============================
Libdoc has three special commands to show information on the console. These
commands are used instead of the name of the output file, and they can also
take additional arguments.
list: List names of the keywords the library/resource contains. Can be
limited to show only certain keywords by passing optional patterns as
         arguments. A keyword is listed if its name contains any given pattern.
show: Show library/resource documentation. Can be limited to show only
         certain keywords by passing names as arguments. A keyword is shown if
its name matches any given name. Special argument `intro` will show
the library introduction and importing sections.
version: Show library version
Optional patterns given to `list` and `show` are case and space insensitive.
Both also accept `*` and `?` as wildcards.
Examples:
python -m robot.libdoc Dialogs list
python -m robot.libdoc Selenium2Library list browser
python -m robot.libdoc Remote::10.0.0.42:8270 show
python -m robot.libdoc Dialogs show PauseExecution execute*
python -m robot.libdoc Selenium2Library show intro
python -m robot.libdoc Selenium2Library version
Alternative execution
=====================
Libdoc works with all interpreters supported by Robot Framework (Python,
Jython and IronPython). In the examples above Libdoc is executed as an
installed module, but it can also be executed as a script like
`python path/robot/libdoc.py`.
For more information about Libdoc and other built-in tools, see
http://robotframework.org/robotframework/#built-in-tools.
"""
import sys
import os
# Allows running as a script. __name__ check needed with multiprocessing:
# http://code.google.com/p/robotframework/issues/detail?id=1137
if 'robot' not in sys.modules and __name__ == '__main__':
import pythonpathsetter
from robot.utils import Application, seq2str
from robot.errors import DataError
from robot.libdocpkg import LibraryDocumentation, ConsoleViewer
class LibDoc(Application):
def __init__(self):
Application.__init__(self, USAGE, arg_limits=(2,), auto_version=False)
def validate(self, options, arguments):
if ConsoleViewer.handles(arguments[1]):
ConsoleViewer.validate_command(arguments[1], arguments[2:])
elif len(arguments) > 2:
raise DataError('Only two arguments allowed when writing output.')
return options, arguments
def main(self, args, name='', version='', format=None, docformat=None):
lib_or_res, output = args[:2]
libdoc = LibraryDocumentation(lib_or_res, name, version,
self._get_doc_format(docformat))
if ConsoleViewer.handles(output):
ConsoleViewer(libdoc).view(output, *args[2:])
else:
libdoc.save(output, self._get_output_format(format, output))
self.console(os.path.abspath(output))
def _get_doc_format(self, format):
if not format:
return None
return self._verify_format('Doc format', format,
['ROBOT', 'TEXT', 'HTML', 'REST'])
def _get_output_format(self, format, output):
default = os.path.splitext(output)[1][1:]
return self._verify_format('Format', format or default, ['HTML', 'XML'])
def _verify_format(self, type, format, valid):
format = format.upper()
if format not in valid:
raise DataError("%s must be %s, got '%s'."
% (type, seq2str(valid, lastsep=' or '), format))
return format
def libdoc_cli(arguments):
"""Executes Libdoc similarly as from the command line.
:param arguments: Command line arguments as a list of strings.
For programmatic usage the :func:`libdoc` function is typically better. It
has a better API for that usage and does not call :func:`sys.exit` like
this function.
Example::
from robot.libdoc import libdoc_cli
libdoc_cli(['--version', '1.0', 'MyLibrary.py', 'MyLibraryDoc.html'])
"""
LibDoc().execute_cli(arguments)
def libdoc(library_or_resource, outfile, name='', version='', format=None):
"""Executes libdoc.
Arguments have same semantics as Libdoc command line options with
same names.
Example::
from robot.libdoc import libdoc
libdoc('MyLibrary.py', 'MyLibraryDoc.html', version='1.0')
"""
LibDoc().execute(library_or_resource, outfile, name=name, version=version,
format=format)
if __name__ == '__main__':
libdoc_cli(sys.argv[1:])
|
apache-2.0
| -6,918,594,348,517,794,000 | -6,142,210,148,014,461,000 | 37.944444 | 80 | 0.675226 | false |
kyleabeauchamp/EnsemblePaper
|
code/model_building/fit_model.py
|
1
|
1807
|
import numpy as np
from fitensemble import belt, ensemble_fitter
import experiment_loader
import sys
import ALA3
belt.ne.set_num_threads(1)
def run(ff, prior, regularization_strength, bayesian_bootstrap_run):
pymc_filename = ALA3.data_directory + "/models/model_%s_%s_reg-%.1f-BB%d.h5" % (ff, prior, regularization_strength, bayesian_bootstrap_run)
populations_filename = ALA3.data_directory + "/frame_populations/pops_%s_%s_reg-%.1f-BB%d.dat" % (ff, prior, regularization_strength, bayesian_bootstrap_run)
predictions, measurements, uncertainties = experiment_loader.load(ff)
num_frames, num_measurements = predictions.shape
bootstrap_index_list = np.array_split(np.arange(num_frames), ALA3.num_blocks)
if bayesian_bootstrap_run == 0:
prior_pops = None
else:
prior_pops = ensemble_fitter.sample_prior_pops(num_frames, bootstrap_index_list)
if prior == "maxent":
model = belt.MaxEntBELT(predictions.values, measurements.values, uncertainties.values, regularization_strength, prior_pops=prior_pops)
elif prior == "dirichlet":
model = belt.DirichletBELT(predictions.values, measurements.values, uncertainties.values, regularization_strength, prior_pops=prior_pops)
elif prior == "MVN":
model = belt.MVNBELT(predictions.values, measurements.values, uncertainties.values, regularization_strength, prior_pops=prior_pops)
model.sample(ALA3.num_samples, thin=ALA3.thin, burn=ALA3.burn, filename=pymc_filename)
p = model.accumulate_populations()
np.savetxt(populations_filename, p)
if __name__ == "__main__":
ff = sys.argv[1]
prior = sys.argv[2]
regularization_strength = float(sys.argv[3])
bayesian_bootstrap_run = int(sys.argv[4])
run(ff, prior, regularization_strength, bayesian_bootstrap_run)
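# A hedged command-line usage sketch (the argument values are illustrative only):
#   python fit_model.py ff99 maxent 10.0 0
# which is equivalent to run(ff='ff99', prior='maxent', regularization_strength=10.0, bayesian_bootstrap_run=0).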
|
gpl-3.0
| 2,622,566,507,316,693,500 | -2,278,186,770,559,781,400 | 45.333333 | 161 | 0.723852 | false |
harshaneelhg/scikit-learn
|
examples/cluster/plot_lena_compress.py
|
271
|
2229
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Vector Quantization Example
=========================================================
The classic image-processing example, Lena (an 8-bit grayscale,
512 x 512 image), is used here to illustrate how `k`-means can be
used for vector quantization.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn import cluster
n_clusters = 5
np.random.seed(0)
try:
lena = sp.lena()
except AttributeError:
# Newer versions of scipy have lena in misc
from scipy import misc
lena = misc.lena()
X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_
# Create the compressed image: np.choose maps each pixel's cluster label to its cluster-center value.
lena_compressed = np.choose(labels, values)
lena_compressed.shape = lena.shape
vmin = lena.min()
vmax = lena.max()
# original lena
plt.figure(1, figsize=(3, 2.2))
plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256)
# compressed lena
plt.figure(2, figsize=(3, 2.2))
plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# equal bins lena
regular_values = np.linspace(0, 256, n_clusters + 1)
regular_labels = np.searchsorted(regular_values, lena) - 1
regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean
regular_lena = np.choose(regular_labels.ravel(), regular_values)
regular_lena.shape = lena.shape
plt.figure(3, figsize=(3, 2.2))
plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# histogram
plt.figure(4, figsize=(3, 2.2))
plt.clf()
plt.axes([.01, .01, .98, .98])
plt.hist(X, bins=256, color='.5', edgecolor='.5')
plt.yticks(())
plt.xticks(regular_values)
values = np.sort(values)
for center_1, center_2 in zip(values[:-1], values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b')
for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')
plt.show()
|
bsd-3-clause
| -2,086,211,801,591,310,800 | -2,898,404,618,130,443,000 | 26.85 | 72 | 0.659785 | false |
dancingdan/tensorflow
|
tensorflow/python/autograph/utils/context_managers.py
|
64
|
1708
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various context managers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.framework import ops
from tensorflow.python.ops import tensor_array_ops
def control_dependency_on_returns(return_value):
"""Create a TF control dependency on the return values of a function.
If the function had no return value, a no-op context is returned.
Args:
return_value: The return value to set as control dependency.
Returns:
A context manager.
"""
def control_dependency_handle(t):
if isinstance(t, tensor_array_ops.TensorArray):
return t.flow
return t
if return_value is None:
return contextlib.contextmanager(lambda: (yield))()
# TODO(mdan): Filter to tensor objects.
if not isinstance(return_value, (list, tuple)):
return_value = (return_value,)
return_value = tuple(control_dependency_handle(t) for t in return_value)
return ops.control_dependencies(return_value)
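# A hedged usage sketch (graph-mode TF is assumed; the tensor below is illustrative):
#
#   t = ops.convert_to_tensor(1.0)
#   with control_dependency_on_returns(t):
#     pass  # Ops created here would run only after `t` is computed.
#   with control_dependency_on_returns(None):
#     pass  # No-op context: no dependency is added.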
|
apache-2.0
| -8,678,702,268,534,706,000 | 2,263,149,902,680,536,300 | 33.857143 | 80 | 0.710187 | false |
sangwook236/general-development-and-testing
|
sw_dev/python/rnd/test/language_processing/opennmt_py_test.py
|
2
|
77277
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# REF [site] >>
# http://opennmt.net/
# https://github.com/OpenNMT/OpenNMT-py
import argparse, time
import torch
import torchtext
import onmt
import onmt.translate
import onmt.utils.parse
def save_model(model_filepath, model, generator):
#torch.save(model.state_dict(), model_filepath)
#torch.save({'state_dict': model.state_dict()}, model_filepath)
torch.save({'model': model.state_dict(), 'generator': generator.state_dict()}, model_filepath)
#torch.save({'model': model.state_dict(), 'generator': generator.state_dict(), 'optim': optim.state_dict()}, model_filepath)
print('Saved a model to {}.'.format(model_filepath))
def load_model(model_filepath, model, generator, device='cpu'):
"""
loaded_data = torch.load(model_filepath, map_location=device)
#model.load_state_dict(loaded_data)
model.load_state_dict(loaded_data['state_dict'])
print('Loaded a model from {}.'.format(model_filepath))
return model
"""
checkpoint = torch.load(model_filepath, map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint['model'])
generator.load_state_dict(checkpoint['generator'])
#optim.load_state_dict(checkpoint['optim'])
#opt = checkpoint['opt']
#vocab = checkpoint['vocab']
#epoch = checkpoint['epoch']
print('Loaded a model from {}.'.format(model_filepath))
return model, generator
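# A hedged round-trip sketch (the path is illustrative; `model` and `generator`
# are assumed to be built as in library_example() below):
#   save_model('./model.pt', model, generator)
#   model, generator = load_model('./model.pt', model, generator, device='cpu')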
#--------------------------------------------------------------------
# REF [file] >> ${OpenNMT-py_HOME}/onmt/bin/preprocess.py
def preprocess_test():
# REF [site] >> https://opennmt.net/OpenNMT-py/options/preprocess.html
if False:
parser = onmt.utils.parse.ArgumentParser(description='preprocess_test')
onmt.opts.config_opts(parser)
onmt.opts.preprocess_opts(parser)
opt = parser.parse_args()
else:
opt = argparse.Namespace()
opt.config = None # Config file path (default: None).
opt.save_config = None # Config file save path (default: None).
# Data.
opt.data_type = 'img' # Type of the source input. Options are [text|img|audio|vec]. (default: text).
opt.train_src = ['data/im2text/src-train.txt'] # Path(s) to the training source data (default: None).
opt.train_tgt = ['data/im2text/tgt-train.txt'] # Path(s) to the training target data (default: None).
opt.train_align = [None] # Path(s) to the training src-tgt alignment (default: [None]).
opt.train_ids = [None] # IDs to name training shards, used for corpus weighting (default: [None]).
opt.valid_src = 'data/im2text/src-val.txt' # Path to the validation source data (default: None).
opt.valid_tgt = 'data/im2text/tgt-val.txt' # Path to the validation target data (default: None).
opt.valid_align = None # Path(s) to the validation src-tgt alignment (default: None).
opt.src_dir = 'data/im2text/images/' # Source directory for image or audio files. (default: ).
opt.save_data = 'data/im2text/demo' # Output file for the prepared data (default: None).
opt.max_shard_size = 0 # Deprecated use shard_size instead (default: 0).
		opt.shard_size = 500 # Divide src_corpus and tgt_corpus into multiple smaller src corpus and tgt corpus files, then build shards; each shard will have opt.shard_size samples except the last one. shard_size=0 means no segmentation; shard_size>0 means segment the dataset into multiple shards, each with shard_size samples (default: 1000000).
opt.num_threads = 1 # Number of shards to build in parallel. (default: 1).
opt.overwrite = False # Overwrite existing shards if any. (default: False).
# Vocab.
opt.src_vocab = '' # Path to an existing source vocabulary. Format: one word per line. (default: ).
opt.tgt_vocab = '' # Path to an existing target vocabulary. Format: one word per line. (default: ).
opt.features_vocabs_prefix = '' # Path prefix to existing features vocabularies (default: ).
opt.src_vocab_size = 50000 # Size of the source vocabulary (default: 50000).
opt.tgt_vocab_size = 50000 # Size of the target vocabulary (default: 50000).
opt.vocab_size_multiple = 1 # Make the vocabulary size a multiple of this value (default: 1).
opt.src_words_min_frequency = 0
opt.tgt_words_min_frequency = 2
opt.dynamic_dict = False # Create dynamic dictionaries (default: False).
opt.share_vocab = False # Share source and target vocabulary (default: False).
# Pruning.
opt.src_seq_length = 50 # Maximum source sequence length (default: 50).
opt.src_seq_length_trunc = None # Truncate source sequence length. (default: None).
opt.tgt_seq_length = 150 # Maximum target sequence length to keep. (default: 50).
opt.tgt_seq_length_trunc = None # Truncate target sequence length. (default: None).
opt.lower = False # Lowercase data (default: False).
opt.filter_valid = False # Filter validation data by src and/or tgt length (default: False).
# Random.
opt.shuffle = 0 # Shuffle data (default: 0).
opt.seed = 3435 # Random seed (default: 3435).
# Logging.
opt.report_every = 100000 # Report status every this many sentences (default: 100000).
opt.log_file = '' # Output logs to a file under this path. (default: ).
opt.log_file_level = '0' # {CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET, 50, 40, 30, 20, 10, 0}.
# Speech.
opt.sample_rate = 16000 # Sample rate. (default: 16000).
opt.window_size = 0.02 # Window size for spectrogram in seconds. (default: 0.02).
opt.window_stride = 0.01 # Window stride for spectrogram in seconds. (default: 0.01).
opt.window = 'hamming' # Window type for spectrogram generation. (default: hamming).
# Image.
		opt.image_channel_size = 1 # Using a grayscale image makes model training faster and the model smaller {3, 1} (default: 3).
# Noise.
opt.subword_prefix = '_' # Subword prefix to build wordstart mask (default: _).
opt.subword_prefix_is_joiner = False # mask will need to be inverted if prefix is joiner (default: False).
print('Preprocess options:\n{}'.format(opt))
#------------------------------------------------------------
#onmt.bin.preprocess.preprocess(opt)
#------------------------------------------------------------
# REF [function] >> preprocess() in https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/bin/preprocess.py.
onmt.utils.parse.ArgumentParser.validate_preprocess_args(opt)
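	# NOTE [hedged] >> Only option validation is performed here; the commented-out
	# onmt.bin.preprocess.preprocess(opt) call above is what would actually build the shards.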
# REF [file] >> ${OpenNMT-py_HOME}/onmt/bin/train.py
def train_test():
# REF [site] >> https://opennmt.net/OpenNMT-py/options/train.html
if True:
parser = onmt.utils.parse.ArgumentParser(description='train_test')
onmt.opts.config_opts(parser)
onmt.opts.model_opts(parser)
onmt.opts.train_opts(parser)
opt = parser.parse_args()
else:
opt = argparse.Namespace()
opt.config = None # Config file path (default: None).
opt.save_config = None # Config file save path (default: None).
# Model-Embeddings.
opt.src_word_vec_size = 80 # Word embedding size for src. (default: 500).
opt.tgt_word_vec_size = 80 # Word embedding size for tgt. (default: 500).
opt.word_vec_size = 80 # Word embedding size for src and tgt. (default: -1).
opt.share_decoder_embeddings = False # Use a shared weight matrix for the input and output word embeddings in the decoder. (default: False).
opt.share_embeddings = False # Share the word embeddings between encoder and decoder. Need to use shared dictionary for this option. (default: False).
opt.position_encoding = False # Use a sin to mark relative words positions. Necessary for non-RNN style models. (default: False).
# Model: Embedding Features.
opt.feat_merge = 'concat' # Merge action for incorporating features embeddings. Options [concat|sum|mlp]. (default: concat).
opt.feat_vec_size = -1 # If specified, feature embedding sizes will be set to this. Otherwise, feat_vec_exponent will be used. (default: -1).
opt.feat_vec_exponent = 0.7 # If -feat_merge_size is not set, feature embedding sizes will be set to N^feat_vec_exponent where N is the number of values the feature takes. (default: 0.7).
# Model: Encoder-Decoder.
opt.model_type = 'img' # Type of source model to use. Allows the system to incorporate non-text inputs. Options are [text|img|audio|vec]. (default: text).
opt.model_dtype = 'fp32' # Data type of the model. {fp32, fp16}. (default: fp32).
opt.encoder_type = 'brnn' # Type of encoder layer to use. Non-RNN layers are experimental. Options are [rnn|brnn|mean|transformer|cnn]. (default: rnn).
opt.decoder_type = 'rnn' # Type of decoder layer to use. Non-RNN layers are experimental. Options are [rnn|transformer|cnn]. (default: rnn).
opt.layers = -1 # Number of layers in enc/dec. (default: -1).
opt.enc_layers = 2 # Number of layers in the encoder (default: 2).
opt.dec_layers = 2 # Number of layers in the decoder (default: 2).
opt.rnn_size = -1 # Size of rnn hidden states. Overwrites enc_rnn_size and dec_rnn_size (default: -1).
opt.enc_rnn_size = 500 # Size of encoder rnn hidden states. Must be equal to dec_rnn_size except for speech-to-text. (default: 500).
opt.dec_rnn_size = 500 # Size of decoder rnn hidden states. Must be equal to enc_rnn_size except for speech-to-text. (default: 500).
opt.audio_enc_pooling = '1' # The amount of pooling of audio encoder, either the same amount of pooling across all layers indicated by a single number, or different amounts of pooling per layer separated by comma. (default: 1).
opt.cnn_kernel_width = 3 # Size of windows in the cnn, the kernel_size is (cnn_kernel_width, 1) in conv layer (default: 3).
opt.input_feed = 1 # Feed the context vector at each time step as additional input (via concatenation with the word embeddings) to the decoder. (default: 1).
opt.bridge = False # Have an additional layer between the last encoder state and the first decoder state (default: False).
opt.rnn_type = 'LSTM' # The gate type to use in the RNNs {LSTM, GRU, SRU} (default: LSTM).
opt.brnn = None # Deprecated, use 'encoder_type'. (default: None).
opt.context_gate = None # Type of context gate to use. Do not select for no context gate. {source, target, both} (default: None).
# Model: Attention.
opt.global_attention = 'general' # The attention type to use: dotprod or general (Luong) or MLP (Bahdanau) {dot, general, mlp, none} (default: general).
opt.global_attention_function = 'softmax' # {softmax, sparsemax}.
opt.self_attn_type = 'scaled-dot' # Self attention type in Transformer decoder layer -- currently "scaled-dot" or "average" (default: scaled-dot).
opt.max_relative_positions = 0 # Maximum distance between inputs in relative positions representations. For more detailed information, see: https://arxiv.org/pdf/1803.02155.pdf (default: 0).
opt.heads = 8 # Number of heads for transformer self-attention (default: 8).
opt.transformer_ff = 2048 # Size of hidden transformer feed-forward (default: 2048).
opt.aan_useffn = False # Turn on the FFN layer in the AAN decoder (default: False).
# Model: Alignement.
opt.lambda_align = 0.0 # Lambda value for alignement loss of Garg et al (2019) For more detailed information, see: https://arxiv.org/abs/1909.02074 (default: 0.0).
opt.alignment_layer = -3 # Layer number which has to be supervised. (default: -3).
opt.alignment_heads = 0 # N. of cross attention heads per layer to supervised with (default: 0).
opt.full_context_alignment = False # Whether alignment is conditioned on full target context. (default: False).
# Generator.
opt.copy_attn = False # Train copy attention layer. (default: False).
opt.copy_attn_type = 'general' # The copy attention type to use. Leave as None to use the same as -global_attention. {dot, general, mlp, none} (default: None).
opt.generator_function = 'softmax' # Which function to use for generating probabilities over the target vocabulary (choices: softmax, sparsemax) (default: softmax).
opt.copy_attn_force = False # When available, train to copy. (default: False).
opt.reuse_copy_attn = False # Reuse standard attention for copy (default: False).
opt.copy_loss_by_seqlength = False # Divide copy loss by length of sequence (default: False).
opt.coverage_attn = False # Train a coverage attention layer. (default: False).
opt.lambda_coverage = 0.0 # Lambda value for coverage loss of See et al (2017) (default: 0.0).
opt.loss_scale = 0 # For FP16 training, the static loss scale to use. If not set, the loss scale is dynamically computed. (default: 0).
opt.apex_opt_level = 'O1' # For FP16 training, the opt_level to use. See https://nvidia.github.io/apex/amp.html#opt-levels. {O0, O1, O2, O3} (default: O1).
# General.
opt.data = 'data/im2text/demo' # Path prefix to the ".train.pt" and ".valid.pt" file path from preprocess.py (default: None).
opt.data_ids = [None] # In case there are several corpora. (default: [None]).
opt.data_weights = [1] # Weights of different corpora, should follow the same order as in -data_ids. (default: [1]).
opt.data_to_noise = [] # IDs of datasets on which to apply noise. (default: []).
opt.save_model = 'demo-model' # Model filename (the model will be saved as <save_model>_N.pt where N is the number of steps (default: model).
opt.save_checkpoint_steps = 5000 # Save a checkpoint every X steps (default: 5000).
opt.keep_checkpoint = -1 # Keep X checkpoints (negative: keep all) (default: -1).
opt.gpuid = [] # Deprecated see world_size and gpu_ranks. (default: []).
opt.gpu_ranks = [0] # List of ranks of each process. (default: []).
opt.world_size = 1 # Total number of distributed processes. (default: 1).
opt.gpu_backend = 'nccl' # Type of torch distributed backend (default: nccl).
opt.gpu_verbose_level = 0 # Gives more info on each process per GPU. (default: 0).
opt.master_ip = 'localhost' # IP of master for torch.distributed training. (default: localhost).
opt.master_port = 10000 # Port of master for torch.distributed training. (default: 10000).
opt.queue_size = 40 # Size of queue for each process in producer/consumer (default: 40).
opt.seed = -1 # Random seed used for the experiments reproducibility. (default: -1).
# Initialization.
opt.param_init = 0.1 # Parameters are initialized over uniform distribution with support (-param_init, param_init). Use 0 to not use initialization (default: 0.1).
opt.param_init_glorot = False # Init parameters with xavier_uniform. Required for transformer. (default: False).
opt.train_from = '' # If training from a checkpoint then this is the path to the pretrained model's state_dict. (default: ).
opt.reset_optim = 'none' # Optimization resetter when train_from. {none, all, states, keep_states} (default: none).
opt.pre_word_vecs_enc = None # If a valid path is specified, then this will load pretrained word embeddings on the encoder side. See README for specific formatting instructions. (default: None).
opt.pre_word_vecs_dec = None # If a valid path is specified, then this will load pretrained word embeddings on the decoder side. See README for specific formatting instructions. (default: None)
opt.fix_word_vecs_enc = False # Fix word embeddings on the encoder side. (default: False).
opt.fix_word_vecs_dec = False # Fix word embeddings on the decoder side. (default: False).
# Optimization: Type.
opt.batch_size = 20 # Maximum batch size for training (default: 64).
opt.batch_type = 'sents' # Batch grouping for batch_size. Standard is sents. Tokens will do dynamic batching {sents, tokens} (default: sents).
opt.pool_factor = 8192 # Factor used in data loading and batch creations. It will load the equivalent of 'pool_factor' batches, sort them by the according 'sort_key' to produce homogeneous batches and reduce padding, and yield the produced batches in a shuffled way. Inspired by torchtext's pool mechanism. (default: 8192).
opt.normalization = 'sents' # Normalization method of the gradient. {sents, tokens} (default: sents).
opt.accum_count = [1] # Accumulate gradient this many times. Approximately equivalent to updating batch_size * accum_count batches at once. Recommended for Transformer. (default: [1]).
opt.accum_steps = [0] # Steps at which accum_count values change (default: [0]).
opt.valid_steps = 10000 # Perfom validation every X steps (default: 10000).
opt.valid_batch_size = 32 # Maximum batch size for validation (default: 32).
opt.max_generator_batches = 32 # Maximum batches of words in a sequence to run the generator on in parallel. Higher is faster, but uses more memory. Set to 0 to disable. (default: 32).
opt.train_steps = 100000 # Number of training steps (default: 100000).
opt.single_pass = False # Make a single pass over the training dataset. (default: False).
opt.epochs = 0 # Deprecated epochs see train_steps (default: 0).
opt.early_stopping = 0 # Number of validation steps without improving. (default: 0).
opt.early_stopping_criteria = None # Criteria to use for early stopping. (default: None).
opt.optim = 'sgd' # Optimization method. {sgd, adagrad, adadelta, adam, sparseadam, adafactor, fusedadam} (default: sgd).
opt.adagrad_accumulator_init = 0 # Initializes the accumulator values in adagrad. Mirrors the initial_accumulator_value option in the tensorflow adagrad (use 0.1 for their default). (default: 0).
opt.max_grad_norm = 20.0 # If the norm of the gradient vector exceeds this, renormalize it to have the norm equal to max_grad_norm (default: 5).
opt.dropout = [0.3] # Dropout probability; applied in LSTM stacks. (default: [0.3]).
opt.attention_dropout = [0.1] # Attention Dropout probability. (default: [0.1]).
opt.dropout_steps = [0] # Steps at which dropout changes. (default: [0]).
opt.truncated_decoder = 0 # Truncated bptt. (default: 0).
opt.adam_beta1 = 0.9 # The beta1 parameter used by Adam. Almost without exception a value of 0.9 is used in the literature, seemingly giving good results, so we would discourage changing this value from the default without due consideration. (default: 0.9).
opt.adam_beta2 = 0.999 # The beta2 parameter used by Adam. Typically a value of 0.999 is recommended, as this is the value suggested by the original paper describing Adam, and is also the value adopted in other frameworks such as Tensorflow and Keras, i.e. see: https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer or https://keras.io/optimizers/. Whereas recently the paper "Attention is All You Need" suggested a value of 0.98 for beta2, this parameter may not work well for normal models / default baselines. (default: 0.999)
opt.label_smoothing = 0.0 # Label smoothing value epsilon. Probabilities of all non-true labels will be smoothed by epsilon / (vocab_size - 1). Set to zero to turn off label smoothing. For more detailed information, see: https://arxiv.org/abs/1512.00567 (default: 0.0).
opt.average_decay = 0 # Moving average decay. Set to other than 0 (e.g. 1e-4) to activate. Similar to Marian NMT implementation: http://www.aclweb.org/anthology/P18-4020 For more detail on Exponential Moving Average: https://en.wikipedia.org/wiki/Moving_average (default: 0).
opt.average_every = 1 # Step for moving average. Default is every update, if -average_decay is set. (default: 1).
opt.src_noise = [] # {sen_shuffling, infilling, mask}.
opt.src_noise_prob = [] # Probabilities of src_noise functions (default: []).
# Optimization: Rate.
opt.learning_rate = 0.1 # Starting learning rate. Recommended settings: sgd = 1, adagrad = 0.1, adadelta = 1, adam = 0.001 (default: 1.0).
opt.learning_rate_decay = 0.5 # If update_learning_rate, decay learning rate by this much if steps have gone past start_decay_steps (default: 0.5).
opt.start_decay_steps = 50000 # Start decaying every decay_steps after start_decay_steps (default: 50000).
opt.decay_steps = 10000 # Decay every decay_steps (default: 10000).
opt.decay_method = 'none' # Use a custom decay rate. (default: none).
opt.warmup_steps = 4000 # Number of warmup steps for custom decay. (default: 4000).
# Logging.
opt.report_every = 50 # Print stats at this interval. (default: 50).
opt.log_file = '' # Output logs to a file under this path. (default: ).
opt.log_file_level = '0' # {CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET, 50, 40, 30, 20, 10, 0}.
opt.exp_host = '' # Send logs to this crayon server. (default: ).
opt.exp = '' # Name of the experiment for logging. (default: ).
opt.tensorboard = False # Use tensorboard for visualization during training. Must have the library tensorboard >= 1.14. (default: False).
opt.tensorboard_log_dir = 'runs/onmt' # Log directory for Tensorboard. This is also the name of the run. (default: runs/onmt).
# Speech.
opt.sample_rate = 16000 # Sample rate. (default: 16000).
opt.window_size = 0.02 # Window size for spectrogram in seconds. (default: 0.02).
# Image.
		opt.image_channel_size = 1 # Using a grayscale image makes model training faster and the model smaller {3, 1} (default: 3).
print('Train options:\n{}'.format(opt))
#------------------------------------------------------------
#onmt.bin.train.train(opt)
#------------------------------------------------------------
# REF [function] >> train() in https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/bin/train.py.
onmt.utils.parse.ArgumentParser.validate_train_opts(opt)
#onmt.utils.parse.ArgumentParser.update_model_opts(opt)
#onmt.utils.parse.ArgumentParser.validate_model_opts(opt)
# REF [function] >> main() in https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/train_single.py.
if opt.train_from:
onmt.utils.logging.logger.info('Loading checkpoint from {}.'.format(opt.train_from))
checkpoint = torch.load(opt.train_from, map_location=lambda storage, loc: storage)
model_opt = onmt.utils.parse.ArgumentParser.ckpt_model_opts(checkpoint['opt'])
onmt.utils.parse.ArgumentParser.update_model_opts(model_opt)
onmt.utils.parse.ArgumentParser.validate_model_opts(model_opt)
onmt.utils.logging.logger.info('Loading vocab from checkpoint at {}.'.format(opt.train_from))
vocab = checkpoint['vocab']
else:
checkpoint = None
model_opt = opt
onmt.utils.parse.ArgumentParser.update_model_opts(model_opt)
onmt.utils.parse.ArgumentParser.validate_model_opts(model_opt)
vocab = torch.load(opt.data + '.vocab.pt')
fields = vocab
device_id = 0
	device = torch.device(('cuda:{}'.format(device_id) if device_id >= 0 else 'cuda') if torch.cuda.is_available() else 'cpu')
print('Device: {}.'.format(device))
#--------------------
# Build a model.
model = onmt.model_builder.build_model(model_opt, opt, fields, checkpoint=None)
	generator = None # FIXME [implement] >> Build a generator (e.g. Linear + LogSoftmax over the target vocabulary, as in library_example() below).
	# NOTE [info] >> The generator is not called automatically, so it has to be called explicitly.
	#model.generator = generator
	model.add_module('generator', generator)
model = model.to(device)
model.generator = model.generator.to(device)
#--------------------
# Set up an optimizer.
lr = 1.0
torch_optimizer = torch.optim.SGD(model.parameters(), lr=lr)
optimizer = onmt.utils.optimizers.Optimizer(torch_optimizer, learning_rate=lr, learning_rate_decay_fn=None, max_grad_norm=2)
#--------------------
# Train.
model_saver = onmt.models.build_model_saver(model_opt, opt, model, fields, optimizer)
#model_saver = None
trainer = onmt.trainer.build_trainer(opt, device_id, model, fields, optimizer, model_saver=model_saver)
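	# NOTE [hedged sketch] >> Training itself is not started here; a typical call would be:
	#   trainer.train(train_iter, train_steps=opt.train_steps, valid_iter=valid_iter, valid_steps=opt.valid_steps)
	# where train_iter/valid_iter are built from the preprocessed data as in library_example() below.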
# REF [file] >> ${OpenNMT-py_HOME}/onmt/bin/translate.py
def translate_test():
# REF [site] >> https://opennmt.net/OpenNMT-py/options/translate.html
if True:
parser = onmt.utils.parse.ArgumentParser(description='translate_test')
onmt.opts.config_opts(parser)
onmt.opts.translate_opts(parser)
opt = parser.parse_args()
else:
opt = argparse.Namespace()
opt.config = None # Config file path (default: None).
opt.save_config = None # Config file save path (default: None).
# Model.
opt.model = [] # Path to model .pt file(s). Multiple models can be specified, for ensemble decoding. (default: []).
opt.fp32 = False # Force the model to be in FP32 because FP16 is very slow on GTX1080(ti). (default: False).
opt.avg_raw_probs = False # If this is set, during ensembling scores from different models will be combined by averaging their raw probabilities and then taking the log. Otherwise, the log probabilities will be averaged directly. Necessary for models whose output layers can assign zero probability. (default: False).
# Data.
opt.data_type = 'text' # Type of the source input. Options: [text | img]. (default: text).
opt.src = None # Source sequence to decode (one line per sequence) (default: None).
opt.src_dir = '' # Source directory for image or audio files (default: ).
opt.tgt = None # True target sequence (optional) (default: None).
opt.shard_size = 10000 # Divide src and tgt (if applicable) into smaller multiple src and tgt files, then build shards, each shard will have opt.shard_size samples except last shard. shard_size=0 means no segmentation shard_size>0 means segment dataset into multiple shards, each shard has shard_size samples (default: 10000).
opt.output = 'pred.txt' # Path to output the predictions (each line will be the decoded sequence (default: pred.txt).
opt.report_align = False # Report alignment for each translation. (default: False).
opt.report_time = False # Report some translation time metrics (default: False).
opt.dynamic_dict = False # Create dynamic dictionaries (default: False).
opt.share_vocab = False # Share source and target vocabulary (default: False).
# Random Sampling.
opt.random_sampling_topk = 1 # Set this to -1 to do random sampling from full distribution. Set this to value k>1 to do random sampling restricted to the k most likely next tokens. Set this to 1 to use argmax or for doing beam search. (default: 1).
opt.random_sampling_temp = 1.0 # If doing random sampling, divide the logits by this before computing softmax during decoding. (default: 1.0).
opt.seed = 829 # Random seed (default: 829).
# Beam.
opt.beam_size = 5 # Beam size (default: 5).
opt.min_length = 0 # Minimum prediction length (default: 0).
opt.max_length = 100 # Maximum prediction length. (default: 100).
opt.max_sent_length = None # Deprecated, use '-max_length' instead (default: None).
opt.stepwise_penalty = False # Apply penalty at every decoding step. Helpful for summary penalty. (default: False).
opt.length_penalty = 'none' # Length Penalty to use. {none, wu, avg} (default: none).
opt.ratio = -0.0 # Ratio based beam stop condition (default: -0.0).
opt.coverage_penalty = 'none' # Coverage Penalty to use. {none, wu, summary} (default: none).
opt.alpha = 0.0 # Google NMT length penalty parameter (higher = longer generation) (default: 0.0).
opt.beta = -0.0 # Coverage penalty parameter (default: -0.0).
opt.block_ngram_repeat = 0 # Block repetition of ngrams during decoding. (default: 0).
opt.ignore_when_blocking = [] # Ignore these strings when blocking repeats. You want to block sentence delimiters. (default: []).
opt.replace_unk = False # Replace the generated UNK tokens with the source token that had highest attention weight. If phrase_table is provided, it will look up the identified source token and give the corresponding target token. If it is not provided (or the identified source token does not exist in the table), then it will copy the source token. (default: False).
opt.phrase_table = '' # If phrase_table is provided (with replace_unk), it will look up the identified source token and give the corresponding target token. If it is not provided (or the identified source token does not exist in the table), then it will copy the source token. (default: )
# Logging.
opt.verbose = False # Print scores and predictions for each sentence (default: False).
opt.log_file = '' # Output logs to a file under this path. (default: ).
opt.log_file_level = '0' # {CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET, 50, 40, 30, 20, 10, 0}.
opt.attn_debug = False # Print best attn for each word (default: False).
opt.align_debug = False # Print best align for each word (default: False).
opt.dump_beam = '' # File to dump beam information to. (default: ).
opt.n_best = 1 # If verbose is set, will output the n_best decoded sentences (default: 1).
# Efficiency.
opt.batch_size = 30 # Batch size (default: 30).
opt.batch_type = 'sents' # Batch grouping for batch_size. Standard is sents. Tokens will do dynamic batching {sents, tokens} (default: sents).
opt.gpu = -1 # Device to run on (default: -1).
# Speech.
opt.sample_rate = 16000 # Sample rate. (default: 16000).
opt.window_size = 0.02 # Window size for spectrogram in seconds (default: 0.02).
opt.window_stride = 0.01 # Window stride for spectrogram in seconds (default: 0.01).
opt.window = 'hamming' # Window type for spectrogram generation (default: hamming).
# Image.
		opt.image_channel_size = 3 # Using a grayscale image makes model training faster and the model smaller {3, 1} (default: 3).
print('Translate options:\n{}'.format(opt))
#------------------------------------------------------------
#onmt.bin.translate.translate(opt)
#------------------------------------------------------------
# REF [function] >> translate() in https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/bin/translate.py.
onmt.utils.parse.ArgumentParser.validate_translate_opts(opt)
logger = onmt.utils.logging.init_logger(opt.log_file)
translator = onmt.translate.translator.build_translator(opt, report_score=True, logger=None, out_file=None)
src_shards = onmt.utils.misc.split_corpus(opt.src, opt.shard_size)
tgt_shards = onmt.utils.misc.split_corpus(opt.tgt, opt.shard_size)
shard_pairs = zip(src_shards, tgt_shards)
for i, (src_shard, tgt_shard) in enumerate(shard_pairs):
logger.info('Translating shard {}.'.format(i))
translator.translate(
src=src_shard,
tgt=tgt_shard,
src_dir=opt.src_dir,
batch_size=opt.batch_size,
batch_type=opt.batch_type,
attn_debug=opt.attn_debug,
align_debug=opt.align_debug
)
# REF [file] >> ${OpenNMT-py_HOME}/onmt/bin/server.py
def server_test():
raise NotImplementedError
#--------------------------------------------------------------------
# REF [site] >> https://opennmt.net/OpenNMT-py/Library.html
def library_example():
is_trained, is_model_loaded = True, False
preprocessed_data_dir_path = './data'
if is_trained:
model_filepath = './onmt_library_model.pt'
if is_model_loaded:
		model_filepath_to_load = None # FIXME >> Set this to an actual checkpoint path before loading (the assert below enforces it).
assert not is_model_loaded or (is_model_loaded and model_filepath_to_load is not None)
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
gpu = 0 if torch.cuda.is_available() else -1
#--------------------
# Prepare data.
# Load in the vocabulary for the model of interest.
vocab_fields = torch.load(preprocessed_data_dir_path + '/data.vocab.pt')
train_data_files = [
preprocessed_data_dir_path + '/data.train.0.pt'
]
valid_data_files = [
preprocessed_data_dir_path + '/data.valid.0.pt'
]
src_text_field = vocab_fields['src'].base_field
src_vocab = src_text_field.vocab
src_padding = src_vocab.stoi[src_text_field.pad_token]
tgt_text_field = vocab_fields['tgt'].base_field
tgt_vocab = tgt_text_field.vocab
tgt_padding = tgt_vocab.stoi[tgt_text_field.pad_token]
train_iter = onmt.inputters.inputter.DatasetLazyIter(
dataset_paths=train_data_files, fields=vocab_fields,
batch_size=50, batch_size_multiple=1, batch_size_fn=None, pool_factor=8192,
device=device, is_train=True, repeat=True
)
valid_iter = onmt.inputters.inputter.DatasetLazyIter(
dataset_paths=valid_data_files, fields=vocab_fields,
batch_size=10, batch_size_multiple=1, batch_size_fn=None, pool_factor=8192,
device=device, is_train=False, repeat=False
)
#--------------------
# Build a model.
emb_size = 100
rnn_size = 500
# Specify the core model.
encoder_embeddings = onmt.modules.Embeddings(emb_size, len(src_vocab), word_padding_idx=src_padding)
encoder = onmt.encoders.RNNEncoder(
hidden_size=rnn_size, num_layers=1, bidirectional=True,
rnn_type='LSTM', embeddings=encoder_embeddings
)
decoder_embeddings = onmt.modules.Embeddings(emb_size, len(tgt_vocab), word_padding_idx=tgt_padding)
decoder = onmt.decoders.decoder.InputFeedRNNDecoder(
hidden_size=rnn_size, num_layers=1, bidirectional_encoder=True,
rnn_type='LSTM', embeddings=decoder_embeddings
)
model = onmt.models.model.NMTModel(encoder, decoder)
# Specify the tgt word generator.
model.generator = torch.nn.Sequential(
torch.nn.Linear(rnn_size, len(tgt_vocab)),
torch.nn.LogSoftmax(dim=-1)
)
if is_model_loaded:
model, model.generator = load_model(model_filepath_to_load, model, model.generator, device=device)
model = model.to(device)
model.generator = model.generator.to(device)
#--------------------
if is_trained:
# Specify loss computation module.
loss = onmt.utils.loss.NMTLossCompute(
criterion=torch.nn.NLLLoss(ignore_index=tgt_padding, reduction='sum'),
generator=model.generator
)
# Set up an optimizer.
lr = 1.0
torch_optimizer = torch.optim.SGD(model.parameters(), lr=lr)
optim = onmt.utils.optimizers.Optimizer(torch_optimizer, learning_rate=lr, learning_rate_decay_fn=None, max_grad_norm=2)
#--------------------
# Train.
# Keeping track of the output requires a report manager.
report_manager = onmt.utils.ReportMgr(report_every=50, start_time=None, tensorboard_writer=None)
trainer = onmt.Trainer(
model=model, train_loss=loss, valid_loss=loss,
optim=optim, report_manager=report_manager
)
print('Start training...')
start_time = time.time()
total_stats = trainer.train(
train_iter=train_iter, train_steps=400,
valid_iter=valid_iter, valid_steps=200
)
print('End training: {} secs.'.format(time.time() - start_time))
print('Train: Accuracy = {}, Cross entropy = {}, Perplexity = {}.'.format(total_stats.accuracy(), total_stats.xent(), total_stats.ppl()))
save_model(model_filepath, model, model.generator)
#--------------------
# Load up the translation functions.
src_reader = onmt.inputters.str2reader['text']
tgt_reader = onmt.inputters.str2reader['text']
scorer = onmt.translate.GNMTGlobalScorer(alpha=0.7, beta=0.0, length_penalty='avg', coverage_penalty='none')
# Decoding strategy:
# Greedy search, if beam_size = 1.
# Beam search, otherwise.
translator = onmt.translate.Translator(
model=model, fields=vocab_fields,
src_reader=src_reader(), tgt_reader=tgt_reader(),
global_scorer=scorer, gpu=gpu
)
# Build a word-based translation from the batch output of translator and the underlying dictionaries.
builder = onmt.translate.TranslationBuilder(data=torch.load(valid_data_files[0]), fields=vocab_fields)
for batch in valid_iter:
print('Start translating...')
start_time = time.time()
trans_batch = translator.translate_batch(batch=batch, src_vocabs=[src_vocab], attn_debug=False)
print('End translating: {} secs.'.format(time.time() - start_time))
translations = builder.from_batch(trans_batch)
for trans in translations:
print(trans.log(0))
#--------------------------------------------------------------------
def build_im2latex_model(input_channel, num_classes, word_vec_size):
bidirectional_encoder = False
embedding_dropout = 0.3
encoder_num_layers = 2
encoder_rnn_size = 500
encoder_dropout = 0.3
decoder_rnn_type = 'LSTM'
decoder_num_layers = 2
decoder_hidden_size = 500
decoder_dropout = 0.3
src_embeddings = None
tgt_embeddings = onmt.modules.Embeddings(
word_vec_size=word_vec_size,
word_vocab_size=num_classes,
word_padding_idx=1,
position_encoding=False,
feat_merge='concat',
feat_vec_exponent=0.7,
feat_vec_size=-1,
feat_padding_idx=[],
feat_vocab_sizes=[],
dropout=embedding_dropout,
sparse=False,
fix_word_vecs=False
)
encoder = onmt.encoders.ImageEncoder(
num_layers=encoder_num_layers, bidirectional=bidirectional_encoder,
rnn_size=encoder_rnn_size, dropout=encoder_dropout, image_chanel_size=input_channel
)
decoder = onmt.decoders.InputFeedRNNDecoder(
rnn_type=decoder_rnn_type, bidirectional_encoder=bidirectional_encoder,
num_layers=decoder_num_layers, hidden_size=decoder_hidden_size,
attn_type='general', attn_func='softmax',
coverage_attn=False, context_gate=None,
copy_attn=False, dropout=decoder_dropout, embeddings=tgt_embeddings,
reuse_copy_attn=False, copy_attn_type='general'
)
generator = torch.nn.Sequential(
torch.nn.Linear(in_features=decoder_hidden_size, out_features=num_classes, bias=True),
onmt.modules.util_class.Cast(dtype=torch.float32),
torch.nn.LogSoftmax(dim=-1)
)
model = onmt.models.NMTModel(encoder, decoder)
return model, generator
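# A hedged construction sketch (the sizes mirror im2latex_example() below):
#   model, generator = build_im2latex_model(input_channel=3, num_classes=466, word_vec_size=500)
#   model.add_module('generator', generator)  # Attach the generator so checkpoints include it.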
class MyImageEncoder(onmt.encoders.encoder.EncoderBase):
def __init__(self, image_height, input_channel, hidden_size, num_layers, bidirectional=False):
super().__init__()
assert image_height % 16 == 0, 'image_height has to be a multiple of 16'
self.image_height = image_height
# Build a model.
# This implementation assumes that input size is h x w.
self.cnn = torch.nn.Sequential(
torch.nn.Conv2d(input_channel, 64, 3, 1, 1), torch.nn.ReLU(True), torch.nn.MaxPool2d(2, 2), # 64 x h/2 x w/2.
torch.nn.Conv2d(64, 128, 3, 1, 1), torch.nn.ReLU(True), torch.nn.MaxPool2d(2, 2), # 128 x h/4 x w/4.
torch.nn.Conv2d(128, 256, 3, 1, 1), torch.nn.BatchNorm2d(256), torch.nn.ReLU(True), # 256 x h/4 x w/4.
torch.nn.Conv2d(256, 256, 3, 1, 1), torch.nn.ReLU(True), torch.nn.MaxPool2d((2, 2), (2, 1), (0, 1)), # 256 x h/8 x w/4+1.
torch.nn.Conv2d(256, 512, 3, 1, 1), torch.nn.BatchNorm2d(512), torch.nn.ReLU(True), # 512 x h/8 x w/4+1.
torch.nn.Conv2d(512, 512, 3, 1, 1), torch.nn.ReLU(True), torch.nn.MaxPool2d((2, 2), (2, 1), (0, 1)), # 512 x h/16 x w/4+2.
torch.nn.Conv2d(512, 512, 2, 1, 0), torch.nn.BatchNorm2d(512), torch.nn.ReLU(True) # 512 x h/16-1 x w/4+1.
)
num_features = (image_height // 16 - 1) * 512
#import rare.crnn_lang
#self.rnn = torch.nn.Sequential(
# rare.crnn_lang.BidirectionalLSTM(num_features, hidden_size, hidden_size),
# rare.crnn_lang.BidirectionalLSTM(hidden_size, hidden_size, hidden_size)
#)
self.sequence_rnn = torch.nn.LSTM(num_features, hidden_size, num_layers=num_layers, bidirectional=bidirectional, batch_first=False)
if bidirectional:
self.sequence_projector = torch.nn.Linear(hidden_size * 2, hidden_size * 2)
#self.sequence_projector = torch.nn.Linear(hidden_size * 2, hidden_size)
else:
self.sequence_projector = torch.nn.Linear(hidden_size, hidden_size)
def forward(self, src, lengths=None):
# NOTE [info] >> This resizing is not good.
		#src = torch.nn.functional.interpolate(src, size=(self.image_height, int(src.shape[3] * self.image_height / src.shape[2])), mode='bilinear', align_corners=False)
		src = torch.nn.functional.interpolate(src, size=(self.image_height, src.shape[3]), mode='bilinear', align_corners=False)
# Conv features.
conv = self.cnn(src) # [b, c_out, h/16-1, w/4+1].
b, c, h, w = conv.size()
#assert h == 1, 'The height of conv must be 1'
#conv = conv.squeeze(2) # [b, c_out, w/4+1].
conv = conv.reshape(b, -1, w) # [b, c_out * h/16-1, w/4+1].
conv = conv.permute(2, 0, 1) # [w/4+1, b, c_out * h/16-1].
# RNN features.
#enc_outputs, enc_hiddens = self.rnn((conv, None)) # [w/4+1, b, hidden size], ([#directions, b, hidden size], [#directions, b, hidden size]).
enc_outputs, enc_hiddens = self.sequence_rnn(conv) # [w/4+1, b, #directions * hidden size], ([#layers * #directions, b, hidden size], [#layers * #directions, b, hidden size]).
enc_outputs = self.sequence_projector(enc_outputs) # [w/4+1, b, hidden size].
return enc_hiddens, enc_outputs, lengths
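# A hedged shape check for MyImageEncoder (sizes are illustrative; the height must be a multiple of 16):
#   enc = MyImageEncoder(image_height=64, input_channel=3, hidden_size=500, num_layers=2)
#   hiddens, outputs, _ = enc(torch.randn(2, 3, 64, 128))
#   # outputs: [w/4+1, batch, hidden_size] = [33, 2, 500] for a 64 x 128 input.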
def build_my_im2latex_model(image_height, input_channel, num_classes, word_vec_size):
bidirectional_encoder = False
embedding_dropout = 0.3
encoder_num_layers = 2
encoder_rnn_size = 500
encoder_dropout = 0.3
decoder_rnn_type = 'LSTM'
decoder_num_layers = 2
decoder_hidden_size = encoder_rnn_size * 2 if bidirectional_encoder else encoder_rnn_size
decoder_dropout = 0.3
src_embeddings = None
tgt_embeddings = onmt.modules.Embeddings(
word_vec_size=word_vec_size,
word_vocab_size=num_classes,
word_padding_idx=1,
position_encoding=False,
feat_merge='concat',
feat_vec_exponent=0.7,
feat_vec_size=-1,
feat_padding_idx=[],
feat_vocab_sizes=[],
dropout=embedding_dropout,
sparse=False,
fix_word_vecs=False
)
encoder = MyImageEncoder(
image_height, input_channel,
hidden_size=encoder_rnn_size, num_layers=encoder_num_layers, bidirectional=bidirectional_encoder
)
decoder = onmt.decoders.InputFeedRNNDecoder(
rnn_type=decoder_rnn_type, bidirectional_encoder=bidirectional_encoder,
num_layers=decoder_num_layers, hidden_size=decoder_hidden_size,
attn_type='general', attn_func='softmax',
coverage_attn=False, context_gate=None,
copy_attn=False, dropout=decoder_dropout, embeddings=tgt_embeddings,
reuse_copy_attn=False, copy_attn_type='general'
)
generator = torch.nn.Sequential(
torch.nn.Linear(in_features=decoder_hidden_size, out_features=num_classes, bias=True),
onmt.modules.util_class.Cast(dtype=torch.float32),
torch.nn.LogSoftmax(dim=-1)
)
model = onmt.models.NMTModel(encoder, decoder)
return model, generator
# REF [function] >> Translator.translate_batch() in https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/translate/translator.py
def create_greedy_search_strategy(batch_size, random_sampling_topk, random_sampling_temp, min_length, max_length, block_ngram_repeat, bos_index, eos_index, pad_index, exclusion_idxs):
replace_unk = False
#tgt_prefix = False
attn_debug = False
return onmt.translate.greedy_search.GreedySearch(
pad=pad_index, bos=bos_index, eos=eos_index,
batch_size=batch_size,
min_length=min_length, max_length=max_length,
block_ngram_repeat=block_ngram_repeat,
exclusion_tokens=exclusion_idxs,
return_attention=attn_debug or replace_unk,
sampling_temp=random_sampling_temp,
keep_topk=random_sampling_topk
)
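# A hedged usage sketch (the special-token indices are illustrative; they normally come from tgt_vocab.stoi):
#   strategy = create_greedy_search_strategy(batch_size=32, random_sampling_topk=1, random_sampling_temp=1.0,
#       min_length=0, max_length=100, block_ngram_repeat=0, bos_index=2, eos_index=3, pad_index=1, exclusion_idxs=set())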
# REF [function] >> Translator.translate_batch() in https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/translate/translator.py
def create_beam_search_strategy(batch_size, scorer, beam_size, n_best, ratio, min_length, max_length, block_ngram_repeat, bos_index, eos_index, pad_index, exclusion_idxs):
	stepwise_penalty = None
replace_unk = False
#tgt_prefix = False
attn_debug = False
return onmt.translate.beam_search.BeamSearch(
beam_size,
batch_size=batch_size,
pad=pad_index, bos=bos_index, eos=eos_index,
n_best=n_best,
global_scorer=scorer,
min_length=min_length, max_length=max_length,
return_attention=attn_debug or replace_unk,
block_ngram_repeat=block_ngram_repeat,
exclusion_tokens=exclusion_idxs,
stepwise_penalty=stepwise_penalty,
ratio=ratio
)
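# A hedged usage sketch (scorer settings mirror library_example() above; the indices are illustrative):
#   scorer = onmt.translate.GNMTGlobalScorer(alpha=0.7, beta=0.0, length_penalty='avg', coverage_penalty='none')
#   strategy = create_beam_search_strategy(batch_size=32, scorer=scorer, beam_size=5, n_best=1, ratio=-0.0,
#       min_length=0, max_length=100, block_ngram_repeat=0, bos_index=2, eos_index=3, pad_index=1, exclusion_idxs=set())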
# REF [function] >> Translator._decode_and_generate() in https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/translate/translator.py
def decode_and_generate(model, decoder_in, memory_bank, batch, src_vocabs, memory_lengths, beam_size, copy_attn, tgt_vocab, tgt_unk_idx, src_map=None, step=None, batch_offset=None):
if copy_attn:
# Turn any copied words into UNKs.
decoder_in = decoder_in.masked_fill(decoder_in.gt(len(tgt_vocab) - 1), tgt_unk_idx)
# Decoder forward, takes [tgt_len, batch, nfeats] as input
# and [src_len, batch, hidden] as memory_bank
# in case of inference tgt_len = 1, batch = beam times batch_size
# in case of Gold Scoring tgt_len = actual length, batch = 1 batch
dec_out, dec_attn = model.decoder(decoder_in, memory_bank, memory_lengths=memory_lengths, step=step)
# Generator forward.
if not copy_attn:
if 'std' in dec_attn:
attn = dec_attn['std']
else:
attn = None
log_probs = model.generator(dec_out.squeeze(0))
# returns [(batch_size x beam_size) , vocab ] when 1 step
# or [ tgt_len, batch_size, vocab ] when full sentence
else:
attn = dec_attn['copy']
scores = model.generator(dec_out.view(-1, dec_out.size(2)), attn.view(-1, attn.size(2)), src_map)
# here we have scores [tgt_lenxbatch, vocab] or [beamxbatch, vocab]
if batch_offset is None:
scores = scores.view(-1, batch.batch_size, scores.size(-1))
scores = scores.transpose(0, 1).contiguous()
else:
scores = scores.view(-1, beam_size, scores.size(-1))
scores = onmt.modules.copy_generator.collapse_copy_scores(
scores,
batch,
tgt_vocab,
src_vocabs,
batch_dim=0,
batch_offset=batch_offset
)
scores = scores.view(decoder_in.size(0), -1, scores.size(-1))
log_probs = scores.squeeze(0).log()
# returns [(batch_size x beam_size) , vocab ] when 1 step
# or [ tgt_len, batch_size, vocab ] when full sentence
return log_probs, attn
# REF [function] >> Translator._translate_batch_with_strategy() in https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/translate/translator.py
# _translate_batch_with_strategy()
# _run_encoder()
# _gold_score()
# _score_target()
# _decode_and_generate()
# _decode_and_generate()
# _align_forward()
# _run_encoder()
def translate_batch_with_strategy(model, decode_strategy, src, batch_size, beam_size, unk_index, tgt_vocab, src_vocabs=[]):
copy_attn = False # Fixed.
report_align = False # Fixed.
parallel_paths = decode_strategy.parallel_paths # beam_size.
enc_states, memory_bank, src_lengths = model.encoder(src, lengths=None)
if src_lengths is None:
src_lengths = torch.Tensor(batch_size).type_as(memory_bank).long().fill_(memory_bank.size(0))
model.decoder.init_state(src, memory_bank, enc_states)
src_map, target_prefix = None, None
fn_map_state, memory_bank, memory_lengths, src_map = decode_strategy.initialize(memory_bank, src_lengths, src_map, target_prefix)
if fn_map_state is not None:
model.decoder.map_state(fn_map_state)
for step in range(decode_strategy.max_length):
decoder_input = decode_strategy.current_predictions.view(1, -1, 1)
log_probs, attn = decode_and_generate(
model,
decoder_input,
memory_bank,
			batch=None, # NOTE [caution] >> batch is only used by the copy-attention path (copy_attn=False here).
src_vocabs=src_vocabs,
memory_lengths=memory_lengths,
beam_size=beam_size, copy_attn=copy_attn,
tgt_vocab=tgt_vocab, tgt_unk_idx=unk_index,
src_map=src_map,
step=step,
batch_offset=decode_strategy.batch_offset
)
decode_strategy.advance(log_probs, attn)
any_finished = decode_strategy.is_finished.any()
if any_finished:
decode_strategy.update_finished()
if decode_strategy.done:
break
select_indices = decode_strategy.select_indices
if any_finished:
# Reorder states.
if isinstance(memory_bank, tuple):
memory_bank = tuple(x.index_select(1, select_indices) for x in memory_bank)
else:
memory_bank = memory_bank.index_select(1, select_indices)
memory_lengths = memory_lengths.index_select(0, select_indices)
if src_map is not None:
src_map = src_map.index_select(1, select_indices)
if parallel_paths > 1 or any_finished:
model.decoder.map_state(lambda state, dim: state.index_select(dim, select_indices))
results = dict()
results['scores'] = decode_strategy.scores
results['predictions'] = decode_strategy.predictions
results['attention'] = decode_strategy.attention
	if report_align:
		# Unreachable in this standalone sketch (report_align is fixed to False above); in the
		# original Translator class this would call self._align_forward(batch, decode_strategy.predictions).
		raise NotImplementedError('Alignment is not supported in this standalone function.')
	else:
		results['alignment'] = [[] for _ in range(batch_size)]
return results
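# A hedged end-to-end sketch (assumes `model`, a decode strategy from the helpers above, and `tgt_vocab`;
# `src` is the encoder input batch):
#   results = translate_batch_with_strategy(model, strategy, src, batch_size=batch_size,
#       beam_size=5, unk_index=tgt_vocab.stoi['<unk>'], tgt_vocab=tgt_vocab)
#   best_ids = results['predictions'][0][0]  # Token ids of the best hypothesis for the first sample.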
def im2latex_example():
src_data_type, tgt_data_type = 'img', 'text'
input_channel = 3
num_classes = 466
word_vec_size = 500
batch_size = 32
train_steps, valid_steps, save_checkpoint_steps = 400, 200, 200
#train_steps, valid_steps, save_checkpoint_steps = 10000, 1000, 5000
is_trained, is_model_loaded = True, True
is_small_data_used = True
	is_my_model_used = False # Use my own RARE-style image encoder (MyImageEncoder above) instead of onmt's ImageEncoder.
is_preprocessed_vocab_used, is_preprocessed_data_iterators_used = True, True
image_height = 64 if is_my_model_used else None
if is_small_data_used:
# For im2text_small.
# REF [site] >> http://lstm.seas.harvard.edu/latex/im2text_small.tgz
preprocessed_data_dir_path = './data/im2text_small'
num_train_data_files, num_valid_data_files = 2, 1
else:
# For im2text.
# REF [site] >> http://lstm.seas.harvard.edu/latex/im2text.tgz
preprocessed_data_dir_path = './data/im2text'
num_train_data_files, num_valid_data_files = 153, 17
if is_trained:
if is_my_model_used:
model_filepath = './data/im2latex_my_model.pt'
else:
model_filepath = './data/im2latex_model.pt'
if is_model_loaded:
if is_my_model_used:
model_filepath_to_load = './data/im2latex_my_model.pt'
else:
# Downloaded from http://lstm.seas.harvard.edu/latex/py-model.pt.
model_filepath_to_load = './data/py-model.pt'
#model_filepath_to_load = './data/im2latex_model.pt'
assert not is_model_loaded or (is_model_loaded and model_filepath_to_load is not None)
gpu = 0
device = torch.device(('cuda:{}'.format(gpu) if gpu >= 0 else 'cuda') if torch.cuda.is_available() else 'cpu')
print('Device: {}.'.format(device))
#--------------------
# Prepare data.
def read_lines_from_file(filepath):
try:
with open(filepath, 'r', encoding='utf-8') as fd:
lines = fd.read().splitlines() # A list of strings.
return lines
except UnicodeDecodeError as ex:
print('Unicode decode error in {}: {}.'.format(filepath, ex))
raise
except FileNotFoundError as ex:
print('File not found, {}: {}.'.format(filepath, ex))
raise
# REF [site] >> https://opennmt.net/OpenNMT-py/im2text.html
	# NOTE [info] >> The two vocab_fields objects are different, so a model has to be trained accordingly.
	#	If not, wrong results will be obtained.
if is_preprocessed_vocab_used:
# NOTE [info] >> When preprocessing data by onmt_preprocess or ${OpenNMT-py_HOME}/onmt/bin/preprocess.py.
# Load in the vocabulary for the model of interest.
vocab_fields = torch.load(preprocessed_data_dir_path + '/demo.vocab.pt')
else:
#UNKNOWN_TOKEN, PAD_TOKEN, SOS_TOKEN, EOS_TOKEN = '<UNK>', '<PAD>', '<SOS>', '<EOS>'
UNKNOWN_TOKEN, PAD_TOKEN, SOS_TOKEN, EOS_TOKEN = '<unk>', '<blank>', '<s>', '</s>'
def preprocess(x):
return x
def postprocess(batch, vocab):
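			# Zero-pad every image in the batch to the largest height and width so they can be stacked.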
if len(batch) == 1: return batch[0].unsqueeze(dim=0)
max_height, max_width = max([tt.shape[1] for tt in batch]), max([tt.shape[2] for tt in batch])
batch_resized = torch.zeros((len(batch), 3, max_height, max_width), dtype=batch[0].dtype)
for idx, tt in enumerate(batch):
batch_resized[idx, :, :tt.shape[1], :tt.shape[2]] = tt
return batch_resized
# REF [function] >> image_fields() in https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/inputters/image_dataset.py.
src_field = torchtext.data.Field(
sequential=False, use_vocab=False, init_token=None, eos_token=None, fix_length=None,
#dtype=torch.float32, preprocessing=preprocess, postprocessing=postprocess, lower=False,
dtype=torch.float32, preprocessing=None, postprocessing=postprocess, lower=False,
tokenize=None, tokenizer_language='en',
include_lengths=False, batch_first=False, pad_token=None, pad_first=False, unk_token=UNKNOWN_TOKEN,
truncate_first=False, stop_words=None, is_target=False
)
tgt_field = torchtext.data.Field(
sequential=True, use_vocab=True, init_token=SOS_TOKEN, eos_token=EOS_TOKEN, fix_length=None,
dtype=torch.int64, preprocessing=None, postprocessing=None, lower=False,
tokenize=None, tokenizer_language='en',
#tokenize=functools.partial(onmt.inputters.inputter._feature_tokenize, layer=0, feat_delim=None, truncate=None), tokenizer_language='en',
include_lengths=False, batch_first=False, pad_token=PAD_TOKEN, pad_first=False, unk_token=UNKNOWN_TOKEN,
truncate_first=False, stop_words=None, is_target=False
)
indices_field = torchtext.data.Field(
sequential=False, use_vocab=False, init_token=None, eos_token=None, fix_length=None,
dtype=torch.int64, preprocessing=None, postprocessing=None, lower=False,
tokenize=None, tokenizer_language='en',
include_lengths=False, batch_first=False, pad_token=None, pad_first=False, unk_token=UNKNOWN_TOKEN,
truncate_first=False, stop_words=None, is_target=False
)
corpus_id_field = torchtext.data.Field(
sequential=False, use_vocab=True, init_token=None, eos_token=None, fix_length=None,
dtype=torch.int64, preprocessing=None, postprocessing=None, lower=False,
tokenize=None, tokenizer_language='en',
include_lengths=False, batch_first=False, pad_token=None, pad_first=False, unk_token=UNKNOWN_TOKEN,
truncate_first=False, stop_words=None, is_target=False
)
# NOTE [info] >> It is better to build a vocabulary from corpora.
# TODO [choose] >>
if True:
tgt_train_texts = read_lines_from_file(preprocessed_data_dir_path + '/tgt-train.txt')
tgt_valid_texts = read_lines_from_file(preprocessed_data_dir_path + '/tgt-val.txt')
tgt_test_texts = read_lines_from_file(preprocessed_data_dir_path + '/tgt-test.txt')
texts = [txt.split() for txt in tgt_train_texts] + [txt.split() for txt in tgt_valid_texts] + [txt.split() for txt in tgt_test_texts]
tgt_field.build_vocab(texts) # Sort vocabulary + add special tokens, <unknown>, <pad>, <bos>, and <eos>.
else:
vocab = read_lines_from_file(preprocessed_data_dir_path + '/vocab.txt')
#tgt_field.vocab = vocab # AttributeError: 'list' object has no attribute 'stoi'.
tgt_field.build_vocab([vocab]) # Sort vocabulary + add special tokens, <unknown>, <pad>, <bos>, and <eos>.
corpus_id_field.build_vocab(['train'])
# REF [function] >> build_vocab() in https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/inputters/inputter.py.
vocab_fields = {
'src': src_field,
'tgt': onmt.inputters.text_dataset.TextMultiField('tgt', tgt_field, feats_fields=[]),
'indices': indices_field,
'corpus_id': corpus_id_field,
}
"""
src_text_field = vocab_fields['src'].base_field # Error: AttributeError: 'Field' object has no attribute 'base_field'.
src_vocab = src_text_field.vocab
src_padding = src_vocab.stoi[src_text_field.pad_token]
#src_unk = src_vocab.stoi[src_text_field.unk_token]
#src_bos = src_vocab.stoi[src_text_field.init_token]
#src_eos = src_vocab.stoi[src_text_field.eos_token]
"""
tgt_text_field = vocab_fields['tgt'].base_field
tgt_vocab = tgt_text_field.vocab
tgt_padding = tgt_vocab.stoi[tgt_text_field.pad_token]
#tgt_unk = tgt_vocab.stoi[tgt_text_field.unk_token]
#tgt_bos = tgt_vocab.stoi[tgt_text_field.init_token]
#tgt_eos = tgt_vocab.stoi[tgt_text_field.eos_token]
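# NOTE [info] >> (illustrative sanity check) stoi and itos are inverse mappings:
#assert tgt_vocab.itos[tgt_padding] == tgt_text_field.pad_token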
src_reader = onmt.inputters.str2reader[src_data_type]
tgt_reader = onmt.inputters.str2reader[tgt_data_type]
if src_data_type == 'img':
src_reader_obj = src_reader(truncate=None, channel_size=input_channel)
elif src_data_type == 'audio':
src_reader_obj = src_reader(sample_rate=0, window_size=0, window_stride=0, window=None, normalize_audio=True, truncate=None)
else:
src_reader_obj = src_reader()
if tgt_data_type == 'img':
tgt_reader_obj = tgt_reader(truncate=None, channel_size=input_channel)
elif tgt_data_type == 'audio':
tgt_reader_obj = tgt_reader(sample_rate=0, window_size=0, window_stride=0, window=None, normalize_audio=True, truncate=None)
else:
tgt_reader_obj = tgt_reader()
if is_preprocessed_data_iterators_used:
# NOTE [info] >> Used when data has been preprocessed by onmt_preprocess or ${OpenNMT-py_HOME}/onmt/bin/preprocess.py.
train_data_files = list()
for idx in range(num_train_data_files):
train_data_files.append(preprocessed_data_dir_path + '/demo.train.{}.pt'.format(idx))
valid_data_files = list()
for idx in range(num_valid_data_files):
valid_data_files.append(preprocessed_data_dir_path + '/demo.valid.{}.pt'.format(idx))
# REF [function] >> build_dataset_iter() in https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/inputters/inputter.py.
train_iter = onmt.inputters.inputter.DatasetLazyIter(
dataset_paths=train_data_files, fields=vocab_fields,
batch_size=batch_size, batch_size_multiple=1, batch_size_fn=None, pool_factor=8192,
device=device, is_train=True, repeat=True,
num_batches_multiple=1, yield_raw_example=False
)
valid_iter = onmt.inputters.inputter.DatasetLazyIter(
dataset_paths=valid_data_files, fields=vocab_fields,
batch_size=batch_size, batch_size_multiple=1, batch_size_fn=None, pool_factor=8192,
device=device, is_train=False, repeat=False,
num_batches_multiple=1, yield_raw_example=False
)
else:
sortkey = onmt.inputters.str2sortkey[tgt_data_type]
src_dir_path = preprocessed_data_dir_path + '/images'
src_train_filepaths = read_lines_from_file(preprocessed_data_dir_path + '/src-train.txt')
src_train_filepaths = [bytes(fpath, encoding='utf-8') for fpath in src_train_filepaths]
tgt_train_texts = read_lines_from_file(preprocessed_data_dir_path + '/tgt-train.txt')
src_valid_filepaths = read_lines_from_file(preprocessed_data_dir_path + '/src-val.txt')
src_valid_filepaths = [bytes(fpath, encoding='utf-8') for fpath in src_valid_filepaths]
tgt_valid_texts = read_lines_from_file(preprocessed_data_dir_path + '/tgt-val.txt')
# REF [function] >> translate() in https://github.com/OpenNMT/OpenNMT-py/tree/master/onmt/translate/translator.py.
train_src_data = {'reader': src_reader_obj, 'data': src_train_filepaths, 'dir': src_dir_path}
train_tgt_data = {'reader': tgt_reader_obj, 'data': tgt_train_texts, 'dir': None}
train_readers, train_data, train_dirs = onmt.inputters.Dataset.config([('src', train_src_data), ('tgt', train_tgt_data)])
train_dataset = onmt.inputters.Dataset(
fields=vocab_fields, readers=train_readers, data=train_data, dirs=train_dirs, sort_key=sortkey,
filter_pred=None, corpus_id=None
)
valid_src_data = {'reader': src_reader_obj, 'data': src_valid_filepaths, 'dir': src_dir_path}
valid_tgt_data = {'reader': tgt_reader_obj, 'data': tgt_valid_texts, 'dir': None}
valid_readers, valid_data, valid_dirs = onmt.inputters.Dataset.config([('src', valid_src_data), ('tgt', valid_tgt_data)])
valid_dataset = onmt.inputters.Dataset(
fields=vocab_fields, readers=valid_readers, data=valid_data, dirs=valid_dirs, sort_key=sortkey,
filter_pred=None, corpus_id=None
)
train_iter = onmt.inputters.inputter.OrderedIterator(
dataset=train_dataset,
batch_size=batch_size, batch_size_multiple=1, batch_size_fn=None, pool_factor=8192,
device=device, train=True, repeat=True,
sort=False, sort_within_batch=True,
yield_raw_example=False
)
#train_iter.create_batches()
valid_iter = onmt.inputters.inputter.OrderedIterator(
dataset=valid_dataset,
batch_size=batch_size, batch_size_multiple=1, batch_size_fn=None, pool_factor=8192,
device=device, train=False, repeat=False,
sort=False, sort_within_batch=True,
yield_raw_example=False
)
#valid_iter.create_batches()
if False:
# Information on inputs.
# Refer to "Information on outputs".
tgt_padding = tgt_vocab.stoi[tgt_text_field.pad_token]
tgt_unk = tgt_vocab.stoi[tgt_text_field.unk_token]
tgt_bos = tgt_vocab.stoi[tgt_text_field.init_token]
tgt_eos = tgt_vocab.stoi[tgt_text_field.eos_token]
print('<UNK> = {}, <PAD> = {}, <BOS> = {}, <EOS> = {}.'.format(tgt_unk, tgt_padding, tgt_bos, tgt_eos))
for idx, batch in enumerate(train_iter):
# Source: [B, C, H, W] & [0, 1].
# Target: [T, B, 1]. No one-hot encoding.
print('Source #{}: {}, {}, ({}, {}).'.format(idx, batch.src.shape, batch.src.dtype, torch.min(batch.src), torch.max(batch.src)))
print('Target #{}: {}, {}.'.format(idx, batch.tgt.shape, batch.tgt.dtype))
#print('Target #{}: {}.'.format(idx, batch.tgt.transpose(0, 1).squeeze(dim=-1)))
if idx >= 4: break
#--------------------
# Build a model.
if is_my_model_used:
model, generator = build_my_im2latex_model(image_height, input_channel, num_classes, word_vec_size)
else:
model, generator = build_im2latex_model(input_channel, num_classes, word_vec_size)
#if model: print('Model:\n{}'.format(model))
# TODO [check] >> It is unclear why the generator has to be attached at a different point depending on which model is used.
if is_my_model_used:
# NOTE [info] >> The generator is not called automatically, so it has to be called explicitly.
#model.generator = generator
model.add_module('generator', generator)
if is_model_loaded:
model, generator = load_model(model_filepath_to_load, model, generator, device=device)
if not is_my_model_used:
# NOTE [info] >> The generator is not called automatically, so it has to be called explicitly.
#model.generator = generator
model.add_module('generator', generator)
model = model.to(device)
model.generator = model.generator.to(device)
#--------------------
if is_trained:
# Specify loss computation module.
loss = onmt.utils.loss.NMTLossCompute(
criterion=torch.nn.NLLLoss(ignore_index=tgt_padding, reduction='sum'),
generator=model.generator
)
# Set up an optimizer.
lr = 1.0
torch_optimizer = torch.optim.SGD(model.parameters(), lr=lr)
optim = onmt.utils.optimizers.Optimizer(torch_optimizer, learning_rate=lr, learning_rate_decay_fn=None, max_grad_norm=2)
#--------------------
# Train.
# Keeping track of the output requires a report manager.
#model_saver = onmt.models.ModelSaver('./data/model_ckpt', model, model_opt, fields=vocab_fields, optim=optim, keep_checkpoint=-1)
model_saver = None
report_manager = onmt.utils.ReportMgr(report_every=50, start_time=None, tensorboard_writer=None)
trainer = onmt.Trainer(
model=model, train_loss=loss, valid_loss=loss, optim=optim,
model_saver=model_saver,
report_manager=report_manager
)
print('Start training...')
start_time = time.time()
total_stats = trainer.train(
train_iter=train_iter, train_steps=train_steps,
valid_iter=valid_iter, valid_steps=valid_steps,
save_checkpoint_steps=save_checkpoint_steps
)
print('End training: {} secs.'.format(time.time() - start_time))
print('Train: Accuracy = {}, Cross entropy = {}, Perplexity = {}.'.format(total_stats.accuracy(), total_stats.xent(), total_stats.ppl()))
print('Start evaluating...')
start_time = time.time()
stats = trainer.validate(valid_iter=valid_iter, moving_average=None)
print('End evaluating: {} secs.'.format(time.time() - start_time))
print('Evaluation: Accuracy = {}, Cross entropy = {}, Perplexity = {}.'.format(stats.accuracy(), stats.xent(), stats.ppl()))
save_model(model_filepath, model, model.generator)
#--------------------
# Load up the translation functions.
scorer = onmt.translate.GNMTGlobalScorer(alpha=0.7, beta=0.0, length_penalty='avg', coverage_penalty='none')
if True:
# Use a customized onmt.translate.Translator._translate_batch_with_strategy().
# Use images as the input to a model.
import os, torchvision, cv2
tgt_unk = tgt_vocab.stoi[tgt_text_field.unk_token]
tgt_bos = tgt_vocab.stoi[tgt_text_field.init_token]
tgt_eos = tgt_vocab.stoi[tgt_text_field.eos_token]
src_filepaths = read_lines_from_file(preprocessed_data_dir_path + '/src-test.txt')
tgt_texts = read_lines_from_file(preprocessed_data_dir_path + '/tgt-test.txt')
transform = torchvision.transforms.ToTensor()
src_batches = list()
for fpath in src_filepaths:
img_fpath = os.path.join(preprocessed_data_dir_path, 'images', fpath)
img = cv2.imread(img_fpath, cv2.IMREAD_COLOR)
if img is None:
print('Failed to load an image: {}.'.format(img_fpath))
continue
img = transform(img)
img = torch.unsqueeze(img, dim=0) # Batch: Tensor, [B, C, H, W].
img = img.to(device)
src_batches.append(img)
tgt_batches = list()
for txt in tgt_texts:
tgt_batches.append([txt])
assert len(src_batches) == len(tgt_batches)
is_beam_search_used = True
if is_beam_search_used:
beam_size = 30
n_best = 1
ratio = 0.0
else:
beam_size = 1
random_sampling_topk, random_sampling_temp = 1, 1
n_best = 1 # Fixed. For handling translation results.
min_length, max_length = 0, 100
block_ngram_repeat = 0
#ignore_when_blocking = frozenset()
#exclusion_idxs = {tgt_vocab.stoi[t] for t in ignore_when_blocking}
exclusion_idxs = set()
model.eval()
with torch.no_grad():
for src_batch, tgt_batch in zip(src_batches, tgt_batches):
#batch_size = len(src_batch)
batch_size = 1
if is_beam_search_used:
decode_strategy = create_beam_search_strategy(batch_size, scorer, beam_size, n_best, ratio, min_length, max_length, block_ngram_repeat, tgt_bos, tgt_eos, tgt_padding, exclusion_idxs)
else:
decode_strategy = create_greedy_search_strategy(batch_size, random_sampling_topk, random_sampling_temp, min_length, max_length, block_ngram_repeat, tgt_bos, tgt_eos, tgt_padding, exclusion_idxs)
print('Start translating...')
start_time = time.time()
trans_batch = translate_batch_with_strategy(model, decode_strategy, src_batch, batch_size, beam_size, tgt_unk, tgt_vocab, src_vocabs=[])
print('End translating: {} secs.'.format(time.time() - start_time))
# Information on outputs.
# Refer to "Information on inputs".
#trans_batch['predictions'] # [batch size (list)][#bests (list)][decoded token ID sequence (tensor)]. Each decoded output usually ends with <EOS> (not always) and has no <SOS>.
#trans_batch['scores'] # [batch size (list)][#bests (list)][scalar (tensor)].
#trans_batch['attention'] # [batch size (list)][#bests (list)][?].
#trans_batch['alignment'] # [batch size (list)][?].
for idx, (gt, pred, score, attn, alignment) in enumerate(zip(tgt_batch, trans_batch['predictions'], trans_batch['scores'], trans_batch['attention'], trans_batch['alignment'])):
print('ID #{}:'.format(idx))
print('\tG/T = {}.'.format(gt))
for rank_id in range(n_best):
try:
print('\tPrediction (rank {}) = {}.'.format(rank_id, ' '.join([tgt_vocab.itos[elem] for elem in pred[rank_id].cpu().numpy() if elem < len(tgt_vocab.itos)])))
except IndexError as ex:
print('\tDecoding error (rank {}): {}.'.format(rank_id, pred[rank_id]))
print('\tScore (rank {}) = {}.'.format(rank_id, score[rank_id].cpu().item()))
#print('\tAttention (rank {}) = {}.'.format(rank_id, attn[rank_id].cpu().numpy()))
#print('\tAlignment (rank {}) = {}.'.format(rank_id, alignment[rank_id].cpu().item())) # Empty.
elif False:
# NOTE [error] >> This does not work when sources are not text.
# onmt.translate.Translator.translate() uses an instance of onmt.translate.TranslationBuilder.
# onmt.translate.TranslationBuilder builds a word-based translation from the batch output of translator and the underlying dictionaries.
# NOTE [info] >> When using input files.
try:
import tempfile
with tempfile.TemporaryFile(mode='w') as fd:
# Decoding strategy:
# Greedy search, if beam_size = 1.
# Beam search, otherwise.
translator = onmt.translate.Translator(
model=model, fields=vocab_fields,
src_reader=src_reader_obj, tgt_reader=tgt_reader_obj,
n_best=1, min_length=0, max_length=100,
beam_size=30, random_sampling_topk=1, random_sampling_temp=1,
data_type=src_data_type,
global_scorer=scorer,
copy_attn=False, report_align=False, report_score=True, out_file=fd,
gpu=gpu
)
src_filepaths = read_lines_from_file(preprocessed_data_dir_path + '/src-test.txt')
src_filepaths = [bytes(fpath, encoding='utf-8') for fpath in src_filepaths]
tgt_texts = read_lines_from_file(preprocessed_data_dir_path + '/tgt-test.txt')
try:
print('Start translating...')
start_time = time.time()
scores, predictions = translator.translate(src=src_filepaths, tgt=None, src_dir=preprocessed_data_dir_path + '/images', batch_size=batch_size, batch_type='tokens', attn_debug=False, align_debug=False, phrase_table='')
#scores, predictions = translator.translate(src=src_filepaths, tgt=tgt_texts, src_dir=preprocessed_data_dir_path + '/images', batch_size=batch_size, batch_type='tokens', attn_debug=False, align_debug=False, phrase_table='')
print('End translating: {} secs.'.format(time.time() - start_time))
for idx, (score, pred, gt) in enumerate(zip(scores, predictions, tgt_texts)):
print('ID #{}:'.format(idx))
print('\tG/T = {}.'.format(gt))
print('\tPrediction = {}.'.format(pred[0]))
print('\tScore = {}.'.format(score[0].cpu().item()))
except (RuntimeError, Exception) as ex:
print('Error: {}.'.format(ex))
except UnicodeDecodeError as ex:
print('Unicode decode error: {}.'.format(ex))
except FileNotFoundError as ex:
print('File not found: {}.'.format(ex))
else:
# Decoding strategy:
# Greedy search, if beam_size = 1.
# Beam search, otherwise.
translator = onmt.translate.Translator(
model=model, fields=vocab_fields,
src_reader=src_reader_obj, tgt_reader=tgt_reader_obj,
n_best=1, min_length=0, max_length=100,
beam_size=30, random_sampling_topk=1, random_sampling_temp=1,
data_type=src_data_type,
global_scorer=scorer,
copy_attn=False, report_align=False, report_score=True,
gpu=gpu
)
for batch in valid_iter:
print('Start translating...')
start_time = time.time()
trans_batch = translator.translate_batch(batch=batch, src_vocabs=[], attn_debug=False)
print('End translating: {} secs.'.format(time.time() - start_time))
#print('\tBatch source = {}.'.format(trans_batch['batch'].src.cpu().numpy()))
#print('\tBatch target = {}.'.format(trans_batch['batch'].tgt.cpu().numpy()))
#print('\tBatch indices = {}.'.format(trans_batch['batch'].indices.cpu().numpy()))
#print('\tBatch corpus ID = {}.'.format(trans_batch['batch'].corpus_id.cpu().numpy()))
for idx, (pred, score, attn, gold_score, alignment) in enumerate(zip(trans_batch['predictions'], trans_batch['scores'], trans_batch['attention'], trans_batch['gold_score'], trans_batch['alignment'])):
print('ID #{}:'.format(idx))
try:
print('\tPrediction = {}.'.format(' '.join([tgt_vocab.itos[elem] for elem in pred[0].cpu().numpy() if elem < len(tgt_vocab.itos)])))
except IndexError as ex:
print('\tDecoding error: {}.'.format(pred[0]))
print('\tScore = {}.'.format(score[0].cpu().item()))
#print('\tAttention = {}.'.format(attn[0].cpu().numpy()))
print('\tGold score = {}.'.format(gold_score.cpu().numpy()))
#print('\tAlignment = {}.'.format(alignment[0].cpu().item()))
#--------------------------------------------------------------------
"""
NMTModel(
(encoder): ImageEncoder(
(layer1): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(layer2): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(layer3): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(layer4): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(layer5): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(layer6): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(batch_norm1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(batch_norm2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(batch_norm3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(rnn): LSTM(512, 250, num_layers=2, dropout=0.3, bidirectional=True)
(pos_lut): Embedding(1000, 512)
)
(decoder): InputFeedRNNDecoder(
(embeddings): Embeddings(
(make_embedding): Sequential(
(emb_luts): Elementwise(
(0): Embedding(1798, 80, padding_idx=1)
)
)
)
(dropout): Dropout(p=0.3, inplace=False)
(rnn): StackedLSTM(
(dropout): Dropout(p=0.3, inplace=False)
(layers): ModuleList(
(0): LSTMCell(580, 500)
(1): LSTMCell(500, 500)
)
)
(attn): GlobalAttention(
(linear_in): Linear(in_features=500, out_features=500, bias=False)
(linear_out): Linear(in_features=1000, out_features=500, bias=False)
)
)
(generator): Sequential(
(0): Linear(in_features=500, out_features=1798, bias=True)
(1): Cast()
(2): LogSoftmax()
)
)
"""
def build_submodels(input_channel, num_classes, word_vec_size):
bidirectional_encoder = True
embedding_dropout = 0.3
encoder_num_layers = 2
encoder_rnn_size = 500
encoder_dropout = 0.3
decoder_rnn_type = 'LSTM'
decoder_num_layers = 2
decoder_hidden_size = encoder_rnn_size
decoder_dropout = 0.3
src_embeddings = None
tgt_embeddings = onmt.modules.Embeddings(
word_vec_size=word_vec_size,
word_vocab_size=num_classes,
word_padding_idx=1,
position_encoding=False,
feat_merge='concat',
feat_vec_exponent=0.7,
feat_vec_size=-1,
feat_padding_idx=[],
feat_vocab_sizes=[],
dropout=embedding_dropout,
sparse=False,
fix_word_vecs=False
)
encoder = onmt.encoders.ImageEncoder(
num_layers=encoder_num_layers, bidirectional=bidirectional_encoder,
rnn_size=encoder_rnn_size, dropout=encoder_dropout, image_chanel_size=input_channel
)
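# NOTE [info] >> 'image_chanel_size' is the parameter name as spelled in OpenNMT-py.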
decoder = onmt.decoders.InputFeedRNNDecoder(
rnn_type=decoder_rnn_type, bidirectional_encoder=bidirectional_encoder,
num_layers=decoder_num_layers, hidden_size=decoder_hidden_size,
attn_type='general', attn_func='softmax',
coverage_attn=False, context_gate=None,
copy_attn=False, dropout=decoder_dropout, embeddings=tgt_embeddings,
reuse_copy_attn=False, copy_attn_type='general'
)
generator = torch.nn.Sequential(
torch.nn.Linear(in_features=decoder_hidden_size, out_features=num_classes, bias=True),
onmt.modules.util_class.Cast(dtype=torch.float32),
torch.nn.LogSoftmax(dim=-1)
)
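# NOTE [info] >> (illustrative) The generator maps decoder outputs to per-class
# log-probabilities, [T, B, decoder_hidden_size] -> [T, B, num_classes]
# (LogSoftmax over the last dimension).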
return encoder, decoder, generator
class MyModel(torch.nn.Module):
def __init__(self, encoder, decoder, generator=None):
super().__init__()
self.encoder, self.decoder, self._generator = encoder, decoder, generator
# REF [function] >> NMTModel.forward() in https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/models/model.py
def forward(self, src, tgt, lengths, bptt=False, with_align=False):
# TODO [check] >> This function is not tested.
enc_state, memory_bank, lengths = self.encoder(src, lengths=lengths)
if bptt is False:
self.decoder.init_state(src, memory_bank, enc_state)
dec_in = tgt[:-1] # Exclude last target from inputs.
dec_outs, attns = self.decoder(dec_in, memory_bank, memory_lengths=lengths, with_align=with_align)
if self._generator: dec_outs = self._generator(dec_outs)
return dec_outs, attns
# REF [site] >> https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/model_builder.py
def build_my_simple_model(use_NMTModel, input_channel, num_classes, word_vec_size):
encoder, decoder, generator = build_submodels(input_channel, num_classes, word_vec_size)
if use_NMTModel:
model = onmt.models.NMTModel(encoder, decoder)
else:
model = MyModel(encoder, decoder, generator=None)
return model, generator
def simple_example():
use_NMTModel = False
input_channel = 3
num_classes = 1798
word_vec_size = 80
batch_size = 64
max_time_steps = 10
gpu = 0
device = torch.device(('cuda:{}'.format(gpu) if gpu >= 0 else 'cuda') if torch.cuda.is_available() else 'cpu')
print('Device: {}.'.format(device))
#--------------------
# Build a model.
model, generator = build_my_simple_model(use_NMTModel, input_channel, num_classes, word_vec_size)
#if model: print('Model:\n{}'.format(model))
# NOTE [info] >> The generator is not called automatically, so it has to be called explicitly.
#model.generator = generator
model.add_module('generator', generator)
model = model.to(device)
model.generator = model.generator.to(device)
#--------------------
# For checking.
if False:
# Information on inputs.
inputs = torch.rand(batch_size, input_channel, 300, 300) # [B, C, H, W]. [0, 1].
outputs = torch.randint(num_classes, (max_time_steps, batch_size, 1)) # [T, B, 1]. No one-hot encoding.
output_lens = torch.randint(1, max_time_steps + 1, (batch_size,)) # [B].
with torch.no_grad():
# Information on outputs.
model_outputs, attentions = model(inputs.to(device), outputs.to(device), output_lens.to(device)) # [T-1, B, hidden size] & [T-1, B, ???].
model_outputs = model.generator(model_outputs) # [T-1, B, #classes].
print('Source: {}, {}, ({}, {}).'.format(inputs.shape, inputs.dtype, torch.min(inputs), torch.max(inputs)))
print('Target: {}, {}.'.format(outputs.shape, outputs.dtype))
print('Model output: {}, {}.'.format(model_outputs.shape, model_outputs.dtype))
#model_outputs = model_outputs.transpose(0, 1) # [T-1, B, #classes] -> [B, T-1, #classes] where T-1 is for one-step look-ahead.
#_, model_outputs = torch.max(model_outputs, dim=-1)
model_outputs = model_outputs.cpu().numpy()
attentions = attentions['std'].cpu().numpy()
#attentions = attentions['copy'].cpu().numpy() # If copy_attn = True.
#attentions = attentions['coverage'].cpu().numpy() # If coverage_attn = True.
print("Model outputs' shape = {}.".format(model_outputs.shape))
print("Attentions' shape = {}.".format(attentions.shape))
#--------------------
# Train and evaluate.
#--------------------
# Infer.
# FIXME [implement] >> How to infer?
def main():
#preprocess_test() # Not yet completed.
#train_test() # Not yet completed.
#translate_test() # Not yet completed.
#server_test() # Not yet implemented.
#--------------------
#library_example()
im2latex_example()
#simple_example() # Not yet completed.
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
|
gpl-2.0
| -8,894,726,451,195,124,000 | -6,019,352,359,199,642,000 | 47.663098 | 544 | 0.694049 | false |
erkike/daw
|
angular/node_modules/node-gyp/gyp/pylib/gyp/generator/android.py
|
960
|
45344
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This generates makefiles suitable for inclusion into the Android build system
# via an Android.mk file. It is based on make.py, the standard makefile
# generator.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level GypAndroid.mk. This means that all
# variables in .mk-files clobber one another, and furthermore that any
# variables set potentially clash with other Android build system variables.
# Try to avoid setting global variables where possible.
import gyp
import gyp.common
import gyp.generator.make as make # Reuse global functions from make backend.
import os
import re
import subprocess
generator_default_variables = {
'OS': 'android',
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_SUFFIX': '.so',
'INTERMEDIATE_DIR': '$(gyp_intermediate_dir)',
'SHARED_INTERMEDIATE_DIR': '$(gyp_shared_intermediate_dir)',
'PRODUCT_DIR': '$(gyp_shared_intermediate_dir)',
'SHARED_LIB_DIR': '$(builddir)/lib.$(TOOLSET)',
'LIB_DIR': '$(obj).$(TOOLSET)',
'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
'RULE_INPUT_PATH': '$(RULE_SOURCES)',
'RULE_INPUT_EXT': '$(suffix $<)',
'RULE_INPUT_NAME': '$(notdir $<)',
'CONFIGURATION_NAME': '$(GYP_CONFIGURATION)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Generator-specific gyp specs.
generator_additional_non_configuration_keys = [
# Boolean to declare that this target does not want its name mangled.
'android_unmangled_name',
# Map of android build system variables to set.
'aosp_build_settings',
]
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
ALL_MODULES_FOOTER = """\
# "gyp_all_modules" is a concatenation of the "gyp_all_modules" targets from
# all the included sub-makefiles. This is just here to clarify.
gyp_all_modules:
"""
header = """\
# This file is generated by gyp; do not edit.
"""
# Map gyp target types to Android module classes.
MODULE_CLASSES = {
'static_library': 'STATIC_LIBRARIES',
'shared_library': 'SHARED_LIBRARIES',
'executable': 'EXECUTABLES',
}
def IsCPPExtension(ext):
return make.COMPILABLE_EXTENSIONS.get(ext) == 'cxx'
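# Example (illustrative, per make.COMPILABLE_EXTENSIONS):
# IsCPPExtension('.cc') -> True; IsCPPExtension('.c') -> False.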
def Sourceify(path):
"""Convert a path to its source directory form. The Android backend does not
support options.generator_output, so this function is a noop."""
return path
# Map from qualified target to path to output.
# For Android, the target of these maps is a tuple ('static', 'modulename'),
# ('dynamic', 'modulename'), or ('path', 'some/path') instead of a string,
# since we link by module.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class AndroidMkWriter(object):
"""AndroidMkWriter packages up the writing of one target-specific Android.mk.
Its only real entry point is Write(); the class mostly serves as a namespace.
"""
def __init__(self, android_top_dir):
self.android_top_dir = android_top_dir
def Write(self, qualified_target, relative_target, base_path, output_filename,
spec, configs, part_of_all, write_alias_target, sdk_version):
"""The main entry point: writes a .mk file for a single target.
Arguments:
qualified_target: target we're generating
relative_target: qualified target name relative to the root
base_path: path relative to source root we're building in, used to resolve
target-relative paths
output_filename: output .mk file name to write
spec, configs: gyp info
part_of_all: flag indicating this target is part of 'all'
write_alias_target: flag indicating whether to create short aliases for
this target
sdk_version: what to emit for LOCAL_SDK_VERSION in output
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
self.qualified_target = qualified_target
self.relative_target = relative_target
self.path = base_path
self.target = spec['target_name']
self.type = spec['type']
self.toolset = spec['toolset']
deps, link_deps = self.ComputeDeps(spec)
# Some of the generation below can add extra output, sources, or
# link dependencies. All of the out params of the functions that
# follow use names like extra_foo.
extra_outputs = []
extra_sources = []
self.android_class = MODULE_CLASSES.get(self.type, 'GYP')
self.android_module = self.ComputeAndroidModule(spec)
(self.android_stem, self.android_suffix) = self.ComputeOutputParts(spec)
self.output = self.output_binary = self.ComputeOutput(spec)
# Standard header.
self.WriteLn('include $(CLEAR_VARS)\n')
# Module class and name.
self.WriteLn('LOCAL_MODULE_CLASS := ' + self.android_class)
self.WriteLn('LOCAL_MODULE := ' + self.android_module)
# Only emit LOCAL_MODULE_STEM if it's different to LOCAL_MODULE.
# The library module classes fail if the stem is set. ComputeOutputParts
# makes sure that stem == modulename in these cases.
if self.android_stem != self.android_module:
self.WriteLn('LOCAL_MODULE_STEM := ' + self.android_stem)
self.WriteLn('LOCAL_MODULE_SUFFIX := ' + self.android_suffix)
if self.toolset == 'host':
self.WriteLn('LOCAL_IS_HOST_MODULE := true')
self.WriteLn('LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)')
elif sdk_version > 0:
self.WriteLn('LOCAL_MODULE_TARGET_ARCH := '
'$(TARGET_$(GYP_VAR_PREFIX)ARCH)')
self.WriteLn('LOCAL_SDK_VERSION := %s' % sdk_version)
# Grab output directories; needed for Actions and Rules.
if self.toolset == 'host':
self.WriteLn('gyp_intermediate_dir := '
'$(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))')
else:
self.WriteLn('gyp_intermediate_dir := '
'$(call local-intermediates-dir,,$(GYP_VAR_PREFIX))')
self.WriteLn('gyp_shared_intermediate_dir := '
'$(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))')
self.WriteLn()
# List files this target depends on so that actions/rules/copies/sources
# can depend on the list.
# TODO: doesn't pull in things through transitive link deps; needed?
target_dependencies = [x[1] for x in deps if x[0] == 'path']
self.WriteLn('# Make sure our deps are built first.')
self.WriteList(target_dependencies, 'GYP_TARGET_DEPENDENCIES',
local_pathify=True)
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
self.WriteActions(spec['actions'], extra_sources, extra_outputs)
# Rules must be early like actions.
if 'rules' in spec:
self.WriteRules(spec['rules'], extra_sources, extra_outputs)
if 'copies' in spec:
self.WriteCopies(spec['copies'], extra_outputs)
# GYP generated outputs.
self.WriteList(extra_outputs, 'GYP_GENERATED_OUTPUTS', local_pathify=True)
# Set LOCAL_ADDITIONAL_DEPENDENCIES so that Android's build rules depend
# on both our dependency targets and our generated files.
self.WriteLn('# Make sure our deps and generated files are built first.')
self.WriteLn('LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) '
'$(GYP_GENERATED_OUTPUTS)')
self.WriteLn()
# Sources.
if spec.get('sources', []) or extra_sources:
self.WriteSources(spec, configs, extra_sources)
self.WriteTarget(spec, configs, deps, link_deps, part_of_all,
write_alias_target)
# Update global list of target outputs, used in dependency tracking.
target_outputs[qualified_target] = ('path', self.output_binary)
# Update global list of link dependencies.
if self.type == 'static_library':
target_link_deps[qualified_target] = ('static', self.android_module)
elif self.type == 'shared_library':
target_link_deps[qualified_target] = ('shared', self.android_module)
self.fp.close()
return self.android_module
def WriteActions(self, actions, extra_sources, extra_outputs):
"""Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
"""
for action in actions:
name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
action['action_name']))
self.WriteLn('### Rules for action "%s":' % action['action_name'])
inputs = action['inputs']
outputs = action['outputs']
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set()
for out in outputs:
if not out.startswith('$'):
print ('WARNING: Action for target "%s" writes output to local path '
'"%s".' % (self.target, out))
dir = os.path.split(out)[0]
if dir:
dirs.add(dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources += outputs
# Prepare the actual command.
command = gyp.common.EncodePOSIXShellList(action['action'])
if 'message' in action:
quiet_cmd = 'Gyp action: %s ($@)' % action['message']
else:
quiet_cmd = 'Gyp action: %s ($@)' % name
if len(dirs) > 0:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
command = cd_action + command
# The makefile rules are all relative to the top dir, but the gyp actions
# are defined relative to their containing dir. This replaces the gyp_*
# variables for the action rule with an absolute version so that the
# output goes in the right place.
# Only write the gyp_* rules for the "primary" output (:1);
# it's superfluous for the "extra outputs", and this avoids accidentally
# writing duplicate dummy rules for those outputs.
main_output = make.QuoteSpaces(self.LocalPathify(outputs[0]))
self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output)
self.WriteLn('%s: gyp_intermediate_dir := '
'$(abspath $(gyp_intermediate_dir))' % main_output)
self.WriteLn('%s: gyp_shared_intermediate_dir := '
'$(abspath $(gyp_shared_intermediate_dir))' % main_output)
# Android's envsetup.sh adds a number of directories to the path including
# the built host binary directory. This causes actions/rules invoked by
# gyp to sometimes use these instead of system versions, e.g. bison.
# The built host binaries may not be suitable, and can cause errors.
# So, we remove them from the PATH using the ANDROID_BUILD_PATHS variable
# set by envsetup.
self.WriteLn('%s: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))'
% main_output)
# Don't allow spaces in input/output filenames, but make an exception for
# filenames which start with '$(' since it's okay for there to be spaces
# inside of make function/macro invocations.
for input in inputs:
if not input.startswith('$(') and ' ' in input:
raise gyp.common.GypError(
'Action input filename "%s" in target %s contains a space' %
(input, self.target))
for output in outputs:
if not output.startswith('$(') and ' ' in output:
raise gyp.common.GypError(
'Action output filename "%s" in target %s contains a space' %
(output, self.target))
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
(main_output, ' '.join(map(self.LocalPathify, inputs))))
self.WriteLn('\t@echo "%s"' % quiet_cmd)
self.WriteLn('\t$(hide)%s\n' % command)
for output in outputs[1:]:
# Make each output depend on the main output, with an empty command
# to force make to notice that the mtime has changed.
self.WriteLn('%s: %s ;' % (self.LocalPathify(output), main_output))
extra_outputs += outputs
self.WriteLn()
self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs):
"""Write Makefile code for any 'rules' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
rules (used to make other pieces dependent on these rules)
"""
if len(rules) == 0:
return
for rule in rules:
if len(rule.get('rule_sources', [])) == 0:
continue
name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
rule['rule_name']))
self.WriteLn('\n### Generated for rule "%s":' % name)
self.WriteLn('# "%s":' % rule)
inputs = rule.get('inputs')
for rule_source in rule.get('rule_sources', []):
(rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
(rule_source_root, rule_source_ext) = \
os.path.splitext(rule_source_basename)
outputs = [self.ExpandInputRoot(out, rule_source_root,
rule_source_dirname)
for out in rule['outputs']]
dirs = set()
for out in outputs:
if not out.startswith('$'):
print ('WARNING: Rule for target %s writes output to local path %s'
% (self.target, out))
dir = os.path.dirname(out)
if dir:
dirs.add(dir)
extra_outputs += outputs
if int(rule.get('process_outputs_as_sources', False)):
extra_sources.extend(outputs)
components = []
for component in rule['action']:
component = self.ExpandInputRoot(component, rule_source_root,
rule_source_dirname)
if '$(RULE_SOURCES)' in component:
component = component.replace('$(RULE_SOURCES)',
rule_source)
components.append(component)
command = gyp.common.EncodePOSIXShellList(components)
cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
command = cd_action + command
if dirs:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
# We set up a rule to build the first output, and then set up
# a rule for each additional output to depend on the first.
outputs = map(self.LocalPathify, outputs)
main_output = outputs[0]
self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output)
self.WriteLn('%s: gyp_intermediate_dir := '
'$(abspath $(gyp_intermediate_dir))' % main_output)
self.WriteLn('%s: gyp_shared_intermediate_dir := '
'$(abspath $(gyp_shared_intermediate_dir))' % main_output)
# See explanation in WriteActions.
self.WriteLn('%s: export PATH := '
'$(subst $(ANDROID_BUILD_PATHS),,$(PATH))' % main_output)
main_output_deps = self.LocalPathify(rule_source)
if inputs:
main_output_deps += ' '
main_output_deps += ' '.join([self.LocalPathify(f) for f in inputs])
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
(main_output, main_output_deps))
self.WriteLn('\t%s\n' % command)
for output in outputs[1:]:
# Make each output depend on the main output, with an empty command
# to force make to notice that the mtime has changed.
self.WriteLn('%s: %s ;' % (output, main_output))
self.WriteLn()
self.WriteLn()
def WriteCopies(self, copies, extra_outputs):
"""Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
"""
self.WriteLn('### Generated for copy rule.')
variable = make.StringToMakefileVariable(self.relative_target + '_copies')
outputs = []
for copy in copies:
for path in copy['files']:
# The Android build system does not allow generation of files into the
# source tree. The destination should start with a variable, which will
# typically be $(gyp_intermediate_dir) or
# $(gyp_shared_intermediate_dir). Note that we can't use an assertion
# because some of the gyp tests depend on this.
if not copy['destination'].startswith('$'):
print ('WARNING: Copy rule for target %s writes output to '
'local path %s' % (self.target, copy['destination']))
# LocalPathify() calls normpath, stripping trailing slashes.
path = Sourceify(self.LocalPathify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.LocalPathify(os.path.join(copy['destination'],
filename)))
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)' %
(output, path))
self.WriteLn('\t@echo Copying: $@')
self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
self.WriteLn('\t$(hide) $(ACP) -rpf $< $@')
self.WriteLn()
outputs.append(output)
self.WriteLn('%s = %s' % (variable,
' '.join(map(make.QuoteSpaces, outputs))))
extra_outputs.append('$(%s)' % variable)
self.WriteLn()
def WriteSourceFlags(self, spec, configs):
"""Write out the flags and include paths used to compile source files for
the current target.
Args:
spec, configs: input from gyp.
"""
for configname, config in sorted(configs.iteritems()):
extracted_includes = []
self.WriteLn('\n# Flags passed to both C and C++ files.')
cflags, includes_from_cflags = self.ExtractIncludesFromCFlags(
config.get('cflags', []) + config.get('cflags_c', []))
extracted_includes.extend(includes_from_cflags)
self.WriteList(cflags, 'MY_CFLAGS_%s' % configname)
self.WriteList(config.get('defines'), 'MY_DEFS_%s' % configname,
prefix='-D', quoter=make.EscapeCppDefine)
self.WriteLn('\n# Include paths placed before CFLAGS/CPPFLAGS')
includes = list(config.get('include_dirs', []))
includes.extend(extracted_includes)
includes = map(Sourceify, map(self.LocalPathify, includes))
includes = self.NormalizeIncludePaths(includes)
self.WriteList(includes, 'LOCAL_C_INCLUDES_%s' % configname)
self.WriteLn('\n# Flags passed to only C++ (and not C) files.')
self.WriteList(config.get('cflags_cc'), 'LOCAL_CPPFLAGS_%s' % configname)
self.WriteLn('\nLOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) '
'$(MY_DEFS_$(GYP_CONFIGURATION))')
# Undefine ANDROID for host modules
# TODO: the source code should not use macro ANDROID to tell if it's host
# or target module.
if self.toolset == 'host':
self.WriteLn('# Undefine ANDROID for host modules')
self.WriteLn('LOCAL_CFLAGS += -UANDROID')
self.WriteLn('LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) '
'$(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))')
self.WriteLn('LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))')
# Android uses separate flags for assembly file invocations, but gyp expects
# the same CFLAGS to be applied:
self.WriteLn('LOCAL_ASFLAGS := $(LOCAL_CFLAGS)')
def WriteSources(self, spec, configs, extra_sources):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
We need to handle shared_intermediate directory source files as
a special case by copying them to the intermediate directory and
treating them as generated sources. Otherwise the Android build
rules won't pick them up.
Args:
spec, configs: input from gyp.
extra_sources: Sources generated from Actions or Rules.
"""
sources = filter(make.Compilable, spec.get('sources', []))
generated_not_sources = [x for x in extra_sources if not make.Compilable(x)]
extra_sources = filter(make.Compilable, extra_sources)
# Determine and output the C++ extension used by these sources.
# We simply find the first C++ file and use that extension.
all_sources = sources + extra_sources
local_cpp_extension = '.cpp'
for source in all_sources:
(root, ext) = os.path.splitext(source)
if IsCPPExtension(ext):
local_cpp_extension = ext
break
if local_cpp_extension != '.cpp':
self.WriteLn('LOCAL_CPP_EXTENSION := %s' % local_cpp_extension)
# We need to move any non-generated sources that are coming from the
# shared intermediate directory out of LOCAL_SRC_FILES and put them
# into LOCAL_GENERATED_SOURCES. We also need to move over any C++ files
# that don't match our local_cpp_extension, since Android will only
# generate Makefile rules for a single LOCAL_CPP_EXTENSION.
local_files = []
for source in sources:
(root, ext) = os.path.splitext(source)
if '$(gyp_shared_intermediate_dir)' in source:
extra_sources.append(source)
elif '$(gyp_intermediate_dir)' in source:
extra_sources.append(source)
elif IsCPPExtension(ext) and ext != local_cpp_extension:
extra_sources.append(source)
else:
local_files.append(os.path.normpath(os.path.join(self.path, source)))
# For any generated source, if it is coming from the shared intermediate
# directory then we add a Make rule to copy them to the local intermediate
# directory first. This is because the Android LOCAL_GENERATED_SOURCES
# must be in the local module intermediate directory for the compile rules
# to work properly. If the file has the wrong C++ extension, then we add
# a rule to copy that to intermediates and use the new version.
final_generated_sources = []
# If a source file gets copied, we still need to add the original source
# directory as header search path, for GCC searches headers in the
# directory that contains the source file by default.
origin_src_dirs = []
for source in extra_sources:
local_file = source
if not '$(gyp_intermediate_dir)/' in local_file:
basename = os.path.basename(local_file)
local_file = '$(gyp_intermediate_dir)/' + basename
(root, ext) = os.path.splitext(local_file)
if IsCPPExtension(ext) and ext != local_cpp_extension:
local_file = root + local_cpp_extension
if local_file != source:
self.WriteLn('%s: %s' % (local_file, self.LocalPathify(source)))
self.WriteLn('\tmkdir -p $(@D); cp $< $@')
origin_src_dirs.append(os.path.dirname(source))
final_generated_sources.append(local_file)
# We add back in all of the non-compilable stuff to make sure that the
# make rules have dependencies on them.
final_generated_sources.extend(generated_not_sources)
self.WriteList(final_generated_sources, 'LOCAL_GENERATED_SOURCES')
origin_src_dirs = gyp.common.uniquer(origin_src_dirs)
origin_src_dirs = map(Sourceify, map(self.LocalPathify, origin_src_dirs))
self.WriteList(origin_src_dirs, 'GYP_COPIED_SOURCE_ORIGIN_DIRS')
self.WriteList(local_files, 'LOCAL_SRC_FILES')
# Write out the flags used to compile the source; this must be done last
# so that GYP_COPIED_SOURCE_ORIGIN_DIRS can be used as an include path.
self.WriteSourceFlags(spec, configs)
def ComputeAndroidModule(self, spec):
"""Return the Android module name used for a gyp spec.
We use the complete qualified target name to avoid collisions between
duplicate targets in different directories. We also add a suffix to
distinguish gyp-generated module names.
"""
if int(spec.get('android_unmangled_name', 0)):
assert self.type != 'shared_library' or self.target.startswith('lib')
return self.target
if self.type == 'shared_library':
# For reasons of convention, the Android build system requires that all
# shared library modules are named 'libfoo' when generating -l flags.
prefix = 'lib_'
else:
prefix = ''
if spec['toolset'] == 'host':
suffix = '_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp'
else:
suffix = '_gyp'
if self.path:
middle = make.StringToMakefileVariable('%s_%s' % (self.path, self.target))
else:
middle = make.StringToMakefileVariable(self.target)
return ''.join([prefix, middle, suffix])
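# Example (illustrative): a 'target'-toolset target 'foo' in path 'bar' yields
# 'bar_foo_gyp' ('lib_bar_foo_gyp' if it is a shared_library).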
def ComputeOutputParts(self, spec):
"""Return the 'output basename' of a gyp spec, split into filename + ext.
Android libraries must be named the same thing as their module name,
otherwise the linker can't find them, so product_name and so on must be
ignored if we are building a library, and the "lib" prepending is
not done for Android.
"""
assert self.type != 'loadable_module' # TODO: not supported?
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
target = self.ComputeAndroidModule(spec)
target_ext = '.a'
elif self.type == 'shared_library':
target = self.ComputeAndroidModule(spec)
target_ext = '.so'
elif self.type == 'none':
target_ext = '.stamp'
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
if self.type != 'static_library' and self.type != 'shared_library':
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
target_stem = target_prefix + target
return (target_stem, target_ext)
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the shared library 'foobar' in directory 'baz' will produce
'lib_baz_foobar_gyp.so' (the module name comes from ComputeAndroidModule).
"""
return ''.join(self.ComputeOutputParts(spec))
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the shared library 'foobar' in directory 'baz', built for the target
toolset, will produce '$($(GYP_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)/lib_baz_foobar_gyp.so'.
"""
if self.type == 'executable':
# We install host executables into shared_intermediate_dir so they can be
# run by gyp rules that refer to PRODUCT_DIR.
path = '$(gyp_shared_intermediate_dir)'
elif self.type == 'shared_library':
if self.toolset == 'host':
path = '$($(GYP_HOST_VAR_PREFIX)HOST_OUT_INTERMEDIATE_LIBRARIES)'
else:
path = '$($(GYP_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)'
else:
# Other targets just get built into their intermediate dir.
if self.toolset == 'host':
path = ('$(call intermediates-dir-for,%s,%s,true,,'
'$(GYP_HOST_VAR_PREFIX))' % (self.android_class,
self.android_module))
else:
path = ('$(call intermediates-dir-for,%s,%s,,,$(GYP_VAR_PREFIX))'
% (self.android_class, self.android_module))
assert spec.get('product_dir') is None # TODO: not supported?
return os.path.join(path, self.ComputeOutputBasename(spec))
def NormalizeIncludePaths(self, include_paths):
""" Normalize include_paths.
Convert absolute paths to relative to the Android top directory.
Args:
include_paths: A list of unprocessed include paths.
Returns:
A list of normalized include paths.
"""
normalized = []
for path in include_paths:
if path[0] == '/':
path = gyp.common.RelativePath(path, self.android_top_dir)
normalized.append(path)
return normalized
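# Example (illustrative): with android_top_dir == '/android',
# NormalizeIncludePaths(['/android/external/foo', 'bar']) -> ['external/foo', 'bar'].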
def ExtractIncludesFromCFlags(self, cflags):
"""Extract includes "-I..." out from cflags
Args:
cflags: A list of compiler flags, which may be mixed with "-I.."
Returns:
A tuple of lists: (clean_cflags, include_paths). The "-I" prefix is trimmed.
"""
clean_cflags = []
include_paths = []
for flag in cflags:
if flag.startswith('-I'):
include_paths.append(flag[2:])
else:
clean_cflags.append(flag)
return (clean_cflags, include_paths)
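# Example (illustrative):
# ExtractIncludesFromCFlags(['-Ifoo/include', '-O2']) -> (['-O2'], ['foo/include']).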
def FilterLibraries(self, libraries):
"""Filter the 'libraries' key to separate things that shouldn't be ldflags.
Library entries that look like filenames should be converted to android
module names instead of being passed to the linker as flags.
Args:
libraries: the value of spec.get('libraries')
Returns:
A tuple (static_lib_modules, dynamic_lib_modules, ldflags)
"""
static_lib_modules = []
dynamic_lib_modules = []
ldflags = []
for libs in libraries:
# Libs can have multiple words.
for lib in libs.split():
# Filter the system libraries, which are added by default by the Android
# build system.
if (lib == '-lc' or lib == '-lstdc++' or lib == '-lm' or
lib.endswith('libgcc.a')):
continue
match = re.search(r'([^/]+)\.a$', lib)
if match:
static_lib_modules.append(match.group(1))
continue
match = re.search(r'([^/]+)\.so$', lib)
if match:
dynamic_lib_modules.append(match.group(1))
continue
if lib.startswith('-l'):
ldflags.append(lib)
return (static_lib_modules, dynamic_lib_modules, ldflags)
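# Example (illustrative): FilterLibraries(['-lssl foo/libbar.a baz.so -lm'])
# -> (['libbar'], ['baz'], ['-lssl']); default system libs such as '-lm' are dropped.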
def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteTargetFlags(self, spec, configs, link_deps):
"""Write Makefile code to specify the link flags and library dependencies.
spec, configs: input from gyp.
link_deps: link dependency list; see ComputeDeps()
"""
# Libraries (i.e. -lfoo)
# These must be included even for static libraries as some of them provide
# implicit include paths through the build system.
libraries = gyp.common.uniquer(spec.get('libraries', []))
static_libs, dynamic_libs, ldflags_libs = self.FilterLibraries(libraries)
if self.type != 'static_library':
for configname, config in sorted(configs.iteritems()):
ldflags = list(config.get('ldflags', []))
self.WriteLn('')
self.WriteList(ldflags, 'LOCAL_LDFLAGS_%s' % configname)
self.WriteList(ldflags_libs, 'LOCAL_GYP_LIBS')
self.WriteLn('LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION)) '
'$(LOCAL_GYP_LIBS)')
# Link dependencies (i.e. other gyp targets this target depends on)
# These need not be included for static libraries as within the gyp build
# we do not use the implicit include path mechanism.
if self.type != 'static_library':
static_link_deps = [x[1] for x in link_deps if x[0] == 'static']
shared_link_deps = [x[1] for x in link_deps if x[0] == 'shared']
else:
static_link_deps = []
shared_link_deps = []
# Only write the lists if they are non-empty.
if static_libs or static_link_deps:
self.WriteLn('')
self.WriteList(static_libs + static_link_deps,
'LOCAL_STATIC_LIBRARIES')
self.WriteLn('# Enable grouping to fix circular references')
self.WriteLn('LOCAL_GROUP_STATIC_LIBRARIES := true')
if dynamic_libs or shared_link_deps:
self.WriteLn('')
self.WriteList(dynamic_libs + shared_link_deps,
'LOCAL_SHARED_LIBRARIES')
def WriteTarget(self, spec, configs, deps, link_deps, part_of_all,
write_alias_target):
"""Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
part_of_all: flag indicating this target is part of 'all'
write_alias_target: flag indicating whether to create short aliases for this
target
"""
self.WriteLn('### Rules for final target.')
if self.type != 'none':
self.WriteTargetFlags(spec, configs, link_deps)
settings = spec.get('aosp_build_settings', {})
if settings:
self.WriteLn('### Set directly by aosp_build_settings.')
for k, v in settings.iteritems():
if isinstance(v, list):
self.WriteList(v, k)
else:
self.WriteLn('%s := %s' % (k, make.QuoteIfNecessary(v)))
self.WriteLn('')
# Add to the set of targets which represent the gyp 'all' target. We use the
# name 'gyp_all_modules' as the Android build system doesn't allow the use
# of the Make target 'all' and because 'all_modules' is the equivalent of
# the Make target 'all' on Android.
if part_of_all and write_alias_target:
self.WriteLn('# Add target alias to "gyp_all_modules" target.')
self.WriteLn('.PHONY: gyp_all_modules')
self.WriteLn('gyp_all_modules: %s' % self.android_module)
self.WriteLn('')
# Add an alias from the gyp target name to the Android module name. This
# simplifies manual builds of the target, and is required by the test
# framework.
if self.target != self.android_module and write_alias_target:
self.WriteLn('# Alias gyp target name.')
self.WriteLn('.PHONY: %s' % self.target)
self.WriteLn('%s: %s' % (self.target, self.android_module))
self.WriteLn('')
# Add the command to trigger build of the target type depending
# on the toolset. Ex: BUILD_STATIC_LIBRARY vs. BUILD_HOST_STATIC_LIBRARY
# NOTE: This has to come last!
modifier = ''
if self.toolset == 'host':
modifier = 'HOST_'
if self.type == 'static_library':
self.WriteLn('include $(BUILD_%sSTATIC_LIBRARY)' % modifier)
elif self.type == 'shared_library':
self.WriteLn('LOCAL_PRELINK_MODULE := false')
self.WriteLn('include $(BUILD_%sSHARED_LIBRARY)' % modifier)
elif self.type == 'executable':
self.WriteLn('LOCAL_CXX_STL := libc++_static')
# Executables are for build and test purposes only, so they're installed
# to a directory that doesn't get included in the system image.
self.WriteLn('LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)')
self.WriteLn('include $(BUILD_%sEXECUTABLE)' % modifier)
else:
self.WriteLn('LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp')
self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
if self.toolset == 'target':
self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_VAR_PREFIX)')
else:
self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)')
self.WriteLn()
self.WriteLn('include $(BUILD_SYSTEM)/base_rules.mk')
self.WriteLn()
self.WriteLn('$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)')
self.WriteLn('\t$(hide) echo "Gyp timestamp: $@"')
self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
self.WriteLn('\t$(hide) touch $@')
self.WriteLn()
self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX :=')
def WriteList(self, value_list, variable=None, prefix='',
quoter=make.QuoteIfNecessary, local_pathify=False):
"""Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
if local_pathify:
value_list = [self.LocalPathify(l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteLn(self, text=''):
self.fp.write(text + '\n')
def LocalPathify(self, path):
"""Convert a subdirectory-relative path into a normalized path which starts
with the make variable $(LOCAL_PATH) (i.e. the top of the project tree).
Absolute paths, or paths that contain variables, are just normalized."""
if '$(' in path or os.path.isabs(path):
# path is not a file in the project tree in this case, but calling
# normpath is still important for trimming trailing slashes.
return os.path.normpath(path)
local_path = os.path.join('$(LOCAL_PATH)', self.path, path)
local_path = os.path.normpath(local_path)
# Check that normalizing the path didn't ../ itself out of $(LOCAL_PATH)
# - i.e. that the resulting path is still inside the project tree. The
# path may legitimately have ended up containing just $(LOCAL_PATH), though,
# so we don't look for a slash.
assert local_path.startswith('$(LOCAL_PATH)'), (
'Path %s attempts to escape from gyp path %s!' % (path, self.path))
return local_path
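# Example (illustrative): with self.path == 'foo',
# LocalPathify('bar/../baz.c') -> '$(LOCAL_PATH)/foo/baz.c'.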
def ExpandInputRoot(self, template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return os.path.normpath(path)
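# Example (illustrative):
# ExpandInputRoot('%(INPUT_ROOT)s.pb.cc', 'msg', 'protos') -> 'msg.pb.cc'.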
def PerformBuild(data, configurations, params):
# The android backend only supports the default configuration.
options = params['options']
makefile = os.path.abspath(os.path.join(options.toplevel_dir,
'GypAndroid.mk'))
env = dict(os.environ)
env['ONE_SHOT_MAKEFILE'] = makefile
arguments = ['make', '-C', os.environ['ANDROID_BUILD_TOP'], 'gyp_all_modules']
print 'Building: %s' % arguments
subprocess.check_call(arguments, env=env)
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
generator_flags = params.get('generator_flags', {})
builddir_name = generator_flags.get('output_dir', 'out')
limit_to_target_all = generator_flags.get('limit_to_target_all', False)
write_alias_targets = generator_flags.get('write_alias_targets', True)
sdk_version = generator_flags.get('aosp_sdk_version', 0)
android_top_dir = os.environ.get('ANDROID_BUILD_TOP')
assert android_top_dir, '$ANDROID_BUILD_TOP not set; you need to run lunch.'
def CalculateMakefilePath(build_file, base_name):
"""Determine where to write a Makefile for a given gyp file."""
# Paths in gyp files are relative to the .gyp file, but we want
# paths relative to the source root for the master makefile. Grab
# the path of the .gyp file as the base to relativize against.
# E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.depth)
# We write the file in the base_path directory.
output_file = os.path.join(options.depth, base_path, base_name)
assert not options.generator_output, (
'The Android backend does not support options.generator_output.')
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.toplevel_dir)
return base_path, output_file
# TODO: search for the first non-'Default' target. This can go
# away when we add verification that all targets have the
# necessary configurations.
default_configuration = None
toolsets = set([target_dicts[target]['toolset'] for target in target_list])
for target in target_list:
spec = target_dicts[target]
if spec['default_configuration'] != 'Default':
default_configuration = spec['default_configuration']
break
if not default_configuration:
default_configuration = 'Default'
srcdir = '.'
makefile_name = 'GypAndroid' + options.suffix + '.mk'
makefile_path = os.path.join(options.toplevel_dir, makefile_name)
assert not options.generator_output, (
'The Android backend does not support options.generator_output.')
gyp.common.EnsureDirExists(makefile_path)
root_makefile = open(makefile_path, 'w')
root_makefile.write(header)
# We set LOCAL_PATH just once, here, to the top of the project tree. This
# allows all the other paths we use to be relative to the Android.mk file,
# as the Android build system expects.
root_makefile.write('\nLOCAL_PATH := $(call my-dir)\n')
# Find the list of targets that derive from the gyp file(s) being built.
needed_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
needed_targets.add(target)
build_files = set()
include_list = set()
android_modules = {}
for qualified_target in target_list:
build_file, target, toolset = gyp.common.ParseQualifiedTarget(
qualified_target)
relative_build_file = gyp.common.RelativePath(build_file,
options.toplevel_dir)
build_files.add(relative_build_file)
included_files = data[build_file]['included_files']
for included_file in included_files:
# The included_files entries are relative to the dir of the build file
# that included them, so we have to undo that and then make them relative
# to the root dir.
relative_include_file = gyp.common.RelativePath(
gyp.common.UnrelativePath(included_file, build_file),
options.toplevel_dir)
abs_include_file = os.path.abspath(relative_include_file)
# If the include file is from the ~/.gyp dir, we should use absolute path
# so that relocating the src dir doesn't break the path.
if (params['home_dot_gyp'] and
abs_include_file.startswith(params['home_dot_gyp'])):
build_files.add(abs_include_file)
else:
build_files.add(relative_include_file)
base_path, output_file = CalculateMakefilePath(build_file,
target + '.' + toolset + options.suffix + '.mk')
spec = target_dicts[qualified_target]
configs = spec['configurations']
part_of_all = qualified_target in needed_targets
if limit_to_target_all and not part_of_all:
continue
relative_target = gyp.common.QualifiedTarget(relative_build_file, target,
toolset)
writer = AndroidMkWriter(android_top_dir)
android_module = writer.Write(qualified_target, relative_target, base_path,
output_file, spec, configs,
part_of_all=part_of_all,
write_alias_target=write_alias_targets,
sdk_version=sdk_version)
if android_module in android_modules:
print ('ERROR: Android module names must be unique. The following '
'targets both generate Android module name %s.\n %s\n %s' %
(android_module, android_modules[android_module],
qualified_target))
return
android_modules[android_module] = qualified_target
# Our root_makefile lives at the source root. Compute the relative path
# from there to the output_file for including.
mkfile_rel_path = gyp.common.RelativePath(output_file,
os.path.dirname(makefile_path))
include_list.add(mkfile_rel_path)
root_makefile.write('GYP_CONFIGURATION ?= %s\n' % default_configuration)
root_makefile.write('GYP_VAR_PREFIX ?=\n')
root_makefile.write('GYP_HOST_VAR_PREFIX ?=\n')
root_makefile.write('GYP_HOST_MULTILIB ?= first\n')
# Write out the sorted list of includes.
root_makefile.write('\n')
for include_file in sorted(include_list):
root_makefile.write('include $(LOCAL_PATH)/' + include_file + '\n')
root_makefile.write('\n')
if write_alias_targets:
root_makefile.write(ALL_MODULES_FOOTER)
root_makefile.close()
|
mit
| 429,361,578,222,620,600 | 7,282,210,184,580,835,000 | 40.410046 | 80 | 0.640371 | false |
xtr4nge/FruityProxy
|
plugins/plugin.py
|
1
|
1262
|
#!/usr/bin/env python
# Copyright (C) 2015-2016 xtr4nge [_AT_] gmail.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
try:
from mitmproxy import controller, proxy # mitmproxy 0.17
from mitmproxy.proxy.server import ProxyServer # mitmproxy 0.17
except:
from libmproxy import controller, proxy # mitmproxy 0.15
from libmproxy.proxy.server import ProxyServer # mitmproxy 0.15
import logging
from configobj import ConfigObj
class Plugin(object):
name = "Plugin"
version = "1.0"
config = ConfigObj("fruityproxy.conf")
def request(self, flow):
pass
def response(self, flow):
pass
|
lgpl-3.0
| -1,350,723,407,429,639,400 | 1,398,769,543,895,168,000 | 29.047619 | 71 | 0.715531 | false |
achals/servo
|
tests/wpt/css-tests/css21_dev/xhtml1print/reference/support/fonts/makegsubfonts.py
|
1616
|
14125
|
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
ScriptList, ScriptRecord, Script, DefaultLangSys,\
FeatureList, FeatureRecord, Feature,\
LookupList, Lookup, AlternateSubst, SingleSubst
# paths
directory = os.path.dirname(__file__)
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
featureList = os.path.join(directory, "gsubtest-features.txt")
javascriptData = os.path.join(directory, "gsubtest-features.js")
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")
baseCodepoint = 0xe000
# -------
# Features
# -------
f = open(featureList, "rb")
text = f.read()
f.close()
mapping = []
for line in text.splitlines():
line = line.strip()
if not line:
continue
if line.startswith("#"):
continue
# parse
values = line.split("\t")
tag = values.pop(0)
mapping.append(tag);
# --------
# Outlines
# --------
def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs)
charStringsIndex.append(charString)
glyphID = len(topDict.charset)
charStrings.charStrings[glyphName] = glyphID
topDict.charset.append(glyphName)
def makeLookup1():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
# bump this up so that the sequence is the same as the lookup 3 font
cp += 3
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 1
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = SingleSubst()
subtable.Format = 2
subtable.LookupType = 1
subtable.mapping = {
"%s.pass" % tag : "%s.fail" % tag,
"%s.fail" % tag : "%s.pass" % tag,
}
lookup.SubTable.append(subtable)
path = outputPath % 1 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeLookup3():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
# tag.default
glyphName = "%s.default" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.alt1,2,3
for i in range(1,4):
glyphName = "%s.alt%d" % (tag, i)
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 3
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = AlternateSubst()
subtable.Format = 1
subtable.LookupType = 3
subtable.alternates = {
"%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
"%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
}
lookup.SubTable.append(subtable)
path = outputPath % 3 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeJavascriptData():
features = sorted(mapping)
outStr = []
outStr.append("")
outStr.append("/* This file is autogenerated by makegsubfonts.py */")
outStr.append("")
outStr.append("/* ")
outStr.append(" Features defined in gsubtest fonts with associated base")
outStr.append(" codepoints for each feature:")
outStr.append("")
outStr.append(" cp = codepoint for feature featX")
outStr.append("")
outStr.append(" cp default PASS")
outStr.append(" cp featX=1 FAIL")
outStr.append(" cp featX=2 FAIL")
outStr.append("")
outStr.append(" cp+1 default FAIL")
outStr.append(" cp+1 featX=1 PASS")
outStr.append(" cp+1 featX=2 FAIL")
outStr.append("")
outStr.append(" cp+2 default FAIL")
outStr.append(" cp+2 featX=1 FAIL")
outStr.append(" cp+2 featX=2 PASS")
outStr.append("")
outStr.append("*/")
outStr.append("")
outStr.append("var gFeatures = {");
cp = baseCodepoint
taglist = []
for tag in features:
taglist.append("\"%s\": 0x%x" % (tag, cp))
cp += 4
outStr.append(textwrap.fill(", ".join(taglist), initial_indent=" ", subsequent_indent=" "))
outStr.append("};");
outStr.append("");
if os.path.exists(javascriptData):
os.remove(javascriptData)
f = open(javascriptData, "wb")
f.write("\n".join(outStr))
f.close()
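# Illustrative sketch of the generated data (hypothetical feature tags, not
# from the original file): with features ["liga", "smcp"] and baseCodepoint
# 0xe000, the emitted JavaScript would contain roughly
#
#   var gFeatures = {
#     "liga": 0xe000, "smcp": 0xe004
#   };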
# build fonts
print "Making lookup type 1 font..."
makeLookup1()
print "Making lookup type 3 font..."
makeLookup3()
# output javascript data
print "Making javascript data file..."
makeJavascriptData()
|
mpl-2.0
| -733,030,620,572,698,800 | -7,216,497,582,265,854,000 | 28.063786 | 135 | 0.649628 | false |
byterom/android_external_chromium_org
|
third_party/protobuf/python/google/protobuf/internal/type_checkers.py
|
527
|
12163
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides type checking routines.
This module defines type checking utilities in the form of dictionaries:
VALUE_CHECKERS: A dictionary of field types and a value validation object.
TYPE_TO_BYTE_SIZE_FN: A dictionary with field types and a size computing
function.
TYPE_TO_SERIALIZE_METHOD: A dictionary with field types and serialization
function.
  FIELD_TYPE_TO_WIRE_TYPE: A dictionary with field types and their
    corresponding wire types.
TYPE_TO_DESERIALIZE_METHOD: A dictionary with field types and deserialization
function.
"""
__author__ = '[email protected] (Will Robinson)'
from google.protobuf.internal import decoder
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import descriptor
_FieldDescriptor = descriptor.FieldDescriptor
def GetTypeChecker(cpp_type, field_type):
"""Returns a type checker for a message field of the specified types.
Args:
cpp_type: C++ type of the field (see descriptor.py).
field_type: Protocol message field type (see descriptor.py).
Returns:
An instance of TypeChecker which can be used to verify the types
of values assigned to a field of the specified type.
"""
if (cpp_type == _FieldDescriptor.CPPTYPE_STRING and
field_type == _FieldDescriptor.TYPE_STRING):
return UnicodeValueChecker()
return _VALUE_CHECKERS[cpp_type]
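# Illustrative sketch (not part of the original module): for an int32 field
# the returned checker enforces both type and range, e.g.
#
#   checker = GetTypeChecker(_FieldDescriptor.CPPTYPE_INT32,
#                            _FieldDescriptor.TYPE_INT32)
#   checker.CheckValue(42)        # accepted
#   checker.CheckValue(1 << 40)   # raises ValueError (outside int32 range)
#   checker.CheckValue("42")      # raises TypeError (not an integer)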
# None of the typecheckers below make any attempt to guard against people
# subclassing builtin types and doing weird things. We're not trying to
# protect against malicious clients here, just people accidentally shooting
# themselves in the foot in obvious ways.
class TypeChecker(object):
"""Type checker used to catch type errors as early as possible
when the client is setting scalar fields in protocol messages.
"""
def __init__(self, *acceptable_types):
self._acceptable_types = acceptable_types
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, self._acceptable_types):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), self._acceptable_types))
raise TypeError(message)
# IntValueChecker and its subclasses perform integer type-checks
# and bounds-checks.
class IntValueChecker(object):
"""Checker used for integer fields. Performs type-check and range check."""
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, (int, long)):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), (int, long)))
raise TypeError(message)
if not self._MIN <= proposed_value <= self._MAX:
raise ValueError('Value out of range: %d' % proposed_value)
class UnicodeValueChecker(object):
"""Checker used for string fields."""
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, (str, unicode)):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), (str, unicode)))
raise TypeError(message)
# If the value is of type 'str' make sure that it is in 7-bit ASCII
# encoding.
if isinstance(proposed_value, str):
try:
unicode(proposed_value, 'ascii')
except UnicodeDecodeError:
raise ValueError('%.1024r has type str, but isn\'t in 7-bit ASCII '
'encoding. Non-ASCII strings must be converted to '
'unicode objects before being added.' %
(proposed_value))
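# Hedged example (not in the original file): under Python 2 the checker
# accepts unicode objects and 7-bit ASCII byte strings, and rejects
# non-ASCII str values:
#
#   UnicodeValueChecker().CheckValue(u'caf\xe9')  # accepted (unicode)
#   UnicodeValueChecker().CheckValue('ascii')     # accepted (7-bit str)
#   UnicodeValueChecker().CheckValue('caf\xe9')   # raises ValueError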
class Int32ValueChecker(IntValueChecker):
# We're sure to use ints instead of longs here since comparison may be more
# efficient.
_MIN = -2147483648
_MAX = 2147483647
class Uint32ValueChecker(IntValueChecker):
_MIN = 0
_MAX = (1 << 32) - 1
class Int64ValueChecker(IntValueChecker):
_MIN = -(1 << 63)
_MAX = (1 << 63) - 1
class Uint64ValueChecker(IntValueChecker):
_MIN = 0
_MAX = (1 << 64) - 1
# Type-checkers for all scalar CPPTYPEs.
_VALUE_CHECKERS = {
_FieldDescriptor.CPPTYPE_INT32: Int32ValueChecker(),
_FieldDescriptor.CPPTYPE_INT64: Int64ValueChecker(),
_FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(),
_FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(),
_FieldDescriptor.CPPTYPE_DOUBLE: TypeChecker(
float, int, long),
_FieldDescriptor.CPPTYPE_FLOAT: TypeChecker(
float, int, long),
_FieldDescriptor.CPPTYPE_BOOL: TypeChecker(bool, int),
_FieldDescriptor.CPPTYPE_ENUM: Int32ValueChecker(),
_FieldDescriptor.CPPTYPE_STRING: TypeChecker(str),
}
# Map from field type to a function F, such that F(field_num, value)
# gives the total byte size for a value of the given type. This
# byte size includes tag information and any other additional space
# associated with serializing "value".
TYPE_TO_BYTE_SIZE_FN = {
_FieldDescriptor.TYPE_DOUBLE: wire_format.DoubleByteSize,
_FieldDescriptor.TYPE_FLOAT: wire_format.FloatByteSize,
_FieldDescriptor.TYPE_INT64: wire_format.Int64ByteSize,
_FieldDescriptor.TYPE_UINT64: wire_format.UInt64ByteSize,
_FieldDescriptor.TYPE_INT32: wire_format.Int32ByteSize,
_FieldDescriptor.TYPE_FIXED64: wire_format.Fixed64ByteSize,
_FieldDescriptor.TYPE_FIXED32: wire_format.Fixed32ByteSize,
_FieldDescriptor.TYPE_BOOL: wire_format.BoolByteSize,
_FieldDescriptor.TYPE_STRING: wire_format.StringByteSize,
_FieldDescriptor.TYPE_GROUP: wire_format.GroupByteSize,
_FieldDescriptor.TYPE_MESSAGE: wire_format.MessageByteSize,
_FieldDescriptor.TYPE_BYTES: wire_format.BytesByteSize,
_FieldDescriptor.TYPE_UINT32: wire_format.UInt32ByteSize,
_FieldDescriptor.TYPE_ENUM: wire_format.EnumByteSize,
_FieldDescriptor.TYPE_SFIXED32: wire_format.SFixed32ByteSize,
_FieldDescriptor.TYPE_SFIXED64: wire_format.SFixed64ByteSize,
_FieldDescriptor.TYPE_SINT32: wire_format.SInt32ByteSize,
_FieldDescriptor.TYPE_SINT64: wire_format.SInt64ByteSize
}
# Maps from field types to encoder constructors.
TYPE_TO_ENCODER = {
_FieldDescriptor.TYPE_DOUBLE: encoder.DoubleEncoder,
_FieldDescriptor.TYPE_FLOAT: encoder.FloatEncoder,
_FieldDescriptor.TYPE_INT64: encoder.Int64Encoder,
_FieldDescriptor.TYPE_UINT64: encoder.UInt64Encoder,
_FieldDescriptor.TYPE_INT32: encoder.Int32Encoder,
_FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Encoder,
_FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Encoder,
_FieldDescriptor.TYPE_BOOL: encoder.BoolEncoder,
_FieldDescriptor.TYPE_STRING: encoder.StringEncoder,
_FieldDescriptor.TYPE_GROUP: encoder.GroupEncoder,
_FieldDescriptor.TYPE_MESSAGE: encoder.MessageEncoder,
_FieldDescriptor.TYPE_BYTES: encoder.BytesEncoder,
_FieldDescriptor.TYPE_UINT32: encoder.UInt32Encoder,
_FieldDescriptor.TYPE_ENUM: encoder.EnumEncoder,
_FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Encoder,
_FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Encoder,
_FieldDescriptor.TYPE_SINT32: encoder.SInt32Encoder,
_FieldDescriptor.TYPE_SINT64: encoder.SInt64Encoder,
}
# Maps from field types to sizer constructors.
TYPE_TO_SIZER = {
_FieldDescriptor.TYPE_DOUBLE: encoder.DoubleSizer,
_FieldDescriptor.TYPE_FLOAT: encoder.FloatSizer,
_FieldDescriptor.TYPE_INT64: encoder.Int64Sizer,
_FieldDescriptor.TYPE_UINT64: encoder.UInt64Sizer,
_FieldDescriptor.TYPE_INT32: encoder.Int32Sizer,
_FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Sizer,
_FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Sizer,
_FieldDescriptor.TYPE_BOOL: encoder.BoolSizer,
_FieldDescriptor.TYPE_STRING: encoder.StringSizer,
_FieldDescriptor.TYPE_GROUP: encoder.GroupSizer,
_FieldDescriptor.TYPE_MESSAGE: encoder.MessageSizer,
_FieldDescriptor.TYPE_BYTES: encoder.BytesSizer,
_FieldDescriptor.TYPE_UINT32: encoder.UInt32Sizer,
_FieldDescriptor.TYPE_ENUM: encoder.EnumSizer,
_FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Sizer,
_FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Sizer,
_FieldDescriptor.TYPE_SINT32: encoder.SInt32Sizer,
_FieldDescriptor.TYPE_SINT64: encoder.SInt64Sizer,
}
# Maps from field type to a decoder constructor.
TYPE_TO_DECODER = {
_FieldDescriptor.TYPE_DOUBLE: decoder.DoubleDecoder,
_FieldDescriptor.TYPE_FLOAT: decoder.FloatDecoder,
_FieldDescriptor.TYPE_INT64: decoder.Int64Decoder,
_FieldDescriptor.TYPE_UINT64: decoder.UInt64Decoder,
_FieldDescriptor.TYPE_INT32: decoder.Int32Decoder,
_FieldDescriptor.TYPE_FIXED64: decoder.Fixed64Decoder,
_FieldDescriptor.TYPE_FIXED32: decoder.Fixed32Decoder,
_FieldDescriptor.TYPE_BOOL: decoder.BoolDecoder,
_FieldDescriptor.TYPE_STRING: decoder.StringDecoder,
_FieldDescriptor.TYPE_GROUP: decoder.GroupDecoder,
_FieldDescriptor.TYPE_MESSAGE: decoder.MessageDecoder,
_FieldDescriptor.TYPE_BYTES: decoder.BytesDecoder,
_FieldDescriptor.TYPE_UINT32: decoder.UInt32Decoder,
_FieldDescriptor.TYPE_ENUM: decoder.EnumDecoder,
_FieldDescriptor.TYPE_SFIXED32: decoder.SFixed32Decoder,
_FieldDescriptor.TYPE_SFIXED64: decoder.SFixed64Decoder,
_FieldDescriptor.TYPE_SINT32: decoder.SInt32Decoder,
_FieldDescriptor.TYPE_SINT64: decoder.SInt64Decoder,
}
# Maps from field type to expected wiretype.
FIELD_TYPE_TO_WIRE_TYPE = {
_FieldDescriptor.TYPE_DOUBLE: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_FLOAT: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_INT64: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_UINT64: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_INT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_FIXED64: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_FIXED32: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_BOOL: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_STRING:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_GROUP: wire_format.WIRETYPE_START_GROUP,
_FieldDescriptor.TYPE_MESSAGE:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_BYTES:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_UINT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_ENUM: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_SFIXED32: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_SFIXED64: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_SINT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_SINT64: wire_format.WIRETYPE_VARINT,
}
|
bsd-3-clause
| 1,794,764,426,537,833,200 | 5,133,878,453,156,557,000 | 41.527972 | 80 | 0.748582 | false |
Luffin/powerline
|
powerline/renderers/vim.py
|
32
|
5785
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import sys
import vim
from powerline.bindings.vim import vim_get_func, vim_getoption, environ, current_tabpage, get_vim_encoding
from powerline.renderer import Renderer
from powerline.colorscheme import ATTR_BOLD, ATTR_ITALIC, ATTR_UNDERLINE
from powerline.theme import Theme
from powerline.lib.unicode import unichr, register_strwidth_error
vim_mode = vim_get_func('mode', rettype='unicode')
if int(vim.eval('v:version')) >= 702:
_vim_mode = vim_mode
vim_mode = lambda: _vim_mode(1)
mode_translations = {
unichr(ord('V') - 0x40): '^V',
unichr(ord('S') - 0x40): '^S',
}
class VimRenderer(Renderer):
'''Powerline vim segment renderer.'''
character_translations = Renderer.character_translations.copy()
character_translations[ord('%')] = '%%'
segment_info = Renderer.segment_info.copy()
segment_info.update(environ=environ)
def __init__(self, *args, **kwargs):
if not hasattr(vim, 'strwidth'):
			# Hope nobody wants to change this at runtime
if vim.eval('&ambiwidth') == 'double':
kwargs = dict(**kwargs)
kwargs['ambigious'] = 2
super(VimRenderer, self).__init__(*args, **kwargs)
self.hl_groups = {}
self.prev_highlight = None
self.strwidth_error_name = register_strwidth_error(self.strwidth)
self.encoding = get_vim_encoding()
def shutdown(self):
self.theme.shutdown()
for match in self.local_themes.values():
if 'theme' in match:
match['theme'].shutdown()
def add_local_theme(self, matcher, theme):
if matcher in self.local_themes:
raise KeyError('There is already a local theme with given matcher')
self.local_themes[matcher] = theme
def get_matched_theme(self, match):
try:
return match['theme']
except KeyError:
match['theme'] = Theme(theme_config=match['config'], main_theme_config=self.theme_config, **self.theme_kwargs)
return match['theme']
def get_theme(self, matcher_info):
if matcher_info is None:
return self.get_matched_theme(self.local_themes[None])
for matcher in self.local_themes.keys():
if matcher and matcher(matcher_info):
return self.get_matched_theme(self.local_themes[matcher])
else:
return self.theme
if hasattr(vim, 'strwidth'):
if sys.version_info < (3,):
def strwidth(self, string):
# Does not work with tabs, but neither is strwidth from default
# renderer
return vim.strwidth(string.encode(self.encoding, 'replace'))
else:
@staticmethod
def strwidth(string):
return vim.strwidth(string)
def get_segment_info(self, segment_info, mode):
return segment_info or self.segment_info
def render(self, window=None, window_id=None, winnr=None, is_tabline=False):
'''Render all segments.'''
segment_info = self.segment_info.copy()
if window is vim.current.window:
mode = vim_mode()
mode = mode_translations.get(mode, mode)
else:
mode = 'nc'
segment_info.update(
window=window,
mode=mode,
window_id=window_id,
winnr=winnr,
buffer=window.buffer,
tabpage=current_tabpage(),
encoding=self.encoding,
)
segment_info['tabnr'] = segment_info['tabpage'].number
segment_info['bufnr'] = segment_info['buffer'].number
if is_tabline:
winwidth = int(vim_getoption('columns'))
else:
winwidth = segment_info['window'].width
statusline = super(VimRenderer, self).render(
mode=mode,
width=winwidth,
segment_info=segment_info,
matcher_info=(None if is_tabline else segment_info),
)
statusline = statusline.encode(self.encoding, self.strwidth_error_name)
return statusline
def reset_highlight(self):
self.hl_groups.clear()
def hlstyle(self, fg=None, bg=None, attrs=None):
'''Highlight a segment.
If an argument is None, the argument is ignored. If an argument is
False, the argument is reset to the terminal defaults. If an argument
is a valid color or attribute, it’s added to the vim highlight group.
'''
# In order not to hit E541 two consequent identical highlighting
# specifiers may be squashed into one.
attrs = attrs or 0 # Normalize `attrs`
if (fg, bg, attrs) == self.prev_highlight:
return ''
self.prev_highlight = (fg, bg, attrs)
# We don’t need to explicitly reset attributes in vim, so skip those
# calls
if not attrs and not bg and not fg:
return ''
if not (fg, bg, attrs) in self.hl_groups:
hl_group = {
'ctermfg': 'NONE',
'guifg': None,
'ctermbg': 'NONE',
'guibg': None,
'attrs': ['NONE'],
'name': '',
}
if fg is not None and fg is not False:
hl_group['ctermfg'] = fg[0]
hl_group['guifg'] = fg[1]
if bg is not None and bg is not False:
hl_group['ctermbg'] = bg[0]
hl_group['guibg'] = bg[1]
if attrs:
hl_group['attrs'] = []
if attrs & ATTR_BOLD:
hl_group['attrs'].append('bold')
if attrs & ATTR_ITALIC:
hl_group['attrs'].append('italic')
if attrs & ATTR_UNDERLINE:
hl_group['attrs'].append('underline')
hl_group['name'] = (
'Pl_'
+ str(hl_group['ctermfg']) + '_'
+ str(hl_group['guifg']) + '_'
+ str(hl_group['ctermbg']) + '_'
+ str(hl_group['guibg']) + '_'
+ ''.join(hl_group['attrs'])
)
self.hl_groups[(fg, bg, attrs)] = hl_group
vim.command('hi {group} ctermfg={ctermfg} guifg={guifg} guibg={guibg} ctermbg={ctermbg} cterm={attrs} gui={attrs}'.format(
group=hl_group['name'],
ctermfg=hl_group['ctermfg'],
guifg='#{0:06x}'.format(hl_group['guifg']) if hl_group['guifg'] is not None else 'NONE',
ctermbg=hl_group['ctermbg'],
guibg='#{0:06x}'.format(hl_group['guibg']) if hl_group['guibg'] is not None else 'NONE',
attrs=','.join(hl_group['attrs']),
))
return '%#' + self.hl_groups[(fg, bg, attrs)]['name'] + '#'
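# Illustrative sketch (hypothetical colors, not part of the original file):
# hlstyle(fg=(231, 0xffffff), bg=(240, 0x585858), attrs=ATTR_BOLD) would
# define a highlight group named 'Pl_231_16777215_240_5789784_bold' via :hi
# and return '%#Pl_231_16777215_240_5789784_bold#' for embedding in the
# statusline.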
renderer = VimRenderer
|
mit
| -7,111,060,791,916,219,000 | 5,726,889,300,900,281,000 | 29.75 | 125 | 0.667878 | false |
2014c2g2/teamwork
|
exts/w2/static/Brython2.0.0-20140209-164925/Lib/browser/indexed_db.py
|
100
|
2966
|
class EventListener:
    def __init__(self, events=None):
        # Use None as the default to avoid sharing one mutable list across
        # all EventListener instances.
        self._events = events if events is not None else []
def append(self, event):
self._events.append(event)
def fire(self, e):
for _event in self._events:
_event(e)
class IndexedDB:
def __init__(self):
if not __BRYTHON__.has_indexedDB:
raise NotImplementedError("Your browser doesn't support indexedDB")
return
self._indexedDB=__BRYTHON__.indexedDB()
self._db=None
self._version=None
def _onsuccess(self, event):
self._db=event.target.result
def open(self, name, onsuccess, version=1.0, onerror=None,
onupgradeneeded=None):
self._version=version
_result=self._indexedDB.open(name, version)
_success=EventListener([self._onsuccess, onsuccess])
_result.onsuccess=_success.fire
_result.onupgradeneeded=onupgradeneeded
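        # NOTE: the local callbacks defined below shadow the onerror
        # parameter, so a caller-supplied error handler is currently ignored;
        # the commented-out guard below hints at the intended behaviour.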
#if onerror is None:
def onerror(e):
print("onerror: %s:%s" % (e.type, e.target.result))
def onblocked(e):
print("blocked: %s:%s" % (e.type, e.result))
_result.onerror=onerror
_result.onblocked=onblocked
def transaction(self, entities, mode='read'):
return Transaction(self._db.transaction(entities, mode))
class Transaction:
def __init__(self, transaction):
self._transaction=transaction
def objectStore(self, name):
return ObjectStore(self._transaction.objectStore(name))
class ObjectStore:
def __init__(self, objectStore):
self._objectStore=objectStore
self._data=[]
def clear(self, onsuccess=None, onerror=None):
_result=self._objectStore.clear()
if onsuccess is not None:
_result.onsuccess=onsuccess
if onerror is not None:
_result.onerror=onerror
def _helper(self, func, object, onsuccess=None, onerror=None):
_result=func(object)
if onsuccess is not None:
_result.onsuccess=onsuccess
if onerror is not None:
_result.onerror=onerror
def put(self, obj, key=None, onsuccess=None, onerror=None):
_r = self._objectStore.put(obj, key)
_r.onsuccess = onsuccess
_r.onerror = onerror
def add(self, obj, key, onsuccess=None, onerror=None):
_r = self._objectStore.add(obj, key)
_r.onsuccess = onsuccess
_r.onerror = onerror
#self._helper(self._objectStore.add, object, onsuccess, onerror)
def delete(self, index, onsuccess=None, onerror=None):
self._helper(self._objectStore.delete, index, onsuccess, onerror)
def query(self, *args):
self._data=[]
        def onsuccess(event):
            cursor=event.target.result
            if cursor is not None:
                self._data.append(cursor.value)
                # "continue" is a reserved word in Python, so the cursor's
                # continue() method has to be invoked via getattr.
                getattr(cursor, "continue")()
self._objectStore.openCursor(args).onsuccess=onsuccess
def fetchall(self):
yield self._data
def get(self, key, onsuccess=None, onerror=None):
self._helper(self._objectStore.get, key, onsuccess, onerror)
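# Minimal usage sketch (browser-only; the "library"/"books" names are
# hypothetical and not part of this module):
#
#   db = IndexedDB()
#   def on_open(event):
#       store = db.transaction(["books"], "readwrite").objectStore("books")
#       store.put({"title": "Brython"}, 1)
#   db.open("library", on_open, version=1.0)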
|
gpl-2.0
| -5,192,170,604,680,645,000 | -1,556,735,105,089,239,800 | 26.981132 | 76 | 0.639582 | false |
phlax/translate
|
translate/convert/po2prop.py
|
3
|
11398
|
# -*- coding: utf-8 -*-
#
# Copyright 2002-2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Convert Gettext PO localization files to Java/Mozilla .properties files.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/prop2po.html
for examples and usage instructions.
"""
import six
import warnings
from translate.convert import accesskey, convert
from translate.misc import quote
from translate.storage import po, properties
eol = u"\n"
def applytranslation(key, propunit, inunit, mixedkeys):
"""applies the translation for key in the po unit to the prop unit"""
# this converts the po-style string to a prop-style string
value = inunit.target
# handle mixed keys
for labelsuffix in properties.labelsuffixes:
if key.endswith(labelsuffix):
if key in mixedkeys:
value, akey = accesskey.extract(value)
break
else:
for akeysuffix in properties.accesskeysuffixes:
if key.endswith(akeysuffix):
if key in mixedkeys:
label, value = accesskey.extract(value)
if not value:
warnings.warn("Could not find accesskey for %s" % key)
# Use the source language accesskey
label, value = accesskey.extract(inunit.source)
else:
original = propunit.source
# For the sake of diffs we keep the case of the
# accesskey the same if we know the translation didn't
# change. Casing matters in XUL.
if value == propunit.source and original.lower() == value.lower():
if original.isupper():
value = value.upper()
elif original.islower():
value = value.lower()
return value
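# Hedged example (hypothetical strings, not from the original module): for a
# Mozilla-style mixed label/accesskey pair, a translated value of "&Save"
# would typically be split so the ".label" key receives "Save" and the
# matching ".accesskey" key receives "S":
#
#   accesskey.extract(u"&Save")  # -> (u"Save", u"S"), roughly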
class reprop(object):
def __init__(self, templatefile, inputstore, personality, encoding=None,
remove_untranslated=False):
self.templatefile = templatefile
self.inputstore = inputstore
self.personality = properties.get_dialect(personality)
self.encoding = encoding
if self.encoding is None:
self.encoding = self.personality.default_encoding
self.remove_untranslated = remove_untranslated
self.mixer = accesskey.UnitMixer(properties.labelsuffixes,
properties.accesskeysuffixes)
def convertstore(self, includefuzzy=False):
self.includefuzzy = includefuzzy
self.inmultilinemsgid = False
self.inecho = False
self.inputstore.makeindex()
if self.personality.name == "gaia":
self._explode_gaia_plurals()
outputlines = []
        # Readlines doesn't work for UTF-16, so we read() and splitlines(keepends) instead
content = self.templatefile.read().decode(self.encoding)
for line in content.splitlines(True):
outputstr = self.convertline(line)
outputlines.append(outputstr)
return u"".join(outputlines).encode(self.encoding)
def _handle_accesskeys(self, inunit, currkey):
value = inunit.target
if self.personality.name == "mozilla":
keys = inunit.getlocations()
mixedkeys = self.mixer.match_entities(keys)
for key in keys:
if key == currkey and key in self.inputstore.locationindex:
propunit = self.inputstore.locationindex[key] # find the prop
value = applytranslation(key, propunit, inunit, mixedkeys)
break
return value
def _explode_gaia_plurals(self):
"""Explode the gaia plurals."""
from translate.lang import data
for unit in self.inputstore.units:
if not unit.hasplural():
continue
if unit.isfuzzy() and not self.includefuzzy or not unit.istranslated():
continue
names = data.cldr_plural_categories
location = unit.getlocations()[0]
for category, text in zip(names, unit.target.strings):
# TODO: for now we assume all forms are present. We need to
# fill in the rest after mapping things to the proper CLDR names.
if category == 'zero':
# [zero] cases are translated as separate units
continue
new_unit = self.inputstore.addsourceunit(u"fish") # not used
new_location = '%s[%s]' % (location, category)
new_unit.addlocation(new_location)
new_unit.target = text
self.inputstore.locationindex[new_location] = new_unit
# We don't want the plural marker to be translated:
del self.inputstore.locationindex[location]
def convertline(self, line):
returnline = u""
# handle multiline msgid if we're in one
if self.inmultilinemsgid:
msgid = quote.rstripeol(line).strip()
# see if there's more
self.inmultilinemsgid = (msgid[-1:] == '\\')
# if we're echoing...
if self.inecho:
returnline = line
# otherwise, this could be a comment
elif line.strip()[:1] == '#':
returnline = quote.rstripeol(line) + eol
else:
line = quote.rstripeol(line)
delimiter_char, delimiter_pos = self.personality.find_delimiter(line)
if quote.rstripeol(line)[-1:] == '\\':
self.inmultilinemsgid = True
if delimiter_pos == -1:
key = self.personality.key_strip(line)
delimiter = " %s " % self.personality.delimiters[0]
else:
key = self.personality.key_strip(line[:delimiter_pos])
# Calculate space around the equal sign
prespace = line[line.find(' ', len(key)):delimiter_pos]
postspacestart = len(line[delimiter_pos+1:])
postspaceend = len(line[delimiter_pos+1:].lstrip())
postspace = line[delimiter_pos+1:delimiter_pos+(postspacestart-postspaceend)+1]
delimiter = prespace + delimiter_char + postspace
if key in self.inputstore.locationindex:
unit = self.inputstore.locationindex[key]
if unit is None or not unit.istranslated() and bool(unit.source) and self.remove_untranslated:
returnline = u""
self.inecho = False
else:
if unit.isfuzzy() and not self.includefuzzy or len(unit.target) == 0:
value = unit.source
else:
value = self._handle_accesskeys(unit, key)
self.inecho = False
assert isinstance(value, six.text_type)
returnline = "%(key)s%(del)s%(value)s%(term)s%(eol)s" % {
"key": "%s%s%s" % (self.personality.key_wrap_char,
key,
self.personality.key_wrap_char),
"del": delimiter,
"value": "%s%s%s" % (self.personality.value_wrap_char,
self.personality.encode(value),
self.personality.value_wrap_char),
"term": self.personality.pair_terminator,
"eol": eol,
}
else:
self.inecho = True
returnline = line + eol
assert isinstance(returnline, six.text_type)
return returnline
def convertstrings(inputfile, outputfile, templatefile, personality="strings",
includefuzzy=False, encoding=None, outputthreshold=None,
remove_untranslated=False):
""".strings specific convertor function"""
return convertprop(inputfile, outputfile, templatefile,
personality="strings", includefuzzy=includefuzzy,
encoding=encoding, outputthreshold=outputthreshold,
remove_untranslated=remove_untranslated)
def convertmozillaprop(inputfile, outputfile, templatefile,
includefuzzy=False, remove_untranslated=False,
outputthreshold=None):
"""Mozilla specific convertor function"""
return convertprop(inputfile, outputfile, templatefile,
personality="mozilla", includefuzzy=includefuzzy,
remove_untranslated=remove_untranslated,
outputthreshold=outputthreshold)
def convertprop(inputfile, outputfile, templatefile, personality="java",
includefuzzy=False, encoding=None, remove_untranslated=False,
outputthreshold=None):
inputstore = po.pofile(inputfile)
if not convert.should_output_store(inputstore, outputthreshold):
return False
if templatefile is None:
raise ValueError("must have template file for properties files")
# convertor = po2prop()
else:
convertor = reprop(templatefile, inputstore, personality, encoding,
remove_untranslated)
outputprop = convertor.convertstore(includefuzzy)
outputfile.write(outputprop)
return True
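# Minimal usage sketch (hypothetical file names, not part of the module):
#
#   with open("de.po", "rb") as po_in, \
#        open("strings.properties", "rb") as template, \
#        open("de.properties", "wb") as out:
#       convertprop(po_in, out, template, personality="java")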
formats = {
("po", "properties"): ("properties", convertprop),
("po", "lang"): ("lang", convertprop),
("po", "strings"): ("strings", convertstrings),
}
def main(argv=None):
# handle command line options
parser = convert.ConvertOptionParser(formats, usetemplates=True,
description=__doc__)
parser.add_option(
"", "--personality", dest="personality",
default=properties.default_dialect, type="choice",
choices=list(properties.dialects.keys()),
help="override the input file format: %s (for .properties files, default: %s)" % (
", ".join(six.iterkeys(properties.dialects)), properties.default_dialect),
metavar="TYPE")
parser.add_option(
"", "--encoding", dest="encoding", default=None,
help="override the encoding set by the personality",
metavar="ENCODING")
parser.add_threshold_option()
parser.add_fuzzy_option()
parser.add_remove_untranslated_option()
parser.passthrough.append("personality")
parser.passthrough.append("encoding")
parser.run(argv)
if __name__ == '__main__':
main()
|
gpl-2.0
| 6,680,087,866,919,553,000 | 5,275,604,445,164,701,000 | 41.849624 | 110 | 0.585103 | false |
mordred-descriptor/mordred
|
mordred/_util.py
|
1
|
2230
|
from __future__ import print_function
import os
import sys
import numpy as np
def parse_enum(enum, v):
if isinstance(v, enum):
return v
else:
return enum[v]
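# Illustrative sketch (hypothetical enum, not in the original file):
#
#   from enum import Enum
#   class Color(Enum):
#       red = 1
#   parse_enum(Color, Color.red)  # -> Color.red (already an instance)
#   parse_enum(Color, "red")      # -> Color.red (looked up by name)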
def atoms_to_numpy(f, mol, dtype="float"):
return np.fromiter((f(a) for a in mol.GetAtoms()), dtype, mol.GetNumAtoms())
def conformer_to_numpy(conf):
return np.array([list(conf.GetAtomPosition(i)) for i in range(conf.GetNumAtoms())])
class Capture(object):
def __init__(self, target="stderr"):
self.target = target
self.orig = getattr(sys, target)
self.result = []
def write(self, text):
self.result.append(text)
def flush(self):
pass
def __enter__(self):
setattr(sys, self.target, self)
return self
def __exit__(self, *args):
setattr(sys, self.target, self.orig)
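# Minimal usage sketch (not part of the original module):
#
#   with Capture("stderr") as cap:
#       sys.stderr.write("warning\n")
#   assert cap.result == ["warning\n"]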
class DummyBar(object):
def __init__(self, *args, **kwargs):
pass
def __enter__(self):
return self
def __exit__(self, *args, **kws):
pass
def update(self, *args, **kws):
pass
@classmethod
def write(cls, s, file=sys.stdout, end="\n"):
print(s, file=file, end=end) # noqa: T003
class NotebookWrapper(object):
def __init__(self, **kwargs):
from tqdm import tqdm_notebook
self.bar = tqdm_notebook(**kwargs)
def __enter__(self):
return self
def __exit__(self, *args):
pass
def update(self, *args, **kwargs):
self.bar.update(*args, **kwargs)
    def write(self, *args, **kwargs):
        # Delegate to the underlying bar's write (the original mistakenly
        # called update here).
        self.bar.write(*args, **kwargs)
def PathType(string):
if not os.path.isfile(string):
raise ValueError("file not exists: {}".format(string))
return string
def module_prog(pkg):
return "{} -m {}".format(os.path.basename(sys.executable), pkg)
def to_ordinal(n):
r"""Int to ordinal string.
>>> to_ordinal(1)
'first'
>>> to_ordinal(2)
'second'
>>> to_ordinal(3)
'third'
>>> to_ordinal(4)
'4-th'
>>> to_ordinal(104)
'104-th'
"""
if n == 1:
return "first"
elif n == 2:
return "second"
elif n == 3:
return "third"
else:
return "{}-th".format(n)
|
bsd-3-clause
| -8,487,181,237,958,162,000 | -1,738,550,681,758,508,000 | 18.561404 | 87 | 0.561435 | false |
thinkopensolutions/geraldo
|
site/newsite/django_1_0/django/utils/datastructures.py
|
10
|
13100
|
class MergeDict(object):
"""
A simple class for creating new "virtual" dictionaries that actually look
up values in more than one dictionary, passed in the constructor.
If a key appears in more than one of the given dictionaries, only the
first occurrence will be used.
"""
def __init__(self, *dicts):
self.dicts = dicts
def __getitem__(self, key):
for dict_ in self.dicts:
try:
return dict_[key]
except KeyError:
pass
raise KeyError
def __copy__(self):
return self.__class__(*self.dicts)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def getlist(self, key):
for dict_ in self.dicts:
if key in dict_.keys():
return dict_.getlist(key)
return []
def items(self):
item_list = []
for dict_ in self.dicts:
item_list.extend(dict_.items())
return item_list
def has_key(self, key):
for dict_ in self.dicts:
if key in dict_:
return True
return False
__contains__ = has_key
def copy(self):
"""Returns a copy of this object."""
return self.__copy__()
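# Illustrative doctest-style example (not part of the original module):
#
#   >>> d = MergeDict({'a': 1}, {'a': 2, 'b': 3})
#   >>> d['a']  # the first dictionary containing the key wins
#   1
#   >>> d['b']
#   3
#   >>> d.get('c', 'missing')
#   'missing'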
class SortedDict(dict):
"""
A dictionary that keeps its keys in the order in which they're inserted.
"""
def __init__(self, data=None):
if data is None:
data = {}
super(SortedDict, self).__init__(data)
if isinstance(data, dict):
self.keyOrder = data.keys()
else:
self.keyOrder = []
for key, value in data:
if key not in self.keyOrder:
self.keyOrder.append(key)
def __deepcopy__(self, memo):
from copy import deepcopy
return self.__class__([(key, deepcopy(value, memo))
for key, value in self.iteritems()])
def __setitem__(self, key, value):
super(SortedDict, self).__setitem__(key, value)
if key not in self.keyOrder:
self.keyOrder.append(key)
def __delitem__(self, key):
super(SortedDict, self).__delitem__(key)
self.keyOrder.remove(key)
def __iter__(self):
for k in self.keyOrder:
yield k
def pop(self, k, *args):
result = super(SortedDict, self).pop(k, *args)
try:
self.keyOrder.remove(k)
except ValueError:
# Key wasn't in the dictionary in the first place. No problem.
pass
return result
def popitem(self):
result = super(SortedDict, self).popitem()
self.keyOrder.remove(result[0])
return result
def items(self):
return zip(self.keyOrder, self.values())
def iteritems(self):
for key in self.keyOrder:
yield key, super(SortedDict, self).__getitem__(key)
def keys(self):
return self.keyOrder[:]
def iterkeys(self):
return iter(self.keyOrder)
def values(self):
return [super(SortedDict, self).__getitem__(k) for k in self.keyOrder]
def itervalues(self):
for key in self.keyOrder:
yield super(SortedDict, self).__getitem__(key)
def update(self, dict_):
for k, v in dict_.items():
self.__setitem__(k, v)
def setdefault(self, key, default):
if key not in self.keyOrder:
self.keyOrder.append(key)
return super(SortedDict, self).setdefault(key, default)
def value_for_index(self, index):
"""Returns the value of the item at the given zero-based index."""
return self[self.keyOrder[index]]
def insert(self, index, key, value):
"""Inserts the key, value pair before the item with the given index."""
if key in self.keyOrder:
n = self.keyOrder.index(key)
del self.keyOrder[n]
if n < index:
index -= 1
self.keyOrder.insert(index, key)
super(SortedDict, self).__setitem__(key, value)
def copy(self):
"""Returns a copy of this object."""
# This way of initializing the copy means it works for subclasses, too.
obj = self.__class__(self)
obj.keyOrder = self.keyOrder[:]
return obj
def __repr__(self):
"""
Replaces the normal dict.__repr__ with a version that returns the keys
in their sorted order.
"""
return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])
def clear(self):
super(SortedDict, self).clear()
self.keyOrder = []
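# Illustrative doctest-style example (not part of the original module):
#
#   >>> d = SortedDict()
#   >>> d['b'] = 1
#   >>> d['a'] = 2
#   >>> d.keys()  # insertion order is preserved
#   ['b', 'a']
#   >>> d.insert(0, 'c', 3)
#   >>> d.keys()
#   ['c', 'b', 'a']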
class MultiValueDictKeyError(KeyError):
pass
class MultiValueDict(dict):
"""
A subclass of dictionary customized to handle multiple values for the
same key.
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
>>> d['name']
'Simon'
>>> d.getlist('name')
['Adrian', 'Simon']
>>> d.get('lastname', 'nonexistent')
'nonexistent'
>>> d.setlist('lastname', ['Holovaty', 'Willison'])
This class exists to solve the irritating problem raised by cgi.parse_qs,
which returns a list for every key, even though most Web forms submit
single name-value pairs.
"""
def __init__(self, key_to_list_mapping=()):
super(MultiValueDict, self).__init__(key_to_list_mapping)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__,
super(MultiValueDict, self).__repr__())
def __getitem__(self, key):
"""
Returns the last data value for this key, or [] if it's an empty list;
raises KeyError if not found.
"""
try:
list_ = super(MultiValueDict, self).__getitem__(key)
except KeyError:
raise MultiValueDictKeyError, "Key %r not found in %r" % (key, self)
try:
return list_[-1]
except IndexError:
return []
def __setitem__(self, key, value):
super(MultiValueDict, self).__setitem__(key, [value])
def __copy__(self):
return self.__class__(super(MultiValueDict, self).items())
def __deepcopy__(self, memo=None):
import copy
if memo is None:
memo = {}
result = self.__class__()
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo),
copy.deepcopy(value, memo))
return result
def get(self, key, default=None):
"""
Returns the last data value for the passed key. If key doesn't exist
or value is an empty list, then default is returned.
"""
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def getlist(self, key):
"""
Returns the list of values for the passed key. If key doesn't exist,
then an empty list is returned.
"""
try:
return super(MultiValueDict, self).__getitem__(key)
except KeyError:
return []
def setlist(self, key, list_):
super(MultiValueDict, self).__setitem__(key, list_)
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
return self[key]
def setlistdefault(self, key, default_list=()):
if key not in self:
self.setlist(key, default_list)
return self.getlist(key)
def appendlist(self, key, value):
"""Appends an item to the internal list associated with key."""
self.setlistdefault(key, [])
super(MultiValueDict, self).__setitem__(key, self.getlist(key) + [value])
def items(self):
"""
Returns a list of (key, value) pairs, where value is the last item in
the list associated with the key.
"""
return [(key, self[key]) for key in self.keys()]
def lists(self):
"""Returns a list of (key, list) pairs."""
return super(MultiValueDict, self).items()
def values(self):
"""Returns a list of the last value on every key list."""
return [self[key] for key in self.keys()]
def copy(self):
"""Returns a copy of this object."""
return self.__deepcopy__()
def update(self, *args, **kwargs):
"""
update() extends rather than replaces existing key lists.
Also accepts keyword args.
"""
        if len(args) > 1:
            raise TypeError, "update expected at most 1 argument, got %d" % len(args)
if args:
other_dict = args[0]
if isinstance(other_dict, MultiValueDict):
for key, value_list in other_dict.lists():
self.setlistdefault(key, []).extend(value_list)
else:
try:
for key, value in other_dict.items():
self.setlistdefault(key, []).append(value)
except TypeError:
raise ValueError, "MultiValueDict.update() takes either a MultiValueDict or dictionary"
for key, value in kwargs.iteritems():
self.setlistdefault(key, []).append(value)
class DotExpandedDict(dict):
"""
A special dictionary constructor that takes a dictionary in which the keys
may contain dots to specify inner dictionaries. It's confusing, but this
example should make sense.
>>> d = DotExpandedDict({'person.1.firstname': ['Simon'], \
'person.1.lastname': ['Willison'], \
'person.2.firstname': ['Adrian'], \
'person.2.lastname': ['Holovaty']})
>>> d
{'person': {'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}}}
>>> d['person']
{'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}}
>>> d['person']['1']
{'lastname': ['Willison'], 'firstname': ['Simon']}
# Gotcha: Results are unpredictable if the dots are "uneven":
>>> DotExpandedDict({'c.1': 2, 'c.2': 3, 'c': 1})
{'c': 1}
"""
def __init__(self, key_to_list_mapping):
for k, v in key_to_list_mapping.items():
current = self
bits = k.split('.')
for bit in bits[:-1]:
current = current.setdefault(bit, {})
# Now assign value to current position
try:
current[bits[-1]] = v
except TypeError: # Special-case if current isn't a dict.
current = {bits[-1]: v}
class ImmutableList(tuple):
"""
A tuple-like object that raises useful errors when it is asked to mutate.
Example::
>>> a = ImmutableList(range(5), warning="You cannot mutate this.")
>>> a[3] = '4'
Traceback (most recent call last):
...
AttributeError: You cannot mutate this.
"""
def __new__(cls, *args, **kwargs):
if 'warning' in kwargs:
warning = kwargs['warning']
del kwargs['warning']
else:
warning = 'ImmutableList object is immutable.'
self = tuple.__new__(cls, *args, **kwargs)
self.warning = warning
return self
    def complain(self, *args, **kwargs):
if isinstance(self.warning, Exception):
raise self.warning
else:
            raise AttributeError(self.warning)
# All list mutation functions complain.
__delitem__ = complain
__delslice__ = complain
__iadd__ = complain
__imul__ = complain
__setitem__ = complain
__setslice__ = complain
append = complain
extend = complain
insert = complain
pop = complain
remove = complain
sort = complain
reverse = complain
class DictWrapper(dict):
"""
Wraps accesses to a dictionary so that certain values (those starting with
the specified prefix) are passed through a function before being returned.
The prefix is removed before looking up the real value.
Used by the SQL construction code to ensure that values are correctly
quoted before being used.
"""
def __init__(self, data, func, prefix):
super(DictWrapper, self).__init__(data)
self.func = func
self.prefix = prefix
def __getitem__(self, key):
"""
Retrieves the real value after stripping the prefix string (if
present). If the prefix is present, pass the value through self.func
before returning, otherwise return the raw value.
"""
if key.startswith(self.prefix):
use_func = True
key = key[len(self.prefix):]
else:
use_func = False
value = super(DictWrapper, self).__getitem__(key)
if use_func:
return self.func(value)
return value
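# Illustrative sketch, not part of the original module: DictWrapper passes
# values looked up under the given prefix through `func`, stripping the
# prefix before performing the real lookup.
def _dictwrapper_example():
    d = DictWrapper({'column': 'name'}, lambda v: '"%s"' % v, 'qn_')
    assert d['column'] == 'name'  # plain lookup, raw value
    assert d['qn_column'] == '"name"'  # prefix stripped, value passed to func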
|
lgpl-3.0
| 8,426,823,669,840,034,000 | -352,138,381,262,204,800 | 31.107843 | 131 | 0.55458 | false |
mixturemodel-flow/tensorflow
|
tensorflow/tools/compatibility/ast_edits.py
|
47
|
18961
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts according to an API change specification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import collections
import os
import shutil
import sys
import tempfile
import traceback
class APIChangeSpec(object):
"""This class defines the transformations that need to happen.
This class must provide the following fields:
* `function_keyword_renames`: maps function names to a map of old -> new
argument names
* `function_renames`: maps function names to new function names
* `change_to_function`: a set of function names that have changed (for
notifications)
* `function_reorders`: maps functions whose argument order has changed to the
list of arguments in the new order
* `function_handle`: maps function names to custom handlers for the function
For an example, see `TFAPIChangeSpec`.
"""
class _FileEditTuple(collections.namedtuple(
"_FileEditTuple", ["comment", "line", "start", "old", "new"])):
"""Each edit that is recorded by a _FileEditRecorder.
Fields:
comment: A description of the edit and why it was made.
line: The line number in the file where the edit occurs (1-indexed).
    start: The column offset within the line where the edit begins (0-indexed).
old: text string to remove (this must match what was in file).
new: text string to add in place of `old`.
"""
__slots__ = ()
class _FileEditRecorder(object):
"""Record changes that need to be done to the file."""
def __init__(self, filename):
    # Edits are collected per line; each line is edited as a list of chars.
self._filename = filename
self._line_to_edit = collections.defaultdict(list)
self._errors = []
def process(self, text):
"""Process a list of strings, each corresponding to the recorded changes.
Args:
text: A list of lines of text (assumed to contain newlines)
Returns:
A tuple of the modified text and a textual description of what is done.
Raises:
ValueError: if substitution source location does not have expected text.
"""
change_report = ""
    # Iterate over each line
for line, edits in self._line_to_edit.items():
offset = 0
      # Sort by column so that edits are processed in order, making the
      # indexing adjustments cumulative for changes that alter the string
      # length.
edits.sort(key=lambda x: x.start)
      # Work on the line as a list of characters, since lists are mutable
      # and strings are not.
char_array = list(text[line - 1])
# Record a description of the change
change_report += "%r Line %d\n" % (self._filename, line)
change_report += "-" * 80 + "\n\n"
for e in edits:
change_report += "%s\n" % e.comment
change_report += "\n Old: %s" % (text[line - 1])
# Make underscore buffers for underlining where in the line the edit was
change_list = [" "] * len(text[line - 1])
change_list_new = [" "] * len(text[line - 1])
# Iterate for each edit
for e in edits:
# Create effective start, end by accounting for change in length due
# to previous edits
start_eff = e.start + offset
end_eff = start_eff + len(e.old)
# Make sure the edit is changing what it should be changing
old_actual = "".join(char_array[start_eff:end_eff])
if old_actual != e.old:
raise ValueError("Expected text %r but got %r" %
("".join(e.old), "".join(old_actual)))
# Make the edit
char_array[start_eff:end_eff] = list(e.new)
# Create the underline highlighting of the before and after
change_list[e.start:e.start + len(e.old)] = "~" * len(e.old)
change_list_new[start_eff:end_eff] = "~" * len(e.new)
# Keep track of how to generate effective ranges
offset += len(e.new) - len(e.old)
# Finish the report comment
change_report += " %s\n" % "".join(change_list)
text[line - 1] = "".join(char_array)
change_report += " New: %s" % (text[line - 1])
change_report += " %s\n\n" % "".join(change_list_new)
return "".join(text), change_report, self._errors
def add(self, comment, line, start, old, new, error=None):
"""Add a new change that is needed.
Args:
comment: A description of what was changed
line: Line number (1 indexed)
start: Column offset (0 indexed)
old: old text
new: new text
error: this "edit" is something that cannot be fixed automatically
Returns:
None
"""
self._line_to_edit[line].append(
_FileEditTuple(comment, line, start, old, new))
if error:
self._errors.append("%s:%d: %s" % (self._filename, line, error))
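# Illustrative sketch, not part of the original module: recording a single
# edit and applying it. Lines are 1-indexed, column offsets 0-indexed.
def _file_edit_recorder_example():
  recorder = _FileEditRecorder("example.py")
  recorder.add("Renamed foo to bar", line=1, start=0, old="foo", new="bar")
  new_text, report, errors = recorder.process(["foo(1)\n"])
  assert new_text == "bar(1)\n"
  assert not errors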
class _ASTCallVisitor(ast.NodeVisitor):
"""AST Visitor that processes function calls.
Updates function calls from old API version to new API version using a given
change spec.
"""
def __init__(self, filename, lines, api_change_spec):
self._filename = filename
self._file_edit = _FileEditRecorder(filename)
self._lines = lines
self._api_change_spec = api_change_spec
def process(self, lines):
return self._file_edit.process(lines)
def generic_visit(self, node):
ast.NodeVisitor.generic_visit(self, node)
def _rename_functions(self, node, full_name):
function_renames = self._api_change_spec.function_renames
try:
new_name = function_renames[full_name]
self._file_edit.add("Renamed function %r to %r" % (full_name,
new_name),
node.lineno, node.col_offset, full_name, new_name)
except KeyError:
pass
def _get_attribute_full_path(self, node):
"""Traverse an attribute to generate a full name e.g. tf.foo.bar.
Args:
node: A Node of type Attribute.
Returns:
a '.'-delimited full-name or None if the tree was not a simple form.
i.e. `foo()+b).bar` returns None, while `a.b.c` would return "a.b.c".
"""
curr = node
items = []
while not isinstance(curr, ast.Name):
if not isinstance(curr, ast.Attribute):
return None
items.append(curr.attr)
curr = curr.value
items.append(curr.id)
return ".".join(reversed(items))
def _find_true_position(self, node):
"""Return correct line number and column offset for a given node.
This is necessary mainly because ListComp's location reporting reports
the next token after the list comprehension list opening.
Args:
node: Node for which we wish to know the lineno and col_offset
"""
import re
    find_open = re.compile(r"^\s*(\[).*$")
    find_string_chars = re.compile(r"['\"]")
if isinstance(node, ast.ListComp):
# Strangely, ast.ListComp returns the col_offset of the first token
# after the '[' token which appears to be a bug. Workaround by
# explicitly finding the real start of the list comprehension.
line = node.lineno
col = node.col_offset
# loop over lines
while 1:
        # Reverse the preceding text so the regex can scan it for whitespace
text = self._lines[line-1]
reversed_preceding_text = text[:col][::-1]
# First find if a [ can be found with only whitespace between it and
# col.
m = find_open.match(reversed_preceding_text)
if m:
new_col_offset = col - m.start(1) - 1
return line, new_col_offset
else:
if (reversed_preceding_text=="" or
reversed_preceding_text.isspace()):
line = line - 1
prev_line = self._lines[line - 1]
# TODO(aselle):
# this is poor comment detection, but it is good enough for
# cases where the comment does not contain string literal starting/
# ending characters. If ast gave us start and end locations of the
# ast nodes rather than just start, we could use string literal
# node ranges to filter out spurious #'s that appear in string
# literals.
comment_start = prev_line.find("#")
if comment_start == -1:
col = len(prev_line) -1
elif find_string_chars.search(prev_line[comment_start:]) is None:
col = comment_start
else:
return None, None
else:
return None, None
    # Most other nodes return proper locations (`with` notably does not),
    # but those cannot appear inside an argument anyway.
return node.lineno, node.col_offset
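  # Illustrative sketch, not part of the original module: the ListComp
  # quirk worked around above. col_offset behavior varies by Python version:
  #
  #   comp = ast.parse("x = [i for i in y]").body[0].value
  #   comp.col_offset  # 4 (the '[') on Python >= 3.8; 5 on older versions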
def visit_Call(self, node): # pylint: disable=invalid-name
"""Handle visiting a call node in the AST.
Args:
node: Current Node
"""
# Find a simple attribute name path e.g. "tf.foo.bar"
full_name = self._get_attribute_full_path(node.func)
# Make sure the func is marked as being part of a call
node.func.is_function_for_call = True
if full_name:
# Call special handlers
function_handles = self._api_change_spec.function_handle
if full_name in function_handles:
function_handles[full_name](self._file_edit, node)
# Examine any non-keyword argument and make it into a keyword argument
# if reordering required.
function_reorders = self._api_change_spec.function_reorders
function_keyword_renames = (
self._api_change_spec.function_keyword_renames)
if full_name in function_reorders:
reordered = function_reorders[full_name]
for idx, arg in enumerate(node.args):
lineno, col_offset = self._find_true_position(arg)
if lineno is None or col_offset is None:
self._file_edit.add(
"Failed to add keyword %r to reordered function %r"
% (reordered[idx], full_name), arg.lineno, arg.col_offset,
"", "",
error="A necessary keyword argument failed to be inserted.")
else:
keyword_arg = reordered[idx]
if (full_name in function_keyword_renames and
keyword_arg in function_keyword_renames[full_name]):
keyword_arg = function_keyword_renames[full_name][keyword_arg]
self._file_edit.add("Added keyword %r to reordered function %r"
% (reordered[idx], full_name), lineno,
col_offset, "", keyword_arg + "=")
# Examine each keyword argument and convert it to the final renamed form
renamed_keywords = ({} if full_name not in function_keyword_renames else
function_keyword_renames[full_name])
for keyword in node.keywords:
argkey = keyword.arg
argval = keyword.value
if argkey in renamed_keywords:
argval_lineno, argval_col_offset = self._find_true_position(argval)
if argval_lineno is not None and argval_col_offset is not None:
# TODO(aselle): We should scan backward to find the start of the
# keyword key. Unfortunately ast does not give you the location of
# keyword keys, so we are forced to infer it from the keyword arg
# value.
key_start = argval_col_offset - len(argkey) - 1
key_end = key_start + len(argkey) + 1
if (self._lines[argval_lineno - 1][key_start:key_end] ==
argkey + "="):
self._file_edit.add("Renamed keyword argument from %r to %r" %
(argkey, renamed_keywords[argkey]),
argval_lineno,
argval_col_offset - len(argkey) - 1,
argkey + "=", renamed_keywords[argkey] + "=")
continue
self._file_edit.add(
"Failed to rename keyword argument from %r to %r" %
(argkey, renamed_keywords[argkey]),
argval.lineno,
argval.col_offset - len(argkey) - 1,
"", "",
error="Failed to find keyword lexographically. Fix manually.")
ast.NodeVisitor.generic_visit(self, node)
def visit_Attribute(self, node): # pylint: disable=invalid-name
"""Handle bare Attributes i.e. [tf.foo, tf.bar].
Args:
node: Node that is of type ast.Attribute
"""
full_name = self._get_attribute_full_path(node)
if full_name:
self._rename_functions(node, full_name)
if full_name in self._api_change_spec.change_to_function:
if not hasattr(node, "is_function_for_call"):
new_text = full_name + "()"
self._file_edit.add("Changed %r to %r"%(full_name, new_text),
node.lineno, node.col_offset, full_name, new_text)
ast.NodeVisitor.generic_visit(self, node)
class ASTCodeUpgrader(object):
"""Handles upgrading a set of Python files using a given API change spec."""
def __init__(self, api_change_spec):
if not isinstance(api_change_spec, APIChangeSpec):
raise TypeError("Must pass APIChangeSpec to ASTCodeUpgrader, got %s" %
type(api_change_spec))
self._api_change_spec = api_change_spec
def process_file(self, in_filename, out_filename):
"""Process the given python file for incompatible changes.
Args:
in_filename: filename to parse
out_filename: output file to write to
Returns:
A tuple representing number of files processed, log of actions, errors
"""
    # Write to a temporary file, just in case we are doing an in-place modify.
with open(in_filename, "r") as in_file, \
tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
ret = self.process_opened_file(
in_filename, in_file, out_filename, temp_file)
shutil.move(temp_file.name, out_filename)
return ret
# Broad exceptions are required here because ast throws whatever it wants.
# pylint: disable=broad-except
def process_opened_file(self, in_filename, in_file, out_filename, out_file):
"""Process the given python file for incompatible changes.
This function is split out to facilitate StringIO testing from
tf_upgrade_test.py.
Args:
in_filename: filename to parse
in_file: opened file (or StringIO)
out_filename: output file to write to
out_file: opened file (or StringIO)
Returns:
A tuple representing number of files processed, log of actions, errors
"""
process_errors = []
text = "-" * 80 + "\n"
text += "Processing file %r\n outputting to %r\n" % (in_filename,
out_filename)
text += "-" * 80 + "\n\n"
parsed_ast = None
lines = in_file.readlines()
try:
parsed_ast = ast.parse("".join(lines))
except Exception:
text += "Failed to parse %r\n\n" % in_filename
text += traceback.format_exc()
if parsed_ast:
visitor = _ASTCallVisitor(in_filename, lines, self._api_change_spec)
visitor.visit(parsed_ast)
out_text, new_text, process_errors = visitor.process(lines)
text += new_text
if out_file:
out_file.write(out_text)
text += "\n"
return 1, text, process_errors
# pylint: enable=broad-except
def process_tree(self, root_directory, output_root_directory,
copy_other_files):
"""Processes upgrades on an entire tree of python files in place.
    Note that only Python files are processed. If you have custom code in
    other languages, you will need to upgrade it manually.
Args:
root_directory: Directory to walk and process.
output_root_directory: Directory to use as base.
copy_other_files: Copy files that are not touched by this converter.
Returns:
      A tuple of files processed, the report string for all files, and errors
"""
# make sure output directory doesn't exist
if output_root_directory and os.path.exists(output_root_directory):
print("Output directory %r must not already exist." % (
output_root_directory))
sys.exit(1)
# make sure output directory does not overlap with root_directory
norm_root = os.path.split(os.path.normpath(root_directory))
norm_output = os.path.split(os.path.normpath(output_root_directory))
if norm_root == norm_output:
print("Output directory %r same as input directory %r" % (
root_directory, output_root_directory))
sys.exit(1)
    # Collect the list of files to process up front, so we correctly handle
    # the output directory being a subdirectory of the input directory.
files_to_process = []
files_to_copy = []
for dir_name, _, file_list in os.walk(root_directory):
py_files = [f for f in file_list if f.endswith(".py")]
copy_files = [f for f in file_list if not f.endswith(".py")]
for filename in py_files:
fullpath = os.path.join(dir_name, filename)
fullpath_output = os.path.join(
output_root_directory, os.path.relpath(fullpath, root_directory))
files_to_process.append((fullpath, fullpath_output))
if copy_other_files:
for filename in copy_files:
fullpath = os.path.join(dir_name, filename)
fullpath_output = os.path.join(
output_root_directory, os.path.relpath(fullpath, root_directory))
files_to_copy.append((fullpath, fullpath_output))
file_count = 0
tree_errors = []
report = ""
report += ("=" * 80) + "\n"
report += "Input tree: %r\n" % root_directory
report += ("=" * 80) + "\n"
for input_path, output_path in files_to_process:
output_directory = os.path.dirname(output_path)
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
file_count += 1
_, l_report, l_errors = self.process_file(input_path, output_path)
tree_errors += l_errors
report += l_report
for input_path, output_path in files_to_copy:
output_directory = os.path.dirname(output_path)
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
shutil.copy(input_path, output_path)
return file_count, report, tree_errors
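# Illustrative sketch, not part of the original module: driving the upgrader
# over one file, assuming a spec such as _ExampleAPIChangeSpec above.
def _upgrader_example():
  upgrader = ASTCodeUpgrader(_ExampleAPIChangeSpec())
  count, report, errors = upgrader.process_file("old.py", "new.py")
  print(report)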
|
apache-2.0
| 4,042,845,686,375,823,400 | 4,247,991,545,239,019,500 | 37.150905 | 80 | 0.62349 | false |
bcoca/ansible
|
test/units/galaxy/test_collection_install.py
|
15
|
43234
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import json
import os
import pytest
import re
import shutil
import stat
import tarfile
import yaml
from io import BytesIO, StringIO
from units.compat.mock import MagicMock
import ansible.module_utils.six.moves.urllib.error as urllib_error
from ansible import context
from ansible.cli.galaxy import GalaxyCLI
from ansible.errors import AnsibleError
from ansible.galaxy import collection, api, dependency_resolution
from ansible.galaxy.dependency_resolution.dataclasses import Candidate, Requirement
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.utils import context_objects as co
from ansible.utils.display import Display
class RequirementCandidates():
def __init__(self):
self.candidates = []
def func_wrapper(self, func):
def run(*args, **kwargs):
self.candidates = func(*args, **kwargs)
return self.candidates
return run
def call_galaxy_cli(args):
orig = co.GlobalCLIArgs._Singleton__instance
co.GlobalCLIArgs._Singleton__instance = None
try:
GalaxyCLI(args=['ansible-galaxy', 'collection'] + args).run()
finally:
co.GlobalCLIArgs._Singleton__instance = orig
def artifact_json(namespace, name, version, dependencies, server):
json_str = json.dumps({
'artifact': {
'filename': '%s-%s-%s.tar.gz' % (namespace, name, version),
'sha256': '2d76f3b8c4bab1072848107fb3914c345f71a12a1722f25c08f5d3f51f4ab5fd',
'size': 1234,
},
'download_url': '%s/download/%s-%s-%s.tar.gz' % (server, namespace, name, version),
'metadata': {
'namespace': namespace,
'name': name,
'dependencies': dependencies,
},
'version': version
})
return to_text(json_str)
def artifact_versions_json(namespace, name, versions, galaxy_api, available_api_versions=None):
results = []
available_api_versions = available_api_versions or {}
api_version = 'v2'
if 'v3' in available_api_versions:
api_version = 'v3'
for version in versions:
results.append({
'href': '%s/api/%s/%s/%s/versions/%s/' % (galaxy_api.api_server, api_version, namespace, name, version),
'version': version,
})
if api_version == 'v2':
json_str = json.dumps({
'count': len(versions),
'next': None,
'previous': None,
'results': results
})
if api_version == 'v3':
response = {'meta': {'count': len(versions)},
'data': results,
'links': {'first': None,
'last': None,
'next': None,
'previous': None},
}
json_str = json.dumps(response)
return to_text(json_str)
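# Illustrative sketch, not part of the original test module: for the v2 API
# the helper above produces JSON shaped like (values abridged):
#
#   {"count": 1, "next": null, "previous": null,
#    "results": [{"href": "<server>/api/v2/ns/name/versions/1.0.0/",
#                 "version": "1.0.0"}]}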
def error_json(galaxy_api, errors_to_return=None, available_api_versions=None):
errors_to_return = errors_to_return or []
available_api_versions = available_api_versions or {}
response = {}
api_version = 'v2'
if 'v3' in available_api_versions:
api_version = 'v3'
if api_version == 'v2':
assert len(errors_to_return) <= 1
if errors_to_return:
response = errors_to_return[0]
if api_version == 'v3':
response['errors'] = errors_to_return
json_str = json.dumps(response)
return to_text(json_str)
@pytest.fixture(autouse='function')
def reset_cli_args():
co.GlobalCLIArgs._Singleton__instance = None
yield
co.GlobalCLIArgs._Singleton__instance = None
@pytest.fixture()
def collection_artifact(request, tmp_path_factory):
test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
namespace = 'ansible_namespace'
collection = 'collection'
skeleton_path = os.path.join(os.path.dirname(os.path.split(__file__)[0]), 'cli', 'test_data', 'collection_skeleton')
collection_path = os.path.join(test_dir, namespace, collection)
call_galaxy_cli(['init', '%s.%s' % (namespace, collection), '-c', '--init-path', test_dir,
'--collection-skeleton', skeleton_path])
dependencies = getattr(request, 'param', None)
if dependencies:
galaxy_yml = os.path.join(collection_path, 'galaxy.yml')
with open(galaxy_yml, 'rb+') as galaxy_obj:
existing_yaml = yaml.safe_load(galaxy_obj)
existing_yaml['dependencies'] = dependencies
galaxy_obj.seek(0)
galaxy_obj.write(to_bytes(yaml.safe_dump(existing_yaml)))
galaxy_obj.truncate()
# Create a file with +x in the collection so we can test the permissions
execute_path = os.path.join(collection_path, 'runme.sh')
with open(execute_path, mode='wb') as fd:
fd.write(b"echo hi")
os.chmod(execute_path, os.stat(execute_path).st_mode | stat.S_IEXEC)
call_galaxy_cli(['build', collection_path, '--output-path', test_dir])
collection_tar = os.path.join(test_dir, '%s-%s-0.1.0.tar.gz' % (namespace, collection))
return to_bytes(collection_path), to_bytes(collection_tar)
@pytest.fixture()
def galaxy_server():
context.CLIARGS._store = {'ignore_certs': False}
galaxy_api = api.GalaxyAPI(None, 'test_server', 'https://galaxy.ansible.com')
return galaxy_api
def test_build_requirement_from_path(collection_artifact):
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
actual = Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
assert actual.namespace == u'ansible_namespace'
assert actual.name == u'collection'
assert actual.src == collection_artifact[0]
assert actual.ver == u'0.1.0'
@pytest.mark.parametrize('version', ['1.1.1', '1.1.0', '1.0.0'])
def test_build_requirement_from_path_with_manifest(version, collection_artifact):
manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
manifest_value = json.dumps({
'collection_info': {
'namespace': 'namespace',
'name': 'name',
'version': version,
'dependencies': {
'ansible_namespace.collection': '*'
}
}
})
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(to_bytes(manifest_value))
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
actual = Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
# While the folder name suggests a different collection, we treat MANIFEST.json as the source of truth.
assert actual.namespace == u'namespace'
assert actual.name == u'name'
assert actual.src == collection_artifact[0]
assert actual.ver == to_text(version)
def test_build_requirement_from_path_invalid_manifest(collection_artifact):
manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(b"not json")
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
expected = "Collection tar file member MANIFEST.json does not contain a valid json string."
with pytest.raises(AnsibleError, match=expected):
Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
def test_build_artifact_from_path_no_version(collection_artifact, monkeypatch):
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
# a collection artifact should always contain a valid version
manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
manifest_value = json.dumps({
'collection_info': {
'namespace': 'namespace',
'name': 'name',
'version': '',
'dependencies': {}
}
})
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(to_bytes(manifest_value))
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
expected = (
'^Collection metadata file `.*` at `.*` is expected to have a valid SemVer '
'version value but got {empty_unicode_string!r}$'.
format(empty_unicode_string=u'')
)
with pytest.raises(AnsibleError, match=expected):
Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
def test_build_requirement_from_path_no_version(collection_artifact, monkeypatch):
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
# version may be falsey/arbitrary strings for collections in development
manifest_path = os.path.join(collection_artifact[0], b'galaxy.yml')
metadata = {
'authors': ['Ansible'],
'readme': 'README.md',
'namespace': 'namespace',
'name': 'name',
'version': '',
'dependencies': {},
}
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(to_bytes(yaml.safe_dump(metadata)))
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
actual = Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
    # While the folder name suggests a different collection, we treat galaxy.yml as the source of truth.
assert actual.namespace == u'namespace'
assert actual.name == u'name'
assert actual.src == collection_artifact[0]
assert actual.ver == u'*'
def test_build_requirement_from_tar(collection_artifact):
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
actual = Requirement.from_requirement_dict({'name': to_text(collection_artifact[1])}, concrete_artifact_cm)
assert actual.namespace == u'ansible_namespace'
assert actual.name == u'collection'
assert actual.src == to_text(collection_artifact[1])
assert actual.ver == u'0.1.0'
def test_build_requirement_from_tar_fail_not_tar(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
test_file = os.path.join(test_dir, b'fake.tar.gz')
with open(test_file, 'wb') as test_obj:
test_obj.write(b"\x00\x01\x02\x03")
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
expected = "Collection artifact at '%s' is not a valid tar file." % to_native(test_file)
with pytest.raises(AnsibleError, match=expected):
Requirement.from_requirement_dict({'name': to_text(test_file)}, concrete_artifact_cm)
def test_build_requirement_from_tar_no_manifest(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
json_data = to_bytes(json.dumps(
{
'files': [],
'format': 1,
}
))
tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
with tarfile.open(tar_path, 'w:gz') as tfile:
b_io = BytesIO(json_data)
tar_info = tarfile.TarInfo('FILES.json')
tar_info.size = len(json_data)
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
expected = "Collection at '%s' does not contain the required file MANIFEST.json." % to_native(tar_path)
with pytest.raises(AnsibleError, match=expected):
Requirement.from_requirement_dict({'name': to_text(tar_path)}, concrete_artifact_cm)
def test_build_requirement_from_tar_no_files(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
json_data = to_bytes(json.dumps(
{
'collection_info': {},
}
))
tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
with tarfile.open(tar_path, 'w:gz') as tfile:
b_io = BytesIO(json_data)
tar_info = tarfile.TarInfo('MANIFEST.json')
tar_info.size = len(json_data)
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
with pytest.raises(KeyError, match='namespace'):
Requirement.from_requirement_dict({'name': to_text(tar_path)}, concrete_artifact_cm)
def test_build_requirement_from_tar_invalid_manifest(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
json_data = b"not a json"
tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
with tarfile.open(tar_path, 'w:gz') as tfile:
b_io = BytesIO(json_data)
tar_info = tarfile.TarInfo('MANIFEST.json')
tar_info.size = len(json_data)
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
expected = "Collection tar file member MANIFEST.json does not contain a valid json string."
with pytest.raises(AnsibleError, match=expected):
Requirement.from_requirement_dict({'name': to_text(tar_path)}, concrete_artifact_cm)
def test_build_requirement_from_name(galaxy_server, monkeypatch, tmp_path_factory):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.1.9', '2.1.10']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_version_metadata = MagicMock(
namespace='namespace', name='collection',
version='2.1.10', artifact_sha256='', dependencies={}
)
monkeypatch.setattr(api.GalaxyAPI, 'get_collection_version_metadata', mock_version_metadata)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
collections = ['namespace.collection']
requirements_file = None
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', collections[0]])
requirements = cli._require_one_of_collections_requirements(
collections, requirements_file, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, True, False, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.ver == u'2.1.10'
assert actual.src == galaxy_server
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_with_prerelease(galaxy_server, monkeypatch, tmp_path_factory):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['1.0.1', '2.0.1-beta.1', '2.0.1']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1', None, None, {})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, True, False, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'2.0.1'
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirment_from_name_with_prerelease_explicit(galaxy_server, monkeypatch, tmp_path_factory):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['1.0.1', '2.0.1-beta.1', '2.0.1']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1-beta.1', None, None,
{})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:2.0.1-beta.1'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:2.0.1-beta.1'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, True, False, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'2.0.1-beta.1'
assert mock_get_info.call_count == 1
assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.1-beta.1')
def test_build_requirement_from_name_second_server(galaxy_server, monkeypatch, tmp_path_factory):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['1.0.1', '1.0.2', '1.0.3']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '1.0.3', None, None, {})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
broken_server = copy.copy(galaxy_server)
broken_server.api_server = 'https://broken.com/'
mock_version_list = MagicMock()
mock_version_list.return_value = []
monkeypatch.setattr(broken_server, 'get_collection_versions', mock_version_list)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>1.0.1'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:>1.0.1'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(
requirements, [broken_server, galaxy_server], concrete_artifact_cm, None, True, False, False
)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'1.0.3'
assert mock_version_list.call_count == 1
assert mock_version_list.mock_calls[0][1] == ('namespace', 'collection')
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_missing(galaxy_server, monkeypatch, tmp_path_factory):
mock_open = MagicMock()
mock_open.return_value = []
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_open)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>1.0.1'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection'], None, artifacts_manager=concrete_artifact_cm
)['collections']
expected = "Failed to resolve the requested dependencies map. Could not satisfy the following requirements:\n* namespace.collection:* (direct request)"
with pytest.raises(AnsibleError, match=re.escape(expected)):
collection._resolve_depenency_map(requirements, [galaxy_server, galaxy_server], concrete_artifact_cm, None, False, True, False)
def test_build_requirement_from_name_401_unauthorized(galaxy_server, monkeypatch, tmp_path_factory):
mock_open = MagicMock()
mock_open.side_effect = api.GalaxyError(urllib_error.HTTPError('https://galaxy.server.com', 401, 'msg', {},
StringIO()), "error")
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_open)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>1.0.1'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection'], None, artifacts_manager=concrete_artifact_cm
)['collections']
expected = "error (HTTP Code: 401, Message: msg)"
with pytest.raises(api.GalaxyError, match=re.escape(expected)):
collection._resolve_depenency_map(requirements, [galaxy_server, galaxy_server], concrete_artifact_cm, None, False, False, False)
def test_build_requirement_from_name_single_version(galaxy_server, monkeypatch, tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
multi_api_proxy = collection.galaxy_api_proxy.MultiGalaxyAPIProxy([galaxy_server], concrete_artifact_cm)
dep_provider = dependency_resolution.providers.CollectionDependencyProvider(apis=multi_api_proxy, concrete_artifacts_manager=concrete_artifact_cm)
matches = RequirementCandidates()
mock_find_matches = MagicMock(side_effect=matches.func_wrapper(dep_provider.find_matches), autospec=True)
monkeypatch.setattr(dependency_resolution.providers.CollectionDependencyProvider, 'find_matches', mock_find_matches)
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.0.0']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.0', None, None,
{})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:==2.0.0'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:==2.0.0'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'2.0.0'
assert [c.ver for c in matches.candidates] == [u'2.0.0']
assert mock_get_info.call_count == 1
assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.0')
def test_build_requirement_from_name_multiple_versions_one_match(galaxy_server, monkeypatch, tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
multi_api_proxy = collection.galaxy_api_proxy.MultiGalaxyAPIProxy([galaxy_server], concrete_artifact_cm)
dep_provider = dependency_resolution.providers.CollectionDependencyProvider(apis=multi_api_proxy, concrete_artifacts_manager=concrete_artifact_cm)
matches = RequirementCandidates()
mock_find_matches = MagicMock(side_effect=matches.func_wrapper(dep_provider.find_matches), autospec=True)
monkeypatch.setattr(dependency_resolution.providers.CollectionDependencyProvider, 'find_matches', mock_find_matches)
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1', None, None,
{})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>=2.0.1,<2.0.2'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:>=2.0.1,<2.0.2'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'2.0.1'
assert [c.ver for c in matches.candidates] == [u'2.0.1']
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
assert mock_get_info.call_count == 1
assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.1')
def test_build_requirement_from_name_multiple_version_results(galaxy_server, monkeypatch, tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
multi_api_proxy = collection.galaxy_api_proxy.MultiGalaxyAPIProxy([galaxy_server], concrete_artifact_cm)
dep_provider = dependency_resolution.providers.CollectionDependencyProvider(apis=multi_api_proxy, concrete_artifacts_manager=concrete_artifact_cm)
matches = RequirementCandidates()
mock_find_matches = MagicMock(side_effect=matches.func_wrapper(dep_provider.find_matches), autospec=True)
monkeypatch.setattr(dependency_resolution.providers.CollectionDependencyProvider, 'find_matches', mock_find_matches)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.5', None, None, {})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['1.0.1', '1.0.2', '1.0.3']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2', '2.0.3', '2.0.4', '2.0.5']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:!=2.0.2'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:!=2.0.2'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'2.0.5'
# should be ordered latest to earliest
assert [c.ver for c in matches.candidates] == [u'2.0.5', u'2.0.4', u'2.0.3', u'2.0.1', u'2.0.0']
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_candidate_with_conflict(monkeypatch, tmp_path_factory, galaxy_server):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.5', None, None, {})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.0.5']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:!=2.0.5'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:!=2.0.5'], None, artifacts_manager=concrete_artifact_cm
)['collections']
expected = "Failed to resolve the requested dependencies map. Could not satisfy the following requirements:\n"
expected += "* namespace.collection:!=2.0.5 (direct request)"
with pytest.raises(AnsibleError, match=re.escape(expected)):
collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)
def test_dep_candidate_with_conflict(monkeypatch, tmp_path_factory, galaxy_server):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
mock_get_info_return = [
api.CollectionVersionMetadata('parent', 'collection', '2.0.5', None, None, {'namespace.collection': '!=1.0.0'}),
api.CollectionVersionMetadata('namespace', 'collection', '1.0.0', None, None, {}),
]
mock_get_info = MagicMock(side_effect=mock_get_info_return)
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
mock_get_versions = MagicMock(side_effect=[['2.0.5'], ['1.0.0']])
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'parent.collection:2.0.5'])
requirements = cli._require_one_of_collections_requirements(
['parent.collection:2.0.5'], None, artifacts_manager=concrete_artifact_cm
)['collections']
expected = "Failed to resolve the requested dependencies map. Could not satisfy the following requirements:\n"
expected += "* namespace.collection:!=1.0.0 (dependency of parent.collection:2.0.5)"
with pytest.raises(AnsibleError, match=re.escape(expected)):
collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)
def test_install_installed_collection(monkeypatch, tmp_path_factory, galaxy_server):
mock_installed_collections = MagicMock(return_value=[Candidate('namespace.collection', '1.2.3', None, 'dir')])
monkeypatch.setattr(collection, 'find_existing_collections', mock_installed_collections)
test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '1.2.3', None, None, {})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
mock_get_versions = MagicMock(return_value=['1.2.3', '1.3.0'])
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection'])
cli.run()
expected = "Nothing to do. All requested collections are already installed. If you want to reinstall them, consider using `--force`."
assert mock_display.mock_calls[1][1][0] == expected
def test_install_collection(collection_artifact, monkeypatch):
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
collection_tar = collection_artifact[1]
temp_path = os.path.join(os.path.split(collection_tar)[0], b'temp')
os.makedirs(temp_path)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
output_path = os.path.join(os.path.split(collection_tar)[0])
collection_path = os.path.join(output_path, b'ansible_namespace', b'collection')
os.makedirs(os.path.join(collection_path, b'delete_me')) # Create a folder to verify the install cleans out the dir
candidate = Candidate('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')
collection.install(candidate, to_text(output_path), concrete_artifact_cm)
# Ensure the temp directory is empty, nothing is left behind
assert os.listdir(temp_path) == []
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'plugins')).st_mode) == 0o0755
assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'README.md')).st_mode) == 0o0644
assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'runme.sh')).st_mode) == 0o0755
assert mock_display.call_count == 2
assert mock_display.mock_calls[0][1][0] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" \
% to_text(collection_path)
assert mock_display.mock_calls[1][1][0] == "ansible_namespace.collection:0.1.0 was installed successfully"
def test_install_collection_with_download(galaxy_server, collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
shutil.rmtree(collection_path)
collections_dir = ('%s' % os.path.sep).join(to_text(collection_path).split('%s' % os.path.sep)[:-2])
temp_path = os.path.join(os.path.split(collection_tar)[0], b'temp')
os.makedirs(temp_path)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
mock_download = MagicMock()
mock_download.return_value = collection_tar
monkeypatch.setattr(concrete_artifact_cm, 'get_galaxy_artifact_path', mock_download)
req = Requirement('ansible_namespace.collection', '0.1.0', 'https://downloadme.com', 'galaxy')
collection.install(req, to_text(collections_dir), concrete_artifact_cm)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
assert mock_display.call_count == 2
assert mock_display.mock_calls[0][1][0] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" \
% to_text(collection_path)
assert mock_display.mock_calls[1][1][0] == "ansible_namespace.collection:0.1.0 was installed successfully"
assert mock_download.call_count == 1
assert mock_download.mock_calls[0][1][0].src == 'https://downloadme.com'
assert mock_download.mock_calls[0][1][0].type == 'galaxy'
def test_install_collections_from_tar(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
shutil.rmtree(collection_path)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')]
collection.install_collections(requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm)
assert os.path.isdir(collection_path)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
with open(os.path.join(collection_path, b'MANIFEST.json'), 'rb') as manifest_obj:
actual_manifest = json.loads(to_text(manifest_obj.read()))
assert actual_manifest['collection_info']['namespace'] == 'ansible_namespace'
assert actual_manifest['collection_info']['name'] == 'collection'
assert actual_manifest['collection_info']['version'] == '0.1.0'
# Filter out the progress cursor display calls.
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert len(display_msgs) == 4
assert display_msgs[0] == "Process install dependency map"
assert display_msgs[1] == "Starting collection install process"
assert display_msgs[2] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" % to_text(collection_path)
def test_install_collections_existing_without_force(collection_artifact, monkeypatch):
    collection_path, collection_tar = collection_artifact
    temp_path = os.path.split(collection_tar)[0]
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
    assert os.path.isdir(collection_path)
    requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')]
    collection.install_collections(requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm)
    assert os.path.isdir(collection_path)
    actual_files = os.listdir(collection_path)
    actual_files.sort()
    assert actual_files == [b'README.md', b'docs', b'galaxy.yml', b'playbooks', b'plugins', b'roles', b'runme.sh']
    # Filter out the progress cursor display calls.
    display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
    assert len(display_msgs) == 1
    assert display_msgs[0] == 'Nothing to do. All requested collections are already installed. If you want to reinstall them, consider using `--force`.'
    for msg in display_msgs:
        assert 'WARNING' not in msg
def test_install_missing_metadata_warning(collection_artifact, monkeypatch):
    collection_path, collection_tar = collection_artifact
    temp_path = os.path.split(collection_tar)[0]
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)
    for file in [b'MANIFEST.json', b'galaxy.yml']:
        b_path = os.path.join(collection_path, file)
        if os.path.isfile(b_path):
            os.unlink(b_path)
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
    requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')]
    collection.install_collections(requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm)
    display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
    assert 'WARNING' in display_msgs[0]
# Makes sure we don't get stuck in some recursive loop
@pytest.mark.parametrize('collection_artifact', [
    {'ansible_namespace.collection': '>=0.0.1'},
], indirect=True)
def test_install_collection_with_circular_dependency(collection_artifact, monkeypatch):
    collection_path, collection_tar = collection_artifact
    temp_path = os.path.split(collection_tar)[0]
    shutil.rmtree(collection_path)
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
    requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')]
    collection.install_collections(requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm)
    assert os.path.isdir(collection_path)
    actual_files = os.listdir(collection_path)
    actual_files.sort()
    assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
                            b'runme.sh']
    with open(os.path.join(collection_path, b'MANIFEST.json'), 'rb') as manifest_obj:
        actual_manifest = json.loads(to_text(manifest_obj.read()))
    assert actual_manifest['collection_info']['namespace'] == 'ansible_namespace'
    assert actual_manifest['collection_info']['name'] == 'collection'
    assert actual_manifest['collection_info']['version'] == '0.1.0'
    # Filter out the progress cursor display calls.
    display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
    assert len(display_msgs) == 4
    assert display_msgs[0] == "Process install dependency map"
    assert display_msgs[1] == "Starting collection install process"
    assert display_msgs[2] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" % to_text(collection_path)
    assert display_msgs[3] == "ansible_namespace.collection:0.1.0 was installed successfully"
|
gpl-3.0
| -2,369,160,147,788,881,400 | -3,717,081,350,317,333,500 | 45.86413 | 155 | 0.692914 | false |
pleaseproject/python-for-android
|
python-modules/twisted/twisted/internet/_signals.py
|
57
|
6075
|
# -*- test-case-name: twisted.test.test_process,twisted.internet.test.test_process -*-
# Copyright (c) 2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module provides a uniform interface to the several mechanisms which are
possibly available for dealing with signals.
This module is used to integrate child process termination into a
reactor event loop. This is a challenging feature to provide because
most platforms indicate process termination via SIGCHLD and do not
provide a way to wait for that signal and arbitrary I/O events at the
same time. The naive implementation involves installing a Python
SIGCHLD handler; unfortunately this leads to other syscalls being
interrupted (whenever SIGCHLD is received) and failing with EINTR
(which almost no one is prepared to handle). This interruption can be
disabled via siginterrupt(2) (or one of the equivalent mechanisms);
however, if the SIGCHLD is delivered by the platform to a non-main
thread (not a common occurrence, but difficult to prove impossible),
the main thread (waiting on select() or another event notification
API) may not wake up leading to an arbitrary delay before the child
termination is noticed.
The basic solution to all these issues involves enabling SA_RESTART
(ie, disabling system call interruption) and registering a C signal
handler which writes a byte to a pipe. The other end of the pipe is
registered with the event loop, allowing it to wake up shortly after
SIGCHLD is received. See L{twisted.internet.posixbase._SIGCHLDWaker}
for the implementation of the event loop side of this solution. The
use of a pipe this way is known as the U{self-pipe
trick<http://cr.yp.to/docs/selfpipe.html>}.
The actual solution implemented in this module depends on the version
of Python. From version 2.6, C{signal.siginterrupt} and
C{signal.set_wakeup_fd} allow the necessary C signal handler which
writes to the pipe to be registered with C{SA_RESTART}. Prior to 2.6,
the L{twisted.internet._sigchld} extension module provides similar
functionality.
If neither of these is available, a Python signal handler is used
instead. This is essentially the naive solution mentioned above and
has the problems described there.
"""
import os
try:
from signal import set_wakeup_fd, siginterrupt
except ImportError:
set_wakeup_fd = siginterrupt = None
try:
import signal
except ImportError:
signal = None
from twisted.python.log import msg
try:
from twisted.internet._sigchld import installHandler as _extInstallHandler, \
isDefaultHandler as _extIsDefaultHandler
except ImportError:
_extInstallHandler = _extIsDefaultHandler = None
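# The module docstring above explains the self-pipe trick; the function below
# is a minimal, illustrative sketch of that pattern added for clarity. It is
# not part of Twisted's API -- the name _selfPipeTrickDemo and its shape are
# assumptions, not how Twisted itself wires up its waker.
def _selfPipeTrickDemo():
    """
    Demonstrate the self-pipe trick: a C{SIGCHLD} handler writes a byte to a
    pipe and C{select()} on the read end wakes the event loop shortly after.
    """
    import select
    readFD, writeFD = os.pipe()
    def wake(signum, frame):
        # A real handler must never raise; a single byte is enough to wake
        # whatever is blocked on the read end of the pipe.
        try:
            os.write(writeFD, '\0')
        except OSError:
            pass
    signal.signal(signal.SIGCHLD, wake)
    if siginterrupt is not None:
        # Approximate SA_RESTART so other syscalls are not interrupted.
        siginterrupt(signal.SIGCHLD, False)
    # An event loop would include readFD in its select() set; the zero
    # timeout keeps this sketch non-blocking.
    ready, _, _ = select.select([readFD], [], [], 0)
    return ready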
class _Handler(object):
"""
L{_Handler} is a signal handler which writes a byte to a file descriptor
whenever it is invoked.
@ivar fd: The file descriptor to which to write. If this is C{None},
nothing will be written.
"""
def __init__(self, fd):
self.fd = fd
def __call__(self, *args):
"""
L{_Handler.__call__} is the signal handler. It will write a byte to
the wrapped file descriptor, if there is one.
"""
if self.fd is not None:
try:
os.write(self.fd, '\0')
except:
pass
def _installHandlerUsingSignal(fd):
"""
Install a signal handler which will write a byte to C{fd} when
I{SIGCHLD} is received.
This is implemented by creating an instance of L{_Handler} with C{fd}
and installing it as the signal handler.
@param fd: The file descriptor to which to write when I{SIGCHLD} is
received.
@type fd: C{int}
"""
if fd == -1:
previous = signal.signal(signal.SIGCHLD, signal.SIG_DFL)
else:
previous = signal.signal(signal.SIGCHLD, _Handler(fd))
if isinstance(previous, _Handler):
return previous.fd
return -1
def _installHandlerUsingSetWakeup(fd):
"""
Install a signal handler which will write a byte to C{fd} when
I{SIGCHLD} is received.
This is implemented by installing an instance of L{_Handler} wrapped
around C{None}, setting the I{SIGCHLD} handler as not allowed to
interrupt system calls, and using L{signal.set_wakeup_fd} to do the
actual writing.
@param fd: The file descriptor to which to write when I{SIGCHLD} is
received.
@type fd: C{int}
"""
if fd == -1:
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
else:
signal.signal(signal.SIGCHLD, _Handler(None))
siginterrupt(signal.SIGCHLD, False)
return set_wakeup_fd(fd)
def _isDefaultHandler():
"""
Determine whether the I{SIGCHLD} handler is the default or not.
"""
return signal.getsignal(signal.SIGCHLD) == signal.SIG_DFL
def _cannotInstallHandler(fd):
"""
Fail to install a signal handler for I{SIGCHLD}.
This implementation is used when the supporting code for the other
implementations is unavailable (on Python versions 2.5 and older where
neither the L{twisted.internet._sigchld} extension nor the standard
L{signal} module is available).
@param fd: Ignored; only for compatibility with the other
implementations of this interface.
@raise RuntimeError: Always raised to indicate no I{SIGCHLD} handler can
be installed.
"""
raise RuntimeError("Cannot install a SIGCHLD handler")
def _cannotDetermineDefault():
raise RuntimeError("No usable signal API available")
if set_wakeup_fd is not None:
msg('using set_wakeup_fd')
installHandler = _installHandlerUsingSetWakeup
isDefaultHandler = _isDefaultHandler
elif _extInstallHandler is not None:
msg('using _sigchld')
installHandler = _extInstallHandler
isDefaultHandler = _extIsDefaultHandler
elif signal is not None:
msg('using signal module')
installHandler = _installHandlerUsingSignal
isDefaultHandler = _isDefaultHandler
else:
    msg('nothing available')
installHandler = _cannotInstallHandler
isDefaultHandler = _cannotDetermineDefault
|
apache-2.0
| 3,705,167,962,739,671,000 | -765,294,046,166,755,600 | 32.016304 | 86 | 0.719506 | false |
Akasurde/ansible
|
test/support/integration/plugins/module_utils/docker/swarm.py
|
61
|
10842
|
# (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) <[email protected]>
# (c) Thierry Bouvet (@tbouvet)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from time import sleep
try:
from docker.errors import APIError, NotFound
except ImportError:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
from ansible.module_utils._text import to_native
from ansible.module_utils.docker.common import (
AnsibleDockerClient,
LooseVersion,
)
class AnsibleDockerSwarmClient(AnsibleDockerClient):
def __init__(self, **kwargs):
super(AnsibleDockerSwarmClient, self).__init__(**kwargs)
def get_swarm_node_id(self):
"""
        Get the 'NodeID' of the Swarm node, or 'None' if the host is not in a Swarm. It returns the NodeID
        of the Docker host the module is executed on
:return:
NodeID of host or 'None' if not part of Swarm
"""
try:
info = self.info()
except APIError as exc:
self.fail("Failed to get node information for %s" % to_native(exc))
if info:
json_str = json.dumps(info, ensure_ascii=False)
swarm_info = json.loads(json_str)
if swarm_info['Swarm']['NodeID']:
return swarm_info['Swarm']['NodeID']
return None
def check_if_swarm_node(self, node_id=None):
"""
        Checks if the host is part of a Docker Swarm. If 'node_id' is not provided it reads the Docker host
        system information and looks for the relevant key in the output. If 'node_id' is provided it tries to
        read the node information, assuming it is run on a Swarm manager. The get_node_inspect() method handles
        the exception if it is not executed on a Swarm manager
:param node_id: Node identifier
:return:
bool: True if node is part of Swarm, False otherwise
"""
if node_id is None:
try:
info = self.info()
except APIError:
self.fail("Failed to get host information.")
if info:
json_str = json.dumps(info, ensure_ascii=False)
swarm_info = json.loads(json_str)
if swarm_info['Swarm']['NodeID']:
return True
if swarm_info['Swarm']['LocalNodeState'] in ('active', 'pending', 'locked'):
return True
return False
else:
try:
node_info = self.get_node_inspect(node_id=node_id)
except APIError:
return
if node_info['ID'] is not None:
return True
return False
def check_if_swarm_manager(self):
"""
        Checks if the node role is set as Manager in the Swarm. The node is the Docker host on which the module
        action is performed. The inspect_swarm() call will fail if the node is not a manager
:return: True if node is Swarm Manager, False otherwise
"""
try:
self.inspect_swarm()
return True
except APIError:
return False
def fail_task_if_not_swarm_manager(self):
"""
If host is not a swarm manager then Ansible task on this host should end with 'failed' state
"""
if not self.check_if_swarm_manager():
self.fail("Error running docker swarm module: must run on swarm manager node")
def check_if_swarm_worker(self):
"""
        Checks if the node role is set as Worker in the Swarm. The node is the Docker host on which the module
        action is performed. Will fail, via check_if_swarm_node(), if run on a host that is not part of a Swarm
:return: True if node is Swarm Worker, False otherwise
"""
if self.check_if_swarm_node() and not self.check_if_swarm_manager():
return True
return False
def check_if_swarm_node_is_down(self, node_id=None, repeat_check=1):
"""
        Checks if the node status on the Swarm manager is 'down'. If node_id is provided it queries the manager
        about the node specified in the parameter, otherwise it queries the manager about itself. If run on a
        Swarm worker node or a host that is not part of the Swarm it will fail the playbook
:param repeat_check: number of check attempts with 5 seconds delay between them, by default check only once
:param node_id: node ID or name, if None then method will try to get node_id of host module run on
:return:
True if node is part of swarm but its state is down, False otherwise
"""
if repeat_check < 1:
repeat_check = 1
if node_id is None:
node_id = self.get_swarm_node_id()
for retry in range(0, repeat_check):
if retry > 0:
sleep(5)
node_info = self.get_node_inspect(node_id=node_id)
if node_info['Status']['State'] == 'down':
return True
return False
def get_node_inspect(self, node_id=None, skip_missing=False):
"""
        Returns Swarm node info, as in the 'docker node inspect' command, about a single node
:param skip_missing: if True then function will return None instead of failing the task
:param node_id: node ID or name, if None then method will try to get node_id of host module run on
:return:
Single node information structure
"""
if node_id is None:
node_id = self.get_swarm_node_id()
if node_id is None:
self.fail("Failed to get node information.")
try:
node_info = self.inspect_node(node_id=node_id)
except APIError as exc:
if exc.status_code == 503:
self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
if exc.status_code == 404:
if skip_missing:
return None
self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
except Exception as exc:
self.fail("Error inspecting swarm node: %s" % exc)
json_str = json.dumps(node_info, ensure_ascii=False)
node_info = json.loads(json_str)
if 'ManagerStatus' in node_info:
if node_info['ManagerStatus'].get('Leader'):
# This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
# Check moby/moby#35437 for details
count_colons = node_info['ManagerStatus']['Addr'].count(":")
if count_colons == 1:
swarm_leader_ip = node_info['ManagerStatus']['Addr'].split(":", 1)[0] or node_info['Status']['Addr']
else:
swarm_leader_ip = node_info['Status']['Addr']
node_info['Status']['Addr'] = swarm_leader_ip
return node_info
def get_all_nodes_inspect(self):
"""
Returns Swarm node info as in 'docker node inspect' command about all registered nodes
:return:
Structure with information about all nodes
"""
try:
node_info = self.nodes()
except APIError as exc:
if exc.status_code == 503:
self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
except Exception as exc:
self.fail("Error inspecting swarm node: %s" % exc)
json_str = json.dumps(node_info, ensure_ascii=False)
node_info = json.loads(json_str)
return node_info
def get_all_nodes_list(self, output='short'):
"""
Returns list of nodes registered in Swarm
:param output: Defines format of returned data
:return:
If 'output' is 'short' then return data is list of nodes hostnames registered in Swarm,
if 'output' is 'long' then returns data is list of dict containing the attributes as in
output of command 'docker node ls'
"""
nodes_list = []
nodes_inspect = self.get_all_nodes_inspect()
if nodes_inspect is None:
return None
if output == 'short':
for node in nodes_inspect:
nodes_list.append(node['Description']['Hostname'])
elif output == 'long':
for node in nodes_inspect:
node_property = {}
node_property.update({'ID': node['ID']})
node_property.update({'Hostname': node['Description']['Hostname']})
node_property.update({'Status': node['Status']['State']})
node_property.update({'Availability': node['Spec']['Availability']})
if 'ManagerStatus' in node:
if node['ManagerStatus']['Leader'] is True:
node_property.update({'Leader': True})
node_property.update({'ManagerStatus': node['ManagerStatus']['Reachability']})
node_property.update({'EngineVersion': node['Description']['Engine']['EngineVersion']})
nodes_list.append(node_property)
else:
return None
return nodes_list
def get_node_name_by_id(self, nodeid):
return self.get_node_inspect(nodeid)['Description']['Hostname']
def get_unlock_key(self):
if self.docker_py_version < LooseVersion('2.7.0'):
return None
return super(AnsibleDockerSwarmClient, self).get_unlock_key()
def get_service_inspect(self, service_id, skip_missing=False):
"""
        Returns Swarm service info, as in the 'docker service inspect' command, about a single service
:param service_id: service ID or name
:param skip_missing: if True then function will return None instead of failing the task
:return:
Single service information structure
"""
try:
service_info = self.inspect_service(service_id)
except NotFound as exc:
if skip_missing is False:
self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
else:
return None
except APIError as exc:
if exc.status_code == 503:
self.fail("Cannot inspect service: To inspect service execute module on Swarm Manager")
self.fail("Error inspecting swarm service: %s" % exc)
except Exception as exc:
self.fail("Error inspecting swarm service: %s" % exc)
json_str = json.dumps(service_info, ensure_ascii=False)
service_info = json.loads(json_str)
return service_info
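# Illustrative sketch (added; not part of this module): one way an Ansible
# module could drive AnsibleDockerSwarmClient. The function name and the
# empty argument_spec below are assumptions for clarity, not a real module.
def _example_swarm_usage():
    """
    Minimal sketch: refuse to run off a manager node, then list the Swarm's
    node hostnames. Only meaningful inside a real Ansible module run.
    """
    client = AnsibleDockerSwarmClient(argument_spec=dict())
    # Fail the task early unless we are executing on a Swarm manager node.
    client.fail_task_if_not_swarm_manager()
    # Return the hostnames of all nodes registered in the Swarm.
    return client.get_all_nodes_list(output='short')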
|
gpl-3.0
| -5,740,601,000,794,393,000 | -7,996,379,120,386,215,000 | 37.721429 | 120 | 0.589836 | false |
shakamunyi/nova
|
nova/tests/functional/v3/test_fping.py
|
19
|
1663
|
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.compute.plugins.v3 import fping
from nova.tests.functional.v3 import test_servers
from nova.tests.unit.api.openstack.compute.contrib import test_fping
from nova import utils
class FpingSampleJsonTests(test_servers.ServersSampleBase):
extension_name = "os-fping"
def setUp(self):
super(FpingSampleJsonTests, self).setUp()
def fake_check_fping(self):
pass
self.stubs.Set(utils, "execute", test_fping.execute)
self.stubs.Set(fping.FpingController, "check_fping",
fake_check_fping)
def test_get_fping(self):
self._post_server()
response = self._do_get('os-fping')
subs = self._get_regexes()
self._verify_response('fping-get-resp', subs, response, 200)
def test_get_fping_details(self):
uuid = self._post_server()
response = self._do_get('os-fping/%s' % (uuid))
subs = self._get_regexes()
self._verify_response('fping-get-details-resp', subs, response, 200)
|
apache-2.0
| 4,930,515,483,966,082,000 | 2,647,123,243,731,557,000 | 35.955556 | 78 | 0.677691 | false |
mozilla/zamboni
|
sites/stage/settings.py
|
4
|
9237
|
from mkt.settings import * # noqa
import logging
import environ
environ.Env.read_env(env_file='/etc/zamboni/settings.env')
env = environ.Env()
ENV = env('ENV')
DOMAIN = env('DOMAIN')
SITE_URL = 'https://{0}'.format(DOMAIN)
CRONJOB_LOCK_PREFIX = DOMAIN
BROWSERID_AUDIENCES = [SITE_URL]
STATIC_URL = env('STATIC_URL')
LOCAL_MIRROR_URL = '%s_files' % STATIC_URL
ADMINS = ()
ALLOWED_HOSTS = env.list('ALLOWED_HOSTS')
DEBUG = False
TEMPLATE_DEBUG = DEBUG
DEBUG_PROPAGATE_EXCEPTIONS = False
EMAIL_URL = env.email_url('EMAIL_URL')
EMAIL_HOST = EMAIL_URL['EMAIL_HOST']
EMAIL_PORT = EMAIL_URL['EMAIL_PORT']
EMAIL_BACKEND = EMAIL_URL['EMAIL_BACKEND']
EMAIL_HOST_USER = EMAIL_URL['EMAIL_HOST_USER']
EMAIL_HOST_PASSWORD = EMAIL_URL['EMAIL_HOST_PASSWORD']
ENGAGE_ROBOTS = False
SERVER_EMAIL = env('SERVER_EMAIL')
SESSION_COOKIE_SECURE = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {}
DATABASES['default'] = env.db('DATABASES_DEFAULT_URL')
DATABASES['default']['ENGINE'] = 'django.db.backends.mysql'
DATABASES['default']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DATABASES['default']['ATOMIC_REQUESTS'] = True
DATABASES['default']['CONN_MAX_AGE'] = 5 * 60 # 5m for persistent connections.
DATABASES['slave'] = env.db('DATABASES_SLAVE_URL')
DATABASES['slave']['ENGINE'] = 'django.db.backends.mysql'
DATABASES['slave']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DATABASES['slave']['sa_pool_key'] = 'slave'
DATABASES['slave']['ATOMIC_REQUESTS'] = True
DATABASES['slave']['CONN_MAX_AGE'] = 5 * 60 # 5m for persistent connections.
SERVICES_DATABASE = env.db('SERVICES_DATABASE_URL')
SLAVE_DATABASES = ['slave']
CACHE_PREFIX = 'mkt.%s' % ENV
CACHES = {}
CACHES['default'] = env.cache('CACHES_DEFAULT')
CACHES['default']['TIMEOUT'] = 500
CACHES['default']['KEY_PREFIX'] = CACHE_PREFIX
SECRET_KEY = env('SECRET_KEY')
# Celery
BROKER_URL = env('BROKER_URL')
CELERY_ALWAYS_EAGER = False
CELERY_IGNORE_RESULT = True
CELERY_DISABLE_RATE_LIMITS = True
CELERYD_PREFETCH_MULTIPLIER = 1
NETAPP_STORAGE = env('NETAPP_STORAGE_ROOT') + '/shared_storage'
GUARDED_ADDONS_PATH = env('NETAPP_STORAGE_ROOT') + '/guarded-addons'
UPLOADS_PATH = NETAPP_STORAGE + '/uploads'
ADDON_ICONS_PATH = UPLOADS_PATH + '/addon_icons'
EXTENSION_ICONS_PATH = UPLOADS_PATH + '/extension_icons'
WEBSITE_ICONS_PATH = UPLOADS_PATH + '/website_icons'
FEATURED_APP_BG_PATH = UPLOADS_PATH + '/featured_app_background'
FEED_COLLECTION_BG_PATH = UPLOADS_PATH + '/feed_collection_background'
FEED_SHELF_BG_PATH = UPLOADS_PATH + '/feed_shelf_background'
IMAGEASSETS_PATH = UPLOADS_PATH + '/imageassets'
REVIEWER_ATTACHMENTS_PATH = UPLOADS_PATH + '/reviewer_attachment'
PREVIEWS_PATH = UPLOADS_PATH + '/previews'
WEBAPP_PROMO_IMG_PATH = UPLOADS_PATH + '/webapp_promo_imgs'
WEBSITE_PROMO_IMG_PATH = UPLOADS_PATH + '/website_promo_imgs'
SIGNED_APPS_PATH = NETAPP_STORAGE + '/signed_apps'
SIGNED_APPS_REVIEWER_PATH = NETAPP_STORAGE + '/signed_apps_reviewer'
PREVIEW_THUMBNAIL_PATH = PREVIEWS_PATH + '/thumbs/%s/%d.png'
PREVIEW_FULL_PATH = PREVIEWS_PATH + '/full/%s/%d.%s'
EXTENSIONS_PATH = NETAPP_STORAGE + '/extensions'
SIGNED_EXTENSIONS_PATH = NETAPP_STORAGE + '/signed-extensions'
LOGGING['loggers'].update({
'z.task': {'level': logging.DEBUG},
'z.pool': {'level': logging.ERROR},
})
TMP_PATH = os.path.join(NETAPP_STORAGE, 'tmp')
ADDONS_PATH = env('NETAPP_STORAGE_ROOT') + '/files'
SPIDERMONKEY = '/usr/bin/tracemonkey'
csp = 'csp.middleware.CSPMiddleware'
RESPONSYS_ID = env('RESPONSYS_ID')
ES_DEFAULT_NUM_REPLICAS = 2
ES_HOSTS = env('ES_HOSTS')
ES_URLS = ['http://%s' % h for h in ES_HOSTS]
ES_INDEXES = dict((k, '%s_%s' % (v, ENV)) for k, v in ES_INDEXES.items())
STATSD_HOST = env('STATSD_HOST')
STATSD_PORT = env.int('STATSD_PORT', default=8125)
STATSD_PREFIX = 'mkt-{0}'.format(ENV)
CEF_PRODUCT = STATSD_PREFIX
ES_TIMEOUT = 60
EXPOSE_VALIDATOR_TRACEBACKS = False
NEW_FEATURES = True
CELERYD_TASK_SOFT_TIME_LIMIT = env.int('CELERYD_TASK_SOFT_TIME_LIMIT',
default=540)
CLEANCSS_BIN = 'cleancss'
LESS_BIN = 'lessc'
STYLUS_BIN = 'stylus'
UGLIFY_BIN = 'uglifyjs'
LESS_PREPROCESS = True
XSENDFILE = True
# False in Prod
ALLOW_SELF_REVIEWS = env.bool('ALLOW_SELF_REVIEWS', default=False)
GOOGLE_ANALYTICS_CREDENTIALS = env.dict('GOOGLE_ANALYTICS_CREDENTIALS')
GOOGLE_ANALYTICS_CREDENTIALS['user_agent'] = None
GOOGLE_ANALYTICS_CREDENTIALS['token_expiry'] = datetime.datetime(2013, 1, 3, 1, 20, 16, 45465) # noqa
GOOGLE_API_CREDENTIALS = env('GOOGLE_API_CREDENTIALS')
GEOIP_URL = env('GEOIP_URL')
RAISE_ON_SIGNAL_ERROR = True
API_THROTTLE = False
NEWRELIC_ENABLE = env.bool('NEWRELIC_ENABLE', default=False)
if NEWRELIC_ENABLE:
NEWRELIC_INI = '/etc/newrelic.d/%s.ini' % DOMAIN
AES_KEYS = env.dict('AES_KEYS')
TASK_USER_ID = env('TASK_USER_ID', default=4757633)
SERVE_TMP_PATH = False
CSP_SCRIPT_SRC = CSP_SCRIPT_SRC + (STATIC_URL.rstrip('/'),)
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_DOMAIN = ".%s" % DOMAIN
MEDIA_URL = STATIC_URL + 'media/'
CACHE_MIDDLEWARE_KEY_PREFIX = CACHE_PREFIX
SYSLOG_TAG = "http_app_mkt_{0}".format(ENV)
SYSLOG_TAG2 = "http_app_mkt_{0}_timer".format(ENV)
SYSLOG_CSP = "http_app_mkt_{0}csp".format(ENV)
STATSD_PREFIX = 'marketplace-{0}'.format(ENV)
WEBAPPS_RECEIPT_KEY = env('WEBAPPS_RECEIPT_KEY')
WEBAPPS_RECEIPT_URL = env('WEBAPPS_RECEIPT_URL')
WEBAPPS_UNIQUE_BY_DOMAIN = env.bool('WEBAPPS_UNIQUE_BY_DOMAIN', default=True)
SENTRY_DSN = env('SENTRY_DSN')
WEBAPPS_PUBLIC_KEY_DIRECTORY = NETAPP_STORAGE + '/public_keys'
PRODUCT_ICON_PATH = NETAPP_STORAGE + '/product-icons'
DUMPED_APPS_PATH = NETAPP_STORAGE + '/dumped-apps'
DUMPED_USERS_PATH = NETAPP_STORAGE + '/dumped-users'
SOLITUDE_HOSTS = (env('SOLITUDE_HOSTS'),)
SOLITUDE_OAUTH = {'key': env('SOLITUDE_OAUTH_KEY'),
'secret': env('SOLITUDE_OAUTH_SECRET')}
VALIDATOR_TIMEOUT = env.int('VALIDATOR_TIMEOUT', default=180)
VALIDATOR_IAF_URLS = ['https://marketplace.firefox.com',
'https://marketplace.allizom.org',
'https://marketplace-dev.allizom.org']
# Override the limited marketplace ones with these ones from AMO. Because
# the base gets overridden in the mkt.settings file, we'll set them back again.
# Note the addition of the testing locales dbg and rtl here.
AMO_LANGUAGES = AMO_LANGUAGES + ('dbg', 'rtl', 'ln', 'tl')
LANGUAGES = lazy(langs, dict)(AMO_LANGUAGES)
LANGUAGE_URL_MAP = dict([(i.lower(), i) for i in AMO_LANGUAGES])
# Bug 748403
SIGNING_SERVER = env('SIGNING_SERVER')
SIGNING_SERVER_ACTIVE = True
SIGNING_VALID_ISSUERS = ['marketplace-cdn.allizom.org']
# Bug 793876
SIGNED_APPS_KEY = env('SIGNED_APPS_KEY')
SIGNED_APPS_SERVER_ACTIVE = True
SIGNED_APPS_SERVER = env('SIGNED_APPS_SERVER')
SIGNED_APPS_REVIEWER_SERVER_ACTIVE = True
SIGNED_APPS_REVIEWER_SERVER = env('SIGNED_APPS_REVIEWER_SERVER')
GOOGLE_ANALYTICS_DOMAIN = 'marketplace.firefox.com'
# See mkt/settings.py for more info.
APP_PURCHASE_KEY = DOMAIN
APP_PURCHASE_AUD = DOMAIN
APP_PURCHASE_TYP = 'mozilla-stage/payments/pay/v1'
APP_PURCHASE_SECRET = env('APP_PURCHASE_SECRET')
MONOLITH_PASSWORD = env('MONOLITH_PASSWORD')
MONOLITH_SERVER = env('MONOLITH_SERVER')
MONOLITH_INDEX = 'mkt{0}-time_*'.format(ENV)
# This is mainly for Marionette tests.
WEBAPP_MANIFEST_NAME = env('WEBAPP_MANIFEST_NAME', default='Marketplace Stage')
ENABLE_API_ERROR_SERVICE = env.bool('ENABLE_API_ERROR_SERVICE', default=True)
# Until Bango can properly do refunds.
BANGO_FAKE_REFUNDS = env.bool('BANGO_FAKE_REFUNDS', default=True)
ES_DEFAULT_NUM_REPLICAS = 2
ES_USE_PLUGINS = True
# Cache timeout on the /search/featured API.
CACHE_SEARCH_FEATURED_API_TIMEOUT = 60 * 5 # 5 min.
ALLOWED_CLIENTS_EMAIL_API = env.list('ALLOWED_CLIENTS_EMAIL_API')
POSTFIX_AUTH_TOKEN = env('POSTFIX_AUTH_TOKEN')
POSTFIX_DOMAIN = DOMAIN
# IARC content ratings.
IARC_ENV = env('IARC_ENV', default='test')
IARC_MOCK = False
IARC_PASSWORD = env('IARC_PASSWORD')
IARC_PLATFORM = env('IARC_PLATFORM', default='Firefox')
IARC_SERVICE_ENDPOINT = 'https://www.globalratings.com/IARCDEMOService/IARCServices.svc' # noqa
IARC_STOREFRONT_ID = env('IARC_STOREFRONT_ID', default=4)
IARC_SUBMISSION_ENDPOINT = 'https://www.globalratings.com/IARCDEMORating/Submission.aspx' # noqa
IARC_ALLOW_CERT_REUSE = True
# IARC V2
IARC_V2_STORE_ID = env('IARC_V2_STORE_ID', default=None)
IARC_V2_STORE_PASSWORD = env('IARC_V2_STORE_PASSWORD', default=None)
IARC_V2_SERVICE_ENDPOINT = env('IARC_V2_SERVICE_ENDPOINT', default=None)
IARC_V2_SUBMISSION_ENDPOINT = 'https://iarc-int.azurewebsites.net/Hosted/Index'
PAYMENT_PROVIDERS = env.list('PAYMENT_PROVIDERS', default=['bango'])
DEFAULT_PAYMENT_PROVIDER = env('DEFAULT_PAYMENT_PROVIDER', default='bango')
PRE_GENERATE_APKS = True
PRE_GENERATE_APK_URL = env('PRE_GENERATE_APK_URL')
FXA_AUTH_DOMAIN = env('FXA_AUTH_DOMAIN')
FXA_OAUTH_URL = env('FXA_OAUTH_URL')
FXA_CLIENT_ID = env('FXA_CLIENT_ID')
FXA_CLIENT_SECRET = env('FXA_CLIENT_SECRET')
FXA_SECRETS[FXA_CLIENT_ID] = FXA_CLIENT_SECRET
QA_APP_ID = 500427
RECOMMENDATIONS_API_URL = env('RECOMMENDATIONS_API_URL')
RECOMMENDATIONS_ENABLED = True
DEV_PAY_PROVIDERS = {
APP_PURCHASE_TYP: SITE_URL + '/mozpay/?req={jwt}',
}
# Bug 1145338
IAF_OVERRIDE_APPS = env.list('IAF_OVERRIDE_APPS')
|
bsd-3-clause
| 5,487,168,004,123,544,000 | -4,308,003,118,946,774,500 | 31.297203 | 102 | 0.714301 | false |
OnRampOrg/onramp
|
modules/FWC/bin/onramp_run.py
|
2
|
1220
|
#!/usr/bin/env python
#
# Curriculum Module Run Script
# - Run once per run of the module by a user
# - Run inside job submission. So in an allocation.
# - onramp_run_params.cfg file is available in current working directory
#
import os
import sys
from subprocess import call, check_call, CalledProcessError
from configobj import ConfigObj
#
# Read the configobj values
#
# This will always be the name of the file, so fine to hardcode here
conf_file = "onramp_runparams.cfg"
# Already validated the file in our onramp_preprocess.py script - no need to do it again
config = ConfigObj(conf_file)
#
# Load any modules for compiling
# - need to load mpi module on flux
#
try:
    rtn = check_call("module load mpi", shell=True)
except CalledProcessError as e:
print "Error loading module.\nError: %s" % e
sys.exit(-1)
#
# Run my program
#
os.chdir('src')
call(['time', 'mpirun', '-np', '1', 'FWC-serial', '-h', config['FWC']['grid_height'], '-w', config['FWC']['grid_width']])
# Exit 0 if all is ok
sys.exit(0)
# Exit with a negative value if there was a problem
#sys.exit(-1)
|
bsd-3-clause
| -2,378,379,285,913,207,000 | -3,454,787,004,342,953,500 | 26.727273 | 121 | 0.598361 | false |
dlazz/ansible
|
lib/ansible/modules/network/panos/panos_cert_gen_ssh.py
|
61
|
6225
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_cert_gen_ssh
short_description: generates a self-signed certificate using SSH protocol with SSH key
description:
- This module generates a self-signed certificate that can be used by GlobalProtect client, SSL connector, or
- otherwise. Root certificate must be preset on the system first. This module depends on paramiko for ssh.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- paramiko
notes:
- Checkmode is not supported.
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device being configured.
required: true
key_filename:
description:
- Location of the filename that is used for the auth. Either I(key_filename) or I(password) is required.
required: true
password:
description:
- Password credentials to use for auth. Either I(key_filename) or I(password) is required.
required: true
cert_friendly_name:
description:
- Human friendly certificate name (not CN but just a friendly name).
required: true
cert_cn:
description:
- Certificate CN (common name) embedded in the certificate signature.
required: true
signed_by:
description:
- Undersigning authority (CA) that MUST already be presents on the device.
required: true
rsa_nbits:
description:
- Number of bits used by the RSA algorithm for the certificate generation.
default: "2048"
'''
EXAMPLES = '''
# Generates a new self-signed certificate using ssh
- name: generate self signed certificate
panos_cert_gen_ssh:
ip_address: "192.168.1.1"
password: "paloalto"
cert_cn: "1.1.1.1"
cert_friendly_name: "test123"
signed_by: "root-ca"
'''
RETURN = '''
# Default return values
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import time
try:
import paramiko
HAS_LIB = True
except ImportError:
HAS_LIB = False
_PROMPTBUFF = 4096
def wait_with_timeout(module, shell, prompt, timeout=60):
now = time.time()
result = ""
while True:
if shell.recv_ready():
result += shell.recv(_PROMPTBUFF)
endresult = result.strip()
if len(endresult) != 0 and endresult[-1] == prompt:
break
if time.time() - now > timeout:
module.fail_json(msg="Timeout waiting for prompt")
return result
def generate_cert(module, ip_address, key_filename, password,
cert_cn, cert_friendly_name, signed_by, rsa_nbits):
stdout = ""
client = paramiko.SSHClient()
# add policy to accept all host keys, I haven't found
# a way to retrieve the instance SSH key fingerprint from AWS
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if not key_filename:
client.connect(ip_address, username="admin", password=password)
else:
client.connect(ip_address, username="admin", key_filename=key_filename)
shell = client.invoke_shell()
# wait for the shell to start
buff = wait_with_timeout(module, shell, ">")
stdout += buff
# generate self-signed certificate
if isinstance(cert_cn, list):
cert_cn = cert_cn[0]
cmd = 'request certificate generate signed-by {0} certificate-name {1} name {2} algorithm RSA rsa-nbits {3}\n'.format(
signed_by, cert_friendly_name, cert_cn, rsa_nbits)
shell.send(cmd)
# wait for the shell to complete
buff = wait_with_timeout(module, shell, ">")
stdout += buff
# exit
shell.send('exit\n')
if 'Success' not in buff:
module.fail_json(msg="Error generating self signed certificate: " + stdout)
client.close()
return stdout
def main():
argument_spec = dict(
ip_address=dict(required=True),
key_filename=dict(),
password=dict(no_log=True),
cert_cn=dict(required=True),
cert_friendly_name=dict(required=True),
rsa_nbits=dict(default='2048'),
signed_by=dict(required=True)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,
required_one_of=[['key_filename', 'password']])
if not HAS_LIB:
module.fail_json(msg='paramiko is required for this module')
ip_address = module.params["ip_address"]
key_filename = module.params["key_filename"]
password = module.params["password"]
cert_cn = module.params["cert_cn"]
cert_friendly_name = module.params["cert_friendly_name"]
signed_by = module.params["signed_by"]
rsa_nbits = module.params["rsa_nbits"]
try:
stdout = generate_cert(module,
ip_address,
key_filename,
password,
cert_cn,
cert_friendly_name,
signed_by,
rsa_nbits)
except Exception as exc:
module.fail_json(msg=to_native(exc))
module.exit_json(changed=True, msg="okey dokey")
if __name__ == '__main__':
main()
|
gpl-3.0
| -3,186,058,040,660,291,000 | -6,862,612,330,237,551,000 | 30.760204 | 122 | 0.632771 | false |
daphne-yu/aubio
|
python.old/aubio/task/cut.py
|
13
|
1576
|
from task import task
from aubio.aubioclass import *
class taskcut(task):
def __init__(self,input,slicetimes,params=None,output=None):
""" open the input file and initialize arguments
parameters should be set *before* calling this method.
"""
from os.path import basename,splitext
task.__init__(self,input,output=None,params=params)
self.soundoutbase, self.soundoutext = splitext(basename(self.input))
self.newname = "%s%s%09.5f%s%s" % (self.soundoutbase,".",
self.frameread*self.params.step,".",self.soundoutext)
self.fileo = sndfile(self.newname,model=self.filei)
self.myvec = fvec(self.params.hopsize,self.channels)
self.mycopy = fvec(self.params.hopsize,self.channels)
self.slicetimes = slicetimes
def __call__(self):
task.__call__(self)
# write to current file
if len(self.slicetimes) and self.frameread >= self.slicetimes[0][0]:
self.slicetimes.pop(0)
# write up to 1st zero crossing
zerocross = 0
while ( abs( self.myvec.get(zerocross,0) ) > self.params.zerothres ):
zerocross += 1
writesize = self.fileo.write(zerocross,self.myvec)
fromcross = 0
while (zerocross < self.readsize):
for i in range(self.channels):
self.mycopy.set(self.myvec.get(zerocross,i),fromcross,i)
fromcross += 1
zerocross += 1
del self.fileo
self.fileo = sndfile("%s%s%09.5f%s%s" % (self.soundoutbase,".",
self.frameread*self.params.step,".",self.soundoutext),model=self.filei)
writesize = self.fileo.write(fromcross,self.mycopy)
else:
writesize = self.fileo.write(self.readsize,self.myvec)
|
gpl-3.0
| 7,116,941,136,938,232,000 | 5,396,757,552,668,474,000 | 36.52381 | 75 | 0.702411 | false |
derekjchow/models
|
research/morph_net/op_regularizers/conv_group_lasso_regularizer_test.py
|
2
|
3639
|
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for op_regularizers.conv_group_lasso_regularizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from morph_net.op_regularizers import conv_group_lasso_regularizer
layers = tf.contrib.layers
ALIVE_THRESHOLD = 1.0
def assert_not_all_are_alive_or_dead(alive_vector):
assert not all(alive_vector), (
'All activations are alive, test case is trivial. Increase threshold')
assert any(alive_vector), (
'All activations are dead, test case is trivial. Decrease threshold')
class GroupLassoRegularizerTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
tf.reset_default_graph()
tf.set_random_seed(7907)
with tf.contrib.framework.arg_scope(
[layers.conv2d, layers.conv2d_transpose],
weights_initializer=tf.random_normal_initializer):
self.BuildModel()
with self.test_session():
tf.global_variables_initializer().run()
def BuildModel(self):
image = tf.constant(0.0, shape=[1, 17, 19, 3])
conv = layers.conv2d(image, 13, [7, 5], padding='SAME', scope='conv')
layers.conv2d_transpose(conv, 11, [5, 5], scope='convt')
# For Conv2D (Conv2DBackpropInput, aka conv2d transpose), the reduction
# indices for group lasso are (0, 1, 2) ((0, 1, 3)).
@parameterized.named_parameters(
('_regular_conv', 'conv/Conv2D', (0, 1, 2), 0.0),
('_transpose_conv', 'convt/conv2d_transpose', (0, 1, 3), 0.0),
('_regular_conv_l10.5', 'conv/Conv2D', (0, 1, 2), 0.5))
def testOp(self, op_name, axis, l1_fraction):
op = tf.get_default_graph().get_operation_by_name(op_name)
with self.test_session():
weights = op.inputs[1].eval()
l1_reg_vector = np.mean(np.abs(weights), axis=axis)
l2_reg_vector = np.sqrt(np.mean(weights**2, axis=axis))
expected_reg_vector = (
l1_fraction * l1_reg_vector + (1.0 - l1_fraction) * l2_reg_vector)
# We choose the threshold at the expectation value, so that some activations
# end up above threshold and others end up below. The weights are normally
# distributed, so the L2 norm is 1.0, and the L1 norm is sqrt(2/pi).
# With a general l1_fraction, we compute a weighted average of the two:
threshold = (1.0 - l1_fraction) + l1_fraction * np.sqrt(2 / np.pi)
expected_alive = expected_reg_vector > threshold
assert_not_all_are_alive_or_dead(expected_alive)
conv_reg = (
conv_group_lasso_regularizer.ConvGroupLassoRegularizer(
op, threshold=threshold, l1_fraction=l1_fraction))
with self.test_session():
actual_reg_vector = conv_reg.regularization_vector.eval()
actual_alive = conv_reg.alive_vector.eval()
self.assertAllClose(expected_reg_vector, actual_reg_vector)
self.assertAllEqual(expected_alive, actual_alive)
if __name__ == '__main__':
tf.test.main()
|
apache-2.0
| -5,607,525,104,580,974,000 | 7,265,323,945,698,762,000 | 38.554348 | 80 | 0.680956 | false |
SerpentCS/odoo
|
addons/website_report/controllers/main.py
|
243
|
1460
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.website.controllers.main import Website
from openerp.http import request, route
class Website(Website):
@route()
def customize_template_get(self, xml_id, full=False):
res = super(Website, self).customize_template_get(xml_id, full=full)
if full:
for r in request.session.get('report_view_ids', []):
res += super(Website, self).customize_template_get(r.get('xml_id'), full=full)
return res
|
agpl-3.0
| -3,517,937,525,633,126,400 | -6,244,245,207,911,536,000 | 41.941176 | 94 | 0.618493 | false |
mrhubbs/merge_csv
|
work.py
|
1
|
2511
|
"""
10-20-15
"""
import tempfile
import csv
def load_csv_as_dict(csv_path):
"""
Loads a CSV into a dictionary.
"""
with open(csv_path, 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
header_row = reader.next()
dat = [[] for _ in header_row]
row_len = len(header_row)
for row in reader:
row_idx = 0
if len(row) < row_len:
print("row too small, skipping")
continue
while row_idx < row_len:
try:
val = float(row[row_idx])
except (ValueError, SyntaxError):
val = str(row[row_idx])
dat[row_idx].append(val)
row_idx += 1
return {h: d for h, d in zip(header_row, dat)}
def save_dict_as_csv(dat, csv_path):
"""
Saves, in the CSV format, the data in the dict dat to the file
specified by csv_path.
"""
# Create a temporary csv file to write to.
csv_temp = tempfile.TemporaryFile()
writer = csv.writer(csv_temp, delimiter=',')
# Write the header.
writer.writerow(dat.keys())
# Write the rest of the data.
idx = 0
the_data = dat.values()
length = len(the_data[0])
header_range = range(len(dat.keys()))
while idx < length:
# Build the row.
row = [the_data[i][idx] for i in header_range]
# Write the row.
writer.writerow(row)
idx += 1
# Copy the temporary csv file to the actual file we should be outputting
# to. Not writing directly to our output file prevents us from corrupting
# it if something goes wrong.
copy_temp_file(csv_temp, csv_path)
csv_temp.close()
def get_smallest_number_of_lines(d):
lengths = [len(i) for i in d.values()]
if len(lengths) < 1:
return 0
else:
return min(lengths)
def truncate_dict(d, length):
for key, value in d.items():
d[key] = value[:length]
return d
def merge_dicts_by_mappings(dicts, mappings):
out = {}
for dictkey, mappings in mappings.items():
for _from, _to in mappings:
out[_to] = dicts[dictkey][_from]
return out
def copy_temp_file(temp_fd, fpath, bs=4096):
"""
Copies all data written to temp_fd to the file specified by fpath.
"""
temp_fd.seek(0)
copy = open(fpath, 'w')
dat = temp_fd.read(bs)
while dat:
copy.write(dat)
dat = temp_fd.read(bs)
copy.close()
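# Illustrative usage sketch (added; not in the original module): load two
# CSVs, merge selected columns under new names, and save the result. The
# file names and the column names in the mappings are assumed values.
if __name__ == '__main__':
    left = load_csv_as_dict('left.csv')
    right = load_csv_as_dict('right.csv')
    # Truncate both inputs to the same number of rows before merging.
    length = min(get_smallest_number_of_lines(left),
                 get_smallest_number_of_lines(right))
    dicts = {'left': truncate_dict(left, length),
             'right': truncate_dict(right, length)}
    # Each mapping entry is a ('source column', 'output column') pair.
    mappings = {'left': [('time', 'time')],
                'right': [('speed', 'speed_mph')]}
    save_dict_as_csv(merge_dicts_by_mappings(dicts, mappings), 'merged.csv')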
|
gpl-2.0
| -3,712,199,236,590,659,000 | 1,165,873,395,471,809,500 | 22.036697 | 78 | 0.557149 | false |
Jenselme/AutobahnPython
|
examples/twisted/wamp/rpc/arguments/backend.py
|
2
|
2587
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from os import environ
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
class Component(ApplicationSession):
"""
An application component providing procedures with different kinds
of arguments.
"""
@inlineCallbacks
def onJoin(self, details):
print("session attached")
def ping():
return
def add2(a, b):
return a + b
def stars(nick="somebody", stars=0):
return u"{} starred {}x".format(nick, stars)
def orders(product, limit=5):
return [u"Product {}".format(i) for i in range(50)][:limit]
def arglen(*args, **kwargs):
return [len(args), len(kwargs)]
yield self.register(ping, u'com.arguments.ping')
yield self.register(add2, u'com.arguments.add2')
yield self.register(stars, u'com.arguments.stars')
yield self.register(orders, u'com.arguments.orders')
yield self.register(arglen, u'com.arguments.arglen')
print("Procedures registered; ready for frontend.")
if __name__ == '__main__':
runner = ApplicationRunner(
environ.get("AUTOBAHN_DEMO_ROUTER", u"ws://127.0.0.1:8080/ws"),
u"crossbardemo",
)
runner.run(Component)
|
mit
| 7,128,921,889,894,515,000 | 3,070,760,526,574,012,000 | 35.43662 | 79 | 0.650947 | false |
cnbeining/videospeeder
|
videospeeder.py
|
2
|
8729
|
#!/usr/bin/env python
#coding:utf-8
# Author: Beining --<ACICFG>
# Contact: http://www.cnbeining.com/ |https://github.com/cnbeining/videospeeder
# Purpose: Acceletate video to bypass Letvcloud's transcode.
# Created: 08/28/2014
# LICENSE: GNU v2
import sys
import os
import os, sys, subprocess, shlex, re
from subprocess import call
import uuid
import math
import shutil
import getopt
#----------------------------------------------------------------------
def probe_file(filename):
cmnd = ['ffprobe', '-show_format', '-pretty', '-loglevel', 'quiet', filename]
p = subprocess.Popen(cmnd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#print filename
out, err = p.communicate()
#print out
if err:
print err
return None
return out
#----------------------------------------------------------------------
def time_to_sec(time_raw):
"""
str->int
ignore .*."""
time_list = time_raw.split(':')
hr = int(time_list[0]) * 3600
minute = int(time_list[1]) * 60
sec = int(float(time_list[2]))
return int(hr + minute + sec)
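# Illustrative self-check of the helper above (added, not in the original):
# '01:02:03.5' is 1 h + 2 min + 3.5 s, truncated to 3723 whole seconds.
assert time_to_sec('01:02:03.5') == 3723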
#----------------------------------------------------------------------
def get_abspath(filename):
""""""
return str(os.path.abspath(filename))
#----------------------------------------------------------------------
def process(filename, target_bitrate, speedtime, outputfile):
"""str,int,float,str->?
filename,outputfile comes with the path."""
tmpdir = '/tmp/videospeeder-' + str(uuid.uuid4())
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
audio_format = ''
audio_duration = ''
video_duration_sec = 0
video_size_byte = 0
audio_bitrate = ''
audio_duration_sec = 0
audio_size_byte = 0
video_format = ''
video_duration = ''
video_bitrate = ''
#demux audio file
print('INFO: Checking audio...')
os.system('ffmpeg -i \'' + filename + '\' -vn -c:a copy ' + tmpdir+'/audio.aac' +' > /dev/null 2>&1')
try:
for line in probe_file(tmpdir+'/audio.aac').split('\n'):
if 'format_name' in line:
audio_format = str(line.split('=')[1])
if 'duration' in line:
audio_duration = str(line.split('=')[1])
except:
print('ERROR: Cannot read audio file!')
shutil.rmtree(tmpdir)
exit()
#In case someone screw the audio up
if not 'aac' in audio_format:
print(audio_format)
print('ERROR: You have to use AAC as audio format!')
shutil.rmtree(tmpdir)
exit()
#Check original file
try:
for line in probe_file(filename).split('\n'):
if 'duration' in line:
video_duration = str(line.split('=')[1])
#Sti. there's a tag called "totalduration"...
break
except:
print('ERROR: Cannot read video file!')
shutil.rmtree(tmpdir)
exit()
#Calc...
#By bitrate
if target_bitrate != 0:
print('INFO: Doing calculation...')
try:
video_duration_sec = time_to_sec(video_duration)
video_size_byte = int(os.path.getsize(filename))
audio_duration_sec = time_to_sec(audio_duration)
audio_size_byte = int(os.path.getsize(tmpdir+'/audio.aac'))
except:
print('ERROR: Cannot calculate time, did you input a bitrate too high?')
shutil.rmtree(tmpdir)
exit()
try:
os.remove(tmpdir+'/audio.aac')
pass
except:
print('WARNING: Cannot remove the aac file now...')
time_audio = float(((audio_size_byte * 8.0) / audio_duration_sec) / 1180000)
time_video = float(((video_size_byte * 8.0) / video_duration_sec) / target_bitrate)
if time_audio < 1 and time_video < 1:
print('ERROR: Cannot calculate target, your target bitrate is higher than the original file!')
shutil.rmtree(tmpdir)
exit()
if time_audio == 1 and time_video == 1:
speedtime = 1.1
elif time_audio > time_video:
speedtime = time_audio
else:
speedtime = time_video
#Make patch
print('INFO: Adding ' + str(speedtime - 1) + ' times to audio...')
py_path = sys.path[0]
os.chdir(py_path)
os.system('ffmpeg -i \'' + filename + '\' -c copy ' + tmpdir+'/video.mkv' +'> /dev/null 2>&1')
os.system('mkvextract timecodes_v2 '+ tmpdir + '/video.mkv 0:' + tmpdir +'/tc-track0.txt '+ '1:' + tmpdir +'/tc-track1.txt > /dev/null 2>&1')
#Video
f = open(tmpdir + '/tc-track0.txt', 'r')
video_timecode = f.readlines()
f.close()
video_timecode_speed = '# timecode format v2' + '\n'
for i in video_timecode[1:]:
video_timecode_speed = video_timecode_speed + str(float(i.strip()) * speedtime) + '\n'
f = open(tmpdir + '/video_timecode_speed.txt', 'w')
f.write(video_timecode_speed)
f.close()
#Audio
f = open(tmpdir + '/tc-track1.txt', 'r')
audio_timecode = f.readlines()
f.close()
audio_timecode_speed = '# timecode format v2' + '\n'
for i in audio_timecode[1:]:
audio_timecode_speed = audio_timecode_speed + str(float(i.strip()) * speedtime) + '\n'
f = open(tmpdir + '/audio_timecode_speed.txt', 'w')
f.write(audio_timecode_speed)
f.close()
py_path = sys.path[0]
os.chdir(py_path)
print('INFO: Making patched mkv...')
os.system('mkvmerge -o ' + tmpdir + '/video_patched.mkv --timecodes 0:' + tmpdir + '/video_timecode_speed.txt --timecodes 1:' + tmpdir + '/audio_timecode_speed.txt ' +tmpdir + '/video.mkv > /dev/null 2>&1')
try:
os.remove(tmpdir+'/video.mkv')
pass
except:
print('WARNING: Cannot remove the temporary mkv file now...')
print('INFO: Making final output file...')
os.system('ffmpeg -i ' + tmpdir + '/video_patched.mkv -c copy '+outputfile +'> /dev/null 2>&1')
print('Done!')
#clean up
try:
shutil.rmtree(tmpdir)
except:
print('ERROR: Cannot remove temp dir, do it by yourself!')
#----------------------------------------------------------------------
def usage():
""""""
print('''Usage:
python videospeeder.py (-h) (-i input.mp4) (-o output.mp4) (-b 0) (-x 3)
-h: Default: None
Help.
-i: Default: Blank
Input file.
If the file and audioblacker are not under the same path,
it is suggested to use absolute path to avoid possible failure.
-o Default: input_filename.black.mp4
Output file.
Would be in the same folder with the original file if not specified.
-b: Default: 0
Target bitrate.
-x: Default: 3
Target speeding time.
If bitrate is set, it will override the speeding time, if also set.
Videospeeder will calculate both audio and video timing to make sure
that both the audio and the video meets the requirments.
Please notice that if your original video/audio bitrate is too small,
Videospeeder would throw you an ERROR and quit.
''')
#----------------------------------------------------------------------
if __name__=='__main__':
argv_list = []
argv_list = sys.argv[1:]
filename = ''
target_bitrate = 0
outputfile = ''
speedtime = 3
try:
        opts, args = getopt.getopt(argv_list, "hi:b:x:o:", ['help', 'input=', 'bitrate=', 'speedtime=',
                                                            'outputfile='])
except getopt.GetoptError:
usage()
exit()
for o, a in opts:
if o in ('-h', '--help'):
usage()
exit()
elif o in ('-i', '--input'):
filename = a
try:
argv_list.remove('-i')
except:
break
elif o in ('-b', '--bitrate'):
target_bitrate = int(a)
try:
argv_list.remove('-b')
except:
break
elif o in ('-x', '--speedtime'):
speedtime = int(a)
try:
argv_list.remove('-x')
except:
break
elif o in ('-o', '--outputfile'):
outputfile = a
try:
argv_list.remove('-o')
except:
break
if filename == '':
print('ERROR: No input file!')
exit()
if outputfile == '':
outputfile = filename.split('.')[0]
for i in filename.split('.')[1:-1]:
outputfile = outputfile + '.' + i
outputfile = outputfile + '.speed.mp4'
process(filename, target_bitrate, speedtime, outputfile)
exit()
|
gpl-2.0
| -4,584,543,461,234,426,000 | 6,530,764,499,340,313,000 | 33.101563 | 212 | 0.528354 | false |
2mny/mylar
|
mylar/torrent/clients/utorrent.py
|
2
|
2494
|
import os
from libs.utorrent.client import UTorrentClient
# Only compatible with uTorrent 3.0+
class TorrentClient(object):
def __init__(self):
self.conn = None
def connect(self, host, username, password):
if self.conn is not None:
return self.conn
if not host:
return False
if username and password:
self.conn = UTorrentClient(
host,
username,
password
)
else:
self.conn = UTorrentClient(host)
return self.conn
    def find_torrent(self, hash):
        # Initialize so we return False instead of raising UnboundLocalError
        # when the hash is not present in the torrent list.
        torrent = None
        try:
            torrent_list = self.conn.list()[1]
            for t in torrent_list['torrents']:
                if t[0] == hash:
                    torrent = t
                    break
        except Exception:
            raise
        return torrent if torrent else False
def get_torrent(self, torrent):
if not torrent[26]:
            raise Exception('Only compatible with uTorrent 3.0+')
torrent_files = []
torrent_completed = False
torrent_directory = os.path.normpath(torrent[26])
try:
if torrent[4] == 1000:
torrent_completed = True
files = self.conn.getfiles(torrent[0])[1]['files'][1]
for f in files:
if not os.path.normpath(f[0]).startswith(torrent_directory):
file_path = os.path.join(torrent_directory, f[0].lstrip('/'))
else:
file_path = f[0]
torrent_files.append(file_path)
torrent_info = {
'hash': torrent[0],
'name': torrent[2],
'label': torrent[11] if torrent[11] else '',
'folder': torrent[26],
'completed': torrent_completed,
'files': torrent_files,
}
except Exception:
raise
return torrent_info
def start_torrent(self, torrent_hash):
return self.conn.start(torrent_hash)
def stop_torrent(self, torrent_hash):
return self.conn.stop(torrent_hash)
def delete_torrent(self, torrent):
deleted = []
try:
files = self.conn.getfiles(torrent[0])[1]['files'][1]
for f in files:
deleted.append(os.path.normpath(os.path.join(torrent[26], f[0])))
self.conn.removedata(torrent[0])
except Exception:
raise
return deleted
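# Illustrative sketch (added; not part of mylar): a typical call sequence for
# TorrentClient. The host URL, credentials, and info-hash are assumed values.
def _example_usage():
    client = TorrentClient()
    client.connect('http://localhost:8080/gui/', 'admin', 'secret')
    torrent = client.find_torrent('0123456789abcdef0123456789abcdef01234567')
    if torrent:
        # get_torrent() requires uTorrent 3.0+ so the save path is populated.
        return client.get_torrent(torrent)
    return None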
|
gpl-3.0
| -8,487,072,989,798,353,000 | 2,722,333,477,715,441,700 | 24.979167 | 81 | 0.514435 | false |
gnowgi/gnowsys-studio
|
objectapp/spam_checker/backends/all_is_spam.py
|
3
|
2624
|
# Copyright (c) 2011, 2012 Free Software Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This project incorporates work covered by the following copyright and permission notice:
# Copyright (c) 2009, Julien Fache
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the author nor the names of other
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
"""All is spam, spam checker backend for Objectapp"""
def backend(comment, content_object, request):
"""Backend for setting all comments to spam"""
return True
|
agpl-3.0
| -789,794,925,686,651,400 | 6,259,945,681,204,185,000 | 48.509434 | 92 | 0.73971 | false |
louietsai/python-for-android
|
python3-alpha/python3-src/Lib/test/test_docxmlrpc.py
|
54
|
7827
|
from xmlrpc.server import DocXMLRPCServer
import http.client
import sys
from test import support
threading = support.import_module('threading')
import time
import socket
import unittest
PORT = None
def make_request_and_skipIf(condition, reason):
# If we skip the test, we have to make a request because the
# the server created in setUp blocks expecting one to come in.
if not condition:
return lambda func: func
def decorator(func):
def make_request_and_skip(self):
self.client.request("GET", "/")
self.client.getresponse()
raise unittest.SkipTest(reason)
return make_request_and_skip
return decorator
def server(evt, numrequests):
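    # Serve exactly `numrequests` requests, publish the bound port through the
    # module-global PORT, and signal `evt` once the server has shut down.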
serv = DocXMLRPCServer(("localhost", 0), logRequests=False)
try:
global PORT
PORT = serv.socket.getsockname()[1]
# Add some documentation
serv.set_server_title("DocXMLRPCServer Test Documentation")
serv.set_server_name("DocXMLRPCServer Test Docs")
serv.set_server_documentation(
"This is an XML-RPC server's documentation, but the server "
"can be used by POSTing to /RPC2. Try self.add, too.")
# Create and register classes and functions
class TestClass(object):
def test_method(self, arg):
"""Test method's docs. This method truly does very little."""
self.arg = arg
serv.register_introspection_functions()
serv.register_instance(TestClass())
def add(x, y):
"""Add two instances together. This follows PEP008, but has nothing
to do with RFC1952. Case should matter: pEp008 and rFC1952. Things
that start with http and ftp should be auto-linked, too:
http://google.com.
"""
return x + y
serv.register_function(add)
serv.register_function(lambda x, y: x-y)
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.server_close()
PORT = None
evt.set()
class DocXMLRPCHTTPGETServer(unittest.TestCase):
def setUp(self):
self._threads = support.threading_setup()
# Enable server feedback
DocXMLRPCServer._send_traceback_header = True
self.evt = threading.Event()
threading.Thread(target=server, args=(self.evt, 1)).start()
# wait for port to be assigned
n = 1000
while n > 0 and PORT is None:
time.sleep(0.001)
n -= 1
self.client = http.client.HTTPConnection("localhost:%d" % PORT)
def tearDown(self):
self.client.close()
self.evt.wait()
# Disable server feedback
DocXMLRPCServer._send_traceback_header = False
support.threading_cleanup(*self._threads)
def test_valid_get_response(self):
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader("Content-type"), "text/html")
# Server throws an exception if we don't start to read the data
response.read()
def test_invalid_get_response(self):
self.client.request("GET", "/spam")
response = self.client.getresponse()
self.assertEqual(response.status, 404)
self.assertEqual(response.getheader("Content-type"), "text/plain")
response.read()
def test_lambda(self):
"""Test that lambda functionality stays the same. The output produced
currently is, I suspect invalid because of the unencoded brackets in the
HTML, "<lambda>".
The subtraction lambda method is tested.
"""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn((b'<dl><dt><a name="-<lambda>"><strong>'
b'<lambda></strong></a>(x, y)</dt></dl>'),
response.read())
@make_request_and_skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_autolinking(self):
"""Test that the server correctly automatically wraps references to
PEPS and RFCs with links, and that it linkifies text starting with
http or ftp protocol prefixes.
The documentation for the "add" method contains the test material.
"""
self.client.request("GET", "/")
response = self.client.getresponse().read()
self.assertIn(
(b'<dl><dt><a name="-add"><strong>add</strong></a>(x, y)</dt><dd>'
b'<tt>Add two instances together. This '
b'follows <a href="http://www.python.org/dev/peps/pep-0008/">'
b'PEP008</a>, but has nothing<br>\nto do '
b'with <a href="http://www.rfc-editor.org/rfc/rfc1952.txt">'
b'RFC1952</a>. Case should matter: pEp008 '
b'and rFC1952. Things<br>\nthat start '
b'with http and ftp should be '
b'auto-linked, too:<br>\n<a href="http://google.com">'
b'http://google.com</a>.</tt></dd></dl>'), response)
@make_request_and_skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_system_methods(self):
"""Test the precense of three consecutive system.* methods.
This also tests their use of parameter type recognition and the
systems related to that process.
"""
self.client.request("GET", "/")
response = self.client.getresponse().read()
self.assertIn(
(b'<dl><dt><a name="-system.methodHelp"><strong>system.methodHelp'
b'</strong></a>(method_name)</dt><dd><tt><a href="#-system.method'
b'Help">system.methodHelp</a>(\'add\') => "Adds '
b'two integers together"<br>\n <br>\nReturns a'
b' string containing documentation for '
b'the specified method.</tt></dd></dl>\n<dl><dt><a name'
b'="-system.methodSignature"><strong>system.methodSignature</strong>'
b'</a>(method_name)</dt><dd><tt><a href="#-system.methodSignature">'
b'system.methodSignature</a>(\'add\') => [double, '
b'int, int]<br>\n <br>\nReturns a list '
b'describing the signature of the method.'
b' In the<br>\nabove example, the add '
b'method takes two integers as arguments'
b'<br>\nand returns a double result.<br>\n '
b'<br>\nThis server does NOT support system'
b'.methodSignature.</tt></dd></dl>\n<dl><dt><a name="-test_method">'
b'<strong>test_method</strong></a>(arg)</dt><dd><tt>Test '
b'method\'s docs. This method truly does'
b' very little.</tt></dd></dl>'), response)
def test_autolink_dotted_methods(self):
"""Test that selfdot values are made strong automatically in the
documentation."""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn(b"""Try self.<strong>add</strong>, too.""",
response.read())
def test_main():
support.run_unittest(DocXMLRPCHTTPGETServer)
if __name__ == '__main__':
test_main()
|
apache-2.0
| 5,204,292,303,289,409,000 | 3,910,861,494,365,181,000 | 38.530303 | 82 | 0.604702 | false |
vabue/RatticWeb
|
cred/migrations/0009_auto__del_field_cred_category.py
|
7
|
5872
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Cred.category'
db.delete_column('cred_cred', 'category_id')
def backwards(self, orm):
# Adding field 'Cred.category'
db.add_column('cred_cred', 'category',
self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['cred.Tag'], null=True, blank=True),
keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'cred.cred': {
'Meta': {'object_name': 'Cred'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'child_creds'", 'default': 'None', 'to': "orm['cred.Tag']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
},
'cred.credaudit': {
'Meta': {'object_name': 'CredAudit'},
'audittype': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'cred': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': "orm['cred.Cred']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'credlogs'", 'to': "orm['auth.User']"})
},
'cred.credchangeq': {
'Meta': {'object_name': 'CredChangeQ'},
'cred': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cred.Cred']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'cred.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
}
}
complete_apps = ['cred']
|
gpl-2.0
| 1,294,871,639,913,571,300 | 228,859,554,648,156,480 | 64.255556 | 212 | 0.543086 | false |
waprin/google-cloud-python
|
gcloud/streaming/test_util.py
|
8
|
1660
|
import unittest2
class Test_calculate_wait_for_retry(unittest2.TestCase):
def _callFUT(self, *args, **kw):
from gcloud.streaming.util import calculate_wait_for_retry
return calculate_wait_for_retry(*args, **kw)
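    # The tests below pin random.uniform to its lower or upper bound so the
    # jittered backoff becomes deterministic.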
def test_w_negative_jitter_lt_max_wait(self):
import random
from gcloud._testing import _Monkey
with _Monkey(random, uniform=lambda lower, upper: lower):
self.assertEqual(self._callFUT(1, 60), 1.5)
def test_w_positive_jitter_gt_max_wait(self):
import random
from gcloud._testing import _Monkey
with _Monkey(random, uniform=lambda lower, upper: upper):
self.assertEqual(self._callFUT(4, 10), 10)
class Test_acceptable_mime_type(unittest2.TestCase):
def _callFUT(self, *args, **kw):
from gcloud.streaming.util import acceptable_mime_type
return acceptable_mime_type(*args, **kw)
def test_pattern_wo_slash(self):
with self.assertRaises(ValueError) as err:
self._callFUT(['text/*'], 'BOGUS')
self.assertEqual(
err.exception.args,
('Invalid MIME type: "BOGUS"',))
def test_accept_pattern_w_semicolon(self):
with self.assertRaises(ValueError) as err:
self._callFUT(['text/*;charset=utf-8'], 'text/plain')
self.assertEqual(
err.exception.args,
('MIME patterns with parameter unsupported: '
'"text/*;charset=utf-8"',))
def test_miss(self):
self.assertFalse(self._callFUT(['image/*'], 'text/plain'))
def test_hit(self):
self.assertTrue(self._callFUT(['text/*'], 'text/plain'))
|
apache-2.0
| -4,261,342,471,415,635,500 | -5,291,374,616,830,282,000 | 33.583333 | 66 | 0.621687 | false |
SGCreations/Flask
|
Work/TriviaMVA/TriviaMVA/env/Lib/site-packages/setuptools/tests/test_easy_install.py
|
73
|
13214
|
"""Easy install Tests
"""
import sys
import os
import shutil
import tempfile
import unittest
import site
import contextlib
import textwrap
import tarfile
import logging
import distutils.core
from setuptools.compat import StringIO, BytesIO, next, urlparse
from setuptools.sandbox import run_setup, SandboxViolation
from setuptools.command.easy_install import (
easy_install, fix_jython_executable, get_script_args, nt_quote_arg)
from setuptools.command.easy_install import PthDistributions
from setuptools.command import easy_install as easy_install_pkg
from setuptools.dist import Distribution
from pkg_resources import Distribution as PRDistribution
import setuptools.tests.server
class FakeDist(object):
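    # Minimal stand-in for a pkg_resources distribution: just enough surface
    # (get_entry_map / as_requirement) for get_script_args to build a
    # console-script wrapper.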
def get_entry_map(self, group):
if group != 'console_scripts':
return {}
return {'name': 'ep'}
def as_requirement(self):
return 'spec'
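# Expected body of the console-script wrapper that get_script_args should
# generate for FakeDist's single 'name' entry point.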
WANTED = """\
#!%s
# EASY-INSTALL-ENTRY-SCRIPT: 'spec','console_scripts','name'
__requires__ = 'spec'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('spec', 'console_scripts', 'name')()
)
""" % nt_quote_arg(fix_jython_executable(sys.executable, ""))
SETUP_PY = """\
from setuptools import setup
setup(name='foo')
"""
class TestEasyInstallTest(unittest.TestCase):
def test_install_site_py(self):
dist = Distribution()
cmd = easy_install(dist)
cmd.sitepy_installed = False
cmd.install_dir = tempfile.mkdtemp()
try:
cmd.install_site_py()
sitepy = os.path.join(cmd.install_dir, 'site.py')
self.assertTrue(os.path.exists(sitepy))
finally:
shutil.rmtree(cmd.install_dir)
def test_get_script_args(self):
dist = FakeDist()
old_platform = sys.platform
try:
name, script = [i for i in next(get_script_args(dist))][0:2]
finally:
sys.platform = old_platform
self.assertEqual(script, WANTED)
def test_no_find_links(self):
# new option '--no-find-links', that blocks find-links added at
# the project level
dist = Distribution()
cmd = easy_install(dist)
cmd.check_pth_processing = lambda: True
cmd.no_find_links = True
cmd.find_links = ['link1', 'link2']
cmd.install_dir = os.path.join(tempfile.mkdtemp(), 'ok')
cmd.args = ['ok']
cmd.ensure_finalized()
self.assertEqual(cmd.package_index.scanned_urls, {})
# let's try without it (default behavior)
cmd = easy_install(dist)
cmd.check_pth_processing = lambda: True
cmd.find_links = ['link1', 'link2']
cmd.install_dir = os.path.join(tempfile.mkdtemp(), 'ok')
cmd.args = ['ok']
cmd.ensure_finalized()
keys = sorted(cmd.package_index.scanned_urls.keys())
self.assertEqual(keys, ['link1', 'link2'])
class TestPTHFileWriter(unittest.TestCase):
def test_add_from_cwd_site_sets_dirty(self):
'''a pth file manager should set dirty
if a distribution is in site but also the cwd
'''
pth = PthDistributions('does-not_exist', [os.getcwd()])
self.assertTrue(not pth.dirty)
pth.add(PRDistribution(os.getcwd()))
self.assertTrue(pth.dirty)
def test_add_from_site_is_ignored(self):
if os.name != 'nt':
location = '/test/location/does-not-have-to-exist'
else:
location = 'c:\\does_not_exist'
pth = PthDistributions('does-not_exist', [location, ])
self.assertTrue(not pth.dirty)
pth.add(PRDistribution(location))
self.assertTrue(not pth.dirty)
class TestUserInstallTest(unittest.TestCase):
def setUp(self):
self.dir = tempfile.mkdtemp()
setup = os.path.join(self.dir, 'setup.py')
f = open(setup, 'w')
f.write(SETUP_PY)
f.close()
self.old_cwd = os.getcwd()
os.chdir(self.dir)
self.old_enable_site = site.ENABLE_USER_SITE
self.old_file = easy_install_pkg.__file__
self.old_base = site.USER_BASE
site.USER_BASE = tempfile.mkdtemp()
self.old_site = site.USER_SITE
site.USER_SITE = tempfile.mkdtemp()
easy_install_pkg.__file__ = site.USER_SITE
def tearDown(self):
os.chdir(self.old_cwd)
shutil.rmtree(self.dir)
shutil.rmtree(site.USER_BASE)
shutil.rmtree(site.USER_SITE)
site.USER_BASE = self.old_base
site.USER_SITE = self.old_site
site.ENABLE_USER_SITE = self.old_enable_site
easy_install_pkg.__file__ = self.old_file
def test_user_install_implied(self):
site.ENABLE_USER_SITE = True # disabled sometimes
        # XXX: replace with something meaningful
dist = Distribution()
dist.script_name = 'setup.py'
cmd = easy_install(dist)
cmd.args = ['py']
cmd.ensure_finalized()
self.assertTrue(cmd.user, 'user should be implied')
def test_multiproc_atexit(self):
try:
__import__('multiprocessing')
except ImportError:
# skip the test if multiprocessing is not available
return
log = logging.getLogger('test_easy_install')
logging.basicConfig(level=logging.INFO, stream=sys.stderr)
log.info('this should not break')
def test_user_install_not_implied_without_usersite_enabled(self):
site.ENABLE_USER_SITE = False # usually enabled
        # XXX: replace with something meaningful
dist = Distribution()
dist.script_name = 'setup.py'
cmd = easy_install(dist)
cmd.args = ['py']
cmd.initialize_options()
self.assertFalse(cmd.user, 'NOT user should be implied')
def test_local_index(self):
# make sure the local index is used
# when easy_install looks for installed
# packages
new_location = tempfile.mkdtemp()
target = tempfile.mkdtemp()
egg_file = os.path.join(new_location, 'foo-1.0.egg-info')
f = open(egg_file, 'w')
try:
f.write('Name: foo\n')
finally:
f.close()
sys.path.append(target)
old_ppath = os.environ.get('PYTHONPATH')
os.environ['PYTHONPATH'] = os.path.pathsep.join(sys.path)
try:
dist = Distribution()
dist.script_name = 'setup.py'
cmd = easy_install(dist)
cmd.install_dir = target
cmd.args = ['foo']
cmd.ensure_finalized()
cmd.local_index.scan([new_location])
res = cmd.easy_install('foo')
self.assertEqual(os.path.realpath(res.location),
os.path.realpath(new_location))
finally:
sys.path.remove(target)
for basedir in [new_location, target, ]:
if not os.path.exists(basedir) or not os.path.isdir(basedir):
continue
try:
shutil.rmtree(basedir)
except:
pass
if old_ppath is not None:
os.environ['PYTHONPATH'] = old_ppath
else:
del os.environ['PYTHONPATH']
def test_setup_requires(self):
"""Regression test for Distribute issue #318
Ensure that a package with setup_requires can be installed when
setuptools is installed in the user site-packages without causing a
SandboxViolation.
"""
test_setup_attrs = {
'name': 'test_pkg', 'version': '0.0',
'setup_requires': ['foobar'],
'dependency_links': [os.path.abspath(self.dir)]
}
test_pkg = os.path.join(self.dir, 'test_pkg')
test_setup_py = os.path.join(test_pkg, 'setup.py')
os.mkdir(test_pkg)
f = open(test_setup_py, 'w')
f.write(textwrap.dedent("""\
import setuptools
setuptools.setup(**%r)
""" % test_setup_attrs))
f.close()
foobar_path = os.path.join(self.dir, 'foobar-0.1.tar.gz')
make_trivial_sdist(
foobar_path,
textwrap.dedent("""\
import setuptools
setuptools.setup(
name='foobar',
version='0.1'
)
"""))
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = StringIO()
sys.stderr = StringIO()
try:
try:
with reset_setup_stop_context():
run_setup(test_setup_py, ['install'])
except SandboxViolation:
self.fail('Installation caused SandboxViolation')
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
class TestSetupRequires(unittest.TestCase):
def test_setup_requires_honors_fetch_params(self):
"""
When easy_install installs a source distribution which specifies
setup_requires, it should honor the fetch parameters (such as
allow-hosts, index-url, and find-links).
"""
# set up a server which will simulate an alternate package index.
p_index = setuptools.tests.server.MockServer()
p_index.start()
netloc = 1
p_index_loc = urlparse(p_index.url)[netloc]
if p_index_loc.endswith(':0'):
# Some platforms (Jython) don't find a port to which to bind,
# so skip this test for them.
return
# create an sdist that has a build-time dependency.
with TestSetupRequires.create_sdist() as dist_file:
with tempdir_context() as temp_install_dir:
with environment_context(PYTHONPATH=temp_install_dir):
ei_params = ['--index-url', p_index.url,
'--allow-hosts', p_index_loc,
'--exclude-scripts', '--install-dir', temp_install_dir,
dist_file]
with reset_setup_stop_context():
with argv_context(['easy_install']):
# attempt to install the dist. It should fail because
# it doesn't exist.
self.assertRaises(SystemExit,
easy_install_pkg.main, ei_params)
# there should have been two or three requests to the server
# (three happens on Python 3.3a)
self.assertTrue(2 <= len(p_index.requests) <= 3)
self.assertEqual(p_index.requests[0].path, '/does-not-exist/')
@staticmethod
@contextlib.contextmanager
def create_sdist():
"""
Return an sdist with a setup_requires dependency (of something that
doesn't exist)
"""
with tempdir_context() as dir:
dist_path = os.path.join(dir, 'setuptools-test-fetcher-1.0.tar.gz')
make_trivial_sdist(
dist_path,
textwrap.dedent("""
import setuptools
setuptools.setup(
name="setuptools-test-fetcher",
version="1.0",
setup_requires = ['does-not-exist'],
)
""").lstrip())
yield dist_path
def make_trivial_sdist(dist_path, setup_py):
"""Create a simple sdist tarball at dist_path, containing just a
setup.py, the contents of which are provided by the setup_py string.
"""
setup_py_file = tarfile.TarInfo(name='setup.py')
try:
# Python 3 (StringIO gets converted to io module)
MemFile = BytesIO
except AttributeError:
MemFile = StringIO
setup_py_bytes = MemFile(setup_py.encode('utf-8'))
setup_py_file.size = len(setup_py_bytes.getvalue())
dist = tarfile.open(dist_path, 'w:gz')
try:
dist.addfile(setup_py_file, fileobj=setup_py_bytes)
finally:
dist.close()
@contextlib.contextmanager
def tempdir_context(cd=lambda dir: None):
temp_dir = tempfile.mkdtemp()
orig_dir = os.getcwd()
try:
cd(temp_dir)
yield temp_dir
finally:
cd(orig_dir)
shutil.rmtree(temp_dir)
@contextlib.contextmanager
def environment_context(**updates):
old_env = os.environ.copy()
os.environ.update(updates)
try:
yield
finally:
for key in updates:
del os.environ[key]
os.environ.update(old_env)
@contextlib.contextmanager
def argv_context(repl):
old_argv = sys.argv[:]
sys.argv[:] = repl
yield
sys.argv[:] = old_argv
@contextlib.contextmanager
def reset_setup_stop_context():
"""
When the setuptools tests are run using setup.py test, and then
one wants to invoke another setup() command (such as easy_install)
within those tests, it's necessary to reset the global variable
in distutils.core so that the setup() command will run naturally.
"""
setup_stop_after = distutils.core._setup_stop_after
distutils.core._setup_stop_after = None
yield
distutils.core._setup_stop_after = setup_stop_after
|
apache-2.0
| 7,176,824,960,145,501,000 | -3,489,857,216,441,280,500 | 32.538071 | 81 | 0.583775 | false |
Scalr/pecha
|
scalrctl/commands/farm.py
|
2
|
6362
|
__author__ = 'Dmitriy Korsakov'
__doc__ = 'Farm management'
import json
import copy
from scalrctl import commands
from scalrctl import click
from scalrctl import request, settings
class FarmTerminate(commands.SimplifiedAction):
epilog = "Example: scalr-ctl farms terminate --farmId <ID> --force"
post_template = {
"terminateFarmRequest": {"force": True}
}
def get_options(self):
hlp = "It is used to terminate the Server immediately ignoring scalr.system.server_terminate_timeout."
force_terminate = click.Option(('--force', 'force'), is_flag=True, default=False, help=hlp)
options = [force_terminate, ]
options.extend(super(FarmTerminate, self).get_options())
return options
def pre(self, *args, **kwargs):
"""
before request is made
"""
force = kwargs.pop("force", None)
post_data = copy.deepcopy(self.post_template)
post_data["terminateFarmRequest"]["force"] = force
kv = {"import-data": post_data}
kv.update(kwargs)
arguments, kw = super(FarmTerminate, self).pre(*args, **kv)
return arguments, kw
class FarmLaunch(commands.SimplifiedAction):
epilog = "Example: scalr-ctl farms launch --farmId <ID>"
post_template = {}
def pre(self, *args, **kwargs):
"""
before request is made
"""
kv = {"import-data": {}}
kv.update(kwargs)
arguments, kw = super(FarmLaunch, self).pre(*args, **kv)
return arguments, kw
class FarmClone(commands.SimplifiedAction):
epilog = "Example: scalr-ctl farms clone --farmId <ID> --name MyNewFarm"
post_template = {
"cloneFarmRequest": {"name": ""}
}
def get_options(self):
hlp = "The name of a new Farm."
name = click.Option(('--name', 'name'), required=True, help=hlp)
options = [name, ]
options.extend(super(FarmClone, self).get_options())
return options
def pre(self, *args, **kwargs):
"""
before request is made
"""
name = kwargs.pop("name", None)
post_data = copy.deepcopy(self.post_template)
post_data["cloneFarmRequest"]["name"] = name
kv = {"import-data": post_data}
kv.update(kwargs)
arguments, kw = super(FarmClone, self).pre(*args, **kv)
return arguments, kw
class FarmSuspend(FarmLaunch):
epilog = "Example: scalr-ctl farms suspend --farmId <ID>"
post_template = {}
class FarmResume(FarmLaunch):
epilog = "Example: scalr-ctl farms resume --farmId <ID>"
post_template = {}
class FarmLock(commands.SimplifiedAction):
epilog = "Example: scalr-ctl farm lock --farmId <ID> --comment <COMMENT> --unlock-permission <ANYONE|OWNER|TEAM>"
post_template = {
"lockFarmRequest": {"lockComment": "", "unlockPermission": "anyone"}
}
def get_options(self):
comment = click.Option(('--lockComment', 'comment'), default="", help="Comment to lock a Farm.")
hlp = "If you would like to prevent other users unlocking the Farm you should set 'owner' options.\
With 'team' options only members of the Farm's Teams can unlock this Farm.\
Default value 'anyone' means that anyone with access can unlock this Farm."
unlock_permission = click.Option((
'--unlockPermission', 'unlock_permission'),
default="anyone", show_default=True, help=hlp)
options = [comment, unlock_permission]
options.extend(super(FarmLock, self).get_options())
return options
def pre(self, *args, **kwargs):
"""
before request is made
"""
comment = kwargs.pop("comment", None)
unlock_permission = kwargs.pop("unlock_permission", "anyone")
post_data = copy.deepcopy(self.post_template)
post_data["lockFarmRequest"]["lockComment"] = comment
post_data["lockFarmRequest"]["unlockPermission"] = unlock_permission
kv = {"import-data": post_data}
kv.update(kwargs)
arguments, kw = super(FarmLock, self).pre(*args, **kv)
return arguments, kw
class FarmCreateFromTemplate(commands.Action):
def pre(self, *args, **kwargs):
"""
before request is made
"""
kwargs = self._apply_arguments(**kwargs)
stdin = kwargs.pop('stdin', None)
kwargs["FarmTemplate"] = self._read_object() if stdin else self._edit_example()
return args, kwargs
def run(self, *args, **kwargs):
"""
Callback for click subcommand.
"""
hide_output = kwargs.pop('hide_output', False) # [ST-88]
args, kwargs = self.pre(*args, **kwargs)
uri = self._request_template
payload = {}
data = {}
if '{envId}' in uri and not kwargs.get('envId') and settings.envId:
kwargs['envId'] = settings.envId
if kwargs:
# filtering in-body and empty params
uri = self._request_template.format(**kwargs)
for key, value in kwargs.items():
param = '{{{}}}'.format(key)
if value and (param not in self._request_template):
data.update(value)
if self.dry_run:
click.echo('{} {} {} {}'.format(self.http_method, uri,
payload, data))
# returns dummy response
return json.dumps({'data': {}, 'meta': {}})
data = json.dumps(data)
raw_response = request.request(self.http_method, self.api_level,
uri, payload, data)
response = self.post(raw_response)
text = self._format_response(response, hidden=hide_output, **kwargs)
if text is not None:
click.echo(text)
return response
def _edit_example(self):
commentary = \
'''# The body must be a valid FarmTemplate object.
#
# Type your FarmTemplate object below this line. The above text will not be sent to the API server.'''
text = click.edit(commentary)
if text:
raw_object = "".join([line for line in text.splitlines()
if not line.startswith("#")]).strip()
else:
raw_object = ""
return json.loads(raw_object)
|
apache-2.0
| -3,397,602,341,050,004,000 | -1,215,703,792,951,753,200 | 31.793814 | 117 | 0.584565 | false |
romeubertho/USP-IntroPython
|
django/learning_log/learning_log/learning_log/settings.py
|
1
|
3158
|
"""
Django settings for learning_log project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '1m0oxdx$6bd^qj+si7&+sv38rg!1y^5=e^dsmd15=_27e(o!gy'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
    # My applications
'learning_logs',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'learning_log.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'learning_log.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
mit
| -2,199,174,246,038,705,700 | 5,229,728,319,286,833,000 | 24.885246 | 91 | 0.686827 | false |
zulip/django
|
tests/delete_regress/tests.py
|
182
|
13415
|
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS, models, transaction
from django.db.utils import ConnectionHandler
from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature
from .models import (
Award, AwardNote, Book, Child, Eaten, Email, File, Food, FooFile,
FooFileProxy, FooImage, FooPhoto, House, Image, Item, Location, Login,
OrderedPerson, OrgUnit, Person, Photo, PlayedWith, PlayedWithNote, Policy,
Researcher, Toy, Version,
)
# Can't run this test under SQLite, because you can't
# get two connections to an in-memory database.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
class DeleteLockingTest(TransactionTestCase):
available_apps = ['delete_regress']
def setUp(self):
# Create a second connection to the default database
new_connections = ConnectionHandler(settings.DATABASES)
self.conn2 = new_connections[DEFAULT_DB_ALIAS]
self.conn2.set_autocommit(False)
def tearDown(self):
# Close down the second connection.
self.conn2.rollback()
self.conn2.close()
def test_concurrent_delete(self):
"""Concurrent deletes don't collide and lock the database (#9479)."""
with transaction.atomic():
Book.objects.create(id=1, pagecount=100)
Book.objects.create(id=2, pagecount=200)
Book.objects.create(id=3, pagecount=300)
with transaction.atomic():
# Start a transaction on the main connection.
self.assertEqual(3, Book.objects.count())
# Delete something using another database connection.
with self.conn2.cursor() as cursor2:
cursor2.execute("DELETE from delete_regress_book WHERE id = 1")
self.conn2.commit()
# In the same transaction on the main connection, perform a
# queryset delete that covers the object deleted with the other
# connection. This causes an infinite loop under MySQL InnoDB
# unless we keep track of already deleted objects.
Book.objects.filter(pagecount__lt=250).delete()
self.assertEqual(1, Book.objects.count())
class DeleteCascadeTests(TestCase):
def test_generic_relation_cascade(self):
"""
Django cascades deletes through generic-related objects to their
reverse relations.
"""
person = Person.objects.create(name='Nelson Mandela')
award = Award.objects.create(name='Nobel', content_object=person)
AwardNote.objects.create(note='a peace prize',
award=award)
self.assertEqual(AwardNote.objects.count(), 1)
person.delete()
self.assertEqual(Award.objects.count(), 0)
# first two asserts are just sanity checks, this is the kicker:
self.assertEqual(AwardNote.objects.count(), 0)
def test_fk_to_m2m_through(self):
"""
If an M2M relationship has an explicitly-specified through model, and
some other model has an FK to that through model, deletion is cascaded
from one of the participants in the M2M, to the through model, to its
related model.
"""
juan = Child.objects.create(name='Juan')
paints = Toy.objects.create(name='Paints')
played = PlayedWith.objects.create(child=juan, toy=paints,
date=datetime.date.today())
PlayedWithNote.objects.create(played=played,
note='the next Jackson Pollock')
self.assertEqual(PlayedWithNote.objects.count(), 1)
paints.delete()
self.assertEqual(PlayedWith.objects.count(), 0)
# first two asserts just sanity checks, this is the kicker:
self.assertEqual(PlayedWithNote.objects.count(), 0)
def test_15776(self):
policy = Policy.objects.create(pk=1, policy_number="1234")
version = Version.objects.create(policy=policy)
location = Location.objects.create(version=version)
Item.objects.create(version=version, location=location)
policy.delete()
class DeleteCascadeTransactionTests(TransactionTestCase):
available_apps = ['delete_regress']
def test_inheritance(self):
"""
Auto-created many-to-many through tables referencing a parent model are
correctly found by the delete cascade when a child of that parent is
deleted.
Refs #14896.
"""
r = Researcher.objects.create()
email = Email.objects.create(
label="office-email", email_address="[email protected]"
)
r.contacts.add(email)
email.delete()
def test_to_field(self):
"""
Cascade deletion works with ForeignKey.to_field set to non-PK.
"""
apple = Food.objects.create(name="apple")
Eaten.objects.create(food=apple, meal="lunch")
apple.delete()
self.assertFalse(Food.objects.exists())
self.assertFalse(Eaten.objects.exists())
class LargeDeleteTests(TestCase):
def test_large_deletes(self):
"Regression for #13309 -- if the number of objects > chunk size, deletion still occurs"
for x in range(300):
Book.objects.create(pagecount=x + 100)
# attach a signal to make sure we will not fast-delete
def noop(*args, **kwargs):
pass
models.signals.post_delete.connect(noop, sender=Book)
Book.objects.all().delete()
models.signals.post_delete.disconnect(noop, sender=Book)
self.assertEqual(Book.objects.count(), 0)
class ProxyDeleteTest(TestCase):
"""
Tests on_delete behavior for proxy models.
See #16128.
"""
def create_image(self):
"""Return an Image referenced by both a FooImage and a FooFile."""
# Create an Image
test_image = Image()
test_image.save()
foo_image = FooImage(my_image=test_image)
foo_image.save()
# Get the Image instance as a File
test_file = File.objects.get(pk=test_image.pk)
foo_file = FooFile(my_file=test_file)
foo_file.save()
return test_image
def test_delete_proxy(self):
"""
Deleting the *proxy* instance bubbles through to its non-proxy and
*all* referring objects are deleted.
"""
self.create_image()
Image.objects.all().delete()
# An Image deletion == File deletion
self.assertEqual(len(Image.objects.all()), 0)
self.assertEqual(len(File.objects.all()), 0)
# The Image deletion cascaded and *all* references to it are deleted.
self.assertEqual(len(FooImage.objects.all()), 0)
self.assertEqual(len(FooFile.objects.all()), 0)
def test_delete_proxy_of_proxy(self):
"""
Deleting a proxy-of-proxy instance should bubble through to its proxy
and non-proxy parents, deleting *all* referring objects.
"""
test_image = self.create_image()
# Get the Image as a Photo
test_photo = Photo.objects.get(pk=test_image.pk)
foo_photo = FooPhoto(my_photo=test_photo)
foo_photo.save()
Photo.objects.all().delete()
# A Photo deletion == Image deletion == File deletion
self.assertEqual(len(Photo.objects.all()), 0)
self.assertEqual(len(Image.objects.all()), 0)
self.assertEqual(len(File.objects.all()), 0)
# The Photo deletion should have cascaded and deleted *all*
# references to it.
self.assertEqual(len(FooPhoto.objects.all()), 0)
self.assertEqual(len(FooFile.objects.all()), 0)
self.assertEqual(len(FooImage.objects.all()), 0)
def test_delete_concrete_parent(self):
"""
Deleting an instance of a concrete model should also delete objects
referencing its proxy subclass.
"""
self.create_image()
File.objects.all().delete()
# A File deletion == Image deletion
self.assertEqual(len(File.objects.all()), 0)
self.assertEqual(len(Image.objects.all()), 0)
# The File deletion should have cascaded and deleted *all* references
# to it.
self.assertEqual(len(FooFile.objects.all()), 0)
self.assertEqual(len(FooImage.objects.all()), 0)
def test_delete_proxy_pair(self):
"""
If a pair of proxy models are linked by an FK from one concrete parent
to the other, deleting one proxy model cascade-deletes the other, and
the deletion happens in the right order (not triggering an
IntegrityError on databases unable to defer integrity checks).
Refs #17918.
"""
# Create an Image (proxy of File) and FooFileProxy (proxy of FooFile,
# which has an FK to File)
image = Image.objects.create()
as_file = File.objects.get(pk=image.pk)
FooFileProxy.objects.create(my_file=as_file)
Image.objects.all().delete()
self.assertEqual(len(FooFileProxy.objects.all()), 0)
def test_19187_values(self):
with self.assertRaises(TypeError):
Image.objects.values().delete()
with self.assertRaises(TypeError):
Image.objects.values_list().delete()
class Ticket19102Tests(TestCase):
"""
Test different queries which alter the SELECT clause of the query. We
also must be using a subquery for the deletion (that is, the original
query has a join in it). The deletion should be done as "fast-path"
deletion (that is, just one query for the .delete() call).
Note that .values() is not tested here on purpose. .values().delete()
doesn't work for non fast-path deletes at all.
"""
def setUp(self):
self.o1 = OrgUnit.objects.create(name='o1')
self.o2 = OrgUnit.objects.create(name='o2')
self.l1 = Login.objects.create(description='l1', orgunit=self.o1)
self.l2 = Login.objects.create(description='l2', orgunit=self.o2)
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_annotate(self):
with self.assertNumQueries(1):
Login.objects.order_by('description').filter(
orgunit__name__isnull=False
).annotate(
n=models.Count('description')
).filter(
n=1, pk=self.l1.pk
).delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_extra(self):
with self.assertNumQueries(1):
Login.objects.order_by('description').filter(
orgunit__name__isnull=False
).extra(
select={'extraf': '1'}
).filter(
pk=self.l1.pk
).delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
@skipUnlessDBFeature('can_distinct_on_fields')
def test_ticket_19102_distinct_on(self):
# Both Login objs should have same description so that only the one
# having smaller PK will be deleted.
Login.objects.update(description='description')
with self.assertNumQueries(1):
Login.objects.distinct('description').order_by('pk').filter(
orgunit__name__isnull=False
).delete()
# Assumed that l1 which is created first has smaller PK.
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_select_related(self):
with self.assertNumQueries(1):
Login.objects.filter(
pk=self.l1.pk
).filter(
orgunit__name__isnull=False
).order_by(
'description'
).select_related('orgunit').delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_defer(self):
with self.assertNumQueries(1):
Login.objects.filter(
pk=self.l1.pk
).filter(
orgunit__name__isnull=False
).order_by(
'description'
).only('id').delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
class OrderedDeleteTests(TestCase):
def test_meta_ordered_delete(self):
# When a subquery is performed by deletion code, the subquery must be
        # cleared of all ordering. There was a bug that caused _meta ordering
# to be used. Refs #19720.
h = House.objects.create(address='Foo')
OrderedPerson.objects.create(name='Jack', lives_in=h)
OrderedPerson.objects.create(name='Bob', lives_in=h)
OrderedPerson.objects.filter(lives_in__address='Foo').delete()
self.assertEqual(OrderedPerson.objects.count(), 0)
|
bsd-3-clause
| 6,881,175,808,583,520,000 | 630,638,318,367,748,100 | 36.472067 | 95 | 0.631308 | false |
ericpre/hyperspy
|
hyperspy/tests/samfire/test_samfire.py
|
2
|
18637
|
# -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import copy
import gc
import dill
import numpy as np
import pytest
import hyperspy.api as hs
from hyperspy.misc.utils import DictionaryTreeBrowser
from hyperspy.samfire_utils.samfire_worker import create_worker
N_WORKERS = 1
class Mock_queue(object):
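    # Test double for a worker queue: put() records each value so tests can
    # inspect what would have been sent.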
def __init__(self):
self.var = []
def put(self, value):
self.var.append(value)
def generate_test_model():
from hyperspy.signals import Signal1D
from hyperspy.components1d import Gaussian, Lorentzian
from scipy.ndimage import gaussian_filter
total = None
blurs = [1.5]
rnd = np.random.RandomState(17)
n_im = 400
radius = 5
domain = 15
    # build a circular mask of radius `radius` centred in the square domain
cent = (domain // 2, domain // 2)
y, x = np.ogrid[-cent[0]:domain - cent[0], -cent[1]:domain - cent[1]]
mask = x * x + y * y <= radius * radius
lor_map = None
for blur in blurs:
s = Signal1D(np.ones((domain, domain, n_im)))
cent = tuple([int(0.5 * i) for i in s.data.shape[:-1]])
m0 = s.create_model()
gs01 = Lorentzian()
m0.append(gs01)
gs01.gamma.map['values'][:] = 50
gs01.gamma.map['is_set'][:] = True
gs01.centre.map['values'][:] = 300
gs01.centre.map['values'][mask] = 400
gs01.centre.map['values'] = gaussian_filter(
gs01.centre.map['values'],
blur)
gs01.centre.map['is_set'][:] = True
gs01.A.map['values'][:] = 100 * \
rnd.rand(domain, domain) + 300000
gs01.A.map['values'][mask] *= 0.75
gs01.A.map['values'] = gaussian_filter(gs01.A.map['values'], blur)
gs01.A.map['is_set'][:] = True
gs02 = Gaussian()
m0.append(gs02)
gs02.sigma.map['values'][:] = 15
gs02.sigma.map['is_set'][:] = True
gs02.centre.map['values'][:] = 400
gs02.centre.map['values'][mask] = 300
gs02.centre.map['values'] = gaussian_filter(
gs02.centre.map['values'],
blur)
gs02.centre.map['is_set'][:] = True
gs02.A.map['values'][:] = 50000
gs02.A.map['is_set'][:] = True
gs03 = Lorentzian()
m0.append(gs03)
gs03.gamma.map['values'][:] = 20
gs03.gamma.map['is_set'][:] = True
gs03.centre.map['values'][:] = 100
gs03.centre.map['values'][mask] = 900
gs03.centre.map['is_set'][:] = True
gs03.A.map['values'][:] = 100 * \
rnd.rand(domain, domain) + 50000
gs03.A.map['values'][mask] *= 0.
gs03.A.map['is_set'][:] = True
s11 = m0.as_signal()
if total is None:
total = s11.data.copy()
lor_map = gs01.centre.map['values'].copy()
else:
total = np.concatenate((total, s11.data), axis=1)
lor_map = np.concatenate(
(lor_map, gs01.centre.map['values'].copy()), axis=1)
s = Signal1D(total)
s.data = rnd.poisson(lam=s.data) + 0.1
s.change_dtype(np.float16)
s.estimate_poissonian_noise_variance()
m = s.inav[:, :7].create_model()
g = Gaussian()
l1 = Lorentzian()
l2 = Lorentzian()
g.sigma.value = 50
g.centre.value = 400
g.A.value = 50000
l1.gamma.value = 40
l1.centre.value = 300
l1.A.value = 300000
l2.gamma.value = 15
l2.centre.value = 100
l2.A.value = 50000
l2.centre.bmin = 0
l2.centre.bmax = 200
l2.A.bmin = 30000
l2.A.bmax = 100000
l2.gamma.bmin = 0
l2.gamma.bmax = 60
m.extend([g, l1, l2])
m.assign_current_values_to_all()
l2.active_is_multidimensional = True
return m, gs01, gs02, gs03
class TestSamfireEmpty:
def setup_method(self, method):
self.shape = (7, 15)
n_im = 50
s = hs.signals.Signal1D(np.ones(self.shape + (n_im,)) + 3.)
s.change_dtype(np.float16)
s.estimate_poissonian_noise_variance()
m = s.create_model()
m.append(hs.model.components1D.Gaussian())
m.append(hs.model.components1D.Lorentzian())
m.append(hs.model.components1D.Lorentzian())
self.model = m
def teardown_method(self, method):
gc.collect()
def test_setup(self):
m = self.model
samf = m.create_samfire(workers=N_WORKERS, setup=False)
assert samf.metadata._gt_dump is None
assert samf.pool is None
samf._setup(ipyparallel=False)
assert samf.metadata._gt_dump is not None
assert samf.pool is not None
samf.stop()
del samf
def test_samfire_init_marker(self):
m = self.model
samf = m.create_samfire(workers=N_WORKERS, setup=False)
np.testing.assert_array_almost_equal(samf.metadata.marker,
np.zeros(self.shape))
samf.stop()
del samf
def test_samfire_init_model(self):
m = self.model
samf = m.create_samfire(workers=N_WORKERS, setup=False)
assert samf.model is m
samf.stop()
del samf
def test_samfire_init_metadata(self):
m = self.model
samf = m.create_samfire(workers=N_WORKERS, setup=False)
assert isinstance(samf.metadata, DictionaryTreeBrowser)
samf.stop()
del samf
def test_samfire_init_strategy_list(self):
from hyperspy.samfire import StrategyList
m = self.model
samf = m.create_samfire(workers=N_WORKERS, setup=False)
assert isinstance(samf.strategies, StrategyList)
def test_samfire_init_strategies(self):
m = self.model
samf = m.create_samfire(workers=N_WORKERS, setup=False)
from hyperspy.samfire_utils.local_strategies import ReducedChiSquaredStrategy
from hyperspy.samfire_utils.global_strategies import HistogramStrategy
assert isinstance(samf.strategies[0],
ReducedChiSquaredStrategy)
assert isinstance(samf.strategies[1], HistogramStrategy)
samf.stop()
del samf
def test_samfire_init_fig(self):
m = self.model
samf = m.create_samfire(workers=N_WORKERS, setup=False)
assert samf._figure is None
samf.stop()
del samf
def test_samfire_init_default(self):
m = self.model
from multiprocessing import cpu_count
samf = m.create_samfire(setup=False)
assert samf._workers == cpu_count() - 1
np.testing.assert_allclose(samf.metadata.marker, np.zeros(self.shape))
samf.stop()
del samf
def test_optional_components(self):
m = self.model
m[-1].active_is_multidimensional = False
samf = m.create_samfire(workers=N_WORKERS, setup=False)
samf.optional_components = [m[0], 1]
samf._enable_optional_components()
assert m[0].active_is_multidimensional
assert m[1].active_is_multidimensional
assert np.all([isinstance(a, int)
for a in samf.optional_components])
np.testing.assert_equal(samf.optional_components, [0, 1])
samf.stop()
del samf
def test_swap_dict_and_model(self):
m = self.model
for i in range(len(m)):
for ip, p in enumerate(m[i].parameters):
p.map['values'][0, 0] = 3.0 + i + ip
p.map['std'][0, 0] = 2.44 + i + ip
p.map['is_set'][0, 0] = True
m[1].active_is_multidimensional = True
m[1]._active_array[0, 0] = False
assert m[1]._active_array[1, 0]
m.chisq.data[0, 0] = 1200.
m.dof.data[0, 0] = 1.
small_m = m.inav[0, 0]
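        # Build a per-pixel result dictionary in the shape
        # _swap_dict_and_model expects: chi-squared, degrees of freedom and
        # the parameter maps of every active component.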
d = {'chisq.data': np.array(small_m.chisq.data[0]),
'dof.data': np.array(small_m.dof.data[0]),
'components': {component.name: {parameter.name: parameter.map for
parameter in component.parameters}
for component in small_m if component.active}
}
d = copy.deepcopy(d)
samf = m.create_samfire(workers=N_WORKERS, setup=False)
samf._swap_dict_and_model((1, 0), d)
assert m.chisq.data[1, 0] == 1200.
assert m.dof.data[1, 0] == 1.
assert d['dof.data'] == 0.
assert np.isnan(d['chisq.data'])
assert np.all(~m[1]._active_array[:2, 0])
for c in m:
if c.active:
for p in c.parameters:
assert (
p.map['values'][
0, 0] == p.map['values'][
1, 0])
assert p.map['std'][0, 0] == p.map['std'][1, 0]
assert (
p.map['is_set'][
0, 0] == p.map['is_set'][
1, 0])
samf.stop()
del samf
def test_next_pixels(self):
m = self.model
samf = m.create_samfire(workers=N_WORKERS, setup=False)
ans = samf._next_pixels(3)
assert len(ans) == 0
ind_list = [(1, 2), (0, 1), (3, 3), (4, 6)]
for ind in ind_list:
samf.metadata.marker[ind] += 2.
ans = samf._next_pixels(10)
assert len(ans) == 4
for ind in ans:
assert ind in ind_list
for n, ind in enumerate(ind_list):
samf.metadata.marker[ind] += n
ans = samf._next_pixels(10)
assert ans == [(4, 6), ]
samf.stop()
del samf
def test_change_strategy(self):
m = self.model
samf = m.create_samfire(workers=N_WORKERS, setup=False)
from hyperspy.samfire_utils.local_strategies import ReducedChiSquaredStrategy
from hyperspy.samfire_utils.global_strategies import HistogramStrategy
ind = (0, 0)
samf.metadata.marker[ind] = -2
samf.strategies.append(ReducedChiSquaredStrategy())
samf.change_strategy(2)
assert samf.metadata.marker[ind] == -1
assert samf._active_strategy_ind == 2
samf.change_strategy(samf.strategies[1])
assert samf._active_strategy_ind == 1
assert samf.metadata.marker[ind] == -2
new_strat = HistogramStrategy()
samf.strategies.append(new_strat)
samf.change_strategy(3)
assert samf._active_strategy_ind == 3
assert samf.active_strategy is new_strat
assert samf.metadata.marker[ind] == -2
samf.stop()
del samf
@pytest.mark.xfail(reason="Sometimes the number of failed pixels > 3 when using multiprocessing. Unknown reason")
def test_multiprocessed():
"""This test uses multiprocessing.pool rather than ipyparallel"""
model, lor1, g, lor2 = generate_test_model()
shape = (7, 15)
model.fit()
samf = model.create_samfire(workers=N_WORKERS, ipyparallel=False)
samf.plot_every = np.nan
samf.strategies[0].radii = 1.
samf.strategies.remove(1)
samf.optional_components = [model[2]]
samf.start(bounded=True)
# let at most 3 pixels to fail randomly.
fitmask = samf.metadata.marker == -np.ones(shape)
print('number of pixels failed: {}'.format(
np.prod(shape) - np.sum(fitmask)))
assert np.sum(fitmask) >= np.prod(shape) - 5
for o_c, n_c in zip([g, lor1, lor2], model):
for p, p1 in zip(o_c.parameters, n_c.parameters):
if n_c._active_array is not None:
mask = np.logical_and(n_c._active_array, fitmask)
else:
mask = fitmask
np.testing.assert_allclose(
p1.map['values'][mask],
p.map['values'][:7, :15][mask],
rtol=0.3)
samf.stop()
del samf
gc.collect()
def test_create_worker_defaults():
worker = create_worker('worker')
assert worker.identity == 'worker'
assert worker.shared_queue is None
assert worker.result_queue is None
assert worker.individual_queue is None
np.testing.assert_equal(worker.best_AICc, np.inf)
np.testing.assert_equal(worker.best_values, [])
np.testing.assert_equal(worker.best_dof, np.inf)
np.testing.assert_equal(worker.last_time, 1)
class TestSamfireWorker:
def setup_method(self, method):
np.random.seed(17)
ax = np.arange(250)
self.widths = [5, 10, 15]
self.centres = [50, 105, 180]
self.areas = [5000, 10000, 20000]
g = hs.model.components1D.Gaussian()
g.sigma.value = self.widths[0]
g.A.value = self.areas[0]
l = hs.model.components1D.Lorentzian()
l.gamma.value = self.widths[1]
l.A.value = self.areas[1]
l1 = hs.model.components1D.Lorentzian()
l1.gamma.value = self.widths[2]
l1.A.value = self.areas[2]
d = g.function(ax - self.centres[0]) + \
l.function(ax - self.centres[1]) + \
l1.function(ax - self.centres[2])
s = hs.signals.Signal1D(np.array([d, d]))
s.add_poissonian_noise()
s.change_dtype(np.float16)
s.metadata.Signal.set_item("Noise_properties.variance",
s.deepcopy() + 1.)
m = s.create_model()
m.append(hs.model.components1D.Gaussian())
m[-1].name = 'g1'
m.append(hs.model.components1D.Lorentzian())
m[-1].name = 'l1'
m.append(hs.model.components1D.Lorentzian())
m[-1].name = 'l2'
m.append(hs.model.components1D.Gaussian())
m[-1].name = 'g2'
m.append(hs.model.components1D.Gaussian())
m[-1].name = 'g3'
m.append(hs.model.components1D.Lorentzian())
m[-1].name = 'l3'
for c in m:
c.active_is_multidimensional = True
vals = {'g1': {},
'g2': {},
'g3': {},
'l1': {},
'l2': {},
'l3': {},
}
vals['g1']['centre'] = [50, 150]
vals['g1']['sigma'] = [5]
vals['g1']['A'] = [10000]
vals['l1']['centre'] = [43]
vals['l1']['gamma'] = [25]
vals['l1']['A'] = [10000]
vals['l2']['centre'] = [125]
vals['l2']['gamma'] = [8]
vals['l2']['A'] = [10000]
vals['g2']['centre'] = [105]
vals['g2']['sigma'] = [20]
vals['g2']['A'] = [10000]
vals['l3']['centre'] = [185]
vals['l3']['gamma'] = [11]
vals['l3']['A'] = [10000]
vals['g3']['centre'] = [175]
vals['g3']['sigma'] = [12]
vals['g3']['A'] = [10000]
self.vals = vals
self.model = m
self.q = Mock_queue()
self.ind = (1,)
self.args = {}
self.model_letter = 'sldkfjg'
from hyperspy.samfire_utils.fit_tests import red_chisq_test as rct
self._gt_dump = dill.dumps(rct(tolerance=1.0))
m_slice = m.inav[self.ind[::-1]]
m_slice.store(self.model_letter)
m_dict = m_slice.signal._to_dictionary(False)
m_dict['models'] = m_slice.signal.models._models.as_dictionary()
self.model_dictionary = m_dict
self.optional_comps = [1, 2, 3, 4, 5]
def teardown_method(self, method):
gc.collect()
def test_add_model(self):
worker = create_worker('worker')
worker.create_model(self.model_dictionary, self.model_letter)
from hyperspy.model import BaseModel
assert isinstance(worker.model, BaseModel)
for component in worker.model:
assert not component.active_is_multidimensional
assert component.active
del worker
def test_main_result(self):
worker = create_worker('worker')
worker.create_model(self.model_dictionary, self.model_letter)
worker.setup_test(self._gt_dump)
worker.set_optional_names({self.model[comp].name for comp in
self.optional_comps})
self.vals.update({
'signal.data': self.model.signal(),
'fitting_kwargs': {},
'variance.data':
self.model.signal.metadata.Signal.Noise_properties.variance()
})
keyword, (_id, _ind, result, found_solution) = \
worker.run_pixel(self.ind, self.vals)
assert _id == 'worker'
assert _ind == self.ind
assert found_solution
assert result['dof.data'][()] == 9
lor_components = [key for key in result['components'].keys() if
key.find('l') == 0]
assert len(result['components']) == 3
assert len(lor_components) == 2
gauss_name = list(set(result['components'].keys()) -
set(lor_components))[0]
gauss = result['components'][gauss_name]
np.testing.assert_allclose(gauss['A'][0]['values'], self.areas[0],
rtol=0.05)
np.testing.assert_allclose(gauss['sigma'][0]['values'], self.widths[0],
rtol=0.05)
np.testing.assert_allclose(gauss['centre'][0]['values'],
self.centres[0], rtol=0.05)
lor1 = result['components'][lor_components[0]]
lor1_values = tuple(lor1[par][0]['values'] for par in ['A', 'gamma',
'centre'])
lor2 = result['components'][lor_components[1]]
lor2_values = tuple(lor2[par][0]['values'] for par in ['A', 'gamma',
'centre'])
possible_values1 = (self.areas[1], self.widths[1], self.centres[1])
possible_values2 = (self.areas[2], self.widths[2], self.centres[2])
assert (np.allclose(lor1_values, possible_values1, rtol=0.05)
or
np.allclose(lor1_values, possible_values2, rtol=0.05))
assert (np.allclose(lor2_values, possible_values1, rtol=0.05)
or
np.allclose(lor2_values, possible_values2, rtol=0.05))
del worker
|
gpl-3.0
| -1,183,069,035,595,038,000 | -2,451,749,077,185,354,000 | 33.009124 | 113 | 0.555722 | false |
spr/album-sound-check
|
mutagen/ogg.py
|
2
|
17715
|
# Copyright 2006 Joe Wreschnig <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# $Id: ogg.py 3975 2007-01-13 21:51:17Z piman $
"""Read and write Ogg bitstreams and pages.
This module reads and writes a subset of the Ogg bitstream format
version 0. It does *not* read or write Ogg Vorbis files! For that,
you should use mutagen.oggvorbis.
This implementation is based on the RFC 3533 standard found at
http://www.xiph.org/ogg/doc/rfc3533.txt.
"""
import struct
import sys
import zlib
from cStringIO import StringIO
from mutagen import FileType
from mutagen._util import cdata, insert_bytes, delete_bytes
class error(IOError):
"""Ogg stream parsing errors."""
pass
class OggPage(object):
"""A single Ogg page (not necessarily a single encoded packet).
A page is a header of 26 bytes, followed by the length of the
data, followed by the data.
    The constructor is given a file-like object pointing to the start
of an Ogg page. After the constructor is finished it is pointing
to the start of the next page.
Attributes:
version -- stream structure version (currently always 0)
position -- absolute stream position (default -1)
serial -- logical stream serial number (default 0)
sequence -- page sequence number within logical stream (default 0)
offset -- offset this page was read from (default None)
complete -- if the last packet on this page is complete (default True)
packets -- list of raw packet data (default [])
Note that if 'complete' is false, the next page's 'continued'
property must be true (so set both when constructing pages).
If a file-like object is supplied to the constructor, the above
attributes will be filled in based on it.
"""
version = 0
__type_flags = 0
position = 0L
serial = 0
sequence = 0
offset = None
complete = True
def __init__(self, fileobj=None):
self.packets = []
if fileobj is None:
return
self.offset = fileobj.tell()
header = fileobj.read(27)
if len(header) == 0:
raise EOFError
try:
(oggs, self.version, self.__type_flags, self.position,
self.serial, self.sequence, crc, segments) = struct.unpack(
"<4sBBqIIiB", header)
except struct.error:
raise error("unable to read full header; got %r" % header)
if oggs != "OggS":
raise error("read %r, expected %r, at 0x%x" % (
oggs, "OggS", fileobj.tell() - 27))
if self.version != 0:
raise error("version %r unsupported" % self.version)
total = 0
lacings = []
lacing_bytes = fileobj.read(segments)
if len(lacing_bytes) != segments:
raise error("unable to read %r lacing bytes" % segments)
for c in map(ord, lacing_bytes):
total += c
if c < 255:
lacings.append(total)
total = 0
if total:
lacings.append(total)
self.complete = False
self.packets = map(fileobj.read, lacings)
if map(len, self.packets) != lacings:
raise error("unable to read full data")
def __eq__(self, other):
"""Two Ogg pages are the same if they write the same data."""
try:
return (self.write() == other.write())
except AttributeError:
return False
def __repr__(self):
attrs = ['version', 'position', 'serial', 'sequence', 'offset',
'complete', 'continued', 'first', 'last']
values = ["%s=%r" % (attr, getattr(self, attr)) for attr in attrs]
return "<%s %s, %d bytes in %d packets>" % (
type(self).__name__, " ".join(values), sum(map(len, self.packets)),
len(self.packets))
def write(self):
"""Return a string encoding of the page header and data.
A ValueError is raised if the data is too big to fit in a
single page.
"""
data = [
struct.pack("<4sBBqIIi", "OggS", self.version, self.__type_flags,
self.position, self.serial, self.sequence, 0)
]
lacing_data = []
for datum in self.packets:
quot, rem = divmod(len(datum), 255)
lacing_data.append("\xff" * quot + chr(rem))
lacing_data = "".join(lacing_data)
if not self.complete and lacing_data.endswith("\x00"):
lacing_data = lacing_data[:-1]
data.append(chr(len(lacing_data)))
data.append(lacing_data)
data.extend(self.packets)
data = "".join(data)
# Python's CRC is swapped relative to Ogg's needs.
crc = ~zlib.crc32(data.translate(cdata.bitswap), -1)
        # Although we're using to_int_be, this actually makes the CRC
        # a proper little-endian integer, since Python's CRC is byteswapped.
crc = cdata.to_int_be(crc).translate(cdata.bitswap)
data = data[:22] + crc + data[26:]
return data
def __size(self):
size = 27 # Initial header size
for datum in self.packets:
quot, rem = divmod(len(datum), 255)
size += quot + 1
if not self.complete and rem == 0:
# Packet contains a multiple of 255 bytes and is not
# terminated, so we don't have a \x00 at the end.
size -= 1
size += sum(map(len, self.packets))
return size
size = property(__size, doc="Total frame size.")
def __set_flag(self, bit, val):
mask = 1 << bit
if val: self.__type_flags |= mask
else: self.__type_flags &= ~mask
continued = property(
lambda self: cdata.test_bit(self.__type_flags, 0),
lambda self, v: self.__set_flag(0, v),
doc="The first packet is continued from the previous page.")
first = property(
lambda self: cdata.test_bit(self.__type_flags, 1),
lambda self, v: self.__set_flag(1, v),
doc="This is the first page of a logical bitstream.")
last = property(
lambda self: cdata.test_bit(self.__type_flags, 2),
lambda self, v: self.__set_flag(2, v),
doc="This is the last page of a logical bitstream.")
def renumber(klass, fileobj, serial, start):
"""Renumber pages belonging to a specified logical stream.
fileobj must be opened with mode r+b or w+b.
Starting at page number 'start', renumber all pages belonging
to logical stream 'serial'. Other pages will be ignored.
        fileobj must point to the start of a valid Ogg page; any pages
        occurring after it that are part of the specified logical stream
will be numbered. No adjustment will be made to the data in
the pages nor the granule position; only the page number, and
so also the CRC.
If an error occurs (e.g. non-Ogg data is found), fileobj will
        be left pointing to the place in the stream the error occurred,
but the invalid data will be left intact (since this function
does not change the total file size).
"""
number = start
while True:
try: page = OggPage(fileobj)
except EOFError:
break
else:
if page.serial != serial:
# Wrong stream, skip this page.
continue
# Changing the number can't change the page size,
# so seeking back based on the current size is safe.
fileobj.seek(-page.size, 1)
page.sequence = number
fileobj.write(page.write())
fileobj.seek(page.offset + page.size, 0)
number += 1
renumber = classmethod(renumber)
def to_packets(klass, pages, strict=False):
"""Construct a list of packet data from a list of Ogg pages.
If strict is true, the first page must start a new packet,
and the last page must end the last packet.
"""
serial = pages[0].serial
sequence = pages[0].sequence
packets = []
if strict:
if pages[0].continued:
raise ValueError("first packet is continued")
if not pages[-1].complete:
raise ValueError("last packet does not complete")
elif pages and pages[0].continued:
packets.append("")
for page in pages:
if serial != page.serial:
raise ValueError("invalid serial number in %r" % page)
elif sequence != page.sequence:
raise ValueError("bad sequence number in %r" % page)
else: sequence += 1
if page.continued: packets[-1] += page.packets[0]
else: packets.append(page.packets[0])
packets.extend(page.packets[1:])
return packets
to_packets = classmethod(to_packets)
def from_packets(klass, packets, sequence=0,
default_size=4096, wiggle_room=2048):
"""Construct a list of Ogg pages from a list of packet data.
The algorithm will generate pages of approximately
default_size in size (rounded down to the nearest multiple of
255). However, it will also allow pages to increase to
approximately default_size + wiggle_room if allowing the
wiggle room would finish a packet (only one packet will be
finished in this way per page; if the next packet would fit
into the wiggle room, it still starts on a new page).
This method reduces packet fragmentation when packet sizes are
slightly larger than the default page size, while still
ensuring most pages are of the average size.
        Pages are numbered starting at 'sequence'; other information is
uninitialized.
"""
chunk_size = (default_size // 255) * 255
pages = []
page = OggPage()
page.sequence = sequence
for packet in packets:
page.packets.append("")
while packet:
data, packet = packet[:chunk_size], packet[chunk_size:]
if page.size < default_size and len(page.packets) < 255:
page.packets[-1] += data
else:
# If we've put any packet data into this page yet,
# we need to mark it incomplete. However, we can
# also have just started this packet on an already
# full page, in which case, just start the new
# page with this packet.
if page.packets[-1]:
page.complete = False
if len(page.packets) == 1:
page.position = -1L
else:
page.packets.pop(-1)
pages.append(page)
page = OggPage()
page.continued = not pages[-1].complete
page.sequence = pages[-1].sequence + 1
page.packets.append(data)
if len(packet) < wiggle_room:
page.packets[-1] += packet
packet = ""
if page.packets:
pages.append(page)
return pages
from_packets = classmethod(from_packets)
def replace(klass, fileobj, old_pages, new_pages):
"""Replace old_pages with new_pages within fileobj.
old_pages must have come from reading fileobj originally.
new_pages are assumed to have the 'same' data as old_pages,
and so the serial and sequence numbers will be copied, as will
the flags for the first and last pages.
fileobj will be resized and pages renumbered as necessary. As
such, it must be opened r+b or w+b.
"""
# Number the new pages starting from the first old page.
first = old_pages[0].sequence
for page, seq in zip(new_pages, range(first, first + len(new_pages))):
page.sequence = seq
page.serial = old_pages[0].serial
new_pages[0].first = old_pages[0].first
new_pages[0].last = old_pages[0].last
new_pages[0].continued = old_pages[0].continued
new_pages[-1].first = old_pages[-1].first
new_pages[-1].last = old_pages[-1].last
new_pages[-1].complete = old_pages[-1].complete
if not new_pages[-1].complete and len(new_pages[-1].packets) == 1:
new_pages[-1].position = -1L
new_data = "".join(map(klass.write, new_pages))
# Make room in the file for the new data.
delta = len(new_data)
fileobj.seek(old_pages[0].offset, 0)
insert_bytes(fileobj, delta, old_pages[0].offset)
fileobj.seek(old_pages[0].offset, 0)
fileobj.write(new_data)
new_data_end = old_pages[0].offset + delta
# Go through the old pages and delete them. Since we shifted
# the data down the file, we need to adjust their offsets. We
# also need to go backwards, so we don't adjust the deltas of
# the other pages.
old_pages.reverse()
for old_page in old_pages:
adj_offset = old_page.offset + delta
delete_bytes(fileobj, old_page.size, adj_offset)
        # Finally, if there's any discrepancy in length, we need to
# renumber the pages for the logical stream.
if len(old_pages) != len(new_pages):
fileobj.seek(new_data_end, 0)
serial = new_pages[-1].serial
sequence = new_pages[-1].sequence + 1
klass.renumber(fileobj, serial, sequence)
replace = classmethod(replace)
def find_last(klass, fileobj, serial):
"""Find the last page of the stream 'serial'.
If the file is not multiplexed this function is fast. If it is,
        it must read the whole stream.
This finds the last page in the actual file object, or the last
page in the stream (with eos set), whichever comes first.
"""
# For non-muxed streams, look at the last page.
try: fileobj.seek(-256*256, 2)
except IOError:
# The file is less than 64k in length.
fileobj.seek(0)
data = fileobj.read()
try: index = data.rindex("OggS")
except ValueError:
raise error("unable to find final Ogg header")
stringobj = StringIO(data[index:])
best_page = None
try:
page = OggPage(stringobj)
except error:
pass
else:
if page.serial == serial:
if page.last: return page
else: best_page = page
else: best_page = None
# The stream is muxed, so use the slow way.
fileobj.seek(0)
try:
page = OggPage(fileobj)
while not page.last:
page = OggPage(fileobj)
while page.serial != serial:
page = OggPage(fileobj)
best_page = page
return page
except error:
return best_page
except EOFError:
return best_page
find_last = classmethod(find_last)
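# Illustrative sketch, not part of the original module: the usual round trip
# through the packet helpers above -- collect the pages of one logical
# stream, rebuild its packets, then re-paginate and splice them back into
# the file. The path is a placeholder; replace() needs mode "rb+".
#
#   fileobj = file("example.ogg", "rb+")
#   pages = [OggPage(fileobj)]
#   while not pages[-1].last:
#       page = OggPage(fileobj)
#       if page.serial == pages[0].serial:
#           pages.append(page)
#   packets = OggPage.to_packets(pages)
#   new_pages = OggPage.from_packets(packets, pages[0].sequence)
#   OggPage.replace(fileobj, pages, new_pages)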
class OggFileType(FileType):
"""An generic Ogg file."""
_Info = None
_Tags = None
_Error = None
_mimes = ["application/ogg", "application/x-ogg"]
def load(self, filename):
"""Load file information from a filename."""
self.filename = filename
fileobj = file(filename, "rb")
try:
try:
self.info = self._Info(fileobj)
self.tags = self._Tags(fileobj, self.info)
if self.info.length:
# The streaminfo gave us real length information,
# don't waste time scanning the Ogg.
return
last_page = OggPage.find_last(fileobj, self.info.serial)
samples = last_page.position
try:
denom = self.info.sample_rate
except AttributeError:
denom = self.info.fps
self.info.length = samples / float(denom)
except error, e:
raise self._Error, e, sys.exc_info()[2]
except EOFError:
raise self._Error, "no appropriate stream found"
finally:
fileobj.close()
def delete(self, filename=None):
"""Remove tags from a file.
If no filename is given, the one most recently loaded is used.
"""
if filename is None:
filename = self.filename
self.tags.clear()
fileobj = file(filename, "rb+")
try:
try: self.tags._inject(fileobj)
except error, e:
raise self._Error, e, sys.exc_info()[2]
except EOFError:
raise self._Error, "no appropriate stream found"
finally:
fileobj.close()
def save(self, filename=None):
"""Save a tag to a file.
If no filename is given, the one most recently loaded is used.
"""
if filename is None:
filename = self.filename
fileobj = file(filename, "rb+")
try:
try: self.tags._inject(fileobj)
except error, e:
raise self._Error, e, sys.exc_info()[2]
except EOFError:
raise self._Error, "no appropriate stream found"
finally:
fileobj.close()
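# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: walking every page in
# an Ogg bitstream with OggPage. The path "example.ogg" is a placeholder.
if __name__ == "__main__":
    fileobj = file("example.ogg", "rb")
    try:
        while True:
            try:
                page = OggPage(fileobj)
            except EOFError:
                break
            print "serial=%d sequence=%d size=%d packets=%d" % (
                page.serial, page.sequence, page.size, len(page.packets))
    finally:
        fileobj.close()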
|
gpl-2.0
| -2,378,062,474,908,291,000 | 7,782,431,786,394,868,000 | 34.572289 | 79 | 0.570082 | false |
elthariel/dff
|
api/gui/dialog/applymodule.py
|
1
|
12581
|
# DFF -- An Open Source Digital Forensics Framework
# Copyright (C) 2009-2010 ArxSys
# This program is free software, distributed under the terms of
# the GNU General Public License Version 2. See the LICENSE file
# at the top of the source tree.
#
# See http://www.digital-forensic.org for more information about this
# project. Please do not directly contact any of the maintainers of
# DFF for assistance; the project provides a web site, mailing lists
# and IRC channels for your use.
#
# Author(s):
# Francois Percot <[email protected]>
#
from types import *
from PyQt4.QtGui import QAbstractItemView, QApplication, QCheckBox, QDialog, QGridLayout, QLabel, QMessageBox,QSplitter, QTableWidget, QTableWidgetItem, QVBoxLayout, QWidget
from PyQt4.QtCore import Qt, QObject, QRect, QSize, SIGNAL
# CORE
from api.loader import *
from api.env import *
from api.vfs import *
from api.taskmanager.taskmanager import *
from api.type import *
from api.gui.box.nodecombobox import NodeComboBox
from api.gui.box.stringcombobox import StringComboBox
from api.gui.box.boolcombobox import BoolComboBox
from api.gui.box.checkbox import CheckBoxWidgetEnable
from api.gui.button.pushbutton import PushButton
from api.gui.dialog.uiapplymodule import UiApplyModule
from api.gui.widget.applymoduletable import ApplyModuleTable
from ui.gui.utils.utils import DFF_Utils
class ApplyModule(QDialog, UiApplyModule):
def __init__(self, mainWindow):
QDialog.__init__(self, mainWindow)
UiApplyModule.__init__(self)
self.setupUi(self)
self.__mainWindow = mainWindow
self.loader = loader.loader()
self.env = env.env()
self.vfs = vfs.vfs()
self.initDialog()
self.initCallback()
def initDialog(self):
self.initArguments()
self.vlayout = QVBoxLayout(self)
self.vlayout.addWidget(self.label)
self.tableModules = ApplyModuleTable(self)
self.splitter = QSplitter(Qt.Vertical, self)
self.splitter.addWidget(self.tableModules)
self.splitter.addWidget(self.argumentsContainer)
self.vlayout.addWidget(self.splitter)
self.vlayout.addWidget(self.buttonBox)
def initCallback(self):
self.connect(self.tableModules, SIGNAL("currentItemChanged(QTableWidgetItem *, QTableWidgetItem *)"), self.currentItemChanged)
self.connect(self.buttonBox,SIGNAL("accepted()"), self.validateModule)
#self.connect(self.tableModules, SIGNAL("itemChanged(QTableWidgetItem *)"), self.itemChanged)
def currentItemChanged(self, itemCurrent, itemPrevious):
if itemCurrent :
if (itemPrevious and itemCurrent.row() <> itemPrevious.row()) or not itemPrevious:
if itemCurrent.column() == 1 :
itemType = itemCurrent
itemCurrent = self.tableModules.item(itemCurrent.row(), 0)
else :
itemType = self.tableModules.item(itemCurrent.row(), 1)
self.reloadAllArguments(str(itemCurrent.text()), str(itemType.text()))
self.tableModules.resizeTableModules()
self.tableModules.scrollToItem(itemCurrent)
def validateModule(self):
errorArg = []
for i in self.valueArgs :
if not i.optional :
if i.type == "node" :
node = self.valueArgs[i].currentNode()
if node is None :
errorArg.append(i)
else :
value = str(self.valueArgs[i].currentText())
if value == "" :
errorArg.append(i)
if len(errorArg) > 0 :
QMessageBox.warning(self, QApplication.translate("ApplyModule", "Missing Arguments", None, QApplication.UnicodeUTF8), QApplication.translate("ApplyModule", "There are missing arguments.", None, QApplication.UnicodeUTF8))
else :
self.accept()
def initAllInformations(self, nameModule, typeModule, nodesSelected):
self.__nodesSelected = nodesSelected
self.deleteAllArguments()
self.deleteList()
self.fillListModules()
if nameModule <> None :
self.loadOneItem(nameModule, typeModule)
else :
self.deleteAllArguments()
self.tableModules.setColumnWidth(0, 333)
self.tableModules.setColumnWidth(1, 43)
###### MANAGE QTABLEWIDGET ######
def deleteList(self):
self.tableModules.clearContents()
for i in range(0, self.tableModules.rowCount()) :
self.tableModules.removeRow(0)
def fillListModules(self):
modules = self.loader.modules
self.tableModules.setSortingEnabled(False)
row = self.tableModules.rowCount()
self.tableModules.setRowCount(row + len(modules))
for mod in modules :
#if str(script) <> "opendump" and type(script) == StringType :
item = QTableWidgetItem(str(mod))
item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
item2 = QTableWidgetItem(modules[mod].tags)
item2.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
self.tableModules.setItem(row, 0, item)
self.tableModules.setItem(row, 1, item2)
row = row + 1
def selectedItem(self, nameModule):
for i in range(0, self.tableModules.rowCount()) :
item = self.tableModules.item(i, 0)
if (item.text() == nameModule) :
self.tableModules.setCurrentItem(item)
return
def loadOneItem(self, nameModule, typeModule):
self.tableModules.setRowCount(1)
item = QTableWidgetItem(str(nameModule))
item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
item2 = QTableWidgetItem(str(typeModule))
item2.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
self.tableModules.setItem(0, 0, item)
self.tableModules.setItem(0, 1, item2)
self.tableModules.setCurrentItem(item)
####### MANAGE ARGUMENTS
def initArguments(self):
self.argumentsContainer = QWidget(self)
self.gridArgs = QGridLayout(self.argumentsContainer)
self.labelArgs = {}
self.valueArgs = {}
self.checkBoxArgs = {}
self.hboxArgs = {}
self.browserButtons = {}
def deleteAllArguments(self):
if self.argumentsContainer <> None :
listarg = self.argumentsContainer.children()
for i in self.labelArgs :
self.gridArgs.removeWidget(self.labelArgs[i])
#self.labelArgs[i].hide()
self.labelArgs[i].deleteLater()
if self.valueArgs[i] <> None :
self.gridArgs.removeWidget(self.valueArgs[i])
#self.valueArgs[i].hide()
self.valueArgs[i].deleteLater()
if self.browserButtons[i] != None :
self.gridArgs.removeWidget(self.browserButtons[i])
#self.browserButtons[i].hide()
self.browserButtons[i].deleteLater()
if self.checkBoxArgs[i] != None :
self.gridArgs.removeWidget(self.checkBoxArgs[i])
#self.checkBoxArgs[i].hide()
self.checkBoxArgs[i].deleteLater()
self.valueArgs.clear()
self.labelArgs.clear()
self.browserButtons.clear()
self.checkBoxArgs.clear()
# if self.argumentsContainer <> None :
# listarg = self.argumentsContainer.children()
# self.argumentsContainer.destroy(True, True)
# self.argumentsContainer = None
def reloadAllArguments(self, nameModule, type):
self.deleteAllArguments()
if self.argumentsContainer == None :
self.argumentsContainer = QWidget(self)
iterator = 0
args = DFF_Utils.getArgs(nameModule)
vars_db = self.env.vars_db
for arg in args:
label = QLabel(arg.name + " ( "+ str(arg.type) + " ) " + ":", self.argumentsContainer)
label.setMinimumSize(QSize(80, 28))
label.setMaximumSize(QSize(120, 28))
list = self.env.getValuesInDb(arg.name, arg.type)
if arg.type == "node" :
value = NodeComboBox(self.argumentsContainer)
for i in range(0, len(list)) :
value.addPath(list[i])
button = PushButton(self.argumentsContainer, value, arg.name, self.__mainWindow.QSelectNodes , self.__mainWindow.dockNodeTree.treeItemModel.rootItemVFS.node)
currentItem = self.__mainWindow.dockNodeTree.treeView.getCurrentItem()
value.addPath(currentItem.node)
if self.__nodesSelected :
list = self.__nodesSelected
for i in range(0, len(self.__nodesSelected)):
value.addPath(self.__nodesSelected[i])
elif arg.type == "int":
value = StringComboBox(self.argumentsContainer)
value.setEditable(True)
for i in range(0, len(list)) :
value.addPath(str(list[i]))
button = None
elif arg.type == "string":
value = StringComboBox(self.argumentsContainer)
value.setEditable(True)
for i in range(0, len(list)) :
value.addPath(list[i])
button = None
elif arg.type == "path" :
value = StringComboBox(self.argumentsContainer)
value.setEditable(True)
for i in range(0, len(list)) :
value.addPath(list[i])
button = PushButton(self.argumentsContainer, value, arg.name)
elif arg.type == "bool" :
value = BoolComboBox(self.argumentsContainer)
button = None
if arg.optional :
checkBox = CheckBoxWidgetEnable(self.argumentsContainer, label, value, button)
else :
checkBox = None
self.gridArgs.addWidget(label, iterator, 0)
if value != None :
self.gridArgs.addWidget(value, iterator, 1)
if button != None:
self.gridArgs.addWidget(button, iterator, 2)
if checkBox != None :
self.gridArgs.addWidget(checkBox, iterator, 3)
value.setCurrentIndex(value.count() - 1)
self.labelArgs[arg] = label
self.valueArgs[arg] = value
self.checkBoxArgs[arg] = checkBox
self.browserButtons[arg] = button
iterator = iterator + 1
def currentType(self):
item = self.tableModules.currentItem()
if item.column() == 0 :
item = self.tableModules.item(item.row() , 1)
return str(item.text())
def currentModuleName(self):
item = self.tableModules.currentItem()
if item.column() == 1 :
item = self.tableModules.item(item.row(), 0)
return str(item.text())
# get Arguments
def getDFFArguments(self):
self.arg = self.env.libenv.argument("gui_input")
self.arg.thisown = 0
for i in self.valueArgs :
if i.type == "node" :
self.arg.add_node(str(i.name), self.valueArgs[i].currentNode())
# print DFF_Utils.getPath(self.valueArgs[i].currentNode())
else :
value = str(self.valueArgs[i].currentText())
if i.type == "path" :
tmp = libtype.Path(str(value))
tmp.thisown = 0
self.arg.add_path(str(i.name), tmp)
elif i.type == "int" :
self.arg.add_int(str(i.name), int(value))
elif i.type == "string" :
self.arg.add_string(str(i.name), value)
elif i.type == "bool" :
if value == "True" :
value = 1
else :
value = 0
self.arg.add_bool(str(i.name), int(value))
self.taskmanager = TaskManager()
modules = self.currentModuleName()
self.taskmanager.add(str(modules), self.arg, ["thread", "gui"])
return self.arg
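# Illustrative sketch, not part of the original module: the per-type dispatch
# performed by getDFFArguments() above, shown in isolation. `arg` stands for
# the container returned by self.env.libenv.argument("gui_input").
#
#   arg.add_node("file", node_combo.currentNode())   # "node" arguments
#   arg.add_path("dest", libtype.Path("/tmp/out"))   # "path" arguments
#   arg.add_int("depth", int("2"))                   # "int" arguments
#   arg.add_string("key", "value")                   # "string" arguments
#   arg.add_bool("verbose", 1)                       # "bool" (True -> 1)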
|
gpl-2.0
| -41,241,805,569,301,700 | -7,312,400,061,368,240,000 | 40.114379 | 232 | 0.581591 | false |
jmarsik/mopidy
|
mopidy/http/handlers.py
|
1
|
7595
|
from __future__ import absolute_import, unicode_literals
import functools
import logging
import os
import socket
import tornado.escape
import tornado.ioloop
import tornado.web
import tornado.websocket
import mopidy
from mopidy import core, models
from mopidy.internal import encoding, jsonrpc
logger = logging.getLogger(__name__)
def make_mopidy_app_factory(apps, statics):
def mopidy_app_factory(config, core):
return [
(r'/ws/?', WebSocketHandler, {
'core': core,
}),
(r'/rpc', JsonRpcHandler, {
'core': core,
}),
(r'/(.+)', StaticFileHandler, {
'path': os.path.join(os.path.dirname(__file__), 'data'),
}),
(r'/', ClientListHandler, {
'apps': apps,
'statics': statics,
}),
]
return mopidy_app_factory
def make_jsonrpc_wrapper(core_actor):
    objects = {
'core.get_uri_schemes': core.Core.get_uri_schemes,
'core.get_version': core.Core.get_version,
'core.history': core.HistoryController,
'core.library': core.LibraryController,
'core.mixer': core.MixerController,
'core.playback': core.PlaybackController,
'core.playlists': core.PlaylistsController,
'core.tracklist': core.TracklistController,
'core.service': core.ServiceController,
}
services = core_actor.get_public_service_classes().get()
for t in services.keys():
objects[t] = services[t]
inspector = jsonrpc.JsonRpcInspector(objects)
    objects = {
'core.describe': inspector.describe,
'core.get_uri_schemes': core_actor.get_uri_schemes,
'core.get_version': core_actor.get_version,
'core.history': core_actor.history,
'core.library': core_actor.library,
'core.mixer': core_actor.mixer,
'core.playback': core_actor.playback,
'core.playlists': core_actor.playlists,
'core.tracklist': core_actor.tracklist,
'core.service': core_actor.service,
}
services = core_actor.get_public_services().get()
for t in services.keys():
objects[t] = services[t]
return jsonrpc.JsonRpcWrapper(
objects,
decoders=[models.model_json_decoder],
encoders=[models.ModelJSONEncoder]
)
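# Illustrative sketch, not part of the original module: the wrapper built
# above consumes raw JSON-RPC 2.0 strings; `core_actor` is a placeholder for
# a running core actor proxy.
#
#   wrapper = make_jsonrpc_wrapper(core_actor)
#   reply = wrapper.handle_json(
#       '{"jsonrpc": "2.0", "id": 1, "method": "core.get_version"}')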
def _send_broadcast(client, msg):
# We could check for client.ws_connection, but we don't really
# care why the broadcast failed, we just want the rest of them
# to succeed, so catch everything.
try:
client.write_message(msg)
except Exception as e:
error_msg = encoding.locale_decode(e)
logger.debug('Broadcast of WebSocket message to %s failed: %s',
client.request.remote_ip, error_msg)
# TODO: should this do the same cleanup as the on_message code?
class WebSocketHandler(tornado.websocket.WebSocketHandler):
# XXX This set is shared by all WebSocketHandler objects. This isn't
# optimal, but there's currently no use case for having more than one of
# these anyway.
clients = set()
@classmethod
def broadcast(cls, msg):
if hasattr(tornado.ioloop.IOLoop, 'current'):
loop = tornado.ioloop.IOLoop.current()
else:
loop = tornado.ioloop.IOLoop.instance() # Fallback for pre 3.0
# This can be called from outside the Tornado ioloop, so we need to
# safely cross the thread boundary by adding a callback to the loop.
for client in cls.clients:
# One callback per client to keep time we hold up the loop short
# NOTE: Pre 3.0 does not support *args or **kwargs...
loop.add_callback(functools.partial(_send_broadcast, client, msg))
def initialize(self, core):
self.jsonrpc = make_jsonrpc_wrapper(core)
def open(self):
if hasattr(self, 'set_nodelay'):
# New in Tornado 3.1
self.set_nodelay(True)
else:
self.stream.socket.setsockopt(
socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.clients.add(self)
logger.debug(
'New WebSocket connection from %s', self.request.remote_ip)
def on_close(self):
self.clients.discard(self)
logger.debug(
'Closed WebSocket connection from %s',
self.request.remote_ip)
def on_message(self, message):
if not message:
return
logger.debug(
'Received WebSocket message from %s: %r',
self.request.remote_ip, message)
try:
response = self.jsonrpc.handle_json(
tornado.escape.native_str(message))
if response and self.write_message(response):
logger.debug(
'Sent WebSocket message to %s: %r',
self.request.remote_ip, response)
except Exception as e:
error_msg = encoding.locale_decode(e)
logger.error('WebSocket request error: %s', error_msg)
if self.ws_connection:
# Tornado 3.2+ checks if self.ws_connection is None before
# using it, but not older versions.
self.close()
def check_origin(self, origin):
# Allow cross-origin WebSocket connections, like Tornado before 4.0
# defaulted to.
return True
def set_mopidy_headers(request_handler):
request_handler.set_header('Cache-Control', 'no-cache')
request_handler.set_header(
'X-Mopidy-Version', mopidy.__version__.encode('utf-8'))
class JsonRpcHandler(tornado.web.RequestHandler):
def initialize(self, core):
self.jsonrpc = make_jsonrpc_wrapper(core)
def head(self):
self.set_extra_headers()
self.finish()
def post(self):
data = self.request.body
if not data:
return
logger.debug(
'Received RPC message from %s: %r', self.request.remote_ip, data)
try:
self.set_extra_headers()
response = self.jsonrpc.handle_json(
tornado.escape.native_str(data))
if response and self.write(response):
logger.debug(
'Sent RPC message to %s: %r',
self.request.remote_ip, response)
except Exception as e:
logger.error('HTTP JSON-RPC request error: %s', e)
self.write_error(500)
def set_extra_headers(self):
set_mopidy_headers(self)
self.set_header('Accept', 'application/json')
self.set_header('Content-Type', 'application/json; utf-8')
class ClientListHandler(tornado.web.RequestHandler):
def initialize(self, apps, statics):
self.apps = apps
self.statics = statics
def get_template_path(self):
return os.path.dirname(__file__)
def get(self):
set_mopidy_headers(self)
names = set()
for app in self.apps:
names.add(app['name'])
for static in self.statics:
names.add(static['name'])
names.discard('mopidy')
self.render('data/clients.html', apps=sorted(list(names)))
class StaticFileHandler(tornado.web.StaticFileHandler):
def set_extra_headers(self, path):
set_mopidy_headers(self)
class AddSlashHandler(tornado.web.RequestHandler):
@tornado.web.addslash
def prepare(self):
return super(AddSlashHandler, self).prepare()
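# Illustrative sketch, not part of the original module: the tuples produced
# by make_mopidy_app_factory() are plain Tornado routing rules, so they can
# be mounted on a tornado.web.Application directly. `config` and `core_actor`
# are placeholders normally supplied by the HTTP frontend at runtime.
#
#   app_factory = make_mopidy_app_factory(apps=[], statics=[])
#   application = tornado.web.Application(app_factory(config, core_actor))
#   application.listen(6680)
#   tornado.ioloop.IOLoop.current().start()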
|
apache-2.0
| 3,481,829,549,122,246,700 | 3,109,975,946,035,364,400 | 31.182203 | 78 | 0.59684 | false |
tannoa2/RackHD
|
test/tests/rackhd20/test_rackhd20_api_tags.py
|
13
|
2375
|
'''
Copyright 2016, EMC, Inc.
Author(s):
George Paulos
'''
import fit_path # NOQA: unused import
import os
import sys
import subprocess
import fit_common
# Local methods
MON_NODES = fit_common.node_select()
# Select test group here using @attr
from nose.plugins.attrib import attr
@attr(all=True, regression=True, smoke=True)
class rackhd20_api_tags(fit_common.unittest.TestCase):
def test_api_20_nodes_ID_tags(self):
# iterate through nodes
for nodeid in MON_NODES:
#add tag
api_data = fit_common.rackhdapi("/api/2.0/nodes/" + nodeid + "/tags", action="patch", payload={"tags":["test_tag_" + nodeid]})
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
#check tag
api_data = fit_common.rackhdapi("/api/2.0/nodes/" + nodeid + "/tags")
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
self.assertIn("test_tag_" + nodeid, fit_common.json.dumps(api_data['json']), "Tag not set:" + fit_common.json.dumps(api_data['json']))
def test_api_20_tags_post_delete(self):
# create dummy node
data_payload = {"name": "testnode", "identifiers": ["FF", "FF"], "type": "compute"}
nodeid = fit_common.rackhdapi("/api/2.0/nodes", action='post', payload=data_payload)['json']['id']
# add tags
api_data = fit_common.rackhdapi("/api/2.0/nodes/" + nodeid + "/tags", action="patch", payload={"tags":["test_node","dummy_node"]})
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
# check tags
api_data = fit_common.rackhdapi("/api/2.0/tags/test_node/nodes")
self.assertIn("test_node", fit_common.json.dumps(api_data['json']), "Tag not set:" + fit_common.json.dumps(api_data['json']))
self.assertIn("dummy_node", fit_common.json.dumps(api_data['json']), "Tag not set:" + fit_common.json.dumps(api_data['json']))
# delete node
api_data = fit_common.rackhdapi("/api/2.0/nodes/" + nodeid, action="delete")
self.assertEqual(api_data['status'], 204, 'Incorrect HTTP return code, expected 204, got:' + str(api_data['status']))
if __name__ == '__main__':
fit_common.unittest.main()
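# Illustrative sketch, not part of the original test: the raw RackHD 2.0
# calls exercised above, with <id> standing in for a node identifier.
#
#   PATCH  /api/2.0/nodes/<id>/tags    {"tags": ["test_tag_<id>"]}
#   GET    /api/2.0/nodes/<id>/tags
#   GET    /api/2.0/tags/test_node/nodes
#   DELETE /api/2.0/nodes/<id>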
|
apache-2.0
| 3,799,985,091,683,075,000 | -6,462,053,736,677,675,000 | 48.479167 | 146 | 0.632421 | false |
EdLogan18/logan-repository
|
plugin.video.igorlista/mechanize/_html.py
|
132
|
20888
|
"""HTML handling.
Copyright 2003-2006 John J. Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it under
the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
import codecs
import copy
import htmlentitydefs
import re
import _sgmllib_copy as sgmllib
import _beautifulsoup
import _form
from _headersutil import split_header_words, is_html as _is_html
import _request
import _rfc3986
DEFAULT_ENCODING = "latin-1"
COMPRESS_RE = re.compile(r"\s+")
class CachingGeneratorFunction(object):
"""Caching wrapper around a no-arguments iterable."""
def __init__(self, iterable):
self._cache = []
# wrap iterable to make it non-restartable (otherwise, repeated
# __call__ would give incorrect results)
self._iterator = iter(iterable)
def __call__(self):
cache = self._cache
for item in cache:
yield item
for item in self._iterator:
cache.append(item)
yield item
class EncodingFinder:
def __init__(self, default_encoding):
self._default_encoding = default_encoding
def encoding(self, response):
# HTTPEquivProcessor may be in use, so both HTTP and HTTP-EQUIV
# headers may be in the response. HTTP-EQUIV headers come last,
# so try in order from first to last.
for ct in response.info().getheaders("content-type"):
for k, v in split_header_words([ct])[0]:
if k == "charset":
encoding = v
try:
codecs.lookup(v)
except LookupError:
continue
else:
return encoding
return self._default_encoding
class ResponseTypeFinder:
def __init__(self, allow_xhtml):
self._allow_xhtml = allow_xhtml
def is_html(self, response, encoding):
ct_hdrs = response.info().getheaders("content-type")
url = response.geturl()
# XXX encoding
return _is_html(ct_hdrs, url, self._allow_xhtml)
class Args(object):
# idea for this argument-processing trick is from Peter Otten
def __init__(self, args_map):
self.__dict__["dictionary"] = dict(args_map)
def __getattr__(self, key):
try:
return self.dictionary[key]
except KeyError:
return getattr(self.__class__, key)
def __setattr__(self, key, value):
if key == "dictionary":
raise AttributeError()
self.dictionary[key] = value
def form_parser_args(
select_default=False,
form_parser_class=None,
request_class=None,
backwards_compat=False,
):
return Args(locals())
class Link:
def __init__(self, base_url, url, text, tag, attrs):
assert None not in [url, tag, attrs]
self.base_url = base_url
self.absolute_url = _rfc3986.urljoin(base_url, url)
self.url, self.text, self.tag, self.attrs = url, text, tag, attrs
def __cmp__(self, other):
try:
for name in "url", "text", "tag", "attrs":
if getattr(self, name) != getattr(other, name):
return -1
except AttributeError:
return -1
return 0
def __repr__(self):
return "Link(base_url=%r, url=%r, text=%r, tag=%r, attrs=%r)" % (
self.base_url, self.url, self.text, self.tag, self.attrs)
class LinksFactory:
def __init__(self,
link_parser_class=None,
link_class=Link,
urltags=None,
):
import _pullparser
if link_parser_class is None:
link_parser_class = _pullparser.TolerantPullParser
self.link_parser_class = link_parser_class
self.link_class = link_class
if urltags is None:
urltags = {
"a": "href",
"area": "href",
"frame": "src",
"iframe": "src",
}
self.urltags = urltags
self._response = None
self._encoding = None
def set_response(self, response, base_url, encoding):
self._response = response
self._encoding = encoding
self._base_url = base_url
def links(self):
"""Return an iterator that provides links of the document."""
response = self._response
encoding = self._encoding
base_url = self._base_url
p = self.link_parser_class(response, encoding=encoding)
try:
for token in p.tags(*(self.urltags.keys()+["base"])):
if token.type == "endtag":
continue
if token.data == "base":
base_href = dict(token.attrs).get("href")
if base_href is not None:
base_url = base_href
continue
attrs = dict(token.attrs)
tag = token.data
text = None
# XXX use attr_encoding for ref'd doc if that doc does not
# provide one by other means
#attr_encoding = attrs.get("charset")
url = attrs.get(self.urltags[tag]) # XXX is "" a valid URL?
if not url:
# Probably an <A NAME="blah"> link or <AREA NOHREF...>.
# For our purposes a link is something with a URL, so
# ignore this.
continue
url = _rfc3986.clean_url(url, encoding)
if tag == "a":
if token.type != "startendtag":
# hmm, this'd break if end tag is missing
text = p.get_compressed_text(("endtag", tag))
# but this doesn't work for e.g.
# <a href="blah"><b>Andy</b></a>
#text = p.get_compressed_text()
yield Link(base_url, url, text, tag, token.attrs)
except sgmllib.SGMLParseError, exc:
raise _form.ParseError(exc)
class FormsFactory:
"""Makes a sequence of objects satisfying HTMLForm interface.
After calling .forms(), the .global_form attribute is a form object
containing all controls not a descendant of any FORM element.
For constructor argument docs, see ParseResponse argument docs.
"""
def __init__(self,
select_default=False,
form_parser_class=None,
request_class=None,
backwards_compat=False,
):
self.select_default = select_default
if form_parser_class is None:
form_parser_class = _form.FormParser
self.form_parser_class = form_parser_class
if request_class is None:
request_class = _request.Request
self.request_class = request_class
self.backwards_compat = backwards_compat
self._response = None
self.encoding = None
self.global_form = None
def set_response(self, response, encoding):
self._response = response
self.encoding = encoding
self.global_form = None
def forms(self):
encoding = self.encoding
forms = _form.ParseResponseEx(
self._response,
select_default=self.select_default,
form_parser_class=self.form_parser_class,
request_class=self.request_class,
encoding=encoding,
_urljoin=_rfc3986.urljoin,
_urlparse=_rfc3986.urlsplit,
_urlunparse=_rfc3986.urlunsplit,
)
self.global_form = forms[0]
return forms[1:]
class TitleFactory:
def __init__(self):
self._response = self._encoding = None
def set_response(self, response, encoding):
self._response = response
self._encoding = encoding
def _get_title_text(self, parser):
import _pullparser
text = []
tok = None
while 1:
try:
tok = parser.get_token()
except _pullparser.NoMoreTokensError:
break
if tok.type == "data":
text.append(str(tok))
elif tok.type == "entityref":
t = unescape("&%s;" % tok.data,
parser._entitydefs, parser.encoding)
text.append(t)
elif tok.type == "charref":
t = unescape_charref(tok.data, parser.encoding)
text.append(t)
elif tok.type in ["starttag", "endtag", "startendtag"]:
tag_name = tok.data
if tok.type == "endtag" and tag_name == "title":
break
text.append(str(tok))
return COMPRESS_RE.sub(" ", "".join(text).strip())
def title(self):
import _pullparser
p = _pullparser.TolerantPullParser(
self._response, encoding=self._encoding)
try:
try:
p.get_tag("title")
except _pullparser.NoMoreTokensError:
return None
else:
return self._get_title_text(p)
except sgmllib.SGMLParseError, exc:
raise _form.ParseError(exc)
def unescape(data, entities, encoding):
if data is None or "&" not in data:
return data
def replace_entities(match):
ent = match.group()
if ent[1] == "#":
return unescape_charref(ent[2:-1], encoding)
repl = entities.get(ent[1:-1])
if repl is not None:
repl = unichr(repl)
if type(repl) != type(""):
try:
repl = repl.encode(encoding)
except UnicodeError:
repl = ent
else:
repl = ent
return repl
return re.sub(r"&#?[A-Za-z0-9]+?;", replace_entities, data)
def unescape_charref(data, encoding):
name, base = data, 10
if name.startswith("x"):
name, base= name[1:], 16
uc = unichr(int(name, base))
if encoding is None:
return uc
else:
try:
repl = uc.encode(encoding)
except UnicodeError:
repl = "&#%s;" % data
return repl
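# Illustrative sketch, not part of the original module: the two helpers above
# in isolation (output shown assumes a latin-1 encoding).
#
#   >>> unescape("Fish &amp; chips &#38; more",
#   ...          htmlentitydefs.name2codepoint, "latin-1")
#   'Fish & chips & more'
#   >>> unescape_charref("x41", "latin-1")
#   'A'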
class MechanizeBs(_beautifulsoup.BeautifulSoup):
_entitydefs = htmlentitydefs.name2codepoint
# don't want the magic Microsoft-char workaround
PARSER_MASSAGE = [(re.compile('(<[^<>]*)/>'),
lambda(x):x.group(1) + ' />'),
(re.compile('<!\s+([^<>]*)>'),
lambda(x):'<!' + x.group(1) + '>')
]
def __init__(self, encoding, text=None, avoidParserProblems=True,
initialTextIsEverything=True):
self._encoding = encoding
_beautifulsoup.BeautifulSoup.__init__(
self, text, avoidParserProblems, initialTextIsEverything)
def handle_charref(self, ref):
t = unescape("&#%s;"%ref, self._entitydefs, self._encoding)
self.handle_data(t)
def handle_entityref(self, ref):
t = unescape("&%s;"%ref, self._entitydefs, self._encoding)
self.handle_data(t)
def unescape_attrs(self, attrs):
escaped_attrs = []
for key, val in attrs:
val = unescape(val, self._entitydefs, self._encoding)
escaped_attrs.append((key, val))
return escaped_attrs
class RobustLinksFactory:
compress_re = COMPRESS_RE
def __init__(self,
link_parser_class=None,
link_class=Link,
urltags=None,
):
if link_parser_class is None:
link_parser_class = MechanizeBs
self.link_parser_class = link_parser_class
self.link_class = link_class
if urltags is None:
urltags = {
"a": "href",
"area": "href",
"frame": "src",
"iframe": "src",
}
self.urltags = urltags
self._bs = None
self._encoding = None
self._base_url = None
def set_soup(self, soup, base_url, encoding):
self._bs = soup
self._base_url = base_url
self._encoding = encoding
def links(self):
bs = self._bs
base_url = self._base_url
encoding = self._encoding
for ch in bs.recursiveChildGenerator():
if (isinstance(ch, _beautifulsoup.Tag) and
ch.name in self.urltags.keys()+["base"]):
link = ch
attrs = bs.unescape_attrs(link.attrs)
attrs_dict = dict(attrs)
if link.name == "base":
base_href = attrs_dict.get("href")
if base_href is not None:
base_url = base_href
continue
url_attr = self.urltags[link.name]
url = attrs_dict.get(url_attr)
if not url:
continue
url = _rfc3986.clean_url(url, encoding)
text = link.fetchText(lambda t: True)
if not text:
# follow _pullparser's weird behaviour rigidly
if link.name == "a":
text = ""
else:
text = None
else:
text = self.compress_re.sub(" ", " ".join(text).strip())
yield Link(base_url, url, text, link.name, attrs)
class RobustFormsFactory(FormsFactory):
def __init__(self, *args, **kwds):
args = form_parser_args(*args, **kwds)
if args.form_parser_class is None:
args.form_parser_class = _form.RobustFormParser
FormsFactory.__init__(self, **args.dictionary)
def set_response(self, response, encoding):
self._response = response
self.encoding = encoding
class RobustTitleFactory:
def __init__(self):
self._bs = self._encoding = None
def set_soup(self, soup, encoding):
self._bs = soup
self._encoding = encoding
def title(self):
title = self._bs.first("title")
if title == _beautifulsoup.Null:
return None
else:
inner_html = "".join([str(node) for node in title.contents])
return COMPRESS_RE.sub(" ", inner_html.strip())
class Factory:
"""Factory for forms, links, etc.
This interface may expand in future.
Public methods:
set_request_class(request_class)
set_response(response)
forms()
links()
Public attributes:
Note that accessing these attributes may raise ParseError.
encoding: string specifying the encoding of response if it contains a text
document (this value is left unspecified for documents that do not have
an encoding, e.g. an image file)
is_html: true if response contains an HTML document (XHTML may be
regarded as HTML too)
title: page title, or None if no title or not HTML
global_form: form object containing all controls that are not descendants
of any FORM element, or None if the forms_factory does not support
supplying a global form
"""
LAZY_ATTRS = ["encoding", "is_html", "title", "global_form"]
def __init__(self, forms_factory, links_factory, title_factory,
encoding_finder=EncodingFinder(DEFAULT_ENCODING),
response_type_finder=ResponseTypeFinder(allow_xhtml=False),
):
"""
Pass keyword arguments only.
default_encoding: character encoding to use if encoding cannot be
determined (or guessed) from the response. You should turn on
HTTP-EQUIV handling if you want the best chance of getting this right
without resorting to this default. The default value of this
parameter (currently latin-1) may change in future.
"""
self._forms_factory = forms_factory
self._links_factory = links_factory
self._title_factory = title_factory
self._encoding_finder = encoding_finder
self._response_type_finder = response_type_finder
self.set_response(None)
def set_request_class(self, request_class):
"""Set request class (mechanize.Request by default).
HTMLForm instances returned by .forms() will return instances of this
class when .click()ed.
"""
self._forms_factory.request_class = request_class
def set_response(self, response):
"""Set response.
The response must either be None or implement the same interface as
objects returned by mechanize.urlopen().
"""
self._response = response
self._forms_genf = self._links_genf = None
self._get_title = None
for name in self.LAZY_ATTRS:
try:
delattr(self, name)
except AttributeError:
pass
def __getattr__(self, name):
if name not in self.LAZY_ATTRS:
return getattr(self.__class__, name)
if name == "encoding":
self.encoding = self._encoding_finder.encoding(
copy.copy(self._response))
return self.encoding
elif name == "is_html":
self.is_html = self._response_type_finder.is_html(
copy.copy(self._response), self.encoding)
return self.is_html
elif name == "title":
if self.is_html:
self.title = self._title_factory.title()
else:
self.title = None
return self.title
elif name == "global_form":
self.forms()
return self.global_form
def forms(self):
"""Return iterable over HTMLForm-like objects.
Raises mechanize.ParseError on failure.
"""
# this implementation sets .global_form as a side-effect, for benefit
# of __getattr__ impl
if self._forms_genf is None:
try:
self._forms_genf = CachingGeneratorFunction(
self._forms_factory.forms())
except: # XXXX define exception!
self.set_response(self._response)
raise
self.global_form = getattr(
self._forms_factory, "global_form", None)
return self._forms_genf()
def links(self):
"""Return iterable over mechanize.Link-like objects.
Raises mechanize.ParseError on failure.
"""
if self._links_genf is None:
try:
self._links_genf = CachingGeneratorFunction(
self._links_factory.links())
except: # XXXX define exception!
self.set_response(self._response)
raise
return self._links_genf()
class DefaultFactory(Factory):
"""Based on sgmllib."""
def __init__(self, i_want_broken_xhtml_support=False):
Factory.__init__(
self,
forms_factory=FormsFactory(),
links_factory=LinksFactory(),
title_factory=TitleFactory(),
response_type_finder=ResponseTypeFinder(
allow_xhtml=i_want_broken_xhtml_support),
)
def set_response(self, response):
Factory.set_response(self, response)
if response is not None:
self._forms_factory.set_response(
copy.copy(response), self.encoding)
self._links_factory.set_response(
copy.copy(response), response.geturl(), self.encoding)
self._title_factory.set_response(
copy.copy(response), self.encoding)
class RobustFactory(Factory):
"""Based on BeautifulSoup, hopefully a bit more robust to bad HTML than is
DefaultFactory.
"""
def __init__(self, i_want_broken_xhtml_support=False,
soup_class=None):
Factory.__init__(
self,
forms_factory=RobustFormsFactory(),
links_factory=RobustLinksFactory(),
title_factory=RobustTitleFactory(),
response_type_finder=ResponseTypeFinder(
allow_xhtml=i_want_broken_xhtml_support),
)
if soup_class is None:
soup_class = MechanizeBs
self._soup_class = soup_class
def set_response(self, response):
Factory.set_response(self, response)
if response is not None:
data = response.read()
soup = self._soup_class(self.encoding, data)
self._forms_factory.set_response(
copy.copy(response), self.encoding)
self._links_factory.set_soup(
soup, response.geturl(), self.encoding)
self._title_factory.set_soup(soup, self.encoding)
|
gpl-2.0
| -6,200,317,866,469,559,000 | -6,012,131,414,668,727,000 | 32.208267 | 78 | 0.548209 | false |
PoornimaNayak/autotest-client-tests
|
linux-tools/perl_IO_Socket_SSL/perl_IO_Socket_SSL.py
|
4
|
1280
|
#!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error
class perl_IO_Socket_SSL(test.test):
"""
Autotest module for testing basic functionality
of perl_IO_Socket_SSL
@author Athira Rajeev <[email protected]> ##
"""
version = 1
nfail = 0
path = ''
def initialize(self):
"""
Sets the overall failure counter for the test.
"""
self.nfail = 0
logging.info('\n Test initialize successfully')
def run_once(self, test_path=''):
"""
Trigger test run
"""
try:
os.environ["LTPBIN"] = "%s/shared" %(test_path)
ret_val = subprocess.Popen(['./perl-IO-Socket-SSL.sh'], cwd="%s/perl_IO_Socket_SSL" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
except error.CmdError, e:
self.nfail += 1
logging.error("Test Failed: %s", e)
def postprocess(self):
if self.nfail != 0:
logging.info('\n nfails is non-zero')
raise error.TestError('\nTest failed')
else:
logging.info('\n Test completed successfully ')
|
gpl-2.0
| -5,180,094,417,138,333,000 | -7,172,457,797,744,376,000 | 25.666667 | 109 | 0.552344 | false |
HackerTool/vivisect
|
vstruct/defs/windows/win_5_1_i386/ntoskrnl.py
|
7
|
243293
|
# Version: 5.1
# Architecture: i386
import vstruct
from vstruct.primitives import *
POLICY_AUDIT_EVENT_TYPE = v_enum()
POLICY_AUDIT_EVENT_TYPE.AuditCategorySystem = 0
POLICY_AUDIT_EVENT_TYPE.AuditCategoryLogon = 1
POLICY_AUDIT_EVENT_TYPE.AuditCategoryObjectAccess = 2
POLICY_AUDIT_EVENT_TYPE.AuditCategoryPrivilegeUse = 3
POLICY_AUDIT_EVENT_TYPE.AuditCategoryDetailedTracking = 4
POLICY_AUDIT_EVENT_TYPE.AuditCategoryPolicyChange = 5
POLICY_AUDIT_EVENT_TYPE.AuditCategoryAccountManagement = 6
POLICY_AUDIT_EVENT_TYPE.AuditCategoryDirectoryServiceAccess = 7
POLICY_AUDIT_EVENT_TYPE.AuditCategoryAccountLogon = 8
KINTERRUPT_MODE = v_enum()
KINTERRUPT_MODE.LevelSensitive = 0
KINTERRUPT_MODE.Latched = 1
ARBITER_REQUEST_SOURCE = v_enum()
ARBITER_REQUEST_SOURCE.ArbiterRequestUndefined = -1
ARBITER_REQUEST_SOURCE.ArbiterRequestLegacyReported = 0
ARBITER_REQUEST_SOURCE.ArbiterRequestHalReported = 1
ARBITER_REQUEST_SOURCE.ArbiterRequestLegacyAssigned = 2
ARBITER_REQUEST_SOURCE.ArbiterRequestPnpDetected = 3
ARBITER_REQUEST_SOURCE.ArbiterRequestPnpEnumerated = 4
DEVICE_RELATION_TYPE = v_enum()
DEVICE_RELATION_TYPE.BusRelations = 0
DEVICE_RELATION_TYPE.EjectionRelations = 1
DEVICE_RELATION_TYPE.PowerRelations = 2
DEVICE_RELATION_TYPE.RemovalRelations = 3
DEVICE_RELATION_TYPE.TargetDeviceRelation = 4
DEVICE_RELATION_TYPE.SingleBusRelations = 5
IO_ALLOCATION_ACTION = v_enum()
IO_ALLOCATION_ACTION.KeepObject = 1
IO_ALLOCATION_ACTION.DeallocateObject = 2
IO_ALLOCATION_ACTION.DeallocateObjectKeepRegisters = 3
BUS_QUERY_ID_TYPE = v_enum()
BUS_QUERY_ID_TYPE.BusQueryDeviceID = 0
BUS_QUERY_ID_TYPE.BusQueryHardwareIDs = 1
BUS_QUERY_ID_TYPE.BusQueryCompatibleIDs = 2
BUS_QUERY_ID_TYPE.BusQueryInstanceID = 3
BUS_QUERY_ID_TYPE.BusQueryDeviceSerialNumber = 4
MMSYSTEM_PTE_POOL_TYPE = v_enum()
MMSYSTEM_PTE_POOL_TYPE.SystemPteSpace = 0
MMSYSTEM_PTE_POOL_TYPE.NonPagedPoolExpansion = 1
MMSYSTEM_PTE_POOL_TYPE.MaximumPtePoolTypes = 2
POP_POLICY_DEVICE_TYPE = v_enum()
POP_POLICY_DEVICE_TYPE.PolicyDeviceSystemButton = 0
POP_POLICY_DEVICE_TYPE.PolicyDeviceThermalZone = 1
POP_POLICY_DEVICE_TYPE.PolicyDeviceBattery = 2
POP_POLICY_DEVICE_TYPE.PolicyInitiatePowerActionAPI = 3
POP_POLICY_DEVICE_TYPE.PolicySetPowerStateAPI = 4
POP_POLICY_DEVICE_TYPE.PolicyImmediateDozeS4 = 5
POP_POLICY_DEVICE_TYPE.PolicySystemIdle = 6
MEMORY_CACHING_TYPE = v_enum()
MEMORY_CACHING_TYPE.MmNonCached = 0
MEMORY_CACHING_TYPE.MmCached = 1
MEMORY_CACHING_TYPE.MmWriteCombined = 2
MEMORY_CACHING_TYPE.MmHardwareCoherentCached = 3
MEMORY_CACHING_TYPE.MmNonCachedUnordered = 4
MEMORY_CACHING_TYPE.MmUSWCCached = 5
MEMORY_CACHING_TYPE.MmMaximumCacheType = 6
NT_PRODUCT_TYPE = v_enum()
NT_PRODUCT_TYPE.NtProductWinNt = 1
NT_PRODUCT_TYPE.NtProductLanManNt = 2
NT_PRODUCT_TYPE.NtProductServer = 3
DEVICE_POWER_STATE = v_enum()
DEVICE_POWER_STATE.PowerDeviceUnspecified = 0
DEVICE_POWER_STATE.PowerDeviceD0 = 1
DEVICE_POWER_STATE.PowerDeviceD1 = 2
DEVICE_POWER_STATE.PowerDeviceD2 = 3
DEVICE_POWER_STATE.PowerDeviceD3 = 4
DEVICE_POWER_STATE.PowerDeviceMaximum = 5
PF_SCENARIO_TYPE = v_enum()
PF_SCENARIO_TYPE.PfApplicationLaunchScenarioType = 0
PF_SCENARIO_TYPE.PfSystemBootScenarioType = 1
PF_SCENARIO_TYPE.PfMaxScenarioType = 2
TOKEN_TYPE = v_enum()
TOKEN_TYPE.TokenPrimary = 1
TOKEN_TYPE.TokenImpersonation = 2
VI_DEADLOCK_RESOURCE_TYPE = v_enum()
VI_DEADLOCK_RESOURCE_TYPE.VfDeadlockUnknown = 0
VI_DEADLOCK_RESOURCE_TYPE.VfDeadlockMutex = 1
VI_DEADLOCK_RESOURCE_TYPE.VfDeadlockFastMutex = 2
VI_DEADLOCK_RESOURCE_TYPE.VfDeadlockFastMutexUnsafe = 3
VI_DEADLOCK_RESOURCE_TYPE.VfDeadlockSpinLock = 4
VI_DEADLOCK_RESOURCE_TYPE.VfDeadlockQueuedSpinLock = 5
VI_DEADLOCK_RESOURCE_TYPE.VfDeadlockTypeMaximum = 6
FSINFOCLASS = v_enum()
FSINFOCLASS.FileFsVolumeInformation = 1
FSINFOCLASS.FileFsLabelInformation = 2
FSINFOCLASS.FileFsSizeInformation = 3
FSINFOCLASS.FileFsDeviceInformation = 4
FSINFOCLASS.FileFsAttributeInformation = 5
FSINFOCLASS.FileFsControlInformation = 6
FSINFOCLASS.FileFsFullSizeInformation = 7
FSINFOCLASS.FileFsObjectIdInformation = 8
FSINFOCLASS.FileFsDriverPathInformation = 9
FSINFOCLASS.FileFsMaximumInformation = 10
ARBITER_ACTION = v_enum()
ARBITER_ACTION.ArbiterActionTestAllocation = 0
ARBITER_ACTION.ArbiterActionRetestAllocation = 1
ARBITER_ACTION.ArbiterActionCommitAllocation = 2
ARBITER_ACTION.ArbiterActionRollbackAllocation = 3
ARBITER_ACTION.ArbiterActionQueryAllocatedResources = 4
ARBITER_ACTION.ArbiterActionWriteReservedResources = 5
ARBITER_ACTION.ArbiterActionQueryConflict = 6
ARBITER_ACTION.ArbiterActionQueryArbitrate = 7
ARBITER_ACTION.ArbiterActionAddReserved = 8
ARBITER_ACTION.ArbiterActionBootAllocation = 9
POOL_TYPE = v_enum()
POOL_TYPE.NonPagedPool = 0
POOL_TYPE.PagedPool = 1
POOL_TYPE.NonPagedPoolMustSucceed = 2
POOL_TYPE.DontUseThisType = 3
POOL_TYPE.NonPagedPoolCacheAligned = 4
POOL_TYPE.PagedPoolCacheAligned = 5
POOL_TYPE.NonPagedPoolCacheAlignedMustS = 6
POOL_TYPE.MaxPoolType = 7
POOL_TYPE.NonPagedPoolSession = 32
POOL_TYPE.PagedPoolSession = 33
POOL_TYPE.NonPagedPoolMustSucceedSession = 34
POOL_TYPE.DontUseThisTypeSession = 35
POOL_TYPE.NonPagedPoolCacheAlignedSession = 36
POOL_TYPE.PagedPoolCacheAlignedSession = 37
POOL_TYPE.NonPagedPoolCacheAlignedMustSSession = 38
PCI_DISPATCH_STYLE = v_enum()
PCI_DISPATCH_STYLE.IRP_COMPLETE = 0
PCI_DISPATCH_STYLE.IRP_DOWNWARD = 1
PCI_DISPATCH_STYLE.IRP_UPWARD = 2
PCI_DISPATCH_STYLE.IRP_DISPATCH = 3
MODE = v_enum()
MODE.KernelMode = 0
MODE.UserMode = 1
MODE.MaximumMode = 2
FS_FILTER_SECTION_SYNC_TYPE = v_enum()
FS_FILTER_SECTION_SYNC_TYPE.SyncTypeOther = 0
FS_FILTER_SECTION_SYNC_TYPE.SyncTypeCreateSection = 1
OB_OPEN_REASON = v_enum()
OB_OPEN_REASON.ObCreateHandle = 0
OB_OPEN_REASON.ObOpenHandle = 1
OB_OPEN_REASON.ObDuplicateHandle = 2
OB_OPEN_REASON.ObInheritHandle = 3
OB_OPEN_REASON.ObMaxOpenReason = 4
CPU_VENDORS = v_enum()
CPU_VENDORS.CPU_NONE = 0
CPU_VENDORS.CPU_INTEL = 1
CPU_VENDORS.CPU_AMD = 2
CPU_VENDORS.CPU_CYRIX = 3
CPU_VENDORS.CPU_TRANSMETA = 4
CPU_VENDORS.CPU_CENTAUR = 5
CPU_VENDORS.CPU_RISE = 6
CPU_VENDORS.CPU_UNKNOWN = 7
DEVICE_TEXT_TYPE = v_enum()
DEVICE_TEXT_TYPE.DeviceTextDescription = 0
DEVICE_TEXT_TYPE.DeviceTextLocationInformation = 1
POWER_STATE_TYPE = v_enum()
POWER_STATE_TYPE.SystemPowerState = 0
POWER_STATE_TYPE.DevicePowerState = 1
BUS_DATA_TYPE = v_enum()
BUS_DATA_TYPE.ConfigurationSpaceUndefined = -1
BUS_DATA_TYPE.Cmos = 0
BUS_DATA_TYPE.EisaConfiguration = 1
BUS_DATA_TYPE.Pos = 2
BUS_DATA_TYPE.CbusConfiguration = 3
BUS_DATA_TYPE.PCIConfiguration = 4
BUS_DATA_TYPE.VMEConfiguration = 5
BUS_DATA_TYPE.NuBusConfiguration = 6
BUS_DATA_TYPE.PCMCIAConfiguration = 7
BUS_DATA_TYPE.MPIConfiguration = 8
BUS_DATA_TYPE.MPSAConfiguration = 9
BUS_DATA_TYPE.PNPISAConfiguration = 10
BUS_DATA_TYPE.SgiInternalConfiguration = 11
BUS_DATA_TYPE.MaximumBusDataType = 12
LSA_FOREST_TRUST_RECORD_TYPE = v_enum()
LSA_FOREST_TRUST_RECORD_TYPE.ForestTrustTopLevelName = 0
LSA_FOREST_TRUST_RECORD_TYPE.ForestTrustTopLevelNameEx = 1
LSA_FOREST_TRUST_RECORD_TYPE.ForestTrustDomainInfo = 2
LSA_FOREST_TRUST_RECORD_TYPE.ForestTrustRecordTypeLast = 2
FILE_INFORMATION_CLASS = v_enum()
FILE_INFORMATION_CLASS.FileDirectoryInformation = 1
FILE_INFORMATION_CLASS.FileFullDirectoryInformation = 2
FILE_INFORMATION_CLASS.FileBothDirectoryInformation = 3
FILE_INFORMATION_CLASS.FileBasicInformation = 4
FILE_INFORMATION_CLASS.FileStandardInformation = 5
FILE_INFORMATION_CLASS.FileInternalInformation = 6
FILE_INFORMATION_CLASS.FileEaInformation = 7
FILE_INFORMATION_CLASS.FileAccessInformation = 8
FILE_INFORMATION_CLASS.FileNameInformation = 9
FILE_INFORMATION_CLASS.FileRenameInformation = 10
FILE_INFORMATION_CLASS.FileLinkInformation = 11
FILE_INFORMATION_CLASS.FileNamesInformation = 12
FILE_INFORMATION_CLASS.FileDispositionInformation = 13
FILE_INFORMATION_CLASS.FilePositionInformation = 14
FILE_INFORMATION_CLASS.FileFullEaInformation = 15
FILE_INFORMATION_CLASS.FileModeInformation = 16
FILE_INFORMATION_CLASS.FileAlignmentInformation = 17
FILE_INFORMATION_CLASS.FileAllInformation = 18
FILE_INFORMATION_CLASS.FileAllocationInformation = 19
FILE_INFORMATION_CLASS.FileEndOfFileInformation = 20
FILE_INFORMATION_CLASS.FileAlternateNameInformation = 21
FILE_INFORMATION_CLASS.FileStreamInformation = 22
FILE_INFORMATION_CLASS.FilePipeInformation = 23
FILE_INFORMATION_CLASS.FilePipeLocalInformation = 24
FILE_INFORMATION_CLASS.FilePipeRemoteInformation = 25
FILE_INFORMATION_CLASS.FileMailslotQueryInformation = 26
FILE_INFORMATION_CLASS.FileMailslotSetInformation = 27
FILE_INFORMATION_CLASS.FileCompressionInformation = 28
FILE_INFORMATION_CLASS.FileObjectIdInformation = 29
FILE_INFORMATION_CLASS.FileCompletionInformation = 30
FILE_INFORMATION_CLASS.FileMoveClusterInformation = 31
FILE_INFORMATION_CLASS.FileQuotaInformation = 32
FILE_INFORMATION_CLASS.FileReparsePointInformation = 33
FILE_INFORMATION_CLASS.FileNetworkOpenInformation = 34
FILE_INFORMATION_CLASS.FileAttributeTagInformation = 35
FILE_INFORMATION_CLASS.FileTrackingInformation = 36
FILE_INFORMATION_CLASS.FileIdBothDirectoryInformation = 37
FILE_INFORMATION_CLASS.FileIdFullDirectoryInformation = 38
FILE_INFORMATION_CLASS.FileValidDataLengthInformation = 39
FILE_INFORMATION_CLASS.FileShortNameInformation = 40
FILE_INFORMATION_CLASS.FileMaximumInformation = 41
EXCEPTION_DISPOSITION = v_enum()
EXCEPTION_DISPOSITION.ExceptionContinueExecution = 0
EXCEPTION_DISPOSITION.ExceptionContinueSearch = 1
EXCEPTION_DISPOSITION.ExceptionNestedException = 2
EXCEPTION_DISPOSITION.ExceptionCollidedUnwind = 3
PNP_VETO_TYPE = v_enum()
PNP_VETO_TYPE.PNP_VetoTypeUnknown = 0
PNP_VETO_TYPE.PNP_VetoLegacyDevice = 1
PNP_VETO_TYPE.PNP_VetoPendingClose = 2
PNP_VETO_TYPE.PNP_VetoWindowsApp = 3
PNP_VETO_TYPE.PNP_VetoWindowsService = 4
PNP_VETO_TYPE.PNP_VetoOutstandingOpen = 5
PNP_VETO_TYPE.PNP_VetoDevice = 6
PNP_VETO_TYPE.PNP_VetoDriver = 7
PNP_VETO_TYPE.PNP_VetoIllegalDeviceRequest = 8
PNP_VETO_TYPE.PNP_VetoInsufficientPower = 9
PNP_VETO_TYPE.PNP_VetoNonDisableable = 10
PNP_VETO_TYPE.PNP_VetoLegacyDriver = 11
PNP_VETO_TYPE.PNP_VetoInsufficientRights = 12
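# PCI_SIGNATURE values are four-byte ASCII tags pci.sys stamps into its
# extension structures: 1768116272 == 0x69635030, which reads '0Pci' in
# little-endian memory order, 1768116273 reads '1Pci', and so on.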
PCI_SIGNATURE = v_enum()
PCI_SIGNATURE.PciPdoExtensionType = 1768116272
PCI_SIGNATURE.PciFdoExtensionType = 1768116273
PCI_SIGNATURE.PciArb_Io = 1768116274
PCI_SIGNATURE.PciArb_Memory = 1768116275
PCI_SIGNATURE.PciArb_Interrupt = 1768116276
PCI_SIGNATURE.PciArb_BusNumber = 1768116277
PCI_SIGNATURE.PciTrans_Interrupt = 1768116278
PCI_SIGNATURE.PciInterface_BusHandler = 1768116279
PCI_SIGNATURE.PciInterface_IntRouteHandler = 1768116280
PCI_SIGNATURE.PciInterface_PciCb = 1768116281
PCI_SIGNATURE.PciInterface_LegacyDeviceDetection = 1768116282
PCI_SIGNATURE.PciInterface_PmeHandler = 1768116283
PCI_SIGNATURE.PciInterface_DevicePresent = 1768116284
PCI_SIGNATURE.PciInterface_NativeIde = 1768116285
PCI_SIGNATURE.PciInterface_AgpTarget = 1768116286
SECURITY_OPERATION_CODE = v_enum()
SECURITY_OPERATION_CODE.SetSecurityDescriptor = 0
SECURITY_OPERATION_CODE.QuerySecurityDescriptor = 1
SECURITY_OPERATION_CODE.DeleteSecurityDescriptor = 2
SECURITY_OPERATION_CODE.AssignSecurityDescriptor = 3
PP_NPAGED_LOOKASIDE_NUMBER = v_enum()
PP_NPAGED_LOOKASIDE_NUMBER.LookasideSmallIrpList = 0
PP_NPAGED_LOOKASIDE_NUMBER.LookasideLargeIrpList = 1
PP_NPAGED_LOOKASIDE_NUMBER.LookasideMdlList = 2
PP_NPAGED_LOOKASIDE_NUMBER.LookasideCreateInfoList = 3
PP_NPAGED_LOOKASIDE_NUMBER.LookasideNameBufferList = 4
PP_NPAGED_LOOKASIDE_NUMBER.LookasideTwilightList = 5
PP_NPAGED_LOOKASIDE_NUMBER.LookasideCompletionList = 6
PP_NPAGED_LOOKASIDE_NUMBER.LookasideMaximumList = 7
SECURITY_IMPERSONATION_LEVEL = v_enum()
SECURITY_IMPERSONATION_LEVEL.SecurityAnonymous = 0
SECURITY_IMPERSONATION_LEVEL.SecurityIdentification = 1
SECURITY_IMPERSONATION_LEVEL.SecurityImpersonation = 2
SECURITY_IMPERSONATION_LEVEL.SecurityDelegation = 3
DEVICE_USAGE_NOTIFICATION_TYPE = v_enum()
DEVICE_USAGE_NOTIFICATION_TYPE.DeviceUsageTypeUndefined = 0
DEVICE_USAGE_NOTIFICATION_TYPE.DeviceUsageTypePaging = 1
DEVICE_USAGE_NOTIFICATION_TYPE.DeviceUsageTypeHibernation = 2
DEVICE_USAGE_NOTIFICATION_TYPE.DeviceUsageTypeDumpFile = 3
PROXY_CLASS = v_enum()
PROXY_CLASS.ProxyFull = 0
PROXY_CLASS.ProxyService = 1
PROXY_CLASS.ProxyTree = 2
PROXY_CLASS.ProxyDirectory = 3
PLUGPLAY_EVENT_CATEGORY = v_enum()
PLUGPLAY_EVENT_CATEGORY.HardwareProfileChangeEvent = 0
PLUGPLAY_EVENT_CATEGORY.TargetDeviceChangeEvent = 1
PLUGPLAY_EVENT_CATEGORY.DeviceClassChangeEvent = 2
PLUGPLAY_EVENT_CATEGORY.CustomDeviceEvent = 3
PLUGPLAY_EVENT_CATEGORY.DeviceInstallEvent = 4
PLUGPLAY_EVENT_CATEGORY.DeviceArrivalEvent = 5
PLUGPLAY_EVENT_CATEGORY.PowerEvent = 6
PLUGPLAY_EVENT_CATEGORY.VetoEvent = 7
PLUGPLAY_EVENT_CATEGORY.BlockedDriverEvent = 8
PLUGPLAY_EVENT_CATEGORY.MaxPlugEventCategory = 9
INTERFACE_TYPE = v_enum()
INTERFACE_TYPE.InterfaceTypeUndefined = -1
INTERFACE_TYPE.Internal = 0
INTERFACE_TYPE.Isa = 1
INTERFACE_TYPE.Eisa = 2
INTERFACE_TYPE.MicroChannel = 3
INTERFACE_TYPE.TurboChannel = 4
INTERFACE_TYPE.PCIBus = 5
INTERFACE_TYPE.VMEBus = 6
INTERFACE_TYPE.NuBus = 7
INTERFACE_TYPE.PCMCIABus = 8
INTERFACE_TYPE.CBus = 9
INTERFACE_TYPE.MPIBus = 10
INTERFACE_TYPE.MPSABus = 11
INTERFACE_TYPE.ProcessorInternal = 12
INTERFACE_TYPE.InternalPowerBus = 13
INTERFACE_TYPE.PNPISABus = 14
INTERFACE_TYPE.PNPBus = 15
INTERFACE_TYPE.MaximumInterfaceType = 16
KWAIT_REASON = v_enum()
KWAIT_REASON.Executive = 0
KWAIT_REASON.FreePage = 1
KWAIT_REASON.PageIn = 2
KWAIT_REASON.PoolAllocation = 3
KWAIT_REASON.DelayExecution = 4
KWAIT_REASON.Suspended = 5
KWAIT_REASON.UserRequest = 6
KWAIT_REASON.WrExecutive = 7
KWAIT_REASON.WrFreePage = 8
KWAIT_REASON.WrPageIn = 9
KWAIT_REASON.WrPoolAllocation = 10
KWAIT_REASON.WrDelayExecution = 11
KWAIT_REASON.WrSuspended = 12
KWAIT_REASON.WrUserRequest = 13
KWAIT_REASON.WrEventPair = 14
KWAIT_REASON.WrQueue = 15
KWAIT_REASON.WrLpcReceive = 16
KWAIT_REASON.WrLpcReply = 17
KWAIT_REASON.WrVirtualMemory = 18
KWAIT_REASON.WrPageOut = 19
KWAIT_REASON.WrRendezvous = 20
KWAIT_REASON.Spare2 = 21
KWAIT_REASON.Spare3 = 22
KWAIT_REASON.Spare4 = 23
KWAIT_REASON.Spare5 = 24
KWAIT_REASON.Spare6 = 25
KWAIT_REASON.WrKernel = 26
KWAIT_REASON.MaximumWaitReason = 27
ALTERNATIVE_ARCHITECTURE_TYPE = v_enum()
ALTERNATIVE_ARCHITECTURE_TYPE.StandardDesign = 0
ALTERNATIVE_ARCHITECTURE_TYPE.NEC98x86 = 1
ALTERNATIVE_ARCHITECTURE_TYPE.EndAlternatives = 2
MMLISTS = v_enum()
MMLISTS.ZeroedPageList = 0
MMLISTS.FreePageList = 1
MMLISTS.StandbyPageList = 2
MMLISTS.ModifiedPageList = 3
MMLISTS.ModifiedNoWritePageList = 4
MMLISTS.BadPageList = 5
MMLISTS.ActiveAndValid = 6
MMLISTS.TransitionPage = 7
MEMORY_TYPE = v_enum()
MEMORY_TYPE.MemoryExceptionBlock = 0
MEMORY_TYPE.MemorySystemBlock = 1
MEMORY_TYPE.MemoryFree = 2
MEMORY_TYPE.MemoryBad = 3
MEMORY_TYPE.MemoryLoadedProgram = 4
MEMORY_TYPE.MemoryFirmwareTemporary = 5
MEMORY_TYPE.MemoryFirmwarePermanent = 6
MEMORY_TYPE.MemoryFreeContiguous = 7
MEMORY_TYPE.MemorySpecialMemory = 8
MEMORY_TYPE.MemoryMaximum = 9
PS_QUOTA_TYPE = v_enum()
PS_QUOTA_TYPE.PsNonPagedPool = 0
PS_QUOTA_TYPE.PsPagedPool = 1
PS_QUOTA_TYPE.PsPageFile = 2
PS_QUOTA_TYPE.PsQuotaTypes = 3
ReplacesCorHdrNumericDefines = v_enum()
ReplacesCorHdrNumericDefines.COMIMAGE_FLAGS_ILONLY = 1
ReplacesCorHdrNumericDefines.COMIMAGE_FLAGS_32BITREQUIRED = 2
ReplacesCorHdrNumericDefines.COMIMAGE_FLAGS_IL_LIBRARY = 4
ReplacesCorHdrNumericDefines.COMIMAGE_FLAGS_STRONGNAMESIGNED = 8
ReplacesCorHdrNumericDefines.COMIMAGE_FLAGS_TRACKDEBUGDATA = 65536
ReplacesCorHdrNumericDefines.COR_VERSION_MAJOR_V2 = 2
ReplacesCorHdrNumericDefines.COR_VERSION_MAJOR = 2
ReplacesCorHdrNumericDefines.COR_VERSION_MINOR = 0
ReplacesCorHdrNumericDefines.COR_DELETED_NAME_LENGTH = 8
ReplacesCorHdrNumericDefines.COR_VTABLEGAP_NAME_LENGTH = 8
ReplacesCorHdrNumericDefines.NATIVE_TYPE_MAX_CB = 1
ReplacesCorHdrNumericDefines.COR_ILMETHOD_SECT_SMALL_MAX_DATASIZE = 255
ReplacesCorHdrNumericDefines.IMAGE_COR_MIH_METHODRVA = 1
ReplacesCorHdrNumericDefines.IMAGE_COR_MIH_EHRVA = 2
ReplacesCorHdrNumericDefines.IMAGE_COR_MIH_BASICBLOCK = 8
ReplacesCorHdrNumericDefines.COR_VTABLE_32BIT = 1
ReplacesCorHdrNumericDefines.COR_VTABLE_64BIT = 2
ReplacesCorHdrNumericDefines.COR_VTABLE_FROM_UNMANAGED = 4
ReplacesCorHdrNumericDefines.COR_VTABLE_CALL_MOST_DERIVED = 16
ReplacesCorHdrNumericDefines.IMAGE_COR_EATJ_THUNK_SIZE = 32
ReplacesCorHdrNumericDefines.MAX_CLASS_NAME = 1024
ReplacesCorHdrNumericDefines.MAX_PACKAGE_NAME = 1024
ARBITER_RESULT = v_enum()
ARBITER_RESULT.ArbiterResultUndefined = -1
ARBITER_RESULT.ArbiterResultSuccess = 0
ARBITER_RESULT.ArbiterResultExternalConflict = 1
ARBITER_RESULT.ArbiterResultNullRequest = 2
SYSTEM_POWER_STATE = v_enum()
SYSTEM_POWER_STATE.PowerSystemUnspecified = 0
SYSTEM_POWER_STATE.PowerSystemWorking = 1
SYSTEM_POWER_STATE.PowerSystemSleeping1 = 2
SYSTEM_POWER_STATE.PowerSystemSleeping2 = 3
SYSTEM_POWER_STATE.PowerSystemSleeping3 = 4
SYSTEM_POWER_STATE.PowerSystemHibernate = 5
SYSTEM_POWER_STATE.PowerSystemShutdown = 6
SYSTEM_POWER_STATE.PowerSystemMaximum = 7
MEMORY_CACHING_TYPE_ORIG = v_enum()
MEMORY_CACHING_TYPE_ORIG.MmFrameBufferCached = 2
POWER_ACTION = v_enum()
POWER_ACTION.PowerActionNone = 0
POWER_ACTION.PowerActionReserved = 1
POWER_ACTION.PowerActionSleep = 2
POWER_ACTION.PowerActionHibernate = 3
POWER_ACTION.PowerActionShutdown = 4
POWER_ACTION.PowerActionShutdownReset = 5
POWER_ACTION.PowerActionShutdownOff = 6
POWER_ACTION.PowerActionWarmEject = 7
PNP_DEVNODE_STATE = v_enum()
PNP_DEVNODE_STATE.DeviceNodeUnspecified = 768
PNP_DEVNODE_STATE.DeviceNodeUninitialized = 769
PNP_DEVNODE_STATE.DeviceNodeInitialized = 770
PNP_DEVNODE_STATE.DeviceNodeDriversAdded = 771
PNP_DEVNODE_STATE.DeviceNodeResourcesAssigned = 772
PNP_DEVNODE_STATE.DeviceNodeStartPending = 773
PNP_DEVNODE_STATE.DeviceNodeStartCompletion = 774
PNP_DEVNODE_STATE.DeviceNodeStartPostWork = 775
PNP_DEVNODE_STATE.DeviceNodeStarted = 776
PNP_DEVNODE_STATE.DeviceNodeQueryStopped = 777
PNP_DEVNODE_STATE.DeviceNodeStopped = 778
PNP_DEVNODE_STATE.DeviceNodeRestartCompletion = 779
PNP_DEVNODE_STATE.DeviceNodeEnumeratePending = 780
PNP_DEVNODE_STATE.DeviceNodeEnumerateCompletion = 781
PNP_DEVNODE_STATE.DeviceNodeAwaitingQueuedDeletion = 782
PNP_DEVNODE_STATE.DeviceNodeAwaitingQueuedRemoval = 783
PNP_DEVNODE_STATE.DeviceNodeQueryRemoved = 784
PNP_DEVNODE_STATE.DeviceNodeRemovePendingCloses = 785
PNP_DEVNODE_STATE.DeviceNodeRemoved = 786
PNP_DEVNODE_STATE.DeviceNodeDeletePendingCloses = 787
PNP_DEVNODE_STATE.DeviceNodeDeleted = 788
PROFILE_STATUS = v_enum()
PROFILE_STATUS.DOCK_NOTDOCKDEVICE = 0
PROFILE_STATUS.DOCK_QUIESCENT = 1
PROFILE_STATUS.DOCK_ARRIVING = 2
PROFILE_STATUS.DOCK_DEPARTING = 3
PROFILE_STATUS.DOCK_EJECTIRP_COMPLETED = 4
MI_PFN_CACHE_ATTRIBUTE = v_enum()
MI_PFN_CACHE_ATTRIBUTE.MiNonCached = 0
MI_PFN_CACHE_ATTRIBUTE.MiCached = 1
MI_PFN_CACHE_ATTRIBUTE.MiWriteCombined = 2
MI_PFN_CACHE_ATTRIBUTE.MiNotMapped = 3
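# Structure definitions follow. Each class subclasses vstruct.VStruct and
# declares its fields in declaration order, so an instance can be parsed
# straight out of raw memory. A minimal usage sketch, assuming vivisect's
# standard VStruct.vsParse() interface (not defined in this file):
#
#     li = LARGE_INTEGER()
#     li.vsParse('\x39\x05\x00\x00\x00\x00\x00\x00')
#     assert li.LowPart == 0x539   # little-endian uint32 at offset 0
#
# Field names and _padXXXX spacer members appear to mirror 32-bit
# Windows XP symbol layouts.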
class KEXECUTE_OPTIONS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ExecuteDisable = v_uint8()
class PCI_PMC(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Version = v_uint8()
self.Support = PM_SUPPORT()
class _unnamed_14487(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListHead = LIST_ENTRY()
class _unnamed_14486(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.UserData = v_ptr32()
self.Owner = v_ptr32()
class _unnamed_16779(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.EndingOffset = v_ptr32()
self.ResourceToRelease = v_ptr32()
class SEGMENT_OBJECT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BaseAddress = v_ptr32()
self.TotalNumberOfPtes = v_uint32()
self.SizeOfSegment = LARGE_INTEGER()
self.NonExtendedPtes = v_uint32()
self.ImageCommitment = v_uint32()
self.ControlArea = v_ptr32()
self.Subsection = v_ptr32()
self.LargeControlArea = v_ptr32()
self.MmSectionFlags = v_ptr32()
self.MmSubSectionFlags = v_ptr32()
self._pad0030 = v_bytes(size=4)
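# DUAL is (apparently) the configuration manager's per-storage hive map:
# FreeDisplay is a set of RTL_BITMAPs tracking free cells by size class.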
class DUAL(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.Map = v_ptr32()
self.SmallDir = v_ptr32()
self.Guard = v_uint32()
self.FreeDisplay = vstruct.VArray([ RTL_BITMAP() for i in xrange(24) ])
self.FreeSummary = v_uint32()
self.FreeBins = LIST_ENTRY()
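# Note: variable-length C trailing arrays (e.g. SID.SubAuthority below)
# are flattened by the generator to a single-element VArray; the real
# element count comes from a sibling field such as SubAuthorityCount.
# The same xrange(1) pattern recurs throughout this module.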
class SID(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Revision = v_uint8()
self.SubAuthorityCount = v_uint8()
self.IdentifierAuthority = SID_IDENTIFIER_AUTHORITY()
self.SubAuthority = vstruct.VArray([ v_uint32() for i in xrange(1) ])
class MMPTE_HARDWARE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Valid = v_uint32()
class PCI_FUNCTION_RESOURCES(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Limit = vstruct.VArray([ IO_RESOURCE_DESCRIPTOR() for i in xrange(7) ])
self.Current = vstruct.VArray([ CM_PARTIAL_RESOURCE_DESCRIPTOR() for i in xrange(7) ])
class _unnamed_13153(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.EntireFrame = v_uint32()
class DBGKD_SET_SPECIAL_CALL64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SpecialCall = v_uint64()
class _unnamed_13092(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Bytes = _unnamed_14544()
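# 32-bit Task State Segment (x86): holds the ring-0 stack pointer
# (Esp0/Ss0) and the I/O permission bitmap located via IoMapBase.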
class KTSS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Backlink = v_uint16()
self.Reserved0 = v_uint16()
self.Esp0 = v_uint32()
self.Ss0 = v_uint16()
self.Reserved1 = v_uint16()
self.NotUsed1 = vstruct.VArray([ v_uint32() for i in xrange(4) ])
self.CR3 = v_uint32()
self.Eip = v_uint32()
self.EFlags = v_uint32()
self.Eax = v_uint32()
self.Ecx = v_uint32()
self.Edx = v_uint32()
self.Ebx = v_uint32()
self.Esp = v_uint32()
self.Ebp = v_uint32()
self.Esi = v_uint32()
self.Edi = v_uint32()
self.Es = v_uint16()
self.Reserved2 = v_uint16()
self.Cs = v_uint16()
self.Reserved3 = v_uint16()
self.Ss = v_uint16()
self.Reserved4 = v_uint16()
self.Ds = v_uint16()
self.Reserved5 = v_uint16()
self.Fs = v_uint16()
self.Reserved6 = v_uint16()
self.Gs = v_uint16()
self.Reserved7 = v_uint16()
self.LDT = v_uint16()
self.Reserved8 = v_uint16()
self.Flags = v_uint16()
self.IoMapBase = v_uint16()
self.IoMaps = vstruct.VArray([ KiIoAccessMap() for i in xrange(1) ])
self.IntDirectionMap = vstruct.VArray([ v_uint8() for i in xrange(32) ])
class CURDIR(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DosPath = UNICODE_STRING()
self.Handle = v_ptr32()
class DBGKD_GET_INTERNAL_BREAKPOINT32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BreakpointAddress = v_uint32()
self.Flags = v_uint32()
self.Calls = v_uint32()
self.MaxCallsPerPeriod = v_uint32()
self.MinInstructions = v_uint32()
self.MaxInstructions = v_uint32()
self.TotalInstructions = v_uint32()
class DBGKD_MANIPULATE_STATE32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ApiNumber = v_uint32()
self.ProcessorLevel = v_uint16()
self.Processor = v_uint16()
self.ReturnStatus = v_uint32()
self.u = _unnamed_11882()
class _unnamed_11075(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListEntry = LIST_ENTRY()
self._pad0028 = v_bytes(size=32)
class PROCESSOR_POWER_POLICY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Revision = v_uint32()
self.DynamicThrottle = v_uint8()
self.Spare = vstruct.VArray([ v_uint8() for i in xrange(3) ])
self.DisableCStates = v_uint32()
self.PolicyCount = v_uint32()
self.Policy = vstruct.VArray([ PROCESSOR_POWER_POLICY_INFO() for i in xrange(3) ])
class _unnamed_11597(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Long = v_uint32()
class _unnamed_12520(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LongFlags = v_uint32()
class BITMAP_RANGE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Links = LIST_ENTRY()
self.BasePage = v_uint64()
self.FirstDirtyPage = v_uint32()
self.LastDirtyPage = v_uint32()
self.DirtyPages = v_uint32()
self.Bitmap = v_ptr32()
class HARDWARE_PTE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Valid = v_uint32()
class HANDLE_TABLE_ENTRY_INFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.AuditMask = v_uint32()
class DBGKD_WRITE_MEMORY32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TargetBaseAddress = v_uint32()
self.TransferCount = v_uint32()
self.ActualBytesWritten = v_uint32()
class _unnamed_13252(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.VirtualAddress = v_ptr32()
class PCI_INTERFACE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.InterfaceType = v_ptr32()
self.MinSize = v_uint16()
self.MinVersion = v_uint16()
self.MaxVersion = v_uint16()
self.Flags = v_uint16()
self.ReferenceCount = v_uint32()
self.Signature = v_uint32()
self.Constructor = v_ptr32()
self.Initializer = v_ptr32()
class _unnamed_16629(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DeviceId = vstruct.VArray([ v_uint16() for i in xrange(1) ])
class MMWSLENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Valid = v_uint32()
class _unnamed_12976(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.AsynchronousParameters = _unnamed_14745()
class CM_PARTIAL_RESOURCE_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Version = v_uint16()
self.Revision = v_uint16()
self.Count = v_uint32()
self.PartialDescriptors = vstruct.VArray([ CM_PARTIAL_RESOURCE_DESCRIPTOR() for i in xrange(1) ])
class DBGKD_RESTORE_BREAKPOINT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BreakPointHandle = v_uint32()
class DEVICE_CAPABILITIES(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Size = v_uint16()
self.Version = v_uint16()
self.DeviceD1 = v_uint32()
self.Address = v_uint32()
self.UINumber = v_uint32()
self.DeviceState = vstruct.VArray([ DEVICE_POWER_STATE() for i in xrange(7) ])
self.SystemWake = v_uint32()
self.DeviceWake = v_uint32()
self.D1Latency = v_uint32()
self.D2Latency = v_uint32()
self.D3Latency = v_uint32()
class _unnamed_12973(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.MasterIrp = v_ptr32()
class _unnamed_16624(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ClassGuid = GUID()
self.SymbolicLinkName = vstruct.VArray([ v_uint16() for i in xrange(1) ])
self._pad0014 = v_bytes(size=2)
class _unnamed_16310(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.MinBusNumber = v_uint32()
self.MaxBusNumber = v_uint32()
self.Reserved = v_uint32()
class _unnamed_16315(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Priority = v_uint32()
self.Reserved1 = v_uint32()
self.Reserved2 = v_uint32()
class EXCEPTION_RECORD64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ExceptionCode = v_uint32()
self.ExceptionFlags = v_uint32()
self.ExceptionRecord = v_uint64()
self.ExceptionAddress = v_uint64()
self.NumberParameters = v_uint32()
self.unusedAlignment = v_uint32()
self.ExceptionInformation = vstruct.VArray([ v_uint64() for i in xrange(15) ])
class _unnamed_16250(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ProviderId = v_uint32()
self.DataPath = v_ptr32()
self.BufferSize = v_uint32()
self.Buffer = v_ptr32()
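# Scheduler-visible portion of the process object; embedded at offset 0
# of EPROCESS as the Pcb field.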
class KPROCESS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Header = DISPATCHER_HEADER()
self.ProfileListHead = LIST_ENTRY()
self.DirectoryTableBase = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self.LdtDescriptor = KGDTENTRY()
self.Int21Descriptor = KIDTENTRY()
self.IopmOffset = v_uint16()
self.Iopl = v_uint8()
self.Unused = v_uint8()
self.ActiveProcessors = v_uint32()
self.KernelTime = v_uint32()
self.UserTime = v_uint32()
self.ReadyListHead = LIST_ENTRY()
self.SwapListEntry = SINGLE_LIST_ENTRY()
self.VdmTrapcHandler = v_ptr32()
self.ThreadListHead = LIST_ENTRY()
self.ProcessLock = v_uint32()
self.Affinity = v_uint32()
self.StackCount = v_uint16()
self.BasePriority = v_uint8()
self.ThreadQuantum = v_uint8()
self.AutoAlignment = v_uint8()
self.State = v_uint8()
self.ThreadSeed = v_uint8()
self.DisableBoost = v_uint8()
self.PowerState = v_uint8()
self.DisableQuantum = v_uint8()
self.IdealNode = v_uint8()
self.Flags = KEXECUTE_OPTIONS()
class DEVICE_OBJECT_POWER_EXTENSION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.IdleCount = v_uint32()
self.ConservationIdleTime = v_uint32()
self.PerformanceIdleTime = v_uint32()
self.DeviceObject = v_ptr32()
self.IdleList = LIST_ENTRY()
self.DeviceType = v_uint8()
self._pad001c = v_bytes(size=3)
self.State = v_uint32()
self.NotifySourceList = LIST_ENTRY()
self.NotifyTargetList = LIST_ENTRY()
self.PowerChannelSummary = POWER_CHANNEL_SUMMARY()
self.Volume = LIST_ENTRY()
class MMPTE_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Valid = v_uint32()
class HEAP_TAG_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Allocs = v_uint32()
self.Frees = v_uint32()
self.Size = v_uint32()
self.TagIndex = v_uint16()
self.CreatorBackTraceIndex = v_uint16()
self.TagName = vstruct.VArray([ v_uint16() for i in xrange(24) ])
class VI_POOL_ENTRY_INUSE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.VirtualAddress = v_ptr32()
self.CallingAddress = v_ptr32()
self.NumberOfBytes = v_uint32()
self.Tag = v_uint32()
class HEAP_LOOKASIDE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListHead = SLIST_HEADER()
self.Depth = v_uint16()
self.MaximumDepth = v_uint16()
self.TotalAllocates = v_uint32()
self.AllocateMisses = v_uint32()
self.TotalFrees = v_uint32()
self.FreeMisses = v_uint32()
self.LastTotalAllocates = v_uint32()
self.LastAllocateMisses = v_uint32()
self.Counters = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self._pad0030 = v_bytes(size=4)
class MMPTE_TRANSITION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Valid = v_uint32()
class _unnamed_16247(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.AllocatedResources = v_ptr32()
self.AllocatedResourcesTranslated = v_ptr32()
class OBJECT_HANDLE_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.HandleAttributes = v_uint32()
self.GrantedAccess = v_uint32()
class OWNER_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.OwnerThread = v_uint32()
self.OwnerCount = v_uint32()
class DEVOBJ_EXTENSION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Size = v_uint16()
self.DeviceObject = v_ptr32()
self.PowerFlags = v_uint32()
self.Dope = v_ptr32()
self.ExtensionFlags = v_uint32()
self.DeviceNode = v_ptr32()
self.AttachedTo = v_ptr32()
self.StartIoCount = v_uint32()
self.StartIoKey = v_uint32()
self.StartIoFlags = v_uint32()
self.Vpb = v_ptr32()
class _unnamed_14357(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.bits = _unnamed_16509()
class ARBITER_ALLOCATION_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Start = v_uint64()
self.End = v_uint64()
self.CurrentMinimum = v_uint64()
self.CurrentMaximum = v_uint64()
self.Entry = v_ptr32()
self.CurrentAlternative = v_ptr32()
self.AlternativeCount = v_uint32()
self.Alternatives = v_ptr32()
self.Flags = v_uint16()
self.RangeAttributes = v_uint8()
self.RangeAvailableAttributes = v_uint8()
self.WorkSpace = v_uint32()
class DBGKD_SET_INTERNAL_BREAKPOINT64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BreakpointAddress = v_uint64()
self.Flags = v_uint32()
self._pad0010 = v_bytes(size=4)
class _unnamed_16089(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.EaList = v_ptr32()
self.EaListLength = v_uint32()
self.EaIndex = v_uint32()
class MM_DRIVER_VERIFIER_DATA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Level = v_uint32()
self.RaiseIrqls = v_uint32()
self.AcquireSpinLocks = v_uint32()
self.SynchronizeExecutions = v_uint32()
self.AllocationsAttempted = v_uint32()
self.AllocationsSucceeded = v_uint32()
self.AllocationsSucceededSpecialPool = v_uint32()
self.AllocationsWithNoTag = v_uint32()
self.TrimRequests = v_uint32()
self.Trims = v_uint32()
self.AllocationsFailed = v_uint32()
self.AllocationsFailedDeliberately = v_uint32()
self.Loads = v_uint32()
self.Unloads = v_uint32()
self.UnTrackedPool = v_uint32()
self.UserTrims = v_uint32()
self.CurrentPagedPoolAllocations = v_uint32()
self.CurrentNonPagedPoolAllocations = v_uint32()
self.PeakPagedPoolAllocations = v_uint32()
self.PeakNonPagedPoolAllocations = v_uint32()
self.PagedBytes = v_uint32()
self.NonPagedBytes = v_uint32()
self.PeakPagedBytes = v_uint32()
self.PeakNonPagedBytes = v_uint32()
self.BurstAllocationsFailedDeliberately = v_uint32()
self.SessionTrims = v_uint32()
self.Reserved = vstruct.VArray([ v_uint32() for i in xrange(2) ])
class PI_BUS_EXTENSION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Flags = v_uint32()
self.NumberCSNs = v_uint32()
self.ReadDataPort = v_ptr32()
self.DataPortMapped = v_uint8()
self._pad0010 = v_bytes(size=3)
self.AddressPort = v_ptr32()
self.AddrPortMapped = v_uint8()
self._pad0018 = v_bytes(size=3)
self.CommandPort = v_ptr32()
self.CmdPortMapped = v_uint8()
self._pad0020 = v_bytes(size=3)
self.NextSlotNumber = v_uint32()
self.DeviceList = SINGLE_LIST_ENTRY()
self.CardList = SINGLE_LIST_ENTRY()
self.PhysicalBusDevice = v_ptr32()
self.FunctionalBusDevice = v_ptr32()
self.AttachedDevice = v_ptr32()
self.BusNumber = v_uint32()
self.SystemPowerState = v_uint32()
self.DevicePowerState = v_uint32()
class MAILSLOT_CREATE_PARAMETERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.MailslotQuota = v_uint32()
self.MaximumMessageSize = v_uint32()
self.ReadTimeout = LARGE_INTEGER()
self.TimeoutSpecified = v_uint8()
self._pad0018 = v_bytes(size=7)
class FS_FILTER_CALLBACK_DATA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SizeOfFsFilterCallbackData = v_uint32()
self.Operation = v_uint8()
self.Reserved = v_uint8()
self._pad0008 = v_bytes(size=2)
self.DeviceObject = v_ptr32()
self.FileObject = v_ptr32()
self.Parameters = FS_FILTER_PARAMETERS()
class ACCESS_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.OperationID = LUID()
self.SecurityEvaluated = v_uint8()
self.GenerateAudit = v_uint8()
self.GenerateOnClose = v_uint8()
self.PrivilegesAllocated = v_uint8()
self.Flags = v_uint32()
self.RemainingDesiredAccess = v_uint32()
self.PreviouslyGrantedAccess = v_uint32()
self.OriginalDesiredAccess = v_uint32()
self.SubjectSecurityContext = SECURITY_SUBJECT_CONTEXT()
self.SecurityDescriptor = v_ptr32()
self.AuxData = v_ptr32()
self.Privileges = _unnamed_14065()
self.AuditPrivileges = v_uint8()
self._pad0064 = v_bytes(size=3)
self.ObjectName = UNICODE_STRING()
self.ObjectTypeName = UNICODE_STRING()
class FILE_STANDARD_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.AllocationSize = LARGE_INTEGER()
self.EndOfFile = LARGE_INTEGER()
self.NumberOfLinks = v_uint32()
self.DeletePending = v_uint8()
self.Directory = v_uint8()
self._pad0018 = v_bytes(size=2)
class EX_PUSH_LOCK_CACHE_AWARE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Locks = vstruct.VArray([ v_ptr32() for i in xrange(1) ])
class POOL_BLOCK_HEAD(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Header = POOL_HEADER()
self.List = LIST_ENTRY()
class DBGKD_SET_SPECIAL_CALL32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SpecialCall = v_uint32()
class SYSTEM_POWER_LEVEL(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Enable = v_uint8()
self.Spare = vstruct.VArray([ v_uint8() for i in xrange(3) ])
self.BatteryLevel = v_uint32()
self.PowerPolicy = POWER_ACTION_POLICY()
self.MinSystemState = v_uint32()
class DBGKD_LOAD_SYMBOLS32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PathNameLength = v_uint32()
self.BaseOfDll = v_uint32()
self.ProcessId = v_uint32()
self.CheckSum = v_uint32()
self.SizeOfImage = v_uint32()
self.UnloadSymbols = v_uint8()
self._pad0018 = v_bytes(size=3)
class DBGKM_EXCEPTION32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ExceptionRecord = EXCEPTION_RECORD32()
self.FirstChance = v_uint32()
class PAGEFAULT_HISTORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CurrentIndex = v_uint32()
self.MaxIndex = v_uint32()
self.SpinLock = v_uint32()
self.Reserved = v_ptr32()
self.WatchInfo = vstruct.VArray([ PROCESS_WS_WATCH_INFORMATION() for i in xrange(1) ])
class _unnamed_16107(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.FsInformationClass = v_uint32()
class WNODE_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BufferSize = v_uint32()
self.ProviderId = v_uint32()
self.HistoricalContext = v_uint64()
self.CountLost = v_uint32()
self._pad0018 = v_bytes(size=4)
self.Guid = GUID()
self.ClientContext = v_uint32()
self.Flags = v_uint32()
class PROCESS_WS_WATCH_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.FaultingPc = v_ptr32()
self.FaultingVa = v_ptr32()
class SECTION_OBJECT_POINTERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DataSectionObject = v_ptr32()
self.SharedCacheMap = v_ptr32()
self.ImageSectionObject = v_ptr32()
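# Memory Descriptor List header describing the physical pages backing a
# virtual buffer; the PFN array follows the header in memory and is not
# part of this fixed-size layout.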
class MDL(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.Size = v_uint16()
self.MdlFlags = v_uint16()
self.Process = v_ptr32()
self.MappedSystemVa = v_ptr32()
self.StartVa = v_ptr32()
self.ByteCount = v_uint32()
self.ByteOffset = v_uint32()
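# x86 trap frame built on kernel entry for interrupts, exceptions, and
# system calls; the leading Dbg* members appear to be scratch copies
# kept for debugger/unwinder use.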
class KTRAP_FRAME(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DbgEbp = v_uint32()
self.DbgEip = v_uint32()
self.DbgArgMark = v_uint32()
self.DbgArgPointer = v_uint32()
self.TempSegCs = v_uint32()
self.TempEsp = v_uint32()
self.Dr0 = v_uint32()
self.Dr1 = v_uint32()
self.Dr2 = v_uint32()
self.Dr3 = v_uint32()
self.Dr6 = v_uint32()
self.Dr7 = v_uint32()
self.SegGs = v_uint32()
self.SegEs = v_uint32()
self.SegDs = v_uint32()
self.Edx = v_uint32()
self.Ecx = v_uint32()
self.Eax = v_uint32()
self.PreviousPreviousMode = v_uint32()
self.ExceptionList = v_ptr32()
self.SegFs = v_uint32()
self.Edi = v_uint32()
self.Esi = v_uint32()
self.Ebx = v_uint32()
self.Ebp = v_uint32()
self.ErrCode = v_uint32()
self.Eip = v_uint32()
self.SegCs = v_uint32()
self.EFlags = v_uint32()
self.HardwareEsp = v_uint32()
self.HardwareSegSs = v_uint32()
self.V86Es = v_uint32()
self.V86Ds = v_uint32()
self.V86Fs = v_uint32()
self.V86Gs = v_uint32()
class CM_INDEX_HINT_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Count = v_uint32()
self.HashKey = vstruct.VArray([ v_uint32() for i in xrange(1) ])
class SEP_AUDIT_POLICY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PolicyElements = SEP_AUDIT_POLICY_CATEGORIES()
class MMPTE_SOFTWARE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Valid = v_uint32()
class IO_TIMER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.TimerFlag = v_uint16()
self.TimerList = LIST_ENTRY()
self.TimerRoutine = v_ptr32()
self.Context = v_ptr32()
self.DeviceObject = v_ptr32()
class Wx86ThreadState(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CallBx86Eip = v_ptr32()
self.DeallocationCpu = v_ptr32()
self.UseKnownWx86Dll = v_uint8()
self.OleStubInvoked = v_uint8()
self._pad000c = v_bytes(size=2)
class _unnamed_12112(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.FreeListsInUseTerminate = v_uint16()
class _unnamed_12111(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.FreeListsInUseUlong = vstruct.VArray([ v_uint32() for i in xrange(4) ])
class _unnamed_16218(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DeviceTextType = v_uint32()
self.LocaleId = v_uint32()
class MM_SESSION_SPACE_FLAGS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Initialized = v_uint32()
class _unnamed_14629(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.type0 = PCI_HEADER_TYPE_0()
class EVENT_COUNTER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListEntry = SINGLE_LIST_ENTRY()
self.RefCount = v_uint32()
self.Event = KEVENT()
class SECURITY_DESCRIPTOR(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Revision = v_uint8()
self.Sbz1 = v_uint8()
self.Control = v_uint16()
self.Owner = v_ptr32()
self.Group = v_ptr32()
self.Sacl = v_ptr32()
self.Dacl = v_ptr32()
class SECURITY_TOKEN_AUDIT_DATA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.GrantMask = v_uint32()
self.DenyMask = v_uint32()
class EX_WORK_QUEUE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.WorkerQueue = KQUEUE()
self.DynamicThreadCount = v_uint32()
self.WorkItemsProcessed = v_uint32()
self.WorkItemsProcessedLastPass = v_uint32()
self.QueueDepthLastPass = v_uint32()
self.Info = EX_QUEUE_WORKER_INFO()
class OBJECT_TYPE_INITIALIZER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint16()
self.UseDefaultObject = v_uint8()
self.CaseInsensitive = v_uint8()
self.InvalidAttributes = v_uint32()
self.GenericMapping = GENERIC_MAPPING()
self.ValidAccessMask = v_uint32()
self.SecurityRequired = v_uint8()
self.MaintainHandleCount = v_uint8()
self.MaintainTypeList = v_uint8()
self._pad0020 = v_bytes(size=1)
self.PoolType = v_uint32()
self.DefaultPagedPoolCharge = v_uint32()
self.DefaultNonPagedPoolCharge = v_uint32()
self.DumpProcedure = v_ptr32()
self.OpenProcedure = v_ptr32()
self.CloseProcedure = v_ptr32()
self.DeleteProcedure = v_ptr32()
self.ParseProcedure = v_ptr32()
self.SecurityProcedure = v_ptr32()
self.QueryNameProcedure = v_ptr32()
self.OkayToCloseProcedure = v_ptr32()
class VACB_LEVEL_REFERENCE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Reference = v_uint32()
self.SpecialReference = v_uint32()
class _unnamed_16627(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DeviceIds = vstruct.VArray([ v_uint16() for i in xrange(1) ])
class HEAP_ENTRY_EXTRA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.AllocatorBackTraceIndex = v_uint16()
self.TagIndex = v_uint16()
self.Settable = v_uint32()
class POP_DEVICE_SYS_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.IrpMinor = v_uint8()
self._pad0004 = v_bytes(size=3)
self.SystemState = v_uint32()
self.Event = KEVENT()
self.SpinLock = v_uint32()
self.Thread = v_ptr32()
self.GetNewDeviceList = v_uint8()
self._pad0024 = v_bytes(size=3)
self.Order = PO_DEVICE_NOTIFY_ORDER()
self.Status = v_uint32()
self.FailedDevice = v_ptr32()
self.Waking = v_uint8()
self.Cancelled = v_uint8()
self.IgnoreErrors = v_uint8()
self.IgnoreNotImplemented = v_uint8()
self.WaitAny = v_uint8()
self.WaitAll = v_uint8()
self._pad027c = v_bytes(size=2)
self.PresentIrpQueue = LIST_ENTRY()
self.Head = POP_DEVICE_POWER_IRP()
self.PowerIrpState = vstruct.VArray([ POP_DEVICE_POWER_IRP() for i in xrange(20) ])
class VI_DEADLOCK_RESOURCE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint32()
self.NodeCount = v_uint32()
self.ResourceAddress = v_ptr32()
self.ThreadOwner = v_ptr32()
self.ResourceList = LIST_ENTRY()
self.HashChainList = LIST_ENTRY()
self.StackTrace = vstruct.VArray([ v_ptr32() for i in xrange(8) ])
self.LastAcquireTrace = vstruct.VArray([ v_ptr32() for i in xrange(8) ])
self.LastReleaseTrace = vstruct.VArray([ v_ptr32() for i in xrange(8) ])
class HEAP_PSEUDO_TAG_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Allocs = v_uint32()
self.Frees = v_uint32()
self.Size = v_uint32()
class _unnamed_13834(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Generic = _unnamed_14637()
class CM_KEY_REFERENCE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.KeyCell = v_uint32()
self.KeyHive = v_ptr32()
class MMSECTION_FLAGS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BeingDeleted = v_uint32()
class IA64_DBGKD_CONTROL_SET(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Continue = v_uint32()
self.CurrentSymbolStart = v_uint64()
self.CurrentSymbolEnd = v_uint64()
class DBGKD_GET_INTERNAL_BREAKPOINT64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BreakpointAddress = v_uint64()
self.Flags = v_uint32()
self.Calls = v_uint32()
self.MaxCallsPerPeriod = v_uint32()
self.MinInstructions = v_uint32()
self.MaxInstructions = v_uint32()
self.TotalInstructions = v_uint32()
class PROCESSOR_POWER_POLICY_INFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TimeCheck = v_uint32()
self.DemoteLimit = v_uint32()
self.PromoteLimit = v_uint32()
self.DemotePercent = v_uint8()
self.PromotePercent = v_uint8()
self.Spare = vstruct.VArray([ v_uint8() for i in xrange(2) ])
self.AllowDemotion = v_uint32()
class _unnamed_16213(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.IdType = v_uint32()
class POP_POWER_ACTION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Updates = v_uint8()
self.State = v_uint8()
self.Shutdown = v_uint8()
self._pad0004 = v_bytes(size=1)
self.Action = v_uint32()
self.LightestState = v_uint32()
self.Flags = v_uint32()
self.Status = v_uint32()
self.IrpMinor = v_uint8()
self._pad0018 = v_bytes(size=3)
self.SystemState = v_uint32()
self.NextSystemState = v_uint32()
self.ShutdownBugCode = v_ptr32()
self.DevState = v_ptr32()
self.HiberContext = v_ptr32()
self.LastWakeState = v_uint32()
self.WakeTime = v_uint64()
self.SleepTime = v_uint64()
class OBJECT_CREATE_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Attributes = v_uint32()
self.RootDirectory = v_ptr32()
self.ParseContext = v_ptr32()
self.ProbeMode = v_uint8()
self._pad0010 = v_bytes(size=3)
self.PagedPoolCharge = v_uint32()
self.NonPagedPoolCharge = v_uint32()
self.SecurityDescriptorCharge = v_uint32()
self.SecurityDescriptor = v_ptr32()
self.SecurityQos = v_ptr32()
self.SecurityQualityOfService = SECURITY_QUALITY_OF_SERVICE()
class OBJECT_HEADER_CREATOR_INFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TypeList = LIST_ENTRY()
self.CreatorUniqueProcess = v_ptr32()
self.CreatorBackTraceIndex = v_uint16()
self.Reserved = v_uint16()
class PAGED_LOOKASIDE_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.L = GENERAL_LOOKASIDE()
self.Lock__ObsoleteButDoNotDelete = FAST_MUTEX()
self._pad0100 = v_bytes(size=96)
class HEAP_STOP_ON_TAG(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.HeapAndTagIndex = v_uint32()
class PO_NOTIFY_ORDER_LEVEL(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LevelReady = KEVENT()
self.DeviceCount = v_uint32()
self.ActiveCount = v_uint32()
self.WaitSleep = LIST_ENTRY()
self.ReadySleep = LIST_ENTRY()
self.Pending = LIST_ENTRY()
self.Complete = LIST_ENTRY()
self.ReadyS0 = LIST_ENTRY()
self.WaitS0 = LIST_ENTRY()
class RTL_BITMAP(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SizeOfBitMap = v_uint32()
self.Buffer = v_ptr32()
class LARGE_INTEGER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LowPart = v_uint32()
self.HighPart = v_uint32()
class _unnamed_12162(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CriticalSection = RTL_CRITICAL_SECTION()
self._pad0038 = v_bytes(size=32)
class NPAGED_LOOKASIDE_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.L = GENERAL_LOOKASIDE()
self.Lock__ObsoleteButDoNotDelete = v_uint32()
self._pad0100 = v_bytes(size=124)
class _unnamed_11794(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ReadMemory = DBGKD_READ_MEMORY64()
self._pad0028 = v_bytes(size=24)
class KLOCK_QUEUE_HANDLE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LockQueue = KSPIN_LOCK_QUEUE()
self.OldIrql = v_uint8()
self._pad000c = v_bytes(size=3)
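# Volume Parameter Block: links a file system's volume device object
# (DeviceObject) to the storage device it mounted (RealDevice).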
class VPB(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Size = v_uint16()
self.Flags = v_uint16()
self.VolumeLabelLength = v_uint16()
self.DeviceObject = v_ptr32()
self.RealDevice = v_ptr32()
self.SerialNumber = v_uint32()
self.ReferenceCount = v_uint32()
self.VolumeLabel = vstruct.VArray([ v_uint16() for i in xrange(32) ])
class SEGMENT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ControlArea = v_ptr32()
self.TotalNumberOfPtes = v_uint32()
self.NonExtendedPtes = v_uint32()
self.WritableUserReferences = v_uint32()
self.SizeOfSegment = v_uint64()
self.SegmentPteTemplate = MMPTE()
self.NumberOfCommittedPages = v_uint32()
self.ExtendInfo = v_ptr32()
self.SystemImageBase = v_ptr32()
self.BasedAddress = v_ptr32()
self.u1 = _unnamed_12605()
self.u2 = _unnamed_12606()
self.PrototypePte = v_ptr32()
self.ThePtes = vstruct.VArray([ MMPTE() for i in xrange(1) ])
self._pad0040 = v_bytes(size=4)
class _unnamed_15247(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TestAllocation = _unnamed_16554()
self._pad0010 = v_bytes(size=4)
class PP_LOOKASIDE_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.P = v_ptr32()
self.L = v_ptr32()
class OBJECT_NAME_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Name = UNICODE_STRING()
class IO_RESOURCE_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Version = v_uint16()
self.Revision = v_uint16()
self.Count = v_uint32()
self.Descriptors = vstruct.VArray([ IO_RESOURCE_DESCRIPTOR() for i in xrange(1) ])
class _unnamed_16445(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PageNo = v_uint32()
self.StartPage = v_uint32()
self.EndPage = v_uint32()
self.CheckSum = v_uint32()
class _unnamed_16446(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.NextTable = v_uint32()
self.CheckSum = v_uint32()
self.EntryCount = v_uint32()
class PRIVATE_CACHE_MAP_FLAGS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DontUse = v_uint32()
class FS_FILTER_PARAMETERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.AcquireForModifiedPageWriter = _unnamed_16779()
self._pad0014 = v_bytes(size=12)
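# User-mode NT heap header (XP-era layout): FreeLists is an array of 128
# doubly-linked free lists indexed by allocation size in 8-byte
# granularity units.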
class HEAP(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Entry = HEAP_ENTRY()
self.Signature = v_uint32()
self.Flags = v_uint32()
self.ForceFlags = v_uint32()
self.VirtualMemoryThreshold = v_uint32()
self.SegmentReserve = v_uint32()
self.SegmentCommit = v_uint32()
self.DeCommitFreeBlockThreshold = v_uint32()
self.DeCommitTotalFreeThreshold = v_uint32()
self.TotalFreeSize = v_uint32()
self.MaximumAllocationSize = v_uint32()
self.ProcessHeapsListIndex = v_uint16()
self.HeaderValidateLength = v_uint16()
self.HeaderValidateCopy = v_ptr32()
self.NextAvailableTagIndex = v_uint16()
self.MaximumTagIndex = v_uint16()
self.TagEntries = v_ptr32()
self.UCRSegments = v_ptr32()
self.UnusedUnCommittedRanges = v_ptr32()
self.AlignRound = v_uint32()
self.AlignMask = v_uint32()
self.VirtualAllocdBlocks = LIST_ENTRY()
self.Segments = vstruct.VArray([ v_ptr32() for i in xrange(64) ])
self.u = _unnamed_12111()
self.u2 = _unnamed_12112()
self.AllocatorBackTraceIndex = v_uint16()
self.NonDedicatedListLength = v_uint32()
self.LargeBlocksIndex = v_ptr32()
self.PseudoTagEntries = v_ptr32()
self.FreeLists = vstruct.VArray([ LIST_ENTRY() for i in xrange(128) ])
self.LockVariable = v_ptr32()
self.CommitRoutine = v_ptr32()
self.FrontEndHeap = v_ptr32()
self.FrontHeapLockCount = v_uint16()
self.FrontEndHeapType = v_uint8()
self.LastSegmentIndex = v_uint8()
class HANDLE_TRACE_DEBUG_INFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CurrentStackIndex = v_uint32()
self.TraceDb = vstruct.VArray([ HANDLE_TRACE_DB_ENTRY() for i in xrange(4096) ])
class PRIVILEGE_SET(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PrivilegeCount = v_uint32()
self.Control = v_uint32()
self.Privilege = vstruct.VArray([ LUID_AND_ATTRIBUTES() for i in xrange(1) ])
class CM_RESOURCE_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Count = v_uint32()
self.List = vstruct.VArray([ CM_FULL_RESOURCE_DESCRIPTOR() for i in xrange(1) ])
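# Executive process block (32-bit XP-era layout). ImageFileName is the
# 16-byte truncated image name, and ActiveProcessLinks threads the
# global process list that memory-forensics tools commonly walk.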
class EPROCESS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Pcb = KPROCESS()
self.ProcessLock = EX_PUSH_LOCK()
self.CreateTime = LARGE_INTEGER()
self.ExitTime = LARGE_INTEGER()
self.RundownProtect = EX_RUNDOWN_REF()
self.UniqueProcessId = v_ptr32()
self.ActiveProcessLinks = LIST_ENTRY()
self.QuotaUsage = vstruct.VArray([ v_uint32() for i in xrange(3) ])
self.QuotaPeak = vstruct.VArray([ v_uint32() for i in xrange(3) ])
self.CommitCharge = v_uint32()
self.PeakVirtualSize = v_uint32()
self.VirtualSize = v_uint32()
self.SessionProcessLinks = LIST_ENTRY()
self.DebugPort = v_ptr32()
self.ExceptionPort = v_ptr32()
self.ObjectTable = v_ptr32()
self.Token = EX_FAST_REF()
self.WorkingSetLock = FAST_MUTEX()
self.WorkingSetPage = v_uint32()
self.AddressCreationLock = FAST_MUTEX()
self.HyperSpaceLock = v_uint32()
self.ForkInProgress = v_ptr32()
self.HardwareTrigger = v_uint32()
self.VadRoot = v_ptr32()
self.VadHint = v_ptr32()
self.CloneRoot = v_ptr32()
self.NumberOfPrivatePages = v_uint32()
self.NumberOfLockedPages = v_uint32()
self.Win32Process = v_ptr32()
self.Job = v_ptr32()
self.SectionObject = v_ptr32()
self.SectionBaseAddress = v_ptr32()
self.QuotaBlock = v_ptr32()
self.WorkingSetWatch = v_ptr32()
self.Win32WindowStation = v_ptr32()
self.InheritedFromUniqueProcessId = v_ptr32()
self.LdtInformation = v_ptr32()
self.VadFreeHint = v_ptr32()
self.VdmObjects = v_ptr32()
self.DeviceMap = v_ptr32()
self.PhysicalVadList = LIST_ENTRY()
self.PageDirectoryPte = HARDWARE_PTE()
self._pad0170 = v_bytes(size=4)
self.Session = v_ptr32()
self.ImageFileName = vstruct.VArray([ v_uint8() for i in xrange(16) ])
self.JobLinks = LIST_ENTRY()
self.LockedPagesList = v_ptr32()
self.ThreadListHead = LIST_ENTRY()
self.SecurityPort = v_ptr32()
self.PaeTop = v_ptr32()
self.ActiveThreads = v_uint32()
self.GrantedAccess = v_uint32()
self.DefaultHardErrorProcessing = v_uint32()
self.LastThreadExitStatus = v_uint32()
self.Peb = v_ptr32()
self.PrefetchTrace = EX_FAST_REF()
self.ReadOperationCount = LARGE_INTEGER()
self.WriteOperationCount = LARGE_INTEGER()
self.OtherOperationCount = LARGE_INTEGER()
self.ReadTransferCount = LARGE_INTEGER()
self.WriteTransferCount = LARGE_INTEGER()
self.OtherTransferCount = LARGE_INTEGER()
self.CommitChargeLimit = v_uint32()
self.CommitChargePeak = v_uint32()
self.AweInfo = v_ptr32()
self.SeAuditProcessCreationInfo = SE_AUDIT_PROCESS_CREATION_INFO()
self.Vm = MMSUPPORT()
self.LastFaultCount = v_uint32()
self.ModifiedPageCount = v_uint32()
self.NumberOfVads = v_uint32()
self.JobStatus = v_uint32()
self.Flags = v_uint32()
self.ExitStatus = v_uint32()
self.NextPageColor = v_uint16()
self.SubSystemMinorVersion = v_uint8()
self.SubSystemMajorVersion = v_uint8()
self.PriorityClass = v_uint8()
self.WorkingSetAcquiredUnsafe = v_uint8()
self._pad0258 = v_bytes(size=2)
self.Cookie = v_uint32()
self._pad0260 = v_bytes(size=4)
class PHYSICAL_MEMORY_RUN(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BasePage = v_uint32()
self.PageCount = v_uint32()
class CM_KEY_BODY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint32()
self.KeyControlBlock = v_ptr32()
self.NotifyBlock = v_ptr32()
self.ProcessID = v_ptr32()
self.Callers = v_uint32()
self.CallerAddress = vstruct.VArray([ v_ptr32() for i in xrange(10) ])
self.KeyBodyList = LIST_ENTRY()
class KMUTANT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Header = DISPATCHER_HEADER()
self.MutantListEntry = LIST_ENTRY()
self.OwnerThread = v_ptr32()
self.Abandoned = v_uint8()
self.ApcDisable = v_uint8()
self._pad0020 = v_bytes(size=2)
class FX_SAVE_AREA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.U = _unnamed_10880()
self.NpxSavedCpu = v_uint32()
self.Cr0NpxState = v_uint32()
class POWER_SEQUENCE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SequenceD1 = v_uint32()
self.SequenceD2 = v_uint32()
self.SequenceD3 = v_uint32()
class KTIMER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Header = DISPATCHER_HEADER()
self.DueTime = ULARGE_INTEGER()
self.TimerListEntry = LIST_ENTRY()
self.Dpc = v_ptr32()
self.Period = v_uint32()
class MM_PAGED_POOL_INFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PagedPoolAllocationMap = v_ptr32()
self.EndOfPagedPoolBitmap = v_ptr32()
self.PagedPoolLargeSessionAllocationMap = v_ptr32()
self.FirstPteForPagedPool = v_ptr32()
self.LastPteForPagedPool = v_ptr32()
self.NextPdeForPagedPoolExpansion = v_ptr32()
self.PagedPoolHint = v_uint32()
self.PagedPoolCommit = v_uint32()
self.AllocatedPagedPool = v_uint32()
class HIVE_LIST_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Name = v_ptr32()
self.BaseName = v_ptr32()
self.CmHive = v_ptr32()
self.Flags = v_uint32()
self.CmHive2 = v_ptr32()
self.ThreadFinished = v_uint8()
self.ThreadStarted = v_uint8()
self.Allocate = v_uint8()
self._pad0018 = v_bytes(size=1)
class CM_PARTIAL_RESOURCE_DESCRIPTOR(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint8()
self.ShareDisposition = v_uint8()
self.Flags = v_uint16()
self.u = _unnamed_13834()
class RTLP_RANGE_LIST_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Start = v_uint64()
self.End = v_uint64()
self.Allocated = _unnamed_14486()
self.Attributes = v_uint8()
self.PublicFlags = v_uint8()
self.PrivateFlags = v_uint16()
self.ListEntry = LIST_ENTRY()
self._pad0028 = v_bytes(size=4)
class _unnamed_14765(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DeviceQueueEntry = KDEVICE_QUEUE_ENTRY()
self.Thread = v_ptr32()
self.AuxiliaryBuffer = v_ptr32()
self.ListEntry = LIST_ENTRY()
self.CurrentStackLocation = v_ptr32()
self.OriginalFileObject = v_ptr32()
class _unnamed_14762(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Create = _unnamed_15988()
class _unnamed_13383(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CellData = CELL_DATA()
class MMVAD_LONG(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.StartingVpn = v_uint32()
self.EndingVpn = v_uint32()
self.Parent = v_ptr32()
self.LeftChild = v_ptr32()
self.RightChild = v_ptr32()
self.u = _unnamed_14102()
self.ControlArea = v_ptr32()
self.FirstPrototypePte = v_ptr32()
self.LastContiguousPte = v_ptr32()
self.u2 = _unnamed_14103()
self.u3 = _unnamed_14104()
self.u4 = _unnamed_14105()
class CM_VIEW_OF_FILE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LRUViewList = LIST_ENTRY()
self.PinViewList = LIST_ENTRY()
self.FileOffset = v_uint32()
self.Size = v_uint32()
self.ViewAddress = v_ptr32()
self.Bcb = v_ptr32()
self.UseCount = v_uint32()
class _unnamed_16143(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.StartSid = v_ptr32()
self.SidList = v_ptr32()
self.SidListLength = v_uint32()
class CM_FULL_RESOURCE_DESCRIPTOR(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.InterfaceType = v_uint32()
self.BusNumber = v_uint32()
self.PartialResourceList = CM_PARTIAL_RESOURCE_LIST()
class DBGKD_WRITE_MEMORY64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TargetBaseAddress = v_uint64()
self.TransferCount = v_uint32()
self.ActualBytesWritten = v_uint32()
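# KD protocol version reply; the address fields (KernBase,
# PsLoadedModuleList, DebuggerDataList) are 64-bit even when the
# debug target is a 32-bit system.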
class DBGKD_GET_VERSION64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.MajorVersion = v_uint16()
self.MinorVersion = v_uint16()
self.ProtocolVersion = v_uint16()
self.Flags = v_uint16()
self.MachineType = v_uint16()
self.MaxPacketType = v_uint8()
self.MaxStateChange = v_uint8()
self.MaxManipulate = v_uint8()
self.Simulation = v_uint8()
self.Unused = vstruct.VArray([ v_uint16() for i in xrange(1) ])
self.KernBase = v_uint64()
self.PsLoadedModuleList = v_uint64()
self.DebuggerDataList = v_uint64()
class _unnamed_16069(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.FileName = v_ptr32()
self.FileInformationClass = v_uint32()
self.FileIndex = v_uint32()
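# Optional fast-path I/O entry points a file system driver exposes via
# DRIVER_OBJECT.FastIoDispatch; a NULL member means the operation falls
# back to the normal IRP path.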
class FAST_IO_DISPATCH(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SizeOfFastIoDispatch = v_uint32()
self.FastIoCheckIfPossible = v_ptr32()
self.FastIoRead = v_ptr32()
self.FastIoWrite = v_ptr32()
self.FastIoQueryBasicInfo = v_ptr32()
self.FastIoQueryStandardInfo = v_ptr32()
self.FastIoLock = v_ptr32()
self.FastIoUnlockSingle = v_ptr32()
self.FastIoUnlockAll = v_ptr32()
self.FastIoUnlockAllByKey = v_ptr32()
self.FastIoDeviceControl = v_ptr32()
self.AcquireFileForNtCreateSection = v_ptr32()
self.ReleaseFileForNtCreateSection = v_ptr32()
self.FastIoDetachDevice = v_ptr32()
self.FastIoQueryNetworkOpenInfo = v_ptr32()
self.AcquireForModWrite = v_ptr32()
self.MdlRead = v_ptr32()
self.MdlReadComplete = v_ptr32()
self.PrepareMdlWrite = v_ptr32()
self.MdlWriteComplete = v_ptr32()
self.FastIoReadCompressed = v_ptr32()
self.FastIoWriteCompressed = v_ptr32()
self.MdlReadCompleteCompressed = v_ptr32()
self.MdlWriteCompleteCompressed = v_ptr32()
self.FastIoQueryOpen = v_ptr32()
self.ReleaseForModWrite = v_ptr32()
self.AcquireForCcFlush = v_ptr32()
self.ReleaseForCcFlush = v_ptr32()
class CM_KEY_CONTROL_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.RefCount = v_uint32()
self.ExtFlags = v_uint32()
self.KeyHash = CM_KEY_HASH()
self.ParentKcb = v_ptr32()
self.NameBlock = v_ptr32()
self.CachedSecurity = v_ptr32()
self.ValueCache = CACHED_CHILD_LIST()
self.IndexHint = v_ptr32()
self.KeyBodyListHead = LIST_ENTRY()
self.KcbLastWriteTime = LARGE_INTEGER()
self.KcbMaxNameLen = v_uint16()
self.KcbMaxValueNameLen = v_uint16()
self.KcbMaxValueDataLen = v_uint32()
self.KcbUserFlags = v_uint32()
self._pad0050 = v_bytes(size=4)
class MMVAD_FLAGS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CommitCharge = v_uint32()
class MMWSL(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Quota = v_uint32()
self.FirstFree = v_uint32()
self.FirstDynamic = v_uint32()
self.LastEntry = v_uint32()
self.NextSlot = v_uint32()
self.Wsle = v_ptr32()
self.LastInitializedWsle = v_uint32()
self.NonDirectCount = v_uint32()
self.HashTable = v_ptr32()
self.HashTableSize = v_uint32()
self.NumberOfCommittedPageTables = v_uint32()
self.HashTableStart = v_ptr32()
self.HighestPermittedHashAddress = v_ptr32()
self.NumberOfImageWaiters = v_uint32()
self.VadBitMapHint = v_uint32()
self.UsedPageTableEntries = vstruct.VArray([ v_uint16() for i in xrange(768) ])
self.CommittedPageTables = vstruct.VArray([ v_uint32() for i in xrange(24) ])
class DBGKD_CONTINUE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ContinueStatus = v_uint32()
class _unnamed_14102(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LongFlags = v_uint32()
class _unnamed_14103(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LongFlags2 = v_uint32()
class SUPPORTED_RANGES(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Version = v_uint16()
self.Sorted = v_uint8()
self.Reserved = v_uint8()
self.NoIO = v_uint32()
self.IO = SUPPORTED_RANGE()
self.NoMemory = v_uint32()
self._pad0030 = v_bytes(size=4)
self.Memory = SUPPORTED_RANGE()
self.NoPrefetchMemory = v_uint32()
self._pad0058 = v_bytes(size=4)
self.PrefetchMemory = SUPPORTED_RANGE()
self.NoDma = v_uint32()
self._pad0080 = v_bytes(size=4)
self.Dma = SUPPORTED_RANGE()
class WORK_QUEUE_ITEM(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.List = LIST_ENTRY()
self.WorkerRoutine = v_ptr32()
self.Parameter = v_ptr32()
class _unnamed_14104(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.List = LIST_ENTRY()
class _unnamed_14105(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Banked = v_ptr32()
class EPROCESS_QUOTA_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Usage = v_uint32()
self.Limit = v_uint32()
self.Peak = v_uint32()
self.Return = v_uint32()
class KSPECIAL_REGISTERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Cr0 = v_uint32()
self.Cr2 = v_uint32()
self.Cr3 = v_uint32()
self.Cr4 = v_uint32()
self.KernelDr0 = v_uint32()
self.KernelDr1 = v_uint32()
self.KernelDr2 = v_uint32()
self.KernelDr3 = v_uint32()
self.KernelDr6 = v_uint32()
self.KernelDr7 = v_uint32()
self.Gdtr = DESCRIPTOR()
self.Idtr = DESCRIPTOR()
self.Tr = v_uint16()
self.Ldtr = v_uint16()
self.Reserved = vstruct.VArray([ v_uint32() for i in xrange(6) ])
class KINTERRUPT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Size = v_uint16()
self.InterruptListEntry = LIST_ENTRY()
self.ServiceRoutine = v_ptr32()
self.ServiceContext = v_ptr32()
self.SpinLock = v_uint32()
self.TickCount = v_uint32()
self.ActualLock = v_ptr32()
self.DispatchAddress = v_ptr32()
self.Vector = v_uint32()
self.Irql = v_uint8()
self.SynchronizeIrql = v_uint8()
self.FloatingSave = v_uint8()
self.Connected = v_uint8()
self.Number = v_uint8()
self.ShareVector = v_uint8()
self._pad0030 = v_bytes(size=2)
self.Mode = v_uint32()
self.ServiceCount = v_uint32()
self.DispatchCount = v_uint32()
self.DispatchCode = vstruct.VArray([ v_uint32() for i in xrange(106) ])
class RTL_CRITICAL_SECTION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DebugInfo = v_ptr32()
self.LockCount = v_uint32()
self.RecursionCount = v_uint32()
self.OwningThread = v_ptr32()
self.LockSemaphore = v_ptr32()
self.SpinCount = v_uint32()
class _unnamed_16782(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Argument1 = v_ptr32()
self.Argument2 = v_ptr32()
self.Argument3 = v_ptr32()
self.Argument4 = v_ptr32()
self.Argument5 = v_ptr32()
class _unnamed_16780(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ResourceToRelease = v_ptr32()
class _unnamed_16781(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SyncType = v_uint32()
self.PageProtection = v_uint32()
class KSYSTEM_TIME(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LowPart = v_uint32()
self.High1Time = v_uint32()
self.High2Time = v_uint32()
class PO_DEVICE_NOTIFY_ORDER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DevNodeSequence = v_uint32()
self.WarmEjectPdoPointer = v_ptr32()
self.OrderLevel = vstruct.VArray([ PO_NOTIFY_ORDER_LEVEL() for i in xrange(8) ])
class _unnamed_11882(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ReadMemory = DBGKD_READ_MEMORY32()
self._pad0028 = v_bytes(size=28)
class FLOATING_SAVE_AREA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ControlWord = v_uint32()
self.StatusWord = v_uint32()
self.TagWord = v_uint32()
self.ErrorOffset = v_uint32()
self.ErrorSelector = v_uint32()
self.DataOffset = v_uint32()
self.DataSelector = v_uint32()
self.RegisterArea = vstruct.VArray([ v_uint8() for i in xrange(80) ])
self.Cr0NpxState = v_uint32()
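# FLOATING_SAVE_AREA is the classic x87 FNSAVE image (compare FNSAVE_FORMAT
# later in this module) with one extra dword, Cr0NpxState, appended for the
# kernel's per-thread numeric-coprocessor bookkeeping.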
class WMI_LOGGER_MODE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SequentialFile = v_uint32()
class KQUEUE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Header = DISPATCHER_HEADER()
self.EntryListHead = LIST_ENTRY()
self.CurrentCount = v_uint32()
self.MaximumCount = v_uint32()
self.ThreadListHead = LIST_ENTRY()
class POOL_TRACKER_TABLE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Key = v_uint32()
self.NonPagedAllocs = v_uint32()
self.NonPagedFrees = v_uint32()
self.NonPagedBytes = v_uint32()
self.PagedAllocs = v_uint32()
self.PagedFrees = v_uint32()
self.PagedBytes = v_uint32()
class _unnamed_16666(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DiskId = GUID()
class WMI_BUFFER_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Free = v_uint32()
class LUID_AND_ATTRIBUTES(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Luid = LUID()
self.Attributes = v_uint32()
class _unnamed_15560(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Base = v_uint32()
self.Limit = v_uint32()
class MMMOD_WRITER_MDL_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Links = LIST_ENTRY()
self.WriteOffset = LARGE_INTEGER()
self.u = _unnamed_15130()
self.Irp = v_ptr32()
self.LastPageToWrite = v_uint32()
self.PagingListHead = v_ptr32()
self.CurrentList = v_ptr32()
self.PagingFile = v_ptr32()
self.File = v_ptr32()
self.ControlArea = v_ptr32()
self.FileResource = v_ptr32()
self.Mdl = MDL()
self.Page = vstruct.VArray([ v_uint32() for i in xrange(1) ])
class CACHED_CHILD_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Count = v_uint32()
self.ValueList = v_uint32()
class KTHREAD(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Header = DISPATCHER_HEADER()
self.MutantListHead = LIST_ENTRY()
self.InitialStack = v_ptr32()
self.StackLimit = v_ptr32()
self.Teb = v_ptr32()
self.TlsArray = v_ptr32()
self.KernelStack = v_ptr32()
self.DebugActive = v_uint8()
self.State = v_uint8()
self.Alerted = vstruct.VArray([ v_uint8() for i in xrange(2) ])
self.Iopl = v_uint8()
self.NpxState = v_uint8()
self.Saturation = v_uint8()
self.Priority = v_uint8()
self.ApcState = KAPC_STATE()
self.ContextSwitches = v_uint32()
self.IdleSwapBlock = v_uint8()
self.Spare0 = vstruct.VArray([ v_uint8() for i in xrange(3) ])
self.WaitStatus = v_uint32()
self.WaitIrql = v_uint8()
self.WaitMode = v_uint8()
self.WaitNext = v_uint8()
self.WaitReason = v_uint8()
self.WaitBlockList = v_ptr32()
self.WaitListEntry = LIST_ENTRY()
self.WaitTime = v_uint32()
self.BasePriority = v_uint8()
self.DecrementCount = v_uint8()
self.PriorityDecrement = v_uint8()
self.Quantum = v_uint8()
self.WaitBlock = vstruct.VArray([ KWAIT_BLOCK() for i in xrange(4) ])
self.LegoData = v_ptr32()
self.KernelApcDisable = v_uint32()
self.UserAffinity = v_uint32()
self.SystemAffinityActive = v_uint8()
self.PowerState = v_uint8()
self.NpxIrql = v_uint8()
self.InitialNode = v_uint8()
self.ServiceTable = v_ptr32()
self.Queue = v_ptr32()
self.ApcQueueLock = v_uint32()
self._pad00f0 = v_bytes(size=4)
self.Timer = KTIMER()
self.QueueListEntry = LIST_ENTRY()
self.SoftAffinity = v_uint32()
self.Affinity = v_uint32()
self.Preempted = v_uint8()
self.ProcessReadyQueue = v_uint8()
self.KernelStackResident = v_uint8()
self.NextProcessor = v_uint8()
self.CallbackStack = v_ptr32()
self.Win32Thread = v_ptr32()
self.TrapFrame = v_ptr32()
self.ApcStatePointer = vstruct.VArray([ v_ptr32() for i in xrange(2) ])
self.PreviousMode = v_uint8()
self.EnableStackSwap = v_uint8()
self.LargeStack = v_uint8()
self.ResourceIndex = v_uint8()
self.KernelTime = v_uint32()
self.UserTime = v_uint32()
self.SavedApcState = KAPC_STATE()
self.Alertable = v_uint8()
self.ApcStateIndex = v_uint8()
self.ApcQueueable = v_uint8()
self.AutoAlignment = v_uint8()
self.StackBase = v_ptr32()
self.SuspendApc = KAPC()
self.SuspendSemaphore = KSEMAPHORE()
self.ThreadListEntry = LIST_ENTRY()
self.FreezeCount = v_uint8()
self.SuspendCount = v_uint8()
self.IdealProcessor = v_uint8()
self.DisableBoost = v_uint8()
self._pad01c0 = v_bytes(size=4)
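# KTHREAD is the scheduler-visible core of a thread: dispatcher header, kernel
# stack bounds, APC state, wait status, and priority/quantum bookkeeping.
# ETHREAD (later in this module) embeds it as its first field, Tcb.
#
# A minimal usage sketch, assuming the vivisect vstruct API (vsParse() fills a
# structure from raw bytes and plain attribute access yields primitive
# values). The helper below is illustrative only, not part of the generated
# definitions.
def _sketch_kthread_state(buf):
    # Parse a raw KTHREAD snapshot and report its scheduler state fields.
    kt = KTHREAD()
    kt.vsParse(buf)
    return kt.State, kt.Priority, kt.KernelTime, kt.UserTime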
class _unnamed_12531(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LongFlags = v_uint32()
class ADAPTER_OBJECT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
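# ADAPTER_OBJECT is deliberately opaque: the generator emitted no fields, so
# this structure parses as zero bytes and serves only as a named placeholder
# for pointers to it.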
class _unnamed_10508(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LowPart = v_uint32()
self.HighPart = v_uint32()
class CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ContextFlags = v_uint32()
self.Dr0 = v_uint32()
self.Dr1 = v_uint32()
self.Dr2 = v_uint32()
self.Dr3 = v_uint32()
self.Dr6 = v_uint32()
self.Dr7 = v_uint32()
self.FloatSave = FLOATING_SAVE_AREA()
self.SegGs = v_uint32()
self.SegFs = v_uint32()
self.SegEs = v_uint32()
self.SegDs = v_uint32()
self.Edi = v_uint32()
self.Esi = v_uint32()
self.Ebx = v_uint32()
self.Edx = v_uint32()
self.Ecx = v_uint32()
self.Eax = v_uint32()
self.Ebp = v_uint32()
self.Eip = v_uint32()
self.SegCs = v_uint32()
self.EFlags = v_uint32()
self.Esp = v_uint32()
self.SegSs = v_uint32()
self.ExtendedRegisters = vstruct.VArray([ v_uint8() for i in xrange(512) ])
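# CONTEXT is the 32-bit x86 register snapshot used for exception dispatch and
# thread inspection; ContextFlags records which register groups (control,
# integer, segment, floating point, extended) the snapshot actually carries.
#
# A minimal usage sketch (same vstruct API assumptions as above; the helper
# name is illustrative).
def _sketch_context_pc(buf):
    # Parse a raw CONTEXT record and return the program counter, stack
    # pointer, and flags register.
    ctx = CONTEXT()
    ctx.vsParse(buf)
    return ctx.Eip, ctx.Esp, ctx.EFlags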
class DBGKD_GET_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Unused = v_uint32()
class GENERIC_MAPPING(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.GenericRead = v_uint32()
self.GenericWrite = v_uint32()
self.GenericExecute = v_uint32()
self.GenericAll = v_uint32()
class DEVICE_NODE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Sibling = v_ptr32()
self.Child = v_ptr32()
self.Parent = v_ptr32()
self.LastChild = v_ptr32()
self.Level = v_uint32()
self.Notify = v_ptr32()
self.State = v_uint32()
self.PreviousState = v_uint32()
self.StateHistory = vstruct.VArray([ PNP_DEVNODE_STATE() for i in xrange(20) ])
self.StateHistoryEntry = v_uint32()
self.CompletionStatus = v_uint32()
self.PendingIrp = v_ptr32()
self.Flags = v_uint32()
self.UserFlags = v_uint32()
self.Problem = v_uint32()
self.PhysicalDeviceObject = v_ptr32()
self.ResourceList = v_ptr32()
self.ResourceListTranslated = v_ptr32()
self.InstancePath = UNICODE_STRING()
self.ServiceName = UNICODE_STRING()
self.DuplicatePDO = v_ptr32()
self.ResourceRequirements = v_ptr32()
self.InterfaceType = v_uint32()
self.BusNumber = v_uint32()
self.ChildInterfaceType = v_uint32()
self.ChildBusNumber = v_uint32()
self.ChildBusTypeIndex = v_uint16()
self.RemovalPolicy = v_uint8()
self.HardwareRemovalPolicy = v_uint8()
self.TargetDeviceNotify = LIST_ENTRY()
self.DeviceArbiterList = LIST_ENTRY()
self.DeviceTranslatorList = LIST_ENTRY()
self.NoTranslatorMask = v_uint16()
self.QueryTranslatorMask = v_uint16()
self.NoArbiterMask = v_uint16()
self.QueryArbiterMask = v_uint16()
self.OverUsed1 = _unnamed_12916()
self.OverUsed2 = _unnamed_12917()
self.BootResources = v_ptr32()
self.CapabilityFlags = v_uint32()
self.DockInfo = _unnamed_12918()
self.DisableableDepends = v_uint32()
self.PendedSetInterfaceState = LIST_ENTRY()
self.LegacyBusListEntry = LIST_ENTRY()
self.DriverUnloadRetryCount = v_uint32()
class RTL_ATOM_TABLE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint32()
self.CriticalSection = RTL_CRITICAL_SECTION()
self.RtlHandleTable = RTL_HANDLE_TABLE()
self.NumberOfBuckets = v_uint32()
self.Buckets = vstruct.VArray([ v_ptr32() for i in xrange(1) ])
class _unnamed_15130(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.IoStatus = IO_STATUS_BLOCK()
class KUSER_SHARED_DATA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TickCountLow = v_uint32()
self.TickCountMultiplier = v_uint32()
self.InterruptTime = KSYSTEM_TIME()
self.SystemTime = KSYSTEM_TIME()
self.TimeZoneBias = KSYSTEM_TIME()
self.ImageNumberLow = v_uint16()
self.ImageNumberHigh = v_uint16()
self.NtSystemRoot = vstruct.VArray([ v_uint16() for i in xrange(260) ])
self.MaxStackTraceDepth = v_uint32()
self.CryptoExponent = v_uint32()
self.TimeZoneId = v_uint32()
self.Reserved2 = vstruct.VArray([ v_uint32() for i in xrange(8) ])
self.NtProductType = v_uint32()
self.ProductTypeIsValid = v_uint8()
self._pad026c = v_bytes(size=3)
self.NtMajorVersion = v_uint32()
self.NtMinorVersion = v_uint32()
self.ProcessorFeatures = vstruct.VArray([ v_uint8() for i in xrange(64) ])
self.Reserved1 = v_uint32()
self.Reserved3 = v_uint32()
self.TimeSlip = v_uint32()
self.AlternativeArchitecture = v_uint32()
self._pad02c8 = v_bytes(size=4)
self.SystemExpirationDate = LARGE_INTEGER()
self.SuiteMask = v_uint32()
self.KdDebuggerEnabled = v_uint8()
self.NXSupportPolicy = v_uint8()
self._pad02d8 = v_bytes(size=2)
self.ActiveConsoleId = v_uint32()
self.DismountCount = v_uint32()
self.ComPlusPackage = v_uint32()
self.LastSystemRITEventTickCount = v_uint32()
self.NumberOfPhysicalPages = v_uint32()
self.SafeBootMode = v_uint8()
self._pad02f0 = v_bytes(size=3)
self.TraceLogging = v_uint32()
self._pad02f8 = v_bytes(size=4)
self.TestRetInstruction = v_uint64()
self.SystemCall = v_uint32()
self.SystemCallReturn = v_uint32()
self.SystemCallPad = vstruct.VArray([ v_uint64() for i in xrange(3) ])
self.TickCount = KSYSTEM_TIME()
self._pad0330 = v_bytes(size=4)
self.Cookie = v_uint32()
self._pad0338 = v_bytes(size=4)
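# KUSER_SHARED_DATA mirrors the read-only page the kernel shares with every
# process (conventionally mapped at 0x7FFE0000 in 32-bit user mode). The
# kernel continuously updates fields such as TickCountLow, SystemTime and
# InterruptTime, which is why user code can read them without a system call.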
class IMAGE_ROM_OPTIONAL_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Magic = v_uint16()
self.MajorLinkerVersion = v_uint8()
self.MinorLinkerVersion = v_uint8()
self.SizeOfCode = v_uint32()
self.SizeOfInitializedData = v_uint32()
self.SizeOfUninitializedData = v_uint32()
self.AddressOfEntryPoint = v_uint32()
self.BaseOfCode = v_uint32()
self.BaseOfData = v_uint32()
self.BaseOfBss = v_uint32()
self.GprMask = v_uint32()
self.CprMask = vstruct.VArray([ v_uint32() for i in xrange(4) ])
self.GpValue = v_uint32()
class _unnamed_16242(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SystemContext = v_uint32()
self.Type = v_uint32()
self.State = POWER_STATE()
self.ShutdownType = v_uint32()
class HEAP_FREE_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Size = v_uint16()
self.PreviousSize = v_uint16()
self.SmallTagIndex = v_uint8()
self.Flags = v_uint8()
self.UnusedBytes = v_uint8()
self.SegmentIndex = v_uint8()
self.FreeList = LIST_ENTRY()
class LDR_DATA_TABLE_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.InLoadOrderLinks = LIST_ENTRY()
self.InMemoryOrderLinks = LIST_ENTRY()
self.InInitializationOrderLinks = LIST_ENTRY()
self.DllBase = v_ptr32()
self.EntryPoint = v_ptr32()
self.SizeOfImage = v_uint32()
self.FullDllName = UNICODE_STRING()
self.BaseDllName = UNICODE_STRING()
self.Flags = v_uint32()
self.LoadCount = v_uint16()
self.TlsIndex = v_uint16()
self.HashLinks = LIST_ENTRY()
self.TimeDateStamp = v_uint32()
self.EntryPointActivationContext = v_ptr32()
self.PatchInformation = v_ptr32()
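# LDR_DATA_TABLE_ENTRY describes one loaded module. Its three LIST_ENTRY
# fields thread the entry onto the PEB loader's load-order, memory-order and
# initialization-order lists, so a list link must be converted back to the
# containing record before DllBase/BaseDllName make sense.
#
# A hedged sketch of that conversion, assuming vstruct's vsGetOffset() reports
# a field's byte offset (the CONTAINING_RECORD idiom); illustrative only.
def _sketch_containing_record(link_va):
    # Map the address of an InMemoryOrderLinks field back to the address of
    # the LDR_DATA_TABLE_ENTRY that embeds it.
    entry = LDR_DATA_TABLE_ENTRY()
    return link_va - entry.vsGetOffset('InMemoryOrderLinks')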
class MMADDRESS_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.StartVpn = v_uint32()
self.EndVpn = v_uint32()
class _unnamed_15988(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SecurityContext = v_ptr32()
self.Options = v_uint32()
self.FileAttributes = v_uint16()
self.ShareAccess = v_uint16()
self.EaLength = v_uint32()
class DBGKD_READ_MEMORY64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TargetBaseAddress = v_uint64()
self.TransferCount = v_uint32()
self.ActualBytesRead = v_uint32()
class PO_MEMORY_IMAGE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint32()
self.Version = v_uint32()
self.CheckSum = v_uint32()
self.LengthSelf = v_uint32()
self.PageSelf = v_uint32()
self.PageSize = v_uint32()
self.ImageType = v_uint32()
self._pad0020 = v_bytes(size=4)
self.SystemTime = LARGE_INTEGER()
self.InterruptTime = v_uint64()
self.FeatureFlags = v_uint32()
self.HiberFlags = v_uint8()
self.spare = vstruct.VArray([ v_uint8() for i in xrange(3) ])
self.NoHiberPtes = v_uint32()
self.HiberVa = v_uint32()
self.HiberPte = LARGE_INTEGER()
self.NoFreePages = v_uint32()
self.FreeMapCheck = v_uint32()
self.WakeCheck = v_uint32()
self.TotalPages = v_uint32()
self.FirstTablePage = v_uint32()
self.LastFilePage = v_uint32()
self.PerfInfo = PO_HIBER_PERF()
class HEAP_UCR_SEGMENT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.ReservedSize = v_uint32()
self.CommittedSize = v_uint32()
self.filler = v_uint32()
class HHIVE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint32()
self.GetCellRoutine = v_ptr32()
self.ReleaseCellRoutine = v_ptr32()
self.Allocate = v_ptr32()
self.Free = v_ptr32()
self.FileSetSize = v_ptr32()
self.FileWrite = v_ptr32()
self.FileRead = v_ptr32()
self.FileFlush = v_ptr32()
self.BaseBlock = v_ptr32()
self.DirtyVector = RTL_BITMAP()
self.DirtyCount = v_uint32()
self.DirtyAlloc = v_uint32()
self.RealWrites = v_uint8()
self._pad003c = v_bytes(size=3)
self.Cluster = v_uint32()
self.Flat = v_uint8()
self.ReadOnly = v_uint8()
self.Log = v_uint8()
self._pad0044 = v_bytes(size=1)
self.HiveFlags = v_uint32()
self.LogSize = v_uint32()
self.RefreshCount = v_uint32()
self.StorageTypeCount = v_uint32()
self.Version = v_uint32()
self.Storage = vstruct.VArray([ DUAL() for i in xrange(2) ])
class TEB_ACTIVE_FRAME_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Flags = v_uint32()
self.FrameName = v_ptr32()
class TEB(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NtTib = NT_TIB()
self.EnvironmentPointer = v_ptr32()
self.ClientId = CLIENT_ID()
self.ActiveRpcHandle = v_ptr32()
self.ThreadLocalStoragePointer = v_ptr32()
self.ProcessEnvironmentBlock = v_ptr32()
self.LastErrorValue = v_uint32()
self.CountOfOwnedCriticalSections = v_uint32()
self.CsrClientThread = v_ptr32()
self.Win32ThreadInfo = v_ptr32()
self.User32Reserved = vstruct.VArray([ v_uint32() for i in xrange(26) ])
self.UserReserved = vstruct.VArray([ v_uint32() for i in xrange(5) ])
self.WOW32Reserved = v_ptr32()
self.CurrentLocale = v_uint32()
self.FpSoftwareStatusRegister = v_uint32()
self.SystemReserved1 = vstruct.VArray([ v_ptr32() for i in xrange(54) ])
self.ExceptionCode = v_uint32()
self.ActivationContextStack = ACTIVATION_CONTEXT_STACK()
self.SpareBytes1 = vstruct.VArray([ v_uint8() for i in xrange(24) ])
self.GdiTebBatch = GDI_TEB_BATCH()
self.RealClientId = CLIENT_ID()
self.GdiCachedProcessHandle = v_ptr32()
self.GdiClientPID = v_uint32()
self.GdiClientTID = v_uint32()
self.GdiThreadLocalInfo = v_ptr32()
self.Win32ClientInfo = vstruct.VArray([ v_uint32() for i in xrange(62) ])
self.glDispatchTable = vstruct.VArray([ v_ptr32() for i in xrange(233) ])
self.glReserved1 = vstruct.VArray([ v_uint32() for i in xrange(29) ])
self.glReserved2 = v_ptr32()
self.glSectionInfo = v_ptr32()
self.glSection = v_ptr32()
self.glTable = v_ptr32()
self.glCurrentRC = v_ptr32()
self.glContext = v_ptr32()
self.LastStatusValue = v_uint32()
self.StaticUnicodeString = UNICODE_STRING()
self.StaticUnicodeBuffer = vstruct.VArray([ v_uint16() for i in xrange(261) ])
self._pad0e0c = v_bytes(size=2)
self.DeallocationStack = v_ptr32()
self.TlsSlots = vstruct.VArray([ v_ptr32() for i in xrange(64) ])
self.TlsLinks = LIST_ENTRY()
self.Vdm = v_ptr32()
self.ReservedForNtRpc = v_ptr32()
self.DbgSsReserved = vstruct.VArray([ v_ptr32() for i in xrange(2) ])
self.HardErrorsAreDisabled = v_uint32()
self.Instrumentation = vstruct.VArray([ v_ptr32() for i in xrange(16) ])
self.WinSockData = v_ptr32()
self.GdiBatchCount = v_uint32()
self.InDbgPrint = v_uint8()
self.FreeStackOnTermination = v_uint8()
self.HasFiberData = v_uint8()
self.IdealProcessor = v_uint8()
self.Spare3 = v_uint32()
self.ReservedForPerf = v_ptr32()
self.ReservedForOle = v_ptr32()
self.WaitingOnLoaderLock = v_uint32()
self.Wx86Thread = Wx86ThreadState()
self.TlsExpansionSlots = v_ptr32()
self.ImpersonationLocale = v_uint32()
self.IsImpersonating = v_uint32()
self.NlsCache = v_ptr32()
self.pShimData = v_ptr32()
self.HeapVirtualAffinity = v_uint32()
self.CurrentTransactionHandle = v_ptr32()
self.ActiveFrame = v_ptr32()
self.SafeThunkCall = v_uint8()
self.BooleanSpare = vstruct.VArray([ v_uint8() for i in xrange(3) ])
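# TEB is the per-thread user-mode block reachable through FS:[0] on 32-bit
# Windows: NtTib.Self points back at the TEB itself, ClientId carries the
# (process id, thread id) pair, and ProcessEnvironmentBlock points at the PEB.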
class DRIVER_OBJECT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Size = v_uint16()
self.DeviceObject = v_ptr32()
self.Flags = v_uint32()
self.DriverStart = v_ptr32()
self.DriverSize = v_uint32()
self.DriverSection = v_ptr32()
self.DriverExtension = v_ptr32()
self.DriverName = UNICODE_STRING()
self.HardwareDatabase = v_ptr32()
self.FastIoDispatch = v_ptr32()
self.DriverInit = v_ptr32()
self.DriverStartIo = v_ptr32()
self.DriverUnload = v_ptr32()
self.MajorFunction = vstruct.VArray([ v_ptr32() for i in xrange(28) ])
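# DRIVER_OBJECT is what the I/O manager hands a driver at DriverEntry.
# MajorFunction is the 28-slot IRP_MJ_* dispatch table, and FastIoDispatch
# points at the FAST_IO_DISPATCH table defined earlier in this module.
#
# A minimal sketch (assumes vsParse(), integer-indexable VArray elements, and
# int() coercion of vstruct pointer primitives; illustrative only).
def _sketch_dispatch_slots(buf):
    # Parse a raw DRIVER_OBJECT and list which IRP major-function slots hold
    # a non-NULL handler.
    drv = DRIVER_OBJECT()
    drv.vsParse(buf)
    return [i for i in xrange(28) if int(drv.MajorFunction[i]) != 0]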
class OBJECT_SYMBOLIC_LINK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CreationTime = LARGE_INTEGER()
self.LinkTarget = UNICODE_STRING()
self.LinkTargetRemaining = UNICODE_STRING()
self.LinkTargetObject = v_ptr32()
self.DosDeviceDriveIndex = v_uint32()
class EJOB(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Event = KEVENT()
self.JobLinks = LIST_ENTRY()
self.ProcessListHead = LIST_ENTRY()
self.JobLock = ERESOURCE()
self.TotalUserTime = LARGE_INTEGER()
self.TotalKernelTime = LARGE_INTEGER()
self.ThisPeriodTotalUserTime = LARGE_INTEGER()
self.ThisPeriodTotalKernelTime = LARGE_INTEGER()
self.TotalPageFaultCount = v_uint32()
self.TotalProcesses = v_uint32()
self.ActiveProcesses = v_uint32()
self.TotalTerminatedProcesses = v_uint32()
self.PerProcessUserTimeLimit = LARGE_INTEGER()
self.PerJobUserTimeLimit = LARGE_INTEGER()
self.LimitFlags = v_uint32()
self.MinimumWorkingSetSize = v_uint32()
self.MaximumWorkingSetSize = v_uint32()
self.ActiveProcessLimit = v_uint32()
self.Affinity = v_uint32()
self.PriorityClass = v_uint8()
self._pad00b0 = v_bytes(size=3)
self.UIRestrictionsClass = v_uint32()
self.SecurityLimitFlags = v_uint32()
self.Token = v_ptr32()
self.Filter = v_ptr32()
self.EndOfJobTimeAction = v_uint32()
self.CompletionPort = v_ptr32()
self.CompletionKey = v_ptr32()
self.SessionId = v_uint32()
self.SchedulingClass = v_uint32()
self._pad00d8 = v_bytes(size=4)
self.ReadOperationCount = v_uint64()
self.WriteOperationCount = v_uint64()
self.OtherOperationCount = v_uint64()
self.ReadTransferCount = v_uint64()
self.WriteTransferCount = v_uint64()
self.OtherTransferCount = v_uint64()
self.IoInfo = IO_COUNTERS()
self.ProcessMemoryLimit = v_uint32()
self.JobMemoryLimit = v_uint32()
self.PeakProcessMemoryUsed = v_uint32()
self.PeakJobMemoryUsed = v_uint32()
self.CurrentJobMemoryUsed = v_uint32()
self.MemoryLimitsLock = FAST_MUTEX()
self.JobSetLinks = LIST_ENTRY()
self.MemberLevel = v_uint32()
self.JobFlags = v_uint32()
self._pad0180 = v_bytes(size=4)
class _unnamed_16023(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.Key = v_uint32()
self.ByteOffset = LARGE_INTEGER()
class DBGKD_READ_WRITE_IO_EXTENDED64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DataSize = v_uint32()
self.InterfaceType = v_uint32()
self.BusNumber = v_uint32()
self.AddressSpace = v_uint32()
self.IoAddress = v_uint64()
self.DataValue = v_uint32()
self._pad0020 = v_bytes(size=4)
class IO_STATUS_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Status = v_uint32()
self.Information = v_uint32()
class KPROCESSOR_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ContextFrame = CONTEXT()
self.SpecialRegisters = KSPECIAL_REGISTERS()
class KiIoAccessMap(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DirectionMap = vstruct.VArray([ v_uint8() for i in xrange(32) ])
self.IoMap = vstruct.VArray([ v_uint8() for i in xrange(8196) ])
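# KiIoAccessMap models the TSS I/O permission state: a 32-byte interrupt
# direction map followed by the port bitmap, one bit per x86 I/O port
# (8192 bytes for 65536 ports, padded here to 8196).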
class KAPC(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Size = v_uint16()
self.Spare0 = v_uint32()
self.Thread = v_ptr32()
self.ApcListEntry = LIST_ENTRY()
self.KernelRoutine = v_ptr32()
self.RundownRoutine = v_ptr32()
self.NormalRoutine = v_ptr32()
self.NormalContext = v_ptr32()
self.SystemArgument1 = v_ptr32()
self.SystemArgument2 = v_ptr32()
self.ApcStateIndex = v_uint8()
self.ApcMode = v_uint8()
self.Inserted = v_uint8()
self._pad0030 = v_bytes(size=1)
class POOL_TRACKER_BIG_PAGES(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Va = v_ptr32()
self.Key = v_uint32()
self.NumberOfPages = v_uint32()
class SID_IDENTIFIER_AUTHORITY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Value = vstruct.VArray([ v_uint8() for i in xrange(6) ])
class RTL_RANGE_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListHead = LIST_ENTRY()
self.Flags = v_uint32()
self.Count = v_uint32()
self.Stamp = v_uint32()
class LARGE_CONTROL_AREA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Segment = v_ptr32()
self.DereferenceList = LIST_ENTRY()
self.NumberOfSectionReferences = v_uint32()
self.NumberOfPfnReferences = v_uint32()
self.NumberOfMappedViews = v_uint32()
self.NumberOfSubsections = v_uint16()
self.FlushInProgressCount = v_uint16()
self.NumberOfUserReferences = v_uint32()
self.u = _unnamed_12520()
self.FilePointer = v_ptr32()
self.WaitingForDeletion = v_ptr32()
self.ModifiedWriteCount = v_uint16()
self.NumberOfSystemCacheViews = v_uint16()
self.StartingFrame = v_uint32()
self.UserGlobalList = LIST_ENTRY()
self.SessionId = v_uint32()
class VI_POOL_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.InUse = VI_POOL_ENTRY_INUSE()
class POOL_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PreviousSize = v_uint16()
self.BlockSize = v_uint16()
self.ProcessBilled = v_ptr32()
class SHARED_CACHE_MAP(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NodeTypeCode = v_uint16()
self.NodeByteSize = v_uint16()
self.OpenCount = v_uint32()
self.FileSize = LARGE_INTEGER()
self.BcbList = LIST_ENTRY()
self.SectionSize = LARGE_INTEGER()
self.ValidDataLength = LARGE_INTEGER()
self.ValidDataGoal = LARGE_INTEGER()
self.InitialVacbs = vstruct.VArray([ v_ptr32() for i in xrange(4) ])
self.Vacbs = v_ptr32()
self.FileObject = v_ptr32()
self.ActiveVacb = v_ptr32()
self.NeedToZero = v_ptr32()
self.ActivePage = v_uint32()
self.NeedToZeroPage = v_uint32()
self.ActiveVacbSpinLock = v_uint32()
self.VacbActiveCount = v_uint32()
self.DirtyPages = v_uint32()
self.SharedCacheMapLinks = LIST_ENTRY()
self.Flags = v_uint32()
self.Status = v_uint32()
self.Mbcb = v_ptr32()
self.Section = v_ptr32()
self.CreateEvent = v_ptr32()
self.WaitOnActiveCount = v_ptr32()
self.PagesToWrite = v_uint32()
self.BeyondLastFlush = v_uint64()
self.Callbacks = v_ptr32()
self.LazyWriteContext = v_ptr32()
self.PrivateList = LIST_ENTRY()
self.LogHandle = v_ptr32()
self.FlushToLsnRoutine = v_ptr32()
self.DirtyPageThreshold = v_uint32()
self.LazyWritePassCount = v_uint32()
self.UninitializeEvent = v_ptr32()
self.NeedToZeroVacb = v_ptr32()
self.BcbSpinLock = v_uint32()
self.Reserved = v_ptr32()
self.Event = KEVENT()
self.VacbPushLock = EX_PUSH_LOCK()
self._pad00d8 = v_bytes(size=4)
self.PrivateCacheMap = PRIVATE_CACHE_MAP()
class TRACE_ENABLE_FLAG_EXTENSION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Offset = v_uint16()
self.Length = v_uint8()
self.Flag = v_uint8()
class MI_VERIFIER_POOL_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListIndex = v_uint32()
self.Verifier = v_ptr32()
class MMBANKED_SECTION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BasePhysicalPage = v_uint32()
self.BasedPte = v_ptr32()
self.BankSize = v_uint32()
self.BankShift = v_uint32()
self.BankedRoutine = v_ptr32()
self.Context = v_ptr32()
self.CurrentMappedPte = v_ptr32()
self.BankTemplate = vstruct.VArray([ MMPTE() for i in xrange(1) ])
class PCI_POWER_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CurrentSystemState = v_uint32()
self.CurrentDeviceState = v_uint32()
self.SystemWakeLevel = v_uint32()
self.DeviceWakeLevel = v_uint32()
self.SystemStateMapping = vstruct.VArray([ DEVICE_POWER_STATE() for i in xrange(7) ])
self.WaitWakeIrp = v_ptr32()
self.SavedCancelRoutine = v_ptr32()
self.Paging = v_uint32()
self.Hibernate = v_uint32()
self.CrashDump = v_uint32()
class RTL_CRITICAL_SECTION_DEBUG(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.CreatorBackTraceIndex = v_uint16()
self.CriticalSection = v_ptr32()
self.ProcessLocksList = LIST_ENTRY()
self.EntryCount = v_uint32()
self.ContentionCount = v_uint32()
self.Spare = vstruct.VArray([ v_uint32() for i in xrange(2) ])
class PNP_DEVICE_EVENT_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListEntry = LIST_ENTRY()
self.Argument = v_uint32()
self.CallerEvent = v_ptr32()
self.Callback = v_ptr32()
self.Context = v_ptr32()
self.VetoType = v_ptr32()
self.VetoName = v_ptr32()
self.Data = PLUGPLAY_EVENT_BLOCK()
class ARBITER_CONFLICT_INFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.OwningObject = v_ptr32()
self._pad0008 = v_bytes(size=4)
self.Start = v_uint64()
self.End = v_uint64()
class SID_AND_ATTRIBUTES(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Sid = v_ptr32()
self.Attributes = v_uint32()
class VI_DEADLOCK_GLOBALS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Nodes = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self.Resources = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self.Threads = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self.TimeAcquire = v_uint64()
self.TimeRelease = v_uint64()
self.BytesAllocated = v_uint32()
self.ResourceDatabase = v_ptr32()
self.ThreadDatabase = v_ptr32()
self.AllocationFailures = v_uint32()
self.NodesTrimmedBasedOnAge = v_uint32()
self.NodesTrimmedBasedOnCount = v_uint32()
self.NodesSearched = v_uint32()
self.MaxNodesSearched = v_uint32()
self.SequenceNumber = v_uint32()
self.RecursionDepthLimit = v_uint32()
self.SearchedNodesLimit = v_uint32()
self.DepthLimitHits = v_uint32()
self.SearchLimitHits = v_uint32()
self.ABC_ACB_Skipped = v_uint32()
self.FreeResourceList = LIST_ENTRY()
self.FreeThreadList = LIST_ENTRY()
self.FreeNodeList = LIST_ENTRY()
self.FreeResourceCount = v_uint32()
self.FreeThreadCount = v_uint32()
self.FreeNodeCount = v_uint32()
self.Instigator = v_ptr32()
self.NumberOfParticipants = v_uint32()
self.Participant = vstruct.VArray([ v_ptr32() for i in xrange(32) ])
self.CacheReductionInProgress = v_uint32()
class TOKEN(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TokenSource = TOKEN_SOURCE()
self.TokenId = LUID()
self.AuthenticationId = LUID()
self.ParentTokenId = LUID()
self.ExpirationTime = LARGE_INTEGER()
self.TokenLock = v_ptr32()
self._pad0038 = v_bytes(size=4)
self.AuditPolicy = SEP_AUDIT_POLICY()
self.ModifiedId = LUID()
self.SessionId = v_uint32()
self.UserAndGroupCount = v_uint32()
self.RestrictedSidCount = v_uint32()
self.PrivilegeCount = v_uint32()
self.VariableLength = v_uint32()
self.DynamicCharged = v_uint32()
self.DynamicAvailable = v_uint32()
self.DefaultOwnerIndex = v_uint32()
self.UserAndGroups = v_ptr32()
self.RestrictedSids = v_ptr32()
self.PrimaryGroup = v_ptr32()
self.Privileges = v_ptr32()
self.DynamicPart = v_ptr32()
self.DefaultDacl = v_ptr32()
self.TokenType = v_uint32()
self.ImpersonationLevel = v_uint32()
self.TokenFlags = v_uint32()
self.TokenInUse = v_uint8()
self._pad0090 = v_bytes(size=3)
self.ProxyData = v_ptr32()
self.AuditData = v_ptr32()
self.OriginatingLogonSession = LUID()
self.VariablePart = v_uint32()
self._pad00a8 = v_bytes(size=4)
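# TOKEN is the executive access token. The counted arrays it describes
# (UserAndGroups, RestrictedSids, Privileges) live in the variable-length
# region beginning at VariablePart, while DynamicCharged/DynamicAvailable
# account for the separately allocated DynamicPart that holds the default
# DACL and primary group.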
class MMCOLOR_TABLES(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Flink = v_uint32()
self.Blink = v_ptr32()
self.Count = v_uint32()
class DISPATCHER_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint8()
self.Absolute = v_uint8()
self.Size = v_uint8()
self.Inserted = v_uint8()
self.SignalState = v_uint32()
self.WaitListHead = LIST_ENTRY()
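# DISPATCHER_HEADER prefixes every waitable kernel object (events, semaphores,
# mutexes, timers, threads, processes). Type discriminates the object kind,
# SignalState is what the wait primitives test, and WaitListHead anchors the
# KWAIT_BLOCKs of threads currently waiting on the object.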
class _unnamed_16509(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DeviceNumber = v_uint32()
class _unnamed_16110(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.OutputBufferLength = v_uint32()
self.InputBufferLength = v_uint32()
self.FsControlCode = v_uint32()
self.Type3InputBuffer = v_ptr32()
class _unnamed_16505(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Mbr = _unnamed_16663()
self._pad0010 = v_bytes(size=8)
class DBGKD_READ_WRITE_IO64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.IoAddress = v_uint64()
self.DataSize = v_uint32()
self.DataValue = v_uint32()
class PROCESSOR_POWER_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.IdleFunction = v_ptr32()
self.Idle0KernelTimeLimit = v_uint32()
self.Idle0LastTime = v_uint32()
self.IdleHandlers = v_ptr32()
self.IdleState = v_ptr32()
self.IdleHandlersCount = v_uint32()
self.LastCheck = v_uint64()
self.IdleTimes = PROCESSOR_IDLE_TIMES()
self.IdleTime1 = v_uint32()
self.PromotionCheck = v_uint32()
self.IdleTime2 = v_uint32()
self.CurrentThrottle = v_uint8()
self.ThermalThrottleLimit = v_uint8()
self.CurrentThrottleIndex = v_uint8()
self.ThermalThrottleIndex = v_uint8()
self.LastKernelUserTime = v_uint32()
self.LastIdleThreadKernelTime = v_uint32()
self.PackageIdleStartTime = v_uint32()
self.PackageIdleTime = v_uint32()
self.DebugCount = v_uint32()
self.LastSysTime = v_uint32()
self.TotalIdleStateTime = vstruct.VArray([ v_uint64() for i in xrange(3) ])
self.TotalIdleTransitions = vstruct.VArray([ v_uint32() for i in xrange(3) ])
self._pad0090 = v_bytes(size=4)
self.PreviousC3StateTime = v_uint64()
self.KneeThrottleIndex = v_uint8()
self.ThrottleLimitIndex = v_uint8()
self.PerfStatesCount = v_uint8()
self.ProcessorMinThrottle = v_uint8()
self.ProcessorMaxThrottle = v_uint8()
self.EnableIdleAccounting = v_uint8()
self.LastC3Percentage = v_uint8()
self.LastAdjustedBusyPercentage = v_uint8()
self.PromotionCount = v_uint32()
self.DemotionCount = v_uint32()
self.ErrorCount = v_uint32()
self.RetryCount = v_uint32()
self.Flags = v_uint32()
self._pad00b8 = v_bytes(size=4)
self.PerfCounterFrequency = LARGE_INTEGER()
self.PerfTickCount = v_uint32()
self._pad00c8 = v_bytes(size=4)
self.PerfTimer = KTIMER()
self.PerfDpc = KDPC()
self.PerfStates = v_ptr32()
self.PerfSetThrottle = v_ptr32()
self.LastC3KernelUserTime = v_uint32()
self.LastPackageIdleTime = v_uint32()
class SECURITY_CLIENT_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SecurityQos = SECURITY_QUALITY_OF_SERVICE()
self.ClientToken = v_ptr32()
self.DirectlyAccessClientToken = v_uint8()
self.DirectAccessEffectiveOnly = v_uint8()
self.ServerIsRemote = v_uint8()
self._pad0014 = v_bytes(size=1)
self.ClientTokenControl = TOKEN_CONTROL()
class DBGKD_SEARCH_MEMORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SearchAddress = v_uint64()
self.SearchLength = v_uint64()
self.PatternLength = v_uint32()
self._pad0018 = v_bytes(size=4)
class DESCRIPTOR(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Pad = v_uint16()
self.Limit = v_uint16()
self.Base = v_uint32()
class DBGKD_MANIPULATE_STATE64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ApiNumber = v_uint32()
self.ProcessorLevel = v_uint16()
self.Processor = v_uint16()
self.ReturnStatus = v_uint32()
self._pad0010 = v_bytes(size=4)
self.u = _unnamed_11794()
class LPCP_PORT_QUEUE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NonPagedPortQueue = v_ptr32()
self.Semaphore = v_ptr32()
self.ReceiveHead = LIST_ENTRY()
class DBGKD_LOAD_SYMBOLS64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PathNameLength = v_uint32()
self._pad0008 = v_bytes(size=4)
self.BaseOfDll = v_uint64()
self.ProcessId = v_uint64()
self.CheckSum = v_uint32()
self.SizeOfImage = v_uint32()
self.UnloadSymbols = v_uint8()
self._pad0028 = v_bytes(size=7)
class CACHE_UNINITIALIZE_EVENT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.Event = KEVENT()
class SECURITY_QUALITY_OF_SERVICE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.ImpersonationLevel = v_uint32()
self.ContextTrackingMode = v_uint8()
self.EffectiveOnly = v_uint8()
self._pad000c = v_bytes(size=2)
class COMPRESSED_DATA_INFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CompressionFormatAndEngine = v_uint16()
self.CompressionUnitShift = v_uint8()
self.ChunkShift = v_uint8()
self.ClusterShift = v_uint8()
self.Reserved = v_uint8()
self.NumberOfChunks = v_uint16()
self.CompressedChunkSizes = vstruct.VArray([ v_uint32() for i in xrange(1) ])
class _unnamed_14650(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Start = v_uint32()
self.Length = v_uint32()
self.Reserved = v_uint32()
class RTL_HANDLE_TABLE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.MaximumNumberOfHandles = v_uint32()
self.SizeOfHandleTableEntry = v_uint32()
self.Reserved = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self.FreeHandles = v_ptr32()
self.CommittedHandles = v_ptr32()
self.UnCommittedHandles = v_ptr32()
self.MaxReservedHandles = v_ptr32()
class _unnamed_14654(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DataSize = v_uint32()
self.Reserved1 = v_uint32()
self.Reserved2 = v_uint32()
class CMHIVE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Hive = HHIVE()
self.FileHandles = vstruct.VArray([ v_ptr32() for i in xrange(3) ])
self.NotifyList = LIST_ENTRY()
self.HiveList = LIST_ENTRY()
self.HiveLock = v_ptr32()
self.ViewLock = v_ptr32()
self.LRUViewListHead = LIST_ENTRY()
self.PinViewListHead = LIST_ENTRY()
self.FileObject = v_ptr32()
self.FileFullPath = UNICODE_STRING()
self.FileUserName = UNICODE_STRING()
self.MappedViews = v_uint16()
self.PinnedViews = v_uint16()
self.UseCount = v_uint32()
self.SecurityCount = v_uint32()
self.SecurityCacheSize = v_uint32()
self.SecurityHitHint = v_uint32()
self.SecurityCache = v_ptr32()
self.SecurityHash = vstruct.VArray([ LIST_ENTRY() for i in xrange(64) ])
self.UnloadEvent = v_ptr32()
self.RootKcb = v_ptr32()
self.Frozen = v_uint8()
self._pad047c = v_bytes(size=3)
self.UnloadWorkItem = v_ptr32()
self.GrowOnlyMode = v_uint8()
self._pad0484 = v_bytes(size=3)
self.GrowOffset = v_uint32()
self.KcbConvertListHead = LIST_ENTRY()
self.KnodeConvertListHead = LIST_ENTRY()
self.CellRemapArray = v_ptr32()
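# CMHIVE wraps the low-level hive (HHIVE, defined earlier) with the
# configuration manager's per-hive bookkeeping: backing file handles, the
# mapped-view LRU and pin lists, and the security-descriptor cache.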
class POP_SHUTDOWN_BUG_CHECK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Code = v_uint32()
self.Parameter1 = v_uint32()
self.Parameter2 = v_uint32()
self.Parameter3 = v_uint32()
self.Parameter4 = v_uint32()
class SECTION_OBJECT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.StartingVa = v_ptr32()
self.EndingVa = v_ptr32()
self.Parent = v_ptr32()
self.LeftChild = v_ptr32()
self.RightChild = v_ptr32()
self.Segment = v_ptr32()
class LUID(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LowPart = v_uint32()
self.HighPart = v_uint32()
class OBJECT_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PointerCount = v_uint32()
self.HandleCount = v_uint32()
self.Type = v_ptr32()
self.NameInfoOffset = v_uint8()
self.HandleInfoOffset = v_uint8()
self.QuotaInfoOffset = v_uint8()
self.Flags = v_uint8()
self.ObjectCreateInfo = v_ptr32()
self.SecurityDescriptor = v_ptr32()
self.Body = QUAD()
class PCI_MN_DISPATCH_TABLE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DispatchStyle = v_uint32()
self.DispatchFunction = v_ptr32()
class PCI_HEADER_TYPE_2(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SocketRegistersBaseAddress = v_uint32()
self.CapabilitiesPtr = v_uint8()
self.Reserved = v_uint8()
self.SecondaryStatus = v_uint16()
self.PrimaryBus = v_uint8()
self.SecondaryBus = v_uint8()
self.SubordinateBus = v_uint8()
self.SecondaryLatency = v_uint8()
self.Range = vstruct.VArray([ _unnamed_15560() for i in xrange(4) ])
self.InterruptLine = v_uint8()
self.InterruptPin = v_uint8()
self.BridgeControl = v_uint16()
class PCI_HEADER_TYPE_1(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BaseAddresses = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self.PrimaryBus = v_uint8()
self.SecondaryBus = v_uint8()
self.SubordinateBus = v_uint8()
self.SecondaryLatency = v_uint8()
self.IOBase = v_uint8()
self.IOLimit = v_uint8()
self.SecondaryStatus = v_uint16()
self.MemoryBase = v_uint16()
self.MemoryLimit = v_uint16()
self.PrefetchBase = v_uint16()
self.PrefetchLimit = v_uint16()
self.PrefetchBaseUpper32 = v_uint32()
self.PrefetchLimitUpper32 = v_uint32()
self.IOBaseUpper16 = v_uint16()
self.IOLimitUpper16 = v_uint16()
self.CapabilitiesPtr = v_uint8()
self.Reserved1 = vstruct.VArray([ v_uint8() for i in xrange(3) ])
self.ROMBaseAddress = v_uint32()
self.InterruptLine = v_uint8()
self.InterruptPin = v_uint8()
self.BridgeControl = v_uint16()
class PCI_HEADER_TYPE_0(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BaseAddresses = vstruct.VArray([ v_uint32() for i in xrange(6) ])
self.CIS = v_uint32()
self.SubVendorID = v_uint16()
self.SubSystemID = v_uint16()
self.ROMBaseAddress = v_uint32()
self.CapabilitiesPtr = v_uint8()
self.Reserved1 = vstruct.VArray([ v_uint8() for i in xrange(3) ])
self.Reserved2 = v_uint32()
self.InterruptLine = v_uint8()
self.InterruptPin = v_uint8()
self.MinimumGrant = v_uint8()
self.MaximumLatency = v_uint8()
class MMPFN(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.u1 = _unnamed_13150()
self.PteAddress = v_ptr32()
self.u2 = _unnamed_13151()
self.u3 = _unnamed_13152()
self.OriginalPte = MMPTE()
self.u4 = _unnamed_13153()
class OBJECT_DUMP_CONTROL(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Stream = v_ptr32()
self.Detail = v_uint32()
class CACHE_MANAGER_CALLBACKS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.AcquireForLazyWrite = v_ptr32()
self.ReleaseFromLazyWrite = v_ptr32()
self.AcquireForReadAhead = v_ptr32()
self.ReleaseFromReadAhead = v_ptr32()
class DBGKD_CONTINUE2(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ContinueStatus = v_uint32()
self.ControlSet = X86_DBGKD_CONTROL_SET()
self._pad0020 = v_bytes(size=12)
class HANDLE_TRACE_DB_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ClientId = CLIENT_ID()
self.Handle = v_ptr32()
self.Type = v_uint32()
self.StackTrace = vstruct.VArray([ v_ptr32() for i in xrange(16) ])
class LPCP_NONPAGED_PORT_QUEUE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Semaphore = KSEMAPHORE()
self.BackPointer = v_ptr32()
class DEVICE_RELATIONS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Count = v_uint32()
self.Objects = vstruct.VArray([ v_ptr32() for i in xrange(1) ])
class _unnamed_14532(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Port = _unnamed_16299()
class BATTERY_REPORTING_SCALE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Granularity = v_uint32()
self.Capacity = v_uint32()
class MMPAGING_FILE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Size = v_uint32()
self.MaximumSize = v_uint32()
self.MinimumSize = v_uint32()
self.FreeSpace = v_uint32()
self.CurrentUsage = v_uint32()
self.PeakUsage = v_uint32()
self.Hint = v_uint32()
self.HighestPage = v_uint32()
self.Entry = vstruct.VArray([ v_ptr32() for i in xrange(2) ])
self.Bitmap = v_ptr32()
self.File = v_ptr32()
self.PageFileName = UNICODE_STRING()
self.PageFileNumber = v_uint32()
self.Extended = v_uint8()
self.HintSetToZero = v_uint8()
self.BootPartition = v_uint8()
self._pad0040 = v_bytes(size=1)
self.FileHandle = v_ptr32()
class _unnamed_16200(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.WhichSpace = v_uint32()
self.Buffer = v_ptr32()
self.Offset = v_uint32()
self.Length = v_uint32()
class STRING(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint16()
self.MaximumLength = v_uint16()
self.Buffer = v_ptr32()
class _unnamed_16205(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Lock = v_uint8()
class FNSAVE_FORMAT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ControlWord = v_uint32()
self.StatusWord = v_uint32()
self.TagWord = v_uint32()
self.ErrorOffset = v_uint32()
self.ErrorSelector = v_uint32()
self.DataOffset = v_uint32()
self.DataSelector = v_uint32()
self.RegisterArea = vstruct.VArray([ v_uint8() for i in xrange(80) ])
class CMP_OFFSET_ARRAY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.FileOffset = v_uint32()
self.DataBuffer = v_ptr32()
self.DataLength = v_uint32()
class CM_KEY_VALUE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint16()
self.NameLength = v_uint16()
self.DataLength = v_uint32()
self.Data = v_uint32()
self.Type = v_uint32()
self.Flags = v_uint16()
self.Spare = v_uint16()
self.Name = vstruct.VArray([ v_uint16() for i in xrange(1) ])
self._pad0018 = v_bytes(size=2)
class MMVAD_FLAGS2(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.FileOffset = v_uint32()
class LIST_ENTRY32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Flink = v_uint32()
self.Blink = v_uint32()
class MMWSLE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.u1 = _unnamed_13252()
class DBGKD_BREAKPOINTEX(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BreakPointCount = v_uint32()
self.ContinueStatus = v_uint32()
class FILE_NETWORK_OPEN_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CreationTime = LARGE_INTEGER()
self.LastAccessTime = LARGE_INTEGER()
self.LastWriteTime = LARGE_INTEGER()
self.ChangeTime = LARGE_INTEGER()
self.AllocationSize = LARGE_INTEGER()
self.EndOfFile = LARGE_INTEGER()
self.FileAttributes = v_uint32()
self._pad0038 = v_bytes(size=4)
class PCI_SECONDARY_EXTENSION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.List = SINGLE_LIST_ENTRY()
self.ExtensionType = v_uint32()
self.Destructor = v_ptr32()
class DBGKD_QUERY_MEMORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Address = v_uint64()
self.Reserved = v_uint64()
self.AddressSpace = v_uint32()
self.Flags = v_uint32()
class PCI_SLOT_NUMBER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.u = _unnamed_14357()
class _unnamed_16115(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_ptr32()
self.Key = v_uint32()
self.ByteOffset = LARGE_INTEGER()
class KDEVICE_QUEUE_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DeviceListEntry = LIST_ENTRY()
self.SortKey = v_uint32()
self.Inserted = v_uint8()
self._pad0010 = v_bytes(size=3)
class LIST_ENTRY64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Flink = v_uint64()
self.Blink = v_uint64()
class MMPTE_SUBSECTION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Valid = v_uint32()
class PO_DEVICE_NOTIFY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Link = LIST_ENTRY()
self.TargetDevice = v_ptr32()
self.WakeNeeded = v_uint8()
self.OrderLevel = v_uint8()
self._pad0010 = v_bytes(size=2)
self.DeviceObject = v_ptr32()
self.Node = v_ptr32()
self.DeviceName = v_ptr32()
self.DriverName = v_ptr32()
self.ChildCount = v_uint32()
self.ActiveChild = v_uint32()
class HMAP_DIRECTORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Directory = vstruct.VArray([ v_ptr32() for i in xrange(1024) ])
class _unnamed_13150(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Flink = v_uint32()
class _unnamed_13151(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Blink = v_uint32()
class _unnamed_13152(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.e1 = MMPFNENTRY()
class HEAP_STOP_ON_VALUES(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.AllocAddress = v_uint32()
self.AllocTag = HEAP_STOP_ON_TAG()
self.ReAllocAddress = v_uint32()
self.ReAllocTag = HEAP_STOP_ON_TAG()
self.FreeAddress = v_uint32()
self.FreeTag = HEAP_STOP_ON_TAG()
class WMI_BUFFER_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Wnode = WNODE_HEADER()
self.Offset = v_uint32()
self.EventsLost = v_uint32()
self.InstanceGuid = GUID()
class RTL_HANDLE_TABLE_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Flags = v_uint32()
class ARBITER_ALTERNATIVE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Minimum = v_uint64()
self.Maximum = v_uint64()
self.Length = v_uint32()
self.Alignment = v_uint32()
self.Priority = v_uint32()
self.Flags = v_uint32()
self.Descriptor = v_ptr32()
self.Reserved = vstruct.VArray([ v_uint32() for i in xrange(3) ])
class EX_FAST_REF(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Object = v_ptr32()
class INTERLOCK_SEQ(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Depth = v_uint16()
self.FreeEntryOffset = v_uint16()
self.Sequence = v_uint32()
class HMAP_TABLE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Table = vstruct.VArray([ HMAP_ENTRY() for i in xrange(512) ])
class KSPIN_LOCK_QUEUE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.Lock = v_ptr32()
class _unnamed_12918(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DockStatus = v_uint32()
self.ListEntry = LIST_ENTRY()
self.SerialNumber = v_ptr32()
class FS_FILTER_CALLBACKS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SizeOfFsFilterCallbacks = v_uint32()
self.Reserved = v_uint32()
self.PreAcquireForSectionSynchronization = v_ptr32()
self.PostAcquireForSectionSynchronization = v_ptr32()
self.PreReleaseForSectionSynchronization = v_ptr32()
self.PostReleaseForSectionSynchronization = v_ptr32()
self.PreAcquireForCcFlush = v_ptr32()
self.PostAcquireForCcFlush = v_ptr32()
self.PreReleaseForCcFlush = v_ptr32()
self.PostReleaseForCcFlush = v_ptr32()
self.PreAcquireForModifiedPageWriter = v_ptr32()
self.PostAcquireForModifiedPageWriter = v_ptr32()
self.PreReleaseForModifiedPageWriter = v_ptr32()
self.PostReleaseForModifiedPageWriter = v_ptr32()
class HANDLE_TABLE_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Object = v_ptr32()
self.GrantedAccess = v_uint32()
class IO_RESOURCE_DESCRIPTOR(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Option = v_uint8()
self.Type = v_uint8()
self.ShareDisposition = v_uint8()
self.Spare1 = v_uint8()
self.Flags = v_uint16()
self.Spare2 = v_uint16()
self.u = _unnamed_14532()
class _unnamed_12917(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NextResourceDeviceNode = v_ptr32()
class _unnamed_12916(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LegacyDeviceNode = v_ptr32()
class THERMAL_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ThermalStamp = v_uint32()
self.ThermalConstant1 = v_uint32()
self.ThermalConstant2 = v_uint32()
self.Processors = v_uint32()
self.SamplingPeriod = v_uint32()
self.CurrentTemperature = v_uint32()
self.PassiveTripPoint = v_uint32()
self.CriticalTripPoint = v_uint32()
self.ActiveTripPointCount = v_uint8()
self._pad0024 = v_bytes(size=3)
self.ActiveTripPoint = vstruct.VArray([ v_uint32() for i in xrange(10) ])
class IMAGE_OPTIONAL_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Magic = v_uint16()
self.MajorLinkerVersion = v_uint8()
self.MinorLinkerVersion = v_uint8()
self.SizeOfCode = v_uint32()
self.SizeOfInitializedData = v_uint32()
self.SizeOfUninitializedData = v_uint32()
self.AddressOfEntryPoint = v_uint32()
self.BaseOfCode = v_uint32()
self.BaseOfData = v_uint32()
self.ImageBase = v_uint32()
self.SectionAlignment = v_uint32()
self.FileAlignment = v_uint32()
self.MajorOperatingSystemVersion = v_uint16()
self.MinorOperatingSystemVersion = v_uint16()
self.MajorImageVersion = v_uint16()
self.MinorImageVersion = v_uint16()
self.MajorSubsystemVersion = v_uint16()
self.MinorSubsystemVersion = v_uint16()
self.Win32VersionValue = v_uint32()
self.SizeOfImage = v_uint32()
self.SizeOfHeaders = v_uint32()
self.CheckSum = v_uint32()
self.Subsystem = v_uint16()
self.DllCharacteristics = v_uint16()
self.SizeOfStackReserve = v_uint32()
self.SizeOfStackCommit = v_uint32()
self.SizeOfHeapReserve = v_uint32()
self.SizeOfHeapCommit = v_uint32()
self.LoaderFlags = v_uint32()
self.NumberOfRvaAndSizes = v_uint32()
self.DataDirectory = vstruct.VArray([ IMAGE_DATA_DIRECTORY() for i in xrange(16) ])
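# IMAGE_OPTIONAL_HEADER is the 32-bit (PE32, Magic 0x10b) optional header;
# NumberOfRvaAndSizes bounds how many DataDirectory slots are meaningful.
#
# A hedged sketch of parsing it straight out of a PE image: the 0x3c e_lfanew
# offset and the 24 bytes of PE signature plus file header are standard PE
# layout, and vsParse() is assumed to accept a starting offset. Illustrative
# only.
def _sketch_optional_header(pe_bytes):
    # Locate the optional header via the DOS header's e_lfanew field and
    # parse it in place.
    import struct
    e_lfanew = struct.unpack_from('<I', pe_bytes, 0x3c)[0]
    opt = IMAGE_OPTIONAL_HEADER()
    opt.vsParse(pe_bytes, offset=e_lfanew + 24)
    return opt.ImageBase, opt.AddressOfEntryPoint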
class SCSI_REQUEST_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
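# Like ADAPTER_OBJECT above, SCSI_REQUEST_BLOCK is emitted with no fields: it
# parses as zero bytes and exists only so pointers to it have a named type.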
class OBJECT_ATTRIBUTES(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.RootDirectory = v_ptr32()
self.ObjectName = v_ptr32()
self.Attributes = v_uint32()
self.SecurityDescriptor = v_ptr32()
self.SecurityQualityOfService = v_ptr32()
class SUBSECTION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ControlArea = v_ptr32()
self.u = _unnamed_12531()
self.StartingSector = v_uint32()
self.NumberOfFullSectors = v_uint32()
self.SubsectionBase = v_ptr32()
self.UnusedPtes = v_uint32()
self.PtesInSubsection = v_uint32()
self.NextSubsection = v_ptr32()
class ETHREAD(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Tcb = KTHREAD()
self.CreateTime = LARGE_INTEGER()
self.ExitTime = LARGE_INTEGER()
self.ExitStatus = v_uint32()
self.PostBlockList = LIST_ENTRY()
self.TerminationPort = v_ptr32()
self.ActiveTimerListLock = v_uint32()
self.ActiveTimerListHead = LIST_ENTRY()
self.Cid = CLIENT_ID()
self.LpcReplySemaphore = KSEMAPHORE()
self.LpcReplyMessage = v_ptr32()
self.ImpersonationInfo = v_ptr32()
self.IrpList = LIST_ENTRY()
self.TopLevelIrp = v_uint32()
self.DeviceToVerify = v_ptr32()
self.ThreadsProcess = v_ptr32()
self.StartAddress = v_ptr32()
self.Win32StartAddress = v_ptr32()
self.ThreadListEntry = LIST_ENTRY()
self.RundownProtect = EX_RUNDOWN_REF()
self.ThreadLock = EX_PUSH_LOCK()
self.LpcReplyMessageId = v_uint32()
self.ReadClusterSize = v_uint32()
self.GrantedAccess = v_uint32()
self.CrossThreadFlags = v_uint32()
self.SameThreadPassiveFlags = v_uint32()
self.SameThreadApcFlags = v_uint32()
self.ForwardClusterOnly = v_uint8()
self.DisablePageFaultClustering = v_uint8()
self._pad0258 = v_bytes(size=2)
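# ETHREAD is the executive thread object: Tcb embeds the KTHREAD defined
# earlier, Cid carries the thread's (process id, thread id) pair, and the
# IrpList/TopLevelIrp fields track I/O issued by the thread.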
class _unnamed_16158(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.InterfaceType = v_ptr32()
self.Size = v_uint16()
self.Version = v_uint16()
self.Interface = v_ptr32()
self.InterfaceSpecificData = v_ptr32()
class FAST_MUTEX(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Count = v_uint32()
self.Owner = v_ptr32()
self.Contention = v_uint32()
self.Event = KEVENT()
self.OldIrql = v_uint32()
class _unnamed_16156(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint32()
class MM_SESSION_SPACE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ReferenceCount = v_uint32()
self.u = _unnamed_13227()
self.SessionId = v_uint32()
self.SessionPageDirectoryIndex = v_uint32()
self.GlobalVirtualAddress = v_ptr32()
self.ProcessList = LIST_ENTRY()
self.NonPagedPoolBytes = v_uint32()
self.PagedPoolBytes = v_uint32()
self.NonPagedPoolAllocations = v_uint32()
self.PagedPoolAllocations = v_uint32()
self.NonPagablePages = v_uint32()
self.CommittedPages = v_uint32()
self._pad0038 = v_bytes(size=4)
self.LastProcessSwappedOutTime = LARGE_INTEGER()
self.PageTables = v_ptr32()
self.PagedPoolMutex = FAST_MUTEX()
self.PagedPoolStart = v_ptr32()
self.PagedPoolEnd = v_ptr32()
self.PagedPoolBasePde = v_ptr32()
self.PagedPoolInfo = MM_PAGED_POOL_INFO()
self.Color = v_uint32()
self.ProcessOutSwapCount = v_uint32()
self.ImageList = LIST_ENTRY()
self.GlobalPteEntry = v_ptr32()
self.CopyOnWriteCount = v_uint32()
self.SessionPoolAllocationFailures = vstruct.VArray([ v_uint32() for i in xrange(4) ])
self.AttachCount = v_uint32()
self.AttachEvent = KEVENT()
self.LastProcess = v_ptr32()
self._pad00d8 = v_bytes(size=4)
self.Vm = MMSUPPORT()
self.Wsle = v_ptr32()
self.WsLock = ERESOURCE()
self.WsListEntry = LIST_ENTRY()
self.Session = MMSESSION()
self.Win32KDriverObject = DRIVER_OBJECT()
self.WorkingSetLockOwner = v_ptr32()
self.PagedPool = POOL_DESCRIPTOR()
self.ProcessReferenceToSession = v_uint32()
self.LocaleId = v_uint32()
self._pad1278 = v_bytes(size=4)
class CM_NAME_CONTROL_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Compressed = v_uint8()
self._pad0002 = v_bytes(size=1)
self.RefCount = v_uint16()
self.NameHash = CM_NAME_HASH()
class _unnamed_16016(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SecurityContext = v_ptr32()
self.Options = v_uint32()
self.Reserved = v_uint16()
self.ShareAccess = v_uint16()
self.Parameters = v_ptr32()
class _unnamed_13534(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Level = v_uint32()
class KDEVICE_QUEUE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Size = v_uint16()
self.DeviceListHead = LIST_ENTRY()
self.Lock = v_uint32()
self.Busy = v_uint8()
self._pad0014 = v_bytes(size=3)
class IO_COUNTERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ReadOperationCount = v_uint64()
self.WriteOperationCount = v_uint64()
self.OtherOperationCount = v_uint64()
self.ReadTransferCount = v_uint64()
self.WriteTransferCount = v_uint64()
self.OtherTransferCount = v_uint64()
class _unnamed_16380(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DataLength = v_uint16()
self.TotalLength = v_uint16()
class PCI_BUS_INTERFACE_STANDARD(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Size = v_uint16()
self.Version = v_uint16()
self.Context = v_ptr32()
self.InterfaceReference = v_ptr32()
self.InterfaceDereference = v_ptr32()
self.ReadConfig = v_ptr32()
self.WriteConfig = v_ptr32()
self.PinToLine = v_ptr32()
self.LineToPin = v_ptr32()
class PORT_MESSAGE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.u1 = _unnamed_15734()
self.u2 = _unnamed_15735()
self.ClientId = CLIENT_ID()
self.MessageId = v_uint32()
self.ClientViewSize = v_uint32()
class _unnamed_16385(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.DataInfoOffset = v_uint16()
class PCI_COMMON_CONFIG(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.VendorID = v_uint16()
self.DeviceID = v_uint16()
self.Command = v_uint16()
self.Status = v_uint16()
self.RevisionID = v_uint8()
self.ProgIf = v_uint8()
self.SubClass = v_uint8()
self.BaseClass = v_uint8()
self.CacheLineSize = v_uint8()
self.LatencyTimer = v_uint8()
self.HeaderType = v_uint8()
self.BIST = v_uint8()
self.u = _unnamed_14629()
self.DeviceSpecific = vstruct.VArray([ v_uint8() for i in xrange(192) ])
class IO_SECURITY_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SecurityQos = v_ptr32()
self.AccessState = v_ptr32()
self.DesiredAccess = v_uint32()
self.FullCreateOptions = v_uint32()
class TERMINATION_PORT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.Port = v_ptr32()
class IO_CLIENT_EXTENSION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NextExtension = v_ptr32()
self.ClientIdentificationAddress = v_ptr32()
class INITIAL_PRIVILEGE_SET(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PrivilegeCount = v_uint32()
self.Control = v_uint32()
self.Privilege = vstruct.VArray([ LUID_AND_ATTRIBUTES() for i in xrange(3) ])
class PCI_LOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Atom = v_uint32()
self.OldIrql = v_uint8()
self._pad0008 = v_bytes(size=3)
class POOL_DESCRIPTOR(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PoolType = v_uint32()
self.PoolIndex = v_uint32()
self.RunningAllocs = v_uint32()
self.RunningDeAllocs = v_uint32()
self.TotalPages = v_uint32()
self.TotalBigPages = v_uint32()
self.Threshold = v_uint32()
self.LockAddress = v_ptr32()
self.PendingFrees = v_ptr32()
self.PendingFreeDepth = v_uint32()
self.ListHeads = vstruct.VArray([ LIST_ENTRY() for i in xrange(512) ])
class DBGKD_QUERY_SPECIAL_CALLS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NumberOfSpecialCalls = v_uint32()
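# Note: the triple-M "UNCOMMMTTED" spelling below mirrors the original
# Windows symbol name (_HEAP_UNCOMMMTTED_RANGE); it is not a
# transcription error in this module.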
class HEAP_UNCOMMMTTED_RANGE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.Address = v_uint32()
self.Size = v_uint32()
self.filler = v_uint32()
class HMAP_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BlockAddress = v_uint32()
self.BinAddress = v_uint32()
self.CmView = v_ptr32()
self.MemAlloc = v_uint32()
class DUMP_STACK_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Init = DUMP_INITIALIZATION_CONTEXT()
self.PartitionOffset = LARGE_INTEGER()
self.DumpPointers = v_ptr32()
self.PointersLength = v_uint32()
self.ModulePrefix = v_ptr32()
self.DriverList = LIST_ENTRY()
self.InitMsg = STRING()
self.ProgMsg = STRING()
self.DoneMsg = STRING()
self.FileObject = v_ptr32()
self.UsageType = v_uint32()
self._pad00b0 = v_bytes(size=4)
class PNP_DEVICE_EVENT_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Status = v_uint32()
self.EventQueueMutex = KMUTANT()
self.Lock = FAST_MUTEX()
self.List = LIST_ENTRY()
class PROCESSOR_IDLE_TIMES(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.StartTime = v_uint64()
self.EndTime = v_uint64()
self.IdleHandlerReserved = vstruct.VArray([ v_uint32() for i in xrange(4) ])
class KWAIT_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.WaitListEntry = LIST_ENTRY()
self.Thread = v_ptr32()
self.Object = v_ptr32()
self.NextWaitBlock = v_ptr32()
self.WaitKey = v_uint16()
self.WaitType = v_uint16()
class DBGKD_READ_WRITE_IO32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DataSize = v_uint32()
self.IoAddress = v_uint32()
self.DataValue = v_uint32()
class POP_HIBER_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.WriteToFile = v_uint8()
self.ReserveLoaderMemory = v_uint8()
self.ReserveFreeMemory = v_uint8()
self.VerifyOnWake = v_uint8()
self.Reset = v_uint8()
self.HiberFlags = v_uint8()
self.LinkFile = v_uint8()
self._pad0008 = v_bytes(size=1)
self.LinkFileHandle = v_ptr32()
self.Lock = v_uint32()
self.MapFrozen = v_uint8()
self._pad0014 = v_bytes(size=3)
self.MemoryMap = RTL_BITMAP()
self.ClonedRanges = LIST_ENTRY()
self.ClonedRangeCount = v_uint32()
self.NextCloneRange = v_ptr32()
self.NextPreserve = v_uint32()
self.LoaderMdl = v_ptr32()
self.Clones = v_ptr32()
self.NextClone = v_ptr32()
self.NoClones = v_uint32()
self.Spares = v_ptr32()
self._pad0048 = v_bytes(size=4)
self.PagesOut = v_uint64()
self.IoPage = v_ptr32()
self.CurrentMcb = v_ptr32()
self.DumpStack = v_ptr32()
self.WakeState = v_ptr32()
self.NoRanges = v_uint32()
self.HiberVa = v_uint32()
self.HiberPte = LARGE_INTEGER()
self.Status = v_uint32()
self.MemoryImage = v_ptr32()
self.TableHead = v_ptr32()
self.CompressionWorkspace = v_ptr32()
self.CompressedWriteBuffer = v_ptr32()
self.PerformanceStats = v_ptr32()
self.CompressionBlock = v_ptr32()
self.DmaIO = v_ptr32()
self.TemporaryHeap = v_ptr32()
self._pad0098 = v_bytes(size=4)
self.PerfInfo = PO_HIBER_PERF()
class _unnamed_16128(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SecurityInformation = v_uint32()
self.SecurityDescriptor = v_ptr32()
class PS_JOB_TOKEN_FILTER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CapturedSidCount = v_uint32()
self.CapturedSids = v_ptr32()
self.CapturedSidsLength = v_uint32()
self.CapturedGroupCount = v_uint32()
self.CapturedGroups = v_ptr32()
self.CapturedGroupsLength = v_uint32()
self.CapturedPrivilegeCount = v_uint32()
self.CapturedPrivileges = v_ptr32()
self.CapturedPrivilegesLength = v_uint32()
class CALL_HASH_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListEntry = LIST_ENTRY()
self.CallersAddress = v_ptr32()
self.CallersCaller = v_ptr32()
self.CallCount = v_uint32()
class _unnamed_16125(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SecurityInformation = v_uint32()
self.Length = v_uint32()
class TOKEN_CONTROL(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TokenId = LUID()
self.AuthenticationId = LUID()
self.ModifiedId = LUID()
self.TokenSource = TOKEN_SOURCE()
class _unnamed_16120(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.OutputBufferLength = v_uint32()
self.InputBufferLength = v_uint32()
self.IoControlCode = v_uint32()
self.Type3InputBuffer = v_ptr32()
class _unnamed_16554(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ArbitrationList = v_ptr32()
self.AllocateFromCount = v_uint32()
self.AllocateFrom = v_ptr32()
class PCI_COMMON_EXTENSION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.ExtensionType = v_uint32()
self.IrpDispatchTable = v_ptr32()
self.DeviceState = v_uint8()
self.TentativeNextState = v_uint8()
self._pad0010 = v_bytes(size=2)
self.SecondaryExtLock = KEVENT()
class HEAP_USERDATA_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SFreeListEntry = SINGLE_LIST_ENTRY()
self.HeapHandle = v_ptr32()
self.SizeIndex = v_uint32()
self.Signature = v_uint32()
class _unnamed_16559(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ArbitrationList = v_ptr32()
class RTL_DRIVE_LETTER_CURDIR(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Flags = v_uint16()
self.Length = v_uint16()
self.TimeStamp = v_uint32()
self.DosPath = STRING()
class ULARGE_INTEGER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LowPart = v_uint32()
self.HighPart = v_uint32()
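# Sketch: ULARGE_INTEGER is a split 64-bit value, so recovering the full
# quantity is a shift-and-or of the halves. Helper name is hypothetical.
def _example_ularge_value(uli):
    return (int(uli.HighPart) << 32) | int(uli.LowPart)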
class _unnamed_15734(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.s1 = _unnamed_16380()
class _unnamed_15735(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.s2 = _unnamed_16385()
class TEB_ACTIVE_FRAME(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Flags = v_uint32()
self.Previous = v_ptr32()
self.Context = v_ptr32()
class ETIMER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.KeTimer = KTIMER()
self.TimerApc = KAPC()
self.TimerDpc = KDPC()
self.ActiveTimerListEntry = LIST_ENTRY()
self.Lock = v_uint32()
self.Period = v_uint32()
self.ApcAssociated = v_uint8()
self.WakeTimer = v_uint8()
self._pad008c = v_bytes(size=2)
self.WakeTimerListEntry = LIST_ENTRY()
self._pad0098 = v_bytes(size=4)
class GENERAL_LOOKASIDE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListHead = SLIST_HEADER()
self.Depth = v_uint16()
self.MaximumDepth = v_uint16()
self.TotalAllocates = v_uint32()
self.AllocateMisses = v_uint32()
self.TotalFrees = v_uint32()
self.FreeMisses = v_uint32()
self.Type = v_uint32()
self.Tag = v_uint32()
self.Size = v_uint32()
self.Allocate = v_ptr32()
self.Free = v_ptr32()
self.ListEntry = LIST_ENTRY()
self.LastTotalAllocates = v_uint32()
self.LastAllocateMisses = v_uint32()
self.Future = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self._pad0080 = v_bytes(size=56)
class PHYSICAL_MEMORY_DESCRIPTOR(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NumberOfRuns = v_uint32()
self.NumberOfPages = v_uint32()
self.Run = vstruct.VArray([ PHYSICAL_MEMORY_RUN() for i in xrange(1) ])
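# Sketch: structures that end in a nominal one-element VArray (here Run)
# are variable-length on disk. One plausible pattern, assuming the stock
# VArray.vsAddElement API: parse once to learn the count, grow the array,
# then re-parse so the trailing elements are populated.
def _example_parse_memory_descriptor(buf):
    pmd = PHYSICAL_MEMORY_DESCRIPTOR()
    pmd.vsParse(buf)  # first pass: counts plus the single declared run
    for i in xrange(int(pmd.NumberOfRuns) - 1):
        pmd.Run.vsAddElement(PHYSICAL_MEMORY_RUN())
    pmd.vsParse(buf)  # second pass: full Run array
    return pmd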
class ARBITER_ORDERING_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Count = v_uint16()
self.Maximum = v_uint16()
self.Orderings = v_ptr32()
class OBJECT_DIRECTORY_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ChainLink = v_ptr32()
self.Object = v_ptr32()
class CM_KEY_HASH(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ConvKey = v_uint32()
self.NextHash = v_ptr32()
self.KeyHive = v_ptr32()
self.KeyCell = v_uint32()
class ARBITER_LIST_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListEntry = LIST_ENTRY()
self.AlternativeCount = v_uint32()
self.Alternatives = v_ptr32()
self.PhysicalDeviceObject = v_ptr32()
self.RequestSource = v_uint32()
self.Flags = v_uint32()
self.WorkSpace = v_uint32()
self.InterfaceType = v_uint32()
self.SlotNumber = v_uint32()
self.BusNumber = v_uint32()
self.Assignment = v_ptr32()
self.SelectedAlternative = v_ptr32()
self.Result = v_uint32()
class PROCESSOR_PERF_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PercentFrequency = v_uint8()
self.MinCapacity = v_uint8()
self.Power = v_uint16()
self.IncreaseLevel = v_uint8()
self.DecreaseLevel = v_uint8()
self.Flags = v_uint16()
self.IncreaseTime = v_uint32()
self.DecreaseTime = v_uint32()
self.IncreaseCount = v_uint32()
self.DecreaseCount = v_uint32()
self.PerformanceTime = v_uint64()
class KGDTENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LimitLow = v_uint16()
self.BaseLow = v_uint16()
self.HighWord = _unnamed_13092()
class MMPFNENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Modified = v_uint32()
class NT_TIB(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ExceptionList = v_ptr32()
self.StackBase = v_ptr32()
self.StackLimit = v_ptr32()
self.SubSystemTib = v_ptr32()
self.FiberData = v_ptr32()
self.ArbitraryUserPointer = v_ptr32()
self.Self = v_ptr32()
class POWER_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SystemState = v_uint32()
class UNICODE_STRING(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint16()
self.MaximumLength = v_uint16()
self.Buffer = v_ptr32()
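# Sketch: UNICODE_STRING.Length counts bytes (not characters) and Buffer
# is a pointer to UTF-16LE data elsewhere in memory, so reading the text
# needs a memory accessor. read_mem(va, size) is a hypothetical callback.
def _example_read_unicode_string(us, read_mem):
    if not us.Buffer or not us.Length:
        return u''
    return read_mem(us.Buffer, us.Length).decode('utf-16le')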
class CELL_DATA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.u = u()
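# Note: CELL_DATA.u refers to the registry cell-data union, presumably
# emitted elsewhere in this generated module as a class literally named 'u'.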
class MMSESSION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SystemSpaceViewLock = FAST_MUTEX()
self.SystemSpaceViewLockPointer = v_ptr32()
self.SystemSpaceViewStart = v_ptr32()
self.SystemSpaceViewTable = v_ptr32()
self.SystemSpaceHashSize = v_uint32()
self.SystemSpaceHashEntries = v_uint32()
self.SystemSpaceHashKey = v_uint32()
self.SystemSpaceBitMap = v_ptr32()
class _unnamed_16230(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PowerState = v_uint32()
class _unnamed_16236(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PowerSequence = v_ptr32()
class PEB_FREE_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.Size = v_uint32()
class MMFREE_POOL_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.List = LIST_ENTRY()
self.Size = v_uint32()
self.Signature = v_uint32()
self.Owner = v_ptr32()
class EPROCESS_QUOTA_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.QuotaEntry = vstruct.VArray([ EPROCESS_QUOTA_ENTRY() for i in xrange(3) ])
self.QuotaList = LIST_ENTRY()
self.ReferenceCount = v_uint32()
self.ProcessCount = v_uint32()
class FXSAVE_FORMAT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ControlWord = v_uint16()
self.StatusWord = v_uint16()
self.TagWord = v_uint16()
self.ErrorOpcode = v_uint16()
self.ErrorOffset = v_uint32()
self.ErrorSelector = v_uint32()
self.DataOffset = v_uint32()
self.DataSelector = v_uint32()
self.MXCsr = v_uint32()
self.MXCsrMask = v_uint32()
self.RegisterArea = vstruct.VArray([ v_uint8() for i in xrange(128) ])
self.Reserved3 = vstruct.VArray([ v_uint8() for i in xrange(128) ])
self.Reserved4 = vstruct.VArray([ v_uint8() for i in xrange(224) ])
self.Align16Byte = vstruct.VArray([ v_uint8() for i in xrange(8) ])
class BUS_HANDLER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Version = v_uint32()
self.InterfaceType = v_uint32()
self.ConfigurationType = v_uint32()
self.BusNumber = v_uint32()
self.DeviceObject = v_ptr32()
self.ParentHandler = v_ptr32()
self.BusData = v_ptr32()
self.DeviceControlExtensionSize = v_uint32()
self.BusAddresses = v_ptr32()
self.Reserved = vstruct.VArray([ v_uint32() for i in xrange(4) ])
self.GetBusData = v_ptr32()
self.SetBusData = v_ptr32()
self.AdjustResourceList = v_ptr32()
self.AssignSlotResources = v_ptr32()
self.GetInterruptVector = v_ptr32()
self.TranslateBusAddress = v_ptr32()
self.Spare1 = v_ptr32()
self.Spare2 = v_ptr32()
self.Spare3 = v_ptr32()
self.Spare4 = v_ptr32()
self.Spare5 = v_ptr32()
self.Spare6 = v_ptr32()
self.Spare7 = v_ptr32()
self.Spare8 = v_ptr32()
class OBJECT_HEADER_NAME_INFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Directory = v_ptr32()
self.Name = UNICODE_STRING()
self.QueryReferences = v_uint32()
class PEB(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.InheritedAddressSpace = v_uint8()
self.ReadImageFileExecOptions = v_uint8()
self.BeingDebugged = v_uint8()
self.SpareBool = v_uint8()
self.Mutant = v_ptr32()
self.ImageBaseAddress = v_ptr32()
self.Ldr = v_ptr32()
self.ProcessParameters = v_ptr32()
self.SubSystemData = v_ptr32()
self.ProcessHeap = v_ptr32()
self.FastPebLock = v_ptr32()
self.FastPebLockRoutine = v_ptr32()
self.FastPebUnlockRoutine = v_ptr32()
self.EnvironmentUpdateCount = v_uint32()
self.KernelCallbackTable = v_ptr32()
self.SystemReserved = vstruct.VArray([ v_uint32() for i in xrange(1) ])
self.AtlThunkSListPtr32 = v_uint32()
self.FreeList = v_ptr32()
self.TlsExpansionCounter = v_uint32()
self.TlsBitmap = v_ptr32()
self.TlsBitmapBits = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self.ReadOnlySharedMemoryBase = v_ptr32()
self.ReadOnlySharedMemoryHeap = v_ptr32()
self.ReadOnlyStaticServerData = v_ptr32()
self.AnsiCodePageData = v_ptr32()
self.OemCodePageData = v_ptr32()
self.UnicodeCaseTableData = v_ptr32()
self.NumberOfProcessors = v_uint32()
self.NtGlobalFlag = v_uint32()
self._pad0070 = v_bytes(size=4)
self.CriticalSectionTimeout = LARGE_INTEGER()
self.HeapSegmentReserve = v_uint32()
self.HeapSegmentCommit = v_uint32()
self.HeapDeCommitTotalFreeThreshold = v_uint32()
self.HeapDeCommitFreeBlockThreshold = v_uint32()
self.NumberOfHeaps = v_uint32()
self.MaximumNumberOfHeaps = v_uint32()
self.ProcessHeaps = v_ptr32()
self.GdiSharedHandleTable = v_ptr32()
self.ProcessStarterHelper = v_ptr32()
self.GdiDCAttributeList = v_uint32()
self.LoaderLock = v_ptr32()
self.OSMajorVersion = v_uint32()
self.OSMinorVersion = v_uint32()
self.OSBuildNumber = v_uint16()
self.OSCSDVersion = v_uint16()
self.OSPlatformId = v_uint32()
self.ImageSubsystem = v_uint32()
self.ImageSubsystemMajorVersion = v_uint32()
self.ImageSubsystemMinorVersion = v_uint32()
self.ImageProcessAffinityMask = v_uint32()
self.GdiHandleBuffer = vstruct.VArray([ v_uint32() for i in xrange(34) ])
self.PostProcessInitRoutine = v_ptr32()
self.TlsExpansionBitmap = v_ptr32()
self.TlsExpansionBitmapBits = vstruct.VArray([ v_uint32() for i in xrange(32) ])
self.SessionId = v_uint32()
self.AppCompatFlags = ULARGE_INTEGER()
self.AppCompatFlagsUser = ULARGE_INTEGER()
self.pShimData = v_ptr32()
self.AppCompatInfo = v_ptr32()
self.CSDVersion = UNICODE_STRING()
self.ActivationContextData = v_ptr32()
self.ProcessAssemblyStorageMap = v_ptr32()
self.SystemDefaultActivationContextData = v_ptr32()
self.SystemAssemblyStorageMap = v_ptr32()
self.MinimumStackCommit = v_uint32()
self._pad0210 = v_bytes(size=4)
class DBGKD_ANY_CONTROL_SET(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.X86ControlSet = X86_DBGKD_CONTROL_SET()
self._pad001c = v_bytes(size=12)
class MMSUPPORT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LastTrimTime = LARGE_INTEGER()
self.Flags = MMSUPPORT_FLAGS()
self.PageFaultCount = v_uint32()
self.PeakWorkingSetSize = v_uint32()
self.WorkingSetSize = v_uint32()
self.MinimumWorkingSetSize = v_uint32()
self.MaximumWorkingSetSize = v_uint32()
self.VmWorkingSetList = v_ptr32()
self.WorkingSetExpansionLinks = LIST_ENTRY()
self.Claim = v_uint32()
self.NextEstimationSlot = v_uint32()
self.NextAgingSlot = v_uint32()
self.EstimatedAvailable = v_uint32()
self.GrowthSinceLastEstimate = v_uint32()
class HBASE_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint32()
self.Sequence1 = v_uint32()
self.Sequence2 = v_uint32()
self.TimeStamp = LARGE_INTEGER()
self.Major = v_uint32()
self.Minor = v_uint32()
self.Type = v_uint32()
self.Format = v_uint32()
self.RootCell = v_uint32()
self.Length = v_uint32()
self.Cluster = v_uint32()
self.FileName = vstruct.VArray([ v_uint8() for i in xrange(64) ])
self.Reserved1 = vstruct.VArray([ v_uint32() for i in xrange(99) ])
self.CheckSum = v_uint32()
self.Reserved2 = vstruct.VArray([ v_uint32() for i in xrange(894) ])
self.BootType = v_uint32()
self.BootRecover = v_uint32()
class BUS_EXTENSION_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.BusExtension = v_ptr32()
class DBGKD_GET_SET_BUS_DATA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BusDataType = v_uint32()
self.BusNumber = v_uint32()
self.SlotNumber = v_uint32()
self.Offset = v_uint32()
self.Length = v_uint32()
class KDPC(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Number = v_uint8()
self.Importance = v_uint8()
self.DpcListEntry = LIST_ENTRY()
self.DeferredRoutine = v_ptr32()
self.DeferredContext = v_ptr32()
self.SystemArgument1 = v_ptr32()
self.SystemArgument2 = v_ptr32()
self.Lock = v_ptr32()
class KEVENT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Header = DISPATCHER_HEADER()
class KSEMAPHORE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Header = DISPATCHER_HEADER()
self.Limit = v_uint32()
class PCI_ARBITER_INSTANCE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Header = PCI_SECONDARY_EXTENSION()
self.Interface = v_ptr32()
self.BusFdoExtension = v_ptr32()
self.InstanceName = vstruct.VArray([ v_uint16() for i in xrange(24) ])
self.CommonInstance = ARBITER_INSTANCE()
class PI_RESOURCE_ARBITER_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DeviceArbiterList = LIST_ENTRY()
self.ResourceType = v_uint8()
self._pad000c = v_bytes(size=3)
self.ArbiterInterface = v_ptr32()
self.Level = v_uint32()
self.ResourceList = LIST_ENTRY()
self.BestResourceList = LIST_ENTRY()
self.BestConfig = LIST_ENTRY()
self.ActiveArbiterList = LIST_ENTRY()
self.State = v_uint8()
self.ResourcesChanged = v_uint8()
self._pad0038 = v_bytes(size=2)
class OBJECT_TYPE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Mutex = ERESOURCE()
self.TypeList = LIST_ENTRY()
self.Name = UNICODE_STRING()
self.DefaultObject = v_ptr32()
self.Index = v_uint32()
self.TotalNumberOfObjects = v_uint32()
self.TotalNumberOfHandles = v_uint32()
self.HighWaterNumberOfObjects = v_uint32()
self.HighWaterNumberOfHandles = v_uint32()
self.TypeInfo = OBJECT_TYPE_INITIALIZER()
self.Key = v_uint32()
self.ObjectLocks = vstruct.VArray([ ERESOURCE() for i in xrange(4) ])
class DBGKD_SET_INTERNAL_BREAKPOINT32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BreakpointAddress = v_uint32()
self.Flags = v_uint32()
class POP_THERMAL_ZONE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Link = LIST_ENTRY()
self.State = v_uint8()
self.Flags = v_uint8()
self.Mode = v_uint8()
self.PendingMode = v_uint8()
self.ActivePoint = v_uint8()
self.PendingActivePoint = v_uint8()
self._pad0010 = v_bytes(size=2)
self.Throttle = v_uint32()
self._pad0018 = v_bytes(size=4)
self.LastTime = v_uint64()
self.SampleRate = v_uint32()
self.LastTemp = v_uint32()
self.PassiveTimer = KTIMER()
self.PassiveDpc = KDPC()
self.OverThrottled = POP_ACTION_TRIGGER()
self.Irp = v_ptr32()
self.Info = THERMAL_INFORMATION()
self._pad00d0 = v_bytes(size=4)
class POOL_HACKER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Header = POOL_HEADER()
self.Contents = vstruct.VArray([ v_uint32() for i in xrange(8) ])
class HANDLE_TABLE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TableCode = v_uint32()
self.QuotaProcess = v_ptr32()
self.UniqueProcessId = v_ptr32()
self.HandleTableLock = vstruct.VArray([ EX_PUSH_LOCK() for i in xrange(4) ])
self.HandleTableList = LIST_ENTRY()
self.HandleContentionEvent = EX_PUSH_LOCK()
self.DebugInfo = v_ptr32()
self.ExtraInfoPages = v_uint32()
self.FirstFree = v_uint32()
self.LastFree = v_uint32()
self.NextHandleNeedingPool = v_uint32()
self.HandleCount = v_uint32()
self.Flags = v_uint32()
class PO_HIBER_PERF(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.IoTicks = v_uint64()
self.InitTicks = v_uint64()
self.CopyTicks = v_uint64()
self.StartCount = v_uint64()
self.ElapsedTime = v_uint32()
self.IoTime = v_uint32()
self.CopyTime = v_uint32()
self.InitTime = v_uint32()
self.PagesWritten = v_uint32()
self.PagesProcessed = v_uint32()
self.BytesCopied = v_uint32()
self.DumpCount = v_uint32()
self.FileRuns = v_uint32()
self._pad0048 = v_bytes(size=4)
class DEFERRED_WRITE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NodeTypeCode = v_uint16()
self.NodeByteSize = v_uint16()
self.FileObject = v_ptr32()
self.BytesToWrite = v_uint32()
self.DeferredWriteLinks = LIST_ENTRY()
self.Event = v_ptr32()
self.PostRoutine = v_ptr32()
self.Context1 = v_ptr32()
self.Context2 = v_ptr32()
self.LimitModifiedPages = v_uint8()
self._pad0028 = v_bytes(size=3)
class ARBITER_INSTANCE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint32()
self.MutexEvent = v_ptr32()
self.Name = v_ptr32()
self.ResourceType = v_uint32()
self.Allocation = v_ptr32()
self.PossibleAllocation = v_ptr32()
self.OrderingList = ARBITER_ORDERING_LIST()
self.ReservedList = ARBITER_ORDERING_LIST()
self.ReferenceCount = v_uint32()
self.Interface = v_ptr32()
self.AllocationStackMaxSize = v_uint32()
self.AllocationStack = v_ptr32()
self.UnpackRequirement = v_ptr32()
self.PackResource = v_ptr32()
self.UnpackResource = v_ptr32()
self.ScoreRequirement = v_ptr32()
self.TestAllocation = v_ptr32()
self.RetestAllocation = v_ptr32()
self.CommitAllocation = v_ptr32()
self.RollbackAllocation = v_ptr32()
self.BootAllocation = v_ptr32()
self.QueryArbitrate = v_ptr32()
self.QueryConflict = v_ptr32()
self.AddReserved = v_ptr32()
self.StartArbiter = v_ptr32()
self.PreprocessEntry = v_ptr32()
self.AllocateEntry = v_ptr32()
self.GetNextAllocationRange = v_ptr32()
self.FindSuitableRange = v_ptr32()
self.AddAllocation = v_ptr32()
self.BacktrackAllocation = v_ptr32()
self.OverrideConflict = v_ptr32()
self.TransactionInProgress = v_uint8()
self._pad008c = v_bytes(size=3)
self.Extension = v_ptr32()
self.BusDeviceObject = v_ptr32()
self.ConflictCallbackContext = v_ptr32()
self.ConflictCallback = v_ptr32()
class MMMOD_WRITER_LISTHEAD(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListHead = LIST_ENTRY()
self.Event = KEVENT()
class NAMED_PIPE_CREATE_PARAMETERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NamedPipeType = v_uint32()
self.ReadMode = v_uint32()
self.CompletionMode = v_uint32()
self.MaximumInstances = v_uint32()
self.InboundQuota = v_uint32()
self.OutboundQuota = v_uint32()
self.DefaultTimeout = LARGE_INTEGER()
self.TimeoutSpecified = v_uint8()
self._pad0028 = v_bytes(size=7)
class POP_IDLE_HANDLER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Latency = v_uint32()
self.TimeCheck = v_uint32()
self.DemoteLimit = v_uint32()
self.PromoteLimit = v_uint32()
self.PromoteCount = v_uint32()
self.Demote = v_uint8()
self.Promote = v_uint8()
self.PromotePercent = v_uint8()
self.DemotePercent = v_uint8()
self.State = v_uint8()
self.Spare = vstruct.VArray([ v_uint8() for i in xrange(3) ])
self.IdleFunction = v_ptr32()
class MMSUPPORT_FLAGS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SessionSpace = v_uint32()
class HEAP_LOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Lock = _unnamed_12162()
class EXCEPTION_REGISTRATION_RECORD(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.Handler = v_ptr32()
class FILE_BASIC_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CreationTime = LARGE_INTEGER()
self.LastAccessTime = LARGE_INTEGER()
self.LastWriteTime = LARGE_INTEGER()
self.ChangeTime = LARGE_INTEGER()
self.FileAttributes = v_uint32()
self._pad0028 = v_bytes(size=4)
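# Sketch: the four timestamps above are Windows FILETIMEs (100ns ticks
# since 1601-01-01 UTC). Assuming LARGE_INTEGER exposes LowPart/HighPart
# the way ULARGE_INTEGER in this module does, a hypothetical converter:
def _example_filetime_to_datetime(li):
    import datetime
    ticks = (int(li.HighPart) << 32) | int(li.LowPart)
    return datetime.datetime(1601, 1, 1) + datetime.timedelta(microseconds=ticks // 10)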
class PLUGPLAY_EVENT_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.EventGuid = GUID()
self.EventCategory = v_uint32()
self.Result = v_ptr32()
self.Flags = v_uint32()
self.TotalSize = v_uint32()
self.DeviceObject = v_ptr32()
self.u = _unnamed_15795()
class LIST_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Flink = v_ptr32()
self.Blink = v_ptr32()
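# Sketch: LIST_ENTRY is the kernel's circular doubly-linked list node;
# following Flink eventually wraps back to the head address. Given a
# hypothetical read_mem(va, size) callback, one way to collect the entry
# addresses (capped to guard against corrupt lists):
def _example_walk_list(head_va, read_mem, max_entries=64):
    entries = []
    link = LIST_ENTRY()
    link.vsParse(read_mem(head_va, len(link)))
    cur = link.Flink
    while cur and cur != head_va and len(entries) < max_entries:
        entries.append(cur)
        link.vsParse(read_mem(cur, len(link)))
        cur = link.Flink
    return entries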
class CM_KEY_SECURITY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint16()
self.Reserved = v_uint16()
self.Flink = v_uint32()
self.Blink = v_uint32()
self.ReferenceCount = v_uint32()
self.DescriptorLength = v_uint32()
self.Descriptor = SECURITY_DESCRIPTOR_RELATIVE()
class _unnamed_14637(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Start = LARGE_INTEGER()
self.Length = v_uint32()
class _unnamed_14395(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Acquired = v_uint8()
self.CacheLineSize = v_uint8()
self.LatencyTimer = v_uint8()
self.EnablePERR = v_uint8()
self.EnableSERR = v_uint8()
class CLIENT_ID(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.UniqueProcess = v_ptr32()
self.UniqueThread = v_ptr32()
class POP_ACTION_TRIGGER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint32()
self.Flags = v_uint8()
self.Spare = vstruct.VArray([ v_uint8() for i in xrange(3) ])
self.Battery = _unnamed_13534()
class CM_CACHED_VALUE_INDEX(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CellIndex = v_uint32()
self.Data = _unnamed_13383()
class DEVICE_MAP(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DosDevicesDirectory = v_ptr32()
self.GlobalDosDevicesDirectory = v_ptr32()
self.ReferenceCount = v_uint32()
self.DriveMap = v_uint32()
self.DriveType = vstruct.VArray([ v_uint8() for i in xrange(32) ])
class CONTROL_AREA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Segment = v_ptr32()
self.DereferenceList = LIST_ENTRY()
self.NumberOfSectionReferences = v_uint32()
self.NumberOfPfnReferences = v_uint32()
self.NumberOfMappedViews = v_uint32()
self.NumberOfSubsections = v_uint16()
self.FlushInProgressCount = v_uint16()
self.NumberOfUserReferences = v_uint32()
self.u = _unnamed_12520()
self.FilePointer = v_ptr32()
self.WaitingForDeletion = v_ptr32()
self.ModifiedWriteCount = v_uint16()
self.NumberOfSystemCacheViews = v_uint16()
class GUID(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Data1 = v_uint32()
self.Data2 = v_uint16()
self.Data3 = v_uint16()
self.Data4 = vstruct.VArray([ v_uint8() for i in xrange(8) ])
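# Sketch: rendering a parsed GUID in the conventional registry form. The
# serialize-then-unpack approach (vsEmit is assumed to be the stock
# vstruct emitter) sidesteps indexing into the Data4 VArray; the Python 2
# str.encode('hex') used here matches the xrange usage in this module.
def _example_format_guid(g):
    import struct
    raw = g.vsEmit()
    d1, d2, d3 = struct.unpack('<IHH', raw[:8])
    return '{%08x-%04x-%04x-%s-%s}' % (
        d1, d2, d3, raw[8:10].encode('hex'), raw[10:16].encode('hex'))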
class KAPC_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ApcListHead = vstruct.VArray([ LIST_ENTRY() for i in xrange(2) ])
self.Process = v_ptr32()
self.KernelApcInProgress = v_uint8()
self.KernelApcPending = v_uint8()
self.UserApcPending = v_uint8()
self._pad0018 = v_bytes(size=1)
class MMVAD_SHORT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.StartingVpn = v_uint32()
self.EndingVpn = v_uint32()
self.Parent = v_ptr32()
self.LeftChild = v_ptr32()
self.RightChild = v_ptr32()
self.u = _unnamed_14102()
class DBGKD_GET_VERSION32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.MajorVersion = v_uint16()
self.MinorVersion = v_uint16()
self.ProtocolVersion = v_uint16()
self.Flags = v_uint16()
self.KernBase = v_uint32()
self.PsLoadedModuleList = v_uint32()
self.MachineType = v_uint16()
self.ThCallbackStack = v_uint16()
self.NextCallback = v_uint16()
self.FramePointer = v_uint16()
self.KiCallUserMode = v_uint32()
self.KeUserCallbackDispatcher = v_uint32()
self.BreakpointWithStatus = v_uint32()
self.DebuggerDataList = v_uint32()
class CM_CELL_REMAP_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.OldCell = v_uint32()
self.NewCell = v_uint32()
class _unnamed_14065(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.InitialPrivilegeSet = INITIAL_PRIVILEGE_SET()
class KIDTENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Offset = v_uint16()
self.Selector = v_uint16()
self.Access = v_uint16()
self.ExtendedOffset = v_uint16()
class _unnamed_16198(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.IoResourceRequirementList = v_ptr32()
class _unnamed_16195(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Capabilities = v_ptr32()
class _unnamed_14640(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Level = v_uint32()
self.Vector = v_uint32()
self.Affinity = v_uint32()
class PO_MEMORY_RANGE_ARRAY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Range = _unnamed_16445()
class _unnamed_14644(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Channel = v_uint32()
self.Port = v_uint32()
self.Reserved1 = v_uint32()
class SYSTEM_POWER_POLICY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Revision = v_uint32()
self.PowerButton = POWER_ACTION_POLICY()
self.SleepButton = POWER_ACTION_POLICY()
self.LidClose = POWER_ACTION_POLICY()
self.LidOpenWake = v_uint32()
self.Reserved = v_uint32()
self.Idle = POWER_ACTION_POLICY()
self.IdleTimeout = v_uint32()
self.IdleSensitivity = v_uint8()
self.DynamicThrottle = v_uint8()
self.Spare2 = vstruct.VArray([ v_uint8() for i in xrange(2) ])
self.MinSleep = v_uint32()
self.MaxSleep = v_uint32()
self.ReducedLatencySleep = v_uint32()
self.WinLogonFlags = v_uint32()
self.Spare3 = v_uint32()
self.DozeS4Timeout = v_uint32()
self.BroadcastCapacityResolution = v_uint32()
self.DischargePolicy = vstruct.VArray([ SYSTEM_POWER_LEVEL() for i in xrange(4) ])
self.VideoTimeout = v_uint32()
self.VideoDimDisplay = v_uint8()
self._pad00c8 = v_bytes(size=3)
self.VideoReserved = vstruct.VArray([ v_uint32() for i in xrange(3) ])
self.SpindownTimeout = v_uint32()
self.OptimizeForPower = v_uint8()
self.FanThrottleTolerance = v_uint8()
self.ForcedThrottle = v_uint8()
self.MinThrottle = v_uint8()
self.OverThrottled = POWER_ACTION_POLICY()
class IRP(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Size = v_uint16()
self.MdlAddress = v_ptr32()
self.Flags = v_uint32()
self.AssociatedIrp = _unnamed_12973()
self.ThreadListEntry = LIST_ENTRY()
self.IoStatus = IO_STATUS_BLOCK()
self.RequestorMode = v_uint8()
self.PendingReturned = v_uint8()
self.StackCount = v_uint8()
self.CurrentLocation = v_uint8()
self.Cancel = v_uint8()
self.CancelIrql = v_uint8()
self.ApcEnvironment = v_uint8()
self.AllocationFlags = v_uint8()
self.UserIosb = v_ptr32()
self.UserEvent = v_ptr32()
self.Overlay = _unnamed_12976()
self.CancelRoutine = v_ptr32()
self.UserBuffer = v_ptr32()
self.Tail = _unnamed_12979()
class _unnamed_14648(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Data = vstruct.VArray([ v_uint32() for i in xrange(3) ])
class _unnamed_16307(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.MinimumChannel = v_uint32()
self.MaximumChannel = v_uint32()
class _unnamed_16081(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.FileInformationClass = v_uint32()
self.FileObject = v_ptr32()
self.ReplaceIfExists = v_uint8()
self.AdvanceOnly = v_uint8()
self._pad0010 = v_bytes(size=2)
class POWER_ACTION_POLICY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Action = v_uint32()
self.Flags = v_uint32()
self.EventCode = v_uint32()
class SECURITY_DESCRIPTOR_RELATIVE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Revision = v_uint8()
self.Sbz1 = v_uint8()
self.Control = v_uint16()
self.Owner = v_uint32()
self.Group = v_uint32()
self.Sacl = v_uint32()
self.Dacl = v_uint32()
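# Sketch: in the self-relative layout, Owner/Group/Sacl/Dacl hold byte
# offsets from the start of the descriptor (zero meaning absent), not
# pointers. A hypothetical slicer over the raw descriptor bytes:
def _example_sd_dacl_bytes(sd, raw):
    return raw[sd.Dacl:] if sd.Dacl else None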
class DUMP_INITIALIZATION_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.Reserved = v_uint32()
self.MemoryBlock = v_ptr32()
self.CommonBuffer = vstruct.VArray([ v_ptr32() for i in xrange(2) ])
self._pad0018 = v_bytes(size=4)
self.PhysicalAddress = vstruct.VArray([ LARGE_INTEGER() for i in xrange(2) ])
self.StallRoutine = v_ptr32()
self.OpenRoutine = v_ptr32()
self.WriteRoutine = v_ptr32()
self.FinishRoutine = v_ptr32()
self.AdapterObject = v_ptr32()
self.MappedRegisterBase = v_ptr32()
self.PortConfiguration = v_ptr32()
self.CrashDump = v_uint8()
self._pad0048 = v_bytes(size=3)
self.MaximumTransferSize = v_uint32()
self.CommonBufferSize = v_uint32()
self.TargetAddress = v_ptr32()
self.WritePendingRoutine = v_ptr32()
self.PartitionStyle = v_uint32()
self.DiskInfo = _unnamed_16505()
self._pad0070 = v_bytes(size=4)
class FILE_GET_QUOTA_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NextEntryOffset = v_uint32()
self.SidLength = v_uint32()
self.Sid = SID()
class IO_COMPLETION_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Port = v_ptr32()
self.Key = v_ptr32()
class _unnamed_16565(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PhysicalDeviceObject = v_ptr32()
self.ConflictingResource = v_ptr32()
self.ConflictCount = v_ptr32()
self.Conflicts = v_ptr32()
class DRIVER_EXTENSION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DriverObject = v_ptr32()
self.AddDevice = v_ptr32()
self.Count = v_uint32()
self.ServiceKeyName = UNICODE_STRING()
self.ClientDriverExtension = v_ptr32()
self.FsFilterCallbacks = v_ptr32()
class TOKEN_SOURCE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SourceName = vstruct.VArray([ v_uint8() for i in xrange(8) ])
self.SourceIdentifier = LUID()
class _unnamed_16561(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.AllocatedResources = v_ptr32()
class _unnamed_14549(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BaseMid = v_uint32()
class flags(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Removable = v_uint8()
class DBGKM_EXCEPTION64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ExceptionRecord = EXCEPTION_RECORD64()
self.FirstChance = v_uint32()
self._pad00a0 = v_bytes(size=4)
class _unnamed_14544(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BaseMid = v_uint8()
self.Flags1 = v_uint8()
self.Flags2 = v_uint8()
self.BaseHi = v_uint8()
class PM_SUPPORT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Rsvd2 = v_uint8()
class KPRCB(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.MinorVersion = v_uint16()
self.MajorVersion = v_uint16()
self.CurrentThread = v_ptr32()
self.NextThread = v_ptr32()
self.IdleThread = v_ptr32()
self.Number = v_uint8()
self.Reserved = v_uint8()
self.BuildType = v_uint16()
self.SetMember = v_uint32()
self.CpuType = v_uint8()
self.CpuID = v_uint8()
self.CpuStep = v_uint16()
self.ProcessorState = KPROCESSOR_STATE()
self.KernelReserved = vstruct.VArray([ v_uint32() for i in xrange(16) ])
self.HalReserved = vstruct.VArray([ v_uint32() for i in xrange(16) ])
self.PrcbPad0 = vstruct.VArray([ v_uint8() for i in xrange(92) ])
self.LockQueue = vstruct.VArray([ KSPIN_LOCK_QUEUE() for i in xrange(16) ])
self.PrcbPad1 = vstruct.VArray([ v_uint8() for i in xrange(8) ])
self.NpxThread = v_ptr32()
self.InterruptCount = v_uint32()
self.KernelTime = v_uint32()
self.UserTime = v_uint32()
self.DpcTime = v_uint32()
self.DebugDpcTime = v_uint32()
self.InterruptTime = v_uint32()
self.AdjustDpcThreshold = v_uint32()
self.PageColor = v_uint32()
self.SkipTick = v_uint32()
self.MultiThreadSetBusy = v_uint8()
self.Spare2 = vstruct.VArray([ v_uint8() for i in xrange(3) ])
self.ParentNode = v_ptr32()
self.MultiThreadProcessorSet = v_uint32()
self.MultiThreadSetMaster = v_ptr32()
self.ThreadStartCount = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self.CcFastReadNoWait = v_uint32()
self.CcFastReadWait = v_uint32()
self.CcFastReadNotPossible = v_uint32()
self.CcCopyReadNoWait = v_uint32()
self.CcCopyReadWait = v_uint32()
self.CcCopyReadNoWaitMiss = v_uint32()
self.KeAlignmentFixupCount = v_uint32()
self.KeContextSwitches = v_uint32()
self.KeDcacheFlushCount = v_uint32()
self.KeExceptionDispatchCount = v_uint32()
self.KeFirstLevelTbFills = v_uint32()
self.KeFloatingEmulationCount = v_uint32()
self.KeIcacheFlushCount = v_uint32()
self.KeSecondLevelTbFills = v_uint32()
self.KeSystemCalls = v_uint32()
self.SpareCounter0 = vstruct.VArray([ v_uint32() for i in xrange(1) ])
self.PPLookasideList = vstruct.VArray([ PP_LOOKASIDE_LIST() for i in xrange(16) ])
self.PPNPagedLookasideList = vstruct.VArray([ PP_LOOKASIDE_LIST() for i in xrange(32) ])
self.PPPagedLookasideList = vstruct.VArray([ PP_LOOKASIDE_LIST() for i in xrange(32) ])
self.PacketBarrier = v_uint32()
self.ReverseStall = v_uint32()
self.IpiFrame = v_ptr32()
self.PrcbPad2 = vstruct.VArray([ v_uint8() for i in xrange(52) ])
self.CurrentPacket = vstruct.VArray([ v_ptr32() for i in xrange(3) ])
self.TargetSet = v_uint32()
self.WorkerRoutine = v_ptr32()
self.IpiFrozen = v_uint32()
self.PrcbPad3 = vstruct.VArray([ v_uint8() for i in xrange(40) ])
self.RequestSummary = v_uint32()
self.SignalDone = v_ptr32()
self.PrcbPad4 = vstruct.VArray([ v_uint8() for i in xrange(56) ])
self.DpcListHead = LIST_ENTRY()
self.DpcStack = v_ptr32()
self.DpcCount = v_uint32()
self.DpcQueueDepth = v_uint32()
self.DpcRoutineActive = v_uint32()
self.DpcInterruptRequested = v_uint32()
self.DpcLastCount = v_uint32()
self.DpcRequestRate = v_uint32()
self.MaximumDpcQueueDepth = v_uint32()
self.MinimumDpcRate = v_uint32()
self.QuantumEnd = v_uint32()
self.PrcbPad5 = vstruct.VArray([ v_uint8() for i in xrange(16) ])
self.DpcLock = v_uint32()
self.PrcbPad6 = vstruct.VArray([ v_uint8() for i in xrange(28) ])
self.CallDpc = KDPC()
self.ChainedInterruptList = v_ptr32()
self.LookasideIrpFloat = v_uint32()
self.SpareFields0 = vstruct.VArray([ v_uint32() for i in xrange(6) ])
self.VendorString = vstruct.VArray([ v_uint8() for i in xrange(13) ])
self.InitialApicId = v_uint8()
self.LogicalProcessorsPerPhysicalProcessor = v_uint8()
self._pad0910 = v_bytes(size=1)
self.MHz = v_uint32()
self.FeatureBits = v_uint32()
self.UpdateSignature = LARGE_INTEGER()
self.NpxSaveArea = FX_SAVE_AREA()
self.PowerState = PROCESSOR_POWER_STATE()
class HEAP_VIRTUAL_ALLOC_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Entry = LIST_ENTRY()
self.ExtraStuff = HEAP_ENTRY_EXTRA()
self.CommitSize = v_uint32()
self.ReserveSize = v_uint32()
self.BusyBlock = HEAP_ENTRY()
class VI_DEADLOCK_THREAD(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Thread = v_ptr32()
self.CurrentSpinNode = v_ptr32()
self.CurrentOtherNode = v_ptr32()
self.ListEntry = LIST_ENTRY()
self.NodeCount = v_uint32()
self.PagingCount = v_uint32()
class SUPPORTED_RANGE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.SystemAddressSpace = v_uint32()
self.SystemBase = v_uint64()
self.Base = v_uint64()
self.Limit = v_uint64()
class ARBITER_PARAMETERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Parameters = _unnamed_15247()
class EXCEPTION_RECORD(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ExceptionCode = v_uint32()
self.ExceptionFlags = v_uint32()
self.ExceptionRecord = v_ptr32()
self.ExceptionAddress = v_ptr32()
self.NumberParameters = v_uint32()
self.ExceptionInformation = vstruct.VArray([ v_uint32() for i in xrange(15) ])
class MMPTE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.u = _unnamed_11597()
class VI_DEADLOCK_NODE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Parent = v_ptr32()
self.ChildrenList = LIST_ENTRY()
self.SiblingsList = LIST_ENTRY()
self.ResourceList = LIST_ENTRY()
self.Root = v_ptr32()
self.ThreadEntry = v_ptr32()
self.Active = v_uint32()
self.StackTrace = vstruct.VArray([ v_ptr32() for i in xrange(8) ])
self.ParentStackTrace = vstruct.VArray([ v_ptr32() for i in xrange(8) ])
class KPCR(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NtTib = NT_TIB()
self.SelfPcr = v_ptr32()
self.Prcb = v_ptr32()
self.Irql = v_uint8()
self._pad0028 = v_bytes(size=3)
self.IRR = v_uint32()
self.IrrActive = v_uint32()
self.IDR = v_uint32()
self.KdVersionBlock = v_ptr32()
self.IDT = v_ptr32()
self.GDT = v_ptr32()
self.TSS = v_ptr32()
self.MajorVersion = v_uint16()
self.MinorVersion = v_uint16()
self.SetMember = v_uint32()
self.StallScaleFactor = v_uint32()
self.DebugActive = v_uint8()
self.Number = v_uint8()
self.Spare0 = v_uint8()
self.SecondLevelCacheAssociativity = v_uint8()
self.VdmAlert = v_uint32()
self.KernelReserved = vstruct.VArray([ v_uint32() for i in xrange(14) ])
self.SecondLevelCacheSize = v_uint32()
self.HalReserved = vstruct.VArray([ v_uint32() for i in xrange(16) ])
self.InterruptMode = v_uint32()
self.Spare1 = v_uint8()
self._pad00dc = v_bytes(size=3)
self.KernelReserved2 = vstruct.VArray([ v_uint32() for i in xrange(17) ])
self.PrcbData = KPRCB()
class IMAGE_FILE_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Machine = v_uint16()
self.NumberOfSections = v_uint16()
self.TimeDateStamp = v_uint32()
self.PointerToSymbolTable = v_uint32()
self.NumberOfSymbols = v_uint32()
self.SizeOfOptionalHeader = v_uint16()
self.Characteristics = v_uint16()
class CM_KEY_INDEX(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint16()
self.Count = v_uint16()
self.List = vstruct.VArray([ v_uint32() for i in xrange(1) ])
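# Note: CM_KEY_INDEX.List is another nominal one-element VArray; the
# variable-length re-parse pattern sketched after PHYSICAL_MEMORY_DESCRIPTOR
# applies here as well (grow List to Count before the final parse).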
class IMAGE_DEBUG_DIRECTORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Characteristics = v_uint32()
self.TimeDateStamp = v_uint32()
self.MajorVersion = v_uint16()
self.MinorVersion = v_uint16()
self.Type = v_uint32()
self.SizeOfData = v_uint32()
self.AddressOfRawData = v_uint32()
self.PointerToRawData = v_uint32()
class AMD64_DBGKD_CONTROL_SET(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TraceFlag = v_uint32()
self.Dr7 = v_uint64()
self.CurrentSymbolStart = v_uint64()
self.CurrentSymbolEnd = v_uint64()
class SYSPTES_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListHead = LIST_ENTRY()
self.Count = v_uint32()
class DBGKD_READ_WRITE_IO_EXTENDED32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DataSize = v_uint32()
self.InterfaceType = v_uint32()
self.BusNumber = v_uint32()
self.AddressSpace = v_uint32()
self.IoAddress = v_uint32()
self.DataValue = v_uint32()
class PEB_LDR_DATA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.Initialized = v_uint8()
self._pad0008 = v_bytes(size=3)
self.SsHandle = v_ptr32()
self.InLoadOrderModuleList = LIST_ENTRY()
self.InMemoryOrderModuleList = LIST_ENTRY()
self.InInitializationOrderModuleList = LIST_ENTRY()
self.EntryInProgress = v_ptr32()
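# Sketch: the three PEB_LDR_DATA module lists are ordinary LIST_ENTRY
# chains, so enumerating loaded modules reduces to the generic walker
# sketched after LIST_ENTRY above. vsGetOffset is assumed to return a
# field's byte offset, per the stock vstruct API; read_mem is the same
# hypothetical callback used there.
def _example_walk_load_order(ldr_va, read_mem):
    ldr = PEB_LDR_DATA()
    head_va = ldr_va + ldr.vsGetOffset('InLoadOrderModuleList')
    return _example_walk_list(head_va, read_mem)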
class DBGKD_WRITE_BREAKPOINT64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BreakPointAddress = v_uint64()
self.BreakPointHandle = v_uint32()
self._pad0010 = v_bytes(size=4)
class IMAGE_NT_HEADERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint32()
self.FileHeader = IMAGE_FILE_HEADER()
self.OptionalHeader = IMAGE_OPTIONAL_HEADER()
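# Sketch: locating IMAGE_NT_HEADERS inside a raw PE image. The DOS header
# stores the NT-headers file offset (e_lfanew) as a 32-bit little-endian
# value at offset 0x3c, so struct.unpack_from avoids depending on an
# IMAGE_DOS_HEADER definition here; pe_bytes is a hypothetical byte string.
def _example_parse_nt_headers(pe_bytes):
    import struct
    e_lfanew = struct.unpack_from('<I', pe_bytes, 0x3c)[0]
    nt = IMAGE_NT_HEADERS()
    nt.vsParse(pe_bytes[e_lfanew:])
    return nt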
class HEAP_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Size = v_uint16()
self.PreviousSize = v_uint16()
self.SmallTagIndex = v_uint8()
self.Flags = v_uint8()
self.UnusedBytes = v_uint8()
self.SegmentIndex = v_uint8()
class _unnamed_16304(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.MinimumVector = v_uint32()
self.MaximumVector = v_uint32()
class SECURITY_SUBJECT_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ClientToken = v_ptr32()
self.ImpersonationLevel = v_uint32()
self.PrimaryToken = v_ptr32()
self.ProcessAuditId = v_ptr32()
class X86_DBGKD_CONTROL_SET(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TraceFlag = v_uint32()
self.Dr7 = v_uint32()
self.CurrentSymbolStart = v_uint32()
self.CurrentSymbolEnd = v_uint32()
class SEP_AUDIT_POLICY_OVERLAY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PolicyBits = v_uint64()
class MI_VERIFIER_DRIVER_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Links = LIST_ENTRY()
self.Loads = v_uint32()
self.Unloads = v_uint32()
self.BaseName = UNICODE_STRING()
self.StartAddress = v_ptr32()
self.EndAddress = v_ptr32()
self.Flags = v_uint32()
self.Signature = v_uint32()
self.Reserved = v_uint32()
self.VerifierPoolLock = v_uint32()
self.PoolHash = v_ptr32()
self.PoolHashSize = v_uint32()
self.PoolHashFree = v_uint32()
self.PoolHashReserved = v_uint32()
self.CurrentPagedPoolAllocations = v_uint32()
self.CurrentNonPagedPoolAllocations = v_uint32()
self.PeakPagedPoolAllocations = v_uint32()
self.PeakNonPagedPoolAllocations = v_uint32()
self.PagedBytes = v_uint32()
self.NonPagedBytes = v_uint32()
self.PeakPagedBytes = v_uint32()
self.PeakNonPagedBytes = v_uint32()
class GDI_TEB_BATCH(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Offset = v_uint32()
self.HDC = v_uint32()
self.Buffer = vstruct.VArray([ v_uint32() for i in xrange(310) ])
class WMI_CLIENT_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ProcessorNumber = v_uint8()
self.Alignment = v_uint8()
self.LoggerId = v_uint16()
class MMSUBSECTION_FLAGS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ReadOnly = v_uint32()
class INTERFACE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Size = v_uint16()
self.Version = v_uint16()
self.Context = v_ptr32()
self.InterfaceReference = v_ptr32()
self.InterfaceDereference = v_ptr32()
class OBJECT_DIRECTORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.HashBuckets = vstruct.VArray([ v_ptr32() for i in xrange(37) ])
self.Lock = EX_PUSH_LOCK()
self.DeviceMap = v_ptr32()
self.SessionId = v_uint32()
self.Reserved = v_uint16()
self.SymbolicLinkUsageCount = v_uint16()
class WMI_LOGGER_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BufferSpinLock = v_uint32()
self._pad0008 = v_bytes(size=4)
self.StartTime = LARGE_INTEGER()
self.LogFileHandle = v_ptr32()
self.LoggerSemaphore = KSEMAPHORE()
self.LoggerThread = v_ptr32()
self.LoggerEvent = KEVENT()
self.FlushEvent = KEVENT()
self.LoggerStatus = v_uint32()
self.LoggerId = v_uint32()
self.BuffersAvailable = v_uint32()
self.UsePerfClock = v_uint32()
self.WriteFailureLimit = v_uint32()
self.BuffersDirty = v_uint32()
self.BuffersInUse = v_uint32()
self.SwitchingInProgress = v_uint32()
self._pad0070 = v_bytes(size=4)
self.FreeList = SLIST_HEADER()
self.FlushList = SLIST_HEADER()
self.GlobalList = SLIST_HEADER()
self.ProcessorBuffers = v_ptr32()
self.LoggerName = UNICODE_STRING()
self.LogFileName = UNICODE_STRING()
self.LogFilePattern = UNICODE_STRING()
self.NewLogFileName = UNICODE_STRING()
self.EndPageMarker = v_ptr32()
self.CollectionOn = v_uint32()
self.KernelTraceOn = v_uint32()
self.PerfLogInTransition = v_uint32()
self.RequestFlag = v_uint32()
self.EnableFlags = v_uint32()
self.MaximumFileSize = v_uint32()
self.LoggerMode = v_uint32()
self.LastFlushedBuffer = v_uint32()
self.RefCount = v_uint32()
self.FlushTimer = v_uint32()
self.FirstBufferOffset = LARGE_INTEGER()
self.ByteOffset = LARGE_INTEGER()
self.BufferAgeLimit = LARGE_INTEGER()
self.MaximumBuffers = v_uint32()
self.MinimumBuffers = v_uint32()
self.EventsLost = v_uint32()
self.BuffersWritten = v_uint32()
self.LogBuffersLost = v_uint32()
self.RealTimeBuffersLost = v_uint32()
self.BufferSize = v_uint32()
self.NumberOfBuffers = v_uint32()
self.SequencePtr = v_ptr32()
self.InstanceGuid = GUID()
self.LoggerHeader = v_ptr32()
self.GetCpuClock = v_ptr32()
self.ClientSecurityContext = SECURITY_CLIENT_CONTEXT()
self.LoggerExtension = v_ptr32()
self.ReleaseQueue = v_uint32()
self.EnableFlagExtension = TRACE_ENABLE_FLAG_EXTENSION()
self.LocalSequence = v_uint32()
self.MaximumIrql = v_uint32()
self.EnableFlagArray = v_ptr32()
self.LoggerMutex = KMUTANT()
self.MutexCount = v_uint32()
self.FileCounter = v_uint32()
self.BufferCallback = v_ptr32()
self.CallbackContext = v_ptr32()
self.PoolType = v_uint32()
self._pad01b8 = v_bytes(size=4)
self.ReferenceSystemTime = LARGE_INTEGER()
self.ReferenceTimeStamp = LARGE_INTEGER()
class IO_STACK_LOCATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.MajorFunction = v_uint8()
self.MinorFunction = v_uint8()
self.Flags = v_uint8()
self.Control = v_uint8()
self.Parameters = _unnamed_14762()
self.DeviceObject = v_ptr32()
self.FileObject = v_ptr32()
self.CompletionRoutine = v_ptr32()
self.Context = v_ptr32()
class DBGKD_READ_WRITE_MSR(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Msr = v_uint32()
self.DataValueLow = v_uint32()
self.DataValueHigh = v_uint32()
class _unnamed_14745(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.UserApcRoutine = v_ptr32()
self.UserApcContext = v_ptr32()
class PCI_PDO_EXTENSION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.ExtensionType = v_uint32()
self.IrpDispatchTable = v_ptr32()
self.DeviceState = v_uint8()
self.TentativeNextState = v_uint8()
self._pad0010 = v_bytes(size=2)
self.SecondaryExtLock = KEVENT()
self.Slot = PCI_SLOT_NUMBER()
self.PhysicalDeviceObject = v_ptr32()
self.ParentFdoExtension = v_ptr32()
self.SecondaryExtension = SINGLE_LIST_ENTRY()
self.BusInterfaceReferenceCount = v_uint32()
self.AgpInterfaceReferenceCount = v_uint32()
self.VendorId = v_uint16()
self.DeviceId = v_uint16()
self.SubsystemVendorId = v_uint16()
self.SubsystemId = v_uint16()
self.RevisionId = v_uint8()
self.ProgIf = v_uint8()
self.SubClass = v_uint8()
self.BaseClass = v_uint8()
self.AdditionalResourceCount = v_uint8()
self.AdjustedInterruptLine = v_uint8()
self.InterruptPin = v_uint8()
self.RawInterruptLine = v_uint8()
self.CapabilitiesPtr = v_uint8()
self.SavedLatencyTimer = v_uint8()
self.SavedCacheLineSize = v_uint8()
self.HeaderType = v_uint8()
self.NotPresent = v_uint8()
self.ReportedMissing = v_uint8()
self.ExpectedWritebackFailure = v_uint8()
self.NoTouchPmeEnable = v_uint8()
self.LegacyDriver = v_uint8()
self.UpdateHardware = v_uint8()
self.MovedDevice = v_uint8()
self.DisablePowerDown = v_uint8()
self.NeedsHotPlugConfiguration = v_uint8()
self.SwitchedIDEToNativeMode = v_uint8()
self.BIOSAllowsIDESwitchToNativeMode = v_uint8()
self.IoSpaceUnderNativeIdeControl = v_uint8()
self.OnDebugPath = v_uint8()
self._pad005c = v_bytes(size=3)
self.PowerState = PCI_POWER_STATE()
self.Dependent = PCI_HEADER_TYPE_DEPENDENT()
self.HackFlags = v_uint64()
self.Resources = v_ptr32()
self.BridgeFdoExtension = v_ptr32()
self.NextBridge = v_ptr32()
self.NextHashEntry = v_ptr32()
self.Lock = PCI_LOCK()
self.PowerCapabilities = PCI_PMC()
self.TargetAgpCapabilityId = v_uint8()
self._pad00c4 = v_bytes(size=1)
self.CommandEnables = v_uint16()
self.InitialCommand = v_uint16()
class IMAGE_DATA_DIRECTORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.VirtualAddress = v_uint32()
self.Size = v_uint32()
class FILE_OBJECT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Size = v_uint16()
self.DeviceObject = v_ptr32()
self.Vpb = v_ptr32()
self.FsContext = v_ptr32()
self.FsContext2 = v_ptr32()
self.SectionObjectPointer = v_ptr32()
self.PrivateCacheMap = v_ptr32()
self.FinalStatus = v_uint32()
self.RelatedFileObject = v_ptr32()
self.LockOperation = v_uint8()
self.DeletePending = v_uint8()
self.ReadAccess = v_uint8()
self.WriteAccess = v_uint8()
self.DeleteAccess = v_uint8()
self.SharedRead = v_uint8()
self.SharedWrite = v_uint8()
self.SharedDelete = v_uint8()
self.Flags = v_uint32()
self.FileName = UNICODE_STRING()
self.CurrentByteOffset = LARGE_INTEGER()
self.Waiters = v_uint32()
self.Busy = v_uint32()
self.LastLock = v_ptr32()
self.Lock = KEVENT()
self.Event = KEVENT()
self.CompletionContext = v_ptr32()
class MMWSLE_HASH(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Key = v_ptr32()
self.Index = v_uint32()
class _unnamed_16004(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SecurityContext = v_ptr32()
self.Options = v_uint32()
self.Reserved = v_uint16()
self.ShareAccess = v_uint16()
self.Parameters = v_ptr32()
class SECTION_IMAGE_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TransferAddress = v_ptr32()
self.ZeroBits = v_uint32()
self.MaximumStackSize = v_uint32()
self.CommittedStackSize = v_uint32()
self.SubSystemType = v_uint32()
self.SubSystemMinorVersion = v_uint16()
self.SubSystemMajorVersion = v_uint16()
self.GpValue = v_uint32()
self.ImageCharacteristics = v_uint16()
self.DllCharacteristics = v_uint16()
self.Machine = v_uint16()
self.ImageContainsCode = v_uint8()
self.Spare1 = v_uint8()
self.LoaderFlags = v_uint32()
self.ImageFileSize = v_uint32()
self.Reserved = vstruct.VArray([ v_uint32() for i in xrange(1) ])
class HEAP_SUBSEGMENT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Bucket = v_ptr32()
self.UserBlocks = v_ptr32()
self.AggregateExchg = INTERLOCK_SEQ()
self.BlockSize = v_uint16()
self.FreeThreshold = v_uint16()
self.BlockCount = v_uint16()
self.SizeIndex = v_uint8()
self.AffinityIndex = v_uint8()
self.SFreeListEntry = SINGLE_LIST_ENTRY()
self.Lock = v_uint32()
class ERESOURCE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SystemResourcesList = LIST_ENTRY()
self.OwnerTable = v_ptr32()
self.ActiveCount = v_uint16()
self.Flag = v_uint16()
self.SharedWaiters = v_ptr32()
self.ExclusiveWaiters = v_ptr32()
self.OwnerThreads = vstruct.VArray([ OWNER_ENTRY() for i in xrange(2) ])
self.ContentionCount = v_uint32()
self.NumberOfSharedWaiters = v_uint16()
self.NumberOfExclusiveWaiters = v_uint16()
self.Address = v_ptr32()
self.SpinLock = v_uint32()
class MBCB(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NodeTypeCode = v_uint16()
self.NodeIsInZone = v_uint16()
self.PagesToWrite = v_uint32()
self.DirtyPages = v_uint32()
self.Reserved = v_uint32()
self.BitmapRanges = LIST_ENTRY()
self.ResumeWritePage = v_uint64()
self.BitmapRange1 = BITMAP_RANGE()
self.BitmapRange2 = BITMAP_RANGE()
self.BitmapRange3 = BITMAP_RANGE()
class RTL_ATOM_TABLE_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.HashLink = v_ptr32()
self.HandleIndex = v_uint16()
self.Atom = v_uint16()
self.ReferenceCount = v_uint16()
self.Flags = v_uint8()
self.NameLength = v_uint8()
self.Name = vstruct.VArray([ v_uint16() for i in xrange(1) ])
self._pad0010 = v_bytes(size=2)
class _unnamed_12979(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Overlay = _unnamed_14765()
self._pad0030 = v_bytes(size=8)
class CHILD_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Count = v_uint32()
self.List = v_uint32()
class _unnamed_16094(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
class RTL_RANGE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Start = v_uint64()
self.End = v_uint64()
self.UserData = v_ptr32()
self.Owner = v_ptr32()
self.Attributes = v_uint8()
self.Flags = v_uint8()
self._pad0020 = v_bytes(size=6)
class PCI_MJ_DISPATCH_TABLE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PnpIrpMaximumMinorFunction = v_uint32()
self.PnpIrpDispatchTable = v_ptr32()
self.PowerIrpMaximumMinorFunction = v_uint32()
self.PowerIrpDispatchTable = v_ptr32()
self.SystemControlIrpDispatchStyle = v_uint32()
self.SystemControlIrpDispatchFunction = v_ptr32()
self.OtherIrpDispatchStyle = v_uint32()
self.OtherIrpDispatchFunction = v_ptr32()
class EX_PUSH_LOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Waiting = v_uint32()
class ARBITER_INTERFACE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Size = v_uint16()
self.Version = v_uint16()
self.Context = v_ptr32()
self.InterfaceReference = v_ptr32()
self.InterfaceDereference = v_ptr32()
self.ArbiterHandler = v_ptr32()
self.Flags = v_uint32()
class SLIST_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Alignment = v_uint64()
class _unnamed_16135(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Srb = v_ptr32()
class _unnamed_16642(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BlockedDriverGuid = GUID()
class _unnamed_16131(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Vpb = v_ptr32()
self.DeviceObject = v_ptr32()
class HEAP_SEGMENT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Entry = HEAP_ENTRY()
self.Signature = v_uint32()
self.Flags = v_uint32()
self.Heap = v_ptr32()
self.LargestUnCommittedRange = v_uint32()
self.BaseAddress = v_ptr32()
self.NumberOfPages = v_uint32()
self.FirstEntry = v_ptr32()
self.LastValidEntry = v_ptr32()
self.NumberOfUnCommittedPages = v_uint32()
self.NumberOfUnCommittedRanges = v_uint32()
self.UnCommittedRanges = v_ptr32()
self.AllocatorBackTraceIndex = v_uint16()
self.Reserved = v_uint16()
self.LastEntryInSegment = v_ptr32()
class POP_DEVICE_POWER_IRP(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Free = SINGLE_LIST_ENTRY()
self.Irp = v_ptr32()
self.Notify = v_ptr32()
self.Pending = LIST_ENTRY()
self.Complete = LIST_ENTRY()
self.Abort = LIST_ENTRY()
self.Failed = LIST_ENTRY()
class HEAP_FREE_ENTRY_EXTRA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TagIndex = v_uint16()
self.FreeBackTraceIndex = v_uint16()
class PRIVATE_CACHE_MAP(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NodeTypeCode = v_uint16()
self._pad0004 = v_bytes(size=2)
self.ReadAheadMask = v_uint32()
self.FileObject = v_ptr32()
self._pad0010 = v_bytes(size=4)
self.FileOffset1 = LARGE_INTEGER()
self.BeyondLastByte1 = LARGE_INTEGER()
self.FileOffset2 = LARGE_INTEGER()
self.BeyondLastByte2 = LARGE_INTEGER()
self.ReadAheadOffset = vstruct.VArray([ LARGE_INTEGER() for i in xrange(2) ])
self.ReadAheadLength = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self.ReadAheadSpinLock = v_uint32()
self.PrivateLinks = LIST_ENTRY()
self._pad0058 = v_bytes(size=4)
class SEP_AUDIT_POLICY_CATEGORIES(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.System = v_uint32()
self.AccountLogon = v_uint32()
class IMAGE_SECTION_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Name = vstruct.VArray([ v_uint8() for i in xrange(8) ])
self.Misc = _unnamed_14793()
self.VirtualAddress = v_uint32()
self.SizeOfRawData = v_uint32()
self.PointerToRawData = v_uint32()
self.PointerToRelocations = v_uint32()
self.PointerToLinenumbers = v_uint32()
self.NumberOfRelocations = v_uint16()
self.NumberOfLinenumbers = v_uint16()
self.Characteristics = v_uint32()
class ACL(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.AclRevision = v_uint8()
self.Sbz1 = v_uint8()
self.AclSize = v_uint16()
self.AceCount = v_uint16()
self.Sbz2 = v_uint16()
class _unnamed_10498(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LowPart = v_uint32()
self.HighPart = v_uint32()
class _unnamed_10880(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.FnArea = FNSAVE_FORMAT()
self._pad0208 = v_bytes(size=412)
class VACB(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BaseAddress = v_ptr32()
self.SharedCacheMap = v_ptr32()
self.Overlay = _unnamed_11926()
self.LruList = LIST_ENTRY()
class WAIT_CONTEXT_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.WaitQueueEntry = KDEVICE_QUEUE_ENTRY()
self.DeviceRoutine = v_ptr32()
self.DeviceContext = v_ptr32()
self.NumberOfMapRegisters = v_uint32()
self.DeviceObject = v_ptr32()
self.CurrentIrp = v_ptr32()
self.BufferChainingDpc = v_ptr32()
class CM_KEY_NODE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint16()
self.Flags = v_uint16()
self.LastWriteTime = LARGE_INTEGER()
self.Spare = v_uint32()
self.Parent = v_uint32()
self.SubKeyCounts = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self.SubKeyLists = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self.ValueList = CHILD_LIST()
self.Security = v_uint32()
self.Class = v_uint32()
self.MaxNameLen = v_uint32()
self.MaxClassLen = v_uint32()
self.MaxValueNameLen = v_uint32()
self.MaxValueDataLen = v_uint32()
self.WorkVar = v_uint32()
self.NameLength = v_uint16()
self.ClassLength = v_uint16()
self.Name = vstruct.VArray([ v_uint16() for i in xrange(1) ])
self._pad0050 = v_bytes(size=2)
class SE_AUDIT_PROCESS_CREATION_INFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ImageFileName = v_ptr32()
class ACTIVATION_CONTEXT_STACK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Flags = v_uint32()
self.NextCookieSequenceNumber = v_uint32()
self.ActiveFrame = v_ptr32()
self.FrameListCache = LIST_ENTRY()
class SECURITY_TOKEN_PROXY_DATA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.ProxyClass = v_uint32()
self.PathInfo = UNICODE_STRING()
self.ContainerMask = v_uint32()
self.ObjectMask = v_uint32()
class _unnamed_16639(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.VetoType = v_uint32()
self.DeviceIdVetoNameBuffer = vstruct.VArray([ v_uint16() for i in xrange(1) ])
self._pad0008 = v_bytes(size=2)
class _unnamed_16636(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NotificationCode = v_uint32()
self.NotificationData = v_uint32()
class _unnamed_16634(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Notification = v_ptr32()
class EX_RUNDOWN_REF(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Count = v_uint32()
class _unnamed_16631(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.NotificationStructure = v_ptr32()
self.DeviceIds = vstruct.VArray([ v_uint16() for i in xrange(1) ])
self._pad0008 = v_bytes(size=2)
class CM_NOTIFY_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.HiveList = LIST_ENTRY()
self.PostList = LIST_ENTRY()
self.KeyControlBlock = v_ptr32()
self.KeyBody = v_ptr32()
self.Filter = v_uint32()
self.SubjectContext = SECURITY_SUBJECT_CONTEXT()
class MMPTE_PROTOTYPE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Valid = v_uint32()
class PCI_HEADER_TYPE_DEPENDENT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.type0 = _unnamed_14410()
class CM_BIG_DATA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint16()
self.Count = v_uint16()
self.List = v_uint32()
class IMAGE_DOS_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.e_magic = v_uint16()
self.e_cblp = v_uint16()
self.e_cp = v_uint16()
self.e_crlc = v_uint16()
self.e_cparhdr = v_uint16()
self.e_minalloc = v_uint16()
self.e_maxalloc = v_uint16()
self.e_ss = v_uint16()
self.e_sp = v_uint16()
self.e_csum = v_uint16()
self.e_ip = v_uint16()
self.e_cs = v_uint16()
self.e_lfarlc = v_uint16()
self.e_ovno = v_uint16()
self.e_res = vstruct.VArray([ v_uint16() for i in xrange(4) ])
self.e_oemid = v_uint16()
self.e_oeminfo = v_uint16()
self.e_res2 = vstruct.VArray([ v_uint16() for i in xrange(10) ])
self.e_lfanew = v_uint32()
class _unnamed_15795(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DeviceClass = _unnamed_16624()
class DBGKD_FILL_MEMORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Address = v_uint64()
self.Length = v_uint32()
self.Flags = v_uint16()
self.PatternLength = v_uint16()
class CM_KEY_SECURITY_CACHE_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Cell = v_uint32()
self.CachedSecurity = v_ptr32()
class _unnamed_16663(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint32()
self.CheckSum = v_uint32()
class _unnamed_16255(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Argument1 = v_ptr32()
self.Argument2 = v_ptr32()
self.Argument3 = v_ptr32()
self.Argument4 = v_ptr32()
class _unnamed_12606(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ImageInformation = v_ptr32()
class _unnamed_12605(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ImageCommitment = v_uint32()
class _unnamed_16226(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.InPath = v_uint8()
self.Reserved = vstruct.VArray([ v_uint8() for i in xrange(3) ])
self.Type = v_uint32()
class ARBITER_ORDERING(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Start = v_uint64()
self.End = v_uint64()
class MMVIEW(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Entry = v_uint32()
self.ControlArea = v_ptr32()
class EXCEPTION_RECORD32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ExceptionCode = v_uint32()
self.ExceptionFlags = v_uint32()
self.ExceptionRecord = v_uint32()
self.ExceptionAddress = v_uint32()
self.NumberParameters = v_uint32()
self.ExceptionInformation = vstruct.VArray([ v_uint32() for i in xrange(15) ])
class DBGKD_READ_MEMORY32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TargetBaseAddress = v_uint32()
self.TransferCount = v_uint32()
self.ActualBytesRead = v_uint32()
class QUAD(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DoNotUseThisField = v_uint64()
class _unnamed_11926(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.FileOffset = LARGE_INTEGER()
class LPCP_PORT_OBJECT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ConnectionPort = v_ptr32()
self.ConnectedPort = v_ptr32()
self.MsgQueue = LPCP_PORT_QUEUE()
self.Creator = CLIENT_ID()
self.ClientSectionBase = v_ptr32()
self.ServerSectionBase = v_ptr32()
self.PortContext = v_ptr32()
self.ClientThread = v_ptr32()
self.SecurityQos = SECURITY_QUALITY_OF_SERVICE()
self.StaticSecurity = SECURITY_CLIENT_CONTEXT()
self.LpcReplyChainHead = LIST_ENTRY()
self.LpcDataInfoChainHead = LIST_ENTRY()
self.ServerProcess = v_ptr32()
self.MaxMessageLength = v_uint16()
self.MaxConnectionInfoLength = v_uint16()
self.Flags = v_uint32()
self.WaitEvent = KEVENT()
class CALL_PERFORMANCE_DATA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SpinLock = v_uint32()
self.HashTable = vstruct.VArray([ LIST_ENTRY() for i in xrange(64) ])
class EXCEPTION_POINTERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ExceptionRecord = v_ptr32()
self.ContextRecord = v_ptr32()
class CM_KEY_SECURITY_CACHE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Cell = v_uint32()
self.ConvKey = v_uint32()
self.List = LIST_ENTRY()
self.DescriptorLength = v_uint32()
self.Descriptor = SECURITY_DESCRIPTOR_RELATIVE()
class POP_TRIGGER_WAIT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Event = KEVENT()
self.Status = v_uint32()
self.Link = LIST_ENTRY()
self.Trigger = v_ptr32()
class DEVICE_OBJECT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Size = v_uint16()
self.ReferenceCount = v_uint32()
self.DriverObject = v_ptr32()
self.NextDevice = v_ptr32()
self.AttachedDevice = v_ptr32()
self.CurrentIrp = v_ptr32()
self.Timer = v_ptr32()
self.Flags = v_uint32()
self.Characteristics = v_uint32()
self.Vpb = v_ptr32()
self.DeviceExtension = v_ptr32()
self.DeviceType = v_uint32()
self.StackSize = v_uint8()
self._pad0034 = v_bytes(size=3)
self.Queue = _unnamed_11075()
self.AlignmentRequirement = v_uint32()
self.DeviceQueue = KDEVICE_QUEUE()
self.Dpc = KDPC()
self.ActiveThreadCount = v_uint32()
self.SecurityDescriptor = v_ptr32()
self.DeviceLock = KEVENT()
self.SectorSize = v_uint16()
self.Spare1 = v_uint16()
self.DeviceObjectExtension = v_ptr32()
self.Reserved = v_ptr32()
class MMVAD(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.StartingVpn = v_uint32()
self.EndingVpn = v_uint32()
self.Parent = v_ptr32()
self.LeftChild = v_ptr32()
self.RightChild = v_ptr32()
self.u = _unnamed_14102()
self.ControlArea = v_ptr32()
self.FirstPrototypePte = v_ptr32()
self.LastContiguousPte = v_ptr32()
self.u2 = _unnamed_14103()
class _unnamed_13227(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LongFlags = v_uint32()
class CM_NAME_HASH(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ConvKey = v_uint32()
self.NextHash = v_ptr32()
self.NameLength = v_uint16()
self.Name = vstruct.VArray([ v_uint16() for i in xrange(1) ])
class EX_PUSH_LOCK_WAIT_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.WakeEvent = KEVENT()
self.Next = v_ptr32()
self.ShareCount = v_uint32()
self.Exclusive = v_uint8()
self._pad001c = v_bytes(size=3)
class _unnamed_13174(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ShortFlags = v_uint16()
self.ReferenceCount = v_uint16()
class _unnamed_16299(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.Alignment = v_uint32()
self.MinimumAddress = LARGE_INTEGER()
self.MaximumAddress = LARGE_INTEGER()
class LPCP_MESSAGE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Entry = LIST_ENTRY()
self.SenderPort = v_ptr32()
self.RepliedToThread = v_ptr32()
self.PortContext = v_ptr32()
self._pad0018 = v_bytes(size=4)
self.Request = PORT_MESSAGE()
class EX_QUEUE_WORKER_INFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.QueueDisabled = v_uint32()
class PCI_FDO_EXTENSION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.List = SINGLE_LIST_ENTRY()
self.ExtensionType = v_uint32()
self.IrpDispatchTable = v_ptr32()
self.DeviceState = v_uint8()
self.TentativeNextState = v_uint8()
self._pad0010 = v_bytes(size=2)
self.SecondaryExtLock = KEVENT()
self.PhysicalDeviceObject = v_ptr32()
self.FunctionalDeviceObject = v_ptr32()
self.AttachedDeviceObject = v_ptr32()
self.ChildListLock = KEVENT()
self.ChildPdoList = v_ptr32()
self.BusRootFdoExtension = v_ptr32()
self.ParentFdoExtension = v_ptr32()
self.ChildBridgePdoList = v_ptr32()
self.PciBusInterface = v_ptr32()
self.MaxSubordinateBus = v_uint8()
self._pad0054 = v_bytes(size=3)
self.BusHandler = v_ptr32()
self.BaseBus = v_uint8()
self.Fake = v_uint8()
self.ChildDelete = v_uint8()
self.Scanned = v_uint8()
self.ArbitersInitialized = v_uint8()
self.BrokenVideoHackApplied = v_uint8()
self.Hibernated = v_uint8()
self._pad0060 = v_bytes(size=1)
self.PowerState = PCI_POWER_STATE()
self.SecondaryExtension = SINGLE_LIST_ENTRY()
self.ChildWaitWakeCount = v_uint32()
self.PreservedConfig = v_ptr32()
self.Lock = PCI_LOCK()
self.HotPlugParameters = _unnamed_14395()
self._pad00bc = v_bytes(size=3)
self.BusHackFlags = v_uint32()
class _unnamed_16573(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ReserveDevice = v_ptr32()
class PS_IMPERSONATION_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Token = v_ptr32()
self.CopyOnOpen = v_uint8()
self.EffectiveOnly = v_uint8()
self._pad0008 = v_bytes(size=2)
self.ImpersonationLevel = v_uint32()
class DBGKD_WRITE_BREAKPOINT32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BreakPointAddress = v_uint32()
self.BreakPointHandle = v_uint32()
class MMPFNLIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Total = v_uint32()
self.ListName = v_uint32()
self.Flink = v_uint32()
self.Blink = v_uint32()
class SINGLE_LIST_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
class _unnamed_14410(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Spare = vstruct.VArray([ v_uint8() for i in xrange(4) ])
class _unnamed_14411(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PrimaryBus = v_uint8()
self.SecondaryBus = v_uint8()
self.SubordinateBus = v_uint8()
self.SubtractiveDecode = v_uint8()
class KNODE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ProcessorMask = v_uint32()
self.Color = v_uint32()
self.MmShiftedColor = v_uint32()
self.FreeCount = vstruct.VArray([ v_uint32() for i in xrange(2) ])
self._pad0018 = v_bytes(size=4)
self.DeadStackList = SLIST_HEADER()
self.PfnDereferenceSListHead = SLIST_HEADER()
self.PfnDeferredList = v_ptr32()
self.Seed = v_uint8()
self.Flags = flags()
self._pad0030 = v_bytes(size=2)
class _unnamed_14793(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PhysicalAddress = v_uint32()
class _unnamed_16078(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.FileInformationClass = v_uint32()
class SYSTEM_POWER_CAPABILITIES(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PowerButtonPresent = v_uint8()
self.SleepButtonPresent = v_uint8()
self.LidPresent = v_uint8()
self.SystemS1 = v_uint8()
self.SystemS2 = v_uint8()
self.SystemS3 = v_uint8()
self.SystemS4 = v_uint8()
self.SystemS5 = v_uint8()
self.HiberFilePresent = v_uint8()
self.FullWake = v_uint8()
self.VideoDimPresent = v_uint8()
self.ApmPresent = v_uint8()
self.UpsPresent = v_uint8()
self.ThermalControl = v_uint8()
self.ProcessorThrottle = v_uint8()
self.ProcessorMinThrottle = v_uint8()
self.ProcessorMaxThrottle = v_uint8()
self.spare2 = vstruct.VArray([ v_uint8() for i in xrange(4) ])
self.DiskSpinDown = v_uint8()
self.spare3 = vstruct.VArray([ v_uint8() for i in xrange(8) ])
self.SystemBatteriesPresent = v_uint8()
self.BatteriesAreShortTerm = v_uint8()
self.BatteryScale = vstruct.VArray([ BATTERY_REPORTING_SCALE() for i in xrange(3) ])
self.AcOnLineWake = v_uint32()
self.SoftLidWake = v_uint32()
self.RtcWake = v_uint32()
self.MinDeviceWakeState = v_uint32()
self.DefaultLowLatencyWake = v_uint32()
class DBGKD_SET_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ContextFlags = v_uint32()
class MMEXTEND_INFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CommittedSize = v_uint64()
self.ReferenceCount = v_uint32()
self._pad0010 = v_bytes(size=4)
class _unnamed_16075(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.CompletionFilter = v_uint32()
class RTL_USER_PROCESS_PARAMETERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.MaximumLength = v_uint32()
self.Length = v_uint32()
self.Flags = v_uint32()
self.DebugFlags = v_uint32()
self.ConsoleHandle = v_ptr32()
self.ConsoleFlags = v_uint32()
self.StandardInput = v_ptr32()
self.StandardOutput = v_ptr32()
self.StandardError = v_ptr32()
self.CurrentDirectory = CURDIR()
self.DllPath = UNICODE_STRING()
self.ImagePathName = UNICODE_STRING()
self.CommandLine = UNICODE_STRING()
self.Environment = v_ptr32()
self.StartingX = v_uint32()
self.StartingY = v_uint32()
self.CountX = v_uint32()
self.CountY = v_uint32()
self.CountCharsX = v_uint32()
self.CountCharsY = v_uint32()
self.FillAttribute = v_uint32()
self.WindowFlags = v_uint32()
self.ShowWindowFlags = v_uint32()
self.WindowTitle = UNICODE_STRING()
self.DesktopInfo = UNICODE_STRING()
self.ShellInfo = UNICODE_STRING()
self.RuntimeData = UNICODE_STRING()
self.CurrentDirectores = vstruct.VArray([ RTL_DRIVE_LETTER_CURDIR() for i in xrange(32) ])
class u(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.KeyNode = CM_KEY_NODE()
class IO_RESOURCE_REQUIREMENTS_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListSize = v_uint32()
self.InterfaceType = v_uint32()
self.BusNumber = v_uint32()
self.SlotNumber = v_uint32()
self.Reserved = vstruct.VArray([ v_uint32() for i in xrange(3) ])
self.AlternativeLists = v_uint32()
self.List = vstruct.VArray([ IO_RESOURCE_LIST() for i in xrange(1) ])
class POWER_CHANNEL_SUMMARY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint32()
self.TotalCount = v_uint32()
self.D0Count = v_uint32()
self.NotifyList = LIST_ENTRY()
|
apache-2.0
| 1,417,916,768,710,102,300 | -2,226,033,339,468,044,000 | 31.797654 | 105 | 0.616804 | false |
lukeiwanski/tensorflow-opencl
|
tensorflow/contrib/session_bundle/example/export_half_plus_two.py
|
44
|
6027
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exports a toy linear regression inference graph.
Exports a TensorFlow graph to /tmp/half_plus_two/ based on the Exporter
format.
This graph calculates,
y = a*x + b
where a and b are variables with a=0.5 and b=2.
Output from this program is typically used to exercise Session
loading and execution code.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tensorflow as tf
from tensorflow.contrib.session_bundle import exporter
FLAGS = None
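# A hedged reference (added for illustration, not part of the original
# script): the plain-Python function the exported graph is meant to
# compute (y = a*x + b with a=0.5, b=2), handy as an oracle when
# exercising the loaded Session.
def _half_plus_two(x):
  return 0.5 * x + 2.0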
def Export(export_dir, use_checkpoint_v2):
with tf.Session() as sess:
# Make model parameters a&b variables instead of constants to
# exercise the variable reloading mechanisms.
a = tf.Variable(0.5, name="a")
b = tf.Variable(2.0, name="b")
# Create a placeholder for serialized tensorflow.Example messages to be fed.
serialized_tf_example = tf.placeholder(tf.string, name="tf_example")
# Parse the tensorflow.Example looking for a feature named "x" with a single
# floating point value.
feature_configs = {"x": tf.FixedLenFeature([1], dtype=tf.float32),}
tf_example = tf.parse_example(serialized_tf_example, feature_configs)
# Use tf.identity() to assign name
x = tf.identity(tf_example["x"], name="x")
# Calculate, y = a*x + b
y = tf.add(tf.multiply(a, x), b, name="y")
# Setup a standard Saver for our variables.
save = tf.train.Saver(
{
"a": a,
"b": b
},
sharded=True,
write_version=tf.train.SaverDef.V2 if use_checkpoint_v2 else
tf.train.SaverDef.V1)
# asset_path contains the base directory of assets used in training (e.g.
# vocabulary files).
original_asset_path = tf.constant("/tmp/original/export/assets")
# Ops reading asset files should reference the asset_path tensor
# which stores the original asset path at training time and the
# overridden assets directory at restore time.
asset_path = tf.Variable(original_asset_path,
name="asset_path",
trainable=False,
collections=[])
assign_asset_path = asset_path.assign(original_asset_path)
# Use a fixed global step number.
global_step_tensor = tf.Variable(123, name="global_step")
# Create a RegressionSignature for our input and output.
regression_signature = exporter.regression_signature(
input_tensor=serialized_tf_example,
        # Use tf.identity here because we export two signatures.
        # Otherwise only the graph for one of the signatures will be loaded
# (whichever is created first) during serving.
output_tensor=tf.identity(y))
named_graph_signature = {
"inputs": exporter.generic_signature({"x": x}),
"outputs": exporter.generic_signature({"y": y})
}
# Create two filename assets and corresponding tensors.
    # TODO(b/26254158) Consider adding validation of file existence as well as
# hashes (e.g. sha1) for consistency.
original_filename1 = tf.constant("hello1.txt")
tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, original_filename1)
filename1 = tf.Variable(original_filename1,
name="filename1",
trainable=False,
collections=[])
assign_filename1 = filename1.assign(original_filename1)
original_filename2 = tf.constant("hello2.txt")
tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, original_filename2)
filename2 = tf.Variable(original_filename2,
name="filename2",
trainable=False,
collections=[])
assign_filename2 = filename2.assign(original_filename2)
# Init op contains a group of all variables that we assign.
init_op = tf.group(assign_asset_path, assign_filename1, assign_filename2)
# CopyAssets is used as a callback during export to copy files to the
# given export directory.
def CopyAssets(filepaths, export_path):
print("copying asset files to: %s" % export_path)
for filepath in filepaths:
print("copying asset file: %s" % filepath)
# Run an export.
tf.global_variables_initializer().run()
export = exporter.Exporter(save)
export.init(
sess.graph.as_graph_def(),
init_op=init_op,
default_graph_signature=regression_signature,
named_graph_signatures=named_graph_signature,
assets_collection=tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS),
assets_callback=CopyAssets)
export.export(export_dir, global_step_tensor, sess)
def main(_):
Export(FLAGS.export_dir, FLAGS.use_checkpoint_v2)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--export_dir",
type=str,
default="/tmp/half_plus_two",
help="Directory where to export inference model."
)
parser.add_argument(
"--use_checkpoint_v2",
"bool",
nargs="?",
const=True,
default=False,
help="If true, write v2 checkpoint files."
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
apache-2.0
| 9,074,608,879,831,156,000 | -3,387,435,697,580,865,000 | 36.203704 | 80 | 0.655218 | false |
sephii/django-cms
|
cms/cms_plugins.py
|
46
|
4350
|
# -*- coding: utf-8 -*-
from cms.models import CMSPlugin, Placeholder
from cms.models.aliaspluginmodel import AliasPluginModel
from cms.models.placeholderpluginmodel import PlaceholderReference
from cms.plugin_base import CMSPluginBase, PluginMenuItem
from cms.plugin_pool import plugin_pool
from cms.plugin_rendering import render_placeholder
from cms.utils.urlutils import admin_reverse
from django.conf.urls import url
from django.http import HttpResponseForbidden, HttpResponseBadRequest, HttpResponse
from django.middleware.csrf import get_token
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _, get_language
class PlaceholderPlugin(CMSPluginBase):
name = _("Placeholder")
    parent_classes = [0]  # so you will not be able to add it to anything
#require_parent = True
render_plugin = False
admin_preview = False
system = True
model = PlaceholderReference
plugin_pool.register_plugin(PlaceholderPlugin)
class AliasPlugin(CMSPluginBase):
name = _("Alias")
allow_children = False
model = AliasPluginModel
render_template = "cms/plugins/alias.html"
system = True
def render(self, context, instance, placeholder):
from cms.utils.plugins import downcast_plugins, build_plugin_tree
context['instance'] = instance
context['placeholder'] = placeholder
if instance.plugin_id:
plugins = instance.plugin.get_descendants().order_by('placeholder', 'path')
plugins = [instance.plugin] + list(plugins)
plugins = downcast_plugins(plugins)
plugins[0].parent_id = None
plugins = build_plugin_tree(plugins)
context['plugins'] = plugins
if instance.alias_placeholder_id:
content = render_placeholder(instance.alias_placeholder, context)
context['content'] = mark_safe(content)
return context
def get_extra_global_plugin_menu_items(self, request, plugin):
return [
PluginMenuItem(
_("Create Alias"),
admin_reverse("cms_create_alias"),
data={'plugin_id': plugin.pk, 'csrfmiddlewaretoken': get_token(request)},
)
]
def get_extra_placeholder_menu_items(self, request, placeholder):
return [
PluginMenuItem(
_("Create Alias"),
admin_reverse("cms_create_alias"),
data={'placeholder_id': placeholder.pk, 'csrfmiddlewaretoken': get_token(request)},
)
]
def get_plugin_urls(self):
return [
url(r'^create_alias/$', self.create_alias, name='cms_create_alias'),
]
def create_alias(self, request):
if not request.user.is_staff:
return HttpResponseForbidden("not enough privileges")
        if 'plugin_id' not in request.POST and 'placeholder_id' not in request.POST:
return HttpResponseBadRequest("plugin_id or placeholder_id POST parameter missing.")
plugin = None
placeholder = None
if 'plugin_id' in request.POST:
pk = request.POST['plugin_id']
try:
plugin = CMSPlugin.objects.get(pk=pk)
except CMSPlugin.DoesNotExist:
return HttpResponseBadRequest("plugin with id %s not found." % pk)
if 'placeholder_id' in request.POST:
pk = request.POST['placeholder_id']
try:
placeholder = Placeholder.objects.get(pk=pk)
except Placeholder.DoesNotExist:
return HttpResponseBadRequest("placeholder with id %s not found." % pk)
if not placeholder.has_change_permission(request):
return HttpResponseBadRequest("You do not have enough permission to alias this placeholder.")
clipboard = request.toolbar.clipboard
clipboard.cmsplugin_set.all().delete()
language = get_language()
if plugin:
language = plugin.language
alias = AliasPluginModel(language=language, placeholder=clipboard, plugin_type="AliasPlugin")
if plugin:
alias.plugin = plugin
if placeholder:
alias.alias_placeholder = placeholder
alias.save()
return HttpResponse("ok")
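# Hedged usage sketch (added for illustration, not part of django CMS):
# invoking the cms_create_alias endpoint registered above from a Django
# test client. `staff_client` and `plugin_pk` are hypothetical; a real
# request also needs a CSRF token unless the client skips CSRF checks.
def _create_alias_example(staff_client, plugin_pk):
    return staff_client.post(admin_reverse("cms_create_alias"),
                             {'plugin_id': plugin_pk})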
plugin_pool.register_plugin(AliasPlugin)
|
bsd-3-clause
| -3,467,547,875,519,132,700 | -1,303,601,892,407,960,000 | 38.189189 | 109 | 0.647356 | false |
ayumilong/rethinkdb
|
external/v8_3.30.33.16/build/gyp/test/intermediate_dir/gyptest-intermediate-dir.py
|
243
|
1398
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that targets have independent INTERMEDIATE_DIRs.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('test.gyp', chdir='src')
test.build('test.gyp', 'target1', chdir='src')
# Check stuff exists.
intermediate_file1 = test.read('src/outfile.txt')
test.must_contain(intermediate_file1, 'target1')
shared_intermediate_file1 = test.read('src/shared_outfile.txt')
test.must_contain(shared_intermediate_file1, 'shared_target1')
test.run_gyp('test2.gyp', chdir='src')
# Force the shared intermediate to be rebuilt.
test.sleep()
test.touch('src/shared_infile.txt')
test.build('test2.gyp', 'target2', chdir='src')
# Check INTERMEDIATE_DIR file didn't get overwritten but SHARED_INTERMEDIATE_DIR
# file did.
intermediate_file2 = test.read('src/outfile.txt')
test.must_contain(intermediate_file1, 'target1')
test.must_contain(intermediate_file2, 'target2')
shared_intermediate_file2 = test.read('src/shared_outfile.txt')
if shared_intermediate_file1 != shared_intermediate_file2:
test.fail_test(shared_intermediate_file1 + ' != ' + shared_intermediate_file2)
test.must_contain(shared_intermediate_file1, 'shared_target2')
test.must_contain(shared_intermediate_file2, 'shared_target2')
test.pass_test()
|
agpl-3.0
| 258,478,234,130,149,760 | -6,452,179,456,032,929,000 | 32.285714 | 80 | 0.753934 | false |
sbesson/openmicroscopy
|
components/tools/OmeroWeb/test/integration/test_history.py
|
3
|
3141
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 University of Dundee & Open Microscopy Environment.
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Tests display of data in History page."""
from __future__ import print_function
from omeroweb.testlib import IWebTest
from omeroweb.testlib import get, post
from datetime import datetime
from django.core.urlresolvers import reverse
class TestHistory(IWebTest):
"""Tests display of data in History page."""
def test_history(self):
"""Test /webclient/history/ page."""
request_url = reverse("load_template", args=["history"])
response = get(self.django_client, request_url)
assert "history_calendar" in response.content.decode("utf-8")
def test_calendar_default(self):
"""Test display of new Project in today's history page."""
calendar_url = reverse("load_calendar")
response = get(self.django_client, calendar_url)
# Calendar is initially empty (no 'Project' icon)
assert "folder16.png" not in response.content.decode("utf-8")
# Add Project
request_url = reverse("manage_action_containers",
args=["addnewcontainer"])
data = {
'folder_type': 'project',
'name': 'foobar'
}
response = post(self.django_client, request_url, data)
# Default calendar loads calendar for current month
response = get(self.django_client, calendar_url)
# Now contains icon for Project
assert "folder16.png" in response.content.decode("utf-8")
def test_calendar_month(self):
"""Test loading of calendar, specifying this month."""
now = datetime.now()
calendar_url = reverse("load_calendar", args=[now.year, now.month])
print('calendar_url', calendar_url)
response = get(self.django_client, calendar_url)
# Calendar is initially empty (no 'Dataset' icon)
assert "folder_image16.png" not in response.content.decode("utf-8")
# Add Dataset
request_url = reverse("manage_action_containers",
args=["addnewcontainer"])
data = {
'folder_type': 'dataset',
'name': 'foobar'
}
response = post(self.django_client, request_url, data)
# Now contains icon for Dataset
response = get(self.django_client, calendar_url)
assert "folder_image16.png" in response.content.decode("utf-8")
|
gpl-2.0
| 7,800,852,304,053,120,000 | 219,939,905,665,899,420 | 38.759494 | 75 | 0.655842 | false |
michhar/flask-webapp-aml
|
env1/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py
|
152
|
6305
|
import base64
import io
import json
import zlib
from pip._vendor.requests.structures import CaseInsensitiveDict
from .compat import HTTPResponse, pickle, text_type
def _b64_encode_bytes(b):
return base64.b64encode(b).decode("ascii")
def _b64_encode_str(s):
return _b64_encode_bytes(s.encode("utf8"))
def _b64_encode(s):
if isinstance(s, text_type):
return _b64_encode_str(s)
return _b64_encode_bytes(s)
def _b64_decode_bytes(b):
return base64.b64decode(b.encode("ascii"))
def _b64_decode_str(s):
return _b64_decode_bytes(s).decode("utf8")
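# Illustrative sanity check (added for this write-up, not part of the
# module): the helpers above round-trip both bytes and text through
# ASCII-safe base64.
def _b64_roundtrip_example():
    assert _b64_decode_bytes(_b64_encode_bytes(b"\x00\xff")) == b"\x00\xff"
    assert _b64_decode_str(_b64_encode_str("hello")) == "hello"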
class Serializer(object):
def dumps(self, request, response, body=None):
response_headers = CaseInsensitiveDict(response.headers)
if body is None:
body = response.read(decode_content=False)
# NOTE: 99% sure this is dead code. I'm only leaving it
# here b/c I don't have a test yet to prove
# it. Basically, before using
# `cachecontrol.filewrapper.CallbackFileWrapper`,
# this made an effort to reset the file handle. The
# `CallbackFileWrapper` short circuits this code by
# setting the body as the content is consumed, the
# result being a `body` argument is *always* passed
# into cache_response, and in turn,
# `Serializer.dump`.
response._fp = io.BytesIO(body)
data = {
"response": {
"body": _b64_encode_bytes(body),
"headers": dict(
(_b64_encode(k), _b64_encode(v))
for k, v in response.headers.items()
),
"status": response.status,
"version": response.version,
"reason": _b64_encode_str(response.reason),
"strict": response.strict,
"decode_content": response.decode_content,
},
}
# Construct our vary headers
data["vary"] = {}
if "vary" in response_headers:
varied_headers = response_headers['vary'].split(',')
for header in varied_headers:
header = header.strip()
data["vary"][header] = request.headers.get(header, None)
# Encode our Vary headers to ensure they can be serialized as JSON
data["vary"] = dict(
(_b64_encode(k), _b64_encode(v) if v is not None else v)
for k, v in data["vary"].items()
)
return b",".join([
b"cc=2",
zlib.compress(
json.dumps(
data, separators=(",", ":"), sort_keys=True,
).encode("utf8"),
),
])
def loads(self, request, data):
# Short circuit if we've been given an empty set of data
if not data:
return
# Determine what version of the serializer the data was serialized
# with
try:
ver, data = data.split(b",", 1)
except ValueError:
ver = b"cc=0"
# Make sure that our "ver" is actually a version and isn't a false
# positive from a , being in the data stream.
if ver[:3] != b"cc=":
data = ver + data
ver = b"cc=0"
# Get the version number out of the cc=N
ver = ver.split(b"=", 1)[-1].decode("ascii")
# Dispatch to the actual load method for the given version
try:
return getattr(self, "_loads_v{0}".format(ver))(request, data)
except AttributeError:
# This is a version we don't have a loads function for, so we'll
# just treat it as a miss and return None
return
def prepare_response(self, request, cached):
"""Verify our vary headers match and construct a real urllib3
HTTPResponse object.
"""
# Special case the '*' Vary value as it means we cannot actually
# determine if the cached response is suitable for this request.
if "*" in cached.get("vary", {}):
return
# Ensure that the Vary headers for the cached response match our
# request
for header, value in cached.get("vary", {}).items():
if request.headers.get(header, None) != value:
return
body_raw = cached["response"].pop("body")
try:
body = io.BytesIO(body_raw)
except TypeError:
# This can happen if cachecontrol serialized to v1 format (pickle)
# using Python 2. A Python 2 str(byte string) will be unpickled as
# a Python 3 str (unicode string), which will cause the above to
# fail with:
#
# TypeError: 'str' does not support the buffer interface
body = io.BytesIO(body_raw.encode('utf8'))
return HTTPResponse(
body=body,
preload_content=False,
**cached["response"]
)
def _loads_v0(self, request, data):
# The original legacy cache data. This doesn't contain enough
# information to construct everything we need, so we'll treat this as
# a miss.
return
def _loads_v1(self, request, data):
try:
cached = pickle.loads(data)
except ValueError:
return
return self.prepare_response(request, cached)
def _loads_v2(self, request, data):
try:
cached = json.loads(zlib.decompress(data).decode("utf8"))
except ValueError:
return
# We need to decode the items that we've base64 encoded
cached["response"]["body"] = _b64_decode_bytes(
cached["response"]["body"]
)
cached["response"]["headers"] = dict(
(_b64_decode_str(k), _b64_decode_str(v))
for k, v in cached["response"]["headers"].items()
)
cached["response"]["reason"] = _b64_decode_str(
cached["response"]["reason"],
)
cached["vary"] = dict(
(_b64_decode_str(k), _b64_decode_str(v) if v is not None else v)
for k, v in cached["vary"].items()
)
return self.prepare_response(request, cached)
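# Illustrative sketch (added for this write-up, not part of cachecontrol):
# how loads() above peels the "cc=N" version tag off a serialized blob,
# mirroring the fallback-to-"cc=0" logic. For example,
# _split_version_example(b"cc=2,payload") returns ("2", b"payload").
def _split_version_example(data):
    try:
        ver, payload = data.split(b",", 1)
    except ValueError:
        ver, payload = b"cc=0", data
    if ver[:3] != b"cc=":
        payload = ver + payload
        ver = b"cc=0"
    return ver.split(b"=", 1)[-1].decode("ascii"), payload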
|
mit
| -731,736,912,774,311,000 | -6,189,211,773,614,752,000 | 32.184211 | 78 | 0.547185 | false |
JosmanPS/scikit-learn
|
examples/cluster/plot_dict_face_patches.py
|
337
|
2747
|
"""
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial_fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
|
bsd-3-clause
| -7,901,069,623,442,818,000 | 1,256,753,847,005,718,000 | 31.702381 | 79 | 0.619949 | false |
Lautitia/newfies-dialer
|
newfies/dialer_campaign/migrations/0003_auto__add_field_campaign_agent_script__add_field_campaign_lead_disposi.py
|
9
|
17201
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Campaign.agent_script'
db.add_column(u'dialer_campaign', 'agent_script',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
# Adding field 'Campaign.lead_disposition'
db.add_column(u'dialer_campaign', 'lead_disposition',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Campaign.agent_script'
db.delete_column(u'dialer_campaign', 'agent_script')
# Deleting field 'Campaign.lead_disposition'
db.delete_column(u'dialer_campaign', 'lead_disposition')
models = {
u'audiofield.audiofile': {
'Meta': {'object_name': 'AudioFile', 'db_table': "u'audio_file'"},
'audio_file': ('audiofield.fields.AudioField', [], {'ext_whitelist': "['.mp3', '.wav', '.ogg']", 'max_length': '100', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'dialer_campaign.campaign': {
'Meta': {'object_name': 'Campaign', 'db_table': "u'dialer_campaign'"},
'agent_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'aleg_gateway': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'A-Leg Gateway'", 'to': u"orm['dialer_gateway.Gateway']"}),
'amd_behavior': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'caller_name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'callerid': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'callmaxduration': ('django.db.models.fields.IntegerField', [], {'default': "'1800'", 'null': 'True', 'blank': 'True'}),
'calltimeout': ('django.db.models.fields.IntegerField', [], {'default': "'45'", 'null': 'True', 'blank': 'True'}),
'campaign_code': ('django.db.models.fields.CharField', [], {'default': "'PDIWC'", 'unique': 'True', 'max_length': '20', 'blank': 'True'}),
'completed': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'completion_intervalretry': ('django.db.models.fields.IntegerField', [], {'default': "'900'", 'null': 'True', 'blank': 'True'}),
'completion_maxretry': ('django.db.models.fields.IntegerField', [], {'default': "'0'", 'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'daily_start_time': ('django.db.models.fields.TimeField', [], {'default': "'00:00:00'"}),
'daily_stop_time': ('django.db.models.fields.TimeField', [], {'default': "'23:59:59'"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'dnc': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'DNC'", 'null': 'True', 'to': u"orm['dnc.DNC']"}),
'expirationdate': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 7, 24, 0, 0)'}),
'extra_data': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'frequency': ('django.db.models.fields.IntegerField', [], {'default': "'10'", 'null': 'True', 'blank': 'True'}),
'friday': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'has_been_duplicated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_been_started': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imported_phonebook': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '500', 'blank': 'True'}),
'intervalretry': ('django.db.models.fields.IntegerField', [], {'default': "'300'", 'null': 'True', 'blank': 'True'}),
'lead_disposition': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'maxretry': ('django.db.models.fields.IntegerField', [], {'default': "'0'", 'null': 'True', 'blank': 'True'}),
'monday': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'phonebook': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['dialer_contact.Phonebook']", 'null': 'True', 'blank': 'True'}),
'saturday': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'startingdate': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 7, 23, 0, 0)'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2', 'null': 'True', 'blank': 'True'}),
'sunday': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'thursday': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'totalcontact': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'tuesday': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Campaign owner'", 'to': u"orm['auth.User']"}),
'voicemail': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'voicemail_audiofile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['audiofield.AudioFile']", 'null': 'True', 'blank': 'True'}),
'wednesday': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'dialer_campaign.subscriber': {
'Meta': {'unique_together': "(['contact', 'campaign'],)", 'object_name': 'Subscriber', 'db_table': "u'dialer_subscriber'"},
'campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dialer_campaign.Campaign']", 'null': 'True', 'blank': 'True'}),
'completion_count_attempt': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dialer_contact.Contact']", 'null': 'True', 'blank': 'True'}),
'count_attempt': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'duplicate_contact': ('django.db.models.fields.CharField', [], {'max_length': '90'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_attempt': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'})
},
u'dialer_contact.contact': {
'Meta': {'object_name': 'Contact', 'db_table': "u'dialer_contact'"},
'additional_vars': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'contact': ('django.db.models.fields.CharField', [], {'max_length': '90'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'phonebook': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dialer_contact.Phonebook']"}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'unit_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'dialer_contact.phonebook': {
'Meta': {'object_name': 'Phonebook', 'db_table': "u'dialer_phonebook'"},
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '90'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Phonebook owner'", 'to': u"orm['auth.User']"})
},
u'dialer_gateway.gateway': {
'Meta': {'object_name': 'Gateway', 'db_table': "u'dialer_gateway'"},
'addparameter': ('django.db.models.fields.CharField', [], {'max_length': '360', 'blank': 'True'}),
'addprefix': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'count_call': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'count_in_use': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'failover': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'Failover Gateway'", 'null': 'True', 'to': u"orm['dialer_gateway.Gateway']"}),
'gateway_codecs': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'gateway_retries': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'gateway_timeouts': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'gateways': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'maximum_call': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'originate_dial_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'removeprefix': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'secondused': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'dnc.dnc': {
'Meta': {'object_name': 'DNC', 'db_table': "'dnc_list'"},
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'DNC owner'", 'to': u"orm['auth.User']"})
}
}
complete_apps = ['dialer_campaign']
|
mpl-2.0
| -7,183,501,895,782,865,000 | -3,340,199,230,419,995,600 | 87.664948 | 188 | 0.555084 | false |
sergiomb2/gdesklets
|
display/TargetBonoboControl.py
|
2
|
1349
|
from DisplayTarget import DisplayTarget
from utils.datatypes import *
import gtk
#
# Class for a target that embeds Bonobo controls.
#
class TargetBonoboControl(DisplayTarget):
def __init__(self, name, parent):
# the control; you can load a control only once
self.__control = None
self.__widget = gtk.HBox()
self.__widget.show()
DisplayTarget.__init__(self, name, parent)
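        # The "oafiid" property expects a Bonobo activation id such as
        # "OAFIID:GNOME_Evolution_Calendar_Control" (an illustrative value,
        # not one required by this class).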
self._register_property("oafiid", TYPE_STRING,
self._setp_oafiid, self._getp)
def get_widget(self): return self.__widget
def _setp_oafiid(self, key, value):
import bonobo.ui
try:
container = bonobo.ui.Container()
control = bonobo.ui.Widget(str(value),
container.corba_objref())
pbag = control.get_control_frame().get_control_property_bag()
slots = pbag.getKeys("")
control.show()
if self.__control: # we have to remove the previous control
                self.__widget.remove(self.__control)
self.__widget.add(control)
self.__control = control
self._setp(key, value)
except StandardError, exc:
log("Warning: An error occurred while setting the oafiid:\n%s" \
% (exc,))
|
gpl-2.0
| 6,237,008,058,483,872,000 | 5,979,948,370,345,047,000 | 25.45098 | 76 | 0.558191 | false |
alrusdi/lettuce
|
tests/integration/lib/Django-1.3/django/contrib/gis/gdal/tests/test_envelope.py
|
332
|
3742
|
from django.contrib.gis.gdal import Envelope, OGRException
from django.utils import unittest
class TestPoint(object):
def __init__(self, x, y):
self.x = x
self.y = y
class EnvelopeTest(unittest.TestCase):
def setUp(self):
self.e = Envelope(0, 0, 5, 5)
def test01_init(self):
"Testing Envelope initilization."
e1 = Envelope((0, 0, 5, 5))
e2 = Envelope(0, 0, 5, 5)
e3 = Envelope(0, '0', '5', 5) # Thanks to ww for this
e4 = Envelope(e1._envelope)
self.assertRaises(OGRException, Envelope, (5, 5, 0, 0))
self.assertRaises(OGRException, Envelope, 5, 5, 0, 0)
self.assertRaises(OGRException, Envelope, (0, 0, 5, 5, 3))
self.assertRaises(OGRException, Envelope, ())
self.assertRaises(ValueError, Envelope, 0, 'a', 5, 5)
self.assertRaises(TypeError, Envelope, u'foo')
self.assertRaises(OGRException, Envelope, (1, 1, 0, 0))
try:
Envelope(0, 0, 0, 0)
except OGRException:
self.fail("shouldn't raise an exception for min_x == max_x or min_y == max_y")
def test02_properties(self):
"Testing Envelope properties."
e = Envelope(0, 0, 2, 3)
self.assertEqual(0, e.min_x)
self.assertEqual(0, e.min_y)
self.assertEqual(2, e.max_x)
self.assertEqual(3, e.max_y)
self.assertEqual((0, 0), e.ll)
self.assertEqual((2, 3), e.ur)
self.assertEqual((0, 0, 2, 3), e.tuple)
self.assertEqual('POLYGON((0.0 0.0,0.0 3.0,2.0 3.0,2.0 0.0,0.0 0.0))', e.wkt)
self.assertEqual('(0.0, 0.0, 2.0, 3.0)', str(e))
def test03_equivalence(self):
"Testing Envelope equivalence."
e1 = Envelope(0.523, 0.217, 253.23, 523.69)
e2 = Envelope((0.523, 0.217, 253.23, 523.69))
self.assertEqual(e1, e2)
self.assertEqual((0.523, 0.217, 253.23, 523.69), e1)
def test04_expand_to_include_pt_2_params(self):
"Testing Envelope expand_to_include -- point as two parameters."
self.e.expand_to_include(2, 6)
self.assertEqual((0, 0, 5, 6), self.e)
self.e.expand_to_include(-1, -1)
self.assertEqual((-1, -1, 5, 6), self.e)
def test05_expand_to_include_pt_2_tuple(self):
"Testing Envelope expand_to_include -- point as a single 2-tuple parameter."
self.e.expand_to_include((10, 10))
self.assertEqual((0, 0, 10, 10), self.e)
self.e.expand_to_include((-10, -10))
self.assertEqual((-10, -10, 10, 10), self.e)
def test06_expand_to_include_extent_4_params(self):
"Testing Envelope expand_to_include -- extent as 4 parameters."
self.e.expand_to_include(-1, 1, 3, 7)
self.assertEqual((-1, 0, 5, 7), self.e)
def test06_expand_to_include_extent_4_tuple(self):
"Testing Envelope expand_to_include -- extent as a single 4-tuple parameter."
self.e.expand_to_include((-1, 1, 3, 7))
self.assertEqual((-1, 0, 5, 7), self.e)
def test07_expand_to_include_envelope(self):
"Testing Envelope expand_to_include with Envelope as parameter."
self.e.expand_to_include(Envelope(-1, 1, 3, 7))
self.assertEqual((-1, 0, 5, 7), self.e)
def test08_expand_to_include_point(self):
"Testing Envelope expand_to_include with Point as parameter."
self.e.expand_to_include(TestPoint(-1, 1))
self.assertEqual((-1, 0, 5, 5), self.e)
self.e.expand_to_include(TestPoint(10, 10))
self.assertEqual((-1, 0, 10, 10), self.e)
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(EnvelopeTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
|
gpl-3.0
| -2,053,046,379,558,478,300 | -366,396,010,498,625,300 | 38.389474 | 90 | 0.601015 | false |
GrandmasterK/XScheduler
|
venv/lib/python2.7/site-packages/rsa/pkcs1.py
|
75
|
13170
|
# -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Functions for PKCS#1 version 1.5 encryption and signing
This module implements certain functionality from PKCS#1 version 1.5. For a
very clear example, read http://www.di-mgt.com.au/rsa_alg.html#pkcs1schemes
At least 8 bytes of random padding are used when encrypting a message. This makes
these methods much more secure than the ones in the ``rsa`` module.
WARNING: this module leaks information when decryption or verification fails.
The exceptions that are raised contain the Python traceback information, which
can be used to deduce where in the process the failure occurred. DO NOT PASS
SUCH INFORMATION to your users.
'''
import hashlib
import os
from rsa._compat import b
from rsa import common, transform, core, varblock
# ASN.1 codes that describe the hash algorithm used.
HASH_ASN1 = {
'MD5': b('\x30\x20\x30\x0c\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x05\x05\x00\x04\x10'),
'SHA-1': b('\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14'),
'SHA-256': b('\x30\x31\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20'),
'SHA-384': b('\x30\x41\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x02\x05\x00\x04\x30'),
'SHA-512': b('\x30\x51\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x03\x05\x00\x04\x40'),
}
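# For example, a signature made with 'SHA-1' decrypts to
# HASH_ASN1['SHA-1'] + <20-byte digest>; _find_method_hash() below relies on
# these prefixes to recover the hash method from a signature block.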
HASH_METHODS = {
'MD5': hashlib.md5,
'SHA-1': hashlib.sha1,
'SHA-256': hashlib.sha256,
'SHA-384': hashlib.sha384,
'SHA-512': hashlib.sha512,
}
class CryptoError(Exception):
'''Base class for all exceptions in this module.'''
class DecryptionError(CryptoError):
'''Raised when decryption fails.'''
class VerificationError(CryptoError):
'''Raised when verification fails.'''
def _pad_for_encryption(message, target_length):
r'''Pads the message for encryption, returning the padded message.
:return: 00 02 RANDOM_DATA 00 MESSAGE
>>> block = _pad_for_encryption('hello', 16)
>>> len(block)
16
>>> block[0:2]
'\x00\x02'
>>> block[-6:]
'\x00hello'
'''
max_msglength = target_length - 11
msglength = len(message)
if msglength > max_msglength:
raise OverflowError('%i bytes needed for message, but there is only'
' space for %i' % (msglength, max_msglength))
# Get random padding
padding = b('')
padding_length = target_length - msglength - 3
# We remove 0-bytes, so we'll end up with less padding than we've asked for,
# so keep adding data until we're at the correct length.
while len(padding) < padding_length:
needed_bytes = padding_length - len(padding)
        # Always read a few (here 5) bytes more than we need, and trim off the
        # rest after removing the 0-bytes. This increases the chance of
        # getting enough bytes, especially when needed_bytes is small
new_padding = os.urandom(needed_bytes + 5)
new_padding = new_padding.replace(b('\x00'), b(''))
padding = padding + new_padding[:needed_bytes]
assert len(padding) == padding_length
return b('').join([b('\x00\x02'),
padding,
b('\x00'),
message])
def _pad_for_signing(message, target_length):
r'''Pads the message for signing, returning the padded message.
The padding is always a repetition of FF bytes.
:return: 00 01 PADDING 00 MESSAGE
>>> block = _pad_for_signing('hello', 16)
>>> len(block)
16
>>> block[0:2]
'\x00\x01'
>>> block[-6:]
'\x00hello'
>>> block[2:-6]
'\xff\xff\xff\xff\xff\xff\xff\xff'
'''
max_msglength = target_length - 11
msglength = len(message)
if msglength > max_msglength:
raise OverflowError('%i bytes needed for message, but there is only'
' space for %i' % (msglength, max_msglength))
padding_length = target_length - msglength - 3
return b('').join([b('\x00\x01'),
padding_length * b('\xff'),
b('\x00'),
message])
def encrypt(message, pub_key):
'''Encrypts the given message using PKCS#1 v1.5
:param message: the message to encrypt. Must be a byte string no longer than
``k-11`` bytes, where ``k`` is the number of bytes needed to encode
the ``n`` component of the public key.
:param pub_key: the :py:class:`rsa.PublicKey` to encrypt with.
:raise OverflowError: when the message is too large to fit in the padded
block.
>>> from rsa import key, common
>>> (pub_key, priv_key) = key.newkeys(256)
>>> message = 'hello'
>>> crypto = encrypt(message, pub_key)
The crypto text should be just as long as the public key 'n' component:
>>> len(crypto) == common.byte_size(pub_key.n)
True
'''
keylength = common.byte_size(pub_key.n)
padded = _pad_for_encryption(message, keylength)
payload = transform.bytes2int(padded)
encrypted = core.encrypt_int(payload, pub_key.e, pub_key.n)
block = transform.int2bytes(encrypted, keylength)
return block
def decrypt(crypto, priv_key):
r'''Decrypts the given message using PKCS#1 v1.5
The decryption is considered 'failed' when the resulting cleartext doesn't
start with the bytes 00 02, or when the 00 byte between the padding and
the message cannot be found.
:param crypto: the crypto text as returned by :py:func:`rsa.encrypt`
:param priv_key: the :py:class:`rsa.PrivateKey` to decrypt with.
:raise DecryptionError: when the decryption fails. No details are given as
to why the code thinks the decryption fails, as this would leak
information about the private key.
>>> import rsa
>>> (pub_key, priv_key) = rsa.newkeys(256)
It works with strings:
>>> crypto = encrypt('hello', pub_key)
>>> decrypt(crypto, priv_key)
'hello'
And with binary data:
>>> crypto = encrypt('\x00\x00\x00\x00\x01', pub_key)
>>> decrypt(crypto, priv_key)
'\x00\x00\x00\x00\x01'
Altering the encrypted information will *likely* cause a
:py:class:`rsa.pkcs1.DecryptionError`. If you want to be *sure*, use
:py:func:`rsa.sign`.
.. warning::
Never display the stack trace of a
:py:class:`rsa.pkcs1.DecryptionError` exception. It shows where in the
code the exception occurred, and thus leaks information about the key.
It's only a tiny bit of information, but every bit makes cracking the
keys easier.
>>> crypto = encrypt('hello', pub_key)
>>> crypto = crypto[0:5] + 'X' + crypto[6:] # change a byte
>>> decrypt(crypto, priv_key)
Traceback (most recent call last):
...
DecryptionError: Decryption failed
'''
blocksize = common.byte_size(priv_key.n)
encrypted = transform.bytes2int(crypto)
decrypted = core.decrypt_int(encrypted, priv_key.d, priv_key.n)
cleartext = transform.int2bytes(decrypted, blocksize)
# If we can't find the cleartext marker, decryption failed.
if cleartext[0:2] != b('\x00\x02'):
raise DecryptionError('Decryption failed')
# Find the 00 separator between the padding and the message
try:
sep_idx = cleartext.index(b('\x00'), 2)
except ValueError:
raise DecryptionError('Decryption failed')
return cleartext[sep_idx+1:]
def sign(message, priv_key, hash):
'''Signs the message with the private key.
Hashes the message, then signs the hash with the given key. This is known
as a "detached signature", because the message itself isn't altered.
:param message: the message to sign. Can be an 8-bit string or a file-like
object. If ``message`` has a ``read()`` method, it is assumed to be a
file-like object.
:param priv_key: the :py:class:`rsa.PrivateKey` to sign with
:param hash: the hash method used on the message. Use 'MD5', 'SHA-1',
'SHA-256', 'SHA-384' or 'SHA-512'.
:return: a message signature block.
:raise OverflowError: if the private key is too small to contain the
requested hash.
'''
# Get the ASN1 code for this hash method
if hash not in HASH_ASN1:
raise ValueError('Invalid hash method: %s' % hash)
asn1code = HASH_ASN1[hash]
# Calculate the hash
hash = _hash(message, hash)
# Encrypt the hash with the private key
cleartext = asn1code + hash
keylength = common.byte_size(priv_key.n)
padded = _pad_for_signing(cleartext, keylength)
payload = transform.bytes2int(padded)
encrypted = core.encrypt_int(payload, priv_key.d, priv_key.n)
block = transform.int2bytes(encrypted, keylength)
return block
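# Example round-trip for sign()/verify() (a sketch; the key size and hash
# name are illustrative choices, not requirements of this module):
#   >>> import rsa
#   >>> (pub_key, priv_key) = rsa.newkeys(512)
#   >>> signature = sign('hello', priv_key, 'SHA-256')
#   >>> verify('hello', signature, pub_key)
#   True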
def verify(message, signature, pub_key):
'''Verifies that the signature matches the message.
The hash method is detected automatically from the signature.
:param message: the signed message. Can be an 8-bit string or a file-like
object. If ``message`` has a ``read()`` method, it is assumed to be a
file-like object.
:param signature: the signature block, as created with :py:func:`rsa.sign`.
:param pub_key: the :py:class:`rsa.PublicKey` of the person signing the message.
:raise VerificationError: when the signature doesn't match the message.
.. warning::
Never display the stack trace of a
:py:class:`rsa.pkcs1.VerificationError` exception. It shows where in
the code the exception occurred, and thus leaks information about the
key. It's only a tiny bit of information, but every bit makes cracking
the keys easier.
'''
blocksize = common.byte_size(pub_key.n)
encrypted = transform.bytes2int(signature)
decrypted = core.decrypt_int(encrypted, pub_key.e, pub_key.n)
clearsig = transform.int2bytes(decrypted, blocksize)
# If we can't find the signature marker, verification failed.
if clearsig[0:2] != b('\x00\x01'):
raise VerificationError('Verification failed')
# Find the 00 separator between the padding and the payload
try:
sep_idx = clearsig.index(b('\x00'), 2)
except ValueError:
raise VerificationError('Verification failed')
# Get the hash and the hash method
(method_name, signature_hash) = _find_method_hash(clearsig[sep_idx+1:])
message_hash = _hash(message, method_name)
# Compare the real hash to the hash in the signature
if message_hash != signature_hash:
raise VerificationError('Verification failed')
return True
def _hash(message, method_name):
'''Returns the message digest.
:param message: the signed message. Can be an 8-bit string or a file-like
object. If ``message`` has a ``read()`` method, it is assumed to be a
file-like object.
:param method_name: the hash method, must be a key of
:py:const:`HASH_METHODS`.
'''
if method_name not in HASH_METHODS:
raise ValueError('Invalid hash method: %s' % method_name)
method = HASH_METHODS[method_name]
hasher = method()
if hasattr(message, 'read') and hasattr(message.read, '__call__'):
# read as 1K blocks
for block in varblock.yield_fixedblocks(message, 1024):
hasher.update(block)
else:
# hash the message object itself.
hasher.update(message)
return hasher.digest()
def _find_method_hash(method_hash):
'''Finds the hash method and the hash itself.
:param method_hash: ASN1 code for the hash method concatenated with the
hash itself.
:return: tuple (method, hash) where ``method`` is the used hash method, and
``hash`` is the hash itself.
    :raise VerificationError: when the hash method cannot be found
'''
for (hashname, asn1code) in HASH_ASN1.items():
if not method_hash.startswith(asn1code):
continue
return (hashname, method_hash[len(asn1code):])
raise VerificationError('Verification failed')
__all__ = ['encrypt', 'decrypt', 'sign', 'verify',
'DecryptionError', 'VerificationError', 'CryptoError']
if __name__ == '__main__':
print('Running doctests 1000x or until failure')
import doctest
for count in range(1000):
(failures, tests) = doctest.testmod()
if failures:
break
if count and count % 100 == 0:
print('%i times' % count)
print('Doctests done')
|
mit
| 2,133,823,892,082,757,000 | -7,503,151,384,398,389,000 | 32.680307 | 97 | 0.644848 | false |
higgintop/hca_code_project
|
node_modules/grunt-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/win_tool.py
|
379
|
11640
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions for Windows builds.
These functions are executed via gyp-win-tool when using the ninja generator.
"""
import os
import re
import shutil
import subprocess
import string
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# A regex matching an argument corresponding to the output filename passed to
# link.exe.
_LINK_EXE_OUT_ARG = re.compile('/OUT:(?P<out>.+)$', re.IGNORECASE)
def main(args):
executor = WinTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class WinTool(object):
"""This class performs all the Windows tooling steps. The methods can either
be executed directly, or dispatched from an argument list."""
def _UseSeparateMspdbsrv(self, env, args):
"""Allows to use a unique instance of mspdbsrv.exe per linker instead of a
shared one."""
if len(args) < 1:
raise Exception("Not enough arguments")
if args[0] != 'link.exe':
return
# Use the output filename passed to the linker to generate an endpoint name
# for mspdbsrv.exe.
endpoint_name = None
for arg in args:
m = _LINK_EXE_OUT_ARG.match(arg)
if m:
endpoint_name = '%s_%d' % (m.group('out'), os.getpid())
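        # e.g. (illustrative) '/OUT:chrome.dll' in a linker process with pid
        # 1234 yields the endpoint name 'chrome.dll_1234'.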
break
if endpoint_name is None:
return
# Adds the appropriate environment variable. This will be read by link.exe
# to know which instance of mspdbsrv.exe it should connect to (if it's
# not set then the default endpoint is used).
env['_MSPDBSRV_ENDPOINT_'] = endpoint_name
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like recursive-mirror to RecursiveMirror."""
return name_string.title().replace('-', '')
def _GetEnv(self, arch):
"""Gets the saved environment from a file for a given architecture."""
# The environment is saved as an "environment block" (see CreateProcess
# and msvs_emulation for details). We convert to a dict here.
# Drop last 2 NULs, one for list terminator, one for trailing vs. separator.
pairs = open(arch).read()[:-2].split('\0')
kvs = [item.split('=', 1) for item in pairs]
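    # For example (illustrative), an environment block of
    # 'PATH=C:\\tools\x00TMP=C:\\tmp\x00\x00' is parsed into
    # {'PATH': 'C:\\tools', 'TMP': 'C:\\tmp'}.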
return dict(kvs)
def ExecStamp(self, path):
"""Simple stamp command."""
open(path, 'w').close()
def ExecRecursiveMirror(self, source, dest):
"""Emulation of rm -rf out && cp -af in out."""
if os.path.exists(dest):
if os.path.isdir(dest):
shutil.rmtree(dest)
else:
os.unlink(dest)
if os.path.isdir(source):
shutil.copytree(source, dest)
else:
shutil.copy2(source, dest)
def ExecLinkWrapper(self, arch, use_separate_mspdbsrv, *args):
"""Filter diagnostic output from link that looks like:
' Creating library ui.dll.lib and object ui.dll.exp'
This happens when there are exports from the dll or exe.
"""
env = self._GetEnv(arch)
if use_separate_mspdbsrv == 'True':
self._UseSeparateMspdbsrv(env, args)
link = subprocess.Popen(args,
shell=True,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, _ = link.communicate()
for line in out.splitlines():
if not line.startswith(' Creating library '):
print line
return link.returncode
def ExecLinkWithManifests(self, arch, embed_manifest, out, ldcmd, resname,
mt, rc, intermediate_manifest, *manifests):
"""A wrapper for handling creating a manifest resource and then executing
a link command."""
# The 'normal' way to do manifests is to have link generate a manifest
# based on gathering dependencies from the object files, then merge that
# manifest with other manifests supplied as sources, convert the merged
# manifest to a resource, and then *relink*, including the compiled
# version of the manifest resource. This breaks incremental linking, and
# is generally overly complicated. Instead, we merge all the manifests
# provided (along with one that includes what would normally be in the
# linker-generated one, see msvs_emulation.py), and include that into the
# first and only link. We still tell link to generate a manifest, but we
# only use that to assert that our simpler process did not miss anything.
variables = {
'python': sys.executable,
'arch': arch,
'out': out,
'ldcmd': ldcmd,
'resname': resname,
'mt': mt,
'rc': rc,
'intermediate_manifest': intermediate_manifest,
'manifests': ' '.join(manifests),
}
add_to_ld = ''
if manifests:
subprocess.check_call(
'%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
'-manifest %(manifests)s -out:%(out)s.manifest' % variables)
if embed_manifest == 'True':
subprocess.check_call(
'%(python)s gyp-win-tool manifest-to-rc %(arch)s %(out)s.manifest'
' %(out)s.manifest.rc %(resname)s' % variables)
subprocess.check_call(
'%(python)s gyp-win-tool rc-wrapper %(arch)s %(rc)s '
'%(out)s.manifest.rc' % variables)
add_to_ld = ' %(out)s.manifest.res' % variables
subprocess.check_call(ldcmd + add_to_ld)
# Run mt.exe on the theoretically complete manifest we generated, merging
# it with the one the linker generated to confirm that the linker
# generated one does not add anything. This is strictly unnecessary for
# correctness, it's only to verify that e.g. /MANIFESTDEPENDENCY was not
# used in a #pragma comment.
if manifests:
# Merge the intermediate one with ours to .assert.manifest, then check
# that .assert.manifest is identical to ours.
subprocess.check_call(
'%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
'-manifest %(out)s.manifest %(intermediate_manifest)s '
'-out:%(out)s.assert.manifest' % variables)
assert_manifest = '%(out)s.assert.manifest' % variables
our_manifest = '%(out)s.manifest' % variables
# Load and normalize the manifests. mt.exe sometimes removes whitespace,
# and sometimes doesn't unfortunately.
with open(our_manifest, 'rb') as our_f:
with open(assert_manifest, 'rb') as assert_f:
our_data = our_f.read().translate(None, string.whitespace)
assert_data = assert_f.read().translate(None, string.whitespace)
if our_data != assert_data:
os.unlink(out)
def dump(filename):
sys.stderr.write('%s\n-----\n' % filename)
with open(filename, 'rb') as f:
sys.stderr.write(f.read() + '\n-----\n')
dump(intermediate_manifest)
dump(our_manifest)
dump(assert_manifest)
sys.stderr.write(
'Linker generated manifest "%s" added to final manifest "%s" '
'(result in "%s"). '
'Were /MANIFEST switches used in #pragma statements? ' % (
intermediate_manifest, our_manifest, assert_manifest))
return 1
def ExecManifestWrapper(self, arch, *args):
"""Run manifest tool with environment set. Strip out undesirable warning
(some XML blocks are recognized by the OS loader, but not the manifest
tool)."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if line and 'manifest authoring warning 81010002' not in line:
print line
return popen.returncode
def ExecManifestToRc(self, arch, *args):
"""Creates a resource file pointing a SxS assembly manifest.
|args| is tuple containing path to resource file, path to manifest file
and resource name which can be "1" (for executables) or "2" (for DLLs)."""
manifest_path, resource_path, resource_name = args
with open(resource_path, 'wb') as output:
output.write('#include <windows.h>\n%s RT_MANIFEST "%s"' % (
resource_name,
os.path.abspath(manifest_path).replace('\\', '/')))
def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl,
*flags):
"""Filter noisy filenames output from MIDL compile step that isn't
quietable via command line flags.
"""
args = ['midl', '/nologo'] + list(flags) + [
'/out', outdir,
'/tlb', tlb,
'/h', h,
'/dlldata', dlldata,
'/iid', iid,
'/proxy', proxy,
idl]
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
# Filter junk out of stdout, and write filtered versions. Output we want
# to filter is pairs of lines that look like this:
# Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl
# objidl.idl
lines = out.splitlines()
prefix = 'Processing '
processing = set(os.path.basename(x) for x in lines if x.startswith(prefix))
for line in lines:
if not line.startswith(prefix) and line not in processing:
print line
return popen.returncode
def ExecAsmWrapper(self, arch, *args):
"""Filter logo banner from invocations of asm.exe."""
env = self._GetEnv(arch)
# MSVS doesn't assemble x64 asm files.
if arch == 'environment.x64':
return 0
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if (not line.startswith('Copyright (C) Microsoft Corporation') and
not line.startswith('Microsoft (R) Macro Assembler') and
not line.startswith(' Assembling: ') and
line):
print line
return popen.returncode
def ExecRcWrapper(self, arch, *args):
"""Filter logo banner from invocations of rc.exe. Older versions of RC
don't support the /nologo flag."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and
not line.startswith('Copyright (C) Microsoft Corporation') and
line):
print line
return popen.returncode
def ExecActionWrapper(self, arch, rspfile, *dir):
"""Runs an action command line from a response file using the environment
for |arch|. If |dir| is supplied, use that as the working directory."""
env = self._GetEnv(arch)
# TODO(scottmg): This is a temporary hack to get some specific variables
# through to actions that are set after gyp-time. http://crbug.com/333738.
for k, v in os.environ.iteritems():
if k not in env:
env[k] = v
args = open(rspfile).read()
dir = dir[0] if dir else None
return subprocess.call(args, shell=True, env=env, cwd=dir)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
mit
| 6,225,329,584,203,075,000 | -8,329,206,813,851,568,000 | 39 | 80 | 0.638746 | false |
joariasl/odoo
|
addons/account_check_writing/report/check_print.py
|
320
|
2943
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class report_print_check(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(report_print_check, self).__init__(cr, uid, name, context)
self.number_lines = 0
self.number_add = 0
self.localcontext.update({
'time': time,
'get_lines': self.get_lines,
'fill_stars' : self.fill_stars,
})
def fill_stars(self, amount):
if len(amount) < 100:
stars = 100 - len(amount)
return ' '.join([amount,'*'*stars])
        else:
            return amount
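    # e.g. (illustrative) fill_stars('1,234.56') returns '1,234.56' followed
    # by a space and 92 '*' characters, so the printed amount cannot be
    # extended with extra digits.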
def get_lines(self, voucher_lines):
result = []
self.number_lines = len(voucher_lines)
        # Print up to 10 lines, padding the remainder with empty rows so the
        # printed check stub always has 10 entries.
        for i in range(0, 10):
if i < self.number_lines:
res = {
'date_due' : voucher_lines[i].date_due,
'name' : voucher_lines[i].name,
'amount_original' : voucher_lines[i].amount_original and voucher_lines[i].amount_original or False,
'amount_unreconciled' : voucher_lines[i].amount_unreconciled and voucher_lines[i].amount_unreconciled or False,
'amount' : voucher_lines[i].amount and voucher_lines[i].amount or False,
}
            else:
                res = {
                    'date_due' : False,
                    'name' : False,
                    'amount_original' : False,
                    'amount_unreconciled' : False,
                    'amount' : False,
                }
result.append(res)
return result
class report_check(osv.AbstractModel):
_name = 'report.account_check_writing.report_check'
_inherit = 'report.abstract_report'
_template = 'account_check_writing.report_check'
_wrapped_report_class = report_print_check
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| 2,859,992,749,472,326,700 | 1,850,436,692,894,018,600 | 38.24 | 131 | 0.56473 | false |
anantb/confer
|
server/settings.py
|
1
|
5344
|
# Django settings for confer project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'confer', # Or path to database file if using sqlite3.
'USER': 'root', # Not used with sqlite3.
'PASSWORD': 'koob', # Not used with sqlite3.
'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '5432', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'gb04mossx%*@tqvjhl3&00=4fv!bsj*4ze9+x7xx5v6m*5l5_*'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'server.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'server.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
#'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'server',
'south',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# server_settings
try:
from server_settings import *
USE_X_FORWARDED_HOST = True
except ImportError:
pass
|
mit
| -3,931,169,508,310,681,000 | 2,680,384,264,666,980,000 | 31.987654 | 109 | 0.682635 | false |
nmercier/linux-cross-gcc
|
linux/lib/python2.7/encodings/cp855.py
|
593
|
34106
|
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP855.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp855',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
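### Usage example (illustrative; 0xea maps to CYRILLIC CAPITAL LETTER ZHE):
#   >>> u'\u0416'.encode('cp855')
#   '\xea'
#   >>> '\xea'.decode('cp855')
#   u'\u0416'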
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0452, # CYRILLIC SMALL LETTER DJE
0x0081: 0x0402, # CYRILLIC CAPITAL LETTER DJE
0x0082: 0x0453, # CYRILLIC SMALL LETTER GJE
0x0083: 0x0403, # CYRILLIC CAPITAL LETTER GJE
0x0084: 0x0451, # CYRILLIC SMALL LETTER IO
0x0085: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x0086: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x0087: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x0088: 0x0455, # CYRILLIC SMALL LETTER DZE
0x0089: 0x0405, # CYRILLIC CAPITAL LETTER DZE
0x008a: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x008b: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x008c: 0x0457, # CYRILLIC SMALL LETTER YI
0x008d: 0x0407, # CYRILLIC CAPITAL LETTER YI
0x008e: 0x0458, # CYRILLIC SMALL LETTER JE
0x008f: 0x0408, # CYRILLIC CAPITAL LETTER JE
0x0090: 0x0459, # CYRILLIC SMALL LETTER LJE
0x0091: 0x0409, # CYRILLIC CAPITAL LETTER LJE
0x0092: 0x045a, # CYRILLIC SMALL LETTER NJE
0x0093: 0x040a, # CYRILLIC CAPITAL LETTER NJE
0x0094: 0x045b, # CYRILLIC SMALL LETTER TSHE
0x0095: 0x040b, # CYRILLIC CAPITAL LETTER TSHE
0x0096: 0x045c, # CYRILLIC SMALL LETTER KJE
0x0097: 0x040c, # CYRILLIC CAPITAL LETTER KJE
0x0098: 0x045e, # CYRILLIC SMALL LETTER SHORT U
0x0099: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
0x009a: 0x045f, # CYRILLIC SMALL LETTER DZHE
0x009b: 0x040f, # CYRILLIC CAPITAL LETTER DZHE
0x009c: 0x044e, # CYRILLIC SMALL LETTER YU
0x009d: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x009e: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x009f: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
0x00a1: 0x0410, # CYRILLIC CAPITAL LETTER A
0x00a2: 0x0431, # CYRILLIC SMALL LETTER BE
0x00a3: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x00a4: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00a5: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x00a6: 0x0434, # CYRILLIC SMALL LETTER DE
0x00a7: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x00a8: 0x0435, # CYRILLIC SMALL LETTER IE
0x00a9: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x00aa: 0x0444, # CYRILLIC SMALL LETTER EF
0x00ab: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x00ac: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00ad: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00b6: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x00b7: 0x0438, # CYRILLIC SMALL LETTER I
0x00b8: 0x0418, # CYRILLIC CAPITAL LETTER I
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00be: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x043a, # CYRILLIC SMALL LETTER KA
0x00c7: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x043b, # CYRILLIC SMALL LETTER EL
0x00d1: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x00d2: 0x043c, # CYRILLIC SMALL LETTER EM
0x00d3: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x00d4: 0x043d, # CYRILLIC SMALL LETTER EN
0x00d5: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x00d6: 0x043e, # CYRILLIC SMALL LETTER O
0x00d7: 0x041e, # CYRILLIC CAPITAL LETTER O
0x00d8: 0x043f, # CYRILLIC SMALL LETTER PE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x00de: 0x044f, # CYRILLIC SMALL LETTER YA
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00e1: 0x0440, # CYRILLIC SMALL LETTER ER
0x00e2: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x00e3: 0x0441, # CYRILLIC SMALL LETTER ES
0x00e4: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x00e5: 0x0442, # CYRILLIC SMALL LETTER TE
0x00e6: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x00e7: 0x0443, # CYRILLIC SMALL LETTER U
0x00e8: 0x0423, # CYRILLIC CAPITAL LETTER U
0x00e9: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00ea: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x00eb: 0x0432, # CYRILLIC SMALL LETTER VE
0x00ec: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x00ed: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00ee: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x00ef: 0x2116, # NUMERO SIGN
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00f2: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x00f3: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00f4: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x00f5: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00f6: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x00f7: 0x044d, # CYRILLIC SMALL LETTER E
0x00f8: 0x042d, # CYRILLIC CAPITAL LETTER E
0x00f9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00fa: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x00fb: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00fc: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x00fd: 0x00a7, # SECTION SIGN
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\u0452' # 0x0080 -> CYRILLIC SMALL LETTER DJE
u'\u0402' # 0x0081 -> CYRILLIC CAPITAL LETTER DJE
u'\u0453' # 0x0082 -> CYRILLIC SMALL LETTER GJE
u'\u0403' # 0x0083 -> CYRILLIC CAPITAL LETTER GJE
u'\u0451' # 0x0084 -> CYRILLIC SMALL LETTER IO
u'\u0401' # 0x0085 -> CYRILLIC CAPITAL LETTER IO
u'\u0454' # 0x0086 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u0404' # 0x0087 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u0455' # 0x0088 -> CYRILLIC SMALL LETTER DZE
u'\u0405' # 0x0089 -> CYRILLIC CAPITAL LETTER DZE
u'\u0456' # 0x008a -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0406' # 0x008b -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0457' # 0x008c -> CYRILLIC SMALL LETTER YI
u'\u0407' # 0x008d -> CYRILLIC CAPITAL LETTER YI
u'\u0458' # 0x008e -> CYRILLIC SMALL LETTER JE
u'\u0408' # 0x008f -> CYRILLIC CAPITAL LETTER JE
u'\u0459' # 0x0090 -> CYRILLIC SMALL LETTER LJE
u'\u0409' # 0x0091 -> CYRILLIC CAPITAL LETTER LJE
u'\u045a' # 0x0092 -> CYRILLIC SMALL LETTER NJE
u'\u040a' # 0x0093 -> CYRILLIC CAPITAL LETTER NJE
u'\u045b' # 0x0094 -> CYRILLIC SMALL LETTER TSHE
u'\u040b' # 0x0095 -> CYRILLIC CAPITAL LETTER TSHE
u'\u045c' # 0x0096 -> CYRILLIC SMALL LETTER KJE
u'\u040c' # 0x0097 -> CYRILLIC CAPITAL LETTER KJE
u'\u045e' # 0x0098 -> CYRILLIC SMALL LETTER SHORT U
u'\u040e' # 0x0099 -> CYRILLIC CAPITAL LETTER SHORT U
u'\u045f' # 0x009a -> CYRILLIC SMALL LETTER DZHE
u'\u040f' # 0x009b -> CYRILLIC CAPITAL LETTER DZHE
u'\u044e' # 0x009c -> CYRILLIC SMALL LETTER YU
u'\u042e' # 0x009d -> CYRILLIC CAPITAL LETTER YU
u'\u044a' # 0x009e -> CYRILLIC SMALL LETTER HARD SIGN
u'\u042a' # 0x009f -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u0430' # 0x00a0 -> CYRILLIC SMALL LETTER A
u'\u0410' # 0x00a1 -> CYRILLIC CAPITAL LETTER A
u'\u0431' # 0x00a2 -> CYRILLIC SMALL LETTER BE
u'\u0411' # 0x00a3 -> CYRILLIC CAPITAL LETTER BE
u'\u0446' # 0x00a4 -> CYRILLIC SMALL LETTER TSE
u'\u0426' # 0x00a5 -> CYRILLIC CAPITAL LETTER TSE
u'\u0434' # 0x00a6 -> CYRILLIC SMALL LETTER DE
u'\u0414' # 0x00a7 -> CYRILLIC CAPITAL LETTER DE
u'\u0435' # 0x00a8 -> CYRILLIC SMALL LETTER IE
u'\u0415' # 0x00a9 -> CYRILLIC CAPITAL LETTER IE
u'\u0444' # 0x00aa -> CYRILLIC SMALL LETTER EF
u'\u0424' # 0x00ab -> CYRILLIC CAPITAL LETTER EF
u'\u0433' # 0x00ac -> CYRILLIC SMALL LETTER GHE
u'\u0413' # 0x00ad -> CYRILLIC CAPITAL LETTER GHE
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u0445' # 0x00b5 -> CYRILLIC SMALL LETTER HA
u'\u0425' # 0x00b6 -> CYRILLIC CAPITAL LETTER HA
u'\u0438' # 0x00b7 -> CYRILLIC SMALL LETTER I
u'\u0418' # 0x00b8 -> CYRILLIC CAPITAL LETTER I
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u0439' # 0x00bd -> CYRILLIC SMALL LETTER SHORT I
u'\u0419' # 0x00be -> CYRILLIC CAPITAL LETTER SHORT I
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u043a' # 0x00c6 -> CYRILLIC SMALL LETTER KA
u'\u041a' # 0x00c7 -> CYRILLIC CAPITAL LETTER KA
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa4' # 0x00cf -> CURRENCY SIGN
u'\u043b' # 0x00d0 -> CYRILLIC SMALL LETTER EL
u'\u041b' # 0x00d1 -> CYRILLIC CAPITAL LETTER EL
u'\u043c' # 0x00d2 -> CYRILLIC SMALL LETTER EM
u'\u041c' # 0x00d3 -> CYRILLIC CAPITAL LETTER EM
u'\u043d' # 0x00d4 -> CYRILLIC SMALL LETTER EN
u'\u041d' # 0x00d5 -> CYRILLIC CAPITAL LETTER EN
u'\u043e' # 0x00d6 -> CYRILLIC SMALL LETTER O
u'\u041e' # 0x00d7 -> CYRILLIC CAPITAL LETTER O
u'\u043f' # 0x00d8 -> CYRILLIC SMALL LETTER PE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u041f' # 0x00dd -> CYRILLIC CAPITAL LETTER PE
u'\u044f' # 0x00de -> CYRILLIC SMALL LETTER YA
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u042f' # 0x00e0 -> CYRILLIC CAPITAL LETTER YA
u'\u0440' # 0x00e1 -> CYRILLIC SMALL LETTER ER
u'\u0420' # 0x00e2 -> CYRILLIC CAPITAL LETTER ER
u'\u0441' # 0x00e3 -> CYRILLIC SMALL LETTER ES
u'\u0421' # 0x00e4 -> CYRILLIC CAPITAL LETTER ES
u'\u0442' # 0x00e5 -> CYRILLIC SMALL LETTER TE
u'\u0422' # 0x00e6 -> CYRILLIC CAPITAL LETTER TE
u'\u0443' # 0x00e7 -> CYRILLIC SMALL LETTER U
u'\u0423' # 0x00e8 -> CYRILLIC CAPITAL LETTER U
u'\u0436' # 0x00e9 -> CYRILLIC SMALL LETTER ZHE
u'\u0416' # 0x00ea -> CYRILLIC CAPITAL LETTER ZHE
u'\u0432' # 0x00eb -> CYRILLIC SMALL LETTER VE
u'\u0412' # 0x00ec -> CYRILLIC CAPITAL LETTER VE
u'\u044c' # 0x00ed -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u042c' # 0x00ee -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u2116' # 0x00ef -> NUMERO SIGN
u'\xad' # 0x00f0 -> SOFT HYPHEN
u'\u044b' # 0x00f1 -> CYRILLIC SMALL LETTER YERU
u'\u042b' # 0x00f2 -> CYRILLIC CAPITAL LETTER YERU
u'\u0437' # 0x00f3 -> CYRILLIC SMALL LETTER ZE
u'\u0417' # 0x00f4 -> CYRILLIC CAPITAL LETTER ZE
u'\u0448' # 0x00f5 -> CYRILLIC SMALL LETTER SHA
u'\u0428' # 0x00f6 -> CYRILLIC CAPITAL LETTER SHA
u'\u044d' # 0x00f7 -> CYRILLIC SMALL LETTER E
u'\u042d' # 0x00f8 -> CYRILLIC CAPITAL LETTER E
u'\u0449' # 0x00f9 -> CYRILLIC SMALL LETTER SHCHA
u'\u0429' # 0x00fa -> CYRILLIC CAPITAL LETTER SHCHA
u'\u0447' # 0x00fb -> CYRILLIC SMALL LETTER CHE
u'\u0427' # 0x00fc -> CYRILLIC CAPITAL LETTER CHE
u'\xa7' # 0x00fd -> SECTION SIGN
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a7: 0x00fd, # SECTION SIGN
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ad: 0x00f0, # SOFT HYPHEN
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x0401: 0x0085, # CYRILLIC CAPITAL LETTER IO
0x0402: 0x0081, # CYRILLIC CAPITAL LETTER DJE
0x0403: 0x0083, # CYRILLIC CAPITAL LETTER GJE
0x0404: 0x0087, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x0405: 0x0089, # CYRILLIC CAPITAL LETTER DZE
0x0406: 0x008b, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x0407: 0x008d, # CYRILLIC CAPITAL LETTER YI
0x0408: 0x008f, # CYRILLIC CAPITAL LETTER JE
0x0409: 0x0091, # CYRILLIC CAPITAL LETTER LJE
0x040a: 0x0093, # CYRILLIC CAPITAL LETTER NJE
0x040b: 0x0095, # CYRILLIC CAPITAL LETTER TSHE
0x040c: 0x0097, # CYRILLIC CAPITAL LETTER KJE
0x040e: 0x0099, # CYRILLIC CAPITAL LETTER SHORT U
0x040f: 0x009b, # CYRILLIC CAPITAL LETTER DZHE
0x0410: 0x00a1, # CYRILLIC CAPITAL LETTER A
0x0411: 0x00a3, # CYRILLIC CAPITAL LETTER BE
0x0412: 0x00ec, # CYRILLIC CAPITAL LETTER VE
0x0413: 0x00ad, # CYRILLIC CAPITAL LETTER GHE
0x0414: 0x00a7, # CYRILLIC CAPITAL LETTER DE
0x0415: 0x00a9, # CYRILLIC CAPITAL LETTER IE
0x0416: 0x00ea, # CYRILLIC CAPITAL LETTER ZHE
0x0417: 0x00f4, # CYRILLIC CAPITAL LETTER ZE
0x0418: 0x00b8, # CYRILLIC CAPITAL LETTER I
0x0419: 0x00be, # CYRILLIC CAPITAL LETTER SHORT I
0x041a: 0x00c7, # CYRILLIC CAPITAL LETTER KA
0x041b: 0x00d1, # CYRILLIC CAPITAL LETTER EL
0x041c: 0x00d3, # CYRILLIC CAPITAL LETTER EM
0x041d: 0x00d5, # CYRILLIC CAPITAL LETTER EN
0x041e: 0x00d7, # CYRILLIC CAPITAL LETTER O
0x041f: 0x00dd, # CYRILLIC CAPITAL LETTER PE
0x0420: 0x00e2, # CYRILLIC CAPITAL LETTER ER
0x0421: 0x00e4, # CYRILLIC CAPITAL LETTER ES
0x0422: 0x00e6, # CYRILLIC CAPITAL LETTER TE
0x0423: 0x00e8, # CYRILLIC CAPITAL LETTER U
0x0424: 0x00ab, # CYRILLIC CAPITAL LETTER EF
0x0425: 0x00b6, # CYRILLIC CAPITAL LETTER HA
0x0426: 0x00a5, # CYRILLIC CAPITAL LETTER TSE
0x0427: 0x00fc, # CYRILLIC CAPITAL LETTER CHE
0x0428: 0x00f6, # CYRILLIC CAPITAL LETTER SHA
0x0429: 0x00fa, # CYRILLIC CAPITAL LETTER SHCHA
0x042a: 0x009f, # CYRILLIC CAPITAL LETTER HARD SIGN
0x042b: 0x00f2, # CYRILLIC CAPITAL LETTER YERU
0x042c: 0x00ee, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x042d: 0x00f8, # CYRILLIC CAPITAL LETTER E
0x042e: 0x009d, # CYRILLIC CAPITAL LETTER YU
0x042f: 0x00e0, # CYRILLIC CAPITAL LETTER YA
0x0430: 0x00a0, # CYRILLIC SMALL LETTER A
0x0431: 0x00a2, # CYRILLIC SMALL LETTER BE
0x0432: 0x00eb, # CYRILLIC SMALL LETTER VE
0x0433: 0x00ac, # CYRILLIC SMALL LETTER GHE
0x0434: 0x00a6, # CYRILLIC SMALL LETTER DE
0x0435: 0x00a8, # CYRILLIC SMALL LETTER IE
0x0436: 0x00e9, # CYRILLIC SMALL LETTER ZHE
0x0437: 0x00f3, # CYRILLIC SMALL LETTER ZE
0x0438: 0x00b7, # CYRILLIC SMALL LETTER I
0x0439: 0x00bd, # CYRILLIC SMALL LETTER SHORT I
0x043a: 0x00c6, # CYRILLIC SMALL LETTER KA
0x043b: 0x00d0, # CYRILLIC SMALL LETTER EL
0x043c: 0x00d2, # CYRILLIC SMALL LETTER EM
0x043d: 0x00d4, # CYRILLIC SMALL LETTER EN
0x043e: 0x00d6, # CYRILLIC SMALL LETTER O
0x043f: 0x00d8, # CYRILLIC SMALL LETTER PE
0x0440: 0x00e1, # CYRILLIC SMALL LETTER ER
0x0441: 0x00e3, # CYRILLIC SMALL LETTER ES
0x0442: 0x00e5, # CYRILLIC SMALL LETTER TE
0x0443: 0x00e7, # CYRILLIC SMALL LETTER U
0x0444: 0x00aa, # CYRILLIC SMALL LETTER EF
0x0445: 0x00b5, # CYRILLIC SMALL LETTER HA
0x0446: 0x00a4, # CYRILLIC SMALL LETTER TSE
0x0447: 0x00fb, # CYRILLIC SMALL LETTER CHE
0x0448: 0x00f5, # CYRILLIC SMALL LETTER SHA
0x0449: 0x00f9, # CYRILLIC SMALL LETTER SHCHA
0x044a: 0x009e, # CYRILLIC SMALL LETTER HARD SIGN
0x044b: 0x00f1, # CYRILLIC SMALL LETTER YERU
0x044c: 0x00ed, # CYRILLIC SMALL LETTER SOFT SIGN
0x044d: 0x00f7, # CYRILLIC SMALL LETTER E
0x044e: 0x009c, # CYRILLIC SMALL LETTER YU
0x044f: 0x00de, # CYRILLIC SMALL LETTER YA
0x0451: 0x0084, # CYRILLIC SMALL LETTER IO
0x0452: 0x0080, # CYRILLIC SMALL LETTER DJE
0x0453: 0x0082, # CYRILLIC SMALL LETTER GJE
0x0454: 0x0086, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x0455: 0x0088, # CYRILLIC SMALL LETTER DZE
0x0456: 0x008a, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x0457: 0x008c, # CYRILLIC SMALL LETTER YI
0x0458: 0x008e, # CYRILLIC SMALL LETTER JE
0x0459: 0x0090, # CYRILLIC SMALL LETTER LJE
0x045a: 0x0092, # CYRILLIC SMALL LETTER NJE
0x045b: 0x0094, # CYRILLIC SMALL LETTER TSHE
0x045c: 0x0096, # CYRILLIC SMALL LETTER KJE
0x045e: 0x0098, # CYRILLIC SMALL LETTER SHORT U
0x045f: 0x009a, # CYRILLIC SMALL LETTER DZHE
0x2116: 0x00ef, # NUMERO SIGN
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
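### Illustrative note (an added assumption, not part of the original
### codec): tables like encoding_map are passed as the mapping argument
### to the codecs charmap helpers, e.g.
###   codecs.charmap_encode(u'\u0416', 'strict', encoding_map)
### returns ('\xea', 1), matching the 0x0416 -> 0x00ea entry above.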
|
bsd-3-clause
| -7,935,066,300,633,722,000 | 4,037,608,680,773,539,000 | 47.862464 | 97 | 0.608456 | false |
J861449197/edx-platform
|
common/test/acceptance/pages/studio/pagination.py
|
165
|
2099
|
"""
Mixin to include for Paginated container pages
"""
from selenium.webdriver.common.keys import Keys
class PaginatedMixin(object):
"""
Mixin class used for paginated page tests.
"""
def nav_disabled(self, position, arrows=('next', 'previous')):
"""
Verifies that pagination nav is disabled. Position can be 'top' or 'bottom'.
`top` is the header, `bottom` is the footer.
        To check a single arrow, pass an iterable with one element, 'next' or 'previous'.
"""
return all([
self.q(css='nav.%s * .%s-page-link.is-disabled' % (position, arrow))
for arrow in arrows
])
def move_back(self, position):
"""
        Clicks one of the back (previous-page) nav buttons. Position can be 'top' or 'bottom'.
"""
self.q(css='nav.%s * .previous-page-link' % position)[0].click()
self.wait_until_ready()
def move_forward(self, position):
"""
Clicks one of the forward nav buttons. Position can be 'top' or 'bottom'.
"""
self.q(css='nav.%s * .next-page-link' % position)[0].click()
self.wait_until_ready()
def go_to_page(self, number):
"""
Enter a number into the page number input field, and then try to navigate to it.
"""
page_input = self.q(css="#page-number-input")[0]
page_input.click()
page_input.send_keys(str(number))
page_input.send_keys(Keys.RETURN)
self.wait_until_ready()
def get_page_number(self):
"""
Returns the page number as the page represents it, in string form.
"""
return self.q(css="span.current-page")[0].get_attribute('innerHTML')
def check_page_unchanged(self, first_block_name):
"""
Used to make sure that a page has not transitioned after a bogus number is given.
"""
if not self.xblocks[0].name == first_block_name:
return False
if not self.q(css='#page-number-input')[0].get_attribute('value') == '':
return False
return True
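# Minimal usage sketch (illustrative; the page class below is an
# assumption, not part of this module). A concrete bok-choy page mixes
# PaginatedMixin in next to PageObject and supplies the hooks the mixin
# relies on: wait_until_ready() and (for check_page_unchanged) xblocks.
from bok_choy.page_object import PageObject  # assumed available in tests
class ExamplePaginatedPage(PaginatedMixin, PageObject):
    url = None
    def is_browser_on_page(self):
        return self.q(css='body.view-library').present
    def wait_until_ready(self):
        # Hook called by the mixin after every nav action.
        self.wait_for_ajax()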
|
agpl-3.0
| 1,574,253,035,224,031,000 | 4,804,772,684,498,932,000 | 32.854839 | 98 | 0.58647 | false |
SergeyPirogov/selene
|
selene/conditions.py
|
1
|
9603
|
from abc import ABCMeta, abstractmethod
import operator
from future.utils import with_metaclass, lmap
from selene.abctypes.conditions import IEntityCondition
from selene.abctypes.webdriver import IWebDriver
from selene.abctypes.webelement import IWebElement
from selene.exceptions import ConditionMismatchException
class OrNotToBe(IEntityCondition):
def description(self):
return self.__class__.__name__
def fn(self, entity):
return entity
or_not_to_be = OrNotToBe()
class Not(IEntityCondition):
def __init__(self, condition):
# type: (IEntityCondition) -> None
self._condition = condition
def description(self):
return 'not {}'.format(self._condition.description())
def fn(self, entity):
try:
self._condition.fn(entity)
except Exception as reason:
return reason
raise ConditionMismatchException() # todo: add more information to message
not_ = Not
# *** WebDriver Conditions ***
class WebDriverCondition(with_metaclass(ABCMeta, IEntityCondition)):
@abstractmethod
def fn(self, webdriver):
pass
def description(self):
return self.__class__.__name__
class JsReturnedTrue(WebDriverCondition):
def __init__(self, script_to_return_bool):
self.script = script_to_return_bool
def fn(self, webdriver):
# type: (IWebDriver) -> bool
result = webdriver.execute_script(self.script)
if not result:
raise ConditionMismatchException(
expected='''script: {script}
\t\t to return: true'''.format(script=self.script),
actual='''returned: {result}'''.format(result=result))
js_returned_true = JsReturnedTrue
class Title(WebDriverCondition):
def __init__(self, exact_value):
self.expected = exact_value
def fn(self, webdriver):
# type: (IWebDriver) -> bool
actual = webdriver.title
if not self.expected == actual:
raise ConditionMismatchException(
expected=self.expected,
actual=actual)
title = Title
class TitleContaining(WebDriverCondition):
def __init__(self, partial_value):
self.expected = partial_value
def fn(self, webdriver):
# type: (IWebDriver) -> bool
actual = webdriver.title
        if self.expected not in actual:
raise ConditionMismatchException(
expected=self.expected,
actual=actual)
title_containing = TitleContaining
class Url(WebDriverCondition):
def __init__(self, exact_value):
self.expected = exact_value
def fn(self, webdriver):
actual = webdriver.current_url
if not self.expected == actual:
raise ConditionMismatchException(
expected=self.expected,
actual=actual)
url = Url
class UrlContaining(WebDriverCondition):
def __init__(self, partial_value):
self.expected = partial_value
def fn(self, webdriver):
actual = webdriver.current_url
        if self.expected not in actual:
raise ConditionMismatchException(
message="Page url doesn't contain {}".format(self.expected),
expected=self.expected,
actual=actual)
url_containing = UrlContaining
# *** Element Conditions ***
class ElementCondition(with_metaclass(ABCMeta, IEntityCondition)):
def description(self):
return self.__class__.__name__
def fn(self, element):
# type: (SeleneElement) -> IWebElement
return self.match(element.get_actual_webelement())
@abstractmethod
def match(self, webelement):
# type: (IWebElement) -> IWebElement
pass
def is_matched(condition, webelement):
# type: (ElementCondition, IWebElement) -> bool
try:
condition.match(webelement)
return True
except Exception:
return False
class Visible(ElementCondition):
def match(self, webelement):
        # type: (IWebElement) -> IWebElement
if not webelement.is_displayed():
raise ConditionMismatchException()
return webelement
visible = Visible()
appear = visible
class Hidden(ElementCondition):
def match(self, webelement):
        # type: (IWebElement) -> IWebElement
if webelement.is_displayed():
raise ConditionMismatchException()
return webelement
hidden = Hidden()
disappear = hidden
# todo: consider removing this condition... because it can confuse somebody...
# it's actually kind of "pseudo-clickable": the actual clickability depends on js events...
# todo: implement as and_(displayed, enabled)
class Clickable(ElementCondition):
def match(self, webelement):
# type: (IWebElement) -> IWebElement
actual_displayed = webelement.is_displayed()
actual_enabled = webelement.is_enabled()
if not (actual_displayed and actual_enabled):
raise ConditionMismatchException(
expected='displayed and enabled',
actual='displayed: {displayed}, enabled: {enabled}'.format(
displayed=actual_displayed, enabled=actual_enabled))
return webelement
clickable = Clickable()
class Enabled(ElementCondition):
def match(self, webelement):
        # type: (IWebElement) -> IWebElement
if not webelement.is_enabled():
raise ConditionMismatchException()
return webelement
enabled = Enabled()
class InDom(ElementCondition):
"""
    Checks that the element exists in the DOM.
"""
def match(self, webelement):
return webelement
in_dom = InDom()
exist = in_dom
class Text(ElementCondition):
def __init__(self, expected_text):
self.expected_text = expected_text
def match(self, webelement):
actual_text = webelement.text
if self.expected_text not in actual_text:
raise ConditionMismatchException(expected=self.expected_text, actual=actual_text)
return webelement
text = Text
class ExactText(ElementCondition):
def __init__(self, expected_text):
self.expected_text = expected_text
def match(self, webelement):
actual_text = webelement.text
if not self.expected_text == actual_text:
raise ConditionMismatchException(expected=self.expected_text, actual=actual_text)
return webelement
exact_text = ExactText
class CssClass(ElementCondition):
def __init__(self, expected):
self.expected = expected
def match(self, webelement):
actual = webelement.get_attribute("class")
if self.expected not in actual.split():
raise ConditionMismatchException(expected=self.expected, actual='class attribute: {}'.format(actual))
return webelement
css_class = CssClass
class Attribute(ElementCondition):
def __init__(self, name, value):
self.name = name
self.value = value
def match(self, webelement):
actual = webelement.get_attribute(self.name)
if not self.value == actual:
raise ConditionMismatchException(
expected='{name}="{value}"'.format(name=self.name, value=self.value),
actual='{name}="{value}"'.format(name=self.name, value=actual))
return webelement
attribute = Attribute
def value(val):
return Attribute('value', val)
blank = value('')
# *** Collection Conditions ***
class CollectionCondition(with_metaclass(ABCMeta, IEntityCondition)):
def description(self):
return self.__class__.__name__
def fn(self, elements):
# type: (SeleneCollection) -> List[IWebElement]
return self.match(elements.get_actual_webelements())
@abstractmethod
def match(self, webelements):
# type: (List[IWebElement]) -> List[IWebElement]
pass
class Texts(CollectionCondition):
def __init__(self, *expected):
self.expected = expected
def match(self, webelements):
actual = [it.text for it in webelements]
if not (len(actual) == len(self.expected) and all(lmap(operator.contains, actual, self.expected))):
raise ConditionMismatchException(
expected=self.expected,
actual=actual)
return webelements
texts = Texts
class ExactTexts(CollectionCondition):
def __init__(self, *expected):
self.expected = expected
def match(self, webelements):
actual = [it.text for it in webelements]
if not (len(actual) == len(self.expected) and all(lmap(operator.eq, actual, self.expected))):
raise ConditionMismatchException(
expected=self.expected,
actual=actual)
return webelements
exact_texts = ExactTexts
class Size(CollectionCondition):
def __init__(self, expected):
self.expected = expected
def match(self, webelements):
actual = len(webelements)
if not actual == self.expected:
raise ConditionMismatchException(
expected=self.expected,
actual=actual)
return webelements
size = Size
empty = size(0)
class SizeAtLeast(CollectionCondition):
def __init__(self, expected):
self.expected = expected
def match(self, webelements):
actual = len(webelements)
if not actual >= self.expected:
raise ConditionMismatchException(
expected='>= {}'.format(self.expected),
actual=actual)
return webelements
size_at_least = SizeAtLeast
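# Illustrative sketch (not part of the original module): a condition's
# match() either returns the entity or raises ConditionMismatchException,
# so conditions compose and can be probed without a browser. The fake
# element below is an assumption for demonstration only.
class _FakeWebElement(object):
    text = 'hello'
    def is_displayed(self):
        return True
    def is_enabled(self):
        return True
    def get_attribute(self, name):
        return 'primary'
def _demo():
    element = _FakeWebElement()
    visible.match(element)            # passes: element is displayed
    clickable.match(element)          # passes: displayed and enabled
    text('hell').match(element)       # passes: substring of 'hello'
    css_class('primary').match(element)  # passes: class attribute matches
    assert not is_matched(exact_text('bye'), element)  # mismatch -> False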
|
mit
| 1,858,029,448,760,103,000 | 5,831,646,033,663,210,000 | 24.956757 | 113 | 0.640633 | false |
unindented/streamcode
|
client/static/jsrepl/extern/python/unclosured/lib/python2.7/mutex.py
|
243
|
1877
|
"""Mutual exclusion -- for use with module sched
A mutex has two pieces of state -- a 'locked' bit and a queue.
When the mutex is not locked, the queue is empty.
Otherwise, the queue contains 0 or more (function, argument) pairs
representing functions (or methods) waiting to acquire the lock.
When the mutex is unlocked while the queue is not empty,
the first queue entry is removed and its function(argument) pair called,
implying it now has the lock.
Of course, no multi-threading is implied -- hence the funny interface
for lock, where a function is called once the lock is acquired.
"""
from warnings import warnpy3k
warnpy3k("the mutex module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
from collections import deque
class mutex:
def __init__(self):
"""Create a new mutex -- initially unlocked."""
self.locked = False
self.queue = deque()
def test(self):
"""Test the locked bit of the mutex."""
return self.locked
def testandset(self):
"""Atomic test-and-set -- grab the lock if it is not set,
return True if it succeeded."""
if not self.locked:
self.locked = True
return True
else:
return False
def lock(self, function, argument):
"""Lock a mutex, call the function with supplied argument
when it is acquired. If the mutex is already locked, place
function and argument in the queue."""
if self.testandset():
function(argument)
else:
self.queue.append((function, argument))
def unlock(self):
"""Unlock a mutex. If the queue is not empty, call the next
function with its argument."""
if self.queue:
function, argument = self.queue.popleft()
function(argument)
else:
self.locked = False
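# Minimal usage sketch (illustrative, not part of the original module):
# a second lock() call is queued until the current holder unlocks.
def _demo():
    m = mutex()
    def report(who):
        print 'lock acquired by', who
    m.lock(report, 'first')    # unlocked, so report('first') runs at once
    m.lock(report, 'second')   # locked: queued behind the current holder
    m.unlock()                 # dequeues and runs report('second')
    m.unlock()                 # queue empty: mutex is unlocked again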
|
mit
| -4,671,729,963,612,558,000 | 4,492,222,547,201,484,300 | 33.127273 | 73 | 0.646777 | false |
anaruse/chainer
|
tests/chainer_tests/functions_tests/math_tests/test_sparse_matmul.py
|
2
|
10645
|
import unittest
import numpy
import chainer
from chainer import cuda
import chainer.functions as F
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer import utils
from chainer.utils import type_check
_scipy_available = True
try:
from scipy import sparse # NOQA
except ImportError:
_scipy_available = False
def _setup_tensor(_min, _max, shape, dtype, threshold=None):
    # Draws uniformly from [_min, _max); entries below the optional
    # threshold are zeroed -- this is how the tests build sparse operands
    # (a .75 threshold on a [.5, 1) draw zeroes roughly half the entries).
    y = numpy.random.uniform(_min, _max, shape).astype(dtype)
    if threshold is not None:
        y[y < threshold] = 0
    return y
@testing.parameterize(*testing.product_dict(
[
{'m': 2, 'n': 3, 'k': 4},
{'m': 3, 'n': 4, 'k': 2},
],
[
{'transa': False}, {'transa': True},
],
[
{'transb': False}, {'transb': True},
],
[
{'nbatch': 0}, {'nbatch': 1}, {'nbatch': 4},
],
[
{'a_dtype': numpy.float16},
{'a_dtype': numpy.float32},
{'a_dtype': numpy.float64},
],
[
{'b_dtype': numpy.float16},
{'b_dtype': numpy.float32},
{'b_dtype': numpy.float64},
]
))
class TestCooMatMul(unittest.TestCase):
def setUp(self):
a_shape = self._set_shape([self.m, self.k], self.transa)
b_shape = self._set_shape([self.k, self.n], self.transb)
c_shape = self._set_shape([self.m, self.n], False)
self.c_dtype = numpy.result_type(self.a_dtype, self.b_dtype)
self.a = _setup_tensor(.5, 1, a_shape, self.a_dtype, .75)
self.b = _setup_tensor(.5, 1, b_shape, self.b_dtype, .75)
self.gc = _setup_tensor(-1, 1, c_shape, self.c_dtype)
self.gga = _setup_tensor(.5, 1, a_shape, self.a_dtype)
self.gga[numpy.where(self.a < .75)] = 0
self.ggb = _setup_tensor(.5, 1, b_shape, self.b_dtype)
self.ggb[numpy.where(self.b < .75)] = 0
self.forward_answer = self._matmul(self.a, self.b)
def _set_shape(self, shape, trans):
if trans:
shape = [shape[1], shape[0]]
if self.nbatch > 0:
shape = [self.nbatch, shape[0], shape[1]]
return shape
def _matmul(self, a, b):
if self.transa:
a = a.swapaxes(-1, -2)
if self.transb:
b = b.swapaxes(-1, -2)
if hasattr(numpy, 'matmul'):
return numpy.matmul(a, b)
elif a.ndim == 2:
return numpy.dot(a, b)
else:
return numpy.einsum('...ij,...jk->...ik', a, b)
#
# SPDN: sparse A * dense B
#
def check_SPDN_forward(self, a_data, b_data, atol=1e-4, rtol=1e-5):
sp_a = utils.to_coo(a_data, requires_grad=True)
b = chainer.Variable(b_data)
c = F.sparse_matmul(sp_a, b, transa=self.transa, transb=self.transb)
testing.assert_allclose(self.forward_answer, c.data, atol, rtol)
def test_SPDN_sparse_matmul_forward_cpu(self):
if not _scipy_available:
return
if self.a_dtype == numpy.float16 or self.b_dtype == numpy.float16:
self.check_SPDN_forward(self.a, self.b, atol=1e-3, rtol=1e-3)
else:
self.check_SPDN_forward(self.a, self.b)
@attr.gpu
def test_SPDN_sparse_matmul_forward_gpu(self):
a = cuda.to_gpu(self.a)
b = cuda.to_gpu(self.b)
if self.a_dtype == numpy.float16 or self.b_dtype == numpy.float16:
self.check_SPDN_forward(a, b, atol=1e-3, rtol=1e-3)
else:
self.check_SPDN_forward(a, b)
def check_SPDN_backward(self, a_data, b_data, c_grad, atol, rtol):
sp_a = utils.to_coo(a_data)
func = F.math.sparse_matmul.CooMatMul(
sp_a.row, sp_a.col, sp_a.shape,
transa=self.transa, transb=self.transb, transc=False)
def op(a, b):
return func.apply((a, b))[0]
gradient_check.check_backward(
op, (sp_a.data.data, b_data), c_grad, atol=atol, rtol=rtol,
dtype=numpy.float32)
def test_SPDN_sparse_matmul_backward_cpu(self):
if not _scipy_available:
return
self.check_SPDN_backward(
self.a, self.b, self.gc, atol=1e-2, rtol=1e-2)
@attr.gpu
def test_SPDN_sparse_matmul_backward_gpu(self):
self.check_SPDN_backward(
cuda.to_gpu(self.a), cuda.to_gpu(self.b),
cuda.to_gpu(self.gc), atol=1e-2, rtol=1e-2)
def check_SPDN_double_backward(
self, a_data, b_data, c_grad, a_grad_grad, b_grad_grad,
atol, rtol):
sp_a = utils.to_coo(a_data)
sp_gga = utils.to_coo(a_grad_grad)
func = F.math.sparse_matmul.CooMatMul(
sp_a.row, sp_a.col, sp_a.shape,
transa=self.transa, transb=self.transb, transc=False)
def op(a, b):
return func.apply((a, b))[0]
gradient_check.check_double_backward(
op, (sp_a.data.data, b_data),
c_grad, (sp_gga.data.data, b_grad_grad),
atol=atol, rtol=rtol, dtype=numpy.float32)
def test_SPDN_sparse_matmul_double_backward_cpu(self):
if not _scipy_available:
return
self.check_SPDN_double_backward(
self.a, self.b, self.gc, self.gga, self.ggb,
atol=1e-2, rtol=1e-2)
@attr.gpu
def test_SPDN_sparse_matmul_double_backward_gpu(self):
self.check_SPDN_double_backward(
cuda.to_gpu(self.a), cuda.to_gpu(self.b),
cuda.to_gpu(self.gc), cuda.to_gpu(self.gga),
cuda.to_gpu(self.ggb), atol=1e-2, rtol=1e-2)
#
# DNSP: dense A * sparse B
#
def check_DNSP_forward(self, a_data, b_data, atol=1e-4, rtol=1e-5):
a = chainer.Variable(a_data)
sp_b = utils.to_coo(b_data, requires_grad=True)
c = F.sparse_matmul(a, sp_b, transa=self.transa, transb=self.transb)
testing.assert_allclose(self.forward_answer, c.data, atol, rtol)
def test_DNSP_sparse_matmul_forward_cpu(self):
if not _scipy_available:
return
if self.a_dtype == numpy.float16 or self.b_dtype == numpy.float16:
self.check_DNSP_forward(self.a, self.b, atol=1e-3, rtol=1e-3)
else:
self.check_DNSP_forward(self.a, self.b)
@attr.gpu
def test_DNSP_sparse_matmul_forward_gpu(self):
a = cuda.to_gpu(self.a)
b = cuda.to_gpu(self.b)
if self.a_dtype == numpy.float16 or self.b_dtype == numpy.float16:
self.check_DNSP_forward(a, b, atol=1e-3, rtol=1e-3)
else:
self.check_DNSP_forward(a, b)
def check_DNSP_backward(self, a_data, b_data, c_grad, atol, rtol):
sp_b = utils.to_coo(b_data)
func = F.math.sparse_matmul.CooMatMul(
sp_b.row, sp_b.col, sp_b.shape,
transa=not self.transb, transb=not self.transa, transc=True)
def op(b, a):
return func.apply((b, a))[0]
gradient_check.check_backward(
op, (sp_b.data.data, a_data), c_grad, atol=atol, rtol=rtol,
dtype=numpy.float32)
def test_DNSP_tensordot_backward_cpu(self):
if not _scipy_available:
return
self.check_DNSP_backward(
self.a, self.b, self.gc, atol=1e-2, rtol=1e-2)
@attr.gpu
def test_DNSP_tensordot_backward_gpu(self):
self.check_DNSP_backward(
cuda.to_gpu(self.a), cuda.to_gpu(self.b),
cuda.to_gpu(self.gc), atol=1e-2, rtol=1e-2)
def check_DNSP_double_backward(
self, a_data, b_data, c_grad, a_grad_grad, b_grad_grad,
atol, rtol):
sp_b = utils.to_coo(b_data)
sp_ggb = utils.to_coo(b_grad_grad)
func = F.math.sparse_matmul.CooMatMul(
sp_b.row, sp_b.col, sp_b.shape,
transa=not self.transb, transb=not self.transa, transc=True)
def op(b, a):
return func.apply((b, a))[0]
gradient_check.check_double_backward(
op, (sp_b.data.data, a_data),
c_grad, (sp_ggb.data.data, a_grad_grad),
atol=atol, rtol=rtol, dtype=numpy.float32)
def test_DNSP_sparse_matmul_double_backward_cpu(self):
if not _scipy_available:
return
self.check_DNSP_double_backward(
self.a, self.b, self.gc, self.gga, self.ggb,
atol=1e-2, rtol=1e-2)
@attr.gpu
def test_DNSP_sparse_matmul_double_backward_gpu(self):
self.check_DNSP_double_backward(
cuda.to_gpu(self.a), cuda.to_gpu(self.b),
cuda.to_gpu(self.gc), cuda.to_gpu(self.gga),
cuda.to_gpu(self.ggb), atol=1e-2, rtol=1e-2)
@testing.parameterize(*testing.product_dict(
[
{'transa': False}, {'transa': True},
],
[
{'transb': False}, {'transb': True},
],
))
class TestCooMatMulInvalid(unittest.TestCase):
def test_invalid_ndim(self):
a = _setup_tensor(.5, 1, (2, 3, 3), numpy.float32, .75)
b = _setup_tensor(.5, 1, (3, 3), numpy.float32, .75)
sp_a = utils.to_coo(a)
sp_b = utils.to_coo(b)
with self.assertRaises(type_check.InvalidType):
F.sparse_matmul(sp_a, b, self.transa, self.transb)
with self.assertRaises(type_check.InvalidType):
F.sparse_matmul(a, sp_b, self.transa, self.transb)
def test_invalid_nbatch(self):
a = _setup_tensor(.5, 1, (2, 3, 3), numpy.float32, .75)
b = _setup_tensor(.5, 1, (3, 3, 3), numpy.float32, .75)
sp_a = utils.to_coo(a)
sp_b = utils.to_coo(b)
with self.assertRaises(type_check.InvalidType):
F.sparse_matmul(sp_a, b, self.transa, self.transb)
with self.assertRaises(type_check.InvalidType):
F.sparse_matmul(a, sp_b, self.transa, self.transb)
def test_invalid_shape(self):
a = _setup_tensor(.5, 1, (1, 2, 3), numpy.float32, .75)
b = _setup_tensor(.5, 1, (1, 4, 5), numpy.float32, .75)
sp_a = utils.to_coo(a)
sp_b = utils.to_coo(b)
with self.assertRaises(type_check.InvalidType):
F.sparse_matmul(sp_a, b, self.transa, self.transb)
with self.assertRaises(type_check.InvalidType):
F.sparse_matmul(a, sp_b, self.transa, self.transb)
def test_invalid_inputs(self):
a = _setup_tensor(.5, 1, (1, 3, 3), numpy.float32, .75)
b = _setup_tensor(.5, 1, (1, 3, 3), numpy.float32, .75)
sp_a = utils.to_coo(a)
sp_b = utils.to_coo(b)
with self.assertRaises(ValueError):
F.sparse_matmul(sp_a, sp_b, self.transa, self.transb)
with self.assertRaises(ValueError):
F.sparse_matmul(a, b, self.transa, self.transb)
testing.run_module(__name__, __file__)
|
mit
| 7,191,485,224,699,288,000 | 3,756,457,402,134,548,500 | 34.483333 | 76 | 0.56806 | false |
apark263/tensorflow
|
tensorflow/contrib/optimizer_v2/checkpointable_utils_test.py
|
2
|
33567
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# TODO(josh11b): Forked from contrib/eager/python to test OptimizerV2 the same way
# OptimizerV1 is tested. This file should be removed once the fork is resolved.
import functools
import os
import six
from tensorflow.contrib.optimizer_v2 import adam
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as core_saver
from tensorflow.python.training import training_util
from tensorflow.python.training.checkpointable import tracking
from tensorflow.python.training.checkpointable import util
class NonLayerCheckpointable(tracking.AutoCheckpointable):
def __init__(self):
super(NonLayerCheckpointable, self).__init__()
self.a_variable = util.add_variable(
self, name="a_variable", shape=[])
# pylint: disable=not-callable
class MyModel(training.Model):
"""A concrete Model for testing."""
def __init__(self):
super(MyModel, self).__init__()
self._named_dense = core.Dense(1, use_bias=True)
self._second = core.Dense(1, use_bias=False)
# We can still track Checkpointables which aren't Layers.
self._non_layer = NonLayerCheckpointable()
def call(self, values):
ret = self._second(self._named_dense(values))
return ret
class _MirroringSaveable(
core_saver.BaseSaverBuilder.ResourceVariableSaveable):
def __init__(self, primary_variable, mirrored_variable, name):
self._primary_variable = primary_variable
self._mirrored_variable = mirrored_variable
super(_MirroringSaveable, self).__init__(
self._primary_variable, "", name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into both variables."""
tensor, = restored_tensors
return control_flow_ops.group(
self._primary_variable.assign(tensor),
self._mirrored_variable.assign(tensor))
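# Illustrative note (an assumption; _MirroringSaveable is not exercised in
# this fork of the tests): a saveable like this is typically wired up by
# overriding _gather_saveables_for_checkpoint on a checkpointable object,
# returning a factory keyed by attribute name, e.g.
#
#   def _gather_saveables_for_checkpoint(self):
#     return {"mirrored": functools.partial(
#         _MirroringSaveable, self._primary, self._mirrored)}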
class CheckpointingTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNamingWithOptimizer(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
# A nuisance Model using the same optimizer. Its slot variables should not
# go in the checkpoint, since it is never depended on.
other_model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
optimizer_step = training_util.get_or_create_global_step()
root_checkpointable = util.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=optimizer_step)
if context.executing_eagerly():
optimizer.minimize(
lambda: model(input_value),
global_step=optimizer_step)
optimizer.minimize(
lambda: other_model(input_value),
global_step=optimizer_step)
else:
train_op = optimizer.minimize(
model(input_value), global_step=optimizer_step)
optimizer.minimize(
other_model(input_value),
global_step=optimizer_step)
self.evaluate(util.gather_initializers(
root_checkpointable))
self.evaluate(train_op)
named_variables, serialized_graph, _ = (
util._serialize_object_graph(
root_checkpointable, saveables_cache=None))
expected_checkpoint_names = (
# Created in the root node, so no prefix.
"optimizer_step",
"model/_second/kernel",
"model/_named_dense/kernel",
"model/_named_dense/bias",
# non-Layer dependency of the model
"model/_non_layer/a_variable",
# The optimizer creates two non-slot variables
"optimizer/beta1_power",
"optimizer/beta2_power",
# Slot variables
"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/m",
"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/v",
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m",
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/v",
"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/m",
"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/v",
)
suffix = "/.ATTRIBUTES/VARIABLE_VALUE"
expected_checkpoint_names = [
name + suffix for name in expected_checkpoint_names]
# The optimizer and Dense layers also save get_config() JSON
expected_checkpoint_names.extend([
"model/_second/.ATTRIBUTES/OBJECT_CONFIG_JSON",
"model/_named_dense/.ATTRIBUTES/OBJECT_CONFIG_JSON"
])
named_variables = {v.name: v for v in named_variables}
six.assertCountEqual(self, expected_checkpoint_names,
named_variables.keys())
# Check that we've mapped to the right variable objects (not exhaustive)
self.assertEqual(
"global_step",
named_variables["optimizer_step" + suffix].full_name)
self.assertEqual(
"my_model/dense_1/kernel",
named_variables["model/_second/kernel" + suffix].full_name)
self.assertEqual(
"my_model/dense/kernel",
named_variables["model/_named_dense/kernel" + suffix].full_name)
self.assertEqual(
"beta1_power",
named_variables["optimizer/beta1_power" + suffix].full_name)
self.assertEqual(
"beta2_power",
named_variables["optimizer/beta2_power" + suffix].full_name)
# Spot check the generated protocol buffers.
self.assertEqual("optimizer",
serialized_graph.nodes[0].children[1].local_name)
optimizer_node = serialized_graph.nodes[serialized_graph.nodes[0].children[
1].node_id]
self.assertEqual("beta1_power", optimizer_node.children[0].local_name)
self.assertEqual(
"beta1_power", serialized_graph.nodes[optimizer_node.children[0]
.node_id].attributes[0].full_name)
self.assertEqual(
"my_model/dense/kernel",
serialized_graph.nodes[optimizer_node.slot_variables[0]
.original_variable_node_id]
.attributes[0].full_name)
# We strip off the :0 suffix, as variable.name-based saving does.
self.assertEqual(
"my_model/dense/kernel/Adam",
serialized_graph.nodes[optimizer_node.slot_variables[0]
.slot_variable_node_id]
.attributes[0].full_name)
self.assertEqual(
"my_model/dense/kernel/Adam:0",
optimizer.get_slot(
var=model._named_dense.kernel,
name="m").name)
self.assertEqual(
"model/_named_dense/kernel" + suffix,
serialized_graph.nodes[
optimizer_node.slot_variables[0]
.original_variable_node_id].attributes[0].checkpoint_key)
self.assertEqual("m", optimizer_node.slot_variables[0].slot_name)
self.assertEqual(
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m" + suffix,
serialized_graph.nodes[
optimizer_node.slot_variables[0]
.slot_variable_node_id].attributes[0].checkpoint_key)
@test_util.run_in_graph_and_eager_modes
def testSaveRestore(self):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root_checkpointable = util.Checkpoint(
optimizer=optimizer, model=model)
input_value = constant_op.constant([[3.]])
if context.executing_eagerly():
optimizer.minimize(
lambda: model(input_value))
else:
train_op = optimizer.minimize(model(input_value))
# TODO(allenl): Make initialization more pleasant when graph building.
root_checkpointable.save_counter # pylint: disable=pointless-statement
self.evaluate(util.gather_initializers(
root_checkpointable))
self.evaluate(train_op)
prefix = os.path.join(self.get_temp_dir(), "ckpt")
self.evaluate(state_ops.assign(model._named_dense.variables[1], [42.]))
m_bias_slot = optimizer.get_slot(model._named_dense.variables[1], "m")
self.evaluate(state_ops.assign(m_bias_slot, [1.5]))
save_path = root_checkpointable.save(file_prefix=prefix)
self.evaluate(state_ops.assign(model._named_dense.variables[1], [43.]))
self.evaluate(state_ops.assign(root_checkpointable.save_counter, 3))
optimizer_variables = self.evaluate(optimizer.variables())
self.evaluate(state_ops.assign(m_bias_slot, [-2.]))
# Immediate restoration
status = root_checkpointable.restore(save_path=save_path).assert_consumed()
status.run_restore_ops()
self.assertAllEqual([42.], self.evaluate(model._named_dense.variables[1]))
self.assertAllEqual(1, self.evaluate(root_checkpointable.save_counter))
self.assertAllEqual([1.5], self.evaluate(m_bias_slot))
if not context.executing_eagerly():
return # Restore-on-create is only supported when executing eagerly
on_create_model = MyModel()
on_create_optimizer = adam.AdamOptimizer(
0.001,
        # Preserve beta_1_power and beta_2_power when applying gradients
# so we can test that they've been restored correctly.
beta1=1.0,
beta2=1.0)
on_create_root = util.Checkpoint(
optimizer=on_create_optimizer, model=on_create_model)
# Deferred restoration
status = on_create_root.restore(save_path=save_path)
on_create_model(constant_op.constant([[3.]])) # create variables
self.assertAllEqual(1, self.evaluate(on_create_root.save_counter))
self.assertAllEqual([42.],
self.evaluate(
on_create_model._named_dense.variables[1]))
on_create_m_bias_slot = on_create_optimizer.get_slot(
on_create_model._named_dense.variables[1], "m")
# Optimizer slot variables are created when the original variable is
# restored.
self.assertAllEqual([1.5], self.evaluate(on_create_m_bias_slot))
self.assertAllEqual(optimizer_variables[2:],
self.evaluate(on_create_optimizer.variables()))
dummy_var = resource_variable_ops.ResourceVariable([1.])
on_create_optimizer.minimize(loss=dummy_var.read_value)
status.assert_consumed()
beta_1_power, beta_2_power = on_create_optimizer._get_beta_accumulators()
self.assertAllEqual(optimizer_variables[0], self.evaluate(beta_1_power))
self.assertAllEqual(optimizer_variables[1], self.evaluate(beta_2_power))
# TODO(allenl): Debug garbage created by this test in python3.
def testDeferredRestorationUsageEager(self):
"""An idiomatic eager execution example."""
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = util.Checkpoint(
optimizer=optimizer, model=model,
optimizer_step=training_util.get_or_create_global_step())
root.restore(checkpoint_management.latest_checkpoint(
checkpoint_directory))
for _ in range(num_training_steps):
# TODO(allenl): Use a Dataset and serialize/checkpoint it.
input_value = constant_op.constant([[3.]])
optimizer.minimize(
lambda: model(input_value), # pylint: disable=cell-var-from-loop
global_step=root.optimizer_step)
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
root.optimizer_step.numpy())
def testUsageGraph(self):
"""Expected usage when graph building."""
with context.graph_mode():
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with ops.Graph().as_default():
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = util.Checkpoint(
optimizer=optimizer, model=model,
global_step=training_util.get_or_create_global_step())
input_value = constant_op.constant([[3.]])
train_op = optimizer.minimize(
model(input_value),
global_step=root.global_step)
checkpoint_path = checkpoint_management.latest_checkpoint(
checkpoint_directory)
with self.session(graph=ops.get_default_graph()) as session:
status = root.restore(save_path=checkpoint_path)
status.initialize_or_restore(session=session)
if checkpoint_path is None:
self.assertEqual(0, training_continuation)
with self.assertRaises(AssertionError):
status.assert_consumed()
else:
status.assert_consumed()
for _ in range(num_training_steps):
session.run(train_op)
root.save(file_prefix=checkpoint_prefix, session=session)
self.assertEqual((training_continuation + 1) * num_training_steps,
session.run(root.global_step))
self.assertEqual(training_continuation + 1,
session.run(root.save_counter))
@test_util.run_in_graph_and_eager_modes
def testAgnosticUsage(self):
"""Graph/eager agnostic usage."""
# Does create garbage when executing eagerly due to ops.Graph() creation.
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with ops.Graph().as_default(), self.test_session(
graph=ops.get_default_graph()), test_util.device(use_gpu=True):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = util.Checkpoint(
optimizer=optimizer, model=model,
global_step=training_util.get_or_create_global_step())
checkpoint_path = checkpoint_management.latest_checkpoint(
checkpoint_directory)
status = root.restore(save_path=checkpoint_path)
input_value = constant_op.constant([[3.]])
train_fn = functools.partial(
optimizer.minimize,
functools.partial(model, input_value),
global_step=root.global_step)
if not context.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
status.initialize_or_restore()
for _ in range(num_training_steps):
train_fn()
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
self.evaluate(root.global_step))
self.assertEqual(training_continuation + 1,
self.evaluate(root.save_counter))
# pylint: disable=cell-var-from-loop
@test_util.run_in_graph_and_eager_modes
def testWithDefun(self):
num_training_steps = 2
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with ops.Graph().as_default(), self.test_session(
graph=ops.get_default_graph()), test_util.device(use_gpu=True):
model = MyModel()
# Don't actually train so we can test variable values
optimizer = adam.AdamOptimizer(0.)
root = util.Checkpoint(
optimizer=optimizer, model=model,
global_step=training_util.get_or_create_global_step())
checkpoint_path = checkpoint_management.latest_checkpoint(
checkpoint_directory)
status = root.restore(save_path=checkpoint_path)
def train_fn():
@function.defun
def _call_model(x):
return model(x)
with backprop.GradientTape() as tape:
loss = _call_model(constant_op.constant([[3.]]))
gradients = tape.gradient(loss, model.variables)
return optimizer.apply_gradients(zip(gradients, model.variables),
global_step=root.global_step)
if not context.executing_eagerly():
train_fn = functools.partial(
self.evaluate, train_fn())
status.initialize_or_restore()
for _ in range(num_training_steps):
train_fn()
if training_continuation > 0:
status.assert_consumed()
self.assertAllClose([[42.]], self.evaluate(model.variables[0]))
else:
self.evaluate(model.variables[0].assign([[42.]]))
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
self.evaluate(root.global_step))
self.assertEqual(training_continuation + 1,
self.evaluate(root.save_counter))
# pylint: enable=cell-var-from-loop
def testAnonymousVarsInInit(self):
class Model(training.Model):
def __init__(self):
super(Model, self).__init__()
self.w = resource_variable_ops.ResourceVariable(0.0)
self.b = resource_variable_ops.ResourceVariable(0.0)
self.vars = [self.w, self.b]
def call(self, x):
return x * self.w + self.b
with context.eager_mode():
model = Model()
optimizer = adam.AdamOptimizer(learning_rate=0.05)
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
checkpoint = util.Checkpoint(
model=model, optimizer=optimizer)
for _ in range(2):
checkpoint.save(checkpoint_prefix)
with backprop.GradientTape() as tape:
loss = (constant_op.constant(1.)
- model(constant_op.constant(1.))) ** 2
grad = tape.gradient(loss, model.vars)
optimizer.apply_gradients(
[(g, v) for g, v in zip(grad, model.vars)])
@test_util.run_in_graph_and_eager_modes
def testDeferredSlotRestoration(self):
checkpoint_directory = self.get_temp_dir()
root = tracking.AutoCheckpointable()
root.var = util.add_variable(
root, name="var", initializer=0.)
optimizer = adam.AdamOptimizer(0.1)
if context.executing_eagerly():
optimizer.minimize(root.var.read_value)
else:
train_op = optimizer.minimize(root.var)
# Note that `optimizer` has not been added as a dependency of
# `root`. Create a one-off grouping so that slot variables for `root.var`
# get initialized too.
self.evaluate(util.gather_initializers(
util.Checkpoint(root=root, optimizer=optimizer)))
self.evaluate(train_op)
self.evaluate(state_ops.assign(root.var, 12.))
no_slots_path = util.CheckpointableSaver(root).save(
os.path.join(checkpoint_directory, "no_slots"))
root.optimizer = optimizer
self.evaluate(state_ops.assign(root.var, 13.))
self.evaluate(state_ops.assign(optimizer.get_slot(name="m", var=root.var),
14.))
slots_path = util.CheckpointableSaver(root).save(
os.path.join(checkpoint_directory, "with_slots"))
new_root = tracking.AutoCheckpointable()
# Load the slot-containing checkpoint (deferred), then immediately overwrite
# the non-slot variable (also deferred).
slot_status = util.CheckpointableSaver(
new_root).restore(slots_path)
no_slot_status = util.CheckpointableSaver(
new_root).restore(no_slots_path)
with self.assertRaises(AssertionError):
no_slot_status.assert_consumed()
new_root.var = util.add_variable(
new_root, name="var", shape=[])
no_slot_status.assert_consumed()
no_slot_status.run_restore_ops()
self.assertEqual(12., self.evaluate(new_root.var))
new_root.optimizer = adam.AdamOptimizer(0.1)
with self.assertRaisesRegexp(AssertionError, "beta1_power"):
slot_status.assert_consumed()
self.assertEqual(12., self.evaluate(new_root.var))
if context.executing_eagerly():
# Slot variables are only created with restoring initializers when
# executing eagerly.
self.assertEqual(14., self.evaluate(
new_root.optimizer.get_slot(name="m", var=new_root.var)))
else:
self.assertIs(new_root.optimizer.get_slot(name="m", var=new_root.var),
None)
if context.executing_eagerly():
new_root.optimizer.minimize(new_root.var.read_value)
else:
train_op = new_root.optimizer.minimize(new_root.var)
# The slot variable now exists; restore() didn't create it, but we should
# now have a restore op for it.
slot_status.run_restore_ops()
self.assertEqual(14., self.evaluate(
new_root.optimizer.get_slot(name="m", var=new_root.var)))
self.evaluate(train_op)
slot_status.assert_consumed()
def testManySavesGraph(self):
"""Saves after the first should not modify the graph."""
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.session(graph):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = tracking.AutoCheckpointable()
obj.var = variable_scope.get_variable(name="v", initializer=0.)
obj.opt = adam.AdamOptimizer(0.1)
obj.opt.minimize(obj.var.read_value())
self.evaluate(util.gather_initializers(obj))
saver = util.CheckpointableSaver(obj)
saver.save(checkpoint_prefix)
before_ops = graph.get_operations()
saver.save(checkpoint_prefix)
self.assertEqual(before_ops, graph.get_operations())
def testManyRestoresGraph(self):
"""Restores after the first should not modify the graph."""
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.session(graph):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = tracking.AutoCheckpointable()
obj.var = variable_scope.get_variable(name="v", initializer=0.)
obj.opt = adam.AdamOptimizer(0.1)
obj.opt.minimize(obj.var.read_value())
self.evaluate(util.gather_initializers(obj))
saver = util.CheckpointableSaver(obj)
save_path = saver.save(checkpoint_prefix)
saver.restore(save_path)
before_ops = graph.get_operations()
saver.restore(save_path)
self.assertEqual(before_ops, graph.get_operations())
def testMultipleGraphsNonSlotVariables(self):
with context.graph_mode():
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
optimizer = adam.AdamOptimizer(0.001)
# Construct a model in one graph
first_graph = ops.Graph()
first_session = session_lib.Session(graph=first_graph)
with first_graph.as_default(), first_session.as_default():
first_variable = resource_variable_ops.ResourceVariable([1.])
first_root_checkpointable = util.Checkpoint(
optimizer=optimizer, variable=first_variable)
train_op = optimizer.minimize(first_variable.read_value)
self.evaluate(util.gather_initializers(
first_root_checkpointable))
self.evaluate(train_op)
self.evaluate(first_variable.assign([1.]))
self.evaluate(optimizer.get_slot(
var=first_variable, name="m").assign([2.]))
beta_1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta_1_power.assign(3.))
# Save and load in a second graph
second_graph = ops.Graph()
with second_graph.as_default(), session_lib.Session(graph=second_graph):
second_variable = resource_variable_ops.ResourceVariable([1.])
second_root_checkpointable = util.Checkpoint(
optimizer=optimizer, variable=second_variable)
train_op = optimizer.minimize(second_variable.read_value)
second_root_checkpointable.restore(None).initialize_or_restore()
self.evaluate(train_op)
self.evaluate(second_variable.assign([4.]))
self.evaluate(optimizer.get_slot(
var=second_variable, name="m").assign([5.]))
beta_1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta_1_power.assign(6.))
save_path = second_root_checkpointable.save(checkpoint_prefix)
self.evaluate(second_variable.assign([7.]))
self.evaluate(optimizer.get_slot(
var=second_variable, name="m").assign([8.]))
beta_1_power, _ = optimizer._get_beta_accumulators()
self.assertAllEqual(6., self.evaluate(beta_1_power))
status = second_root_checkpointable.restore(save_path)
status.assert_consumed().run_restore_ops()
self.assertAllEqual([4.], self.evaluate(second_variable))
self.assertAllEqual([5.], self.evaluate(optimizer.get_slot(
var=second_variable, name="m")))
beta_1_power, _ = optimizer._get_beta_accumulators()
self.assertAllEqual(6., self.evaluate(beta_1_power))
# Check that the first graph is unmolested
with first_graph.as_default(), first_session.as_default():
self.assertAllEqual([1.], self.evaluate(first_variable))
self.assertAllEqual([2.], self.evaluate(optimizer.get_slot(
var=first_variable, name="m")))
beta_1_power, _ = optimizer._get_beta_accumulators()
self.assertAllEqual(3., self.evaluate(beta_1_power))
class TemplateTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_checkpointable_save_restore(self):
def _templated():
v = variable_scope.get_variable(
"v", shape=[1], initializer=init_ops.zeros_initializer(),
use_resource=True)
v2 = variable_scope.get_variable(
"v2", shape=[1], initializer=init_ops.zeros_initializer(),
use_resource=True)
return v, v + 1., v2
save_template = template.make_template("s1", _templated)
v1_save, _, v2_save = save_template()
optimizer = adam.AdamOptimizer(0.0)
save_root = util.Checkpoint(
my_template=save_template, optimizer=optimizer)
optimizer.minimize(v1_save.read_value)
self.evaluate([v.initializer for v in optimizer.variables()])
self.evaluate(v1_save.assign([12.]))
self.evaluate(v2_save.assign([14.]))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = save_root.save(checkpoint_prefix)
load_template = template.make_template("s2", _templated)
load_optimizer = adam.AdamOptimizer(0.0)
load_root = util.Checkpoint(
my_template=load_template, optimizer=load_optimizer)
status = load_root.restore(save_path)
var, var_plus_one, var2 = load_template()
load_optimizer.minimize(var.read_value)
self.assertEqual(2, len(load_template._checkpoint_dependencies))
self.assertEqual("v", load_template._checkpoint_dependencies[0].name)
self.assertEqual("v2", load_template._checkpoint_dependencies[1].name)
status.assert_consumed().run_restore_ops()
self.assertAllEqual([12.], self.evaluate(var))
self.assertAllEqual([13.], self.evaluate(var_plus_one))
self.assertAllEqual([14.], self.evaluate(var2))
class CheckpointCompatibilityTests(test.TestCase):
def _initialized_model(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
optimizer_step = training_util.get_or_create_global_step()
root_checkpointable = util.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=optimizer_step)
train_op = optimizer.minimize(
functools.partial(model, input_value),
global_step=optimizer_step)
self.evaluate(util.gather_initializers(
root_checkpointable))
self.evaluate(train_op)
# A regular variable, a slot variable, and a non-slot Optimizer variable
# with known values to check when loading.
self.evaluate(model._named_dense.bias.assign([1.]))
self.evaluate(optimizer.get_slot(
var=model._named_dense.bias, name="m").assign([2.]))
beta_1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta_1_power.assign(3.))
return root_checkpointable
def _set_sentinels(self, root_checkpointable):
self.evaluate(root_checkpointable.model._named_dense.bias.assign([101.]))
self.evaluate(
root_checkpointable.optimizer.get_slot(
var=root_checkpointable.model._named_dense.bias, name="m")
.assign([102.]))
beta_1_power, _ = root_checkpointable.optimizer._get_beta_accumulators()
self.evaluate(beta_1_power.assign(103.))
def _check_sentinels(self, root_checkpointable):
self.assertAllEqual(
[1.], self.evaluate(root_checkpointable.model._named_dense.bias))
self.assertAllEqual([2.], self.evaluate(
root_checkpointable.optimizer.get_slot(
var=root_checkpointable.model._named_dense.bias, name="m")))
beta_1_power, _ = root_checkpointable.optimizer._get_beta_accumulators()
self.assertAllEqual(3., self.evaluate(beta_1_power))
def _write_name_based_checkpoint(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.test_session(
graph=save_graph) as session:
root = self._initialized_model()
name_saver = core_saver.Saver()
return name_saver.save(
sess=session, save_path=checkpoint_prefix,
global_step=root.optimizer_step)
@test_util.run_in_graph_and_eager_modes
def testLoadFromNameBasedSaver(self):
"""Save a name-based checkpoint, load it using the object-based API."""
with test_util.device(use_gpu=True):
save_path = self._write_name_based_checkpoint()
root = self._initialized_model()
self._set_sentinels(root)
with self.assertRaises(AssertionError):
self._check_sentinels(root)
object_saver = util.CheckpointableSaver(root)
self._set_sentinels(root)
status = object_saver.restore(save_path)
if context.executing_eagerly():
self._check_sentinels(root)
if context.executing_eagerly():
with self.assertRaisesRegexp(AssertionError, "OBJECT_CONFIG_JSON"):
status.assert_consumed()
else:
# When graph building, we haven't read any keys, so we don't know
# whether the restore will be complete.
with self.assertRaisesRegexp(AssertionError, "not restored"):
status.assert_consumed()
status.run_restore_ops()
self._check_sentinels(root)
self._set_sentinels(root)
status = object_saver.restore(save_path)
status.initialize_or_restore()
self._check_sentinels(root)
# TODO(allenl): Test for the core name-based saver loading object-based
# checkpoints once object-based checkpointing is in core.
def testSaveGraphLoadEager(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.test_session(
graph=save_graph) as session:
root = self._initialized_model()
save_path = root.save(
session=session, file_prefix=checkpoint_prefix)
with context.eager_mode():
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed()
self._check_sentinels(root)
def testSaveEagerLoadGraph(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.eager_mode():
root = self._initialized_model()
save_path = root.save(file_prefix=checkpoint_prefix)
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.test_session(graph=save_graph):
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed().run_restore_ops()
self._check_sentinels(root)
if __name__ == "__main__":
test.main()
|
apache-2.0
| -5,637,415,983,215,929,000 | -5,824,069,541,464,861,000 | 43.109067 | 82 | 0.66339 | false |
reingart/pyafipws
|
wslpg.py
|
1
|
216526
|
#!/usr/bin/python
# -*- coding: utf8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"""Módulo para obtener código de operación electrónico (COE) para
Liquidación Primaria Electrónica de Granos del web service WSLPG de AFIP
"""
__author__ = "Mariano Reingart <[email protected]>"
__copyright__ = "Copyright (C) 2013-2018 Mariano Reingart"
__license__ = "GPL 3.0"
__version__ = "1.32a"
LICENCIA = """
wslpg.py: Interface to generate the Electronic Operation Code for the
Primary Grain Liquidation service (LpgService)
Copyright (C) 2013-2018 Mariano Reingart [email protected]
http://www.sistemasagiles.com.ar/trac/wiki/LiquidacionPrimariaGranos
This program is free software, provided ABSOLUTELY WITHOUT WARRANTY;
you are welcome to redistribute it under the GPLv3 license.
For additional information on warranty, commercial technical support
and inclusion/distribution in proprietary programs, see PyAfipWs:
http://www.sistemasagiles.com.ar/trac/wiki/PyAfipWs
"""
AYUDA="""
Opciones:
--ayuda: este mensaje
--debug: modo depuración (detalla y confirma las operaciones)
--formato: muestra el formato de los archivos de entrada/salida
--prueba: genera y autoriza una liquidación de prueba (no usar en producción!)
--xml: almacena los requerimientos y respuestas XML (depuración)
--dbf: utilizar tablas DBF (xBase) para los archivos de intercambio
--json: utilizar formato json para el archivo de intercambio
--dummy: consulta estado de servidores
--autorizar: Autorizar Liquidación Primaria de Granos (liquidacionAutorizar)
--ajustar: Ajustar Liquidación Primaria de Granos (liquidacionAjustar)
--anular: Anular una Liquidación Primaria de Granos (liquidacionAnular)
--autorizar-anticipo: Autoriza un Anticipo (lpgAutorizarAnticipo)
--consultar: Consulta una liquidación (parámetros: nro de orden, COE, pdf)
--cancelar-anticipo: anteponer para anticipos (lpgCancelarAnticipo)
--ult: Consulta el último número de orden registrado en AFIP
(liquidacionUltimoNroOrdenConsultar)
--pdf: genera el formulario C 1116 B en formato PDF
--mostrar: muestra el documento PDF generado (usar con --pdf)
--imprimir: imprime el documento PDF generado (usar con --mostrar y --pdf)
--autorizar-lsg: Autoriza una Liquidación Secundaria de Granos (lsgAutorizar)
--lsg --anular: Anula una LSG (lsgAnular)
--lsg --consular: Consulta una LSG por pto_emision, nro_orden o COE
--lsg --ult: Consulta el último Nº LSG emitida (lsgConsultarUltimoNroOrden)
--lsg --asociar: Asocia una liq. sec. a un contrato (lsgAsociarAContrato)
--ajustar-lsg: Ajusta una liquidación secundaria (lsgAjustar por COE/Contrato)
--autorizar-cg: Autorizar Certificación de Granos (cgAutorizar)
--cg --anular: Solicita anulación de un CG (cgSolicitarAnulacion)
--cg --consultar: Consulta una CG por pto_emision, nro_orden o COE
--cg --ult: Consulta el último Nº LSG emitida (cgConsultarUltimoNroOrden)
--informar-calidad: Informa la calidad de una CG (cgInformarCalidad)
--buscar-ctg: devuelve los datos de la CTG a certificar
espera tipo_certificado, cuit_depositante, nro_planta, cod_grano, campania
--buscar-cert-con-saldo-disp: CG disponible para liquidar/retirar/transferir
espera cuit_depositante, cod_grano, campania, coe fecha_emision_des/has
--provincias: obtiene el listado de provincias
--localidades: obtiene el listado de localidades por provincia
--tipograno: obtiene el listado de los tipos de granos disponibles
--campanias: obtiene el listado de las campañas
--gradoref: obtiene el listado de los grados de referencias
--gradoent: obtiene el listado de los grados y valores entregados
--certdeposito: obtiene el listado de los tipos de certificados de depósito
--deducciones: obtiene el listado de los tipos de deducciones
--retenciones: obtiene el listado de los tipos de retenciones
--puertos: obtiene el listado de los puertos habilitados
--actividades: obtiene el listado de las actividades habilitados
--actividadesrep: devuelve las actividades en las que emisor/representado
se encuentra inscripto en RUOCA
--operaciones: obtiene el listado de las operaciones para el representado
Ver wslpg.ini para parámetros de configuración (URL, certificados, etc.)"
"""
import os, sys, shelve
import decimal, datetime
import traceback
import pprint
import warnings
from pysimplesoap.client import SoapFault
from fpdf import Template
import utils
# import shared functions:
from utils import leer, escribir, leer_dbf, guardar_dbf, N, A, I, json, BaseWS, inicializar_y_capturar_excepciones, get_install_dir
WSDL = "https://fwshomo.afip.gov.ar/wslpg/LpgService?wsdl"
#WSDL = "https://serviciosjava.afip.gob.ar/wslpg/LpgService?wsdl"
#WSDL = "file:wslpg.wsdl"
DEBUG = False
XML = False
CONFIG_FILE = "wslpg.ini"
TIMEOUT = 30
HOMO = False
# interchange file format definition:
ENCABEZADO = [
    ('tipo_reg', 1, A), # 0: liquidation header
('nro_orden', 18, N),
('cuit_comprador', 11, N),
('nro_act_comprador', 5, N),
('nro_ing_bruto_comprador', 15, N),
('cod_tipo_operacion', 2, N),
    ('es_liquidacion_propia', 1, A), # S or N
    ('es_canje', 1, A), # S or N
('cod_puerto', 4, N),
('des_puerto_localidad', 240, A),
('cod_grano', 3, N),
('cuit_vendedor', 11, N),
('nro_ing_bruto_vendedor', 15, N),
    ('actua_corredor', 1, A), # S or N
    ('liquida_corredor', 1, A), # S or N
('cuit_corredor', 11, N),
('nro_ing_bruto_corredor', 15, N),
('comision_corredor', 5, I, 2), # 3.2
('fecha_precio_operacion', 10, A), # 26/02/2013
('precio_ref_tn', 8, I, 3), # 4.3
('cod_grado_ref', 2, A),
('cod_grado_ent', 2, A),
('factor_ent', 6, I, 3), # 3.3
('precio_flete_tn', 7, I, 2), # 5.2
('cont_proteico', 6, I, 3), # 3.3
('alic_iva_operacion', 5, I, 2), # 3.2
('campania_ppal', 4, N),
('cod_localidad_procedencia', 6, N),
    ('reservado1', 200, A), # datos_adicionales (backwards compatibility)
('coe', 12, N),
('coe_ajustado', 12, N),
('estado', 2, A),
('total_deduccion', 17, I, 2), # 17.2
('total_retencion', 17, I, 2), # 17.2
('total_retencion_afip', 17, I, 2), # 17.2
('total_otras_retenciones', 17, I, 2), # 17.2
('total_neto_a_pagar', 17, I, 2), # 17.2
('total_iva_rg_4310_18', 17, I, 2), # 17.2 WSLPGv1.20
('total_pago_segun_condicion', 17, I, 2), # 17.2
('fecha_liquidacion', 10, A),
('nro_op_comercial', 10, N),
('precio_operacion', 17, I, 3), # 17.3
('subtotal', 17, I, 2), # 17.2
('importe_iva', 17, I, 2), # 17.2
('operacion_con_iva', 17, I, 2), # 17.2
('total_peso_neto', 8, N), # 17.2
    # WSLPGv1.1 fields:
('pto_emision', 4, N),
('cod_prov_procedencia', 2, N),
('peso_neto_sin_certificado', 8, N),
('cod_tipo_ajuste', 2, N),
('val_grado_ent', 4, I, 3), # 1.3
    # WSLPGv1.3 fields:
('cod_prov_procedencia_sin_certificado', 2, N),
('cod_localidad_procedencia_sin_certificado', 6, N),
    # WSLPGv1.4 fields (adjustments):
('nro_contrato', 15, N),
('tipo_formulario', 2, N),
('nro_formulario', 12, N),
    # returned data:
('total_iva_10_5', 17, I, 2), # 17.2
('total_iva_21', 17, I, 2), # 17.2
('total_retenciones_ganancias', 17, I, 2), # 17.2
('total_retenciones_iva', 17, I, 2), # 17.2
    ('datos_adicionales', 400, A), # max 400 since WSLPGv1.2
    # fields added in WSLPGv1.5 (adjustments):
('iva_deducciones', 17, I, 2), # 17.2
('subtotal_deb_cred', 17, I, 2), # 17.2
('total_base_deducciones', 17, I, 2), # 17.2
    # fields added in WSLPGv1.6 (secondary liquidation, base):
('cantidad_tn', 11, I, 3), # 8.3
('nro_act_vendedor', 5, N),
    # fields added in WSLPGv1.9 (secondary liquidation, base):
('total_deducciones', 19, I , 2),
('total_percepciones', 19, I , 2),
]
CERTIFICADO = [
    ('tipo_reg', 1, A), # 1: Certificate
    ('reservado1', 2, N), # field widened in WSLPGv1.7
    ('nro_certificado_deposito', 12, N),
    ('peso_neto', 8, N), # adjusted weight used since WSLPGv1.17
('cod_localidad_procedencia', 6, N),
('cod_prov_procedencia', 2, N),
('reservado', 2, N),
('campania', 4, N),
('fecha_cierre', 10, A),
    ('peso_neto_total_certificado', 8, N), # for unified adjustment (WSLPGv1.4)
    ('coe_certificado_deposito', 12, N), # for certification (WSLPGv1.6)
    ('tipo_certificado_deposito', 3, N), # WSLPGv1.7 adds value 332
]
RETENCION = [
    ('tipo_reg', 1, A), # 2: Withholding
('codigo_concepto', 2, A),
('detalle_aclaratorio', 30, A),
('base_calculo', 10, I, 2), # 8.2
('alicuota', 6, I, 2), # 3.2
('nro_certificado_retencion', 14, N),
('fecha_certificado_retencion', 10, A),
('importe_certificado_retencion', 17, I, 2), # 17.2
('importe_retencion', 17, I, 2), # 17.2
]
DEDUCCION = [
    ('tipo_reg', 1, A), # 3: Deduction
    ('codigo_concepto', 2, A),
    ('detalle_aclaratorio', 30, A), # max 50 per WSLPGv1.2
('dias_almacenaje', 4, N),
('reservado1', 6, I, 3),
('comision_gastos_adm', 5, I, 2), # 3.2
('base_calculo', 10, I, 2), # 8.2
('alicuota', 6, I, 2), # 3.2
('importe_iva', 17, I, 2), # 17.2
('importe_deduccion', 17, I, 2), # 17.2
    ('precio_pkg_diario', 11, I, 8), # 3.8, adjusted in WSLPGv1.2
]
PERCEPCION = [
    ('tipo_reg', 1, A), # P: Perception (collection)
    ('detalle_aclaratoria', 50, A), # max 50 per WSLPGv1.8
('base_calculo', 10, I, 2), # 8.2
('alicuota', 6, I, 2), # 3.2
('importe_final', 19, I, 2), # 17.2 (LPG WSLPGv1.16)
]
OPCIONAL = [
    ('tipo_reg', 1, A), # O: Optional
('codigo', 50, A),
('descripcion', 250, A),
]
AJUSTE = [
    ('tipo_reg', 1, A), # 4: debit adjustment / 5: credit adjustment (WSLPGv1.4)
('concepto_importe_iva_0', 20, A),
('importe_ajustar_iva_0', 15, I, 2), # 11.2
('concepto_importe_iva_105', 20, A),
('importe_ajustar_iva_105', 15, I, 2), # 11.2
('concepto_importe_iva_21', 20, A),
('importe_ajustar_iva_21', 15, I, 2), # 11.2
('diferencia_peso_neto', 8, N),
('diferencia_precio_operacion', 17, I, 3), # 17.3
('cod_grado', 2, A),
('val_grado', 4, I, 3), # 1.3
('factor', 6, I, 3), # 3.3
('diferencia_precio_flete_tn', 7, I, 2), # 5.2
('datos_adicionales', 400, A),
    # returned data:
('fecha_liquidacion', 10, A),
('nro_op_comercial', 10, N),
('precio_operacion', 17, I, 3), # 17.3
('subtotal', 17, I, 2), # 17.2
('importe_iva', 17, I, 2), # 17.2
('operacion_con_iva', 17, I, 2), # 17.2
('total_peso_neto', 8, N), # 17.2
('total_deduccion', 17, I, 2), # 17.2
('total_retencion', 17, I, 2), # 17.2
('total_retencion_afip', 17, I, 2), # 17.2
('total_otras_retenciones', 17, I, 2), # 17.2
('total_neto_a_pagar', 17, I, 2), # 17.2
('total_iva_rg_4310_18', 17, I, 2), # 17.2
('total_pago_segun_condicion', 17, I, 2), # 17.2
('iva_calculado_iva_0', 15, I, 2), # 15.2
('iva_calculado_iva_105', 15, I, 2), # 15.2
('iva_calculado_iva_21', 15, I, 2), # 15.2
]
CERTIFICACION = [
    ('tipo_reg', 1, A), # 7: certification header
    # header fields common to all certifications (WSLPGv1.6)
('pto_emision', 4, N),
('nro_orden', 8, N),
    ('tipo_certificado', 1, A), # P:Primary,R:Withdrawal,T:Transfer,E:Preexisting
('nro_planta', 6, N),
('nro_ing_bruto_depositario', 15, N),
    ('titular_grano', 1, A), # "P" (own) "T" (third party)
    ('cuit_depositante', 11, N), # required if titular_grano is T
('nro_ing_bruto_depositante', 15, N),
('cuit_corredor', 11, N),
('cod_grano', 3, N),
('campania', 4, N),
('datos_adicionales', 400, A),
    ('reservado1', 14, A), # reserved for future fields (do not use)
    # fields for CgAutorizarPrimariaType, formerly cgAutorizarDeposito (WSLPGv1.6-1.8)
    ('nro_act_depositario', 5, N), # new in WSLPGv1.8, also for R/T
('descripcion_tipo_grano', 20, A),
('monto_almacenaje', 10, I, 2),
('monto_acarreo', 10, I, 2),
('monto_gastos_generales', 10, I, 2),
('monto_zarandeo', 10, I, 2),
('porcentaje_secado_de', 5, I, 2),
('porcentaje_secado_a', 5, I, 2),
('monto_secado', 10, I, 2),
('monto_por_cada_punto_exceso', 10, I, 2),
('monto_otros', 10, I, 2),
    ('reservado_calidad', 35, A), # see quality substructure (WSLPGv1.10)
('peso_neto_merma_volatil', 10, I , 2),
('porcentaje_merma_secado', 5, I, 2),
('peso_neto_merma_secado', 10, I, 2),
('porcentaje_merma_zarandeo', 5, I, 2),
('peso_neto_merma_zarandeo', 10, I, 2),
    ('peso_neto_certificado', 10, I, 2), # WSLPGv1.9: 2 decimals!
('servicios_secado', 8, I, 3),
('servicios_zarandeo', 8, I, 3),
('servicios_otros', 7, I, 3),
('servicios_forma_de_pago', 20, A),
    # fields for cgAutorizarRetiroTransferencia (WSLPGv1.6):
('cuit_receptor', 11, N),
    ('fecha', 10, A), # unused in WSLPGv1.8
    ('nro_carta_porte_a_utilizar', 9, N), # required for withdrawal
    ('cee_carta_porte_a_utilizar', 14, N), # unused in WSLPGv1.8
    # for cgAutorizarPreexistente (WSLPGv1.6):
    ('tipo_certificado_deposito_preexistente', 1, N), # "R": withdrawal "T": transfer
('nro_certificado_deposito_preexistente', 12, N),
    ('cac_certificado_deposito_preexistente', 14, N), # changed in WSLPGv1.8
('fecha_emision_certificado_deposito_preexistente', 10, A),
('peso_neto', 8, N),
    # nro_planta defined previously - added in WSLPGv1.8
    # data returned by the web service:
    ('reservado2', 183, N), # padding for future fields (do not use)
('coe', 12, N),
('fecha_certificacion', 10, A),
('estado', 2, A),
    ('reservado3', 101, A), # padding for future fields (do not use)
    # other returned fields (optional)
# 'pesosResumen'
('peso_bruto_certificado', 10, I , 2),
('peso_merma_secado', 10, I , 2),
('peso_merma_zarandeo', 10, I , 2),
    # peso_neto_certificado defined above
# serviciosResumen
('importe_iva', 10, I , 2),
('servicio_gastos_generales', 10, I , 2),
('servicio_otros', 10, I , 2),
('servicio_total', 10, I , 2),
('servicio_zarandeo', 10, I , 2),
    # planta (plant data)
('cuit_titular_planta', 11, N),
('razon_social_titular_planta', 11, A),
    # fields not documented by AFIP (added after WSLPGv1.15, late September)
('servicios_conceptos_no_gravados', 10, I, 2),
('servicios_percepciones_iva', 10, I, 2),
('servicios_otras_percepciones', 10, I, 2),
]
CTG = [ # for cgAutorizarDeposito (WSLPGv1.6)
    ('tipo_reg', 1, A), # C: CTG
('nro_ctg', 8, N),
('nro_carta_porte', 9, N),
('porcentaje_secado_humedad', 5, I, 2),
('importe_secado', 10, I, 2),
('peso_neto_merma_secado', 10, I, 2),
('tarifa_secado', 10, I, 2),
('importe_zarandeo', 10, I, 2),
('peso_neto_merma_zarandeo', 10, I, 2),
('tarifa_zarandeo', 10, I, 2),
('peso_neto_confirmado_definitivo', 10, I, 2),
]
DET_MUESTRA_ANALISIS = [ # for cgAutorizarDeposito (WSLPGv1.6)
    ('tipo_reg', 1, A), # D: sample analysis detail
    ('descripcion_rubro', 400, A),
    ('tipo_rubro', 1, A), # "B" (bonus) and "R" (rebate)
('porcentaje', 5, I, 2),
('valor', 5, I, 2),
]
CALIDAD = [ # for cgAutorizar and cgInformarCalidad (WSLPGv1.10)
    ('tipo_reg', 1, A), # Q: quality
('analisis_muestra', 10, N),
('nro_boletin', 10, N),
    ('cod_grado', 2, A), # new in WSLPGv1.10: G1 G2 ...
    ('valor_grado', 4, I, 3), # only for cod_grado F1 F2 ...
('valor_contenido_proteico', 5, I, 3),
('valor_factor', 6, I, 3),
]
FACTURA_PAPEL = [ # for lsgAjustar (WSLPGv1.15)
    ('tipo_reg', 1, A), # F: paper invoice
('nro_cai', 14, N),
('nro_factura_papel', 12, N),
('fecha_factura', 10, A),
('tipo_comprobante', 3, N),
]
FUSION = [ # for liquidacionAjustarUnificado (WSLPGv1.19)
    ('tipo_reg', 1, A), # f: merger (fusion)
('nro_ing_brutos', 15, N),
('nro_actividad', 5, N),
]
EVENTO = [
    ('tipo_reg', 1, A), # E: Event
('codigo', 4, A),
('descripcion', 250, A),
]
ERROR = [
('tipo_reg', 1, A), # R: Error
('codigo', 4, A),
('descripcion', 250, A),
]
DATO = [
    ('tipo_reg', 1, A), # 9: additional data
('campo', 25, A),
('valor', 250, A),
]
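# Illustrative sketch (not used by this module): how a fixed-width field spec
# (name, length, type[, decimals]) might be rendered to text. The real
# encoding/decoding is done by leer/escribir in utils and may differ; the
# padding rules below are only inferred from the column comments above.
def _ejemplo_formatear_campo(valor, longitud, tipo, decimales=2):
    if tipo == N: # numeric: zero-padded integer
        return str(int(valor or 0)).rjust(longitud, "0")
    elif tipo == I: # amount: implied decimal point ("3.2" = 3 int + 2 dec digits)
        return str(int(round(float(valor or 0) * 10 ** decimales))).rjust(longitud, "0")
    else: # alphanumeric: left-justified, space-padded, truncated
        return str(valor or "").ljust(longitud)[:longitud]
# e.g.: _ejemplo_formatear_campo(123.45, 10, I, 2) -> "0000012345"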
class WSLPG(BaseWS):
"Interfaz para el WebService de Liquidación Primaria de Granos"
_public_methods_ = ['Conectar', 'Dummy', 'SetTicketAcceso', 'DebugLog',
'AutorizarLiquidacion',
'AutorizarLiquidacionSecundaria',
'AnularLiquidacionSecundaria','AnularLiquidacion',
'AutorizarAnticipo', 'CancelarAnticipo',
'CrearLiquidacion', 'CrearLiqSecundariaBase',
'AgregarCertificado', 'AgregarRetencion',
'AgregarDeduccion', 'AgregarPercepcion',
'AgregarOpcional', 'AgregarCalidad',
'AgregarFacturaPapel', 'AgregarFusion',
'ConsultarLiquidacion', 'ConsultarUltNroOrden',
'ConsultarLiquidacionSecundaria',
'ConsultarLiquidacionSecundariaUltNroOrden',
'CrearAjusteBase',
'CrearAjusteDebito', 'CrearAjusteCredito',
'AjustarLiquidacionUnificado',
'AjustarLiquidacionUnificadoPapel',
'AjustarLiquidacionContrato',
'AjustarLiquidacionSecundaria',
'AnalizarAjusteDebito', 'AnalizarAjusteCredito',
'AsociarLiquidacionAContrato', 'ConsultarAjuste',
'ConsultarLiquidacionesPorContrato',
'ConsultarLiquidacionesSecundariasPorContrato',
'AsociarLiquidacionSecundariaAContrato',
'CrearCertificacionCabecera',
'AgregarCertificacionPrimaria',
'AgregarCertificacionRetiroTransferencia',
'AgregarCertificacionPreexistente',
'AgregarDetalleMuestraAnalisis', 'AgregarCTG',
'AutorizarCertificacion',
'InformarCalidadCertificacion', 'BuscarCTG',
'AnularCertificacion',
'ConsultarCertificacion',
'ConsultarCertificacionUltNroOrden',
'BuscarCertConSaldoDisponible',
'LeerDatosLiquidacion',
'ConsultarCampanias',
'ConsultarTipoGrano',
'ConsultarGradoEntregadoXTipoGrano',
'ConsultarCodigoGradoReferencia',
'ConsultarTipoCertificadoDeposito',
'ConsultarTipoDeduccion',
'ConsultarTipoRetencion',
'ConsultarPuerto',
'ConsultarTipoActividad',
'ConsultarTipoActividadRepresentado',
'ConsultarProvincias',
'ConsultarLocalidadesPorProvincia',
'ConsultarTiposOperacion',
'BuscarLocalidades',
'AnalizarXml', 'ObtenerTagXml', 'LoadTestXML',
'SetParametros', 'SetParametro', 'GetParametro',
'CargarFormatoPDF', 'AgregarCampoPDF', 'AgregarDatoPDF',
'CrearPlantillaPDF', 'ProcesarPlantillaPDF',
'GenerarPDF', 'MostrarPDF',
]
_public_attrs_ = ['Token', 'Sign', 'Cuit',
'AppServerStatus', 'DbServerStatus', 'AuthServerStatus',
'Excepcion', 'ErrCode', 'ErrMsg', 'LanzarExcepciones', 'Errores',
'XmlRequest', 'XmlResponse', 'Version', 'Traceback', 'InstallDir',
'COE', 'COEAjustado', 'Estado', 'Resultado', 'NroOrden',
'TotalDeduccion', 'TotalRetencion', 'TotalRetencionAfip',
'TotalOtrasRetenciones', 'TotalNetoAPagar', 'TotalPagoSegunCondicion',
'TotalIvaRg4310_18', 'Subtotal', 'TotalIva105', 'TotalIva21',
'TotalRetencionesGanancias', 'TotalRetencionesIVA', 'NroContrato',
'FechaCertificacion',
]
_reg_progid_ = "WSLPG"
_reg_clsid_ = "{9D21C513-21A6-413C-8592-047357692608}"
    # Global variables for BaseWS:
HOMO = HOMO
WSDL = WSDL
LanzarExcepciones = False
Version = "%s %s" % (__version__, HOMO and 'Homologación' or '')
def inicializar(self):
BaseWS.inicializar(self)
self.AppServerStatus = self.DbServerStatus = self.AuthServerStatus = None
self.errores = []
self.COE = self.COEAjustado = ""
self.Estado = self.Resultado = self.NroOrden = self.NroContrato = ''
self.TotalDeduccion = ""
self.TotalRetencion = ""
self.TotalRetencionAfip = ""
self.TotalOtrasRetenciones = ""
self.TotalNetoAPagar = ""
self.TotalIvaRg4310_18 = ""
self.TotalPagoSegunCondicion = ""
self.Subtotal = self.TotalIva105 = self.TotalIva21 = ""
self.TotalRetencionesGanancias = self.TotalRetencionesIVA = ""
self.TotalPercepcion = ""
self.FechaCertificacion = ""
self.datos = {}
@inicializar_y_capturar_excepciones
def Conectar(self, cache=None, url="", proxy="", wrapper="", cacert=None, timeout=30):
"Establecer la conexión a los servidores de la AFIP"
# llamo al constructor heredado:
ok = BaseWS.Conectar(self, cache, url, proxy, wrapper, cacert, timeout)
if ok:
            # fix the server location (http port 80 in the WSDL)
location = self.client.services['LpgService']['ports']['LpgEndPoint']['location']
if location.startswith("http://"):
print "Corrigiendo WSDL ...", location,
location = location.replace("http://", "https://").replace(":80", ":443")
self.client.services['LpgService']['ports']['LpgEndPoint']['location'] = location
print location
try:
                # try to open the persistent localities dictionary
import wslpg_datos
localidades_db = os.path.join(self.cache, "localidades.dat")
                # check write access to the directory, otherwise open read-only
flag = os.access(self.cache, os.W_OK) and 'c' or 'r'
wslpg_datos.LOCALIDADES = shelve.open(localidades_db, flag=flag)
                if DEBUG: print "Localities in DB:", len(wslpg_datos.LOCALIDADES)
                self.Traceback = "Localities in DB: %s" % len(wslpg_datos.LOCALIDADES)
except Exception, e:
print "ADVERTENCIA: No se pudo abrir la bbdd de localidades:", e
self.Excepcion = str(e)
return ok
def __analizar_errores(self, ret):
"Comprueba y extrae errores si existen en la respuesta XML"
errores = []
if 'errores' in ret:
errores.extend(ret['errores'])
if 'erroresFormato' in ret:
errores.extend(ret['erroresFormato'])
if errores:
self.Errores = ["%(codigo)s: %(descripcion)s" % err['error']
for err in errores]
self.errores = [
{'codigo': err['error']['codigo'],
'descripcion': err['error']['descripcion'].replace("\n", "")
.replace("\r", "")}
for err in errores]
self.ErrCode = ' '.join(self.Errores)
self.ErrMsg = '\n'.join(self.Errores)
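    # After a failed call, errors (if any) are exposed both as formatted
    # strings (Errores / ErrCode / ErrMsg) and as cleaned dicts, e.g.
    # (illustrative): self.errores == [{'codigo': '1630', 'descripcion': '...'}]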
@inicializar_y_capturar_excepciones
def Dummy(self):
"Obtener el estado de los servidores de la AFIP"
results = self.client.dummy()['return']
self.AppServerStatus = str(results['appserver'])
self.DbServerStatus = str(results['dbserver'])
self.AuthServerStatus = str(results['authserver'])
return True
@inicializar_y_capturar_excepciones
def CrearLiquidacion(self, nro_orden=None, cuit_comprador=None,
nro_act_comprador=None, nro_ing_bruto_comprador=None,
cod_tipo_operacion=None,
es_liquidacion_propia=None, es_canje=None,
cod_puerto=None, des_puerto_localidad=None, cod_grano=None,
cuit_vendedor=None, nro_ing_bruto_vendedor=None,
actua_corredor=None, liquida_corredor=None, cuit_corredor=None,
comision_corredor=None, nro_ing_bruto_corredor=None,
fecha_precio_operacion=None,
precio_ref_tn=None, cod_grado_ref=None, cod_grado_ent=None,
factor_ent=None, precio_flete_tn=None, cont_proteico=None,
alic_iva_operacion=None, campania_ppal=None,
cod_localidad_procedencia=None,
datos_adicionales=None, pto_emision=1, cod_prov_procedencia=None,
peso_neto_sin_certificado=None, val_grado_ent=None,
cod_localidad_procedencia_sin_certificado=None,
cod_prov_procedencia_sin_certificado=None,
nro_contrato=None,
**kwargs
):
"Inicializa internamente los datos de una liquidación para autorizar"
# limpio los campos especiales (segun validaciones de AFIP)
if alic_iva_operacion == 0:
alic_iva_operacion = None # no informar alicuota p/ monotributo
if val_grado_ent == 0:
val_grado_ent = None
# borrando datos corredor si no corresponden
if actua_corredor == "N":
cuit_corredor = None
comision_corredor = None
nro_ing_bruto_corredor = None
# si no corresponde elimino el peso neto certificado campo opcional
if not peso_neto_sin_certificado or not int(peso_neto_sin_certificado):
peso_neto_sin_certificado = None
if cod_puerto and int(cod_puerto) != 14:
des_puerto_localidad = None # validacion 1630
# limpio los campos opcionales para no enviarlos si no corresponde:
if cod_grado_ref == "":
cod_grado_ref = None
if cod_grado_ent == "":
cod_grado_ent = None
if val_grado_ent == 0:
val_grado_ent = None
# creo el diccionario con los campos generales de la liquidación:
self.liquidacion = dict(
ptoEmision=pto_emision,
nroOrden=nro_orden,
cuitComprador=cuit_comprador,
nroActComprador=nro_act_comprador,
nroIngBrutoComprador=nro_ing_bruto_comprador,
codTipoOperacion=cod_tipo_operacion,
esLiquidacionPropia=es_liquidacion_propia,
esCanje=es_canje,
codPuerto=cod_puerto,
desPuertoLocalidad=des_puerto_localidad,
codGrano=cod_grano,
cuitVendedor=cuit_vendedor,
nroIngBrutoVendedor=nro_ing_bruto_vendedor,
actuaCorredor=actua_corredor,
liquidaCorredor=liquida_corredor,
cuitCorredor=cuit_corredor,
comisionCorredor=comision_corredor,
nroIngBrutoCorredor=nro_ing_bruto_corredor,
fechaPrecioOperacion=fecha_precio_operacion,
precioRefTn=precio_ref_tn,
codGradoRef=cod_grado_ref,
codGradoEnt=cod_grado_ent,
valGradoEnt=val_grado_ent,
factorEnt=factor_ent,
precioFleteTn=precio_flete_tn,
contProteico=cont_proteico,
alicIvaOperacion=alic_iva_operacion,
campaniaPPal=campania_ppal,
codLocalidadProcedencia=cod_localidad_procedencia,
codProvProcedencia=cod_prov_procedencia,
datosAdicionales=datos_adicionales,
pesoNetoSinCertificado=peso_neto_sin_certificado,
numeroContrato=nro_contrato or None,
certificados=[],
)
        # for backwards compatibility, "copy" the fields when there is no cert.:
if peso_neto_sin_certificado:
if cod_localidad_procedencia_sin_certificado is None:
cod_localidad_procedencia_sin_certificado = cod_localidad_procedencia
if cod_prov_procedencia_sin_certificado is None:
cod_prov_procedencia_sin_certificado = cod_prov_procedencia
self.liquidacion.update(dict(
codLocalidadProcedenciaSinCertificado=cod_localidad_procedencia_sin_certificado,
codProvProcedenciaSinCertificado=cod_prov_procedencia_sin_certificado,
))
        # initialize the lists that will hold the withholdings and deductions:
self.retenciones = []
self.deducciones = []
self.percepciones = []
        self.opcionales = [] # for advances
        # clear the internal structures not usable in this case
self.certificacion = None
return True
@inicializar_y_capturar_excepciones
def CrearLiqSecundariaBase(self, pto_emision=1, nro_orden=None,
nro_contrato=None,
cuit_comprador=None, nro_ing_bruto_comprador=None,
cod_puerto=None, des_puerto_localidad=None,
cod_grano=None, cantidad_tn=None,
            cuit_vendedor=None, nro_act_vendedor=None, # new!!
nro_ing_bruto_vendedor=None,
actua_corredor=None, liquida_corredor=None, cuit_corredor=None,
nro_ing_bruto_corredor=None,
fecha_precio_operacion=None, precio_ref_tn=None,
precio_operacion=None, alic_iva_operacion=None, campania_ppal=None,
cod_localidad_procedencia=None, cod_prov_procedencia=None,
datos_adicionales=None,
**kwargs):
"Inicializa los datos de una liquidación secundaria de granos (base)"
# creo el diccionario con los campos generales de la liquidación:
self.liquidacion = dict(
ptoEmision=pto_emision, nroOrden=nro_orden,
numeroContrato=nro_contrato or None, cuitComprador=cuit_comprador,
nroIngBrutoComprador=nro_ing_bruto_comprador,
codPuerto=cod_puerto, desPuertoLocalidad=des_puerto_localidad,
codGrano=cod_grano, cantidadTn=cantidad_tn,
cuitVendedor=cuit_vendedor, nroActVendedor=nro_act_vendedor,
nroIngBrutoVendedor=nro_ing_bruto_vendedor,
actuaCorredor=actua_corredor, liquidaCorredor=liquida_corredor,
cuitCorredor=cuit_corredor or None,
nroIngBrutoCorredor=nro_ing_bruto_corredor or None,
fechaPrecioOperacion=fecha_precio_operacion,
precioRefTn=precio_ref_tn, precioOperacion=precio_operacion,
alicIvaOperacion=alic_iva_operacion or None,
campaniaPPal=campania_ppal,
codLocalidad=cod_localidad_procedencia,
codProvincia=cod_prov_procedencia,
datosAdicionales=datos_adicionales,
)
        # initialize the lists that will hold the withholdings and deductions:
self.deducciones = []
self.percepciones = []
self.opcionales = []
self.factura_papel = None
return True
@inicializar_y_capturar_excepciones
def AgregarCertificado(self, tipo_certificado_deposito=None,
nro_certificado_deposito=None,
peso_neto=None,
cod_localidad_procedencia=None,
cod_prov_procedencia=None,
campania=None, fecha_cierre=None,
peso_neto_total_certificado=None,
coe_certificado_deposito=None, # WSLPGv1.6
**kwargs):
"Agrego el certificado a la liquidación / certificación de granos"
# limpio campos opcionales:
if not peso_neto_total_certificado:
            peso_neto_total_certificado = None # 0 is not valid
        # coe_certificado_deposito is not for LPG; unify in future versions
if tipo_certificado_deposito and int(tipo_certificado_deposito) == 332:
if coe_certificado_deposito and long(coe_certificado_deposito):
nro_certificado_deposito = coe_certificado_deposito
coe_certificado_deposito = None
cert = dict(
tipoCertificadoDeposito=tipo_certificado_deposito,
nroCertificadoDeposito=nro_certificado_deposito,
pesoNeto=peso_neto,
codLocalidadProcedencia=cod_localidad_procedencia,
codProvProcedencia=cod_prov_procedencia,
campania=campania,
fechaCierre=fecha_cierre,
pesoNetoTotalCertificado=peso_neto_total_certificado,
coeCertificadoDeposito=coe_certificado_deposito,
coe=coe_certificado_deposito, # WSLPGv1.17
pesoAjustado=peso_neto, # WSLPGv1.17
)
if self.liquidacion:
self.liquidacion['certificados'].append({'certificado': cert})
else:
self.certificacion['retiroTransferencia']['certificadoDeposito'] = cert
return True
@inicializar_y_capturar_excepciones
def AgregarRetencion(self, codigo_concepto, detalle_aclaratorio,
base_calculo, alicuota,
nro_certificado_retencion=None,
fecha_certificado_retencion=None,
importe_certificado_retencion=None,
**kwargs):
"Agrega la información referente a las retenciones de la liquidación"
# limpio los campos opcionales:
if fecha_certificado_retencion is not None and not fecha_certificado_retencion.strip():
fecha_certificado_retencion = None
if importe_certificado_retencion is not None and not float(importe_certificado_retencion):
importe_certificado_retencion = None
if nro_certificado_retencion is not None and not int(nro_certificado_retencion):
nro_certificado_retencion = None
self.retenciones.append(dict(
retencion=dict(
codigoConcepto=codigo_concepto,
detalleAclaratorio=detalle_aclaratorio,
baseCalculo=base_calculo,
alicuota=alicuota,
nroCertificadoRetencion=nro_certificado_retencion,
fechaCertificadoRetencion=fecha_certificado_retencion,
importeCertificadoRetencion=importe_certificado_retencion,
))
)
return True
@inicializar_y_capturar_excepciones
def AgregarDeduccion(self, codigo_concepto=None, detalle_aclaratorio=None,
dias_almacenaje=None, precio_pkg_diario=None,
comision_gastos_adm=None, base_calculo=None,
alicuota=None, **kwargs):
"Agrega la información referente a las deducciones de la liquidación."
# limpiar campo según validación (comision_gastos_adm puede ser 0.00!)
if codigo_concepto != "CO" and comision_gastos_adm is not None \
and float(comision_gastos_adm) == 0:
comision_gastos_adm = None
        # do not send fields, to prevent AFIP errors 1705, 1707 and 1708
if base_calculo is not None:
if codigo_concepto == "AL":
base_calculo = None
if codigo_concepto == "CO" and float(base_calculo) == 0:
                base_calculo = None # do not send, for backwards compatibility
if codigo_concepto != "AL":
dias_almacenaje = None
precio_pkg_diario = None
self.deducciones.append(dict(
deduccion=dict(
codigoConcepto=codigo_concepto,
detalleAclaratorio=detalle_aclaratorio,
diasAlmacenaje=dias_almacenaje,
precioPKGdiario=precio_pkg_diario,
comisionGastosAdm=comision_gastos_adm,
baseCalculo=base_calculo,
alicuotaIva=alicuota,
))
)
return True
@inicializar_y_capturar_excepciones
def AgregarPercepcion(self, codigo_concepto=None, detalle_aclaratoria=None,
base_calculo=None, alicuota=None, importe_final=None,
**kwargs):
"Agrega la información referente a las percepciones de la liquidación"
# liquidación secundaria (sin importe final)
self.percepciones.append(dict(
percepcion=dict(
detalleAclaratoria=detalle_aclaratoria,
baseCalculo=base_calculo,
alicuota=alicuota,
importeFinal=importe_final,
))
)
return True
@inicializar_y_capturar_excepciones
def AgregarOpcional(self, codigo=None, descripcion=None, **kwargs):
"Agrega la información referente a los opcionales de la liq. seq."
self.opcionales.append(dict(
opcional=dict(
codigo=codigo,
descripcion=descripcion,
))
)
return True
@inicializar_y_capturar_excepciones
def AgregarFacturaPapel(self, nro_cai=None, nro_factura_papel=None,
fecha_factura=None, tipo_comprobante=None,
**kwargs):
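        "Add the paper invoice data for the secondary liquidation (WSLPGv1.15)"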
self.factura_papel = dict(
nroCAI=nro_cai,
nroFacturaPapel=nro_factura_papel,
fechaFactura=fecha_factura,
tipoComprobante=tipo_comprobante,
)
return True
@inicializar_y_capturar_excepciones
def AutorizarLiquidacion(self):
"Autorizar Liquidación Primaria Electrónica de Granos"
# limpio los elementos que no correspondan por estar vacios:
if not self.liquidacion['certificados']:
del self.liquidacion['certificados']
if not self.retenciones:
self.retenciones = None
if not self.deducciones:
self.deducciones = None
if not self.percepciones:
self.percepciones = None
else:
            # adjust the field names that differ between LPG and LSG
for it in self.percepciones:
per = it['percepcion']
per['descripcion'] = per.pop("detalleAclaratoria")
del per['baseCalculo']
del per['alicuota']
        # call the web service:
ret = self.client.liquidacionAutorizar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
liquidacion=self.liquidacion,
retenciones=self.retenciones,
deducciones=self.deducciones,
percepciones=self.percepciones,
)
        # analyze the response
ret = ret['liqReturn']
self.__analizar_errores(ret)
self.AnalizarLiquidacion(ret.get('autorizacion'), self.liquidacion)
return True
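    # Illustrative authorization flow (sketch only, hypothetical values):
    #   wslpg.CrearLiquidacion(nro_orden=1, cuit_comprador="...", cod_grano=...)
    #   wslpg.AgregarCertificado(...) # optional, repeatable
    #   wslpg.AgregarRetencion(...) # optional, repeatable
    #   wslpg.AutorizarLiquidacion()
    #   print wslpg.COE, wslpg.Estado # results are exposed as attributes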
@inicializar_y_capturar_excepciones
def AutorizarLiquidacionSecundaria(self):
"Autorizar Liquidación Secundaria Electrónica de Granos"
# extraer y adaptar los campos para liq. sec.
if self.deducciones:
self.liquidacion['deduccion'] = []
for it in self.deducciones:
                ded = it['deduccion'] # not grouped
self.liquidacion['deduccion'].append({
'detalleAclaratoria': ded['detalleAclaratorio'],
'baseCalculo': ded['baseCalculo'],
'alicuotaIVA': ded['alicuotaIva']})
if self.percepciones:
self.liquidacion['percepcion'] = []
for it in self.percepciones:
                per = it['percepcion'] # not grouped
self.liquidacion['percepcion'].append(per)
if self.opcionales:
            self.liquidacion['opcionales'] = self.opcionales # grouped, ok
        # call the web service:
ret = self.client.lsgAutorizar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
liqSecundariaBase=self.liquidacion,
facturaPapel=self.factura_papel,
)
        # analyze the response
ret = ret['oReturn']
self.__analizar_errores(ret)
self.AnalizarLiquidacion(ret.get('autorizacion'), self.liquidacion)
return True
@inicializar_y_capturar_excepciones
def AutorizarAnticipo(self):
"Autorizar Anticipo de una Liquidación Primaria Electrónica de Granos"
# extraer y adaptar los campos para el anticipo
anticipo = {"liquidacion": self.liquidacion}
liq = anticipo["liquidacion"]
liq["campaniaPpal"] = self.liquidacion["campaniaPPal"]
liq["codLocProcedencia"] = self.liquidacion["codLocalidadProcedencia"]
liq["descPuertoLocalidad"] = self.liquidacion["desPuertoLocalidad"]
if self.opcionales:
liq['opcionales'] = self.opcionales
if self.retenciones:
anticipo['retenciones'] = self.retenciones
if self.deducciones:
anticipo['deducciones'] = self.deducciones
        # call the web service:
ret = self.client.lpgAutorizarAnticipo(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
anticipo=anticipo,
)
        # analyze the response
ret = ret['liqReturn']
self.__analizar_errores(ret)
self.AnalizarLiquidacion(ret.get('autorizacion'), self.liquidacion)
return True
@inicializar_y_capturar_excepciones
def CancelarAnticipo(self, pto_emision=None, nro_orden=None, coe=None,
pdf=None):
"Cancelar Anticipo de una Liquidación Primaria Electrónica de Granos"
# llamo al webservice:
ret = self.client.lpgCancelarAnticipo(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
coe=coe,
ptoEmision=pto_emision,
nroOrden=nro_orden,
pdf="S" if pdf else "N",
)
        # analyze the response
ret = ret['liqConsReturn']
self.__analizar_errores(ret)
if 'liquidacion' in ret:
aut = ret['autorizacion']
liq = ret['liquidacion']
self.AnalizarLiquidacion(aut, liq)
            # save the PDF if a filename was given and it came in the response:
if pdf and 'pdf' in ret:
open(pdf, "wb").write(ret['pdf'])
return True
def AnalizarLiquidacion(self, aut, liq=None, ajuste=False):
"Método interno para analizar la respuesta de AFIP"
# proceso los datos básicos de la liquidación (devuelto por consultar):
if liq:
self.params_out = dict(
pto_emision=liq.get('ptoEmision'),
nro_orden=liq.get('nroOrden'),
cuit_comprador=liq.get('cuitComprador'),
nro_act_comprador=liq.get('nroActComprador'),
nro_ing_bruto_comprador=liq.get('nroIngBrutoComprador'),
cod_tipo_operacion=liq.get('codTipoOperacion'),
es_liquidacion_propia=liq.get('esLiquidacionPropia'),
es_canje=liq.get('esCanje'),
cod_puerto=liq.get('codPuerto'),
des_puerto_localidad=liq.get('desPuertoLocalidad'),
cod_grano=liq.get('codGrano'),
cuit_vendedor=liq.get('cuitVendedor'),
nro_ing_bruto_vendedor=liq.get('nroIngBrutoVendedor'),
actua_corredor=liq.get('actuaCorredor'),
liquida_corredor=liq.get('liquidaCorredor'),
cuit_corredor=liq.get('cuitCorredor'),
comision_corredor=liq.get('comisionCorredor'),
nro_ing_bruto_corredor=liq.get('nroIngBrutoCorredor'),
fecha_precio_operacion=liq.get('fechaPrecioOperacion'),
precio_ref_tn=liq.get('precioRefTn'),
cod_grado_ref=liq.get('codGradoRef'),
cod_grado_ent=liq.get('codGradoEnt'),
factor_ent=liq.get('factorEnt'),
precio_flete_tn=liq.get('precioFleteTn'),
cont_proteico=liq.get('contProteico'),
alic_iva_operacion=liq.get('alicIvaOperacion'),
campania_ppal=liq.get('campaniaPPal'),
cod_localidad_procedencia=liq.get('codLocalidadProcedencia'),
cod_prov_procedencia=liq.get('codProvProcedencia'),
datos_adicionales=liq.get('datosAdicionales'),
peso_neto_sin_certificado=liq.get('pesoNetoSinCertificado'),
cod_localidad_procedencia_sin_certificado=liq.get('codLocalidadProcedenciaSinCertificado'),
cod_prov_procedencia_sin_certificado=liq.get('codProvProcedenciaSinCertificado'),
certificados=[],
)
if ajuste:
self.params_out.update(
                # adjustments:
diferencia_peso_neto=liq.get('diferenciaPesoNeto'),
diferencia_precio_operacion=liq.get('diferenciaPrecioOperacion'),
cod_grado=liq.get('codGrado'),
val_grado=liq.get('valGrado'),
factor=liq.get('factor'),
diferencia_precio_flete_tn=liq.get('diferenciaPrecioFleteTn'),
concepto_importe_iva_0=liq.get('conceptoImporteIva0'),
importe_ajustar_iva_0=liq.get('importeAjustarIva0'),
concepto_importe_iva_105=liq.get('conceptoImporteIva105'),
importe_ajustar_iva_105=liq.get('importeAjustarIva105'),
concepto_importe_iva_21=liq.get('conceptoImporteIva21'),
importe_ajustar_iva_21=liq.get('importeAjustarIva21'),
)
            # analyze the detail of adjusted amounts broken down by IVA rate
            # (the same fields are used for compatibility and consistency)
for it in liq.get("importes", liq.get("importe")):
                # in LSG adjustments the amounts are not grouped in a subtype...
if 'importeReturn' in it:
                        it = it['importeReturn'][0] # TODO: review SOAP
tasa = "iva_%s" % str(it['alicuota']).replace(".", "").strip()
self.params_out["concepto_importe_%s" % tasa] = it['concepto']
self.params_out["importe_ajustar_%s" % tasa] = it['importe']
self.params_out["iva_calculado_%s" % tasa] = it['ivaCalculado']
if 'certificados' in liq:
for c in liq['certificados']:
cert = c['certificado']
self.params_out['certificados'].append(dict(
tipo_certificado_deposito=cert['tipoCertificadoDeposito'],
nro_certificado_deposito=cert['nroCertificadoDeposito'],
peso_neto=cert['pesoNeto'],
cod_localidad_procedencia=cert['codLocalidadProcedencia'],
cod_prov_procedencia=cert['codProvProcedencia'],
campania=cert['campania'],
fecha_cierre=cert['fechaCierre'],
))
self.params_out['errores'] = self.errores
        # process the response of authorize, adjust (and query):
if aut:
self.TotalDeduccion = aut.get('totalDeduccion')
self.TotalRetencion = aut.get('totalRetencion')
self.TotalRetencionAfip = aut.get('totalRetencionAfip')
self.TotalOtrasRetenciones = aut.get('totalOtrasRetenciones')
self.TotalNetoAPagar = aut.get('totalNetoAPagar')
self.TotalIvaRg4310_18 = aut.get('totalIvaRg4310_18')
self.TotalPagoSegunCondicion = aut.get('totalPagoSegunCondicion')
self.COE = str(aut.get('coe', ''))
self.COEAjustado = aut.get('coeAjustado')
self.Estado = aut.get('estado', '')
self.NroContrato = aut.get('numeroContrato', '')
            # update the output parameters:
self.params_out['coe'] = self.COE
self.params_out['coe_ajustado'] = self.COEAjustado
self.params_out['estado'] = self.Estado
self.params_out['total_deduccion'] = self.TotalDeduccion
self.params_out['total_retencion'] = self.TotalRetencion
self.params_out['total_retencion_afip'] = self.TotalRetencionAfip
self.params_out['total_otras_retenciones'] = self.TotalOtrasRetenciones
self.params_out['total_neto_a_pagar'] = self.TotalNetoAPagar
self.params_out['total_iva_rg_4310_18'] = self.TotalIvaRg4310_18
self.params_out['total_pago_segun_condicion'] = self.TotalPagoSegunCondicion
            # additional data:
self.NroOrden = self.params_out['nro_orden'] = aut.get('nroOrden')
self.params_out['cod_tipo_ajuste'] = aut.get('codTipoAjuste')
fecha = aut.get('fechaLiquidacion')
if fecha:
fecha = str(fecha)
self.params_out['fecha_liquidacion'] = fecha
self.params_out['importe_iva'] = aut.get('importeIva')
self.params_out['nro_op_comercial'] = aut.get('nroOpComercial')
self.params_out['operacion_con_iva'] = aut.get('operacionConIva')
self.params_out['precio_operacion'] = aut.get('precioOperacion')
self.params_out['total_peso_neto'] = aut.get('totalPesoNeto')
self.params_out['subtotal'] = aut.get('subTotal')
            # LSG (specific):
self.params_out['total_deducciones'] = aut.get('totalDeducciones')
if 'todalPercepciones' in aut:
                # typo in AFIP's WSDL...
self.params_out['total_percepciones'] = aut.get('todalPercepciones')
else:
self.params_out['total_percepciones'] = aut.get('totalPercepciones')
            # substructures:
self.params_out['retenciones'] = []
self.params_out['deducciones'] = []
self.params_out['percepciones'] = []
for retret in aut.get("retenciones", []):
retret = retret['retencionReturn']
self.params_out['retenciones'].append({
'importe_retencion': retret['importeRetencion'],
'alicuota': retret['retencion'].get('alicuota'),
'base_calculo': retret['retencion'].get('baseCalculo'),
'codigo_concepto': retret['retencion'].get('codigoConcepto'),
'detalle_aclaratorio': (retret['retencion'].get('detalleAclaratorio') or "").replace("\n", ""),
'importe_certificado_retencion': retret['retencion'].get('importeCertificadoRetencion'),
'nro_certificado_retencion': retret['retencion'].get('nroCertificadoRetencion'),
'fecha_certificado_retencion': retret['retencion'].get('fechaCertificadoRetencion'),
})
for dedret in aut.get("deducciones", []):
dedret = dedret['deduccionReturn']
self.params_out['deducciones'].append({
'importe_deduccion': dedret['importeDeduccion'],
'importe_iva': dedret.get('importeIva'),
'alicuota': dedret['deduccion'].get('alicuotaIva'),
'base_calculo': dedret['deduccion'].get('baseCalculo'),
'codigo_concepto': dedret['deduccion'].get('codigoConcepto'),
'detalle_aclaratorio': dedret['deduccion'].get('detalleAclaratorio', "").replace("\n", ""),
'dias_almacenaje': dedret['deduccion'].get('diasAlmacenaje'),
'precio_pkg_diario': dedret['deduccion'].get('precioPKGdiario'),
'comision_gastos_adm': dedret['deduccion'].get('comisionGastosAdm'),
})
for perret in aut.get("percepciones", []):
perret = perret.get('percepcionReturn', perret)
self.params_out['percepciones'].append({
'importe_final': perret['percepcion']['importeFinal'],
'alicuota': perret['percepcion'].get('alicuota'),
'base_calculo': perret['percepcion'].get('baseCalculo'),
'descripcion': perret['percepcion'].get('descripcion', "").replace("\n", ""),
})
@inicializar_y_capturar_excepciones
def CrearAjusteBase(self,
            pto_emision=1, nro_orden=None,        # unified, contract, paper
            coe_ajustado=None,                    # unified
            nro_contrato=None,                    # contract
            tipo_formulario=None,                 # paper
            nro_formulario=None,                  # paper
            actividad=None,                       # contract / paper
            cod_grano=None,                       # contract / paper
            cuit_vendedor=None,                   # contract / paper
            cuit_comprador=None,                  # contract / paper
            cuit_corredor=None,                   # contract / paper
            nro_ing_bruto_vendedor=None,          # paper
            nro_ing_bruto_comprador=None,         # paper
            nro_ing_bruto_corredor=None,          # paper
            tipo_operacion=None,                  # paper
            precio_ref_tn=None,                   # contract
            cod_grado_ent=None,                   # contract
            val_grado_ent=None,                   # contract
            precio_flete_tn=None,                 # contract
            cod_puerto=None,                      # contract
            des_puerto_localidad=None,            # contract
            cod_provincia=None,                   # unified, contract, paper
            cod_localidad=None,                   # unified, contract, paper
            comision_corredor=None,               # paper
            **kwargs
):
"Inicializa internamente los datos de una liquidación para ajustar"
# ajusto nombre de campos para compatibilidad hacia atrás (encabezado):
if 'cod_localidad_procedencia' in kwargs:
cod_localidad = kwargs['cod_localidad_procedencia']
if 'cod_provincia_procedencia' in kwargs:
cod_provincia = kwargs['cod_provincia_procedencia']
if 'nro_act_comprador' in kwargs:
actividad = kwargs['nro_act_comprador']
if 'cod_tipo_operacion' in kwargs:
tipo_operacion = kwargs['cod_tipo_operacion']
        # clean up the special fields (per AFIP validations)
if val_grado_ent == 0:
val_grado_ent = None
        # remove data when it does not apply
if cuit_corredor and int(cuit_corredor) == 0:
cuit_corredor = None
comision_corredor = None
nro_ing_bruto_corredor = None
if cod_puerto and int(cod_puerto) != 14:
            des_puerto_localidad = None # validation 1630
        # clear the optional fields so they are not sent when not applicable:
if cod_grado_ent == "":
cod_grado_ent = None
if val_grado_ent == 0:
val_grado_ent = None
        # build the dict with the general fields of the base adjustment:
self.ajuste = { 'ajusteBase': {
'ptoEmision': pto_emision,
'nroOrden': nro_orden,
'coeAjustado': coe_ajustado,
'nroContrato': nro_contrato,
'tipoFormulario': tipo_formulario,
'nroFormulario': nro_formulario,
'actividad': actividad,
'codGrano': cod_grano,
'cuitVendedor': cuit_vendedor,
'cuitComprador': cuit_comprador,
'cuitCorredor': cuit_corredor,
'nroIngBrutoVendedor': nro_ing_bruto_vendedor,
'nroIngBrutoComprador': nro_ing_bruto_comprador,
'nroIngBrutoCorredor': nro_ing_bruto_corredor,
'tipoOperacion': tipo_operacion,
'codPuerto': cod_puerto,
'desPuertoLocalidad': des_puerto_localidad,
'comisionCorredor': comision_corredor,
'precioRefTn': precio_ref_tn,
'codGradoEnt': cod_grado_ent,
'valGradoEnt': val_grado_ent,
'precioFleteTn': precio_flete_tn,
'codLocalidad': cod_localidad,
'codProv': cod_provincia,
'certificados': [],
}
}
        # for compatibility with AgregarCertificado
self.liquidacion = self.ajuste['ajusteBase']
        # initialize temporaries
self.__ajuste_base = None
self.__ajuste_debito = None
self.__ajuste_credito = None
return True
@inicializar_y_capturar_excepciones
def CrearAjusteCredito(self,
            datos_adicionales=None,               # unified, contract, paper
            concepto_importe_iva_0=None,          # unified, contract, paper
            importe_ajustar_iva_0=None,           # unified, contract, paper
            concepto_importe_iva_105=None,        # unified, contract, paper
            importe_ajustar_iva_105=None,         # unified, contract, paper
            concepto_importe_iva_21=None,         # unified, contract, paper
            importe_ajustar_iva_21=None,          # unified, contract, paper
            diferencia_peso_neto=None,            # unified
            diferencia_precio_operacion=None,     # unified
            cod_grado=None,                       # unified
            val_grado=None,                       # unified
            factor=None,                          # unified
            diferencia_precio_flete_tn=None,      # unified
**kwargs
):
"Inicializa internamente los datos del crédito del ajuste"
self.ajuste['ajusteCredito'] = {
'diferenciaPesoNeto': diferencia_peso_neto,
'diferenciaPrecioOperacion': diferencia_precio_operacion,
'codGrado': cod_grado,
'valGrado': val_grado,
'factor': factor,
'diferenciaPrecioFleteTn': diferencia_precio_flete_tn,
'datosAdicionales': datos_adicionales,
'opcionales': None,
'conceptoImporteIva0': concepto_importe_iva_0,
'importeAjustarIva0': importe_ajustar_iva_0,
'conceptoImporteIva105': concepto_importe_iva_105,
'importeAjustarIva105': importe_ajustar_iva_105,
'conceptoImporteIva21': concepto_importe_iva_21,
'importeAjustarIva21': importe_ajustar_iva_21,
'deducciones': [],
'retenciones': [],
'percepciones': [],
'certificados': [],
}
        # link with AgregarOpcional:
        self.opcionales = self.ajuste['ajusteCredito']['opcionales']
        # link with AgregarRetencion and AgregarDeduccion
        self.deducciones = self.ajuste['ajusteCredito']['deducciones']
        self.retenciones = self.ajuste['ajusteCredito']['retenciones']
        # for LSG:
        self.percepciones = self.ajuste['ajusteCredito']['percepciones']
        # for compatibility with AgregarCertificado (WSLPGv1.17)
self.liquidacion = self.ajuste['ajusteCredito']
return True
@inicializar_y_capturar_excepciones
def CrearAjusteDebito(self,
            datos_adicionales=None,               # unified, contract, paper
            concepto_importe_iva_0=None,          # unified, contract, paper
            importe_ajustar_iva_0=None,           # unified, contract, paper
            concepto_importe_iva_105=None,        # unified, contract, paper
            importe_ajustar_iva_105=None,         # unified, contract, paper
            concepto_importe_iva_21=None,         # unified, contract, paper
            importe_ajustar_iva_21=None,          # unified, contract, paper
            diferencia_peso_neto=None,            # unified
            diferencia_precio_operacion=None,     # unified
            cod_grado=None,                       # unified
            val_grado=None,                       # unified
            factor=None,                          # unified
            diferencia_precio_flete_tn=None,      # unified
**kwargs
):
"Inicializa internamente los datos del crédito del ajuste"
self.ajuste['ajusteDebito'] = {
'diferenciaPesoNeto': diferencia_peso_neto,
'diferenciaPrecioOperacion': diferencia_precio_operacion,
'codGrado': cod_grado,
'valGrado': val_grado,
'factor': factor,
'diferenciaPrecioFleteTn': diferencia_precio_flete_tn,
'datosAdicionales': datos_adicionales,
'opcionales': None,
'conceptoImporteIva0': concepto_importe_iva_0,
'importeAjustarIva0': importe_ajustar_iva_0,
'conceptoImporteIva105': concepto_importe_iva_105,
'importeAjustarIva105': importe_ajustar_iva_105,
'conceptoImporteIva21': concepto_importe_iva_21,
'importeAjustarIva21': importe_ajustar_iva_21,
'deducciones': [],
'retenciones': [],
'percepciones': [],
'certificados': [],
}
        # link with AgregarOpcional:
        self.opcionales = self.ajuste['ajusteDebito']['opcionales']
        # link with AgregarRetencion and AgregarDeduccion
        self.deducciones = self.ajuste['ajusteDebito']['deducciones']
        self.retenciones = self.ajuste['ajusteDebito']['retenciones']
        # for LSG:
        self.percepciones = self.ajuste['ajusteDebito']['percepciones']
        # for compatibility with AgregarCertificado (WSLPGv1.17)
self.liquidacion = self.ajuste['ajusteDebito']
return True
def AgregarFusion(self, nro_ing_brutos, nro_actividad, **kwargs):
"Datos de comprador o vendedor según liquidación a ajustar (fusión.)"
self.ajuste['ajusteBase']['fusion'] = {'nroIngBrutos': nro_ing_brutos,
'nroActividad': nro_actividad,
}
return True
@inicializar_y_capturar_excepciones
def AjustarLiquidacionUnificado(self):
"Ajustar Liquidación Primaria de Granos"
# limpiar estructuras no utilizadas (si no hay deducciones / retenciones)
for k in ('ajusteDebito', 'ajusteCredito'):
if not any(self.ajuste[k].values()):
del self.ajuste[k]
else:
if not self.ajuste[k]['deducciones']:
del self.ajuste[k]['deducciones']
if not self.ajuste[k]['retenciones']:
del self.ajuste[k]['retenciones']
        # call the web service:
ret = self.client.liquidacionAjustarUnificado(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
**self.ajuste
)
        # analyze the result:
ret = ret['ajusteUnifReturn']
self.__analizar_errores(ret)
if 'ajusteUnificado' in ret:
aut = ret['ajusteUnificado']
self.AnalizarAjuste(aut)
return True
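    # Illustrative adjustment flow (sketch only, placeholder values):
    #   wslpg.CrearAjusteBase(pto_emision=1, nro_orden=1, coe_ajustado="...")
    #   wslpg.CrearAjusteCredito(...) # and/or CrearAjusteDebito(...)
    #   wslpg.AjustarLiquidacionUnificado()
    #   wslpg.AnalizarAjusteCredito() # load the credit totals into params_out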
@inicializar_y_capturar_excepciones
def AjustarLiquidacionUnificadoPapel(self):
"Ajustar Liquidación realizada en un formulario F1116 B / C (papel)"
# limpiar arrays no enviados:
if not self.ajuste['ajusteBase']['certificados']:
del self.ajuste['ajusteBase']['certificados']
for k1 in ('ajusteCredito', 'ajusteDebito'):
for k2 in ('retenciones', 'deducciones'):
if not self.ajuste[k1][k2]:
del self.ajuste[k1][k2]
ret = self.client.liquidacionAjustarUnificadoPapel(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
**self.ajuste
)
ret = ret['ajustePapelReturn']
self.__analizar_errores(ret)
if 'ajustePapel' in ret:
aut = ret['ajustePapel']
self.AnalizarAjuste(aut)
return True
@inicializar_y_capturar_excepciones
def AjustarLiquidacionContrato(self):
"Ajustar Liquidación activas relacionadas a un contrato"
# limpiar arrays no enviados:
if not self.ajuste['ajusteBase']['certificados']:
del self.ajuste['ajusteBase']['certificados']
for k1 in ('ajusteCredito', 'ajusteDebito'):
for k2 in ('retenciones', 'deducciones'):
if not self.ajuste[k1][k2]:
del self.ajuste[k1][k2]
ret = self.client.liquidacionAjustarContrato(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
**self.ajuste
)
ret = ret['ajusteContratoReturn']
self.__analizar_errores(ret)
if 'ajusteContrato' in ret:
aut = ret['ajusteContrato']
self.AnalizarAjuste(aut)
return True
@inicializar_y_capturar_excepciones
def AjustarLiquidacionSecundaria(self):
"Ajustar Liquidación Secundaria de Granos"
# limpiar estructuras no utilizadas (si no hay deducciones / retenciones)
for k in ('ajusteDebito', 'ajusteCredito'):
if k not in self.ajuste:
                # skip if no credit / debit adjustment structure was added
continue
elif not any(self.ajuste[k].values()):
                # remove the empty credit / debit structure
del self.ajuste[k]
else:
                # adjust the naming differences between LSG and LPG
for tasa in ("0", "105", "21"):
tasa_lsg = "10" if tasa == "105" else tasa
self.ajuste[k]['importeAjustar%s' % tasa_lsg] = self.ajuste[k]['importeAjustarIva%s' % tasa]
self.ajuste[k]['conceptoIva%s' % tasa_lsg] = self.ajuste[k]['conceptoImporteIva%s' % tasa]
                # do not send an empty percepciones tag (not grouped in a subtype)
if self.ajuste[k]['percepciones']:
self.ajuste[k]['percepcion'] = [
per["percepcion"] for per
in self.ajuste[k]['percepciones']]
del self.ajuste[k]['percepciones']
base = self.ajuste['ajusteBase']
base['coe'] = base['coeAjustado']
base['codProvincia'] = base['codProv']
        # call the web service:
if base['nroContrato'] is not None and long(base['nroContrato']):
metodo = self.client.lsgAjustarXContrato
else:
metodo = self.client.lsgAjustarXCoe
ret = metodo(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
ajusteCredito=self.ajuste.get('ajusteCredito'),
ajusteDebito=self.ajuste.get('ajusteDebito'),
**base
)
        # analyze the result:
ret = ret['oReturn']
self.__analizar_errores(ret)
if ret:
self.AnalizarAjuste(ret)
return True
def AnalizarAjuste(self, aut, base=True):
"Método interno para analizar la respuesta de AFIP (ajustes)"
self.__ajuste_base = None
self.__ajuste_debito = None
self.__ajuste_credito = None
        # for compatibility with PDF generation (fill in the data)
if hasattr(self, "liquidacion") and self.liquidacion and base:
self.AnalizarLiquidacion(aut=None, liq=self.liquidacion)
self.params_out['errores'] = self.errores
        # process the response of authorize / adjust (and query):
if aut:
            # on cancellation, or when it is not an adjustment, no data is returned:
self.COE = str(aut.get('coe', ""))
self.COEAjustado = aut.get('coeAjustado')
self.NroContrato = aut.get('nroContrato')
self.Estado = aut.get('estado', "")
totunif = aut.get("totalesUnificados") or {}
self.Subtotal = totunif.get('subTotalGeneral')
self.TotalIva105 = totunif.get('iva105')
self.TotalIva21 = totunif.get('iva21')
self.TotalRetencionesGanancias = totunif.get('retencionesGanancias')
self.TotalRetencionesIVA = totunif.get('retencionesIVA')
self.TotalOtrasRetenciones = totunif.get('importeOtrasRetenciones')
self.TotalNetoAPagar = totunif.get('importeNeto')
self.TotalIvaRg4310_18 = totunif.get('ivaRG4310_18')
self.TotalPagoSegunCondicion = totunif.get('pagoSCondicion')
            # update output parameters:
self.params_out['coe'] = self.COE
self.params_out['coe_ajustado'] = self.COEAjustado
self.params_out['estado'] = self.Estado
self.params_out['nro_orden'] = aut.get('nroOrden')
self.params_out['cod_tipo_operacion'] = aut.get('codTipoOperacion')
self.params_out['nro_contrato'] = aut.get('nroContrato')
self.params_out['nro_op_comercial'] = aut.get('nroOpComercial', "")
            # update totals only for the base adjustment (general liquidation)
if base:
self.params_out['subtotal'] = self.Subtotal
self.params_out['iva_deducciones'] = totunif.get('ivaDeducciones')
self.params_out['subtotal_deb_cred'] = totunif.get('subTotalDebCred')
self.params_out['total_base_deducciones'] = totunif.get('totalBaseDeducciones')
self.params_out['total_iva_10_5'] = self.TotalIva105
self.params_out['total_iva_21'] = self.TotalIva21
self.params_out['total_retenciones_ganancias'] = self.TotalRetencionesGanancias
self.params_out['total_retenciones_iva'] = self.TotalRetencionesIVA
self.params_out['total_otras_retenciones'] = self.TotalOtrasRetenciones
self.params_out['total_neto_a_pagar'] = self.TotalNetoAPagar
self.params_out['total_iva_rg_4310_18'] = self.TotalIvaRg4310_18
self.params_out['total_pago_segun_condicion'] = self.TotalPagoSegunCondicion
        # store the credit and debit adjustment data for later use
self.__ajuste_base = aut
self.__ajuste_debito = aut.get('ajusteDebito') or {}
self.__ajuste_credito = aut.get('ajusteCredito') or {}
return True
@inicializar_y_capturar_excepciones
def AnalizarAjusteDebito(self):
"Método para analizar la respuesta de AFIP para Ajuste Debito"
# para compatibilidad con la generacion de PDF (completo datos)
liq = {}
if hasattr(self, "liquidacion") and self.liquidacion:
liq.update(self.liquidacion)
if hasattr(self, "ajuste") and 'ajusteDebito' in self.ajuste:
liq.update(self.ajuste['ajusteDebito'])
if self.__ajuste_debito:
liq.update(self.__ajuste_debito)
self.AnalizarLiquidacion(aut=self.__ajuste_debito, liq=liq, ajuste=True)
        self.AnalizarAjuste(self.__ajuste_base, base=False)  # general data
return True
@inicializar_y_capturar_excepciones
def AnalizarAjusteCredito(self):
"Método para analizar la respuesta de AFIP para Ajuste Credito"
liq = {}
if hasattr(self, "liquidacion") and self.liquidacion:
liq.update(self.liquidacion)
if hasattr(self, "ajuste") and 'ajusteCredito' in self.ajuste:
liq.update(self.ajuste['ajusteCredito'])
if self.__ajuste_credito:
liq.update(self.__ajuste_credito)
self.AnalizarLiquidacion(aut=self.__ajuste_credito, liq=liq, ajuste=True)
        self.AnalizarAjuste(self.__ajuste_base, base=False)  # general data
return True
@inicializar_y_capturar_excepciones
def CrearCertificacionCabecera(self, pto_emision=1, nro_orden=None,
tipo_certificado=None, nro_planta=None,
nro_ing_bruto_depositario=None, titular_grano=None,
cuit_depositante=None, nro_ing_bruto_depositante=None,
cuit_corredor=None, cod_grano=None, campania=None,
datos_adicionales=None,
**kwargs):
"Inicializa los datos de una certificación de granos (cabecera)"
self.certificacion = {}
self.certificacion['cabecera'] = dict(
ptoEmision=pto_emision,
nroOrden=nro_orden,
tipoCertificado=tipo_certificado,
            nroPlanta=nro_planta or None,  # optional
nroIngBrutoDepositario=nro_ing_bruto_depositario,
titularGrano=titular_grano,
            cuitDepositante=cuit_depositante or None,  # optional
            nroIngBrutoDepositante=nro_ing_bruto_depositante or None,  # optional
            cuitCorredor=cuit_corredor or None,  # optional
codGrano=cod_grano,
campania=campania,
            datosAdicionales=datos_adicionales,  # optional
)
        # clear the internal structures that do not apply in this case
self.liquidacion = None
return True
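    # Usage sketch for building a certification (hypothetical data; the
    # parameter values below are illustrative only):
    #   wslpg.CrearCertificacionCabecera(pto_emision=99, nro_orden=1,
    #                                    tipo_certificado="P", cod_grano=31,
    #                                    campania=1314)
    #   wslpg.AgregarCertificacionPrimaria(nro_act_depositario=29,
    #                                      peso_neto_certificado=1000)
    #   wslpg.AgregarCTG(nro_ctg=123456, nro_carta_porte=1000)
    #   wslpg.AutorizarCertificacion()
    #   print wslpg.COE, wslpg.Estado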
@inicializar_y_capturar_excepciones
def AgregarCertificacionPrimaria(self,
nro_act_depositario=None,
descripcion_tipo_grano=None,
monto_almacenaje=None, monto_acarreo=None,
monto_gastos_generales=None, monto_zarandeo=None,
porcentaje_secado_de=None, porcentaje_secado_a=None,
monto_secado=None, monto_por_cada_punto_exceso=None,
monto_otros=None,
porcentaje_merma_volatil=None, peso_neto_merma_volatil=None,
porcentaje_merma_secado=None, peso_neto_merma_secado=None,
porcentaje_merma_zarandeo=None, peso_neto_merma_zarandeo=None,
peso_neto_certificado=None, servicios_secado=None,
servicios_zarandeo=None, servicios_otros=None,
servicios_forma_de_pago=None,
**kwargs):
        # backwards compatibility: prefer the new, wider fields
v = None
if 'servicio_otros' in kwargs:
v = kwargs.get('servicio_otros')
if isinstance(v, basestring) and v and not v.isalpha():
v = float(v)
if v:
servicios_otros = v
if not v:
            warnings.warn("Use servicio_otros to allow more digits")
self.certificacion['primaria'] = dict(
nroActDepositario=nro_act_depositario,
ctg=[], # <!--0 or more repetitions:-->
descripcionTipoGrano=descripcion_tipo_grano,
montoAlmacenaje=monto_almacenaje,
montoAcarreo=monto_acarreo,
montoGastosGenerales=monto_gastos_generales,
montoZarandeo=monto_zarandeo,
porcentajeSecadoDe=porcentaje_secado_de,
porcentajeSecadoA=porcentaje_secado_a,
montoSecado=monto_secado,
montoPorCadaPuntoExceso=monto_por_cada_punto_exceso,
montoOtros=monto_otros,
porcentajeMermaVolatil=porcentaje_merma_volatil,
pesoNetoMermaVolatil=peso_neto_merma_volatil,
porcentajeMermaSecado=porcentaje_merma_secado,
pesoNetoMermaSecado=peso_neto_merma_secado,
porcentajeMermaZarandeo=porcentaje_merma_zarandeo,
pesoNetoMermaZarandeo=peso_neto_merma_zarandeo,
pesoNetoCertificado=peso_neto_certificado,
            serviciosSecado=servicios_secado or None,  # optional
serviciosZarandeo=servicios_zarandeo or None,
serviciosOtros=servicios_otros or None,
serviciosFormaDePago=servicios_forma_de_pago or None,
)
        # if fields not documented by AFIP are passed, try to send them:
for k, kk in {
'servicios_conceptos_no_gravados': 'serviciosConceptosNoGravados',
'servicios_percepciones_iva': 'serviciosPercepcionesIva',
'servicios_otras_percepciones': 'serviciosOtrasPercepciones',
}.items():
v = kwargs.get(k)
            # caution: if AFIP removes the field, sending it as 0 may fail
if isinstance(v, basestring) and v and not v.isalpha():
v = float(v)
if v:
self.certificacion['primaria'][kk] = v
return True
@inicializar_y_capturar_excepciones
def AgregarCertificacionRetiroTransferencia(self,
nro_act_depositario=None,
cuit_receptor=None,
fecha=None,
nro_carta_porte_a_utilizar=None,
cee_carta_porte_a_utilizar=None,
**kwargs):
self.certificacion['retiroTransferencia'] = dict(
nroActDepositario=nro_act_depositario,
            cuitReceptor=cuit_receptor or None,  # optional
fecha=fecha,
nroCartaPorteAUtilizar=nro_carta_porte_a_utilizar or None,
ceeCartaPorteAUtilizar=cee_carta_porte_a_utilizar or None,
certificadoDeposito=[], # <!--0 or more repetitions:-->
)
return True
@inicializar_y_capturar_excepciones
def AgregarCertificacionPreexistente(self,
tipo_certificado_deposito_preexistente=None,
nro_certificado_deposito_preexistente=None,
cac_certificado_deposito_preexistente=None,
fecha_emision_certificado_deposito_preexistente=None,
peso_neto=None, nro_planta=None,
**kwargs):
self.certificacion['preexistente'] = dict(
tipoCertificadoDepositoPreexistente=tipo_certificado_deposito_preexistente,
nroCertificadoDepositoPreexistente=nro_certificado_deposito_preexistente,
cacCertificadoDepositoPreexistente=cac_certificado_deposito_preexistente,
fechaEmisionCertificadoDepositoPreexistente=fecha_emision_certificado_deposito_preexistente,
pesoNeto=peso_neto, nroPlanta=nro_planta,
)
return True
@inicializar_y_capturar_excepciones
def AgregarCalidad(self, analisis_muestra=None, nro_boletin=None,
cod_grado=None, valor_grado=None,
valor_contenido_proteico=None, valor_factor=None,
**kwargs):
"Agrega la información sobre la calidad, al autorizar o posteriormente"
self.certificacion['primaria']['calidad'] = dict(
analisisMuestra=analisis_muestra,
nroBoletin=nro_boletin,
codGrado=cod_grado, # G1 G2 G3 F1 F2 F3
            valorGrado=valor_grado or None,  # optional
valorContProteico=valor_contenido_proteico,
valorFactor=valor_factor,
detalleMuestraAnalisis=[], # <!--1 or more repetitions:-->
)
return True
@inicializar_y_capturar_excepciones
def AgregarDetalleMuestraAnalisis(self, descripcion_rubro=None,
tipo_rubro=None, porcentaje=None,
valor=None,
**kwargs):
"Agrega la información referente al detalle de la certificación"
det = dict(
descripcionRubro=descripcion_rubro,
tipoRubro=tipo_rubro,
porcentaje=porcentaje,
valor=valor,
)
self.certificacion['primaria']['calidad']['detalleMuestraAnalisis'].append(det)
return True
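    # Quality reporting sketch (hypothetical values): build the 'calidad'
    # structure and report it for an already authorized COE:
    #   wslpg.AgregarCalidad(analisis_muestra=10, nro_boletin=1,
    #                        cod_grado="G1", valor_grado=1.02)
    #   wslpg.AgregarDetalleMuestraAnalisis(descripcion_rubro="VOLATIL",
    #                                       tipo_rubro="B", porcentaje=1.5)
    #   wslpg.InformarCalidadCertificacion(coe=330100000000)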
@inicializar_y_capturar_excepciones
def BuscarCTG(self, tipo_certificado="P", cuit_depositante=None,
nro_planta=None, cod_grano=2, campania=1314,
nro_ctg=None, tipo_ctg=None, nro_carta_porte=None,
fecha_confirmacion_ctg_des=None,
fecha_confirmacion_ctg_has=None,
):
"Devuelve los CTG/Carta de porte que se puede incluir en un certificado"
ret = self.client.cgBuscarCtg(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
tipoCertificado=tipo_certificado,
cuitDepositante=cuit_depositante or self.Cuit,
nroPlanta=nro_planta,
codGrano=cod_grano, campania=campania,
nroCtg=nro_ctg, tipoCtg=tipo_ctg,
nroCartaPorte=nro_carta_porte,
fechaConfirmacionCtgDes=fecha_confirmacion_ctg_des,
fechaConfirmacionCtgHas=fecha_confirmacion_ctg_has,
)['oReturn']
self.__analizar_errores(ret)
array = ret.get('ctg', [])
self.Excepcion = self.Traceback = ""
self.params_out['ctgs'] = []
for ctg in array:
self.params_out['ctgs'].append({
'campania': ctg.get('campania'),
'nro_planta': ctg.get('nroPlanta'),
'nro_ctg': ctg.get('nroCtg'),
'tipo_ctg': ctg.get('tipoCtg'),
'nro_carta_porte': ctg.get('nroCartaPorte'),
'kilos_confirmados': ctg.get('kilosConfirmados'),
'fecha_confirmacion_ctg': ctg.get('fechaConfirmacionCtg'),
'cod_grano': ctg.get('codGrano'),
'cuit_remitente_comercial': ctg.get('cuitRemitenteComercial'),
'cuit_liquida': ctg.get('cuitLiquida'),
'cuit_certifica': ctg.get('cuitCertifica'),
})
return True
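    # Result-reading sketch: BuscarCTG leaves one dict per CTG in
    # params_out['ctgs'], with the keys mapped above:
    #   if wslpg.BuscarCTG(tipo_certificado="P", cod_grano=31):
    #       for ctg in wslpg.params_out['ctgs']:
    #           print ctg['nro_ctg'], ctg['kilos_confirmados']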
@inicializar_y_capturar_excepciones
def AgregarCTG(self, nro_ctg=None, nro_carta_porte=None,
porcentaje_secado_humedad=None, importe_secado=None,
peso_neto_merma_secado=None, tarifa_secado=None,
importe_zarandeo=None, peso_neto_merma_zarandeo=None,
tarifa_zarandeo=None,
peso_neto_confirmado_definitivo=None,
**kwargs):
"Agrega la información referente a una CTG de la certificación"
ctg = dict(
nroCTG=nro_ctg,
nroCartaDePorte=nro_carta_porte,
pesoNetoConfirmadoDefinitivo=peso_neto_confirmado_definitivo,
porcentajeSecadoHumedad=porcentaje_secado_humedad,
importeSecado=importe_secado,
pesoNetoMermaSecado=peso_neto_merma_secado,
tarifaSecado=tarifa_secado,
importeZarandeo=importe_zarandeo,
pesoNetoMermaZarandeo=peso_neto_merma_zarandeo,
tarifaZarandeo=tarifa_zarandeo,
)
self.certificacion['primaria']['ctg'].append(ctg)
return True
@inicializar_y_capturar_excepciones
def BuscarCertConSaldoDisponible(self, cuit_depositante=None,
cod_grano=2, campania=1314, coe=None,
fecha_emision_des=None,
fecha_emision_has=None,
):
"""Devuelve los certificados de depósito en los que un productor tiene
saldo disponible para Liquidar/Retirar/Transferir"""
ret = self.client.cgBuscarCertConSaldoDisponible(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
cuitDepositante=cuit_depositante or self.Cuit,
codGrano=cod_grano, campania=campania,
coe=coe,
fechaEmisionDes=fecha_emision_des,
fechaEmisionHas=fecha_emision_has,
)['oReturn']
self.__analizar_errores(ret)
array = ret.get('certificado', [])
self.Excepcion = self.Traceback = ""
self.params_out['certificados'] = []
for cert in array:
self.params_out['certificados'].append(dict(
coe=cert['coe'],
tipo_certificado=cert['tipoCertificado'],
campania=cert['campania'],
cuit_depositante=cert['cuitDepositante'],
cuit_depositario=cert['cuitDepositario'],
nro_planta=cert['nroPlanta'],
kilos_disponibles=cert['kilosDisponibles'],
cod_grano=cert['codGrano'],
))
return True
@inicializar_y_capturar_excepciones
def AutorizarCertificacion(self):
"Autoriza una Certificación Primaria de Depósito de Granos (C1116A/RT)"
# limpio los elementos que no correspondan por estar vacios:
for k1 in ('primaria', 'retiroTransferencia'):
dic = self.certificacion.get(k1)
if not dic: continue
for k2 in ('ctg', 'detalleMuestraAnalisis', 'certificadoDeposito'):
if k2 in dic and not dic[k2]:
del dic[k2]
        # call the webservice:
ret = self.client.cgAutorizar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
**self.certificacion
)
        # analyze the response
ret = ret['oReturn']
self.__analizar_errores(ret)
self.AnalizarAutorizarCertificadoResp(ret)
return True
def AnalizarAutorizarCertificadoResp(self, ret):
"Metodo interno para extraer datos de la Respuesta de Certificación"
aut = ret.get('autorizacion')
if aut:
self.PtoEmision = aut['ptoEmision']
self.NroOrden = aut['nroOrden']
self.FechaCertificacion = str(aut.get('fechaCertificacion', ""))
self.COE = str(aut['coe'])
self.Estado = aut['estado']
            # update output parameters:
self.params_out['coe'] = self.COE
self.params_out['estado'] = self.Estado
self.params_out['nro_orden'] = self.NroOrden
self.params_out['fecha_certificacion'] = self.FechaCertificacion.replace("-", "")
if "planta" in aut:
p = aut.get("planta")
self.params_out['nro_planta'] = p.get("nroPlanta")
self.params_out['cuit_titular_planta'] = p.get("cuitTitularPlanta")
self.params_out['razon_social_titular_planta'] = p.get("razonSocialTitularPlanta")
            # other returned fields (optional)
p = aut.get('pesosResumen', {})
self.params_out['peso_bruto_certificado'] = p.get("pesoBrutoCertificado")
self.params_out['peso_merma_secado'] = p.get("pesoMermaSecado")
self.params_out['peso_merma_volatil'] = p.get("pesoMermaVolatil")
self.params_out['peso_merma_zarandeo'] = p.get("pesoMermaZarandeo")
self.params_out['peso_neto_certificado'] = p.get("pesoNetoCertificado")
p = aut.get('serviciosResumen', {})
self.params_out['importe_iva'] = p.get("importeIVA")
self.params_out['servicio_gastos_generales'] = p.get("servicioGastosGenerales")
self.params_out['servicio_otros'] = p.get("servicioOtros")
self.params_out['servicio_total'] = p.get("servicioTotal")
self.params_out['servicio_zarandeo'] = p.get("servicioZarandeo")
        # data returned depending on the certification type (queries):
cab = ret.get('cabecera')
if cab:
self.params_out['pto_emision'] = cab.get('ptoEmision')
self.params_out['nro_orden'] = cab.get('nroOrden')
self.params_out['tipo_certificado'] = cab.get('tipoCertificado')
self.params_out['nro_planta'] = cab.get('nroPlanta')
self.params_out['nro_ing_bruto_depositario'] = cab.get('nroIngBrutoDepositario')
self.params_out['titular_grano'] = cab.get('titularGrano')
self.params_out['cuit_depositante'] = cab.get('cuitDepositante')
self.params_out['nro_ing_bruto_depositante'] = cab.get('nroIngBrutoDepositante')
self.params_out['cuit_corredor'] = cab.get('cuitCorredor')
self.params_out['cod_grano'] = cab.get('codGrano')
self.params_out['campania'] = cab.get('campania')
self.params_out['datos_adicionales'] = cab.get('datosAdicionales')
pri = ret.get('primaria')
if pri:
self.params_out['nro_act_depositario'] = pri.get('nroActDepositario')
self.params_out['descripcion_tipo_grano'] = pri.get('descripcionTipoGrano')
self.params_out['monto_almacenaje'] = pri.get('montoAlmacenaje')
self.params_out['monto_acarreo'] = pri.get('montoAcarreo')
self.params_out['monto_gastos_generales'] = pri.get('montoGastosGenerales')
self.params_out['monto_zarandeo'] = pri.get('montoZarandeo')
self.params_out['porcentaje_secado_de'] = pri.get('porcentajeSecadoDe')
self.params_out['porcentaje_secado_a'] = pri.get('porcentajeSecadoA')
self.params_out['monto_secado'] = pri.get('montoSecado')
self.params_out['monto_por_cada_punto_exceso'] = pri.get('montoPorCadaPuntoExceso')
self.params_out['monto_otros'] = pri.get('montoOtros')
self.params_out['porcentaje_merma_volatil'] = pri.get('porcentajeMermaVolatil')
self.params_out['porcentaje_merma_secado'] = pri.get('porcentajeMermaSecado')
self.params_out['peso_neto_merma_secado'] = pri.get('pesoNetoMermaSecado')
            self.params_out['porcentaje_merma_zarandeo'] = pri.get('porcentajeMermaZarandeo')
            self.params_out['peso_neto_merma_zarandeo'] = pri.get('pesoNetoMermaZarandeo')
self.params_out['peso_neto_certificado'] = pri.get('pesoNetoCertificado')
self.params_out['servicios_secado'] = pri.get('serviciosSecado')
self.params_out['servicios_zarandeo'] = pri.get('serviciosZarandeo')
self.params_out['servicios_otros'] = pri.get('serviciosOtros')
self.params_out['servicios_forma_de_pago'] = pri.get('serviciosFormaDePago')
            # other undocumented fields:
self.params_out['servicios_conceptos_no_gravados'] = pri.get("serviciosConceptosNoGravados")
self.params_out['servicios_percepciones_iva'] = pri.get("serviciosPercepcionesIVA")
self.params_out['servicios_otras_percepciones'] = pri.get("serviciosOtrasPercepciones")
            # sub-structures:
self.params_out['ctgs'] = []
self.params_out['det_muestra_analisis'] = []
for ctg in pri.get("ctg", []):
self.params_out['ctgs'].append({
'nro_ctg': ctg.get('nroCTG'),
'nro_carta_porte': ctg.get('nroCartaDePorte'),
'peso_neto_confirmado_definitivo': ctg.get('pesoNetoConfirmadoDefinitivo'),
'porcentaje_secado_humedad': ctg.get('porcentajeSecadoHumedad'),
'importe_secado': ctg.get('importeSecado'),
'peso_neto_merma_secado': ctg.get('pesoNetoMermaSecado'),
'importe_zarandeo': ctg.get('importeZarandeo'),
'peso_neto_merma_zarandeo': ctg.get('pesoNetoMermaZarandeo'),
'tarifa_zarandeo': ctg.get('tarifaZarandeo'),
})
self.params_out['calidad'] = []
for cal in [pri.get("calidad", {})]:
self.params_out['calidad'].append({
'analisis_muestra': cal.get('analisisMuestra'),
'nro_boletin': cal.get('nroBoletin'),
'nro_act_depositario': cal.get('nroActDepositario'),
'cod_grado': cal.get('codGrado'),
'valor_grado': cal.get('valorGrado'),
'valor_contenido_proteico': cal.get('valorContProteico'),
'valor_factor': cal.get('valorFactor')
})
for det in cal.get("detalleMuestraAnalisis", []):
self.params_out['det_muestra_analisis'].append({
'descripcion_rubro': det.get('descripcionRubro'),
'tipo_rubro': det.get('tipoRubro'),
'porcentaje': det.get('porcentaje'),
'valor': det.get('valor'),
})
rt = ret.get('retiroTransferencia')
if rt:
self.params_out['nro_act_depositario'] = rt.get('nroActDepositario')
self.params_out['cuit_receptor'] = rt.get('cuitReceptor')
self.params_out['nro_carta_porte_a_utilizar'] = rt.get('nroCartaPorteAUtilizar')
            # sub-structures:
self.params_out['certificados'] = []
cert = rt.get("certificadoDeposito")
if cert:
self.params_out['certificados'].append({
'coe_certificado_deposito': cert.get('coeCertificadoDeposito'),
'peso_neto': cert.get('pesoNeto'),
})
pre = ret.get('preexistente')
if pre:
self.params_out['nro_planta'] = pre.get('nroPlanta')
self.params_out['tipo_certificado_deposito_preexistente'] = pre.get('tipoCertificadoDepositoPreexistente')
self.params_out['nro_certificado_deposito_preexistente'] = pre.get('nroCertificadoDepositoPreexistente')
self.params_out['cac_certificado_deposito_preexistente'] = pre.get('cacCertificadoDepositoPreexistente')
self.params_out['fecha_emision_certificado_deposito_preexistente'] = pre.get('fechaEmisionCertificadoDepositoPreexistente')
self.params_out['peso_neto'] = pre.get('pesoNeto')
self.params_out['errores'] = self.errores
@inicializar_y_capturar_excepciones
def InformarCalidadCertificacion(self, coe):
"Informar calidad de un certificado (C1116A/RT)"
# llamo al webservice:
ret = self.client.cgInformarCalidad(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
coe=coe,
calidad=self.certificacion['primaria']['calidad'],
)
        # analyze the response
ret = ret['oReturn']
self.__analizar_errores(ret)
self.AnalizarAutorizarCertificadoResp(ret)
return True
@inicializar_y_capturar_excepciones
def AnularCertificacion(self, coe):
"Anular liquidación activa"
ret = self.client.cgSolicitarAnulacion(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
coe=coe,
)
ret = ret['oReturn']
self.__analizar_errores(ret)
self.Estado = ret.get('estadoCertificado', "")
return self.COE
@inicializar_y_capturar_excepciones
def AsociarLiquidacionAContrato(self, coe=None, nro_contrato=None,
cuit_comprador=None,
cuit_vendedor=None,
cuit_corredor=None,
cod_grano=None,
**kwargs):
"Asociar una Liquidación a un contrato"
ret = self.client.asociarLiquidacionAContrato(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
coe=coe,
nroContrato=nro_contrato,
cuitComprador=cuit_comprador,
cuitVendedor=cuit_vendedor,
cuitCorredor=cuit_corredor,
codGrano=cod_grano,
)
ret = ret['liquidacion']
self.__analizar_errores(ret)
if 'liquidacion' in ret:
            # analyze the response
liq = ret['liquidacion']
aut = ret['autorizacion']
self.AnalizarLiquidacion(aut, liq)
return True
@inicializar_y_capturar_excepciones
def ConsultarLiquidacionesPorContrato(self, nro_contrato=None,
cuit_comprador=None,
cuit_vendedor=None,
cuit_corredor=None,
cod_grano=None,
**kwargs):
"Obtener los COE de liquidaciones relacionadas a un contrato"
ret = self.client.liquidacionPorContratoConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
nroContrato=nro_contrato,
cuitComprador=cuit_comprador,
cuitVendedor=cuit_vendedor,
cuitCorredor=cuit_corredor,
codGrano=cod_grano,
)
ret = ret['liqPorContratoCons']
self.__analizar_errores(ret)
if 'coeRelacionados' in ret:
            # analyze the response = [{'coe': "...."}]
self.DatosLiquidacion = sorted(ret['coeRelacionados'])
            # set the first COE
self.LeerDatosLiquidacion()
return True
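    # Iteration sketch (hypothetical contract number): the COE list left in
    # DatosLiquidacion is consumed with LeerDatosLiquidacion, which pops one
    # item per call and sets self.COE:
    #   wslpg.ConsultarLiquidacionesPorContrato(nro_contrato=26)
    #   coe = wslpg.COE
    #   while coe:
    #       wslpg.ConsultarLiquidacion(coe=coe)
    #       coe = wslpg.LeerDatosLiquidacion()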
@inicializar_y_capturar_excepciones
def ConsultarLiquidacion(self, pto_emision=None, nro_orden=None, coe=None,
pdf=None):
"Consulta una liquidación por No de orden"
if coe:
ret = self.client.liquidacionXCoeConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
coe=coe,
pdf='S' if pdf else 'N',
)
else:
ret = self.client.liquidacionXNroOrdenConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
ptoEmision=pto_emision,
nroOrden=nro_orden,
)
ret = ret['liqConsReturn']
self.__analizar_errores(ret)
if 'liquidacion' in ret:
aut = ret['autorizacion']
liq = ret['liquidacion']
self.AnalizarLiquidacion(aut, liq)
            # save the PDF if a file name was given and it came in the response:
if pdf and 'pdf' in ret:
open(pdf, "wb").write(ret['pdf'])
return True
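    # Query sketch (hypothetical COE and file name): fetch by COE and save
    # the PDF returned by AFIP, or query by emission point / order number:
    #   wslpg.ConsultarLiquidacion(coe=330100013183, pdf="liq.pdf")
    #   wslpg.ConsultarLiquidacion(pto_emision=1, nro_orden=1)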
@inicializar_y_capturar_excepciones
def ConsultarLiquidacionSecundaria(self, pto_emision=None, nro_orden=None,
coe=None, pdf=None):
"Consulta una liquidación sequndaria por No de orden o coe"
if coe:
ret = self.client.lsgConsultarXCoe(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
coe=coe,
pdf='S' if pdf else 'N',
)
else:
ret = self.client.lsgConsultarXNroOrden(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
ptoEmision=pto_emision,
nroOrden=nro_orden,
)
ret = ret['oReturn']
self.__analizar_errores(ret)
for it in ret['liquidaciones']:
aut = it['autorizacion']
if 'liquidacion' in it:
liq = it['liquidacion']
elif 'ajuste' in it:
liq = it['ajuste']
self.AnalizarLiquidacion(aut, liq)
        # save the PDF if a file name was given and it came in the response:
if pdf and 'pdf' in ret:
open(pdf, "wb").write(ret['pdf'])
return True
@inicializar_y_capturar_excepciones
def ConsultarLiquidacionesSecundariasPorContrato(self, nro_contrato=None,
cuit_comprador=None,
cuit_vendedor=None,
cuit_corredor=None,
cod_grano=None,
**kwargs):
"Obtener los COE de liquidaciones relacionadas a un contrato"
ret = self.client.lsgConsultarXContrato(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
nroContrato=nro_contrato,
cuitComprador=cuit_comprador,
cuitVendedor=cuit_vendedor,
cuitCorredor=cuit_corredor,
codGrano=cod_grano,
)
ret = ret['liqPorContratoCons']
self.__analizar_errores(ret)
if 'coeRelacionados' in ret:
            # analyze the response = [{'coe': "...."}]
self.DatosLiquidacion = sorted(ret['coeRelacionados'])
            # set the first COE
self.LeerDatosLiquidacion()
return True
@inicializar_y_capturar_excepciones
def AsociarLiquidacionSecundariaAContrato(self, coe=None, nro_contrato=None,
cuit_comprador=None,
cuit_vendedor=None,
cuit_corredor=None,
cod_grano=None,
**kwargs):
"Asociar una Liquidación a un contrato"
ret = self.client.lsgAsociarAContrato(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
coe=coe,
nroContrato=nro_contrato,
cuitComprador=cuit_comprador,
cuitVendedor=cuit_vendedor,
cuitCorredor=cuit_corredor,
codGrano=cod_grano,
)
ret = ret['oReturn']
self.__analizar_errores(ret)
if 'liquidacion' in ret:
            # analyze the response
liq = ret['liquidacion']
aut = ret['autorizacion']
self.AnalizarLiquidacion(aut, liq)
return True
@inicializar_y_capturar_excepciones
def ConsultarCertificacion(self, pto_emision=None, nro_orden=None,
coe=None, pdf=None):
"Consulta una certificacion por No de orden o COE"
if coe:
ret = self.client.cgConsultarXCoe(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
coe=coe,
pdf='S' if pdf else 'N',
)
else:
ret = self.client.cgConsultarXNroOrden(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
ptoEmision=pto_emision,
nroOrden=nro_orden,
)
ret = ret['oReturn']
self.__analizar_errores(ret)
if 'autorizacion' in ret:
self.AnalizarAutorizarCertificadoResp(ret)
            # save the PDF if a file name was given and it came in the response:
if pdf and 'pdf' in ret:
open(pdf, "wb").write(ret['pdf'])
return True
@inicializar_y_capturar_excepciones
def ConsultarAjuste(self, pto_emision=None, nro_orden=None, nro_contrato=None,
coe=None, pdf=None):
"Consulta un ajuste de liquidación por No de orden o numero de contrato"
if nro_contrato:
ret = self.client.ajustePorContratoConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
nroContrato=nro_contrato,
)
ret = ret['ajusteContratoReturn']
elif coe is None or pdf is None:
ret = self.client.ajusteXNroOrdenConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
ptoEmision=pto_emision,
nroOrden=nro_orden,
pdf='S' if pdf else 'N',
)
ret = ret['ajusteXNroOrdenConsReturn']
else:
ret = self.client.ajusteXCoeConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
coe=coe,
pdf='S' if pdf else 'N',
)
ret = ret['ajusteConsReturn']
self.__analizar_errores(ret)
if 'ajusteUnificado' in ret:
aut = ret['ajusteUnificado']
self.AnalizarAjuste(aut)
        # save the PDF if a file name was given and it came in the response:
if pdf and 'pdf' in ret:
open(pdf, "wb").write(ret['pdf'])
return True
@inicializar_y_capturar_excepciones
def ConsultarUltNroOrden(self, pto_emision=1):
"Consulta el último No de orden registrado"
ret = self.client.liquidacionUltimoNroOrdenConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
ptoEmision=pto_emision,
)
ret = ret['liqUltNroOrdenReturn']
self.__analizar_errores(ret)
self.NroOrden = ret['nroOrden']
return True
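    # Typical pattern: query the last registered order number and use the
    # next one when authorizing (sketch):
    #   wslpg.ConsultarUltNroOrden(pto_emision=1)
    #   nro_orden = wslpg.NroOrden + 1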
@inicializar_y_capturar_excepciones
def ConsultarLiquidacionSecundariaUltNroOrden(self, pto_emision=1):
"Consulta el último No de orden registrado para LSG"
ret = self.client.lsgConsultarUltimoNroOrden(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
ptoEmision=pto_emision,
)
ret = ret['liqUltNroOrdenReturn']
self.__analizar_errores(ret)
self.NroOrden = ret['nroOrden']
return True
@inicializar_y_capturar_excepciones
def ConsultarCertificacionUltNroOrden(self, pto_emision=1):
"Consulta el último No de orden registrado para CG"
ret = self.client.cgConsultarUltimoNroOrden(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
ptoEmision=pto_emision,
)
ret = ret['liqUltNroOrdenReturn']
self.__analizar_errores(ret)
self.NroOrden = ret['nroOrden']
return True
@inicializar_y_capturar_excepciones
def LeerDatosLiquidacion(self, pop=True):
"Recorro los datos devueltos y devuelvo el primero si existe"
if self.DatosLiquidacion:
            # extract the first item
if pop:
datos_liq = self.DatosLiquidacion.pop(0)
else:
datos_liq = self.DatosLiquidacion[0]
self.COE = str(datos_liq['coe'])
self.Estado = unicode(datos_liq.get('estado', ""))
return self.COE
else:
return ""
@inicializar_y_capturar_excepciones
def AnularLiquidacion(self, coe):
"Anular liquidación activa"
ret = self.client.liquidacionAnular(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
coe=coe,
)
ret = ret['anulacionReturn']
self.__analizar_errores(ret)
self.Resultado = ret['resultado']
return self.COE
@inicializar_y_capturar_excepciones
def AnularLiquidacionSecundaria(self, coe):
"Anular liquidación secundaria activa"
ret = self.client.lsgAnular(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
coe=coe,
)
ret = ret['anulacionReturn']
self.__analizar_errores(ret)
self.Resultado = ret['resultado']
return self.COE
    def ConsultarCampanias(self, sep="||"):
        "Query the available campaigns"
ret = self.client.campaniasConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['campaniaReturn']
self.__analizar_errores(ret)
array = ret.get('campanias', [])
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array]
    def ConsultarTipoGrano(self, sep="||"):
        "Query the Grain types"
ret = self.client.tipoGranoConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['tipoGranoReturn']
self.__analizar_errores(ret)
array = ret.get('granos', [])
if sep is None:
return dict([(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array])
else:
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array]
def ConsultarCodigoGradoReferencia(self, sep="||"):
"Consulta de Grados según Grano."
ret = self.client.codigoGradoReferenciaConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['gradoRefReturn']
self.__analizar_errores(ret)
array = ret.get('gradosRef', [])
if sep is None:
return dict([(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array])
else:
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array]
def ConsultarGradoEntregadoXTipoGrano(self, cod_grano, sep="||"):
"Consulta de Grado y Valor según Grano Entregado."
ret = self.client.codigoGradoEntregadoXTipoGranoConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
codGrano=cod_grano,
)['gradoEntReturn']
self.__analizar_errores(ret)
array = ret.get('gradoEnt', [])
if sep is None:
return dict([(it['gradoEnt']['codigoDescripcion']['codigo'],
it['gradoEnt']['valor'])
for it in array])
else:
return [("%s %%s %s %%s %s %%s %s" % (sep, sep, sep, sep)) %
(it['gradoEnt']['codigoDescripcion']['codigo'],
it['gradoEnt']['codigoDescripcion']['descripcion'],
it['gradoEnt']['valor'],
)
for it in array]
def ConsultarTipoCertificadoDeposito(self, sep="||"):
"Consulta de tipos de Certificados de Depósito"
ret = self.client.tipoCertificadoDepositoConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['tipoCertDepReturn']
self.__analizar_errores(ret)
array = ret.get('tiposCertDep', [])
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array]
def ConsultarTipoDeduccion(self, sep="||"):
"Consulta de tipos de Deducciones"
ret = self.client.tipoDeduccionConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['tipoDeduccionReturn']
self.__analizar_errores(ret)
array = ret.get('tiposDeduccion', [])
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array]
def ConsultarTipoRetencion(self, sep="||"):
"Consulta de tipos de Retenciones."
ret = self.client.tipoRetencionConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['tipoRetencionReturn']
self.__analizar_errores(ret)
array = ret.get('tiposRetencion', [])
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array]
def ConsultarPuerto(self, sep="||"):
"Consulta de Puertos habilitados"
ret = self.client.puertoConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['puertoReturn']
self.__analizar_errores(ret)
array = ret.get('puertos', [])
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array]
def ConsultarTipoActividad(self, sep="||"):
"Consulta de Tipos de Actividad."
ret = self.client.tipoActividadConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['tipoActividadReturn']
self.__analizar_errores(ret)
array = ret.get('tiposActividad', [])
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array]
def ConsultarTipoActividadRepresentado(self, sep="||"):
"Consulta de Tipos de Actividad inscripta en el RUOCA."
try:
ret = self.client.tipoActividadRepresentadoConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['tipoActividadReturn']
self.__analizar_errores(ret)
array = ret.get('tiposActividad', [])
self.Excepcion = self.Traceback = ""
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array]
except Exception:
ex = utils.exception_info()
self.Excepcion = ex['msg']
self.Traceback = ex['tb']
if sep:
return ["ERROR"]
def ConsultarProvincias(self, sep="||"):
"Consulta las provincias habilitadas"
ret = self.client.provinciasConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['provinciasReturn']
self.__analizar_errores(ret)
array = ret.get('provincias', [])
if sep is None:
return dict([(int(it['codigoDescripcion']['codigo']),
it['codigoDescripcion']['descripcion'])
for it in array])
else:
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array]
    def ConsultarLocalidadesPorProvincia(self, codigo_provincia, sep="||"):
        "Query the localities of a given province"
ret = self.client.localidadXProvinciaConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
codProvincia=codigo_provincia,
)['localidadesReturn']
self.__analizar_errores(ret)
array = ret.get('localidades', [])
if sep is None:
return dict([(str(it['codigoDescripcion']['codigo']),
it['codigoDescripcion']['descripcion'])
for it in array])
else:
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array]
def BuscarLocalidades(self, cod_prov, cod_localidad=None, consultar=True):
"Devuelve la localidad o la consulta en AFIP (uso interno)"
# si no se especifíca cod_localidad, es util para reconstruir la cache
import wslpg_datos as datos
if not str(cod_localidad) in datos.LOCALIDADES and consultar:
d = self.ConsultarLocalidadesPorProvincia(cod_prov, sep=None)
try:
                # update the persistent dictionary (shelve)
datos.LOCALIDADES.update(d)
except Exception, e:
print "EXCEPCION CAPTURADA", e
                # swallow permission (or concurrency) errors
datos.LOCALIDADES = d
return datos.LOCALIDADES.get(str(cod_localidad), "")
def ConsultarTiposOperacion(self, sep="||"):
"Consulta tipo de Operación por Actividad."
ops = []
ret = self.client.tipoActividadConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['tipoActividadReturn']
self.__analizar_errores(ret)
for it_act in ret.get('tiposActividad', []):
ret = self.client.tipoOperacionXActividadConsultar(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
nroActLiquida=it_act['codigoDescripcion']['codigo'],
)['tipoOperacionReturn']
self.__analizar_errores(ret)
array = ret.get('tiposOperacion', [])
if sep:
ops.extend([("%s %%s %s %%s %s %%s %s" % (sep, sep, sep, sep)) %
(it_act['codigoDescripcion']['codigo'],
it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array])
else:
ops.extend([(it_act['codigoDescripcion']['codigo'],
it['codigoDescripcion']['codigo'],
it['codigoDescripcion']['descripcion'])
for it in array])
return ops
    # PDF generation functions:
def CargarFormatoPDF(self, archivo="liquidacion_form_c1116b_wslpg.csv"):
"Cargo el formato de campos a generar desde una planilla CSV"
# si no encuentro archivo, lo busco en el directorio predeterminado:
if not os.path.exists(archivo):
archivo = os.path.join(self.InstallDir, "plantillas", os.path.basename(archivo))
if DEBUG: print "abriendo archivo ", archivo
        # initialize the list of elements:
self.elements = []
for lno, linea in enumerate(open(archivo.encode('latin1')).readlines()):
if DEBUG: print "procesando linea ", lno, linea
args = []
for i,v in enumerate(linea.split(";")):
if not v.startswith("'"):
v = v.replace(",",".")
else:
v = v#.decode('latin1')
if v.strip()=='':
v = None
else:
v = eval(v.strip())
args.append(v)
            # fix the relative path for images:
if args[1] == 'I':
if not os.path.exists(args[14]):
args[14] = os.path.join(self.InstallDir, "plantillas", os.path.basename(args[14]))
if DEBUG: print "NUEVO PATH:", args[14]
self.AgregarCampoPDF(*args)
self.AgregarCampoPDF("anulado", 'T', 150, 250, 0, 0,
size=70, rotate=45, foreground=0x808080,
priority=-1)
if HOMO:
self.AgregarCampoPDF("homo", 'T', 100, 250, 0, 0,
size=70, rotate=45, foreground=0x808080,
priority=-1)
        # load the elements into the template
self.template.load_elements(self.elements)
return True
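    # Sketch of one CSV row consumed above (semicolon-separated, each cell is
    # eval()'d and the columns mirror AgregarCampoPDF's positional arguments;
    # this row is a hypothetical text field):
    #   'nombre';'T';10;20;110;26;'Arial';12;False;False;False;0x000000;0xFFFFFF;'L';'texto';0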
def AgregarCampoPDF(self, nombre, tipo, x1, y1, x2, y2,
font="Arial", size=12,
bold=False, italic=False, underline=False,
foreground= 0x000000, background=0xFFFFFF,
align="L", text="", priority=0, **kwargs):
"Agrego un campo a la plantilla"
# convierto colores de string (en hexadecimal)
if isinstance(foreground, basestring): foreground = int(foreground, 16)
if isinstance(background, basestring): background = int(background, 16)
if isinstance(text, unicode): text = text.encode("latin1")
field = {
'name': nombre,
'type': tipo,
'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,
'font': font, 'size': size,
'bold': bold, 'italic': italic, 'underline': underline,
'foreground': foreground, 'background': background,
'align': align, 'text': text, 'priority': priority}
field.update(kwargs)
self.elements.append(field)
return True
def CrearPlantillaPDF(self, papel="A4", orientacion="portrait"):
"Iniciar la creación del archivo PDF"
# genero el renderizador con propiedades del PDF
t = Template(
format=papel, orientation=orientacion,
title="F 1116 B/C %s" % (self.NroOrden),
author="CUIT %s" % self.Cuit,
subject="COE %s" % self.params_out.get('coe'),
keywords="AFIP Liquidacion Electronica Primaria de Granos",
creator='wslpg.py %s (http://www.PyAfipWs.com.ar)' % __version__,)
self.template = t
return True
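    # PDF pipeline sketch (file names are hypothetical):
    #   wslpg.CrearPlantillaPDF(papel="A4", orientacion="portrait")
    #   wslpg.CargarFormatoPDF("liquidacion_form_c1116b_wslpg.csv")
    #   wslpg.AgregarDatoPDF("fondo", "fondo_form_c1116b.png")
    #   wslpg.ProcesarPlantillaPDF(num_copias=2)
    #   wslpg.GenerarPDF("liquidacion.pdf")
    #   wslpg.MostrarPDF("liquidacion.pdf", imprimir=False)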
def AgregarDatoPDF(self, campo, valor, pagina='T'):
"Agrego un dato a la factura (internamente)"
# corrijo path relativo para las imágenes (compatibilidad hacia atrás):
if campo == 'fondo' and valor.startswith(self.InstallDir):
if not os.path.exists(valor):
valor = os.path.join(self.InstallDir, "plantillas", os.path.basename(valor))
if DEBUG: print "NUEVO PATH:", valor
self.datos[campo] = valor
return True
def ProcesarPlantillaPDF(self, num_copias=1, lineas_max=24, qty_pos='izq',
clave=''):
"Generar el PDF según la factura creada y plantilla cargada"
try:
f = self.template
liq = self.params_out
            # update the fields according to the key (debit / credit adjustments)
if clave and clave in liq:
liq = liq.copy()
                liq.update(liq[clave])  # unify with AnalizarAjusteCredito/Debito
if HOMO:
self.AgregarDatoPDF("homo", u"HOMOLOGACIÓN")
copias = {1: 'Original', 2: 'Duplicado', 3: 'Triplicado',
4: 'Cuadruplicado', 5: 'Quintuplicado'}
            # convert the interchange format to render the values:
fmt_encabezado = dict([(v[0], v[1:]) for v in ENCABEZADO])
fmt_deduccion = dict([(v[0], v[1:]) for v in DEDUCCION])
fmt_retencion = dict([(v[0], v[1:]) for v in RETENCION])
def formatear(campo, valor, formato):
"Convertir el valor a una cadena correctamente s/ formato ($ % ...)"
if campo in formato and v is not None:
fmt = formato[campo]
if fmt[1] == N:
if 'cuit' in campo:
c = str(valor)
if len(c) == 11:
valor = "%s-%s-%s" % (c[0:2], c[2:10], c[10:])
else:
valor = ""
elif 'peso' in campo:
valor = "%s Kg" % valor
elif valor is not None and valor != "":
valor = "%d" % int(valor)
else:
valor = ""
elif fmt[1] == I:
valor = ("%%0.%df" % fmt[2]) % valor
if 'alic' in campo or 'comision' in campo:
valor = valor + " %"
elif 'factor' in campo or 'cont' in campo or 'cant' in campo:
pass
else:
valor = "$ " + valor
elif 'fecha' in campo:
d = valor
if isinstance(d, (datetime.date, datetime.datetime)):
valor = d.strftime("%d/%m/%Y")
else:
valor = "%s/%s/%s" % (d[8:10], d[5:7], d[0:4])
return valor
def buscar_localidad_provincia(cod_prov, cod_localidad):
"obtener la descripción de la provincia/localidad (usar cache)"
cod_prov = int(cod_prov)
cod_localidad = str(cod_localidad)
provincia = datos.PROVINCIAS[cod_prov]
localidad = self.BuscarLocalidades(cod_prov, cod_localidad)
return localidad, provincia
            # split the additional data (rows 1 through 9 must exist):
if liq.get('datos_adicionales') and f.has_key('datos_adicionales1'):
d = liq.get('datos_adicionales')
for i, ds in enumerate(f.split_multicell(d, 'datos_adicionales1')):
liq['datos_adicionales%s' % (i + 1)] = ds
for copia in range(1, num_copias+1):
                # fill in the fields and pages
f.add_page()
f.set('copia', copias.get(copia, "Adicional %s" % copia))
f.set('anulado', {'AC': '', '': 'SIN ESTADO',
'AN': "ANULADO"}.get(liq['estado'], "ERROR"))
try:
cod_tipo_ajuste = int(liq["cod_tipo_ajuste"] or '0')
except:
cod_tipo_ajuste = None
f.set('tipo_ajuste', {3: u'Liquidación de Débito',
4: u'Liquidación de Crédito',
}.get(cod_tipo_ajuste, ''))
                # clear the broker data when it does not apply:
if liq.get('actua_corredor', 'N') == 'N':
if liq.get('cuit_corredor', None) == 0:
del liq['cuit_corredor']
                # set the fields according to the header table:
for k,v in liq.items():
v = formatear(k, v, fmt_encabezado)
if isinstance(v, (basestring, int, long, float)):
f.set(k, v)
elif isinstance(v, decimal.Decimal):
f.set(k, str(v))
elif isinstance(v, datetime.datetime):
f.set(k, str(v))
import wslpg_datos as datos
campania = int(liq.get('campania_ppal') or 0)
f.set("campania_ppal", datos.CAMPANIAS.get(campania, campania))
f.set("tipo_operacion", datos.TIPOS_OP.get(int(liq.get('cod_tipo_operacion') or 0), ""))
f.set("actividad", datos.ACTIVIDADES.get(int(liq.get('nro_act_comprador') or 0), ""))
if 'cod_grano' in liq and liq['cod_grano']:
cod_grano = int(liq['cod_grano'])
else:
cod_grano = int(self.datos.get('cod_grano') or 0)
f.set("grano", datos.GRANOS.get(cod_grano, ""))
cod_puerto = int(liq.get('cod_puerto', self.datos.get('cod_puerto')) or 0)
if cod_puerto in datos.PUERTOS:
f.set("des_puerto_localidad", datos.PUERTOS[cod_puerto])
cod_grado_ref = liq.get('cod_grado_ref', self.datos.get('cod_grado_ref')) or ""
if cod_grado_ref in datos.GRADOS_REF:
f.set("des_grado_ref", datos.GRADOS_REF[cod_grado_ref])
else:
f.set("des_grado_ref", cod_grado_ref)
cod_grado_ent = liq.get('cod_grado_ent', self.datos.get('cod_grado_ent'))
if 'val_grado_ent' in liq and int(liq.get('val_grado_ent') or 0):
val_grado_ent = liq['val_grado_ent']
elif 'val_grado_ent' in self.datos:
val_grado_ent = self.datos.get('val_grado_ent')
elif cod_grano in datos.GRADO_ENT_VALOR:
valores = datos.GRADO_ENT_VALOR[cod_grano]
if cod_grado_ent in valores:
val_grado_ent = valores[cod_grado_ent]
else:
val_grado_ent = ""
else:
val_grado_ent = ""
f.set("valor_grado_ent", "%s %s" % (cod_grado_ent or "", val_grado_ent or ""))
f.set("cont_proteico", liq.get('cont_proteico', self.datos.get('cont_proteico', "")))
if liq.get('certificados'):
                    # use the origin from the deposit certificate
cert = liq['certificados'][0]
localidad, provincia = buscar_localidad_provincia(
cert['cod_prov_procedencia'],
cert['cod_localidad_procedencia'])
elif liq.get('cod_prov_procedencia_sin_certificado'):
localidad, provincia = buscar_localidad_provincia(
liq['cod_prov_procedencia_sin_certificado'],
liq['cod_localidad_procedencia_sin_certificado'])
else:
localidad, provincia = "", ""
f.set("procedencia", "%s - %s" % (localidad, provincia))
                # if not specified, use the origin as the place
if not self.datos.get('lugar_y_fecha'):
localidad, provincia = buscar_localidad_provincia(
liq['cod_prov_procedencia'],
liq['cod_localidad_procedencia'])
lugar = "%s - %s " % (localidad, provincia)
fecha = datetime.datetime.today().strftime("%d/%m/%Y")
f.set("lugar_y_fecha", "%s, %s" % (fecha, lugar))
if 'lugar_y_fecha' in self.datos:
del self.datos['lugar_y_fecha']
if HOMO:
homo = "(pruebas)"
else:
homo = ""
if int(liq['cod_tipo_operacion'] or 0) == 1:
f.set("comprador.L", "COMPRADOR:")
f.set("vendedor.L", "VENDEDOR:")
f.set("formulario", u"Form. Electrónico 1116 B %s" % homo)
else:
f.set("comprador.L", "MANDATARIO/CONSIGNATARIO:")
f.set("vendedor.L", "MANDANTE/COMITENTE:")
f.set("formulario", u"Form. Electrónico 1116 C %s" % homo)
if int(liq.get("coe_ajustado") or 0) or int(liq.get("nro_contrato") or 0):
f.set("formulario", u"Ajuste Unificado %s" % homo)
certs = []
for cert in liq.get('certificados', []):
certs.append(u"%s Nº %s" % (
datos.TIPO_CERT_DEP[int(cert['tipo_certificado_deposito'])],
cert['nro_certificado_deposito']))
f.set("certificados_deposito", ', '.join(certs))
for i, deduccion in enumerate(liq.get('deducciones', [])):
for k, v in deduccion.items():
v = formatear(k, v, fmt_deduccion)
f.set("deducciones_%s_%02d" % (k, i + 1), v)
for i, retencion in enumerate(liq.get('retenciones', [])):
for k, v in retencion.items():
v = formatear(k, v, fmt_retencion)
f.set("retenciones_%s_%02d" % (k, i + 1), v)
if retencion['importe_certificado_retencion']:
d = retencion['fecha_certificado_retencion']
f.set('retenciones_cert_retencion_%02d' % (i + 1),
"%s $ %0.2f %s" % (
retencion['nro_certificado_retencion'] or '',
retencion['importe_certificado_retencion'],
"%s/%s/%s" % (d[8:10], d[5:7], d[2:4]),
))
                # load additional fields ([PDF] section of the .ini and AgregarDatoPDF)
for k,v in self.datos.items():
f.set(k, v)
                # Adjustments:
if clave:
f.set('subtipo_ajuste', {'ajuste_debito': u'AJUSTE DÉBITO',
'ajuste_credito': u'AJUSTE CRÉDITO'}[clave])
if int(liq.get('coe_ajustado') or 0):
f.set("leyenda_coe_nro", "COE Ajustado:")
f.set("nro_contrato_o_coe_ajustado", liq['coe_ajustado'])
f.set("coe_relacionados.L", "")
f.set("coe_relacionados", "")
elif liq.get('nro_contrato'):
f.set("leyenda_coe_nro", "Contrato Ajustado:")
f.set("nro_contrato_o_coe_ajustado", liq['nro_contrato'])
##f.set("coe_relacionados", TODO)
return True
except Exception, e:
ex = utils.exception_info()
try:
f.set('anulado', "%(name)s:%(lineno)s" % ex)
except:
pass
self.Excepcion = ex['msg']
self.Traceback = ex['tb']
if DEBUG:
print self.Excepcion
print self.Traceback
return False
def GenerarPDF(self, archivo="", dest="F"):
"Generar archivo de salida en formato PDF"
try:
self.template.render(archivo, dest=dest)
return True
except Exception, e:
self.Excepcion = str(e)
return False
    def MostrarPDF(self, archivo, imprimir=False):
        "Display (and optionally print) the generated PDF file"
try:
            if sys.platform == "linux2":
                os.system('evince "%s"' % archivo)
else:
operation = imprimir and "print" or ""
os.startfile(archivo, operation)
return True
except Exception, e:
self.Excepcion = str(e)
return False
def escribir_archivo(dic, nombre_archivo, agrega=True):
    "Write the liquidation/certification records to the output file"
archivo = open(nombre_archivo, agrega and "a" or "w")
if '--json' in sys.argv:
json.dump(dic, archivo, sort_keys=True, indent=4)
elif '--dbf' in sys.argv:
formatos = [('Encabezado', ENCABEZADO, [dic]),
('Certificacion', CERTIFICACION, [dic]),
('Certificado', CERTIFICADO, dic.get('certificados', [])),
('Retencion', RETENCION, dic.get('retenciones', [])),
('Deduccion', DEDUCCION, dic.get('deducciones', [])),
('Percepcion', PERCEPCION, dic.get('percepciones', [])),
('Opcional', OPCIONAL, dic.get('opcionales', [])),
('AjusteCredito', AJUSTE, dic.get('ajuste_credito', [])),
('AjusteDebito', AJUSTE, dic.get('ajuste_debito', [])),
('CTG', CTG, dic.get('ctgs', [])),
('DetMuestraAnalisis', DET_MUESTRA_ANALISIS, dic.get('det_muestra_analisis', [])),
('Calidad', CALIDAD, dic.get('calidad', [])),
('FacturaPapel', FACTURA_PAPEL, dic.get('factura_papel', [])),
('Fusion', FUSION, dic.get('fusion', [])),
('Dato', DATO, dic.get('datos', [])),
('Error', ERROR, dic.get('errores', [])),
]
guardar_dbf(formatos, agrega, conf_dbf)
else:
dic['tipo_reg'] = 0
archivo.write(escribir(dic, ENCABEZADO))
dic['tipo_reg'] = 7
archivo.write(escribir(dic, CERTIFICACION))
if 'certificados' in dic:
for it in dic['certificados']:
it['tipo_reg'] = 1
archivo.write(escribir(it, CERTIFICADO))
if 'retenciones' in dic:
for it in dic['retenciones']:
it['tipo_reg'] = 2
archivo.write(escribir(it, RETENCION))
if 'deducciones' in dic:
for it in dic['deducciones']:
it['tipo_reg'] = 3
archivo.write(escribir(it, DEDUCCION))
if 'percepciones' in dic:
for it in dic['percepciones']:
it['tipo_reg'] = 'P'
archivo.write(escribir(it, PERCEPCION))
if 'opcionales' in dic:
for it in dic['opcionales']:
it['tipo_reg'] = 'O'
archivo.write(escribir(it, OPCIONAL))
if 'ajuste_debito' in dic:
dic['ajuste_debito']['tipo_reg'] = 4
archivo.write(escribir(dic['ajuste_debito'], AJUSTE))
for it in dic['ajuste_debito'].get('retenciones', []):
it['tipo_reg'] = 2
archivo.write(escribir(it, RETENCION))
for it in dic['ajuste_debito'].get('deducciones', []):
it['tipo_reg'] = 3
archivo.write(escribir(it, DEDUCCION))
for it in dic['ajuste_debito'].get('percepciones', []):
it['tipo_reg'] = "P"
archivo.write(escribir(it, PERCEPCION))
for it in dic['ajuste_debito'].get('certificados', []):
it['tipo_reg'] = 1
archivo.write(escribir(it, CERTIFICADO))
if 'ajuste_credito' in dic:
dic['ajuste_credito']['tipo_reg'] = 5
archivo.write(escribir(dic['ajuste_credito'], AJUSTE))
for it in dic['ajuste_credito'].get('retenciones', []):
it['tipo_reg'] = 2
archivo.write(escribir(it, RETENCION))
for it in dic['ajuste_credito'].get('deducciones', []):
it['tipo_reg'] = 3
archivo.write(escribir(it, DEDUCCION))
for it in dic['ajuste_credito'].get('percepciones', []):
it['tipo_reg'] = "P"
archivo.write(escribir(it, PERCEPCION))
for it in dic['ajuste_credito'].get('certificados', []):
it['tipo_reg'] = 1
archivo.write(escribir(it, CERTIFICADO))
if 'ctgs' in dic:
for it in dic['ctgs']:
it['tipo_reg'] = 'C'
archivo.write(escribir(it, CTG))
if 'det_muestra_analisis' in dic:
for it in dic['det_muestra_analisis']:
it['tipo_reg'] = 'D'
archivo.write(escribir(it, DET_MUESTRA_ANALISIS))
if 'calidad' in dic:
for it in dic['calidad']:
it['tipo_reg'] = 'Q'
archivo.write(escribir(it, CALIDAD))
if 'factura_papel' in dic:
for it in dic['factura_papel']:
it['tipo_reg'] = 'F'
archivo.write(escribir(it, FACTURA_PAPEL))
if 'fusion' in dic:
for it in dic['fusion']:
it['tipo_reg'] = 'f'
archivo.write(escribir(it, FUSION))
if 'datos' in dic:
for it in dic['datos']:
it['tipo_reg'] = 9
archivo.write(escribir(it, DATO))
if 'errores' in dic:
for it in dic['errores']:
it['tipo_reg'] = 'R'
archivo.write(escribir(it, ERROR))
archivo.close()
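# Record-type reference for the fixed-width text format written above (the
# first character of each line identifies the record layout):
#   0=ENCABEZADO, 7=CERTIFICACION, 1=CERTIFICADO, 2=RETENCION, 3=DEDUCCION,
#   P=PERCEPCION, O=OPCIONAL, 4=AJUSTE (debit), 5=AJUSTE (credit), C=CTG,
#   D=DET_MUESTRA_ANALISIS, Q=CALIDAD, F=FACTURA_PAPEL, f=FUSION, 9=DATO,
#   R=ERROR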
def leer_archivo(nombre_archivo):
    "Read the liquidation/certification records from the input file"
archivo = open(nombre_archivo, "r")
if '--json' in sys.argv:
dic = json.load(archivo)
elif '--dbf' in sys.argv:
        dic = {'retenciones': [], 'deducciones': [], 'certificados': [],
               'percepciones': [], 'opcionales': [], 'fusion': [],
               'datos': [], 'ajuste_credito': [], 'ajuste_debito': [],
               'ctgs': [], 'det_muestra_analisis': [], 'calidad': [],
               'factura_papel': [],
               }
formatos = [('Encabezado', ENCABEZADO, dic),
('Certificacion', CERTIFICACION, dic),
('Certificado', CERTIFICADO, dic['certificados']),
                    ('Retencion', RETENCION, dic['retenciones']),
('Deduccion', DEDUCCION, dic['deducciones']),
('Percepcion', PERCEPCION, dic['percepciones']),
('Opcional', OPCIONAL, dic['opcionales']),
('AjusteCredito', AJUSTE, dic['ajuste_credito']),
('AjusteDebito', AJUSTE, dic['ajuste_debito']),
('CTG', CTG, dic.get('ctgs', [])),
('DetMuestraAnalisis', DET_MUESTRA_ANALISIS, dic.get('det_muestra_analisis', [])),
('Calidad', CALIDAD, dic.get('calidad', [])),
('FacturaPapel', FACTURA_PAPEL, dic.get('factura_papel', [])),
('Fusion', FUSION, dic.get('fusion', [])),
('Dato', DATO, dic['datos']),
]
leer_dbf(formatos, conf_dbf)
else:
dic = {'retenciones': [], 'deducciones': [], 'certificados': [],
'percepciones': [], 'opcionales': [],
'datos': [], 'ajuste_credito': {}, 'ajuste_debito': {},
'ctgs': [], 'det_muestra_analisis': [], 'calidad': [],
'factura_papel': [], 'fusion': [],
}
for linea in archivo:
if str(linea[0])=='0':
                # base header of the liquidations
d = leer(linea, ENCABEZADO)
if d['reservado1']:
print "ADVERTENCIA: USAR datos adicionales (nueva posición)"
d['datos_adicionales'] = d['reservado1']
dic.update(d)
                # keep a reference to the liquidation to append withholdings / deductions:
liq = dic
elif str(linea[0])=='1':
d = leer(linea, CERTIFICADO)
if d['reservado1']:
print "ADVERTENCIA: USAR tipo_certificado_deposito (nueva posición)"
d['tipo_certificado_deposito'] = d['reservado1']
liq['certificados'].append(d)
elif str(linea[0])=='2':
liq['retenciones'].append(leer(linea, RETENCION))
elif str(linea[0])=='3':
d = leer(linea, DEDUCCION)
                # adjustments for AFIP changes (backwards compatibility):
if d['reservado1']:
print "ADVERTENCIA: USAR precio_pkg_diario!"
d['precio_pkg_diario'] = d['reservado1']
liq['deducciones'].append(d)
elif str(linea[0])=='P':
liq['percepciones'].append(leer(linea, PERCEPCION))
elif str(linea[0])=='O':
liq['opcionales'].append(leer(linea, OPCIONAL))
elif str(linea[0])=='4':
liq = leer(linea, AJUSTE)
liq.update({'retenciones': [], 'deducciones': [], 'percepciones': [], 'datos': [], 'certificados': []})
dic['ajuste_debito'] = liq
elif str(linea[0])=='5':
liq = leer(linea, AJUSTE)
liq.update({'retenciones': [], 'deducciones': [], 'percepciones': [], 'datos': [], 'certificados': []})
dic['ajuste_credito'] = liq
elif str(linea[0])=='7':
                # update with the header for grain certifications:
d = leer(linea, CERTIFICACION)
dic.update(d)
elif str(linea[0])=='C':
dic['ctgs'].append(leer(linea, CTG))
elif str(linea[0])=='D':
dic['det_muestra_analisis'].append(leer(linea, DET_MUESTRA_ANALISIS))
elif str(linea[0])=='Q':
dic['calidad'].append(leer(linea, CALIDAD))
elif str(linea[0])=='F':
dic['factura_papel'].append(leer(linea, FACTURA_PAPEL))
elif str(linea[0])=='f':
dic['fusion'].append(leer(linea, FUSION))
elif str(linea[0])=='9':
dic['datos'].append(leer(linea, DATO))
else:
print "Tipo de registro incorrecto:", linea[0]
archivo.close()
if not 'nro_orden' in dic:
        raise RuntimeError("Invalid input file, check the fields and blank lines")
if DEBUG:
import pprint; pprint.pprint(dic)
return dic
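# Round-trip sketch (hypothetical file names): records written by
# escribir_archivo can be read back with leer_archivo:
#   dic = leer_archivo("entrada.txt")
#   # ... call the webservice and merge wslpg.params_out into dic ...
#   escribir_archivo(dic, "salida.txt", agrega=False)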
# find the installation directory (global so it does not change if another dll is used)
INSTALL_DIR = WSLPG.InstallDir = get_install_dir()
if __name__ == '__main__':
if '--ayuda' in sys.argv:
print LICENCIA
print AYUDA
sys.exit(0)
if '--formato' in sys.argv:
print "Formato:"
for msg, formato in [('Encabezado', ENCABEZADO),
('Certificado', CERTIFICADO),
('Retencion', RETENCION),
('Deduccion', DEDUCCION),
('Percepcion', PERCEPCION),
('Opcional', OPCIONAL),
('Ajuste', AJUSTE),
('Certificacion', CERTIFICACION),
('CTG', CTG),
('Det. Muestra Analisis', DET_MUESTRA_ANALISIS),
('Calidad', CALIDAD),
('Factura Papel', FACTURA_PAPEL),
('Fusion', FUSION),
('Evento', EVENTO), ('Error', ERROR),
('Dato', DATO)]:
comienzo = 1
print "=== %s ===" % msg
for fmt in formato:
clave, longitud, tipo = fmt[0:3]
dec = len(fmt)>3 and fmt[3] or (tipo=='I' and '2' or '')
print " * Campo: %-20s Posición: %3d Longitud: %4d Tipo: %s Decimales: %s" % (
clave, comienzo, longitud, tipo, dec)
comienzo += longitud
sys.exit(0)
if "--register" in sys.argv or "--unregister" in sys.argv:
import win32com.server.register
win32com.server.register.UseCommandLine(WSLPG)
sys.exit(0)
import csv
from ConfigParser import SafeConfigParser
from wsaa import WSAA
try:
if "--version" in sys.argv:
print "Versión: ", __version__
if len(sys.argv)>1 and sys.argv[1].endswith(".ini"):
CONFIG_FILE = sys.argv[1]
print "Usando configuracion:", CONFIG_FILE
config = SafeConfigParser()
config.read(CONFIG_FILE)
CERT = config.get('WSAA','CERT')
PRIVATEKEY = config.get('WSAA','PRIVATEKEY')
CUIT = config.get('WSLPG','CUIT')
ENTRADA = config.get('WSLPG','ENTRADA')
SALIDA = config.get('WSLPG','SALIDA')
if config.has_option('WSAA','URL') and not HOMO:
WSAA_URL = config.get('WSAA','URL')
else:
WSAA_URL = None #wsaa.WSAAURL
if config.has_option('WSLPG','URL') and not HOMO:
WSLPG_URL = config.get('WSLPG','URL')
else:
WSLPG_URL = WSDL
PROXY = config.has_option('WSAA', 'PROXY') and config.get('WSAA', 'PROXY') or None
CACERT = config.has_option('WSAA', 'CACERT') and config.get('WSAA', 'CACERT') or None
WRAPPER = config.has_option('WSAA', 'WRAPPER') and config.get('WSAA', 'WRAPPER') or None
if config.has_option('WSLPG', 'TIMEOUT'):
TIMEOUT = int(config.get('WSLPG', 'TIMEOUT'))
if config.has_section('DBF'):
conf_dbf = dict(config.items('DBF'))
if DEBUG: print "conf_dbf", conf_dbf
else:
conf_dbf = {}
DEBUG = '--debug' in sys.argv
XML = '--xml' in sys.argv
if DEBUG:
print "Usando Configuración:"
print "WSAA_URL:", WSAA_URL
print "WSLPG_URL:", WSLPG_URL
print "CACERT", CACERT
print "WRAPPER", WRAPPER
print "timeout:", TIMEOUT
# obtain the access ticket (TA) from WSAA (already imported above):
wsaa = WSAA()
ta = wsaa.Autenticar("wslpg", CERT, PRIVATEKEY, wsdl=WSAA_URL,
proxy=PROXY, wrapper=WRAPPER, cacert=CACERT)
if not ta:
sys.exit("Imposible autenticar con WSAA: %s" % wsaa.Excepcion)
# SOAP client for the web service
wslpg = WSLPG()
wslpg.LanzarExcepciones = True
wslpg.Conectar(url=WSLPG_URL, proxy=PROXY, wrapper=WRAPPER, cacert=CACERT, timeout=TIMEOUT)
wslpg.SetTicketAcceso(ta)
wslpg.Cuit = CUIT
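# From this point on, wslpg is an authenticated and connected SOAP client;
# each command-line flag below drives one self-contained workflow.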
if '--dummy' in sys.argv:
ret = wslpg.Dummy()
print "AppServerStatus", wslpg.AppServerStatus
print "DbServerStatus", wslpg.DbServerStatus
print "AuthServerStatus", wslpg.AuthServerStatus
##sys.exit(0)
if '--autorizar' in sys.argv:
if '--prueba' in sys.argv:
pto_emision = 99
# build a sample settlement:
dic = dict(
pto_emision=pto_emision,
nro_orden=0, # compute it automatically
cuit_comprador='20400000000',
nro_act_comprador=40, nro_ing_bruto_comprador='20400000000',
cod_tipo_operacion=2 if "--consign" in sys.argv else 1,
es_liquidacion_propia='N', es_canje='N',
cod_puerto=14, des_puerto_localidad="DETALLE PUERTO",
cod_grano=31,
cuit_vendedor=23000000019, nro_ing_bruto_vendedor=23000000019,
actua_corredor="S", liquida_corredor="S",
cuit_corredor=wslpg.Cuit, # use the represented CUIT
comision_corredor=1, nro_ing_bruto_corredor=wslpg.Cuit,
fecha_precio_operacion="2014-02-07",
precio_ref_tn=2000,
cod_grado_ref="G1",
cod_grado_ent="FG",
factor_ent=98, val_grado_ent=1.02,
precio_flete_tn=10,
cont_proteico=20,
alic_iva_operacion=10.5,
campania_ppal=1314,
cod_localidad_procedencia=5544,
cod_prov_procedencia=12,
nro_contrato=0,
datos_adicionales=("DATOS ADICIONALES 1234 " * 17) + ".",
##peso_neto_sin_certificado=2000,
precio_operacion=None, # to test adjusting
total_peso_neto=1000, # to test adjusting
certificados=[dict(
tipo_certificado_deposito=332, # electronic certificate
nro_certificado_deposito=332000000466,
peso_neto=1000,
cod_localidad_procedencia=3,
cod_prov_procedencia=1,
campania=1314,
fecha_cierre="2014-01-13",)],
retenciones=[dict(
codigo_concepto="RI",
detalle_aclaratorio="DETALLE DE IVA",
base_calculo=1000,
alicuota=10.5,
), dict(
codigo_concepto="RG",
detalle_aclaratorio="DETALLE DE GANANCIAS",
base_calculo=100,
alicuota=0,
), dict(
codigo_concepto="OG",
detalle_aclaratorio="OTRO GRAVAMEN",
base_calculo=1000,
alicuota=0,
nro_certificado_retencion=111111111111,
fecha_certificado_retencion="2013-05-01",
importe_certificado_retencion=105,
)],
deducciones=[dict(
codigo_concepto="OD",
detalle_aclaratorio="FLETE",
dias_almacenaje="0",
precio_pkg_diario=0.0,
comision_gastos_adm=0.0,
base_calculo=100.0,
alicuota=21.0,
),dict(
codigo_concepto="AL",
detalle_aclaratorio="ALMACENAJE",
dias_almacenaje="30",
precio_pkg_diario=0.0001,
comision_gastos_adm=0.0,
alicuota=21.0,
),],
percepciones=[{'detalle_aclaratoria': 'percepcion 1',
'base_calculo': 1000, 'alicuota_iva': 21}],
datos=[
dict(campo="nombre_comprador", valor="NOMBRE 1"),
dict(campo="domicilio1_comprador", valor="DOMICILIO 1"),
dict(campo="domicilio2_comprador", valor="DOMICILIO 1"),
dict(campo="localidad_comprador", valor="LOCALIDAD 1"),
dict(campo="iva_comprador", valor="R.I."),
dict(campo="nombre_vendedor", valor="NOMBRE 2"),
dict(campo="domicilio1_vendedor", valor="DOMICILIO 2"),
dict(campo="domicilio2_vendedor", valor="DOMICILIO 2"),
dict(campo="localidad_vendedor", valor="LOCALIDAD 2"),
dict(campo="iva_vendedor", valor="R.I."),
dict(campo="nombre_corredor", valor="NOMBRE 3"),
dict(campo="domicilio_corredor", valor="DOMICILIO 3"),
]
)
if "--sincorr" in sys.argv:
# tweak the data for the no-broker test case
dic.update(dict(
cuit_comprador=wslpg.Cuit,
nro_act_comprador=29, nro_ing_bruto_comprador=wslpg.Cuit,
actua_corredor="N", liquida_corredor="N",
cuit_corredor=0,
comision_corredor=0, nro_ing_bruto_corredor=0,))
dic['retenciones'][1]['alicuota'] = 15
del dic['datos'][-1]
del dic['datos'][-1]
if "--sincert" in sys.argv:
# tweak the data for the test case without a deposit certificate
dic['peso_neto_sin_certificado'] = 10000
dic['cod_prov_procedencia_sin_certificado'] = 1
dic['cod_localidad_procedencia_sin_certificado'] = 15124
dic['certificados'] = []
if "--singrado" in sys.argv:
# tweak the data for the test case without grade or delivered value
dic['cod_grado_ref'] = ""
dic['cod_grado_ent'] = ""
dic['val_grado_ent'] = 0
if "--consign" in sys.argv:
# add a deduction for the administrative expenses commission
dic['deducciones'].append(dict(
codigo_concepto="CO",
detalle_aclaratorio="COMISION",
dias_almacenaje=None,
precio_pkg_diario=None,
comision_gastos_adm=1.0,
base_calculo=1000.00,
alicuota=21.0,
))
escribir_archivo(dic, ENTRADA)
dic = leer_archivo(ENTRADA)
if int(dic['nro_orden']) == 0 and '--testing' not in sys.argv:
# query the last order number issued:
ok = wslpg.ConsultarUltNroOrden(dic['pto_emision'])
if ok:
dic['nro_orden'] = wslpg.NroOrden + 1
# set the parameters (they can also be passed directly to the method)
for k, v in sorted(dic.items()):
if DEBUG: print "%s = %s" % (k, v)
wslpg.SetParametro(k, v)
# load the settlement:
wslpg.CrearLiquidacion()
for cert in dic.get('certificados', []):
wslpg.AgregarCertificado(**cert)
for ded in dic.get('deducciones', []):
wslpg.AgregarDeduccion(**ded)
for ret in dic.get('retenciones', []):
wslpg.AgregarRetencion(**ret)
for per in dic.get('percepciones', []):
wslpg.AgregarPercepcion(**per)
if '--testing' in sys.argv:
# test message (no remote call is made),
# use only when the service is not operational
if '--error' in sys.argv:
wslpg.LoadTestXML("wslpg_error.xml") # load the error response
else:
wslpg.LoadTestXML("wslpg_aut_test.xml") # load the canned response
print "Liquidacion: pto_emision=%s nro_orden=%s nro_act=%s tipo_op=%s" % (
wslpg.liquidacion['ptoEmision'],
wslpg.liquidacion['nroOrden'],
wslpg.liquidacion['nroActComprador'],
wslpg.liquidacion['codTipoOperacion'],
)
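# Send the settlement to AFIP (skipped when only --dummy was requested):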
if '--dummy' not in sys.argv:
if '--recorrer' in sys.argv:
print "Querying enabled activities and operations..."
lista_act_op = wslpg.ConsultarTiposOperacion(sep=None)
# walk the enabled activity/operation pairs until one authorizes:
for nro_act, cod_op, det in lista_act_op:
print "Trying nro_act=", nro_act, "cod_op=", cod_op,
wslpg.liquidacion['nroActComprador'] = nro_act
wslpg.liquidacion['codTipoOperacion'] = cod_op
ret = wslpg.AutorizarLiquidacion()
if wslpg.COE:
print
break # got a COE, stop searching
else:
print wslpg.Errores # fix: the client object in scope is wslpg, not wslpgPDF
else:
print "Authorizing..."
ret = wslpg.AutorizarLiquidacion()
if wslpg.Excepcion:
print >> sys.stderr, "EXCEPCION:", wslpg.Excepcion
if DEBUG: print >> sys.stderr, wslpg.Traceback
print "Errores:", wslpg.Errores
print "COE", wslpg.COE
print "COEAjustado", wslpg.COEAjustado
print "TotalDeduccion", wslpg.TotalDeduccion
print "TotalRetencion", wslpg.TotalRetencion
print "TotalRetencionAfip", wslpg.TotalRetencionAfip
print "TotalOtrasRetenciones", wslpg.TotalOtrasRetenciones
print "TotalNetoAPagar", wslpg.TotalNetoAPagar
print "TotalIvaRg4310_18", wslpg.TotalIvaRg4310_18
print "TotalPagoSegunCondicion", wslpg.TotalPagoSegunCondicion
if False and '--testing' in sys.argv:
assert wslpg.COE == "330100000357"
assert wslpg.COEAjustado is None
assert wslpg.Estado == "AC"
assert wslpg.TotalPagoSegunCondicion == 1968.00
assert wslpg.GetParametro("fecha_liquidacion") == "2013-02-07"
assert wslpg.GetParametro("retenciones", 1, "importe_retencion") == "157.60"
if DEBUG:
pprint.pprint(wslpg.params_out)
# update the output file with the returned data
dic.update(wslpg.params_out)
escribir_archivo(dic, SALIDA, agrega=('--agrega' in sys.argv))
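# Adjustment workflow: an adjustment references an already authorized
# settlement (coe_ajustado) and carries separate credit and debit
# sub-records, each with its own deductions, withholdings and certificates.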
if '--ajustar' in sys.argv:
print "Ajustando..."
if '--prueba' in sys.argv:
# build a sample settlement:
dic = dict(
pto_emision=55, nro_orden=0, coe_ajustado='330100025869',
cod_localidad_procedencia=5544, cod_prov_procedencia=12,
cod_puerto=14, des_puerto_localidad="DETALLE PUERTO",
cod_grano=31, # not sent to AFIP, but used for the PDF
certificados=[dict(
tipo_certificado_deposito=5,
nro_certificado_deposito=555501200729,
peso_neto=10000,
cod_localidad_procedencia=3,
cod_prov_procedencia=1,
campania=1213,
fecha_cierre='2013-01-13',
peso_neto_total_certificado=10000,
)],
fusion=[{'nro_ing_brutos': '20400000000', 'nro_actividad': 40}],
ajuste_credito=dict(
diferencia_peso_neto=1000, diferencia_precio_operacion=100,
cod_grado="G2", val_grado=1.0, factor=100,
diferencia_precio_flete_tn=10,
datos_adicionales='AJUSTE CRED UNIF',
concepto_importe_iva_0='Alicuota Cero',
importe_ajustar_Iva_0=900,
concepto_importe_iva_105='Alicuota Diez',
importe_ajustar_Iva_105=800,
concepto_importe_iva_21='Alicuota Veintiuno',
importe_ajustar_Iva_21=700,
deducciones=[dict(codigo_concepto="AL",
detalle_aclaratorio="Deduc Alm",
dias_almacenaje="1",
precio_pkg_diario=0.01,
comision_gastos_adm=1.0,
base_calculo=1000.0,
alicuota=10.5, )],
retenciones=[dict(codigo_concepto="RI",
detalle_aclaratorio="Ret IVA",
base_calculo=1000,
alicuota=10.5, )],
certificados=[{'peso_neto': 200,
'coe_certificado_deposito': '330100025869'}],
),
ajuste_debito=dict(
diferencia_peso_neto=500, diferencia_precio_operacion=100,
cod_grado="G2", val_grado=1.0, factor=100,
diferencia_precio_flete_tn=0.01,
datos_adicionales='AJUSTE DEB UNIF',
concepto_importe_iva_0='Alic 0',
importe_ajustar_Iva_0=250,
concepto_importe_iva_105='Alic 10.5',
importe_ajustar_Iva_105=200,
concepto_importe_iva_21='Alicuota 21',
importe_ajustar_Iva_21=50,
deducciones=[dict(codigo_concepto="AL",
detalle_aclaratorio="Deduc Alm",
dias_almacenaje="1",
precio_pkg_diario=0.01,
comision_gastos_adm=1.0,
base_calculo=500.0,
alicuota=10.5, )],
retenciones=[dict(codigo_concepto="RI",
detalle_aclaratorio="Ret IVA",
base_calculo=100,
alicuota=10.5, )],
certificados=[{'peso_neto': 300,
'coe_certificado_deposito': '330100025869'}],
),
datos=[
dict(campo="nombre_comprador", valor="NOMBRE 1"),
dict(campo="domicilio1_comprador", valor="DOMICILIO 1"),
dict(campo="domicilio2_comprador", valor="DOMICILIO 1"),
dict(campo="localidad_comprador", valor="LOCALIDAD 1"),
dict(campo="iva_comprador", valor="R.I."),
dict(campo="nombre_vendedor", valor="NOMBRE 2"),
dict(campo="domicilio1_vendedor", valor="DOMICILIO 2"),
dict(campo="domicilio2_vendedor", valor="DOMICILIO 2"),
dict(campo="localidad_vendedor", valor="LOCALIDAD 2"),
dict(campo="iva_vendedor", valor="R.I."),
dict(campo="nombre_corredor", valor="NOMBRE 3"),
dict(campo="domicilio_corredor", valor="DOMICILIO 3"),
# fill in data not covered by the AFIP response:
dict(campo="cod_grano", valor="31"),
dict(campo="cod_grado_ent", valor="G1"),
dict(campo="cod_grado_ref", valor="G1"),
dict(campo="factor_ent", valor="98"),
dict(campo="cod_puerto", valor=14),
dict(campo="cod_localidad_procedencia", valor=3),
dict(campo="cod_prov_procedencia", valor=1),
dict(campo="precio_ref_tn", valor="$ 1000,00"),
dict(campo="precio_flete_tn", valor="$ 100,00"),
dict(campo="des_grado_ref", valor="G1"),
dict(campo="alic_iva_operacion", valor=""),
]
)
if '--contrato' in sys.argv:
dic.update(
{'nro_act_comprador': 40,
'cod_grado_ent': 'G1',
'cod_grano': 31,
'cod_puerto': 14,
'cuit_comprador': 20400000000,
'cuit_corredor': 20267565393,
'cuit_vendedor': 23000000019,
'des_puerto_localidad': 'Desc Puerto',
'nro_contrato': 27,
'precio_flete_tn': 1000,
'precio_ref_tn': 1000,
'val_grado_ent': 1.01})
#del dic['ajuste_debito']['retenciones']
#del dic['ajuste_credito']['retenciones']
escribir_archivo(dic, ENTRADA)
dic = leer_archivo(ENTRADA)
if int(dic['nro_orden']) == 0 and '--testing' not in sys.argv:
# query the last order number issued:
ok = wslpg.ConsultarUltNroOrden(dic['pto_emision'])
if ok:
dic['nro_orden'] = wslpg.NroOrden + 1
if '--contrato' in sys.argv:
for k in ("nro_contrato", "nro_act_comprador", "cod_grano",
"cuit_vendedor", "cuit_comprador", "cuit_corredor",
"precio_ref_tn", "cod_grado_ent", "val_grado_ent",
"precio_flete_tn", "cod_puerto",
"des_puerto_localidad"):
v = dic.get(k)
if v:
wslpg.SetParametro(k, v)
wslpg.CrearAjusteBase(pto_emision=dic['pto_emision'],
nro_orden=dic['nro_orden'],
coe_ajustado=dic['coe_ajustado'],
cod_localidad=dic['cod_localidad_procedencia'],
cod_provincia=dic['cod_prov_procedencia'],
)
for cert in dic.get('certificados', []):
if cert:
wslpg.AgregarCertificado(**cert)
for fusion in dic.get('fusion', []):
wslpg.AgregarFusion(**fusion)
liq = dic['ajuste_credito']
wslpg.CrearAjusteCredito(**liq)
for ded in liq.get('deducciones', []):
wslpg.AgregarDeduccion(**ded)
for ret in liq.get('retenciones', []):
wslpg.AgregarRetencion(**ret)
for cert in liq.get('certificados', []):
if cert:
wslpg.AgregarCertificado(**cert)
liq = dic['ajuste_debito']
wslpg.CrearAjusteDebito(**liq)
for ded in liq.get('deducciones', []):
wslpg.AgregarDeduccion(**ded)
for ret in liq.get('retenciones', []):
wslpg.AgregarRetencion(**ret)
for cert in liq.get('certificados', []):
if cert:
wslpg.AgregarCertificado(**cert)
if '--testing' in sys.argv:
wslpg.LoadTestXML("tests/wslpg_ajuste_unificado.xml")
if '--contrato' in sys.argv:
ret = wslpg.AjustarLiquidacionContrato()
else:
ret = wslpg.AjustarLiquidacionUnificado()
if wslpg.Excepcion:
print >> sys.stderr, "EXCEPCION:", wslpg.Excepcion
if DEBUG: print >> sys.stderr, wslpg.Traceback
print "Errores:", wslpg.Errores
print "COE", wslpg.COE
print "Subtotal", wslpg.Subtotal
print "TotalIva105", wslpg.TotalIva105
print "TotalIva21", wslpg.TotalIva21
print "TotalRetencionesGanancias", wslpg.TotalRetencionesGanancias
print "TotalRetencionesIVA", wslpg.TotalRetencionesIVA
print "TotalNetoAPagar", wslpg.TotalNetoAPagar
print "TotalIvaRg4310_18", wslpg.TotalIvaRg4310_18
print "TotalPagoSegunCondicion", wslpg.TotalPagoSegunCondicion
# update the output file with the returned data
dic.update(wslpg.params_out)
ok = wslpg.AnalizarAjusteCredito()
dic['ajuste_credito'].update(wslpg.params_out)
ok = wslpg.AnalizarAjusteDebito()
dic['ajuste_debito'].update(wslpg.params_out)
escribir_archivo(dic, SALIDA, agrega=('--agrega' in sys.argv))
if DEBUG:
pprint.pprint(dic)
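# Associate an existing settlement (COE) with a contract number: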
if '--asociar' in sys.argv:
print "Asociando...",
if '--prueba' in sys.argv:
# generate sample data in the input file to query:
dic = dict(coe="330100004664", nro_contrato=26, cod_grano=31,
cuit_comprador="20400000000",
cuit_vendedor="23000000019",
cuit_corredor="20267565393",
)
escribir_archivo(dic, ENTRADA)
dic = leer_archivo(ENTRADA)
print ', '.join(sorted(["%s=%s" % (k, v) for k,v in dic.items()
if k in ("nro_contrato", "coe") or
k.startswith("cuit")]))
if '--lsg' not in sys.argv:
wslpg.AsociarLiquidacionAContrato(**dic)
else:
wslpg.AsociarLiquidacionSecundariaAContrato(**dic)
print "Errores:", wslpg.Errores
print "COE", wslpg.COE
print "Estado", wslpg.Estado
# update the output file with the returned data
dic.update(wslpg.params_out)
escribir_archivo(dic, SALIDA, agrega=('--agrega' in sys.argv))
if '--anular' in sys.argv:
##print wslpg.client.help("anularLiquidacion")
try:
coe = sys.argv[sys.argv.index("--anular") + 1]
except IndexError:
coe = 330100000357
if '--lsg' in sys.argv:
print "Cancelling COE LSG", coe
ret = wslpg.AnularLiquidacionSecundaria(coe)
elif '--cg' in sys.argv: # elif: the modes are mutually exclusive
print "Cancelling COE CG", coe
ret = wslpg.AnularCertificacion(coe)
else:
print "Cancelling COE", coe
ret = wslpg.AnularLiquidacion(coe)
if wslpg.Excepcion:
print >> sys.stderr, "EXCEPCION:", wslpg.Excepcion
if DEBUG: print >> sys.stderr, wslpg.Traceback
print "COE", wslpg.COE
print "Resultado", wslpg.Resultado
print "Errores:", wslpg.Errores
sys.exit(0)
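# Query a settlement by emission point and order number, or directly by COE,
# optionally retrieving the authorized PDF: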
if '--consultar' in sys.argv:
pto_emision = None
nro_orden = 0
coe = pdf = None
try:
pto_emision = sys.argv[sys.argv.index("--consultar") + 1]
nro_orden = sys.argv[sys.argv.index("--consultar") + 2]
coe = sys.argv[sys.argv.index("--consultar") + 3]
pdf = sys.argv[sys.argv.index("--consultar") + 4]
except IndexError:
pass
if '--testing' in sys.argv:
# test message (no remote call is made),
# use only when the service is not operational
wslpg.LoadTestXML("wslpg_cons_test.xml") # load the test fixture
print "Consultando: pto_emision=%s nro_orden=%s coe=%s" % (pto_emision, nro_orden, coe)
if '--lsg' in sys.argv:
ret = wslpg.ConsultarLiquidacionSecundaria(pto_emision=pto_emision, nro_orden=nro_orden, coe=coe, pdf=pdf)
elif '--cg' in sys.argv:
ret = wslpg.ConsultarCertificacion(pto_emision=pto_emision, nro_orden=nro_orden, coe=coe, pdf=pdf)
elif '--cancelar-anticipo' in sys.argv:
ret = wslpg.CancelarAnticipo(pto_emision=pto_emision, nro_orden=nro_orden, coe=coe, pdf=pdf)
else:
ret = wslpg.ConsultarLiquidacion(pto_emision=pto_emision, nro_orden=nro_orden, coe=coe, pdf=pdf)
print "COE", wslpg.COE
print "Estado", wslpg.Estado
print "Errores:", wslpg.Errores
# update the output file with the returned data
escribir_archivo(wslpg.params_out, SALIDA, agrega=('--agrega' in sys.argv))
if DEBUG:
pprint.pprint(wslpg.params_out)
if '--mostrar' in sys.argv and pdf:
wslpg.MostrarPDF(archivo=pdf,
imprimir='--imprimir' in sys.argv)
if '--consultar_ajuste' in sys.argv:
pto_emision = None
nro_orden = 0
nro_contrato = None
coe = pdf = None
try:
pto_emision = int(sys.argv[sys.argv.index("--consultar_ajuste") + 1])
nro_orden = int(sys.argv[sys.argv.index("--consultar_ajuste") + 2])
nro_contrato = int(sys.argv[sys.argv.index("--consultar_ajuste") + 3])
coe = sys.argv[sys.argv.index("--consultar_ajuste") + 4]
pdf = sys.argv[sys.argv.index("--consultar_ajuste") + 5]
except IndexError:
pass
if '--testing' in sys.argv:
# test message (no remote call is made),
# use only when the service is not operational
wslpg.LoadTestXML("wslpg_cons_ajuste_test.xml") # load the test fixture
print "Consultando: pto_emision=%s nro_orden=%s nro_contrato=%s" % (
pto_emision, nro_orden, nro_contrato)
wslpg.ConsultarAjuste(pto_emision, nro_orden, nro_contrato, coe, pdf)
print "COE", wslpg.COE
print "Estado", wslpg.Estado
print "Errores:", wslpg.Errores
# update the output file with the returned data
dic = wslpg.params_out
ok = wslpg.AnalizarAjusteCredito()
dic['ajuste_credito'] = wslpg.params_out
ok = wslpg.AnalizarAjusteDebito()
dic['ajuste_debito'] = wslpg.params_out
escribir_archivo(dic, SALIDA, agrega=('--agrega' in sys.argv))
if DEBUG:
pprint.pprint(dic)
if '--consultar_por_contrato' in sys.argv:
print "Consultando liquidaciones por contrato...",
if '--prueba' in sys.argv:
# generate sample data in the input file to query:
dic = dict(nro_contrato=26, cod_grano=31,
cuit_comprador="20400000000",
cuit_vendedor="23000000019",
cuit_corredor="20267565393",
)
escribir_archivo(dic, ENTRADA)
dic = leer_archivo(ENTRADA)
print ', '.join(sorted(["%s=%s" % (k, v) for k,v in dic.items()
if k == "nro_contrato" or k.startswith("cuit")]))
if '--lsg' not in sys.argv:
wslpg.ConsultarLiquidacionesPorContrato(**dic)
else:
wslpg.ConsultarLiquidacionesSecundariasPorContrato(**dic)
print "Errores:", wslpg.Errores
while wslpg.COE:
print "COE", wslpg.COE
wslpg.LeerDatosLiquidacion()
##print "Estado", wslpg.Estado
# update the output file with the returned data
dic['coe'] = wslpg.COE
escribir_archivo(dic, SALIDA, agrega=True)
if '--ult' in sys.argv:
try:
pto_emision = int(sys.argv[sys.argv.index("--ult") + 1])
except (IndexError, ValueError):
pto_emision = 1
print "Consultando ultimo nro_orden para pto_emision=%s" % pto_emision,
if '--lsg' in sys.argv:
print "LSG"
ret = wslpg.ConsultarLiquidacionSecundariaUltNroOrden(pto_emision)
elif '--cg' in sys.argv:
print "CG"
ret = wslpg.ConsultarCertificacionUltNroOrden(pto_emision)
else:
print "LPG"
ret = wslpg.ConsultarUltNroOrden(pto_emision)
if wslpg.Excepcion:
print >> sys.stderr, "EXCEPCION:", wslpg.Excepcion
if DEBUG: print >> sys.stderr, wslpg.Traceback
print "Ultimo Nro de Orden", wslpg.NroOrden
print "Errores:", wslpg.Errores
sys.exit(0)
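# Secondary grain settlement (LSG) authorization workflow: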
if '--autorizar-lsg' in sys.argv:
if '--prueba' in sys.argv:
# build a sample settlement:
dic = dict(
pto_emision=99,
nro_orden=1, nro_contrato=100001232,
cuit_comprador='20400000000',
nro_ing_bruto_comprador='123',
cod_puerto=14, des_puerto_localidad="DETALLE PUERTO",
cod_grano=2, cantidad_tn=100,
cuit_vendedor="23000000019", nro_act_vendedor=29,
nro_ing_bruto_vendedor=123456,
actua_corredor="S", liquida_corredor="S",
cuit_corredor=wslpg.Cuit, # use the represented CUIT
nro_ing_bruto_corredor=wslpg.Cuit,
fecha_precio_operacion="2014-10-10",
precio_ref_tn=100, precio_operacion=150,
alic_iva_operacion=10.5, campania_ppal=1314,
cod_localidad_procedencia=197,
cod_prov_procedencia=10,
datos_adicionales="Prueba",
deducciones=[{'detalle_aclaratorio': 'deduccion 1',
'base_calculo': 100, 'alicuota_iva': 21}],
percepciones=[{'detalle_aclaratoria': 'percepcion 1',
'base_calculo': 1000, 'alicuota_iva': 21}],
opcionales=[{'codigo': 1,
'descripcion': 'previsto para info adic.'}],
factura_papel=[{'nro_cai': "1234", 'nro_factura_papel': 1,
'fecha_factura': "2015-01-01",
'tipo_comprobante': 1}],
)
escribir_archivo(dic, ENTRADA, agrega=('--agrega' in sys.argv))
dic = leer_archivo(ENTRADA)
# load the settlement:
wslpg.CrearLiqSecundariaBase(**dic)
for ded in dic.get('deducciones', []):
wslpg.AgregarDeduccion(**ded)
for per in dic.get("percepciones", []):
wslpg.AgregarPercepcion(**per)
for opc in dic.get("opcionales", []):
wslpg.AgregarOpcional(**opc)
for fp in dic.get('factura_papel', []):
wslpg.AgregarFacturaPapel(**fp)
print "Liquidacion Secundaria: pto_emision=%s nro_orden=%s" % (
wslpg.liquidacion['ptoEmision'],
wslpg.liquidacion['nroOrden'],
)
if '--testing' in sys.argv:
# test message (no remote call is made),
wslpg.LoadTestXML("wslpg_lsg_autorizar_resp.xml")
wslpg.AutorizarLiquidacionSecundaria()
if wslpg.Excepcion:
print >> sys.stderr, "EXCEPCION:", wslpg.Excepcion
if DEBUG: print >> sys.stderr, wslpg.Traceback
print "Errores:", wslpg.Errores
print "COE", wslpg.COE
print wslpg.GetParametro("cod_tipo_operacion")
print wslpg.GetParametro("fecha_liquidacion")
print wslpg.GetParametro("subtotal")
print wslpg.GetParametro("importe_iva")
print wslpg.GetParametro("operacion_con_iva")
print wslpg.GetParametro("total_peso_neto")
print wslpg.GetParametro("numero_contrato")
# update the output file with the returned data
dic.update(wslpg.params_out)
escribir_archivo(dic, SALIDA, agrega=('--agrega' in sys.argv))
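# LSG adjustments mirror the primary adjustment flow (base record plus
# credit/debit sub-records), but only percepciones are itemized here.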
if '--ajustar-lsg' in sys.argv:
print "Ajustando LSG..."
if '--prueba' in sys.argv:
# build a sample settlement:
dic = dict(
pto_emision=55, nro_orden=0, coe_ajustado='330100025869',
cod_localidad_procedencia=5544, cod_prov_procedencia=12,
cod_puerto=14, des_puerto_localidad="DETALLE PUERTO",
cod_grano=2,
nro_contrato='1234' if '--contrato' in sys.argv else 0,
ajuste_credito=dict(
concepto_importe_iva_0='Alicuota Cero',
importe_ajustar_iva_0=900,
concepto_importe_iva_105='Alicuota Diez',
importe_ajustar_iva_105=800,
concepto_importe_iva_21='Alicuota Veintiuno',
importe_ajustar_iva_21=700,
percepciones=[{'detalle_aclaratoria': 'percepcion 1',
'base_calculo': 1000, 'alicuota_iva': 21}],
estado=None,
coe_ajustado=None,
datos_adicionales='AJUSTE CRED LSG',
),
ajuste_debito=dict(
concepto_importe_iva_0='Alic 0',
importe_ajustar_iva_0=250,
concepto_importe_iva_105='Alic 10.5',
importe_ajustar_iva_105=200,
concepto_importe_iva_21='Alicuota 21',
importe_ajustar_iva_21=50,
percepciones=[{'detalle_aclaratoria': 'percepcion 2',
'base_calculo': 1000, 'alicuota_iva': 21}],
datos_adicionales='AJUSTE DEB LSG',
),
)
if '--contrato' in sys.argv:
dic.update(
{'nro_contrato': 27,
'cuit_comprador': 20400000000,
'cuit_vendedor': 23000000019,
'cuit_corredor': 20267565393, # optional
'cod_grano': 2,
})
escribir_archivo(dic, ENTRADA)
dic = leer_archivo(ENTRADA)
if int(dic['nro_orden']) == 0 and '--testing' not in sys.argv:
# query the last order number issued:
ok = wslpg.ConsultarLiquidacionSecundariaUltNroOrden(dic['pto_emision'])
if ok:
dic['nro_orden'] = wslpg.NroOrden + 1
if '--contrato' in sys.argv:
for k in ("nro_contrato", "nro_act_comprador", "cod_grano",
"cuit_vendedor", "cuit_comprador", "cuit_corredor",
):
v = dic.get(k)
if v:
wslpg.SetParametro(k, v)
wslpg.CrearAjusteBase(pto_emision=dic['pto_emision'],
nro_orden=dic['nro_orden'],
coe_ajustado=dic['coe_ajustado'],
cod_localidad=dic['cod_localidad_procedencia'],
cod_provincia=dic['cod_prov_procedencia'],
)
if 'ajuste_credito' in dic:
liq = dic['ajuste_credito']
wslpg.CrearAjusteCredito(**liq)
for per in liq.get("percepciones", []):
wslpg.AgregarPercepcion(**per)
if 'ajuste_debito' in dic:
liq = dic['ajuste_debito']
wslpg.CrearAjusteDebito(**liq)
for per in liq.get("percepciones", []):
wslpg.AgregarPercepcion(**per)
if '--testing' in sys.argv:
wslpg.LoadTestXML("tests/wslpg_ajuste_secundaria.xml")
ret = wslpg.AjustarLiquidacionSecundaria()
if wslpg.Excepcion:
print >> sys.stderr, "EXCEPCION:", wslpg.Excepcion
if DEBUG: print >> sys.stderr, wslpg.Traceback
print "Errores:", wslpg.Errores
print "COE", wslpg.COE
print "Subtotal", wslpg.Subtotal
print "TotalIva105", wslpg.TotalIva105
print "TotalIva21", wslpg.TotalIva21
print "TotalRetencionesGanancias", wslpg.TotalRetencionesGanancias
print "TotalRetencionesIVA", wslpg.TotalRetencionesIVA
print "TotalNetoAPagar", wslpg.TotalNetoAPagar
print "TotalIvaRg4310_18", wslpg.TotalIvaRg4310_18
print "TotalPagoSegunCondicion", wslpg.TotalPagoSegunCondicion
# update the output file with the returned data
dic.update(wslpg.params_out)
ok = wslpg.AnalizarAjusteCredito()
dic['ajuste_credito'].update(wslpg.params_out)
ok = wslpg.AnalizarAjusteDebito()
dic['ajuste_debito'].update(wslpg.params_out)
escribir_archivo(dic, SALIDA, agrega=('--agrega' in sys.argv))
if DEBUG:
pprint.pprint(dic)
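# Advance payment (anticipo) authorization, built on the primary
# settlement structure (no certificates, withholdings only):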
if '--autorizar-anticipo' in sys.argv:
if '--prueba' in sys.argv:
# build a sample settlement:
dic = dict(
pto_emision=33,
nro_orden=1,
cuit_comprador='20400000000',
nro_act_comprador='40',
nro_ing_bruto_comprador='123',
cod_tipo_operacion=2,
cod_puerto=14, des_puerto_localidad="DETALLE PUERTO",
cod_grano=1,
peso_neto_sin_certificado=100,
cuit_vendedor="30000000006",
nro_ing_bruto_vendedor=123456,
actua_corredor="S", liquida_corredor="S",
cuit_corredor=wslpg.Cuit, # use the represented CUIT
nro_ing_bruto_corredor=wslpg.Cuit,
comision_corredor="20.6",
fecha_precio_operacion="2015-10-10",
precio_ref_tn=567, ## precio_operacion=150,
alic_iva_operacion="10.5", campania_ppal=1415,
cod_localidad_procedencia=197,
cod_prov_procedencia=10,
datos_adicionales="Prueba",
retenciones=[dict(codigo_concepto="RI",
detalle_aclaratorio="Retenciones IVA",
base_calculo=100,
alicuota=10.5, ),
dict(codigo_concepto="RG",
detalle_aclaratorio="Retenciones GAN",
base_calculo=100,
alicuota=2, )],
)
escribir_archivo(dic, ENTRADA, agrega=('--agrega' in sys.argv))
dic = leer_archivo(ENTRADA)
# load the settlement:
wslpg.CrearLiquidacion(**dic)
for ret in dic.get('retenciones', []):
wslpg.AgregarRetencion(**ret)
print "Liquidacion Primaria (Ant): pto_emision=%s nro_orden=%s" % (
wslpg.liquidacion['ptoEmision'],
wslpg.liquidacion['nroOrden'],
)
if '--testing' in sys.argv:
# test message (no remote call is made),
wslpg.LoadTestXML("wslpg_autorizar_ant_resp.xml")
wslpg.AutorizarAnticipo()
if wslpg.Excepcion:
print >> sys.stderr, "EXCEPCION:", wslpg.Excepcion
if DEBUG: print >> sys.stderr, wslpg.Traceback
print "Errores:", wslpg.Errores
print "COE", wslpg.COE
print wslpg.GetParametro("cod_tipo_operacion")
print wslpg.GetParametro("fecha_liquidacion")
print "TootalDeduccion", wslpg.TotalDeduccion
print "TotalRetencion", wslpg.TotalRetencion
print "TotalRetencionAfip", wslpg.TotalRetencionAfip
print "TotalOtrasRetenciones", wslpg.TotalOtrasRetenciones
print "TotalNetoAPagar", wslpg.TotalNetoAPagar
print "TotalIvaRg4310_18", wslpg.TotalIvaRg4310_18
print "TotalPagoSegunCondicion", wslpg.TotalPagoSegunCondicion
# update the output file with the returned data
dic.update(wslpg.params_out)
escribir_archivo(dic, SALIDA, agrega=('--agrega' in sys.argv))
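# Grain certification (CG) workflows: primary (P), retiro/transferencia
# (R/T) and preexistente (E) certificates share a common header record: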
if '--autorizar-cg' in sys.argv:
if '--prueba' in sys.argv:
# query the last order number
pto_emision = 99
wslpg.ConsultarCertificacionUltNroOrden(pto_emision)
# build a sample certification to authorize:
dic = dict(
pto_emision=pto_emision, nro_orden=wslpg.NroOrden + 1,
tipo_certificado="P", nro_planta="3091",
nro_ing_bruto_depositario="20267565393",
titular_grano="T",
cuit_depositante='20111111112',
nro_ing_bruto_depositante='123',
cuit_corredor=None if '--sincorr' in sys.argv else '20222222223',
cod_grano=2, campania=1314,
datos_adicionales="Prueba",)
# provisional test data (according to the certification type):
if '--primaria' in sys.argv:
dep = dict(
nro_act_depositario=29,
tipo_certificado="P",
descripcion_tipo_grano="SOJA",
monto_almacenaje=1, monto_acarreo=2,
monto_gastos_generales=3, monto_zarandeo=4,
porcentaje_secado_de=5, porcentaje_secado_a=4,
monto_secado=7, monto_por_cada_punto_exceso=8,
monto_otros=9,
porcentaje_merma_volatil=15, peso_neto_merma_volatil=16,
porcentaje_merma_secado=17, peso_neto_merma_secado=18,
porcentaje_merma_zarandeo=19, peso_neto_merma_zarandeo=20,
peso_neto_certificado=21, servicios_secado=22,
servicios_zarandeo=23, servicio_otros=240000,
servicios_forma_de_pago=25,
# fields not documented by AFIP:
servicios_conceptos_no_gravados=26,
servicios_percepciones_iva=27,
servicios_otras_percepciones=0, # do not send if 0
)
dic.update(dep)
det = dict(descripcion_rubro="bonif",
tipo_rubro="B", porcentaje=1, valor=1)
dic['det_muestra_analisis'] = [det]
cal = dict(analisis_muestra=10, nro_boletin=11,
cod_grado="F1", valor_grado=1.02,
valor_contenido_proteico=1, valor_factor=1)
dic['calidad'] = [cal]
ctg = dict(nro_ctg="123456", nro_carta_porte=1000,
porcentaje_secado_humedad=1, importe_secado=2,
peso_neto_merma_secado=3, tarifa_secado=4,
importe_zarandeo=5, peso_neto_merma_zarandeo=6,
tarifa_zarandeo=7,
peso_neto_confirmado_definitivo=1)
dic['ctgs'] = [ctg, ctg]
if '--retiro-transf' in sys.argv:
rt = dict(
nro_act_depositario=29,
tipo_certificado="R",
cuit_receptor="20267565393",
fecha="2014-11-26",
nro_carta_porte_a_utilizar="530305323",
cee_carta_porte_a_utilizar="123456789012",
)
dic.update(rt)
cert = dict(
peso_neto=10000,
coe_certificado_deposito="332000000357",
)
dic['certificados'] = [cert]
if '--preexistente' in sys.argv:
pre = dict(
tipo_certificado="E",
tipo_certificado_deposito_preexistente=1, # "R" or "T"
nro_certificado_deposito_preexistente="530305327",
cac_certificado_deposito_preexistente="85113524869336",
fecha_emision_certificado_deposito_preexistente="2014-11-26",
peso_neto=10000, nro_planta=3091,
)
dic.update(pre)
escribir_archivo(dic, ENTRADA, agrega=('--agrega' in sys.argv))
dic = leer_archivo(ENTRADA)
# load the data according to the certification type:
wslpg.CrearCertificacionCabecera(**dic)
if dic["tipo_certificado"] in ('P'):
wslpg.AgregarCertificacionPrimaria(**dic)
for ctg in dic.get("ctgs", []):
wslpg.AgregarCTG(**ctg)
for cal in dic.get("calidad", []):
wslpg.AgregarCalidad(**cal)
for det in dic.get("det_muestra_analisis", []):
wslpg.AgregarDetalleMuestraAnalisis(**det)
if dic["tipo_certificado"] in ('R', 'T'):
wslpg.AgregarCertificacionRetiroTransferencia(**dic)
for cert in dic.get("certificados", []):
wslpg.AgregarCertificado(**cert)
if dic["tipo_certificado"] in ('E', ):
wslpg.AgregarCertificacionPreexistente(**dic)
print "Certificacion: pto_emision=%s nro_orden=%s tipo=%s" % (
wslpg.certificacion['cabecera']['ptoEmision'],
wslpg.certificacion['cabecera']['nroOrden'],
wslpg.certificacion['cabecera']['tipoCertificado'],
)
if '--testing' in sys.argv:
# test message (no remote call is made),
wslpg.LoadTestXML("tests/wslpg_cert_autorizar_resp.xml")
wslpg.LoadTestXML("tests/xml/wslpg_cg_err_response.xml")
wslpg.AutorizarCertificacion()
if wslpg.Excepcion:
print >> sys.stderr, "EXCEPCION:", wslpg.Excepcion
if DEBUG: print >> sys.stderr, wslpg.Traceback
print "Errores:", wslpg.Errores
print "COE", wslpg.COE
print wslpg.GetParametro("fecha_certificacion")
# update the output file with the returned data
dic.update(wslpg.params_out)
escribir_archivo(dic, SALIDA, agrega=('--agrega' in sys.argv))
# Report quality (primary CGs only)
if '--informar-calidad' in sys.argv:
dic = leer_archivo(ENTRADA)
wslpg.CrearCertificacionCabecera(**dic)
wslpg.AgregarCertificacionPrimaria()
for cal in dic.get("calidad", []):
wslpg.AgregarCalidad(**cal)
for det in dic.get("det_muestra_analisis", []):
wslpg.AgregarDetalleMuestraAnalisis(**det)
# try to get the COE from the command line or from the file:
try:
coe = sys.argv[sys.argv.index("--informar-calidad") + 1]
except IndexError:
coe = dic['coe']
print "Informar Calidad: coe=%s " % (coe, )
wslpg.InformarCalidadCertificacion(coe)
if wslpg.Excepcion:
print >> sys.stderr, "EXCEPCION:", wslpg.Excepcion
if DEBUG: print >> sys.stderr, wslpg.Traceback
print "Errores:", wslpg.Errores
print "COE", wslpg.COE
# update the output file with the returned data
dic.update(wslpg.params_out)
escribir_archivo(dic, SALIDA, agrega=('--agrega' in sys.argv))
# query the CTGs to be certified in a CG:
if '--buscar-ctg' in sys.argv:
argv = dict([(i, e) for i, e
in enumerate(sys.argv[sys.argv.index("--buscar-ctg")+1:])
if not e.startswith("--")])
tipo_certificado = argv.get(0, "P") # P
cuit_depositante = argv.get(1) #
nro_planta = argv.get(2, 3091) or None # optional if not primary
cod_grano = argv.get(3, 2)
campania = argv.get(4, 1314)
ret = wslpg.BuscarCTG(tipo_certificado, cuit_depositante,
nro_planta, cod_grano, campania)
pprint.pprint(wslpg.params_out)
if DEBUG:
print "NRO CTG", wslpg.GetParametro("ctgs", 0, "nro_ctg")
# query certificates with available balance to settle/transfer:
if '--buscar-cert-con-saldo-disp' in sys.argv:
argv = dict([(i, e) for i, e
in enumerate(sys.argv[sys.argv.index("--buscar-cert-con-saldo-disp")+1:])
if not e.startswith("--")])
cuit_depositante = argv.get(0) # defaults to the CUIT from the .ini
cod_grano = argv.get(1, 2) #
campania = argv.get(2, 1314)
coe = argv.get(3)
fecha_emision_des = argv.get(4)
fecha_emision_has = argv.get(5)
if '--testing' in sys.argv:
wslpg.LoadTestXML("tests/xml/wslpg_resp_buscar_cert.xml") # cargo respuesta
ret = wslpg.BuscarCertConSaldoDisponible(cuit_depositante,
cod_grano, campania, coe,
fecha_emision_des, fecha_emision_has,
)
pprint.pprint(wslpg.params_out)
print wslpg.ErrMsg
if DEBUG:
print "1er COE", wslpg.GetParametro("certificados", 0, "coe")
# Retrieve parameter tables:
if '--campanias' in sys.argv:
ret = wslpg.ConsultarCampanias()
print "\n".join(ret)
if '--tipograno' in sys.argv:
ret = wslpg.ConsultarTipoGrano()
print "\n".join(ret)
if '--gradoref' in sys.argv:
ret = wslpg.ConsultarCodigoGradoReferencia()
print "\n".join(ret)
if '--gradoent' in sys.argv:
##wslpg.LoadTestXML("wslpg_cod.xml") # cargo respuesta de ej
cod_grano = raw_input("Ingrese el código de grano: ")
ret = wslpg.ConsultarGradoEntregadoXTipoGrano(cod_grano=cod_grano)
print "\n".join(ret)
if '--datos' in sys.argv:
print "# Grados"
print wslpg.ConsultarCodigoGradoReferencia(sep=None)
print "# Datos de grado entregado por tipo de granos:"
for cod_grano in wslpg.ConsultarTipoGrano(sep=None):
grad_ent = wslpg.ConsultarGradoEntregadoXTipoGrano(cod_grano, sep=None)
print cod_grano, ":", grad_ent, ","
if '--shelve' in sys.argv:
print "# Construyendo BD de Localidades por Provincias"
import wslpg_datos as datos
for cod_prov, desc_prov in wslpg.ConsultarProvincias(sep=None).items():
print "Actualizando Provincia", cod_prov, desc_prov
d = wslpg.BuscarLocalidades(cod_prov)
if '--certdeposito' in sys.argv:
ret = wslpg.ConsultarTipoCertificadoDeposito()
print "\n".join(ret)
if '--deducciones' in sys.argv:
ret = wslpg.ConsultarTipoDeduccion()
print "\n".join(ret)
if '--retenciones' in sys.argv:
ret = wslpg.ConsultarTipoRetencion()
print "\n".join(ret)
if '--puertos' in sys.argv:
ret = wslpg.ConsultarPuerto()
print "\n".join(ret)
if '--actividades' in sys.argv:
ret = wslpg.ConsultarTipoActividad()
print "\n".join(ret)
if '--actividadesrep' in sys.argv:
ret = wslpg.ConsultarTipoActividadRepresentado()
print "\n".join(ret)
print "Errores:", wslpg.Errores
if '--operaciones' in sys.argv:
ret = wslpg.ConsultarTiposOperacion()
print "\n".join(ret)
if '--provincias' in sys.argv:
ret = wslpg.ConsultarProvincias()
print "\n".join(ret)
if '--localidades' in sys.argv:
cod_prov = raw_input("Enter the province code:")
ret = wslpg.ConsultarLocalidadesPorProvincia(cod_prov)
print "\n".join(ret)
# PDF generation:
if '--pdf' in sys.argv:
# load the data from the output file:
liq = wslpg.params_out = leer_archivo(SALIDA)
conf_liq = dict(config.items('LIQUIDACION'))
conf_pdf = dict(config.items('PDF'))
# set number formats (decimal places) from the configuration:
wslpg.FmtCantidad = conf_liq.get("fmt_cantidad", "0.2")
wslpg.FmtPrecio = conf_liq.get("fmt_precio", "0.2")
# choose the layout according to the settlement type and data
if '--ajuste' not in sys.argv:
# standard settlement
formatos = [('formato', '')]
copias = int(conf_liq.get("copias", 3))
else:
# adjustments (separate pages); check for debits/credits:
formatos = [('formato_ajuste_base', '')]
copias = 1
if liq['ajuste_debito']:
formatos.append(('formato_ajuste_debcred', 'ajuste_debito' ))
if liq['ajuste_credito']:
formatos.append(('formato_ajuste_debcred', 'ajuste_credito'))
wslpg.CrearPlantillaPDF(
papel=conf_liq.get("papel", "legal"),
orientacion=conf_liq.get("orientacion", "portrait"),
)
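# Render each layout page (base plus one page per debit/credit adjustment),
# deferring the actual file write until the last layout (dest="F"):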
for num_formato, (formato, clave) in enumerate(formatos):
# load the default CSV layout (liquidacion....csv)
wslpg.CargarFormatoPDF(conf_liq.get(formato))
# fixed data (from configuration):
for k, v in conf_pdf.items():
wslpg.AgregarDatoPDF(k, v)
# additional data (record type 9):
for dato in liq.get('datos', []):
wslpg.AgregarDatoPDF(dato['campo'], dato['valor'])
if DEBUG: print "DATO", dato['campo'], dato['valor']
wslpg.ProcesarPlantillaPDF(num_copias=copias,
lineas_max=int(conf_liq.get("lineas_max", 24)),
qty_pos=conf_liq.get("cant_pos") or 'izq',
clave=clave)
if wslpg.Excepcion:
print >> sys.stderr, "EXCEPCION:", wslpg.Excepcion
if DEBUG: print >> sys.stderr, wslpg.Traceback
salida = conf_liq.get("salida", "")
# build the file name from the invoice data
d = os.path.join(conf_liq.get('directorio', "."),
liq['fecha_liquidacion'].replace("-", "_"))
if not os.path.isdir(d):
if DEBUG: print "Creando directorio!", d
os.makedirs(d)
fs = conf_liq.get('archivo','pto_emision,nro_orden').split(",")
fn = u'_'.join([unicode(liq.get(ff,ff)) for ff in fs])
fn = fn.encode('ascii', 'replace').replace('?','_')
salida = os.path.join(d, "%s.pdf" % fn)
if num_formato == len(formatos) - 1:
dest = "F" # last layout: write the file
else:
dest = "" # otherwise, do not write the file yet
wslpg.GenerarPDF(archivo=salida, dest=dest)
print "Generando PDF", salida, dest
if '--mostrar' in sys.argv:
wslpg.MostrarPDF(archivo=salida,
imprimir='--imprimir' in sys.argv)
print "hecho."
except SoapFault, e:
print >> sys.stderr, "SOAP fault:", e.faultcode, e.faultstring.encode("ascii", "ignore")
sys.exit(3)
except Exception, e:
try:
print >> sys.stderr, traceback.format_exception_only(sys.exc_type, sys.exc_value)[0]
except:
print >> sys.stderr, "Excepción no disponible:", type(e)
if DEBUG:
raise
sys.exit(5)
finally:
if XML:
open("wslpg_request.xml", "w").write(wslpg.client.xml_request)
open("wslpg_response.xml", "w").write(wslpg.client.xml_response)
license: gpl-3.0 | var_hash: -587,401,228,965,672,400 | doc_hash: -852,360,217,534,037,600 | line_mean: 46.503294 | line_max: 135 | alpha_frac: 0.514621 | autogenerated: false

repo_name: sadaf2605/django | path: django/contrib/messages/api.py | copies: 71 | size: 3105 | content:
from django.contrib.messages import constants
from django.contrib.messages.storage import default_storage
from django.http import HttpRequest
__all__ = (
'add_message', 'get_messages',
'get_level', 'set_level',
'debug', 'info', 'success', 'warning', 'error',
'MessageFailure',
)
class MessageFailure(Exception):
pass
def add_message(request, level, message, extra_tags='', fail_silently=False):
"""
Attempts to add a message to the request using the 'messages' app.
"""
if not isinstance(request, HttpRequest):
raise TypeError("add_message() argument must be an HttpRequest object, "
"not '%s'." % request.__class__.__name__)
if hasattr(request, '_messages'):
return request._messages.add(level, message, extra_tags)
if not fail_silently:
raise MessageFailure(
'You cannot add messages without installing '
'django.contrib.messages.middleware.MessageMiddleware'
)
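# Example usage (sketch) from a view, assuming MessageMiddleware is enabled:
#   from django.contrib import messages
#   messages.success(request, "Profile updated.")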
def get_messages(request):
"""
Returns the message storage on the request if it exists, otherwise returns
an empty list.
"""
return getattr(request, '_messages', [])
def get_level(request):
"""
Returns the minimum level of messages to be recorded.
The default level is the ``MESSAGE_LEVEL`` setting. If this is not found,
the ``INFO`` level is used.
"""
storage = getattr(request, '_messages', default_storage(request))
return storage.level
def set_level(request, level):
"""
Sets the minimum level of messages to be recorded, returning ``True`` if
the level was recorded successfully.
If set to ``None``, the default level will be used (see the ``get_level``
method).
"""
if not hasattr(request, '_messages'):
return False
request._messages.level = level
return True
def debug(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``DEBUG`` level.
"""
add_message(request, constants.DEBUG, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def info(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``INFO`` level.
"""
add_message(request, constants.INFO, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def success(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``SUCCESS`` level.
"""
add_message(request, constants.SUCCESS, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def warning(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``WARNING`` level.
"""
add_message(request, constants.WARNING, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def error(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``ERROR`` level.
"""
add_message(request, constants.ERROR, message, extra_tags=extra_tags,
fail_silently=fail_silently)
license: bsd-3-clause | var_hash: 887,087,186,006,687,500 | doc_hash: -8,285,468,695,395,383,000 | line_mean: 29.145631 | line_max: 80 | alpha_frac: 0.645733 | autogenerated: false