code | docs
---|---
def get_settings(self):
host = '127.0.0.1'
port = 6379
db = 0
if self.connection_uri is not None:
re_connection_uri = r'redis://(?:([\w]+)@)?([\w\d\.]+):(\d+)(?:/(\d+))?'
match = re.match(re_connection_uri, self.connection_uri)
if match:
if match.group(2):
host = match.group(2)
if match.group(3):
port = int(match.group(3))
if match.group(4):
db = int(match.group(4))
return {
'host': host,
'port': port,
'db': db
} | This creates a dict with keyword arguments used to create the redis client.
It is used like ``redis.StrictRedis(**self.get_settings())``. Thus, if the
settings string is not enough to generate the wanted settings you can override
this function.
:return: A dict with keyword arguments for the redis client constructor. |
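A minimal usage sketch, assuming the method above is in scope as a plain function and ``re`` is imported (``RedisBackend`` is a hypothetical host class; ``redis.StrictRedis(**settings)`` is the intended consumer):
>>> class RedisBackend(object):
...     connection_uri = 'redis://example.com:6380/2'
...     get_settings = get_settings  # the method defined above
>>> RedisBackend().get_settings()
{'host': 'example.com', 'port': 6380, 'db': 2}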
def parse_environ(name, parse_class=ParseResult, **defaults):
return parse(os.environ[name], parse_class, **defaults) | same as parse() but you pass in an environment variable name that will be used
to fetch the dsn
:param name: string, the environment variable name that contains the dsn to parse
:param parse_class: ParseResult, the class that will be used to hold parsed values
:param **defaults: dict, any values you want to have defaults for if they aren't in the dsn
:returns: ParseResult() tuple |
def parse_environs(name, parse_class=ParseResult, **defaults):
ret = []
if name in os.environ:
ret.append(parse_environ(name, parse_class, **defaults))
# now try importing _1 -> _N dsns
increment_name = lambda name, num: '{name}_{num}'.format(name=name, num=num)
dsn_num = 0 if increment_name(name, 0) in os.environ else 1
dsn_env_num_name = increment_name(name, dsn_num)
if dsn_env_num_name in os.environ:
try:
while True:
ret.append(parse_environ(dsn_env_num_name, parse_class, **defaults))
dsn_num += 1
dsn_env_num_name = increment_name(name, dsn_num)
except KeyError:
pass
return ret | same as parse_environ() but will also check name_1, name_2, ..., name_N and
return all the found dsn strings from the environment
This will look for name, and name_N (where N is 1 through infinity) in the environment;
if it finds them, it will assume they are dsn urls and will parse them.
The numbered checks (e.g. PROM_DSN_1, PROM_DSN_2) are made in order, so you can't use
PROM_DSN_1 and PROM_DSN_3 without PROM_DSN_2: the search stops at the missing _2, so
make sure your numbered dsns are sequential (e.g. 1, 2, 3, ...)
example --
export DSN_1=some.Interface://host:port/dbname#i1
export DSN_2=some.Interface://host2:port/dbname2#i2
$ python
>>> import dsnparse
>>> print dsnparse.parse_environs('DSN') # prints list with 2 parsed dsn objects
:param name: string, the name of the environment variables, _1, ... will be appended
:param parse_class: ParseResult, the class that will be used to hold parsed values
:returns: list all the found dsn strings in the environment with the given name prefix |
def parse(dsn, parse_class=ParseResult, **defaults):
r = parse_class(dsn, **defaults)
return r | parse a dsn to parts similar to parseurl
:param dsn: string, the dsn to parse
:param parse_class: ParseResult, the class that will be used to hold parsed values
:param **defaults: dict, any values you want to have defaults for if they aren't in the dsn
:returns: ParseResult() tuple-like instance |
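A quick usage sketch (attribute names like ``scheme``, ``hostname`` and ``port`` are taken from the ``netloc``/``hostloc``/``geturl`` helpers shown below):
>>> import dsnparse
>>> r = dsnparse.parse('mysql://user:pass@localhost:3306/mydb')
>>> r.scheme, r.hostname, r.port
('mysql', 'localhost', 3306)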
def netloc(self):
s = ''
prefix = ''
if self.username:
s += self.username
prefix = '@'
if self.password:
s += ":{password}".format(password=self.password)
prefix = '@'
s += "{prefix}{hostloc}".format(prefix=prefix, hostloc=self.hostloc)
return s | return username:password@hostname:port |
def hostloc(self):
hostloc = self.hostname
if self.port:
hostloc = '{hostloc}:{port}'.format(hostloc=hostloc, port=self.port)
return hostloc | return host:port |
def setdefault(self, key, val):
if not getattr(self, key, None):
setattr(self, key, val) | set a default value for key
this is different from dict's setdefault because it will set the default either
if the key doesn't exist, or if the value at the key evaluates to False, so
an empty string or a None value will also be updated
:param key: string, the attribute to update
:param val: mixed, the attribute's new value if its current value
evaluates to False |
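For example, where ``dict.setdefault`` would keep an existing falsy value, this version replaces it (a hedged sketch, with ``obj`` standing for any instance carrying this method):
>>> obj.port = None
>>> obj.setdefault('port', 6379)
>>> obj.port
6379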
def geturl(self):
return urlparse.urlunparse((
self.scheme,
self.netloc,
self.path,
self.params,
self.query_str,
self.fragment,
)) | return the dsn back into url form |
def preparse(self, context):
context.early_args, unused = (
context.early_parser.parse_known_args(context.argv)) | Parse a portion of command line arguments with the early parser.
This method relies on ``context.argv`` and ``context.early_parser``
and produces ``context.early_args``.
The ``context.early_args`` object is the return value from argparse:
a namespace object with dict-like attribute access. |
def build_parser(self, context):
context.parser, context.max_level = self._create_parser(context) | Create the final argument parser.
This method creates the non-early (full) argparse argument parser.
Unlike the early counterpart it is expected to have knowledge of
the full command tree.
This method relies on ``context.cmd_tree`` and produces
``context.parser``. Other ingredients can interact with the parser
up until :meth:`parse()` is called. |
def parse(self, context):
context.args = context.parser.parse_args(context.argv) | Parse command line arguments.
This method relies on ``context.argv`` and ``context.early_parser``
and produces ``context.args``. Note that ``.argv`` is modified by
:meth:`preparse()` so it actually contains _fewer_ items.
The ``context.args`` object is the return value from argparse:
a namespace object with dict-like attribute access. |
def parse(self, context):
try:
import argcomplete
except ImportError:
return
try:
parser = context.parser
except AttributeError:
raise RecipeError(
"""
The context doesn't have the parser attribute.
The auto-complete ingredient depends on having a parser object
to generate completion data for the shell. In a typical
application this requires that the AutocompleteIngredient and
ParserIngredient are present and that the auto-complete
ingredient precedes the parser.
""")
else:
argcomplete.autocomplete(parser) | Optionally trigger argument completion in the invoking shell.
This method is called to see if bash argument completion is requested
and to honor the request, if needed. This causes the process to exit
(early) without giving other ingredients a chance to initialize or shut
down.
Due to the way argcomplete works, no other ingredient can print()
anything to stdout prior to this point. |
def ansi_cmd(cmd, *args):
try:
obj = getattr(ANSI, str('cmd_{}'.format(cmd)))
except AttributeError:
raise ValueError(
"incorrect command: {!r}".format(cmd))
if isinstance(obj, type("")):
return obj
else:
return obj(*args) | Get ANSI command code by name. |
def get_visible_color(color):
if isinstance(color, (str, type(""))):
try:
return getattr(_Visible, str('{}'.format(color)))
except AttributeError:
raise ValueError("incorrect color: {!r}".format(color))
elif isinstance(color, tuple):
return (0x80 ^ color[0], 0x80 ^ color[1], 0x80 ^ color[2])
elif isinstance(color, int):
if 0 <= color <= 0x07:
index = color
return 0xFF if index == 0 else 0xE8
elif 0x08 <= color <= 0x0F:
index = color - 0x08
return 0xFF if index == 0 else 0xE8
elif 0x10 <= color <= 0xE7:
index = color - 0x10
if 0 <= index % 36 < 18:
return 0xFF
else:
return 0x10
elif 0xE8 <= color <= 0xFF:
index = color - 0x0E8
return 0xFF if 0 <= index < 12 else 0xE8
else:
raise ValueError("incorrect color: {!r}".format(color)) | Get the visible counter-color. |
def sgr_fg_rgb(r, g, b):
assert r in range(256)
assert g in range(256)
assert b in range(256)
return '38;2;{};{};{}'.format(r, g, b) | Get SGR (Set Graphics Rendition) foreground RGB color. |
def sgr_bg_rgb(r, g, b):
assert r in range(256)
assert g in range(256)
assert b in range(256)
return '48;2;{};{};{}'.format(r, g, b) | Get SGR (Set Graphics Rendition) background RGB color. |
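As a sanity check, these fragments only take effect inside the standard SGR escape wrapper ``ESC [ ... m``, which the functions above do not produce themselves:
>>> '\x1b[' + sgr_fg_rgb(255, 128, 0) + 'm'
'\x1b[38;2;255;128;0m'
>>> print('\x1b[' + sgr_fg_rgb(255, 128, 0) + 'm' + 'orange text' + '\x1b[0m')  # SGR 0 resets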
def _aprint2(self, *values, **kwargs):
sep = kwargs.pop(str('sep'), ' ')
end = kwargs.pop(str('end'), '\n')
file = kwargs.pop(str('file'), None) or sys.stdout
flush = kwargs.pop(str('flush'), False)
fg = kwargs.pop(str('fg'), None)
bg = kwargs.pop(str('bg'), None)
style = kwargs.pop(str('style'), None)
reset = kwargs.pop(str('reset'), True)
sgr = kwargs
text = sep.join(str(value) for value in values)
text = self(text, fg, bg, style, reset, **sgr)
print(text, end=end, file=file)
if flush:
file.flush() | ANSI formatting-aware print().
This method is a version of print() (function) that understands
additional ansi control parameters.
:param value:
The values to print, same as with ``print()``
:param sep:
Separator between values, same as with ``print()``
:param end:
Terminator of the line, same as with ``print()``
:param file:
File to print to, same as with ``print()``
:param flush:
Flag that controls stream flush behavior, same as with ``print()``
:param fg:
Foreground color, same as with :meth:`__call__()`.
:param bg:
Background color, same as with :meth:`__call__()`.
:param style:
Text style, same as with :meth:`__call__()`.
:param reset:
Flag that controls if ANSI attributes are reset at the end, same as
with :meth:`__call__()`.
:param sgr:
Additional (custom) Set Graphics Rendition directives, same as with
:meth:`__call__()`.
.. note::
This implementation is intended for Python 2 |
def added(self, context):
context.ansi = ANSIFormatter(self._enable)
context.aprint = context.ansi.aprint | Ingredient method called before anything else. |
def using_git(cwd):
try:
git_log = shell_out(["git", "log"], cwd=cwd)
return True
except (CalledProcessError, OSError): # pragma: no cover
return False | Test whether the directory cwd is contained in a git repository. |
def using_hg(cwd):
try:
hg_log = shell_out(["hg", "log"], cwd=cwd)
return True
except (CalledProcessError, OSError):
return False | Test whether the directory cwd is contained in a mercurial
repository. |
def using_bzr(cwd):
try:
bzr_log = shell_out(["bzr", "log"], cwd=cwd)
return True
except (CalledProcessError, OSError):
return False | Test whether the directory cwd is contained in a bazaar repository. |
def from_string(vc):
try:
# Note: this means all version controls must have
# a title naming convention (!)
vc = globals()[vc.title()]
assert(issubclass(vc, VersionControl))
return vc
except (KeyError, AssertionError):
raise NotImplementedError("Unknown version control system.") | Return the VersionControl subclass from a string, for example
VersionControl.from_string('git') will return Git. |
def which(cwd=None): # pragma: no cover
if cwd is None:
cwd = os.getcwd()
for (k, using_vc) in globals().items():
if k.startswith('using_') and using_vc(cwd=cwd):
return VersionControl.from_string(k[6:])
# Not supported (yet)
raise NotImplementedError("Unknown version control system, "
"or you're not in the project directory.") | Try to find which version control system contains the cwd directory.
Returns the VersionControl subclass (e.g. Git); if none is
found this will raise a NotImplementedError. |
def modified_lines(self, r, file_name):
cmd = self.file_diff_cmd(r, file_name)
diff = shell_out_ignore_exitcode(cmd, cwd=self.root)
return list(self.modified_lines_from_diff(diff)) | Returns the line numbers of a file which have been changed. |
def modified_lines_from_diff(self, diff):
from pep8radius.diff import modified_lines_from_udiff
for start, end in modified_lines_from_udiff(diff):
yield start, end | Returns the changed lines in a diff.
- Potentially this is vc specific (if not using udiff).
Note: this returns the line numbers in descending order. |
def get_filenames_diff(self, r):
cmd = self.filenames_diff_cmd(r)
diff_files = shell_out_ignore_exitcode(cmd, cwd=self.root)
diff_files = self.parse_diff_filenames(diff_files)
return set(f for f in diff_files if f.endswith('.py')) | Get the py files which have been changed since rev. |
def parse_diff_filenames(diff_files):
# ? .gitignore
# M 0.txt
files = []
for line in diff_files.splitlines():
line = line.strip()
fn = re.findall(r'[^ ]+\s+(.*\.py)', line)
if fn and not line.startswith('?'):
files.append(fn[0])
return files | Parse the output of filenames_diff_cmd. |
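A hedged doctest of the expected behavior, using made-up status output in the two-column format the comments above illustrate:
>>> parse_diff_filenames('M  script.py\n?? notes.txt\nA  pkg/util.py')
['script.py', 'pkg/util.py']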
def render_to_response(self, obj, **response_kwargs):
return HttpResponse(self.serialize(obj), content_type='application/json', **response_kwargs) | Returns an ``HttpResponse`` object instance with Content-Type:
application/json.
The response body will be the return value of ``self.serialize(obj)`` |
def http_method_not_allowed(self, *args, **kwargs):
resp = super(JsonResponseMixin, self).http_method_not_allowed(*args, **kwargs)
resp['Content-Type'] = 'application/json'
return resp | Returns super after setting the Content-Type header to
``application/json`` |
def data(self):
if self.request.method == 'GET':
return self.request.GET
else:
assert self.request.META['CONTENT_TYPE'].startswith('application/json')
charset = self.request.encoding or settings.DEFAULT_CHARSET
return json.loads(self.request.body.decode(charset)) | Helper property that parses JSON request data (or GET parameters) into a Python object. |
def dispatch(self, *args, **kwargs):
try:
self.auth(*args, **kwargs)
return super(RestView, self).dispatch(*args, **kwargs)
except ValidationError as e:
return self.render_to_response(e.message_dict, status=409)
except Http404 as e:
return self.render_to_response(str(e), status=404)
except PermissionDenied as e:
return self.render_to_response(str(e), status=403)
except ValueError as e:
return self.render_to_response(str(e), status=400) | Authenticates the request and dispatches to the correct HTTP method
function (GET, POST, PUT,...).
Translates exceptions into proper JSON serialized HTTP responses:
- ValidationError: HTTP 409
- Http404: HTTP 404
- PermissionDenied: HTTP 403
- ValueError: HTTP 400 |
def options(self, request, *args, **kwargs):
allow = []
for method in self.http_method_names:
if hasattr(self, method):
allow.append(method.upper())
r = self.render_to_response(None)
r['Allow'] = ','.join(allow)
return r | Implements an OPTIONS HTTP method function returning all allowed HTTP
methods. |
def main(args=None, vc=None, cwd=None, apply_config=False):
import signal
try: # pragma: no cover
# Exit on broken pipe.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError: # pragma: no cover
# SIGPIPE is not available on Windows.
pass
try:
if args is None:
args = []
try:
# Note: with argparse on py 2.6 you can't pass a set
# TODO neater solution for this!
args_set = set(args)
except TypeError:
args_set = args # args is a Namespace
if '--version' in args_set or getattr(args_set, 'version', 0):
print(version)
return 0
if '--list-fixes' in args_set or getattr(args_set, 'list_fixes', 0):
from autopep8 import supported_fixes
for code, description in sorted(supported_fixes()):
print('{code} - {description}'.format(
code=code, description=description))
return 0
try:
try:
args = parse_args(args, apply_config=apply_config)
except TypeError:
pass # args is already a Namespace (testing)
if args.from_diff: # pragma: no cover
r = Radius.from_diff(args.from_diff.read(),
options=args, cwd=cwd)
else:
r = Radius(rev=args.rev, options=args, vc=vc, cwd=cwd)
except NotImplementedError as e: # pragma: no cover
print(e)
return 1
except CalledProcessError as c: # pragma: no cover
# cut off usage and exit
output = c.output.splitlines()[0]
print(output)
return c.returncode
any_changes = r.fix()
if any_changes and args.error_status:
return 1
return 0
except KeyboardInterrupt: # pragma: no cover
return 1 | PEP8 clean only the parts of the files touched since the last commit, a
previous commit or branch. |
def parse_args(arguments=None, root=None, apply_config=False):
if arguments is None:
arguments = []
parser = create_parser()
args = parser.parse_args(arguments)
if apply_config:
parser = apply_config_defaults(parser, args, root=root)
args = parser.parse_args(arguments)
# sanity check args (from autopep8)
if args.max_line_length <= 0: # pragma: no cover
parser.error('--max-line-length must be greater than 0')
if args.select:
args.select = _split_comma_separated(args.select)
if args.ignore:
args.ignore = _split_comma_separated(args.ignore)
elif not args.select and args.aggressive:
# Enable everything by default if aggressive.
args.select = ['E', 'W']
else:
args.ignore = _split_comma_separated(DEFAULT_IGNORE)
if args.exclude:
args.exclude = _split_comma_separated(args.exclude)
else:
args.exclude = []
return args | Parse the arguments from the CLI.
If apply_config then we first look up and apply configs using
apply_config_defaults. |
def apply_config_defaults(parser, args, root):
if root is None:
try:
from pep8radius.vcs import VersionControl
root = VersionControl.which().root_dir()
except NotImplementedError:
pass # don't update local, could be using as module
config = SafeConfigParser()
config.read(args.global_config)
if root and not args.ignore_local_config:
config.read(local_config_files(root))
try:
defaults = dict((k.lstrip('-').replace('-', '_'), v)
for k, v in config.items("pep8"))
parser.set_defaults(**defaults)
except NoSectionError:
pass # just do nothing, potentially this could raise ?
return parser | Update the parser's defaults from either the arguments' config_arg or
the config files given in config_files(root). |
def read_vint32(self):
result = 0
count = 0
while True:
if count > 4:
raise ValueError("Corrupt VarInt32")
b = self.read_byte()
result = result | (b & 0x7F) << (7 * count)
count += 1
if not b & 0x80:
return result | This seems to be a variable-length integer, UTF-8 style: 7 payload bits per byte, with the high bit flagging that more bytes follow |
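The inverse operation (an illustrative helper, not part of the class above) packs 7 bits per byte with the high bit as the continuation flag:
def write_vint32(value):
    out = bytearray()
    while True:
        b = value & 0x7F  # low 7 payload bits
        value >>= 7
        if value:
            out.append(b | 0x80)  # high bit set: more bytes follow
        else:
            out.append(b)
            return bytes(out)
# write_vint32(300) == b'\xac\x02': 0xAC carries 44 plus the continue flag,
# 0x02 contributes 2 << 7 == 256, and 256 + 44 == 300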
def read_message(self, message_type, compressed=False, read_size=True):
if read_size:
size = self.read_vint32()
b = self.read(size)
else:
b = self.read()
if compressed:
b = snappy.decompress(b)
m = message_type()
m.ParseFromString(b)
return m | Read a protobuf message |
def run_hooks(self, packet):
if packet.__class__ in self.internal_hooks:
self.internal_hooks[packet.__class__](packet)
if packet.__class__ in self.hooks:
self.hooks[packet.__class__](packet) | Run any additional functions that want to process this type of packet.
These can be internal parser hooks, or external hooks that process
information |
def parse_string_table(self, tables):
self.info("String table: %s" % (tables.tables, ))
for table in tables.tables:
if table.table_name == "userinfo":
for item in table.items:
if len(item.data) > 0:
if len(item.data) == 140:
p = PlayerInfo()
ctypes.memmove(ctypes.addressof(p), item.data, 140)
p.str = item.str
self.run_hooks(p)
if table.table_name == "CombatLogNames":
self.combat_log_names = dict(enumerate(
(item.str for item in table.items))) | Need to pull out player information from string table |
def parse_game_event(self, event):
if event.eventid in self.event_lookup:
#Bash this into a nicer data format to work with
event_type = self.event_lookup[event.eventid]
ge = GameEvent(event_type.name)
for i, key in enumerate(event.keys):
key_type = event_type.keys[i]
ge.keys[key_type.name] = getattr(key,
KEY_DATA_TYPES[key.type])
self.debug("|==========> %s" % (ge, ))
self.run_hooks(ge) | So CSVCMsg_GameEventList is a list of all events that can happen.
A game event has an eventid which maps to a type of event that happened |
def parse(self):
self.important("Parsing demo file '%s'" % (self.filename, ))
with open(self.filename, 'rb') as f:
reader = Reader(StringIO(f.read()))
filestamp = reader.read(8)
offset = reader.read_int32()
if filestamp != "PBUFDEM\x00":
raise ValueError("Invalid replay - incorrect filestamp")
buff = StringIO(f.read())  # NOTE: f is already exhausted here, so buff is empty and unused
frame = 0
more = True
while more and reader.remaining > 0:
cmd = reader.read_vint32()
tick = reader.read_vint32()
compressed = False
if cmd & demo_pb2.DEM_IsCompressed:
compressed = True
cmd = cmd & ~demo_pb2.DEM_IsCompressed
if cmd not in messages.MESSAGE_TYPES:
raise KeyError("Unknown message type found")
message_type = messages.MESSAGE_TYPES[cmd]
message = reader.read_message(message_type, compressed)
self.info('%s: %s' % (frame, message_type))
self.worthless(message)
self.run_hooks(message)
self.info('|%s' % ('-' * 79, ))
frame += 1
if self.frames and frame > self.frames:
break | Parse a replay |
def convert(data):
try:
st = basestring
except NameError:
st = str
if isinstance(data, st):
return str(data)
elif isinstance(data, Mapping):
return dict(map(convert, data.iteritems()))
elif isinstance(data, Iterable):
return type(data)(map(convert, data))
else:
return data | Convert from unicode to native ascii |
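Under Python 2, which this snippet targets (note ``basestring`` and ``iteritems``), a typical call looks like:
>>> convert({u'items': [u'blink', u'dagon']})
{'items': ['blink', 'dagon']}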
def get_type_properties(self, property_obj, name, additional_prop=False):
property_type = property_obj.get('type', 'object')
property_format = property_obj.get('format')
property_dict = {}
if property_type in ['object', 'array']:
schema_type = SchemaTypes.MAPPED if additional_prop else SchemaTypes.INLINE
schema_id = self._get_object_schema_id(property_obj, schema_type)
if not ('$ref' in property_obj or self.storage.get(schema_id)):
_schema = self.storage.create_schema(
property_obj, name, schema_type, root=self.root)
self._after_create_schema(_schema)
property_type = schema_id
property_dict['default'] = property_obj.get('default')
property_dict['maximum'] = property_obj.get('maximum')
property_dict['exclusive_maximum'] = property_obj.get('exclusiveMaximum')
property_dict['minimum'] = property_obj.get('minimum')
property_dict['exclusive_minimum'] = property_obj.get('exclusiveMinimum')
property_dict['max_length'] = property_obj.get('maxLength')
property_dict['min_length'] = property_obj.get('minLength')
#TODO: fixme. remove ugly convert. add property template renderer instead
property_dict['enum'] = convert(property_obj.get('enum'))
#TODO: fixme. cleanup empty properties. add configurable filter for properties instead
property_dict = {k: v for k, v in property_dict.items() if v}
return property_type, property_format, property_dict | Get internal properties of property (extended in schema)
:param dict property_obj: raw property object
:param str name: name of property
:param bool additional_prop: recursion's param
:return: Type, format and internal properties of property
:rtype: tuple(str, str, dict) |
def set_type_by_schema(self, schema_obj, schema_type):
schema_id = self._get_object_schema_id(schema_obj, schema_type)
if not self.storage.contains(schema_id):
schema = self.storage.create_schema(
schema_obj, self.name, schema_type, root=self.root)
assert schema.schema_id == schema_id
self._type = schema_id | Set property type by schema object
Schema will create, if it doesn't exists in collection
:param dict schema_obj: raw schema object
:param str schema_type: |
def tablib_export_action(modeladmin, request, queryset, file_type="xls"):
dataset = SimpleDataset(queryset, headers=None)
filename = '{0}.{1}'.format(
smart_str(modeladmin.model._meta.verbose_name_plural), file_type)
response_kwargs = {
'content_type': get_content_type(file_type)
}
response = HttpResponse(getattr(dataset, file_type), **response_kwargs)
response['Content-Disposition'] = 'attachment; filename={0}'.format(
filename)
return response | Allow the user to download the current filtered list of items
:param file_type:
One of the formats supported by tablib (e.g. "xls", "csv", "html",
etc.) |
def _fill_schemas_from_definitions(self, obj):
if obj.get('definitions'):
self.schemas.clear()
all_of_stack = []
for name, definition in obj['definitions'].items():
if 'allOf' in definition:
all_of_stack.append((name, definition))
else:
self.schemas.create_schema(
definition, name, SchemaTypes.DEFINITION, root=self)
while all_of_stack:
name, definition = all_of_stack.pop(0)
self.schemas.create_schema(
definition, name, SchemaTypes.DEFINITION, root=self) | First, create schemas without 'allOf'
:param obj:
:return: None |
def get_type_properties(self, property_obj, name, additional_prop=False):
property_type, property_format, property_dict = \
super(Schema, self).get_type_properties(property_obj, name, additional_prop=additional_prop)
_schema = self.storage.get(property_type)
if _schema and ('additionalProperties' in property_obj):
_property_type, _property_format, _property_dict = super(Schema, self).get_type_properties(
property_obj['additionalProperties'], '{}-mapped'.format(name), additional_prop=True)
if _property_type not in PRIMITIVE_TYPES:
SchemaMapWrapper.wrap(self.storage.get(_property_type))
_schema.nested_schemas.add(_property_type)
else:
_schema.type_format = _property_type
return property_type, property_format, property_dict | Extends the parent's 'get internal properties of property' method |
def generic_export(request, model_name=None):
if model_name not in settings.TABLIB_MODELS:
raise Http404()
model = get_model(*model_name.split(".", 2))
if not model:
raise ImproperlyConfigured(
"Model {0} is in settings.TABLIB_MODELS but"
" could not be loaded".format(model_name))
qs = model._default_manager.all()
# Filtering may be allowed based on TABLIB_MODELS:
filter_settings = settings.TABLIB_MODELS[model_name]
filters = {}
for k, v in request.GET.items():
try:
# Allow joins (they'll be checked below) but chop off the trailing
# lookup operator:
rel, lookup_type = k.rsplit("__", 1)
except ValueError:
rel = k
lookup_type = "exact"
allowed_lookups = filter_settings.get(rel, None)
if allowed_lookups is None:
return HttpResponseBadRequest(
"Filtering on {0} is not allowed".format(rel)
)
elif lookup_type not in allowed_lookups:
return HttpResponseBadRequest(
"{0} may only be filtered using {1}".format(
k, " ".join(allowed_lookups)))
else:
filters[str(k)] = v
if filters:
qs = qs.filter(**filters)
return export(request, model=model, queryset=qs) | Generic view configured through settings.TABLIB_MODELS
Usage:
1. Add the view to ``urlpatterns`` in ``urls.py``::
url(r'export/(?P<model_name>[^/]+)/$',
"django_tablib.views.generic_export"),
2. Create the ``settings.TABLIB_MODELS`` dictionary using model names
as keys the allowed lookup operators as values, if any::
TABLIB_MODELS = {
'myapp.simple': None,
'myapp.related': {'simple__title': ('exact', 'iexact')},
}
3. Open ``/export/myapp.simple`` or
``/export/myapp.related/?simple__title__iexact=test`` |
def sorted(collection):
# NOTE: this function shadows the builtin, so delegate to the builtin
# explicitly to avoid accidental recursion into this one-argument version
import builtins
if len(collection) < 1:
return collection
if isinstance(collection, dict):
return builtins.sorted(collection.items(), key=lambda x: x[0])
if isinstance(list(collection)[0], Operation):
key = lambda x: x.operation_id
elif isinstance(list(collection)[0], str):
key = lambda x: SchemaObjects.get(x).name
else:
raise TypeError(type(collection[0]))
return builtins.sorted(collection, key=key) | Sort a dict by key, a schema collection by schema name,
and operations by operation id |
def get_regular_properties(self, _type, *args, **kwargs):
if not SchemaObjects.contains(_type):
return _type
schema = SchemaObjects.get(_type)
if schema.schema_type == SchemaTypes.DEFINITION and not kwargs.get('definition'):
return ''
head = """.. csv-table::
:delim: |
:header: "Name", "Required", "Type", "Format", "Properties", "Description"
:widths: 20, 10, 15, 15, 30, 25
"""
body = []
if schema.properties:
for p in schema.properties:
body.append(' {} | {} | {} | {} | {} | {}'.format(
p.get('name') or '',
'Yes' if p.get('required') else 'No',
self.get_type_description(p['type'], *args, **kwargs),
p.get('type_format') or '',
'{}'.format(p.get('type_properties') or ''),
p.get('description') or '')
)
body.sort()
return (head + '\n'.join(body)) | Make table with properties by schema_id
:param str _type:
:rtype: str |
def get_type_description(self, _type, suffix='', *args, **kwargs):
if not SchemaObjects.contains(_type):
return _type
schema = SchemaObjects.get(_type)
if schema.all_of:
models = ','.join(
(self.get_type_description(_type, *args, **kwargs) for _type in schema.all_of)
)
result = '{}'.format(models.split(',')[0])
for r in models.split(',')[1:]:
result += ' extended {}'.format(r)
elif schema.is_array:
result = 'array of {}'.format(
self.get_type_description(schema.item['type'], *args, **kwargs))
else:
result = ':ref:`{} <{}{}>`'.format(schema.name, schema.schema_id, suffix)
return result | Get description of type
:param suffix:
:param str _type:
:rtype: str |
def get_additional_properties(self, _type, *args, **kwargs):
if not SchemaObjects.contains(_type):
return _type
schema = SchemaObjects.get(_type)
body = []
for sch in schema.nested_schemas: # complex types
nested_schema = SchemaObjects.get(sch)
if not (nested_schema or isinstance(nested_schema, SchemaMapWrapper)):
continue
body.append('Map of {{"key":"{}"}}\n\n'.format(self.get_type_description(
nested_schema.schema_id, *args, **kwargs)) # head
)
if nested_schema.is_array: # table
_schema = SchemaObjects.get(nested_schema.item.get('type'))
if _schema and _schema.schema_type == SchemaTypes.INLINE:
body.append(self.get_regular_properties(_schema.schema_id, *args, **kwargs))
else:
body.append(self.get_regular_properties(nested_schema.schema_id, *args, **kwargs))
if schema.type_format: # basic types, only head
body.append(
'Map of {{"key":"{}"}}'.format(self.get_type_description(schema.type_format, *args, **kwargs)))
return ''.join(body) | Make head and table with additional properties by schema_id
:param str _type:
:rtype: str |
def pre_save(self, instance, add):
if not self.natural_text_field or self.attname not in instance.__dict__:
return
edtf = getattr(instance, self.attname)
# Update EDTF field based on latest natural text value, if any
natural_text = getattr(instance, self.natural_text_field)
if natural_text:
edtf = text_to_edtf(natural_text)
else:
edtf = None
# TODO If `natural_text_field` becomes cleared the derived EDTF field
# value should also be cleared, rather than left at original value?
# TODO Handle case where EDTF field is set to a string directly, not
# via `natural_text_field` (this is a slightly unexpected use-case, but
# is a very efficient way to set EDTF values in situations like for API
# imports so we probably want to continue to support it?)
if edtf and not isinstance(edtf, EDTFObject):
edtf = parse_edtf(edtf, fail_silently=True)
setattr(instance, self.attname, edtf)
# set or clear related date fields on the instance
for attr in DATE_ATTRS:
field_attr = "%s_field" % attr
g = getattr(self, field_attr, None)
if g:
if edtf:
try:
target_field = instance._meta.get_field(g)
except FieldDoesNotExist:
continue
value = getattr(edtf, attr)() # struct_time
if isinstance(target_field, models.FloatField):
value = struct_time_to_jd(value)
elif isinstance(target_field, models.DateField):
value = struct_time_to_date(value)
else:
raise NotImplementedError(
u"EDTFField does not support %s as a derived data"
u" field, only FloatField or DateField"
% type(target_field))
setattr(instance, g, value)
else:
setattr(instance, g, None)
return edtf | Updates the edtf value from the latest value of the natural text field.
If there's a valid edtf, then set the derived date values. |
def apply_delta(op, time_struct, delta):
if not delta:
return time_struct # No work to do
try:
dt_result = op(datetime(*time_struct[:6]), delta)
return dt_to_struct_time(dt_result)
except (OverflowError, ValueError):
# Year is not within supported 1 to 9999 AD range
pass
# Here we fake the year to one in the acceptable range to avoid having to
# write our own date rolling logic
# Adjust the year to be close to the 2000 millennium in 1,000 year
# increments to try and retain accurate relative leap years
actual_year = time_struct.tm_year
millennium = int(float(actual_year) / 1000)
millennium_diff = (2 - millennium) * 1000
adjusted_year = actual_year + millennium_diff
# Apply delta to the date/time with adjusted year
dt = datetime(*(adjusted_year,) + time_struct[1:6])
dt_result = op(dt, delta)
# Convert result year back to its original millennium
final_year = dt_result.year - millennium_diff
return struct_time(
(final_year,) + dt_result.timetuple()[1:6] + tuple(TIME_EMPTY_EXTRAS)) | Apply a `relativedelta` to a `struct_time` data structure.
`op` is an operator function, probably always `add` or `sub`tract to
correspond to `a_date + a_delta` and `a_date - a_delta`.
This function is required because we cannot use standard `datetime` module
objects for conversion when the date/time is, or will become, outside the
boundary years 1 AD to 9999 AD. |
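A brief usage sketch, assuming ``operator.add``, ``dateutil.relativedelta`` and the module's helpers (e.g. ``dt_to_struct_time``) are in scope; relativedelta clamps 31 Jan + 1 month to the end of leap-year February:
>>> from operator import add
>>> from time import struct_time
>>> from dateutil.relativedelta import relativedelta
>>> st = struct_time((2000, 1, 31, 0, 0, 0, 0, 0, 0))
>>> apply_delta(add, st, relativedelta(months=1))[:3]
(2000, 2, 29)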
def _strict_date(self, lean):
return struct_time(
(
self._precise_year(lean),
self._precise_month(lean),
self._precise_day(lean),
) + tuple(TIME_EMPTY_TIME) + tuple(TIME_EMPTY_EXTRAS)
) | Return a `time.struct_time` representation of the date. |
def _get_fuzzy_padding(self, lean):
result = relativedelta(0)
if self.year_ua:
result += appsettings.PADDING_YEAR_PRECISION * self.year_ua._get_multiplier()
if self.month_ua:
result += appsettings.PADDING_MONTH_PRECISION * self.month_ua._get_multiplier()
if self.day_ua:
result += appsettings.PADDING_DAY_PRECISION * self.day_ua._get_multiplier()
if self.year_month_ua:
result += appsettings.PADDING_YEAR_PRECISION * self.year_month_ua._get_multiplier()
result += appsettings.PADDING_MONTH_PRECISION * self.year_month_ua._get_multiplier()
if self.month_day_ua:
result += appsettings.PADDING_DAY_PRECISION * self.month_day_ua._get_multiplier()
result += appsettings.PADDING_MONTH_PRECISION * self.month_day_ua._get_multiplier()
if self.season_ua:
result += appsettings.PADDING_SEASON_PRECISION * self.season_ua._get_multiplier()
if self.all_ua:
multiplier = self.all_ua._get_multiplier()
if self.precision == PRECISION_DAY:
result += multiplier * appsettings.PADDING_DAY_PRECISION
result += multiplier * appsettings.PADDING_MONTH_PRECISION
result += multiplier * appsettings.PADDING_YEAR_PRECISION
elif self.precision == PRECISION_MONTH:
result += multiplier * appsettings.PADDING_MONTH_PRECISION
result += multiplier * appsettings.PADDING_YEAR_PRECISION
elif self.precision == PRECISION_YEAR:
result += multiplier * appsettings.PADDING_YEAR_PRECISION
return result | This is not a perfect interpretation, as fuzziness is introduced for
redundant uncertainty modifiers, e.g. (2006~)~ will get two sets of
fuzziness. |
def deploy(self, *lambdas):
if not self.role:
logger.error('Missing AWS Role')
raise ArgumentsError('Role required')
logger.debug('Deploying lambda {}'.format(self.lambda_name))
zfh = self.package()
if self.lambda_name in self.get_function_names():
logger.info('Updating {} lambda'.format(self.lambda_name))
response = self.client.update_function_code(
FunctionName=self.lambda_name,
ZipFile=zfh.getvalue(),
Publish=True
)
else:
logger.info('Adding new {} lambda'.format(self.lambda_name))
response = self.client.create_function(
FunctionName=self.lambda_name,
Runtime=yaep.env(
'LAMBDA_RUNTIME',
'python2.7'
),
Role=self.role,
Handler=yaep.env(
'LAMBDA_HANDLER',
'lambda_function.lambda_handler'
),
Code={
'ZipFile': zfh.getvalue(),
},
Description=yaep.env(
'LAMBDA_DESCRIPTION',
'Lambda code for {}'.format(self.lambda_name)
),
Timeout=yaep.env(
'LAMBDA_TIMEOUT',
3,
convert_booleans=False,
type_class=int
),
MemorySize=yaep.env(
'LAMBDA_MEMORY_SIZE',
128,
convert_booleans=False,
type_class=int
),
Publish=True
)
status_code = response.get(
'ResponseMetadata', {}
).get('HTTPStatusCode')
if status_code in [200, 201]:
logger.info('Successfully deployed {} version {}'.format(
self.lambda_name,
response.get('Version', 'Unknown')
))
else:
logger.error('Error deploying {}: {}'.format(
self.lambda_name,
response
)) | Deploys lambdas to AWS |
def list(self):
for function in self.client.list_functions().get('Functions', []):
lines = json.dumps(function, indent=4, sort_keys=True).split('\n')
for line in lines:
logger.info(line) | Lists already deployed lambdas |
def get_info(self):
if LooseVersion(django.get_version()) < LooseVersion('1.7.0'):
info = self.model._meta.app_label, self.model._meta.module_name
else:
info = self.model._meta.app_label, self.model._meta.model_name
return info | Helper method to get model info in a form of (app_label, model_name).
Avoid deprecation warnings and failures with different Django versions. |
def date_to_jd(year,month,day):
if month == 1 or month == 2:
yearp = year - 1
monthp = month + 12
else:
yearp = year
monthp = month
# this checks where we are in relation to October 15, 1582, the beginning
# of the Gregorian calendar.
if ((year < 1582) or
(year == 1582 and month < 10) or
(year == 1582 and month == 10 and day < 15)):
# before start of Gregorian calendar
B = 0
else:
# after start of Gregorian calendar
A = math.trunc(yearp / 100.)
B = 2 - A + math.trunc(A / 4.)
if yearp < 0:
C = math.trunc((365.25 * yearp) - 0.75)
else:
C = math.trunc(365.25 * yearp)
D = math.trunc(30.6001 * (monthp + 1))
jd = B + C + D + day + 1720994.5
return jd | Convert a date to Julian Day.
Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet',
4th ed., Duffet-Smith and Zwart, 2011.
Parameters
----------
year : int
Year as integer. Years preceding 1 A.D. should be 0 or negative.
The year before 1 A.D. is 0, 10 B.C. is year -9.
month : int
Month as integer, Jan = 1, Feb. = 2, etc.
day : float
Day, may contain fractional part.
Returns
-------
jd : float
Julian Day
Examples
--------
Convert 6 a.m., February 17, 1985 to Julian Day
>>> date_to_jd(1985,2,17.25)
2446113.75 |
def jd_to_date(jd):
jd = jd + 0.5
F, I = math.modf(jd)
I = int(I)
A = math.trunc((I - 1867216.25)/36524.25)
if I > 2299160:
B = I + 1 + A - math.trunc(A / 4.)
else:
B = I
C = B + 1524
D = math.trunc((C - 122.1) / 365.25)
E = math.trunc(365.25 * D)
G = math.trunc((C - E) / 30.6001)
day = C - E + F - math.trunc(30.6001 * G)
if G < 13.5:
month = G - 1
else:
month = G - 13
if month > 2.5:
year = D - 4716
else:
year = D - 4715
return year, month, day | Convert Julian Day to date.
Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet',
4th ed., Duffet-Smith and Zwart, 2011.
Parameters
----------
jd : float
Julian Day
Returns
-------
year : int
Year as integer. Years preceding 1 A.D. should be 0 or negative.
The year before 1 A.D. is 0, 10 B.C. is year -9.
month : int
Month as integer, Jan = 1, Feb. = 2, etc.
day : float
Day, may contain fractional part.
Examples
--------
Convert Julian Day 2446113.75 to year, month, and day.
>>> jd_to_date(2446113.75)
(1985, 2, 17.25) |
def hmsm_to_days(hour=0,min=0,sec=0,micro=0):
days = sec + (micro / 1.e6)
days = min + (days / 60.)
days = hour + (days / 60.)
return days / 24. | Convert hours, minutes, seconds, and microseconds to fractional days.
Parameters
----------
hour : int, optional
Hour number. Defaults to 0.
min : int, optional
Minute number. Defaults to 0.
sec : int, optional
Second number. Defaults to 0.
micro : int, optional
Microsecond number. Defaults to 0.
Returns
-------
days : float
Fractional days.
Examples
--------
>>> hmsm_to_days(hour=6)
0.25 |
def days_to_hmsm(days):
if days >= 1:
raise ValueError("days must be less than 1")  # matches the documented Raises
hours = days * 24.
hours, hour = math.modf(hours)
mins = hours * 60.
mins, min = math.modf(mins)
secs = mins * 60.
secs, sec = math.modf(secs)
micro = round(secs * 1.e6)
return int(hour), int(min), int(sec), int(micro) | Convert fractional days to hours, minutes, seconds, and microseconds.
Precision beyond microseconds is rounded to the nearest microsecond.
Parameters
----------
days : float
A fractional number of days. Must be less than 1.
Returns
-------
hour : int
Hour number.
min : int
Minute number.
sec : int
Second number.
micro : int
Microsecond number.
Raises
------
ValueError
If `days` is >= 1.
Examples
--------
>>> days_to_hmsm(0.1)
(2, 24, 0, 0) |
def datetime_to_jd(date):
days = date.day + hmsm_to_days(date.hour,date.minute,date.second,date.microsecond)
return date_to_jd(date.year,date.month,days) | Convert a `datetime.datetime` object to Julian Day.
Parameters
----------
date : `datetime.datetime` instance
Returns
-------
jd : float
Julian day.
Examples
--------
>>> d = datetime.datetime(1985,2,17,6)
>>> d
datetime.datetime(1985, 2, 17, 6, 0)
>>> jdutil.datetime_to_jd(d)
2446113.75 |
def jd_to_datetime(jd):
year, month, day = jd_to_date(jd)
frac_days,day = math.modf(day)
day = int(day)
hour,min,sec,micro = days_to_hmsm(frac_days)
return datetime(year,month,day,hour,min,sec,micro) | Convert a Julian Day to an `jdutil.datetime` object.
Parameters
----------
jd : float
Julian day.
Returns
-------
dt : `jdutil.datetime` object
`jdutil.datetime` equivalent of Julian day.
Examples
--------
>>> jd_to_datetime(2446113.75)
datetime(1985, 2, 17, 6, 0) |
def timedelta_to_days(td):
seconds_in_day = 24. * 3600.
days = td.days + (td.seconds + (td.microseconds / 1.e6)) / seconds_in_day  # microseconds / 1e6 converts to seconds
return days | Convert a `datetime.timedelta` object to a total number of days.
Parameters
----------
td : `datetime.timedelta` instance
Returns
-------
days : float
Total number of days in the `datetime.timedelta` object.
Examples
--------
>>> td = datetime.timedelta(4.5)
>>> td
datetime.timedelta(4, 43200)
>>> timedelta_to_days(td)
4.5 |
def create_schema(cls, obj, name, schema_type, root):
if schema_type == SchemaTypes.MAPPED:
schema = SchemaMapWrapper(obj, storage=cls, name=name, root=root)
else:
schema = Schema(obj, schema_type, storage=cls, name=name, root=root)
cls.add_schema(schema)
return schema | Create Schema object
:param dict obj: swagger schema object
:param str name: schema name
:param str schema_type: schema location.
Can be ``inline``, ``definition`` or ``mapped``
:param BaseSwaggerObject root: root doc
:return: new schema
:rtype: Schema |
def get_schemas(cls, schema_types=None, sort=True):
result = filter(lambda x: not x.is_inline_array, cls._schemas.values())
if schema_types:
result = filter(lambda x: x.schema_type in schema_types, result)
if sort:
result = sorted(result, key=attrgetter('name'))
return result | Get schemas by type. If ``schema_type`` is None, return all schemas
:param schema_types: list of schema types
:type schema_types: list or None
:param bool sort: sort by name
:return: list of schemas
:rtype: list |
def merge_schemas(cls, schema, _schema):
tmp = [dict(p) for p in schema.properties]  # copy the dicts too: pop() below must not mutate the source schema
prop = {}
for item in tmp:
prop[item.pop('name')] = item
for _prop in _schema.properties:
if prop.get(_prop['name']):
prop.pop(_prop['name'])
if prop:
for k, v in prop.items():
v['name'] = k
_schema.properties.append(v)
return _schema | Return second Schema, which is extended by first Schema
https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#composition-and-inheritance-polymorphism |
def dt_to_struct_time(dt):
if isinstance(dt, datetime):
return struct_time(
[dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second] +
TIME_EMPTY_EXTRAS
)
elif isinstance(dt, date):
return struct_time(
[dt.year, dt.month, dt.day] + TIME_EMPTY_TIME + TIME_EMPTY_EXTRAS
)
else:
raise NotImplementedError(
"Cannot convert %s to `struct_time`" % type(dt)) | Convert a `datetime.date` or `datetime.datetime` to a `struct_time`
representation *with zero values* for data fields that we cannot always
rely on for ancient or far-future dates: tm_wday, tm_yday, tm_isdst
NOTE: If it wasn't for the requirement that the extra fields are unset
we could use the `timetuple()` method instead of this function. |
def trim_struct_time(st, strip_time=False):
if strip_time:
return struct_time(list(st[:3]) + TIME_EMPTY_TIME + TIME_EMPTY_EXTRAS)
else:
return struct_time(list(st[:6]) + TIME_EMPTY_EXTRAS) | Return a `struct_time` based on the one provided but with the extra fields
`tm_wday`, `tm_yday`, and `tm_isdst` reset to default values.
If `strip_time` is set to true the time value are also set to zero:
`tm_hour`, `tm_min`, and `tm_sec`. |
def struct_time_to_jd(st):
year, month, day = st[:3]
hours, minutes, seconds = st[3:6]
# Convert time of day to fraction of day
day += jdutil.hmsm_to_days(hours, minutes, seconds)
return jdutil.date_to_jd(year, month, day) | Return a float number representing the Julian Date for the given
`struct_time`.
NOTE: extra fields `tm_wday`, `tm_yday`, and `tm_isdst` are ignored. |
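This reproduces the docstring example from date_to_jd above: 6 a.m. on 1985-02-17 is JD 2446113.75:
>>> from time import struct_time
>>> struct_time_to_jd(struct_time((1985, 2, 17, 6, 0, 0, 0, 0, 0)))
2446113.75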
def jd_to_struct_time(jd):
year, month, day = jdutil.jd_to_date(jd)
# Convert time of day from fraction of day
day_fraction = day - int(day)
hour, minute, second, ms = jdutil.days_to_hmsm(day_fraction)
day = int(day)
# This conversion can return negative values for items we do not want to be
# negative: month, day, hour, minute, second.
year, month, day, hour, minute, second = _roll_negative_time_fields(
year, month, day, hour, minute, second)
return struct_time(
[year, month, day, hour, minute, second] + TIME_EMPTY_EXTRAS
) | Return a `struct_time` converted from a Julian Date float number.
WARNING: Conversion to then from Julian Date value to `struct_time` can be
inaccurate and lose or gain time, especially for BC (negative) years.
NOTE: extra fields `tm_wday`, `tm_yday`, and `tm_isdst` are set to default
values, not real ones. |
def _roll_negative_time_fields(year, month, day, hour, minute, second):
if second < 0:
minute += int(second / 60.0) # Adjust by whole minute in secs
minute -= 1 # Subtract 1 for negative second
second %= 60 # Convert negative second to positive remainder
if minute < 0:
hour += int(minute / 60.0) # Adjust by whole hour in minutes
hour -= 1 # Subtract 1 for negative minutes
minute %= 60 # Convert negative minute to positive remainder
if hour < 0:
day += int(hour / 24.0) # Adjust by whole day in hours
day -= 1 # Subtract 1 for negative hours
hour %= 24 # Convert negative hour to positive remainder
if day < 0:
month += int(day / 30.0) # Adjust by whole month in days (assume 30)
month -= 1 # Subtract 1 for negative days
day %= 30 # Convert negative day to positive remainder
if month < 0:
year += int(month / 12.0) # Adjust by whole year in months
year -= 1 # Subtract 1 for negative months
month %= 12 # Convert negative month to positive remainder
return (year, month, day, hour, minute, second) | Fix date/time fields which have nonsense negative values for any field
except for year by rolling the overall date/time value backwards, treating
negative values as relative offsets of the next higher unit.
For example minute=5, second=-63 becomes minute=3, second=57 (5 minutes
less 63 seconds)
This is very unsophisticated handling of negative values which we would
ideally do with `dateutil.relativedelta` but cannot because that class does
not support arbitrary dates, especially not negative years which is the
only case where these nonsense values are likely to occur anyway.
NOTE: To greatly simplify the logic we assume all months are 30 days long. |
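A worked doctest of the minute/second example from the docstring: the -63 seconds borrow one whole minute (leaving minute=3) and the remainder wraps to +57 seconds.
>>> _roll_negative_time_fields(2000, 1, 1, 0, 5, -63)
(2000, 1, 1, 0, 3, 57)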
def get_example_by_schema(cls, schema, ignored_schemas=None, paths=None, name=''):
if schema.schema_example:
return schema.schema_example
if ignored_schemas is None:
ignored_schemas = []
if paths is None:
paths = []
if name:
paths = list(map(lambda path: '.'.join((path, name)), paths))
if schema.ref_path:
paths.append(schema.ref_path)
if schema.schema_id in ignored_schemas:
result = [] if schema.is_array else {}
else:
schemas = ignored_schemas + [schema.schema_id]
kwargs = dict(
ignored_schemas=schemas,
paths=paths
)
if schema.is_array:
result = cls.get_example_for_array(
schema.item, **kwargs)
elif schema.type in PRIMITIVE_TYPES:
result = cls.get_example_value_for_primitive_type(
schema.type, schema.raw, schema.type_format, paths=paths
)
elif schema.all_of:
result = {}
for _schema_id in schema.all_of:
schema = SchemaObjects.get(_schema_id)
result.update(cls.get_example_by_schema(schema, **kwargs))
else:
result = cls.get_example_for_object(
schema.properties, nested=schema.nested_schemas, **kwargs)
return result | Get example by schema object
:param Schema schema: current schema
:param list ignored_schemas: list of previous schemas
for avoid circular references
:param list paths: list object paths (ex. #/definitions/Model.property)
If nested schemas exists, custom examples checks in order from paths
:param str name: name of property schema object if exists
:return: dict or list (if schema is array) |
def get_body_example(cls, operation):
path = "#/paths/'{0.path}'/{0.method}/parameters/{name}".format(
operation, name=operation.body.name or 'body')
return cls.get_example_by_schema(operation.body, paths=[path]) | Get example for body parameter example by operation
:param Operation operation: operation object |
def get_response_example(cls, operation, response):
path = "#/paths/'{}'/{}/responses/{}".format(
operation.path, operation.method, response.name)
kwargs = dict(paths=[path])
if response.type in PRIMITIVE_TYPES:
result = cls.get_example_value_for_primitive_type(
response.type, response.properties, response.type_format, **kwargs)
else:
schema = SchemaObjects.get(response.type)
result = cls.get_example_by_schema(schema, **kwargs)
return result | Get example for response object by operation object
:param Operation operation: operation object
:param Response response: response object |
def get_header_example(cls, header):
if header.is_array:
result = cls.get_example_for_array(header.item)
else:
example_method = getattr(cls, '{}_example'.format(header.type))
result = example_method(header.properties, header.type_format)
return {header.name: result} | Get example for header object
:param Header header: Header object
:return: example
:rtype: dict |
def get_property_example(cls, property_, nested=None, **kw):
paths = kw.get('paths', [])
name = kw.get('name', '')
result = None
if name and paths:
paths = list(map(lambda path: '.'.join((path, name)), paths))
result, path = cls._get_custom_example(paths)
if result is not None and property_['type'] in PRIMITIVE_TYPES:
cls._example_validate(
path, result, property_['type'], property_['type_format'])
return result
if SchemaObjects.contains(property_['type']):
schema = SchemaObjects.get(property_['type'])
if result is not None:
if schema.is_array:
if not isinstance(result, list):
result = [result] * cls.EXAMPLE_ARRAY_ITEMS_COUNT
else:
if isinstance(result, list):
cls.logger.warning(
'Example type mismatch in path {}'.format(schema.ref_path))
else:
result = cls.get_example_by_schema(schema, **kw)
if (not result) and schema.nested_schemas:
for _schema_id in schema.nested_schemas:
_schema = SchemaObjects.get(_schema_id)
if _schema:
if isinstance(_schema, SchemaMapWrapper):
result[_schema.name] = cls.get_example_by_schema(_schema, **kw)
elif _schema.nested_schemas:
for _schema__id in _schema.nested_schemas:
_schema_ = SchemaObjects.get(_schema__id)
if isinstance(_schema_, SchemaMapWrapper):
result[_schema.name] = cls.get_example_by_schema(_schema_, **kw)
else:
result = cls.get_example_value_for_primitive_type(
property_['type'],
property_['type_properties'],
property_['type_format'],
**kw
)
return result | Get example for property
:param dict property_:
:param set nested:
:return: example value |
def mkres(self):
for d in DENSITY_TYPES:
if d == 'ldpi' and not self.ldpi:
continue # skip ldpi
if d == 'xxxhdpi' and not self.xxxhdpi:
continue # skip xxxhdpi
try:
path = os.path.join(self.out, 'res/drawable-%s' % d)
os.makedirs(path, 0o755)
except OSError:
pass | Create a directory tree for the resized assets |
def get_size_for_density(self, size, target_density):
current_size = size
current_density = DENSITY_MAP[self.source_density]
target_density = DENSITY_MAP[target_density]
return int(current_size * (target_density / current_density)) | Return the new image size for the target density |
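For example, assuming ``DENSITY_MAP`` holds the conventional Android scale factors (mdpi=1.0, xhdpi=2.0, and so on), a 100 px mdpi source asset doubles at xhdpi (``resizer`` stands for a hypothetical instance with ``source_density = 'mdpi'``):
>>> resizer.get_size_for_density(100, 'xhdpi')
200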
def resize_image(self, path, im):
# Get the original filename
_, filename = os.path.split(path)
# Generate the new filename
filename = self.get_safe_filename(filename)
filename = '%s%s' % (self.prefix if self.prefix else '', filename)
# Get the original image size
w, h = im.size
# Generate assets from the source image
for d in DENSITY_TYPES:
if d == 'ldpi' and not self.ldpi:
continue # skip ldpi
if d == 'xxxhdpi' and not self.xxxhdpi:
continue # skip xxxhdpi
out_file = os.path.join(self.out,
self.get_out_for_density(d), filename)
if d == self.source_density:
im.save(out_file, quality=self.image_quality)
else:
size = (self.get_size_for_density(w, d),
self.get_size_for_density(h, d))
im.resize(size, self.image_filter).save(out_file,
quality=self.image_quality) | Generate assets from the given image and path, for the case where you've
already called Image.open |
def encode_transit(records):
'''Returns the records serialized as Transit/json in utf8'''
with StringIO() as buf:
writer = Writer(buf, "json")
writer.write(records)
return buf.getvalue().encode('utf8') | Returns the records serialized as Transit/json in utf8 |
def push(self, message, callback_arg=None):
if message['action'] == 'upsert':
message.setdefault('key_names', self.key_names)
message['client_id'] = self.client_id
message.setdefault('table_name', self.table_name)
self._add_message(message, callback_arg)
batch = self._take_batch(self.target_messages_per_batch)
if batch:
self._send_batch(batch) | message should be a dict recognized by the Stitch Import API.
See https://www.stitchdata.com/docs/integrations/import-api. |
def _take_batch(self, min_records):
'''If we have enough data to build a batch, returns all the data in the
buffer and then clears the buffer.'''
if not self._buffer:
return []
enough_messages = len(self._buffer) >= min_records
enough_time = time.time() - self.time_last_batch_sent >= self.batch_delay_seconds
ready = enough_messages or enough_time
if not ready:
return []
result = list(self._buffer)
self._buffer.clear()
return result | If we have enough data to build a batch, returns all the data in the
buffer and then clears the buffer. |
buffer and then clears the buffer. |
def get_parameters_by_location(self, locations=None, excludes=None):
result = self.parameters
if locations:
result = filter(lambda x: x.location_in in locations, result)
if excludes:
result = filter(lambda x: x.location_in not in excludes, result)
return list(result) | Get parameters list by location
:param locations: list of locations
:type locations: list or None
:param excludes: list of excludes locations
:type excludes: list or None
:return: list of Parameter
:rtype: list |
def body(self):
body = self.get_parameters_by_location(['body'])
return self.root.schemas.get(body[0].type) if body else None | Return body request parameter
:return: Body parameter
:rtype: Parameter or None |
def find(node):
if node.parent is None:
return node
root = node
while root.parent is not None:
root = root.parent
parent = node
while parent.parent is not root:
grandparent = parent.parent
parent.parent = root
parent = grandparent
return root | Find current canonical representative equivalent to node.
Adjust the parent pointer of each node along the way to the root
to point directly at the root for inverse-Ackermann-fast access. |
def union(a, b):
a = find(a)
b = find(b)
if a is not b:
if a.rank < b.rank:
a.parent = b
elif b.rank < a.rank:
b.parent = a
else:
b.parent = a
a.rank += 1 | Assert equality of two nodes a and b so find(a) is find(b). |
def classes(equivalences):
node = OrderedDict()
def N(x):
if x in node:
return node[x]
n = node[x] = Node(x)
return n
for x, y in equivalences:
union(N(x), N(y))
eqclass = OrderedDict()
for x, n in node.iteritems():
x_ = find(n).element
if x_ not in eqclass:
eqclass[x_] = []
eqclass[x_].append(x)
eqclass[x] = eqclass[x_]
return eqclass | Compute mapping from element to list of equivalent elements.
`equivalences` is an iterable of (x, y) tuples representing
equivalences x ~ y.
Returns an OrderedDict mapping each x to the list of elements
equivalent to x. |
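A usage sketch, assuming ``Node`` carries ``element``, ``parent=None`` and ``rank=0``:
>>> eq = classes([('a', 'b'), ('b', 'c'), ('x', 'y')])
>>> eq['a']
['a', 'b', 'c']
>>> eq['y']
['x', 'y']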
def ascii2h5(dat_fname, h5_fname):
table = np.loadtxt(dat_fname, skiprows=1, dtype='f4')
filter_kwargs = dict(
chunks=True,
compression='gzip',
compression_opts=3)
# Filter out pixels with all zeros
idx = ~np.all(table[:,2:32] < 1.e-5, axis=1)
with h5py.File(h5_fname, 'w') as f:
d = np.arange(0., 4.351, 0.15).astype('f4')
dset = f.create_dataset('dists', data=d, **filter_kwargs)
dset.attrs['description'] = 'Distances at which extinction is measured'
dset.attrs['units'] = 'kpc'
dset = f.create_dataset('pix_lb', data=table[idx,0:2], **filter_kwargs)
dset.attrs['description'] = 'Galactic (l, b) of each pixel'
dset.attrs['units'] = 'deg'
dset = f.create_dataset('A_r', data=table[idx,2:32], **filter_kwargs)
dset.attrs['description'] = 'Extinction'
dset.attrs['shape'] = '(pixel, distance)'
dset.attrs['band'] = 'r'
dset.attrs['units'] = 'mag'
dset = f.create_dataset('A_r_err', data=table[idx,32:], **filter_kwargs)
dset.attrs['description'] = 'Gaussian uncertainty in extinction'
dset.attrs['shape'] = '(pixel, distance)'
dset.attrs['band'] = 'r'
dset.attrs['units'] = 'mag' | Converts from the original ASCII format of the Chen+ (2014) 3D dust map to
the HDF5 format.
Args:
dat_fname (:obj:`str`): Filename of the original ASCII .dat file.
h5_fname (:obj:`str`): Output filename to write the resulting HDF5 file to. |
def fetch(clobber=False):
dest_dir = fname_pattern = os.path.join(data_dir(), 'chen2014')
url = 'http://lamost973.pku.edu.cn/site/Photometric-Extinctions-and-Distances/table2.dat'
dat_fname = os.path.join(dest_dir, 'chen2014.dat')
h5_fname = os.path.join(dest_dir, 'chen2014.h5')
md5 = 'f8a2bc46d411c57ca4c76dc344e291f1'
# Check if file already exists
if not clobber:
h5_size = 52768768 # Guess, in Bytes
h5_dsets = {
'dists': (30,),
'pix_lb': (557398, 2),
'A_r': (557398, 30),
'A_r_err': (557398, 30)
}
if fetch_utils.h5_file_exists(h5_fname, h5_size, dsets=h5_dsets):
print('File appears to exist already. Call `fetch(clobber=True)` '
'to force overwriting of existing file.')
return
# Download the table
print('Downloading {}'.format(url))
fetch_utils.download_and_verify(url, md5, fname=dat_fname)
# Convert from ASCII to HDF5 format
print('Repacking files...')
ascii2h5(dat_fname, h5_fname)
# Cleanup
print('Removing original file...')
os.remove(dat_fname) | Downloads the Chen et al. (2014) dust map.
Args:
clobber (Optional[:obj:`bool`]): If ``True``, any existing file will be
overwritten, even if it appears to match. If ``False`` (the
default), :obj:`fetch()` will attempt to determine if the dataset
already exists. This determination is not 100% robust against data
corruption. |
def lpad(msg, symbol, length):
if len(msg) >= length:
return msg
return symbol * (length - len(msg)) + msg | Left-pad a given string (msg) with a character (symbol) until it reaches a given total length.
Return the padded string |
def changebase(string, frm, to, minlen=0):
if frm == to:
return lpad(string, get_code_string(frm)[0], minlen)
return encode(decode(string, frm), to, minlen) | Change a string's characters from one base to another.
Return the re-encoded string |
def num_to_var_int(x):
x = int(x)
if x < 253:
return from_int_to_byte(x)
elif x < 65536:
return from_int_to_byte(253) + encode(x, 256, 2)[::-1]
elif x < 4294967296:
return from_int_to_byte(254) + encode(x, 256, 4)[::-1]
else:
return from_int_to_byte(255) + encode(x, 256, 8)[::-1] | (bitcoin-specific): convert an integer into a variable-length integer |
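A quick check of the encoding boundaries (Python 2 byte strings, matching the snippet's idiom where from_int_to_byte is assumed to return a single chr):
>>> num_to_var_int(100)  # < 253: a single raw byte
'd'
>>> num_to_var_int(1000)  # < 65536: 0xfd marker + uint16 little-endian
'\xfd\xe8\x03'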
def encode(val, base, minlen=0):
base, minlen = int(base), int(minlen)
code_string = get_code_string(base)
result = ""
while val > 0:
result = code_string[val % base] + result
val //= base
return code_string[0] * max(minlen - len(result), 0) + result | Given an integer value (val) and a numeric base (base),
encode it into the string of symbols with the given base.
(with minimum length minlen)
Returns the (left-padded) re-encoded val as a string. |
def decode(string, base):
base = int(base)
code_string = get_code_string(base)
result = 0
if base == 16:
string = string.lower()
while len(string) > 0:
result *= base
result += code_string.find(string[0])
string = string[1:]
return result | Given a string (string) and a numeric base (base),
decode the string into an integer.
Returns the integer |
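Round-trip examples (``get_code_string(16)`` is assumed to be the usual lowercase hex alphabet):
>>> encode(255, 16)
'ff'
>>> decode('ff', 16)
255
>>> encode(255, 16, 4)  # left-padded to minlen with the zero symbol
'00ff'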
def json_is_base(obj, base):
alpha = get_code_string(base)
if isinstance(obj, (str, unicode)):
for i in range(len(obj)):
if alpha.find(obj[i]) == -1:
return False
return True
elif isinstance(obj, (int, long, float)) or obj is None:
return True
elif isinstance(obj, list):
for i in range(len(obj)):
if not json_is_base(obj[i], base):
return False
return True
else:
for x in obj:
if not json_is_base(obj[x], base):
return False
return True | Given a primitive compound Python object
(i.e. a dict, string, int, or list) and a numeric base,
verify whether or not the object and all relevant
sub-components have the given numeric base.
Return True if so.
Return False if not. |
def json_changebase(obj, changer):
if isinstance(obj, (str, unicode)):
return changer(obj)
elif isinstance(obj, (int, long)) or obj is None:
return obj
elif isinstance(obj, list):
return [json_changebase(x, changer) for x in obj]
elif isinstance(obj, dict):
return dict((x, json_changebase(obj[x], changer)) for x in obj)
else:
raise ValueError("Invalid object") | Given a primitive compound Python object (i.e. a dict,
string, int, or list) and a changer function that takes
a primitive Python object as an argument, apply the
changer function to the object and each sub-component.
Return the newly-reencoded object. |
def get_CrossCatClient(client_type, **kwargs):
client = None
if client_type == 'local':
import crosscat.LocalEngine as LocalEngine
le = LocalEngine.LocalEngine(**kwargs)
client = CrossCatClient(le)
elif client_type == 'multiprocessing':
import crosscat.MultiprocessingEngine as MultiprocessingEngine
me = MultiprocessingEngine.MultiprocessingEngine(**kwargs)
client = CrossCatClient(me)
else:
raise Exception('unknown client_type: %s' % client_type)
return client | Helper which instantiates the appropriate Engine and returns a Client |