text_prompt | code_prompt
---|---
stringlengths 100–17.7k, ⌀ | stringlengths 7–9.86k, ⌀
<SYSTEM_TASK:>
Dynamically import a module returning a handle to it.
<END_TASK>
<USER_TASK:>
Description:
def _import_module(self, module_path):
"""Dynamically import a module returning a handle to it.
:param str module_path: The module path
:rtype: module
""" |
LOGGER.debug('Importing %s', module_path)
try:
return __import__(module_path)
except ImportError as error:
LOGGER.critical('Could not import %s: %s', module_path, error)
return None |
<SYSTEM_TASK:>
If the "base" path is set in the paths section of the config, insert
<END_TASK>
<USER_TASK:>
Description:
def _insert_base_path(self):
"""If the "base" path is set in the paths section of the config, insert
it into the python path.
""" |
if config.BASE in self.paths:
sys.path.insert(0, self.paths[config.BASE]) |
<SYSTEM_TASK:>
Take a given inbound list for a route and parse it, creating the
<END_TASK>
<USER_TASK:>
Description:
def _prepare_route(self, attrs):
"""Take a given inbound list for a route and parse it creating the
route and importing the class it belongs to.
:param list attrs: Route attributes
:rtype: list
""" |
if type(attrs) not in (list, tuple):
LOGGER.error('Invalid route, must be a list or tuple: %r', attrs)
return
# By default there are not any extra kwargs
kwargs = None
# If there is a regex based route, set it up with a raw string
if attrs[0] == 're':
route = r'%s' % attrs[1]
classpath = attrs[2]
if len(attrs) == 4:
kwargs = attrs[3]
else:
route = r'%s' % attrs[0]
classpath = attrs[1]
if len(attrs) == 3:
kwargs = attrs[2]
LOGGER.debug('Initializing route: %s with %s', route, classpath)
try:
handler = self._import_class(classpath)
except ImportError as error:
LOGGER.error('Class import error for %s: %r', classpath, error)
return None
# Setup the prepared route, adding kwargs if there are any
prepared_route = [route, handler]
if kwargs:
prepared_route.append(kwargs)
# Return the prepared route as a tuple
return tuple(prepared_route) |
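For illustration, a hedged sketch of the inputs and outputs _prepare_route handles; the class paths below are made up and would normally be resolved to handler classes by _import_class:

routes = [
    ('/status', 'app.handlers.StatusHandler'),
    ('re', r'^/user/(\d+)$', 'app.handlers.UserHandler', {'scope': 'admin'}),
]
# Feeding each entry through _prepare_route, assuming both class paths import
# cleanly, would yield:
#   ('/status', StatusHandler)
#   ('^/user/(\d+)$', UserHandler, {'scope': 'admin'})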
<SYSTEM_TASK:>
Prepare the routes by iterating through the list of tuples & calling
<END_TASK>
<USER_TASK:>
Description:
def _prepare_routes(self, routes):
"""Prepare the routes by iterating through the list of tuples & calling
prepare route on them.
:param routes: Routes to prepare
:type routes: list
:rtype: list
:raises: ValueError
""" |
if not isinstance(routes, list):
raise ValueError('Routes parameter must be a list of tuples')
prepared_routes = list()
for parts in routes:
route = self._prepare_route(parts)
if route:
LOGGER.info('Appending handler: %r', route)
prepared_routes.append(route)
return prepared_routes |
<SYSTEM_TASK:>
Prepare the list of transforming objects
<END_TASK>
<USER_TASK:>
Description:
def _prepare_transforms(self):
"""Prepare the list of transforming objects""" |
for offset, value in enumerate(self._config.get(config.TRANSFORMS, [])):
self._config[config.TRANSFORMS][offset] = self._import_class(value) |
<SYSTEM_TASK:>
Load in translations if they are set, and add the default locale as
<END_TASK>
<USER_TASK:>
Description:
def _prepare_translations(self):
"""Load in translations if they are set, and add the default locale as
well.
""" |
if config.TRANSLATIONS in self.paths:
LOGGER.info('Loading translations from %s',
self.paths[config.TRANSLATIONS])
from tornado import locale
locale.load_translations(self.paths[config.TRANSLATIONS])
if config.DEFAULT_LOCALE in self._config:
LOGGER.info('Setting default locale to %s',
self._config[config.DEFAULT_LOCALE])
locale.set_default_locale(self._config[config.DEFAULT_LOCALE]) |
<SYSTEM_TASK:>
Prepare the UI Modules from a list of namespaced paths.
<END_TASK>
<USER_TASK:>
Description:
def _prepare_uimodules(self):
"""Prepare the UI Modules from a list of namespaced paths.""" |
for key, value in self._config.get(config.UI_MODULES, {}).iteritems():
self._config[config.UI_MODULES][key] = self._import_class(value)
self._config[config.UI_MODULES] = dict(self._config[config.UI_MODULES] or {}) |
<SYSTEM_TASK:>
Setup the application version
<END_TASK>
<USER_TASK:>
Description:
def _prepare_version(self):
"""Setup the application version""" |
if config.VERSION not in self._config:
self._config[config.VERSION] = __version__ |
<SYSTEM_TASK:>
Add an attribute value to our object instance.
<END_TASK>
<USER_TASK:>
Description:
def add(self, item, value):
"""Add an attribute value to our object instance.
:param str item: Application attribute name
:param any value: Value to associate with the attribute
:raises: AttributeError
""" |
if item in self.__dict__[self.ATTRIBUTES].keys():
raise AttributeError('%s already exists' % item)
setattr(self, item, value) |
<SYSTEM_TASK:>
Remove an attribute value from our object instance.
<END_TASK>
<USER_TASK:>
Description:
def remove(self, item):
"""Remove an attribute value to our object instance.
:param str item: Application attribute name
:raises: AttributeError
""" |
if item not in self.__dict__[self.ATTRIBUTES].keys():
raise AttributeError('%s does not exist' % item)
delattr(self, item) |
<SYSTEM_TASK:>
Returns the total number of objects, across all pages.
<END_TASK>
<USER_TASK:>
Description:
def count(self):
"""
Returns the total number of objects, across all pages.
""" |
try:
return self.object_list.count()
except (AttributeError, TypeError):
# AttributeError if object_list has no count() method.
# TypeError if object_list.count() requires arguments
# (i.e. is of type list).
return len(self.object_list) |
<SYSTEM_TASK:>
Returns the total number of pages.
<END_TASK>
<USER_TASK:>
Description:
def num_pages(self):
"""
Returns the total number of pages.
""" |
if self.count == 0 and not self.allow_empty_first_page:
return 0
hits = max(1, self.count - self.orphans)
return int(ceil(hits / float(self.per_page))) |
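A worked example of the orphans rule above, with made-up numbers:

from math import ceil

count, per_page, orphans = 23, 10, 3        # 23 objects, 10 per page, absorb up to 3 orphans
hits = max(1, count - orphans)              # 20: the 3 trailing objects join the previous page
print(int(ceil(hits / float(per_page))))    # 2 pages instead of 3; the last page holds 13 objects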
<SYSTEM_TASK:>
Turns an array-style response into a list of models.
<END_TASK>
<USER_TASK:>
Description:
def as_models(self, klass):
"""Turns an array-style response into a list of models.""" |
try:
return [klass(mod) for mod in self.tree.reply.data.results.array[0].item]
except AttributeError:
return [] |
<SYSTEM_TASK:>
Create and return the argument parser with all of the arguments
<END_TASK>
<USER_TASK:>
Description:
def _create_argument_parser(self):
"""Create and return the argument parser with all of the arguments
and configuration ready to go.
:rtype: argparse.ArgumentParser
""" |
parser = self._new_argument_parser()
self._add_base_arguments(parser)
self._add_required_arguments(parser)
return parser |
<SYSTEM_TASK:>
Load all modules automatically and find bases and eggs.
<END_TASK>
<USER_TASK:>
Description:
def autodiscover(cls,
module_paths: List[str],
subclass: 'Container' = None) -> None:
"""
Load all modules automatically and find bases and eggs.
:param module_paths: List of paths that should be discovered
:param subclass: Optional Container subclass that should be used
""" |
def find_base(bases: set, implementation: Type):
found = {b for b in bases if issubclass(implementation, b)}
if not found:
raise ConfigurationError(
"No base defined for %r" % implementation)
elif len(found) > 1:
raise ConfigurationError(
"More than one base found for %r" % implementation)
else:
return found.pop()
def walk(pkg: Union[str, ModuleType]) -> Dict[str, ModuleType]:
if isinstance(pkg, str):
pkg: ModuleType = importlib.import_module(pkg)
results = {}
try:
path = pkg.__path__
except AttributeError:
results[pkg.__name__] = importlib.import_module(pkg.__name__)
else:
for loader, name, is_pkg in pkgutil.walk_packages(path):
full_name = pkg.__name__ + '.' + name
results[full_name] = importlib.import_module(full_name)
if is_pkg:
results.update(walk(full_name))
return results
with cls._lock:
for module_path in module_paths:
walk(module_path)
config: List[Egg] = []
for egg_ in egg.factories:
base_ = find_base(base.classes, egg_.type_)
egg_.base_ = base_
config.append(egg_)
cls.configure(config, subclass=subclass) |
<SYSTEM_TASK:>
Get instance directly from the container.
<END_TASK>
<USER_TASK:>
Description:
def get_object(self, base_: Type, qualifier: str = None) -> Any:
"""
Get instance directly from the container.
If the qualifier is not None, proper method to create/retrieve instance
is used.
:param base_: `base` of this object
:param qualifier: optional qualifier
:return: object instance
""" |
egg_ = self._find_egg(base_, qualifier)
if egg_ is None:
raise UnknownDependency('Unknown dependency %s' % base_)
scope_id = getattr(egg_.egg, '__haps_custom_scope', INSTANCE_SCOPE)
try:
_scope = self.scopes[scope_id]
except KeyError:
raise UnknownScope('Unknown scopes with id %s' % scope_id)
else:
with self._lock:
return _scope.get_object(egg_.egg) |
<SYSTEM_TASK:>
Register a new scope which should be a subclass of `Scope`
<END_TASK>
<USER_TASK:>
Description:
def register_scope(self, name: str, scope_class: Type[Scope]) -> None:
"""
Register a new scope which should be a subclass of `Scope`
:param name: Name of the new scope
:param scope_class: Class of the new scope
""" |
with self._lock:
if name in self.scopes:
raise AlreadyConfigured(f'Scope {name} already registered')
self.scopes[name] = scope_class() |
<SYSTEM_TASK:>
Renders an article
<END_TASK>
<USER_TASK:>
Description:
def render_article(request, article, current_language, slug):
"""
Renders an article
""" |
context = {}
context['article'] = article
context['lang'] = current_language
context['current_article'] = article
context['has_change_permissions'] = article.has_change_permission(request)
response = TemplateResponse(request, article.template, context)
response.add_post_render_callback(set_page_cache)
# Add headers for X Frame Options - this really should be changed upon moving to class based views
xframe_options = article.tree.get_xframe_options()
# xframe_options can be None if there's no xframe information on the page
# (e.g. a top-level page which has xframe options set to "inherit")
if xframe_options == Page.X_FRAME_OPTIONS_INHERIT or xframe_options is None:
# This is when we defer to django's own clickjacking handling
return response
# We want to prevent django setting this in their middleware
response.xframe_options_exempt = True
if xframe_options == Page.X_FRAME_OPTIONS_ALLOW:
# Do nothing, allowed is no header.
return response
elif xframe_options == Page.X_FRAME_OPTIONS_SAMEORIGIN:
response['X-Frame-Options'] = 'SAMEORIGIN'
elif xframe_options == Page.X_FRAME_OPTIONS_DENY:
response['X-Frame-Options'] = 'DENY'
return response |
<SYSTEM_TASK:>
Create a new Action linked to this endpoint with the given args.
<END_TASK>
<USER_TASK:>
Description:
def new_action(self, method='GET', **kwargs):
"""
Create a new Action linked to this endpoint with the given args.
""" |
if method not in self.methods:
raise TypeError('{} not in valid method(s): {}.'.format(method, self.methods))
return Action(self, method, **kwargs) |
<SYSTEM_TASK:>
Add a single Action to the APIObject.
<END_TASK>
<USER_TASK:>
Description:
def add_action(self, name, parent, action):
"""
Add a single Action to the APIObject.
""" |
if iskeyword(name):
name = '_' + name
self._actions[name] = parent.new_action(**action)
setattr(self, name, self._actions[name].execute) |
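A small aside on the iskeyword() guard above, shown with a hypothetical action name:

from keyword import iskeyword

name = 'import'            # an API action that happens to be a Python keyword
if iskeyword(name):
    name = '_' + name
print(name)                # '_import', safe to expose via setattr() and dot access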
<SYSTEM_TASK:>
Create a string describing the APIObject and its children
<END_TASK>
<USER_TASK:>
Description:
def printed_out(self, name):
"""
Create a string describing the APIObject and its children
""" |
out = ''
out += '|\n'
if self._id_variable:
subs = '[{}]'.format(self._id_variable)
else:
subs = ''
out += '|---{}{}\n'.format(name, subs)
if self._description:
out += '| | {}\n'.format(self._description)
for name, action in self._actions.items():
out += action.printed_out(name)
return out |
<SYSTEM_TASK:>
Create a string representation of the action
<END_TASK>
<USER_TASK:>
Description:
def printed_out(self, name):
"""
Create a string representation of the action
""" |
opt = self.variables().optional_namestring()
req = self.variables().required_namestring()
out = ''
out += '| |\n'
out += '| |---{}({}{})\n'.format(name, req, opt)
if self.description:
out += '| | {}\n'.format(self.description)
return out |
<SYSTEM_TASK:>
Open a local JSON hive file and initialize from the hive contained
<END_TASK>
<USER_TASK:>
Description:
def from_hive_file(cls, fname, *args, **kwargs):
"""
Open a local JSON hive file and initialize from the hive contained
in that file, paying attention to the version keyword argument.
""" |
version = kwargs.pop('version', None)
require = kwargs.pop('require_https', True)
return cls(Hive.from_file(fname, version, require), *args, **kwargs) |
<SYSTEM_TASK:>
Download a JSON hive file from a URL, and initialize from it,
<END_TASK>
<USER_TASK:>
Description:
def from_remote_hive(cls, url, *args, **kwargs):
"""
Download a JSON hive file from a URL, and initialize from it,
paying attention to the version keyword argument.
""" |
version = kwargs.pop('version', None)
require = kwargs.pop('require_https', False)
return cls(Hive.from_url(url, version, require), *args, **kwargs) |
<SYSTEM_TASK:>
Add an endpoint with the given name to the API.
<END_TASK>
<USER_TASK:>
Description:
def add_endpoint(self, name, **kwargs):
"""
Add an endpoint with the given name to the API.
""" |
self._endpoints[name] = Endpoint(self, **kwargs) |
<SYSTEM_TASK:>
Initialize an APIObject with the given name and make it available
<END_TASK>
<USER_TASK:>
Description:
def add_object(self, name, obj):
"""
Initialize an APIObject with the given name and make it available
using dot notation from the top-level namespace.
""" |
if iskeyword(name):
name = '_' + name
setattr(self, name, APIObject(self, **obj))
self._objects[name] = getattr(self, name) |
<SYSTEM_TASK:>
Remove an entry from the altair encoding dict
<END_TASK>
<USER_TASK:>
Description:
def raenc(self, key):
"""
Remove an entry from the altair encoding dict
""" |
if key in self.altair_encode:
del self.altair_encode[key]
else:
self.warning("Key " + key + " not found in Altair encoding dict") |
<SYSTEM_TASK:>
Get a line + text number chart
<END_TASK>
<USER_TASK:>
Description:
def _altair_line_num_(self, xfield, yfield, opts, style, encode):
"""
Get a line + text number chart
""" |
try:
c = self._altair_chart_num_("line", xfield,
yfield, opts, style, encode)
except Exception as e:
self.err(e, "Can not draw a line num chart")
return
return c |
<SYSTEM_TASK:>
Get a chart + text number chart
<END_TASK>
<USER_TASK:>
Description:
def _altair_chart_num_(self, chart_type, xfield, yfield, opts, style2, encode):
"""
Get a chart + text number chart
""" |
style = {**style2}
text_color = "grey"
if "text_color" in style:
text_color = style["text_color"]
del style["text_color"]
if "text_color" in self.chart_style:
del self.chart_style["text_color"]
if chart_type == "line":
c = Chart(self.df).mark_line(**style).encode(x=xfield, \
y=yfield, **encode).properties(**opts)
elif chart_type == "bar":
c = Chart(self.df).mark_bar(**style).encode(x=xfield, \
y=yfield, **encode).properties(**opts)
elif chart_type == "point":
c = Chart(self.df).mark_point(**style).encode(x=xfield, \
y=yfield, **encode).properties(**opts)
encoder = encode
if "text" not in encoder:
encoder["text"] = yfield
if "align" not in style:
style["align"] = "center"
if "dy" not in style:
style["dy"] = -5
if "dx" not in style and chart_type != "bar":
style["dx"] = 8
if "size" in style:
del style["size"]
style["color"] = text_color
df2 = self.df.replace({yfield.split(":")[0]: {0: self.nan}})
num = Chart(df2).mark_text(**style).encode(x=xfield, \
y=yfield, **encoder).properties(**opts)
return c + num |
<SYSTEM_TASK:>
Get a mean line chart
<END_TASK>
<USER_TASK:>
Description:
def _altair_hline_(self, xfield, yfield, opts, style, encode):
"""
Get a mean line chart
""" |
try:
rawy = yfield
if ":" in yfield:
rawy = yfield.split(":")[0]
mean = self.df[rawy].mean()
l = []
i = 0
while i < len(self.df[rawy]):
l.append(mean)
i += 1
self.df["Mean"] = l
chart = Chart(self.df).mark_line(**style).encode(x=xfield, \
y="Mean", **encode).properties(**opts)
self.drop("Mean")
return chart
except Exception as e:
self.err(e, "Can not draw mean line chart") |
<SYSTEM_TASK:>
Get an Altair chart object
<END_TASK>
<USER_TASK:>
Description:
def _get_altair_chart(self, xfield, yfield, chart_type,
label, opts={}, style={}, **kwargs):
"""
Get an Altair chart object
""" |
encode = self.altair_encode
chart = None
if chart_type == "bar":
chart = Chart(self.df).mark_bar(**style).encode(x=xfield, \
y=yfield, **encode).properties(**opts)
elif chart_type == "circle":
chart = Chart(self.df).mark_circle(**style).encode(x=xfield, \
y=yfield, **encode).properties(**opts)
elif chart_type == "line":
chart = Chart(self.df).mark_line(**style).encode(x=xfield, \
y=yfield, **encode).properties(**opts)
elif chart_type == "hline":
chart = self._altair_hline_(xfield, yfield, opts, style, encode)
elif chart_type == "line_num":
chart = self._altair_line_num_(xfield, yfield, opts, style, encode)
elif chart_type == "bar_num":
chart = self._altair_bar_num_(xfield, yfield, opts, style, encode)
elif chart_type == "point_num":
chart = self._altair_point_num_(xfield, yfield, opts, style, encode)
elif chart_type == "point":
chart = Chart(self.df).mark_point(**style).encode(x=xfield, \
y=yfield, **encode).properties(**opts)
elif chart_type == "area":
chart = Chart(self.df).mark_area(**style).encode(x=xfield, \
y=yfield, **encode).properties(**opts)
elif chart_type == "heatmap":
chart = Chart(self.df).mark_rect(**style).encode(x=xfield, \
y=yfield, **encode).properties(**opts)
elif chart_type == "text":
chart = Chart(self.df).mark_text(**style).encode(x=xfield, \
y=yfield, **encode).properties(**opts)
elif chart_type == "square":
chart = Chart(self.df).mark_square(**style).encode(x=xfield, \
y=yfield, **encode).properties(**opts)
elif chart_type == "tick":
chart = Chart(self.df).mark_tick(**style).encode(x=xfield, \
y=yfield, **encode).properties(**opts)
elif chart_type == "rule":
chart = Chart(self.df).mark_rule(**style).encode(x=xfield, \
y=yfield, **encode).properties(**opts)
return chart |
<SYSTEM_TASK:>
Parses integers in bases 10 and 16 and floats.
<END_TASK>
<USER_TASK:>
Description:
def parse_number(self, s):
"""Parses integers in bases 10 and 16 and floats.""" |
start = 1 if s[0] in ["-","+"] else 0
all_digits = lambda x: all(map(lambda c: c.isdigit(), x))
ishex = lambda c: c.isdigit() or ord(c.lower()) in range(ord("a"), ord("f") + 1)  # include 'f' itself
all_hex = lambda x: all(map(ishex, x))
if all_digits(s[start:]):
try:
return (Tokenizer.INTEGER, int(s))
except ValueError:
raise ParseError("%d:%d: Invalid integer '%s'" % (self.lineno,
self.column, s))
if s[start:].startswith("0x") and all_hex(s[start+2:]):
try:
return (Tokenizer.INTEGER, int(s, base=16))
except ValueError:
raise ParseError("%d:%d: Invalid hexadecimal integer '%s'" %
(self.lineno, self.column, s))
if any(map(lambda c: c==".", s)) or any(map(lambda c: c=="e", s)):
try:
return (Tokenizer.FLOAT, float(s))
except ValueError:
raise ParseError("%d:%d: Invalid float '%s'" % (self.lineno,
self.column, s))
raise ParseError("%d:%d: Invalid number '%s'" % (self.lineno,
self.column, s)) |
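A hedged sketch of parse_number() on a few inputs, assuming a Tokenizer instance tok and the INTEGER/FLOAT constants used above:

# tok.parse_number('42')      -> (Tokenizer.INTEGER, 42)
# tok.parse_number('-0x2a')   -> (Tokenizer.INTEGER, -42)   # base-16 with a sign prefix
# tok.parse_number('2.5e3')   -> (Tokenizer.FLOAT, 2500.0)
# tok.parse_number('abc')     -> raises ParseError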
<SYSTEM_TASK:>
Breaks a stream up into tokens.
<END_TASK>
<USER_TASK:>
Description:
def tokenize(self):
"""Breaks a stream up into tokens.
Yields tuples of the form (line_number, column, Tokenizer.TOKEN).
""" |
def readlines(s):
while True:
line = s.readline()
if line != "":
yield line.rstrip()
else:
break
for self.lineno, line in enumerate(readlines(self.stream), 1):
for self.column, part in self.split(line):
if part[0] == "#": # COMMENT
break
yield (self.lineno, self.column, self.tokentype(part)) |
<SYSTEM_TASK:>
Runner for haps application.
<END_TASK>
<USER_TASK:>
Description:
def run(app_class: Type[Application],
extra_module_paths: List[str] = None, **kwargs: Any) -> None:
"""
Runner for haps application.
:param app_class: :class:`~haps.application.Application` type
:param extra_module_paths: Extra modules list to autodiscover
:param kwargs: Extra arguments are passed to\
:func:`~haps.Container.autodiscover`
""" |
module = app_class.__module__
if (module == '__main__' and
extra_module_paths is None and
'module_paths' not in kwargs):
raise ConfigurationError(
'You cannot run application from __main__ module without '
'providing module_paths')
if module != '__main__':
module_paths = [app_class.__module__]
else:
module_paths = []
if extra_module_paths is not None:
module_paths.extend(extra_module_paths)
autodiscover_kwargs = {
'module_paths': module_paths,
}
autodiscover_kwargs.update(kwargs)
app_class.configure(Configuration())
Container.autodiscover(**autodiscover_kwargs)
app = app_class()
app.run() |
<SYSTEM_TASK:>
Given a dict in python-zimbra format or XML, generate
<END_TASK>
<USER_TASK:>
Description:
def from_dict(cls, d):
""" Given a dict in python-zimbra format or XML, generate
a Python object.
""" |
if type(d) != dict:
raise TypeError('Expecting a <dict>, got a {0}'.format(type(d)))
obj = cls()
obj._full_data = d
# import attributes
obj._import_attributes(d)
# import <a> child tags as dict items, see __getitem__()
obj._a_tags = obj._parse_a_tags(d)
return obj |
<SYSTEM_TASK:>
Returns a property value
<END_TASK>
<USER_TASK:>
Description:
def property(self, property_name, default=Ellipsis):
""" Returns a property value
:param default: value to return if the property is not found;
if not given, a KeyError is raised.
""" |
try:
return self._a_tags[property_name]
except KeyError:
if default != Ellipsis:
return default
else:
raise |
<SYSTEM_TASK:>
Iterates over the dictionary
<END_TASK>
<USER_TASK:>
Description:
def _unparse_a_tags(cls, attrs_dict):
""" Iterates over the dictionary
:param attrs_dict: a dict of attributes
:returns: a SimpleXMLElement list containing <a> tags
""" |
prop_tags = []
for k, v in attrs_dict.items():
node = {cls.ATTRNAME_PROPERTY: k, '_content': utils.auto_type(v)}
prop_tags.append(node)
return prop_tags |
<SYSTEM_TASK:>
Returns the dict suitable for CreateIdentity or ModifyIdentity
<END_TASK>
<USER_TASK:>
Description:
def to_creator(self):
""" Returns the dict suitable for CreateIdentity or ModifyIdentity
""" |
o = {}
for prop in ('name', 'id'):
if hasattr(self, prop):
o[prop] = getattr(self, prop)
try:
if len(self.a) > 0:
o['a'] = []
for node in self._unparse_a_tags(self._a_tags):
o['a'].append(node)
except AttributeError:
pass
return o |
<SYSTEM_TASK:>
Override default, adding the capture of members.
<END_TASK>
<USER_TASK:>
Description:
def from_dict(cls, d):
""" Override default, adding the capture of members.
""" |
o = super(DistributionList, cls).from_dict(d)
o.members = []
if 'dlm' in d:
o.members = [utils.get_content(member)
for member in utils.as_list(d["dlm"])]
return o |
<SYSTEM_TASK:>
Override default, adding the capture of content and contenttype.
<END_TASK>
<USER_TASK:>
Description:
def from_dict(cls, d):
""" Override default, adding the capture of content and contenttype.
""" |
o = super(Signature, cls).from_dict(d)
if 'content' in d:
# Sometimes, several contents, (one txt, other html), take last
try:
o._content = d['content']['_content']
o._contenttype = d['content']['type']
except TypeError:
o._content = d['content'][-1]['_content']
o._contenttype = d['content'][-1]['type']
return o |
<SYSTEM_TASK:>
Returns a dict object suitable for a 'CreateSignature'.
<END_TASK>
<USER_TASK:>
Description:
def to_creator(self, for_modify=False):
""" Returns a dict object suitable for a 'CreateSignature'.
A signature object for creation is like :
<signature name="unittest">
<content type="text/plain">My signature content</content>
</signature>
which is :
{
'name' : 'unittest',
'content': {
'type': 'text/plain',
'_content': 'My signature content'
}
}
Note that if the contenttype is text/plain, the content with text/html
will be cleared by the request (for consistency).
""" |
signature = {}
if for_modify:
try:
# we should have an ID
signature['id'] = self.id
except AttributeError:
raise AttributeError('a modify request should specify an ID')
# Case where we change or set a name
if hasattr(self, 'name'):
signature['name'] = self.name
else:
# a new signature should have a name
signature['name'] = self.name
if self.has_content():
# Set one, flush the other (otherwise, we leave leftovers behind...)
if self._contenttype == 'text/plain':
plain_text = self._content
html_text = ''
else:
html_text = self._content
plain_text = ''
content_plain = {'type': 'text/plain', '_content': plain_text}
content_html = {'type': 'text/html', '_content': html_text}
signature['content'] = [content_plain, content_html]
else:
# A creation request should have a content
if not for_modify:
raise AttributeError(
'too little information on signature, '
'run setContent before')
return signature |
<SYSTEM_TASK:>
Read list of authors from a text file, filtering comments.
<END_TASK>
<USER_TASK:>
Description:
def get_authors():
"""Read list of authors from a text file, filtering comments.""" |
authors = []
authorfile = os.path.join('doc', 'authors.txt')
with codecs.open(authorfile, 'r', 'utf-8') as f:
for line in f:
line = line.strip()
if line and not line.startswith(u'#'):
authors.append(line)
return u", ".join(authors) |
<SYSTEM_TASK:>
Restore the main dataframe
<END_TASK>
<USER_TASK:>
Description:
def restore(self):
"""
Restore the main dataframe
""" |
if self.backup_df is None:
self.warning("No dataframe is backed up: nothing restore")
return
self.df = self.backup_df
self.ok("Dataframe is restored") |
<SYSTEM_TASK:>
Load data in the main dataframe from json
<END_TASK>
<USER_TASK:>
Description:
def load_json(self, path, **kwargs):
"""Load data in the main dataframe from json
:param path: url of the json file to load,
can be absolute if it starts with ``/``
or relative if it starts with ``./``
:type path: str
:param kwargs: keyword arguments to pass to
Pandas ``read_json`` function
:example: ``ds.load_json("./myfile.json")``
""" |
try:
df = pd.read_json(path, **kwargs)
self.df = df
except Exception as e:
self.err(e, "Can not load json") |
<SYSTEM_TASK:>
Load a Hdf5 file to the main dataframe
<END_TASK>
<USER_TASK:>
Description:
def load_h5(self, filepath):
"""Load a Hdf5 file to the main dataframe
:param filepath: url of the Hdf5 file to load,
can be absolute if it starts with ``/``
or relative if it starts with ``./``
:type filepath: str
:example: ``ds.load_h5("./myfile.hdf5")``
""" |
try:
self.start("Loading Hdf5 data...")
self.df = dd.io.load(filepath)
self.end("Finished loading Hdf5 data")
except Exception as e:
self.err(e, "Can not load Hdf5 file") |
<SYSTEM_TASK:>
Set the main dataframe with the content of an Excel file
<END_TASK>
<USER_TASK:>
Description:
def load_excel(self, filepath, **kwargs):
"""Set the main dataframe with the content of an Excel file
:param filepath: url of the Excel file to load,
can be absolute if it starts with ``/``
or relative if it starts with ``./``
:type filepath: str
:param kwargs: keyword arguments to pass to
Pandas ``read_excel`` function
:example: ``ds.load_excel("./myfile.xlsx")``
""" |
try:
df = pd.read_excel(filepath, **kwargs)
if len(df.index) == 0:
self.warning("Empty Excel file. Can not set the dataframe.")
return
self.df = df
except Exception as e:
self.err(e, "Can not load Excel file") |
<SYSTEM_TASK:>
Loads csv data in the main dataframe
<END_TASK>
<USER_TASK:>
Description:
def load_csv(self, url, **kwargs):
"""Loads csv data in the main dataframe
:param url: url of the csv file to load:
can be absolute if it starts with ``/``
or relative if it starts with ``./``
:type url: str
:param kwargs: keyword arguments to pass to Pandas
``read_csv`` function
:example: ``ds.load_csv("./myfile.csv")``
""" |
self.start("Loading csv...")
try:
if self.datapath is not None and url.startswith("/") is False:
url = self.datapath + "/" + url
df = pd.read_csv(url, **kwargs)
self.df = df
except FileNotFoundError:
msg = "File " + url + " not found"
self.warning(msg)
return
except Exception as e:
self.err(e, "Can not load csv file")
return
self.end("Finished loading csv") |
<SYSTEM_TASK:>
Perform the authentication redirect to GitHub
<END_TASK>
<USER_TASK:>
Description:
def authenticate_redirect(self, callback_uri=None, cancel_uri=None,
extended_permissions=None, callback=None):
"""Perform the authentication redirect to GitHub
""" |
self.require_setting(self._CLIENT_ID_SETTING, self._API_NAME)
scope = self._BASE_SCOPE
if extended_permissions:
scope += extended_permissions
args = {'client_id': self.settings[self._CLIENT_ID_SETTING],
'redirect_uri': self.oauth2_redirect_uri(callback_uri),
'scope': ','.join(scope)}
# If cookie_secret is set, use it for GitHub's state value
if not self.state and 'cookie_secret' in self.settings:
sha1 = hashlib.sha1(self.settings['cookie_secret'])
self.state = str(sha1.hexdigest())
# If state is set, add it to args
if self.state:
args['state'] = self.state
LOGGER.info('Redirect args: %r', args)
# Redirect the user to the proper URL
self.redirect(self._OAUTH_AUTHORIZE_URL +
auth.urllib_parse.urlencode(args))
callback() |
<SYSTEM_TASK:>
Fetches the authenticated user
<END_TASK>
<USER_TASK:>
Description:
def get_authenticated_user(self, callback):
""" Fetches the authenticated user
:param method callback: The callback method to invoke
""" |
self.require_setting(self._CLIENT_ID_SETTING, self._API_NAME)
self.require_setting(self._CLIENT_SECRET_SETTING, self._API_NAME)
if self.state:
if (not self.get_argument('state', None) or
self.state != self.get_argument('state')):
LOGGER.error('State did not match: %s != %s',
self.state, self.get_argument('state'))
raise auth.AuthError('Problematic Reply from %s' %
self._API_NAME)
args = {'client_id': self.settings[self._CLIENT_ID_SETTING],
'client_secret': self.settings[self._CLIENT_SECRET_SETTING],
'code': self.get_argument('code'),
'redirect_uri': self.oauth2_redirect_uri()}
http_client = self._get_auth_http_client()
callback = self.async_callback(self._on_access_token, callback)
http_client.fetch(self._OAUTH_ACCESS_TOKEN_URL,
method='POST',
headers={'Accept': self._ACCEPT},
user_agent=self._USER_AGENT,
body=auth.urllib_parse.urlencode(args),
callback=callback) |
<SYSTEM_TASK:>
Invoked as a callback when GitHub has returned a response to the
<END_TASK>
<USER_TASK:>
Description:
def _on_access_token(self, future, response):
"""Invoked as a callback when GitHub has returned a response to the
access token request.
:param method future: The callback method to pass along
:param tornado.httpclient.HTTPResponse response: The HTTP response
""" |
content = escape.json_decode(response.body)
if 'error' in content:
LOGGER.error('Error fetching access token: %s', content['error'])
future.set_exception(auth.AuthError('Github auth error: %s' %
str(content['error'])))
return
callback = self.async_callback(self._on_github_user, future,
content['access_token'])
self.github_request('user', callback, content['access_token']) |
<SYSTEM_TASK:>
Invoked as a callback when self.github_request returns the response
<END_TASK>
<USER_TASK:>
Description:
def _on_github_user(self, future, access_token, response):
"""Invoked as a callback when self.github_request returns the response
to the request for user data.
:param method future: The callback method to pass along
:param str access_token: The access token for the user's use
:param dict response: The HTTP response already decoded
""" |
response['access_token'] = access_token
future.set_result(response) |
<SYSTEM_TASK:>
Make a request to the GitHub API, passing in the path, a callback,
<END_TASK>
<USER_TASK:>
Description:
def github_request(self, path, callback, access_token=None,
post_args=None, **kwargs):
"""Make a request to the GitHub API, passing in the path, a callback,
the access token, optional post arguments and keyword arguments to be
added as values in the request body or URI
""" |
url = self._API_URL + path
all_args = {}
if access_token:
all_args["access_token"] = access_token
all_args.update(kwargs)
if all_args:
url += "?" + auth.urllib_parse.urlencode(all_args)
callback = self.async_callback(self._on_github_request, callback)
http = self._get_auth_http_client()
if post_args is not None:
http.fetch(url, method="POST",
user_agent='Tinman/Tornado',
body=auth.urllib_parse.urlencode(post_args),
callback=callback)
else:
http.fetch(url, user_agent='Tinman/Tornado', callback=callback) |
<SYSTEM_TASK:>
Invoked as a response to the GitHub API request. Will decode the
<END_TASK>
<USER_TASK:>
Description:
def _on_github_request(self, future, response):
"""Invoked as a response to the GitHub API request. Will decode the
response and set the result on the future, or set an exception on error
""" |
try:
content = escape.json_decode(response.body)
except ValueError as error:
future.set_exception(Exception('Github error: %s' %
response.body))
return
if 'error' in content:
future.set_exception(Exception('Github error: %s' %
str(content['error'])))
return
future.set_result(content) |
<SYSTEM_TASK:>
Invoked as a callback when StackExchange has returned a response to
<END_TASK>
<USER_TASK:>
Description:
def _on_access_token(self, future, response):
"""Invoked as a callback when StackExchange has returned a response to
the access token request.
:param method future: The callback method to pass along
:param tornado.httpclient.HTTPResponse response: The HTTP response
""" |
LOGGER.info(response.body)
content = escape.json_decode(response.body)
if 'error' in content:
LOGGER.error('Error fetching access token: %s', content['error'])
future.set_exception(auth.AuthError('StackExchange auth error: %s' %
str(content['error'])))
return
callback = self.async_callback(self._on_stackexchange_user, future,
content['access_token'])
self.stackexchange_request('me', callback, content['access_token']) |
<SYSTEM_TASK:>
Invoked as a callback when self.stackexchange_request returns the
<END_TASK>
<USER_TASK:>
Description:
def _on_stackexchange_user(self, future, access_token, response):
"""Invoked as a callback when self.stackexchange_request returns the
response to the request for user data.
:param method future: The callback method to pass along
:param str access_token: The access token for the user's use
:param dict response: The HTTP response already decoded
""" |
response['access_token'] = access_token
future.set_result(response) |
<SYSTEM_TASK:>
Make a request to the StackExchange API, passing in the path, a
<END_TASK>
<USER_TASK:>
Description:
def stackexchange_request(self, path, callback, access_token=None,
post_args=None, **kwargs):
"""Make a request to the StackExchange API, passing in the path, a
callback, the access token, optional post arguments and keyword
arguments to be added as values in the request body or URI
""" |
url = self._API_URL + path
all_args = {}
if access_token:
all_args["access_token"] = access_token
all_args.update(kwargs)
if all_args:
url += "?" + auth.urllib_parse.urlencode(all_args)
callback = self.async_callback(self._on_stackexchange_request, callback)
http = self._get_auth_http_client()
if post_args is not None:
http.fetch(url, method="POST",
body=auth.urllib_parse.urlencode(post_args),
callback=callback)
else:
http.fetch(url, callback=callback) |
<SYSTEM_TASK:>
Invoked as a response to the StackExchange API request. Will decode
<END_TASK>
<USER_TASK:>
Description:
def _on_stackexchange_request(self, future, response):
"""Invoked as a response to the StackExchange API request. Will decode
the response and set the result on the future, or set an exception on error
""" |
content = escape.json_decode(response.body)
if 'error' in content:
future.set_exception(Exception('StackExchange error: %s' %
str(content['error'])))
return
future.set_result(content) |
<SYSTEM_TASK:>
You may want to override this method if you want to add custom filtering to a ViewSet while still
<END_TASK>
<USER_TASK:>
Description:
def filter_query(self, request, query, view):
"""
You may want to override this method if you want to add custom filtering to a ViewSet while still
utilizing the features of the ``AttributeFilter`` implementation.
:param request: The pyramid ``Request`` instance.
:param query: The SQLAlchemy ``Query`` instance.
:param view: An instance of the view class that the filter has been applied to.
:return: The filtered query.
""" |
if not request.params:
return query
querystring_params = self.parse_query_string(request.params)
query, filter_list = self.build_filter_list(querystring_params, query, view)
return self.apply_filter(query, filter_list) |
<SYSTEM_TASK:>
Multilingual flat page view.
<END_TASK>
<USER_TASK:>
Description:
def multilingual_flatpage(request, url):
"""
Multilingual flat page view.
Models: `multilingual.flatpages.models`
Templates: Uses the template defined by the ``template_name`` field,
or `flatpages/default.html` if template_name is not defined.
Context:
flatpage
`flatpages.flatpages` object
""" |
if not url.endswith('/') and settings.APPEND_SLASH:
return HttpResponseRedirect("%s/" % request.path)
if not url.startswith('/'):
url = "/" + url
f = get_object_or_404(MultilingualFlatPage, url__exact=url, sites__id__exact=settings.SITE_ID)
# If registration is required for accessing this page, and the user isn't
# logged in, redirect to the login page.
if f.registration_required and not request.user.is_authenticated():
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(request.path)
# Serve the content in the language defined by the Django translation module
# if possible else serve the default language.
f._default_language = get_language()
if f.template_name:
t = loader.select_template((f.template_name, DEFAULT_TEMPLATE))
else:
t = loader.get_template(DEFAULT_TEMPLATE)
# To avoid having to always use the "|safe" filter in flatpage templates,
# mark the title and content as already safe (since they are raw HTML
# content in the first place).
f.title = mark_safe(f.title)
f.content = mark_safe(f.content)
c = RequestContext(request, {
'flatpage': f,
})
response = HttpResponse(t.render(c))
populate_xheaders(request, response, MultilingualFlatPage, f.id)
return response |
<SYSTEM_TASK:>
Transform sequence pairs to feature arrays that can be used as input to `Hacrf` models.
<END_TASK>
<USER_TASK:>
Description:
def transform(self, raw_X, y=None):
"""Transform sequence pairs to feature arrays that can be used as input to `Hacrf` models.
Parameters
----------
raw_X : List of (sequence1_n, sequence2_n) pairs, one for each training example n.
y : (ignored)
Returns
-------
X : List of numpy ndarrays, each with shape = (I_n, J_n, K), where I_n is the length of sequence1_n, J_n is the
length of sequence2_n, and K is the number of features.
Feature matrix list, for use with estimators or further transformers.
""" |
return [self._extract_features(sequence1, sequence2) for sequence1, sequence2 in raw_X] |
<SYSTEM_TASK:>
Helper to extract features for one data point.
<END_TASK>
<USER_TASK:>
Description:
def _extract_features(self, sequence1, sequence2):
""" Helper to extract features for one data point. """ |
array1 = np.array(tuple(sequence1), ndmin=2).T
array2 = np.array(tuple(sequence2), ndmin=2)
K = (len(self._binary_features)
+ sum(num_feats for _, num_feats in self._sparse_features))
feature_array = np.zeros((array1.size, array2.size, K), dtype='float64')
for k, feature_function in enumerate(self._binary_features):
feature_array[..., k] = feature_function(array1, array2)
if self._sparse_features:
n_binary_features = len(self._binary_features)
for i, j in np.ndindex(len(sequence1), len(sequence2)):
k = n_binary_features
for feature_function, num_features in self._sparse_features:
feature_array[i, j, k + feature_function(i, j, sequence1, sequence2)] = 1.0
k += num_features
return feature_array |
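A shape sketch of what transform() returns, with made-up sequences and feature counts:

# raw_X = [('cat', 'cart')]          # sequence1 has length 3, sequence2 length 4
# X = transformer.transform(raw_X)   # assuming a constructed/fitted transformer
# X[0].shape == (3, 4, K)            # K = len(binary features) + sum of sparse feature widths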
<SYSTEM_TASK:>
Prints a message with an ok prefix
<END_TASK>
<USER_TASK:>
Description:
def ok(self, *msg):
"""
Prints a message with an ok prefix
""" |
label = colors.green("OK")
self._msg(label, *msg) |
<SYSTEM_TASK:>
Prints a message with an info prefix
<END_TASK>
<USER_TASK:>
Description:
def info(self, *msg):
"""
Prints a message with an info prefix
""" |
label = colors.blue("INFO")
self._msg(label, *msg) |
<SYSTEM_TASK:>
Prints a progress message
<END_TASK>
<USER_TASK:>
Description:
def progress(self, *msg):
"""
Prints a progress message
""" |
label = colors.purple("Progress")
self._msg(label, *msg) |
<SYSTEM_TASK:>
Prints a start message
<END_TASK>
<USER_TASK:>
Description:
def start(self, *msg):
"""
Prints a start message
""" |
self.start_time = datetime.datetime.now()
label = colors.purple("START")
self._msg(label, *msg) |
<SYSTEM_TASK:>
Prints an end message with elapsed time
<END_TASK>
<USER_TASK:>
Description:
def end(self, *msg):
"""
Prints an end message with elapsed time
""" |
if self.start_time is None:
self.err("No start time set: please use start() "
"before using this function")
endtime = datetime.datetime.now()
rd = dateutil.relativedelta.relativedelta(endtime, self.start_time)
endmsg = self._endmsg(rd)
label = colors.purple("END")
msg += ("in " + endmsg,)
self._msg(label, *msg)
self.start_time = None |
<SYSTEM_TASK:>
Returns a message with a label
<END_TASK>
<USER_TASK:>
Description:
def msg_(self, label, *msg):
"""
Returns a message with a label
""" |
txt = self._unpack_msg(*msg)
return "[" + label + "] " + txt |
<SYSTEM_TASK:>
Prints a message with a label
<END_TASK>
<USER_TASK:>
Description:
def _msg(self, label, *msg):
"""
Prints a message with a label
""" |
if self.quiet is False:
txt = self._unpack_msg(*msg)
print("[" + label + "] " + txt) |
<SYSTEM_TASK:>
Convert all message elements to string
<END_TASK>
<USER_TASK:>
Description:
def _unpack_msg(self, *msg):
"""
Convert all message elements to string
""" |
l = []
for m in msg:
l.append(str(m))
return " ".join(l) |
<SYSTEM_TASK:>
Returns an end message with elapsed time
<END_TASK>
<USER_TASK:>
Description:
def _endmsg(self, rd):
"""
Returns an end message with elapsed time
""" |
msg = ""
s = ""
if rd.hours > 0:
if rd.hours > 1:
s = "s"
msg += colors.bold(str(rd.hours)) + " hour" + s + " "
s = ""
if rd.minutes > 0:
if rd.minutes > 1:
s = "s"
msg += colors.bold(str(rd.minutes)) + " minute" + s + " "
# if rd.seconds > 0:
# msg+=str(rd.seconds)
# else:
# msg+="0."
milliseconds = int(rd.microseconds / 1000)
if milliseconds > 0:
msg += colors.bold(str(rd.seconds) + "." + str(milliseconds))
msg += " seconds"
return msg |
<SYSTEM_TASK:>
Performs optimizations on already parsed code.
<END_TASK>
<USER_TASK:>
Description:
def optimized(code, silent=True, ignore_errors=True):
"""Performs optimizations on already parsed code.""" |
return constant_fold(code, silent=silent, ignore_errors=ignore_errors) |
<SYSTEM_TASK:>
Adds the JOINS and SELECTS for fetching multilingual data.
<END_TASK>
<USER_TASK:>
Description:
def pre_sql_setup(self):
"""
Adds the JOINS and SELECTS for fetching multilingual data.
""" |
super(MultilingualSQLCompiler, self).pre_sql_setup()
if not self.query.include_translation_data:
return
opts = self.query.model._meta
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
if hasattr(opts, 'translation_model'):
master_table_name = self.query.join((None, opts.db_table, None, None))
translation_opts = opts.translation_model._meta
trans_table_name = translation_opts.db_table
for language_code in get_language_code_list():
table_alias = get_translation_table_alias(trans_table_name,
language_code)
trans_join = ("LEFT JOIN %s AS %s ON ((%s.master_id = %s.%s) AND (%s.language_code = '%s'))"
% (qn2(translation_opts.db_table),
qn2(table_alias),
qn2(table_alias),
qn(master_table_name),
qn2(opts.pk.column),
qn2(table_alias),
language_code))
self.query.extra_join[table_alias] = trans_join |
<SYSTEM_TASK:>
Add the JOINS for related multilingual fields filtering.
<END_TASK>
<USER_TASK:>
Description:
def get_from_clause(self):
"""
Add the JOINS for related multilingual fields filtering.
""" |
result = super(MultilingualSQLCompiler, self).get_from_clause()
if not self.query.include_translation_data:
return result
from_ = result[0]
for join in self.query.extra_join.values():
from_.append(join)
return (from_, result[1]) |
<SYSTEM_TASK:>
Filter objects which don't have a value in this language
<END_TASK>
<USER_TASK:>
Description:
def queryset(self, request):
"""
Filter objects which don't have a value in this language
""" |
qs = super(MultilingualInlineAdmin, self).queryset(request)
# Don't know what the hell I was thinking here, but this code breaks stuff:
#
# checkfield = self.get_fill_check_field()
# if checkfield is not None:
# kwargs = {str('%s_%s__isnull' % (checkfield, GLL.language_code)): False}
# from django.db.models.fields import CharField
# if isinstance(self.model._meta.translation_model._meta.get_field_by_name(checkfield)[0], CharField):
# kwargs[str('%s_%s__gt' % (checkfield, GLL.language_code))] = ''
# return qs.filter(**kwargs)
return qs.filter(translations__language_code=GLL.language_code).distinct() |
<SYSTEM_TASK:>
This is only used on models which use placeholders from the django-cms
<END_TASK>
<USER_TASK:>
Description:
def placeholder_plugin_filter(self, request, queryset):
"""
This is only used on models which use placeholders from the django-cms
""" |
if not request:
return queryset
if GLL.is_active:
return queryset.filter(language=GLL.language_code)
return queryset |
<SYSTEM_TASK:>
Concatenate dataswim instances and set the result as
<END_TASK>
<USER_TASK:>
Description:
def concat(self, *dss, **kwargs):
"""
Concatenate dataswim instances and set the result as
the main dataframe
:param dss: dataswim instances to concatenate
:type dss: Ds
:param kwargs: keyword arguments for ``pd.concat``
""" |
try:
df = pd.DataFrame()
for dsx in dss:
df = pd.concat([df, dsx.df], **kwargs)
self.df = df
except Exception as e:
self.err(e, "Can not concatenate data") |
<SYSTEM_TASK:>
Split the main dataframe according to a column's unique values and
<END_TASK>
<USER_TASK:>
Description:
def split_(self, col: str) -> "dict(Ds)":
"""
Split the main dataframe according to a column's unique values and
return a dict of dataswim instances
:return: dict of dataswim instances keyed by the column's unique values
:rtype: dict(Ds)
:example: ``dss = ds.split_("Col 1")``
""" |
try:
dss = {}
unique = self.df[col].unique()
for key in unique:
df2 = self.df.loc[self.df[col] == key]
ds2 = self._duplicate_(df2)
dss[key] = ds2
return dss
except Exception as e:
self.err(e, "Can not split dataframe") |
<SYSTEM_TASK:>
Set the main dataframe from the current dataframe and the passed
<END_TASK>
<USER_TASK:>
Description:
def merge(self, df: pd.DataFrame, on: str, how: str="outer", **kwargs):
"""
Set the main dataframe from the current dataframe and the passed
dataframe
:param df: the pandas dataframe to merge
:type df: pd.DataFrame
:param on: param for ``pd.merge``
:type on: str
:param how: param for ``pd.merge``, defaults to "outer"
:type how: str, optional
:param kwargs: keyword arguments for ``pd.merge``
""" |
try:
df = pd.merge(self.df, df, on=on, how=how, **kwargs)
self.df = df
except Exception as e:
self.err(e, self.merge, "Can not merge dataframes") |
<SYSTEM_TASK:>
Query the server for a list of caches, parse the JSON response, and
<END_TASK>
<USER_TASK:>
Description:
def caches(self, options={}):
"""Query the server for a list of caches, parse the JSON response, and
return the result.
Keyword arguments:
options -- a dict of arguments to send with the request. See
http://dev.iron.io/cache/reference/api/#list_caches for more
information on defaults and possible values.
""" |
query = urllib.urlencode(options)
url = "caches"
if query != "":
url = "%s?%s" % (url, query)
result = self.client.get(url)
return [cache["name"] for cache in result["body"]] |
<SYSTEM_TASK:>
Query the server for an item, parse the JSON, and return the result.
<END_TASK>
<USER_TASK:>
Description:
def get(self, key, cache=None):
"""Query the server for an item, parse the JSON, and return the result.
Keyword arguments:
key -- the key of the item that you'd like to retrieve. Required.
cache -- the name of the cache that the item resides in. Defaults to
None, which uses self.name. If no name is set, raises a
ValueError.
""" |
if cache is None:
cache = self.name
if cache is None:
raise ValueError("Cache name must be set")
cache = quote_plus(cache)
key = quote_plus(key)
url = "caches/%s/items/%s" % (cache, key)
result = self.client.get(url)
return Item(values=result["body"]) |
<SYSTEM_TASK:>
Query the server to set the key specified to the value specified in
<END_TASK>
<USER_TASK:>
Description:
def put(self, key, value, cache=None, options={}):
"""Query the server to set the key specified to the value specified in
the specified cache.
Keyword arguments:
key -- the name of the key to be set. Required.
value -- the value to set key to. Must be a string or JSON
serialisable. Required.
cache -- the cache to store the item in. Defaults to None, which uses
self.name. If no name is set, raises a ValueError.
options -- a dict of arguments to send with the request. See
http://dev.iron.io/cache/reference/api/#put_item for more
information on defaults and possible values.
""" |
if cache is None:
cache = self.name
if cache is None:
raise ValueError("Cache name must be set")
if not isinstance(value, str_type) and not isinstance(value, int_types):
value = json.dumps(value)
options["value"] = value
body = json.dumps(options)
cache = quote_plus(cache)
key = quote_plus(key)
result = self.client.put("caches/%s/items/%s" % (cache, key), body,
{"Content-Type": "application/json"})
return Item(cache=cache, key=key, value=value) |
<SYSTEM_TASK:>
Query the server to delete the key specified from the cache
<END_TASK>
<USER_TASK:>
Description:
def delete(self, key, cache=None):
"""Query the server to delete the key specified from the cache
specified.
Keyword arguments:
key -- the key the item is stored under. Required.
cache -- the cache to delete the item from. Defaults to None, which
uses self.name. If no name is set, raises a ValueError.
""" |
if cache is None:
cache = self.name
if cache is None:
raise ValueError("Cache name must be set")
cache = quote_plus(cache)
key = quote_plus(key)
self.client.delete("caches/%s/items/%s" % (cache, key))
return True |
<SYSTEM_TASK:>
Query the server to increment the value of the key by the specified
<END_TASK>
<USER_TASK:>
Description:
def increment(self, key, cache=None, amount=1):
"""Query the server to increment the value of the key by the specified
amount. Negative amounts can be used to decrement.
Keyword arguments:
key -- the key the item is stored under. Required.
cache -- the cache the item belongs to. Defaults to None, which uses
self.name. If no name is set, raises a ValueError.
amount -- the amount to increment the value by. Can be negative to
decrement the value. Defaults to 1.
""" |
if cache is None:
cache = self.name
if cache is None:
raise ValueError("Cache name must be set")
cache = quote_plus(cache)
key = quote_plus(key)
body = json.dumps({"amount": amount})
result = self.client.post("caches/%s/items/%s/increment" % (cache,
key), body, {"Content-Type": "application/json"})
result = result["body"]
return Item(values=result, cache=cache, key=key) |
<SYSTEM_TASK:>
A convenience function for passing negative values to increment.
<END_TASK>
<USER_TASK:>
Description:
def decrement(self, key, cache=None, amount=1):
"""A convenience function for passing negative values to increment.
Keyword arguments:
key -- the key the item is stored under. Required.
cache -- the cache the item belongs to. Defaults to None, which uses
self.name. If no name is set, raises a ValueError.
amount -- the amount to decrement the value by. Can be negative to
increment the value. Defaults to 1.
""" |
amount = amount * -1
return self.increment(key=key, cache=cache, amount=amount) |
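A hedged usage sketch of the increment/decrement pair, assuming an IronCache-style client instance named cache; the cache and key names are made up:

# cache.put('hits', 0, cache='stats')
# cache.increment('hits', cache='stats', amount=5)   # value becomes 5
# cache.decrement('hits', cache='stats', amount=2)   # delegates to increment(amount=-2) -> 3
# item = cache.get('hits', cache='stats')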
<SYSTEM_TASK:>
Dumps a representation of the Model on standard output.
<END_TASK>
<USER_TASK:>
Description:
def dump(self, *args, **kwargs):
"""Dumps a representation of the Model on standard output.""" |
lxml.etree.dump(self._obj, *args, **kwargs) |
<SYSTEM_TASK:>
Get start URL by "bouncing" back and forth one time.
<END_TASK>
<USER_TASK:>
Description:
def bounceStarter(url, nextSearch):
"""Get start URL by "bouncing" back and forth one time.""" |
@classmethod
def _starter(cls):
"""Get bounced start URL."""
data = cls.getPage(url)
url1 = cls.fetchUrl(url, data, cls.prevSearch)
data = cls.getPage(url1)
return cls.fetchUrl(url1, data, nextSearch)
return _starter |
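To make the "bounce" concrete, a self-contained toy; all names and page data are hypothetical, and it assumes bounceStarter above is importable:

class FakeScraper:
    prevSearch = 'prev'
    pages = {'page2': {'prev': 'page1', 'next': 'page3'},
             'page1': {'prev': None, 'next': 'page2'}}

    @classmethod
    def getPage(cls, url):
        return cls.pages[url]

    @classmethod
    def fetchUrl(cls, url, data, search):
        return data[search]

    # from 'page2' bounce back once (prevSearch -> 'page1'), then forward once
    starter = bounceStarter('page2', 'next')

print(FakeScraper.starter())   # 'page2'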
<SYSTEM_TASK:>
Return the availability of one or more domain names.
<END_TASK>
<USER_TASK:>
Description:
def check_domain_request(self, domains):
"""
Return the availability of one or more domain names.
The availability is a model containing a domain and a status. It can also have a premium
attribute in case the domain has non-default costs.
""" |
request = E.checkDomainRequest(
E.domains(
E.array(
*[E.item(
E.name(domain.split(".")[0]),
E.extension(domain.split(".")[1])
) for domain in domains]
)
)
)
response = self.request(request)
return [Model(item) for item in response.data.array.item] |
<SYSTEM_TASK:>
Returns a DataSwim instance with a column filled from a relation foreign key
<END_TASK>
<USER_TASK:>
Description:
def relation_(self, table, origin_field, search_field, destination_field=None,
id_field="id"):
"""
Returns a DataSwim instance with a column filled from a relation foreign key
""" |
df = self._relation(table, origin_field,
search_field, destination_field, id_field)
return self._duplicate_(df) |
<SYSTEM_TASK:>
Clean pre-existing links in output directory.
<END_TASK>
<USER_TASK:>
Description:
def prepare_output(d):
"""Clean pre-existing links in output directory.""" |
outDir = os.path.join(d, 'inorder')
if not os.path.exists(outDir):
os.mkdir(outDir)
for f in os.listdir(outDir):
f = os.path.join(outDir, f)
if os.path.islink(f):
os.remove(f)
return outDir |
<SYSTEM_TASK:>
Create new symbolic links in output directory.
<END_TASK>
<USER_TASK:>
Description:
def create_symlinks(d):
"""Create new symbolic links in output directory.""" |
data = loadJson(d)
outDir = prepare_output(d)
unseen = list(data["pages"].keys())
while len(unseen) > 0:
latest = work = unseen[0]
while work in unseen:
unseen.remove(work)
if "prev" in data["pages"][work]:
work = data["pages"][work]["prev"]
print("Latest page: %s" % (latest))
order = []
work = latest
while work in data["pages"]:
order.extend(data["pages"][work]["images"].values())
if "prev" in data["pages"][work]:
work = data["pages"][work]["prev"]
else:
work = None
order.reverse()
for i, img in enumerate(order):
os.symlink(os.path.join('..', img), os.path.join(outDir, '%05i_%s' % (i, img))) |
<SYSTEM_TASK:>
Parses source code returns an array of instructions suitable for
<END_TASK>
<USER_TASK:>
Description:
def parse(source):
"""Parses source code returns an array of instructions suitable for
optimization and execution by a Machine.
Args:
source: A string or stream containing source code.
""" |
if isinstance(source, str):
return parse_stream(six.StringIO(source))
else:
return parse_stream(source) |
<SYSTEM_TASK:>
Parse a Forth-like language and return code.
<END_TASK>
<USER_TASK:>
Description:
def parse_stream(stream):
"""Parse a Forth-like language and return code.""" |
code = []
for (line, col, (token, value)) in Tokenizer(stream).tokenize():
if token == Tokenizer.STRING:
value = '"' + value + '"'
code.append(value)
return code |
<SYSTEM_TASK:>
Variable resolver decorator. Function or method decorated with it is
<END_TASK>
<USER_TASK:>
Description:
def resolver(cls, var_name: str) -> FunctionType:
"""
Variable resolver decorator. Function or method decorated with it is
used to resolve the config variable.
.. note::
The variable is resolved only once;
subsequent gets are served from the cache.
:param var_name: Variable name
:return: Function decorator
""" |
def dec(f):
if var_name in cls().resolvers:
raise ConfigurationError(
f'Resolver for {var_name} already registered')
cls().resolvers[var_name] = f
return f
return dec |
<SYSTEM_TASK:>
Method for configuring environment resolver.
<END_TASK>
<USER_TASK:>
Description:
def env_resolver(cls, var_name: str, env_name: str = None,
default: Any = _NONE) -> 'Configuration':
"""
Method for configuring environment resolver.
:param var_name: Variable name
:param env_name: An optional environment variable name. If not set\
haps looks for `HAPS_var_name`
:param default: Default value for variable. If it's a callable,\
is called before return. If not provided\
:class:`~haps.exceptions.UnknownConfigVariable` is raised
:return: :class:`~haps.config.Configuration` instance for easy\
chaining
""" |
cls.resolver(var_name)(
partial(
_env_resolver, var_name=var_name, env_name=env_name,
default=default))
return cls() |
<SYSTEM_TASK:>
Set the variable
<END_TASK>
<USER_TASK:>
Description:
def set(cls, var_name: str, value: Any) -> 'Configuration':
"""
Set the variable
:param var_name: Variable name
:param value: Value of variable
:return: :class:`~haps.config.Configuration` instance for easy\
chaining
""" |
with cls._lock:
if var_name not in cls().cache:
cls().cache[var_name] = value
else:
raise ConfigurationError(
f'Value for {var_name} already set')
return cls() |
<SYSTEM_TASK:>
Hook for controlling the creation of a model instance. Override this if you need to do more with your
<END_TASK>
<USER_TASK:>
Description:
def perform_create(self, data):
"""
Hook for controlling the creation of a model instance. Override this if you need to do more with your
data before saving your object than just mapping the deserialized data to a new instance of ``self.model``.
""" |
instance = self.model(**data)
self.request.dbsession.add(instance)
self.request.dbsession.flush()
return instance |
<SYSTEM_TASK:>
Print just the top level of an object, being sure to show where
<END_TASK>
<USER_TASK:>
Description:
def top_level(self):
"""
Print just the top level of an object, being sure to show where
it goes deeper
""" |
output = {}
if isinstance(self.obj, dict):
for name, item in self.obj.items():
if isinstance(item, dict):
if item:
output[name] = StrReprWrapper('{...}')
else:
output[name] = StrReprWrapper('{}')
elif isinstance(item, list):
if item:
output[name] = StrReprWrapper('[...]')
else:
output[name] = StrReprWrapper('[]')
else:
output[name] = item
return output
else:
return self.obj |
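A sketch of the summary top_level() produces, with a made-up object; StrReprWrapper is assumed to render its string unquoted:

# self.obj = {'name': 'demo', 'config': {'debug': True}, 'tags': [], 'items': [1, 2]}
# top_level() -> {'name': 'demo', 'config': {...}, 'tags': [], 'items': [...]}
# nested containers are collapsed to markers showing where the data goes deeper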
<SYSTEM_TASK:>
Returns a closure that pushed the given value onto a Machine's stack.
<END_TASK>
<USER_TASK:>
Description:
def make_embedded_push(value):
"""Returns a closure that pushed the given value onto a Machine's stack.
We use this to embed stack pushes in the VM code, so that the interpreter
can assume that all instructions are callable Python functions. This makes
dispatching much faster than checking if an instruction is a constant
(number, string, etc) or a Python function.
""" |
push = lambda vm: vm.push(value)
push.tag = EMBEDDED_PUSH_TAG
return push |
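A minimal sketch of the embedded-push idea, assuming make_embedded_push and get_embedded_push_value above are importable; TinyVM is a made-up stand-in for the Machine:

class TinyVM:
    def __init__(self):
        self.stack = []
    def push(self, value):
        self.stack.append(value)

vm = TinyVM()
push_42 = make_embedded_push(42)          # a callable instruction like any other
push_42(vm)
print(vm.stack)                           # [42]
print(get_embedded_push_value(push_42))   # 42, read back from the closure cell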
<SYSTEM_TASK:>
Extracts the embedded push value.
<END_TASK>
<USER_TASK:>
Description:
def get_embedded_push_value(obj):
"""Extracts the embedded push value.""" |
assert(is_embedded_push(obj))
assert(len(obj.__closure__) == 1)
return obj.__closure__[0].cell_contents |
<SYSTEM_TASK:>
Checks code for obvious errors.
<END_TASK>
<USER_TASK:>
Description:
def check(code):
"""Checks code for obvious errors.""" |
def safe_lookup(op):
try:
return instructions.lookup(op)
except Exception:
return op
for i, a in enumerate(code):
b = code[i+1] if i+1 < len(code) else None
# Does instruction exist?
if not isconstant(a):
try:
instructions.lookup(a)
except KeyError as err:
# Skip embedded push closures
if not (len(err.args)==1 and is_embedded_push(err.args[0])):
raise CompileError("Instruction at index %d is unknown: %s"
% (i, a))
# Invalid: <str> int
if isstring(a) and safe_lookup(b) == instructions.cast_int:
raise CompileError(
"Cannot convert string to integer (index %d): %s %s" % (i, a,
b))
# Invalid: <int> <binary op>
boolean_ops = [instructions.boolean_not,
instructions.boolean_or,
instructions.boolean_and]
if not isbool(a) and safe_lookup(b) in boolean_ops:
raise CompileError(
"Can only use binary operators on booleans (index %d): %s %s" %
(i, a, b))
return code |