text_prompt
stringlengths 100
17.7k
⌀ | code_prompt
stringlengths 7
9.86k
⌀ |
---|---|
<SYSTEM_TASK:>
Find coordinate types based on shape name and coordlist length
<END_TASK>
<USER_TASK:>
Description:
def _generate_arg_types(coordlist_length, shape_name):
    """Find coordinate types based on shape name and coordlist length.

    This function returns a list of coordinate types based on which
    coordinates can be repeated for a given type of shape.

    Parameters
    ----------
    coordlist_length : int
        The number of coordinates or arguments used to define the shape.
    shape_name : str
        One of the names in `pyregion.ds9_shape_defs`.

    Returns
    -------
    arg_types : list
        A list of objects from `pyregion.region_numbers` with a length equal to
        coordlist_length.
    """
    from .ds9_region_parser import ds9_shape_defs
    from .ds9_attr_parser import ds9_shape_in_comment_defs

    # Shapes live either in the main definition table or in the
    # "in-comment" table; look in the main one first.
    try:
        shape_def = ds9_shape_defs[shape_name]
    except KeyError:
        shape_def = ds9_shape_in_comment_defs[shape_name]

    base_types = shape_def.args_list
    repeat_span = shape_def.args_repeat
    if repeat_span is None:
        # Nothing repeats; the declared list already matches.
        return base_types

    # The types between `start` and `stop` may repeat; figure out how many
    # whole cycles are needed to cover every supplied coordinate.
    start, stop = repeat_span
    repeat_count = coordlist_length - (len(base_types) - stop)
    n_cycles = (repeat_count - start) // (stop - start)

    result = list(base_types[:start])
    result.extend(n_cycles * base_types[start:stop])
    result.extend(base_types[stop:])
    return result
<SYSTEM_TASK:>
Convert the coordlist of `shape` to image coordinates
<END_TASK>
<USER_TASK:>
Description:
def convert_to_imagecoord(shape, header):
    """Convert the coordlist of `shape` to image coordinates.

    Parameters
    ----------
    shape : `pyregion.parser_helper.Shape`
        The `Shape` to convert coordinates
    header : `~astropy.io.fits.Header`
        Specifies what WCS transformations to use.

    Returns
    -------
    new_coordlist : list
        A list of image coordinates defining the shape.
    """
    arg_types = _generate_arg_types(len(shape.coord_list), shape.name)

    new_coordlist = []
    is_even_distance = True
    coord_list_iter = iter(zip(shape.coord_list, arg_types))

    new_wcs = WCS(header)
    pixel_scales = proj_plane_pixel_scales(new_wcs)

    for coordinate, coordinate_type in coord_list_iter:
        if coordinate_type == CoordOdd:
            # Sky positions come as (odd, even) pairs; pull the matching
            # even coordinate off the iterator and convert both at once.
            even_coordinate = next(coord_list_iter)[0]

            old_coordinate = SkyCoord(coordinate, even_coordinate,
                                      frame=shape.coord_format, unit='degree',
                                      obstime='J2000')

            # np.asscalar() was deprecated in NumPy 1.16 and removed in
            # 1.23; ndarray.item() is the documented replacement.
            new_coordlist.extend(
                x.item()
                for x in old_coordinate.to_pixel(new_wcs, origin=1)
            )
        elif coordinate_type == Distance:
            if arg_types[-1] == Angle:
                # Rotated shapes alternate between the two axis scales.
                degree_per_pixel = pixel_scales[0 if is_even_distance else 1]
                is_even_distance = not is_even_distance
            else:
                # Isotropic fallback: geometric mean of the pixel area.
                degree_per_pixel = np.sqrt(proj_plane_pixel_area(new_wcs))
            new_coordlist.append(coordinate / degree_per_pixel)
        elif coordinate_type == Angle:
            new_angle = _estimate_angle(coordinate,
                                        shape.coord_format,
                                        header)
            new_coordlist.append(new_angle)
        else:
            # Plain numbers (e.g. integers) pass through unchanged.
            new_coordlist.append(coordinate)

    return new_coordlist
<SYSTEM_TASK:>
Get authentication details to jottacloud.
<END_TASK>
<USER_TASK:>
Description:
def get_auth_info():
    """Get authentication details to jottacloud.

    Will first check environment variables, then the .netrc file.
    """
    env_username = os.environ.get('JOTTACLOUD_USERNAME')
    env_password = os.environ.get('JOTTACLOUD_PASSWORD')

    netrc_username = netrc_password = None
    try:
        credentials = netrc.netrc().authenticators('jottacloud.com')
    except IOError:
        # no .netrc file present
        credentials = None
    if credentials:
        netrc_username, _, netrc_password = credentials

    # Environment variables win over .netrc entries.
    username = env_username or netrc_username
    password = env_password or netrc_password
    if not (username and password):
        raise JFSError('Could not find username and password in either env or ~/.netrc, '
                       'you need to add one of these to use these tools')
    return (username, password)
<SYSTEM_TASK:>
Utility function to calculate md5 hashes while being light on memory usage.
<END_TASK>
<USER_TASK:>
Description:
def calculate_md5(fileobject, size=2**16):
    """Utility function to calculate md5 hashes while being light on memory usage.

    By reading the fileobject piece by piece, we are able to process content that
    is larger than available memory.

    Parameters
    ----------
    fileobject : file-like
        An open, seekable file object; it is rewound before and after hashing.
    size : int
        Chunk size in bytes (or characters, for text files) per read.

    Returns
    -------
    str
        Hex digest of the full content.
    """
    fileobject.seek(0)
    md5 = hashlib.md5()
    # The b'' sentinel only terminates byte streams; text-mode files return
    # '' at EOF, which the explicit falsy check below catches instead.
    for data in iter(lambda: fileobject.read(size), b''):
        if not data:
            break
        # str check replaces the old six.text_type: on Python 3 they are
        # the same thing, and md5 needs bytes.
        if isinstance(data, str):
            data = data.encode('utf-8')
        md5.update(data)
    fileobject.seek(0)  # rewind read head
    return md5.hexdigest()
<SYSTEM_TASK:>
Bytes uploaded of the file so far.
<END_TASK>
<USER_TASK:>
Description:
def size(self):
    """Bytes uploaded of the file so far.

    Note that we only have the file size if the file was requested directly,
    not if it's part of a folder listing.
    """
    revision = self.f.latestRevision
    try:
        raw_size = revision.size
    except AttributeError:
        # Folder listings don't carry a size attribute.
        return None
    return int(raw_size)
<SYSTEM_TASK:>
Create a new mountpoint
<END_TASK>
<USER_TASK:>
Description:
def new_mountpoint(self, name):
    """Create a new mountpoint.

    Issues a form-encoded POST to <device path>/<name> and returns the
    server's response.
    """
    target = posixpath.join(self.path, name)
    headers = {'content-type': 'application/x-www-form-urlencoded'}
    return self._jfs.post(target, extra_headers=headers)
<SYSTEM_TASK:>
Open, read and parse DS9 region file.
<END_TASK>
<USER_TASK:>
Description:
def open(fname):
    """Open, read and parse DS9 region file.

    Parameters
    ----------
    fname : str
        Filename

    Returns
    -------
    shapes : `ShapeList`
        List of `~pyregion.Shape`
    """
    # This module-level `open` shadows the builtin, hence _builtin_open.
    with _builtin_open(fname) as region_file:
        contents = region_file.read()
    return parse(contents)
<SYSTEM_TASK:>
Read region as image coordinates.
<END_TASK>
<USER_TASK:>
Description:
def read_region_as_imagecoord(s, header):
    """Read region as image coordinates.

    Parameters
    ----------
    s : str
        Region string
    header : `~astropy.io.fits.Header`
        FITS header

    Returns
    -------
    shapes : `~pyregion.ShapeList`
        List of `~pyregion.Shape`
    """
    parser = RegionParser()
    # Pipeline: parse -> attach attributes -> sanity-check WCS ->
    # convert to image coordinates -> keep only actual shapes.
    parsed = parser.parse(s)
    with_attrs = parser.convert_attr(parsed)
    checked = _check_wcs(with_attrs)
    in_image = parser.sky_to_image(checked, header)
    return ShapeList(parser.filter_shape(in_image))
<SYSTEM_TASK:>
New shape list in image coordinates.
<END_TASK>
<USER_TASK:>
Description:
def as_imagecoord(self, header):
    """New shape list in image coordinates.

    Parameters
    ----------
    header : `~astropy.io.fits.Header`
        FITS header

    Returns
    -------
    shape_list : `ShapeList`
        New shape list, with coordinates of the each shape
        converted to the image coordinate using the given header
        information.
    """
    comments = self._comment_list
    if comments is None:
        # No per-shape comments recorded: pair every shape with None.
        comments = cycle([None])

    converted = RegionParser.sky_to_image(zip(self, comments), header)
    shapes, new_comments = zip(*list(converted))
    return ShapeList(shapes, comment_list=new_comments)
<SYSTEM_TASK:>
Write this shape list to a region file.
<END_TASK>
<USER_TASK:>
Description:
def write(self, outfile):
    """Write this shape list to a region file.

    Parameters
    ----------
    outfile : str
        File name

    Raises
    ------
    IOError
        If the file cannot be created; the original error is re-raised
        with the offending file name appended to its message.
    """
    def _annotate(err):
        # Append the file name to the IOError before re-raising, so the
        # caller sees which region file failed. (Was duplicated inline in
        # both except blocks before.)
        cmsg = "Unable to create region file '{:s}'.".format(outfile)
        if err.args:
            err.args = (err.args[0] + '\n' + cmsg,) + err.args[1:]
        else:
            err.args = (cmsg,)
        return err

    if len(self) < 1:
        print("WARNING: The region list is empty. The region file "
              "'{:s}' will be empty.".format(outfile))
        try:
            # Still create (truncate) the file so the promised output
            # path exists.
            with _builtin_open(outfile, 'w'):
                pass
            return
        except IOError as e:
            raise _annotate(e)

    prev_cs = self[0].coord_format
    try:
        # `with` guarantees the file is closed even on mid-write errors
        # (replaces the manual try/finally/close dance).
        with _builtin_open(outfile, 'w') as outf:
            attr0 = self[0].attr[1]
            defaultline = " ".join(["{:s}={:s}".format(a, attr0[a])
                                    for a in attr0 if a != 'text'])
            # first line is globals
            outf.write("global {0}\n".format(defaultline))
            # second line must be a coordinate format
            outf.write("{0}\n".format(prev_cs))

            for shape in self:
                # Prefix a coordinate-system override only when the shape
                # differs from the file-level format.
                shape_attr = '' if prev_cs == shape.coord_format \
                    else shape.coord_format + "; "
                shape_excl = '-' if shape.exclude else ''
                text_coordlist = ["{:f}".format(f) for f in shape.coord_list]
                shape_coords = "(" + ",".join(text_coordlist) + ")"
                shape_comment = " # " + shape.comment if shape.comment else ''
                shape_str = (shape_attr + shape_excl + shape.name +
                             shape_coords + shape_comment)
                outf.write("{0}\n".format(shape_str))
    except IOError as e:
        raise _annotate(e)
<SYSTEM_TASK:>
This is the default handler. It checks for feature flags in the current app's configuration.
<END_TASK>
<USER_TASK:>
Description:
def AppConfigFlagHandler(feature=None):
    """ This is the default handler. It checks for feature flags in the current app's configuration.

    For example, to have 'unfinished_feature' hidden in production but active in development:

    config.py

    class ProductionConfig(Config):
        FEATURE_FLAGS = {
            'unfinished_feature' : False,
        }

    class DevelopmentConfig(Config):
        FEATURE_FLAGS = {
            'unfinished_feature' : True,
        }
    """
    if not current_app:
        # Outside an application context there is no config to consult.
        # logger.warn() is a deprecated alias; use warning().
        log.warning(u"Got a request to check for {feature} but we're outside the request context. Returning False".format(feature=feature))
        return False
    try:
        return current_app.config[FEATURE_FLAGS_CONFIG][feature]
    except (AttributeError, KeyError):
        # AttributeError: config missing entirely; KeyError: flag not
        # defined. Either way, let the next handler in the chain try.
        raise NoFeatureFlagFound()
<SYSTEM_TASK:>
Check if a feature is active
<END_TASK>
<USER_TASK:>
Description:
def is_active(feature):
    """ Check if a feature is active.

    Returns the extension's verdict for `feature`, or False (with a
    warning) when called outside an application context.

    Raises
    ------
    AssertionError
        If the app exists but the extension was never initialized on it.
    """
    if current_app:
        feature_flagger = current_app.extensions.get(EXTENSION_NAME)
        if feature_flagger:
            return feature_flagger.check(feature)
        else:
            raise AssertionError("Oops. This application doesn't have the Flask-FeatureFlag extention installed.")
    else:
        # logger.warn() is a deprecated alias; use warning().
        log.warning(u"Got a request to check for {feature} but we're running outside the request context. Check your setup. Returning False".format(feature=feature))
        return False
<SYSTEM_TASK:>
Decorator for Flask views. If a feature is off, it can either return a 404 or redirect to a URL if you'd rather.
<END_TASK>
<USER_TASK:>
Description:
def is_active_feature(feature, redirect_to=None, redirect=None):
    """
    Decorator for Flask views. If a feature is off, it can either return a 404 or redirect to a URL if you'd rather.
    """
    def _is_active_feature(func):
        @wraps(func)
        def wrapped(*args, **kwargs):
            if is_active(feature):
                return func(*args, **kwargs)

            # Feature is off: a named endpoint (`redirect`) takes
            # precedence over a literal URL (`redirect_to`).
            url = url_for(redirect) if redirect else redirect_to
            if url:
                log.debug(u'Feature {feature} is off, redirecting to {url}'.format(feature=feature, url=url))
                return _redirect(url, code=302)
            log.debug(u'Feature {feature} is off, aborting request'.format(feature=feature))
            abort(404)

        return wrapped
    return _is_active_feature
<SYSTEM_TASK:>
Add ourselves into the app config and setup, and add a jinja function test
<END_TASK>
<USER_TASK:>
Description:
def init_app(self, app):
    """ Add ourselves into the app config and setup, and add a jinja function test """
    config = app.config
    config.setdefault(FEATURE_FLAGS_CONFIG, {})
    config.setdefault(RAISE_ERROR_ON_MISSING_FEATURES, False)

    if hasattr(app, "add_template_test"):
        # flask 0.10 and higher has a proper hook for registering tests
        app.add_template_test(self.check, name=self.JINJA_TEST_NAME)
    else:
        # older flask: poke the jinja environment directly
        app.jinja_env.tests[self.JINJA_TEST_NAME] = self.check

    # Register ourselves in the standard extensions registry.
    if not hasattr(app, 'extensions'):
        app.extensions = {}
    app.extensions[EXTENSION_NAME] = self
<SYSTEM_TASK:>
Loop through all our feature flag checkers and return true if any of them are true.
<END_TASK>
<USER_TASK:>
Description:
def check(self, feature):
    """ Loop through all our feature flag checkers and return true if any of them are true.

    The order of handlers matters - we will immediately return True if any handler returns true.

    If you want to a handler to return False and stop the chain, raise the StopCheckingFeatureFlags exception."""
    found = False
    for handler in self.handlers:
        try:
            if handler(feature):
                return True
        except StopCheckingFeatureFlags:
            # A handler explicitly vetoed the feature; stop the chain.
            return False
        except NoFeatureFlagFound:
            # This handler doesn't know the flag; try the next one.
            pass
        else:
            # NOTE: this `else` belongs to the try/except — it runs when a
            # handler knew the flag, even if it returned a falsy value.
            found = True
    if not found:
        # No handler had the flag defined at all: optionally raise in
        # debug mode, otherwise log and emit the missing_feature signal.
        message = u"No feature flag defined for {feature}".format(feature=feature)
        if current_app.debug and current_app.config.get(RAISE_ERROR_ON_MISSING_FEATURES, False):
            raise KeyError(message)
        else:
            log.info(message)
            missing_feature.send(self, feature=feature)
    return False
<SYSTEM_TASK:>
Clear cache of results from a specific path
<END_TASK>
<USER_TASK:>
Description:
def yank_path(self, path):
    """Clear cache of results from a specific path.

    Removes every memoized entry whose first key element contains `path`.
    """
    for func in self._caches:
        cache = self._caches[func]
        # Iterate a snapshot of the keys: deleting from a dict while
        # iterating its live key view raises RuntimeError in Python 3.
        for key in list(cache.keys()):
            log.debug("cache key %s for func %s", key, func)
            if path in key[0]:
                log.debug("del cache key %s", key)
                del cache[key]
<SYSTEM_TASK:>
Converts a `ShapeList` into shapes with coordinates in image coordinates
<END_TASK>
<USER_TASK:>
Description:
def sky_to_image(shape_list, header):
    """Converts a `ShapeList` into shapes with coordinates in image coordinates

    Parameters
    ----------
    shape_list : `pyregion.ShapeList`
        The ShapeList to convert
    header : `~astropy.io.fits.Header`
        Specifies what WCS transformations to use.

    Yields
    -------
    shape, comment : Shape, str
        Shape with image coordinates and the associated comment

    Note
    ----
    The comments in the original `ShapeList` are unaltered
    """
    for shape, comment in shape_list:
        if not isinstance(shape, Shape):
            # Non-shape entries (e.g. property lines) pass through as-is.
            yield shape, comment
        elif shape.coord_format not in image_like_coordformats:
            # Sky coordinates: convert via the WCS in the header.
            converted = copy.copy(shape)
            converted.coord_list = convert_to_imagecoord(shape, header)
            converted.coord_format = "image"
            yield converted, comment
        elif shape.coord_format == "physical":
            if header is None:
                raise RuntimeError("Physical coordinate is not known.")
            converted = copy.copy(shape)
            converted.coord_list = convert_physical_to_imagecoord(shape, header)
            converted.coord_format = "image"
            yield converted, comment
        else:
            # Already image-like: nothing to do.
            yield shape, comment
<SYSTEM_TASK:>
Transform an angle into a different frame
<END_TASK>
<USER_TASK:>
Description:
def _estimate_angle(angle, reg_coordinate_frame, header):
    """Transform an angle into a different frame

    Parameters
    ----------
    angle : float, int
        The number of degrees, measured from the Y axis in origin's frame
    reg_coordinate_frame : str
        Coordinate frame in which ``angle`` is defined
    header : `~astropy.io.fits.Header` instance
        Header describing the image

    Returns
    -------
    angle : float
        The angle, measured from the Y axis in the WCS defined by ``header``
    """
    # Subtract how far "north" in the region's frame is rotated when
    # projected into this image's pixel grid.
    rotation = _calculate_rotation_angle(reg_coordinate_frame, header)
    return angle - rotation
<SYSTEM_TASK:>
Calculates the rotation angle from the region to the header's frame
<END_TASK>
<USER_TASK:>
Description:
def _calculate_rotation_angle(reg_coordinate_frame, header):
    """Calculates the rotation angle from the region to the header's frame

    This attempts to be compatible with the implementation used by SAOImage
    DS9. In particular, this measures the rotation of the north axis as
    measured at the center of the image, and therefore requires a
    `~astropy.io.fits.Header` object with defined 'NAXIS1' and 'NAXIS2'
    keywords.

    Parameters
    ----------
    reg_coordinate_frame : str
        Coordinate frame used by the region file

    header : `~astropy.io.fits.Header` instance
        Header describing the image

    Returns
    -------
    y_axis_rot : float
        Degrees by which the north axis in the region's frame is rotated when
        transformed to pixel coordinates
    """
    new_wcs = WCS(header)
    # Build a throwaway coordinate only to learn the frame's equinox,
    # then rebuild the frame carrying that equinox explicitly.
    region_frame = SkyCoord(
        '0d 0d',
        frame=reg_coordinate_frame,
        obstime='J2000')
    region_frame = SkyCoord(
        '0d 0d',
        frame=reg_coordinate_frame,
        obstime='J2000',
        equinox=region_frame.equinox)

    # Reference point: the image center, expressed in the region's frame.
    origin = SkyCoord.from_pixel(
        header['NAXIS1'] / 2,
        header['NAXIS2'] / 2,
        wcs=new_wcs,
        origin=1).transform_to(region_frame)

    # Step one pixel-scale due north of the origin and observe which way
    # that displacement points in pixel space.
    offset = proj_plane_pixel_scales(new_wcs)[1]

    origin_x, origin_y = origin.to_pixel(new_wcs, origin=1)
    origin_lon = origin.data.lon.degree
    origin_lat = origin.data.lat.degree

    offset_point = SkyCoord(
        origin_lon, origin_lat + offset, unit='degree',
        frame=origin.frame.name, obstime='J2000')
    offset_x, offset_y = offset_point.to_pixel(new_wcs, origin=1)

    # Angle of the north direction in pixel coordinates, in degrees.
    north_rot = np.arctan2(
        offset_y - origin_y,
        offset_x - origin_x) / np.pi * 180.

    cdelt = new_wcs.wcs.get_cdelt()
    # Same-sign cdelt: normal orientation. Mixed signs mean one axis is
    # flipped, which mirrors the rotation direction (matches DS9).
    if (cdelt > 0).all() or (cdelt < 0).all():
        return north_rot - 90
    else:
        return -(north_rot - 90)
<SYSTEM_TASK:>
Create and return a SyncFile tuple from filename.
<END_TASK>
<USER_TASK:>
Description:
def sf(f, dirpath, jottapath):
    """Create and return a SyncFile tuple from filename.

    localpath will be a byte string with utf8 code points
    jottapath will be a unicode string"""
    log.debug('Create SyncFile from %s', repr(f))
    encoded_name = _encode_filename_to_filesystem(f)
    log.debug('Got encoded filename %r, joining with dirpath %r', encoded_name, dirpath)
    return SyncFile(
        localpath=os.path.join(dirpath, encoded_name),
        jottapath=posixpath.join(_decode_filename_to_unicode(jottapath),
                                 _decode_filename_to_unicode(f)))
<SYSTEM_TASK:>
Make a tree of local files and folders and compare it with what's currently on JottaCloud.
<END_TASK>
<USER_TASK:>
Description:
def compare(localtopdir, jottamountpoint, JFS, followlinks=False, exclude_patterns=None):
    """Make a tree of local files and folders and compare it with what's currently on JottaCloud.

    For each folder, yields:
        dirpath, # byte string, full path
        onlylocal, # set(), files that only exist locally, i.e. newly added files that don't exist online,
        onlyremote, # set(), files that only exist in the JottaCloud, i.e. deleted locally
        bothplaces # set(), files that exist both locally and remotely
        onlyremotefolders, # set(), folders that only exist in the JottaCloud, i.e. deleted locally
    """
    def excluded(unicodepath, fname):
        # Returns True when `fname` should be left out of the comparison.
        fpath = os.path.join(unicodepath, _decode_filename_to_unicode(fname))
        # skip FIFOs, block devices, character devices and the like, see bug#129
        mode = os.stat(fpath).st_mode
        if not (stat.S_ISREG(mode) or
                stat.S_ISLNK(mode) or
                stat.S_ISDIR(mode)):  # we only like regular files, dirs or symlinks
            return True
        if exclude_patterns is None:
            return False
        # Any matching user-supplied regex excludes the path.
        for p in exclude_patterns:
            if p.search(fpath):
                log.debug("%r excluded by pattern %r", fpath, p.pattern)
                return True
        return False
    bytestring_localtopdir = _encode_filename_to_filesystem(localtopdir)
    for dirpath, dirnames, filenames in os.walk(bytestring_localtopdir, followlinks=followlinks):
        # to keep things explicit, and avoid encoding/decoding issues,
        # keep a bytestring AND a unicode variant of dirpath
        dirpath = _encode_filename_to_filesystem(dirpath)
        unicodepath = _decode_filename_to_unicode(dirpath)
        log.debug("compare walk: %r -> %s files ", unicodepath, len(filenames))
        # create set()s of local files and folders
        # paths will be unicode strings
        localfiles = set([f for f in filenames if not excluded(unicodepath, f)])  # these are on local disk
        localfolders = set([f for f in dirnames if not excluded(unicodepath, f)])  # these are on local disk
        jottapath = get_jottapath(localtopdir, unicodepath, jottamountpoint)  # translate to jottapath
        log.debug("compare jottapath: %r", jottapath)
        # create set()s of remote files and folders
        # paths will be unicode strings
        cloudfiles = filelist(jottapath, JFS)  # set(). these are on jottacloud
        cloudfolders = folderlist(jottapath, JFS)
        log.debug("--cloudfiles: %r", cloudfiles)
        log.debug("--localfiles: %r", localfiles)
        log.debug("--cloudfolders: %r", cloudfolders)
        # NOTE(review): despite the docstring, the four collections below are
        # lists of SyncFile tuples, not set()s — confirm before relying on
        # set semantics in callers.
        onlylocal = [sf(f, dirpath, jottapath) for f in localfiles.difference(cloudfiles)]
        onlyremote = [sf(f, dirpath, jottapath) for f in cloudfiles.difference(localfiles)]
        bothplaces = [sf(f, dirpath, jottapath) for f in localfiles.intersection(cloudfiles)]
        onlyremotefolders = [sf(f, dirpath, jottapath) for f in cloudfolders.difference(localfolders)]
        yield dirpath, onlylocal, onlyremote, bothplaces, onlyremotefolders
<SYSTEM_TASK:>
Continue uploading a new file from local file (already exists on JottaCloud
<END_TASK>
<USER_TASK:>
Description:
def resume(localfile, jottafile, JFS):
    """Continue uploading a new file from local file (already exists on JottaCloud"""
    # NOTE(review): the file is opened in text mode; for binary content an
    # 'rb' open seems safer — confirm what jottafile.resume() expects
    # before changing.
    with open(localfile) as lf:
        _complete = jottafile.resume(lf)
    return _complete
<SYSTEM_TASK:>
Compare md5 hash to determine if contents have changed.
<END_TASK>
<USER_TASK:>
Description:
def replace_if_changed(localfile, jottapath, JFS):
    """Compare md5 hash to determine if contents have changed.

    Upload a file from local disk and replace file on JottaCloud if the md5s differ,
    or continue uploading if the file is incompletely uploaded.

    Returns the JottaFile object"""
    jf = JFS.getObject(jottapath)
    lf_hash = getxattrhash(localfile)  # try to read previous hash, stored in xattr
    if lf_hash is None:  # no valid hash found in xattr,
        # Open in binary mode: arbitrary file content need not decode as
        # text (text mode could raise UnicodeDecodeError and would also
        # hash newline-translated data instead of the raw bytes).
        with open(localfile, 'rb') as lf:
            lf_hash = calculate_md5(lf)  # (re)calculate it
    # isinstance instead of `type(...) ==` so subclasses are handled too.
    if isinstance(jf, JFSIncompleteFile):
        log.debug("Local file %s is incompletely uploaded, continue", localfile)
        return resume(localfile, jf, JFS)
    elif jf.md5 == lf_hash:  # hashes are the same
        log.debug("hash match (%s), file contents haven't changed", lf_hash)
        setxattrhash(localfile, lf_hash)
        return jf  # return the version from jottaclouds
    else:
        setxattrhash(localfile, lf_hash)
        return new(localfile, jottapath, JFS)
<SYSTEM_TASK:>
Get a tree of of files and folders. use as an iterator, you get something like os.walk
<END_TASK>
<USER_TASK:>
Description:
def iter_tree(jottapath, JFS):
    """Get a tree of of files and folders. use as an iterator, you get something like os.walk"""
    filedirlist = JFS.getObject('%s?mode=list' % jottapath)
    log.debug("got tree: %s", filedirlist)
    if not isinstance(filedirlist, JFSFileDirList):
        # Not a folder listing: yield one empty entry and STOP. Without
        # this return the code fell through to `.tree`, which non-listing
        # objects don't have.
        yield ('', tuple(), tuple())
        return
    for path in filedirlist.tree:
        yield path
<SYSTEM_TASK:>
Parse drawing element.
<END_TASK>
<USER_TASK:>
Description:
def parse_drawing(document, container, elem):
    """Parse drawing element.

    We don't do much with drawing element. We can find embeded image but we don't do more than that.
    """
    blips = elem.xpath('.//a:blip', namespaces=NAMESPACES)
    if not blips:
        return
    # The first a:blip carries the relationship id of the embedded image.
    rid = blips[0].attrib[_name('{{{r}}}embed')]
    container.elements.append(doc.Image(rid))
<SYSTEM_TASK:>
Parse paragraph element.
<END_TASK>
<USER_TASK:>
Description:
def parse_paragraph(document, par):
    """Parse paragraph element.

    Some other elements could be found inside of paragraph element (math, links).
    """
    paragraph = doc.Paragraph()
    paragraph.document = document

    for elem in par:
        # A given element's tag matches at most one branch, so elif is
        # equivalent to the original run of independent ifs.
        if elem.tag == _name('{{{w}}}pPr'):
            parse_paragraph_properties(document, paragraph, elem)
        elif elem.tag == _name('{{{w}}}r'):
            parse_text(document, paragraph, elem)
        elif elem.tag in (_name('{{{m}}}oMath'), _name('{{{m}}}oMathPara')):
            # Math content is represented by a placeholder object.
            paragraph.elements.append(doc.Math())
        elif elem.tag == _name('{{{w}}}commentRangeStart'):
            paragraph.elements.append(
                doc.Comment(elem.attrib[_name('{{{w}}}id')], 'start'))
        elif elem.tag == _name('{{{w}}}commentRangeEnd'):
            paragraph.elements.append(
                doc.Comment(elem.attrib[_name('{{{w}}}id')], 'end'))
        elif elem.tag == _name('{{{w}}}hyperlink'):
            try:
                link = doc.Link(elem.attrib[_name('{{{r}}}id')])
                parse_text(document, link, elem)
                paragraph.elements.append(link)
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit are no longer swallowed; a malformed hyperlink
                # should not abort parsing of the whole paragraph.
                logger.error('Error with with hyperlink [%s].', str(elem.attrib.items()))
        elif elem.tag == _name('{{{w}}}smartTag'):
            parse_smarttag(document, paragraph, elem)

    return paragraph
<SYSTEM_TASK:>
Parse document with content.
<END_TASK>
<USER_TASK:>
Description:
def parse_document(xmlcontent):
    """Parse document with content.

    Content is placed in file 'document.xml'.
    """
    xml_root = etree.fromstring(xmlcontent)
    body = xml_root.xpath('.//w:body', namespaces=NAMESPACES)[0]

    document = doc.Document()
    for elem in body:
        # Paragraphs, tables and structured-document tags (used here only
        # for the table of contents) are the supported body children.
        if elem.tag == _name('{{{w}}}p'):
            document.elements.append(parse_paragraph(document, elem))
        elif elem.tag == _name('{{{w}}}tbl'):
            document.elements.append(parse_table(document, elem))
        elif elem.tag == _name('{{{w}}}sdt'):
            document.elements.append(doc.TOC())
    return document
<SYSTEM_TASK:>
Parse relationship document.
<END_TASK>
<USER_TASK:>
Description:
def parse_relationship(document, xmlcontent, rel_type):
    """Parse relationship document.

    Relationships hold information like external or internal references for links.

    Relationships are placed in file '_rels/document.xml.rels'.
    """
    # Named `tree`, not `doc`: the original local shadowed the imported
    # `doc` module used throughout this file.
    tree = etree.fromstring(xmlcontent)
    for elem in tree:
        if elem.tag == _name('{{{pr}}}Relationship'):
            rel = {'target': elem.attrib['Target'],
                   'type': elem.attrib['Type'],
                   'target_mode': elem.attrib.get('TargetMode', 'Internal')}
            document.relationships[rel_type][elem.attrib['Id']] = rel
<SYSTEM_TASK:>
Find style by it's descriptive name.
<END_TASK>
<USER_TASK:>
Description:
def get_by_name(self, name, style_type=None):
    """Find style by its descriptive name.

    :Args:
      - name (str): descriptive style name
      - style_type (str): optional style type; when no style matches
        `name`, the default style for this type is returned instead

    :Returns:
      Returns found style of type :class:`ooxml.doc.Style`, the default
      style for `style_type` if nothing matched, or None.
    """
    for st in self.styles.values():
        if st and st.name == name:
            return st
    # Previously the loop variable `st` leaked out of the loop, so the
    # fallback was skipped and the *last* style was returned whenever any
    # styles existed. Mirror get_by_id(): default style, else None.
    if style_type:
        return self.styles.get(self.default_styles[style_type], None)
    return None
<SYSTEM_TASK:>
Find style by it's unique identifier
<END_TASK>
<USER_TASK:>
Description:
def get_by_id(self, style_id, style_type=None):
    """Find style by its unique identifier

    :Returns:
      Returns found style of type :class:`ooxml.doc.Style`, the default
      style for `style_type` when nothing matches, or None.
    """
    match = next(
        (st for st in self.styles.values() if st and st.style_id == style_id),
        None)
    if match is not None:
        return match
    # Fall back to the default style for this type, if one was requested.
    if style_type:
        return self.styles.get(self.default_styles[style_type], None)
    return None
<SYSTEM_TASK:>
Get or set the CPU affinity set for the current process.
<END_TASK>
<USER_TASK:>
Description:
def process_affinity(affinity=None):
    """Get or set the CPU affinity set for the current process.

    This will affect all future threads spawned by this process. It is
    implementation-defined whether it will also affect previously-spawned
    threads.
    """
    if affinity is not None:
        requested = CPUSet(affinity)
        # NOTE(review): this generic fallback only *validates* the requested
        # set; platform-specific overrides are expected to actually apply it.
        if not requested.issubset(system_affinity()):
            raise ValueError("unknown cpus: %s" % requested)
    return system_affinity()
<SYSTEM_TASK:>
Attempt to acquire this lock.
<END_TASK>
<USER_TASK:>
Description:
def acquire(self, blocking=True, timeout=None):
    """Attempt to acquire this lock.

    If the optional argument "blocking" is True and "timeout" is None,
    this methods blocks until is successfully acquires the lock. If
    "blocking" is False, it returns immediately if the lock could not
    be acquired. Otherwise, it blocks for at most "timeout" seconds
    trying to acquire the lock.

    In all cases, this methods returns True if the lock was successfully
    acquired and False otherwise.
    """
    if timeout is None:
        return self.__lock.acquire(blocking)
    else:
        # NOTE(review): when a timeout is given, the `blocking` flag is
        # ignored and the timed loop below runs regardless — confirm
        # whether blocking=False with a timeout should return at once.
        #
        # Simulated timeout using progressively longer sleeps.
        # This is the same timeout scheme used in the stdlib Condition
        # class. If there's lots of contention on the lock then there's
        # a good chance you won't get it; but then again, Python doesn't
        # guarantee fairness anyway. We hope that platform-specific
        # extensions can provide a better mechanism.
        endtime = _time() + timeout
        delay = 0.0005
        while not self.__lock.acquire(False):
            remaining = endtime - _time()
            if remaining <= 0:
                return False
            # Exponential backoff, capped at 50ms and at the time left.
            delay = min(delay*2, remaining, 0.05)
            _sleep(delay)
        return True
<SYSTEM_TASK:>
Convert a vanilla thread object into an instance of this class.
<END_TASK>
<USER_TASK:>
Description:
def from_thread(cls, thread):
    """Convert a vanilla thread object into an instance of this class.

    This method "upgrades" a vanilla thread object to an instance of this
    extended class. You might need to call this if you obtain a reference
    to a thread by some means other than (a) creating it, or (b) from the
    methods of the threading2 module.
    """
    # Classes from our MRO that the thread's class doesn't already have;
    # their _upgrade_thread hooks run after the class swap below.
    new_classes = []
    for new_cls in cls.__mro__:
        if new_cls not in thread.__class__.__mro__:
            new_classes.append(new_cls)
    if isinstance(thread, cls):
        # Already an instance of (a subclass of) this class: nothing to swap.
        pass
    elif issubclass(cls, thread.__class__):
        # Our class extends the thread's class: reassign directly.
        thread.__class__ = cls
    else:
        # Unrelated classes: build a shim class inheriting from both so
        # the instance keeps its old behaviour and gains ours.
        class UpgradedThread(thread.__class__, cls):
            pass
        thread.__class__ = UpgradedThread
    for new_cls in new_classes:
        if hasattr(new_cls, "_upgrade_thread"):
            new_cls._upgrade_thread(thread)
    return thread
<SYSTEM_TASK:>
Acquire the lock in shared or exclusive mode.
<END_TASK>
<USER_TASK:>
Description:
def acquire(self, blocking=True, timeout=None, shared=False):
    """Acquire the lock in shared or exclusive mode."""
    # NOTE(review): the results of the inner _acquire_* calls are
    # discarded, so this method returns None even for a failed
    # non-blocking attempt — confirm whether callers expect a boolean.
    with self._lock:
        if shared:
            self._acquire_shared(blocking, timeout)
        else:
            self._acquire_exclusive(blocking, timeout)
        # Sanity check: a lock can never be held in both modes at once.
        assert not (self.is_shared and self.is_exclusive)
<SYSTEM_TASK:>
Get font size defined for this style.
<END_TASK>
<USER_TASK:>
Description:
def _get_font_size(document, style):
"""Get font size defined for this style.
It will try to get font size from it's parent style if it is not defined by original style.
:Args:
- document (:class:`ooxml.doc.Document`): Document object
- style (:class:`ooxml.doc.Style`): Style object
:Returns:
Returns font size as a number. -1 if it can not get font size.
""" |
font_size = style.get_font_size()
if font_size == -1:
if style.based_on:
based_on = document.styles.get_by_id(style.based_on)
if based_on:
return _get_font_size(document, based_on)
return font_size |
<SYSTEM_TASK:>
Returns type for the list.
<END_TASK>
<USER_TASK:>
Description:
def _get_numbering(document, numid, ilvl):
"""Returns type for the list.
:Returns:
Returns type for the list. Returns "bullet" by default or in case of an error.
""" |
try:
abs_num = document.numbering[numid]
return document.abstruct_numbering[abs_num][ilvl]['numFmt']
except:
return 'bullet' |
<SYSTEM_TASK:>
Returns root element for a list.
<END_TASK>
<USER_TASK:>
Description:
def _get_parent(root):
"""Returns root element for a list.
:Args:
root (Element): lxml element of current location
:Returns:
lxml element representing list
""" |
elem = root
while True:
elem = elem.getparent()
if elem.tag in ['ul', 'ol']:
return elem |
<SYSTEM_TASK:>
Close already opened list if needed.
<END_TASK>
<USER_TASK:>
Description:
def close_list(ctx, root):
    """Close already opened list if needed.

    This will try to see if it is needed to close already opened list.

    :Args:
      - ctx (:class:`Context`): Context object
      - root (Element): lxml element representing current position.

    :Returns:
      lxml element where future content should be placed.
    """
    try:
        n = len(ctx.in_list)
        if n <= 0:
            # Nothing is open: stay where we are.
            return root
        elem = root
        # Climb out of one list container (<ul>/<ol>, or the enclosing
        # <td> inside tables) per open nesting level.
        while n > 0:
            while True:
                if elem.tag in ['ul', 'ol', 'td']:
                    elem = elem.getparent()
                    break
                elem = elem.getparent()
            n -= 1
        ctx.in_list = []
        return elem
    except:
        # NOTE(review): bare except returns None on any failure (e.g.
        # walking past the top of the tree, where getparent() gives None);
        # callers must tolerate a None result — confirm intended.
        return None
<SYSTEM_TASK:>
Open list if it is needed and place current element as first member of a list.
<END_TASK>
<USER_TASK:>
Description:
def open_list(ctx, document, par, root, elem):
    """Open list if it is needed and place current element as first member of a list.

    :Args:
      - ctx (:class:`Context`): Context object
      - document (:class:`ooxml.doc.Document`): Document object
      - par (:class:`ooxml.doc.Paragraph`): Paragraph element
      - root (Element): lxml element of current location
      - elem (Element): lxml element representing current element we are trying to insert

    :Returns:
      lxml element where future content should be placed.
    """
    _ls = None
    # Only adjust nesting when this paragraph's list level (ilvl) or
    # numbering id (numid) differs from what the context last saw.
    if par.ilvl != ctx.ilvl or par.numid != ctx.numid:
        # start
        if ctx.ilvl is not None and (par.ilvl > ctx.ilvl):
            # Deeper level: open a nested list container.
            fmt = _get_numbering(document, par.numid, par.ilvl)
            if par.ilvl > 0:
                # get last <li> in <ul>
                # could be nicer
                _b = list(root)[-1]
                _ls = etree.SubElement(_b, _get_numbering_tag(fmt))
                root = _ls
            else:
                _ls = etree.SubElement(root, _get_numbering_tag(fmt))
                root = _ls
            fire_hooks(ctx, document, par, _ls, ctx.get_hook(_get_numbering_tag(fmt)))
            ctx.in_list.append((par.numid, par.ilvl))
        elif ctx.ilvl is not None and par.ilvl < ctx.ilvl:
            # Shallower level: unwind open lists until the matching
            # (numid, ilvl) pair is on top of the stack.
            fmt = _get_numbering(document, ctx.numid, ctx.ilvl)
            try:
                while True:
                    numid, ilvl = ctx.in_list[-1]
                    if numid == par.numid and ilvl == par.ilvl:
                        break
                    root = _get_parent(root)
                    ctx.in_list.pop()
            except:
                # NOTE(review): the bare except silently stops unwinding
                # when in_list is exhausted (IndexError) — confirm intended.
                pass

        # if ctx.numid is not None and par.numid > ctx.numid:
        # if ctx.numid != None:
        if par.numid > ctx.numid:
            # A new numbering definition: open a fresh list at this level.
            # NOTE(review): comparison fails on Python 3 when ctx.numid is
            # still None — the commented-out guards above hint at this.
            fmt = _get_numbering(document, par.numid, par.ilvl)
            _ls = etree.SubElement(root, _get_numbering_tag(fmt))
            fire_hooks(ctx, document, par, _ls, ctx.get_hook(_get_numbering_tag(fmt)))
            ctx.in_list.append((par.numid, par.ilvl))
            root = _ls

        ctx.ilvl = par.ilvl
        ctx.numid = par.numid

    # Wrap the paragraph's content in a new <li> under the current list.
    _a = etree.SubElement(root, 'li')
    _a.text = elem.text
    for a in list(elem):
        _a.append(a)
    fire_hooks(ctx, document, par, _a, ctx.get_hook('li'))
    return root
<SYSTEM_TASK:>
Serialize math element.
<END_TASK>
<USER_TASK:>
Description:
def serialize_math(ctx, document, elem, root):
    """Serialize a math element.

    Math objects are not supported at the moment, so a visible placeholder
    message is emitted instead of the formula.
    """
    placeholder = etree.SubElement(root, 'span')
    if ctx.options['embed_styles']:
        placeholder.set('style', 'border: 1px solid red')
    placeholder.text = 'We do not support Math blocks at the moment.'
    fire_hooks(ctx, document, elem, placeholder, ctx.get_hook('math'))
    return root
<SYSTEM_TASK:>
Serialize link element.
<END_TASK>
<USER_TASK:>
Description:
def serialize_link(ctx, document, elem, root):
"""Serilaze link element.
This works only for external links at the moment.
""" |
    _a = etree.SubElement(root, 'a')
    # Serialize child elements into the anchor; plain text nodes without a
    # serializer are appended manually to the anchor text or the last
    # child's tail (lxml text model).
    for el in elem.elements:
        _ser = ctx.get_serializer(el)
        if _ser:
            _td = _ser(ctx, document, el, _a)
        else:
            if isinstance(el, doc.Text):
                children = list(_a)
                if len(children) == 0:
                    # No child elements yet: text belongs on the <a> itself.
                    _text = _a.text or u''
                    _a.text = u'{}{}'.format(_text, el.value())
                else:
                    # Otherwise append after the last child element.
                    _text = children[-1].tail or u''
                    children[-1].tail = u'{}{}'.format(_text, el.value())
    # Resolve the relationship id to the external target URL, if present.
    if elem.rid in document.relationships[ctx.options['relationship']]:
        _a.set('href', document.relationships[ctx.options['relationship']][elem.rid].get('target', ''))
    fire_hooks(ctx, document, elem, _a, ctx.get_hook('a'))
    return root |
<SYSTEM_TASK:>
Serialize image element.
<END_TASK>
<USER_TASK:>
Description:
def serialize_image(ctx, document, elem, root):
"""Serialize image element.
This is not abstract enough.
""" |
_img = etree.SubElement(root, 'img')
# make path configurable
if elem.rid in document.relationships[ctx.options['relationship']]:
img_src = document.relationships[ctx.options['relationship']][elem.rid].get('target', '')
img_name, img_extension = os.path.splitext(img_src)
_img.set('src', 'static/{}{}'.format(elem.rid, img_extension))
fire_hooks(ctx, document, elem, _img, ctx.get_hook('img'))
return root |
<SYSTEM_TASK:>
Fire hooks on newly created element.
<END_TASK>
<USER_TASK:>
Description:
def fire_hooks(ctx, document, elem, element, hooks):
    """Invoke every registered hook on a newly created element.

    :Args:
      - ctx (:class:`Context`): Context object
      - document (:class:`ooxml.doc.Document`): Document object
      - elem (:class:`ooxml.doc.Element`): Element which we serialized
      - element (Element): lxml element which we created
      - hooks (list): List of hook callables; may be None or empty
    """
    # `hooks or ()` handles both None and an empty list in one pass.
    for hook in hooks or ():
        hook(ctx, document, elem, element)
<SYSTEM_TASK:>
Tells us if node element has defined styling.
<END_TASK>
<USER_TASK:>
Description:
def has_style(node):
    """Tell whether *node* carries any direct styling.

    :Args:
      - node (:class:`ooxml.doc.Element`): Element

    :Returns:
      True or False
    """
    styled_attrs = ('b', 'i', 'u', 'strike', 'color', 'jc', 'sz', 'ind',
                    'superscript', 'subscript', 'small_caps')
    return any(attr in node.rpr for attr in styled_attrs)
<SYSTEM_TASK:>
Returns as string defined CSS for this node.
<END_TASK>
<USER_TASK:>
Description:
def get_style_css(ctx, node, embed=True, fontsize=-1):
"""Returns as string defined CSS for this node.
Defined CSS can be different if it is embeded or no. When it is embeded styling
for bold,italic and underline will not be defined with CSS. In that case we
use defined tags <b>,<i>,<u> from the content.
:Args:
- ctx (:class:`Context`): Context object
- node (:class:`ooxml.doc.Element`): Node element
- embed (book): True by default.
:Returns:
Returns as string defined CSS for this node
""" |
    style = []
    # NOTE(review): returns None here but '' at the bottom — callers should
    # treat both as "no style"; confirm before unifying.
    if not node:
        return
    # fontsize == -1 means "all rules"; 2 limits to font-size rules only,
    # 1 to everything except font-size.
    if fontsize in [-1, 2]:
        if 'sz' in node.rpr:
            # 'sz' is stored in half-points, hence the division by 2.
            size = int(node.rpr['sz']) / 2
            if ctx.options['embed_fontsize']:
                if ctx.options['scale_to_size']:
                    # Express the size relative to the target size, ~8.3% per point.
                    multiplier = size-ctx.options['scale_to_size']
                    scale = 100 + int(math.trunc(8.3*multiplier))
                    style.append('font-size: {}%'.format(scale))
                else:
                    style.append('font-size: {}pt'.format(size))
    if fontsize in [-1, 1]:
        # temporarily
        # When not embedding, bold/italic/underline come from <b>/<i>/<u>
        # tags in the content instead of CSS.
        if not embed:
            if 'b' in node.rpr:
                style.append('font-weight: bold')
            if 'i' in node.rpr:
                style.append('font-style: italic')
            if 'u' in node.rpr:
                style.append('text-decoration: underline')
            if 'small_caps' in node.rpr:
                style.append('font-variant: small-caps')
            if 'strike' in node.rpr:
                style.append('text-decoration: line-through')
            if 'color' in node.rpr:
                # Skip plain black, the implicit default.
                if node.rpr['color'] != '000000':
                    style.append('color: #{}'.format(node.rpr['color']))
        if 'jc' in node.ppr:
            # left right both
            align = node.ppr['jc']
            if align.lower() == 'both':
                align = 'justify'
            style.append('text-align: {}'.format(align))
        if 'ind' in node.ppr:
            # Indent values divided by 10 to get pixels — TODO confirm the
            # source unit against the parser.
            if 'left' in node.ppr['ind']:
                size = int(node.ppr['ind']['left']) / 10
                style.append('margin-left: {}px'.format(size))
            if 'right' in node.ppr['ind']:
                size = int(node.ppr['ind']['right']) / 10
                style.append('margin-right: {}px'.format(size))
            if 'first_line' in node.ppr['ind']:
                size = int(node.ppr['ind']['first_line']) / 10
                style.append('text-indent: {}px'.format(size))
    if len(style) == 0:
        return ''
    return '; '.join(style) + ';' |
<SYSTEM_TASK:>
Returns list of styles on which specified style is based on.
<END_TASK>
<USER_TASK:>
Description:
def get_all_styles(document, style):
"""Returns list of styles on which specified style is based on.
:Args:
- document (:class:`ooxml.doc.Document`): Document object
- style (:class:`ooxml.doc.Style`): Style object
:Returns:
List of style objects.
""" |
classes = []
while True:
classes.insert(0, get_style_name(style))
if style.based_on:
style = document.styles.get_by_id(style.based_on)
else:
break
return classes |
<SYSTEM_TASK:>
Returns CSS classes for this style.
<END_TASK>
<USER_TASK:>
Description:
def get_css_classes(document, style):
"""Returns CSS classes for this style.
This function will check all the styles specified style is based on and return their CSS classes.
:Args:
- document (:class:`ooxml.doc.Document`): Document object
- style (:class:`ooxml.doc.Style`): Style object
:Returns:
String representing all the CSS classes for this element.
>>> get_css_classes(doc, st)
'header1 normal'
""" |
    # NOTE(review): only the most-derived style ([-1:]) contributes a class,
    # although the docstring example shows multiple classes — confirm intent.
    lst = [st.lower() for st in get_all_styles(document, style)[-1:]] + \
          ['{}-fontsize'.format(st.lower()) for st in get_all_styles(document, style)[-1:]]
    return ' '.join(lst) |
<SYSTEM_TASK:>
Serialize list of elements into HTML string.
<END_TASK>
<USER_TASK:>
Description:
def serialize_elements(document, elements, options=None):
"""Serialize list of elements into HTML string.
:Args:
- document (:class:`ooxml.doc.Document`): Document object
- elements (list): List of elements
- options (dict): Optional dictionary with :class:`Context` options
:Returns:
Returns HTML representation of the document.
""" |
ctx = Context(document, options)
tree_root = root = etree.Element('div')
for elem in elements:
_ser = ctx.get_serializer(elem)
if _ser:
root = _ser(ctx, document, elem, root)
# TODO:
# - create footnotes now
return etree.tostring(tree_root, pretty_print=ctx.options.get('pretty_print', True), encoding="utf-8", xml_declaration=False) |
<SYSTEM_TASK:>
Used for checking if specific element is a header or not.
<END_TASK>
<USER_TASK:>
Description:
def is_header(self, elem, font_size, node, style=None):
"""Used for checking if specific element is a header or not.
:Returns:
True or False
""" |
    # This logic has been disabled for now. Mark this as header if it has
    # been marked during the parsing or mark.
    # if hasattr(elem, 'possible_header'):
    # if elem.possible_header:
    # return True
    # if not style:
    # return False
    # Named style: compare its font size against the header sizes collected
    # while importing the document.
    if hasattr(style, 'style_id'):
        fnt_size = _get_font_size(self.doc, style)
        from .importer import calculate_weight
        weight = calculate_weight(self.doc, elem)
        # Heavy (long) paragraphs are assumed to be body text, not headers.
        if weight > 50:
            return False
        if fnt_size in self.doc.possible_headers_style:
            return True
        return font_size in self.doc.possible_headers
    else:
        # No named style: weigh the font sizes used by the runs in this
        # element and check the dominant size against possible headers.
        list_of_sizes = {}
        for el in elem.elements:
            try:
                fs = get_style_fontsize(el)
                weight = len(el.value()) if el.value() else 0
                list_of_sizes[fs] = list_of_sizes.setdefault(fs, 0) + weight
            except:
                pass
        sorted_list_of_sizes = list(collections.OrderedDict(sorted(six.iteritems(list_of_sizes), key=lambda t: t[0])))
        # NOTE(review): font_size_to_check is assigned but never used.
        font_size_to_check = font_size
        if len(sorted_list_of_sizes) > 0:
            if sorted_list_of_sizes[0] != font_size:
                return sorted_list_of_sizes[0] in self.doc.possible_headers
        return font_size in self.doc.possible_headers |
<SYSTEM_TASK:>
Returns HTML tag representing specific header for this element.
<END_TASK>
<USER_TASK:>
Description:
def get_header(self, elem, style, node):
"""Returns HTML tag representing specific header for this element.
:Returns:
String representation of HTML tag.
""" |
    font_size = style
    # Explicit flag set during import wins: treat as a top-level header.
    if hasattr(elem, 'possible_header'):
        if elem.possible_header:
            return 'h1'
    if not style:
        return 'h6'
    # Named styles carry their own font size.
    if hasattr(style, 'style_id'):
        font_size = _get_font_size(self.doc, style)
    try:
        # Header level is the rank of this size among the known header sizes.
        if font_size in self.doc.possible_headers_style:
            return 'h{}'.format(self.doc.possible_headers_style.index(font_size)+1)
        return 'h{}'.format(self.doc.possible_headers.index(font_size)+1)
    except ValueError:
        # Size not recognised as a header size: fall back to the smallest.
        return 'h6' |
<SYSTEM_TASK:>
Returns serializer for specific element.
<END_TASK>
<USER_TASK:>
Description:
def get_serializer(self, node):
"""Returns serializer for specific element.
:Args:
- node (:class:`ooxml.doc.Element`): Element object
:Returns:
Returns reference to a function which will be used for serialization.
""" |
return self.options['serializers'].get(type(node), None)
if type(node) in self.options['serializers']:
return self.options['serializers'][type(node)]
return None |
<SYSTEM_TASK:>
Set the priority for all threads in this group.
<END_TASK>
<USER_TASK:>
Description:
def priority(self,priority):
"""Set the priority for all threads in this group.
If setting priority fails on any thread, the priority of all threads
is restored to its previous value.
""" |
    with self.__lock:
        # Snapshot each thread's old priority so we can roll back on failure.
        old_priorities = {}
        try:
            for thread in self.__threads:
                old_priorities[thread] = thread.priority
                thread.priority = priority
        except Exception:
            # Best-effort rollback: restore every thread we already changed,
            # then re-raise the original error.
            for (thread,old_priority) in old_priorities.iteritems():
                try:
                    thread.priority = old_priority
                except Exception:
                    pass
            raise
        else:
            # All threads updated: record the group-wide priority.
            self.__priority = priority |
<SYSTEM_TASK:>
Set the affinity for all threads in this group.
<END_TASK>
<USER_TASK:>
Description:
def affinity(self,affinity):
"""Set the affinity for all threads in this group.
If setting affinity fails on any thread, the affinity of all threads
is restored to its previous value.
""" |
    with self.__lock:
        # Snapshot each thread's old affinity so we can roll back on failure.
        old_affinities = {}
        try:
            for thread in self.__threads:
                old_affinities[thread] = thread.affinity
                thread.affinity = affinity
        except Exception:
            # Best-effort rollback: restore every thread we already changed,
            # then re-raise the original error.
            for (thread,old_affinity) in old_affinities.iteritems():
                try:
                    thread.affinity = old_affinity
                except Exception:
                    pass
            raise
        else:
            # All threads updated: record the group-wide affinity.
            self.__affinity = affinity |
<SYSTEM_TASK:>
Join all threads in this group.
<END_TASK>
<USER_TASK:>
Description:
def join(self,timeout=None):
"""Join all threads in this group.
If the optional "timeout" argument is given, give up after that many
seconds. This method returns True is the threads were successfully
joined, False if a timeout occurred.
""" |
if timeout is None:
for thread in self.__threads:
thread.join()
else:
deadline = _time() + timeout
for thread in self.__threads:
delay = deadline - _time()
if delay <= 0:
return False
if not thread.join(delay):
return False
return True |
<SYSTEM_TASK:>
Returns length of the content in this element.
<END_TASK>
<USER_TASK:>
Description:
def text_length(elem):
"""Returns length of the content in this element.
Return value is not correct but it is **good enough***.
""" |
    # Treat a missing element as empty.
    if not elem:
        return 0
    value = elem.value()
    try:
        value = len(value)
    # Deliberate best-effort: value() may return something without a
    # length; count it as 0 ("good enough" per the docstring).
    except:
        value = 0
    try:
        # Add the length of every child element's value as well.
        for a in elem.elements:
            value += len(a.value())
    except:
        pass
    return value |
<SYSTEM_TASK:>
Build rrule dictionary for vRecur class.
<END_TASK>
<USER_TASK:>
Description:
def build_rrule(count=None, interval=None, bysecond=None, byminute=None,
                byhour=None, byweekno=None, bymonthday=None, byyearday=None,
                bymonth=None, until=None, bysetpos=None, wkst=None, byday=None,
                freq=None):
    """
    Build rrule dictionary for vRecur class.

    :param count: int
    :param interval: int
    :param bysecond: int
    :param byminute: int
    :param byhour: int
    :param byweekno: int
    :param bymonthday: int
    :param byyearday: int
    :param bymonth: int
    :param until: datetime
    :param bysetpos: int
    :param wkst: str, two-letter weekday
    :param byday: weekday
    :param freq: str, frequency name ('WEEK', 'MONTH', etc)
    :return: dict
    :raises ValueError: if freq is not one of vRecur.frequencies
    """
    # Pair each optional argument with its RRULE property name; only
    # non-None values make it into the result.
    optional = (
        ('COUNT', count),
        ('INTERVAL', interval),
        ('BYSECOND', bysecond),
        ('BYMINUTE', byminute),
        ('BYHOUR', byhour),
        ('BYWEEKNO', byweekno),
        ('BYMONTHDAY', bymonthday),
        ('BYYEARDAY', byyearday),
        ('BYMONTH', bymonth),
        ('UNTIL', until),
        ('BYSETPOS', bysetpos),
        ('WKST', wkst),
        ('BYDAY', byday),
    )
    result = {name: value for name, value in optional if value is not None}
    # FREQ is validated against the frequencies icalendar understands.
    if freq is not None:
        if freq not in vRecur.frequencies:
            raise ValueError('Frequency value should be one of: {0}'
                             .format(vRecur.frequencies))
        result['FREQ'] = freq
    return result
<SYSTEM_TASK:>
Build rrule dictionary for vRecur class from a django_recurrences rrule.
<END_TASK>
<USER_TASK:>
Description:
def build_rrule_from_recurrences_rrule(rule):
    """
    Build rrule dictionary for vRecur class from a django_recurrences rrule.

    django_recurrences is a popular implementation for recurrences in django.
    https://pypi.org/project/django-recurrence/
    This is a shortcut to interface between recurrences and icalendar.
    """
    from recurrence import serialize
    serialized = serialize(rule)
    # serialize() may prefix the line with 'RRULE:'; strip it before parsing.
    prefix = 'RRULE:'
    if serialized.startswith(prefix):
        serialized = serialized[len(prefix):]
    return build_rrule_from_text(serialized)
<SYSTEM_TASK:>
Build rrule dictionary for vRecur class from a dateutil rrule.
<END_TASK>
<USER_TASK:>
Description:
def build_rrule_from_dateutil_rrule(rule):
    """
    Build rrule dictionary for vRecur class from a dateutil rrule.

    Dateutils rrule is a popular implementation of rrule in python.
    https://pypi.org/project/python-dateutil/
    This is a shortcut to interface between dateutil and icalendar.
    Returns None when str(rule) contains no RRULE line.
    """
    # str(rule) yields a DTSTART line followed by the RRULE line; only the
    # first RRULE line is parsed.
    for line in str(rule).splitlines():
        if line.startswith('DTSTART:'):
            continue
        if line.startswith('RRULE:'):
            return build_rrule_from_text(line[6:])
<SYSTEM_TASK:>
u"""
<END_TASK>
<USER_TASK:>
Description:
def write(self, outfile, encoding):
u"""
Writes the feed to the specified file in the
specified encoding.
""" |
    # NOTE(review): the ``encoding`` argument is currently ignored — the
    # calendar is serialized with icalendar's default encoding; confirm.
    cal = Calendar()
    cal.add('version', '2.0')
    cal.add('calscale', 'GREGORIAN')
    # Copy the mapped feed-level fields onto the calendar.
    for ifield, efield in FEED_FIELD_MAP:
        val = self.feed.get(ifield)
        if val is not None:
            cal.add(efield, val)
    self.write_items(cal)
    # Older icalendar versions expose as_string(); fall back to to_ical().
    to_ical = getattr(cal, 'as_string', None)
    if not to_ical:
        to_ical = cal.to_ical
    outfile.write(to_ical()) |
<SYSTEM_TASK:>
Write all events to the calendar
<END_TASK>
<USER_TASK:>
Description:
def write_items(self, calendar):
"""
Write all events to the calendar
""" |
for item in self.items:
event = Event()
for ifield, efield in ITEM_EVENT_FIELD_MAP:
val = item.get(ifield)
if val is not None:
event.add(efield, val)
calendar.add_component(event) |
<SYSTEM_TASK:>
Remove the existing indentation from each line of a chunk of
<END_TASK>
<USER_TASK:>
Description:
def _reindent(s, indent, reformat=True):
"""
Remove the existing indentation from each line of a chunk of
text, s, and then prefix each line with a new indent string.
Also removes trailing whitespace from each line, and leading and
trailing blank lines.
""" |
s = textwrap.dedent(s)
s = s.split('\n')
s = [x.rstrip() for x in s]
while s and (not s[0]):
s = s[1:]
while s and (not s[-1]):
s = s[:-1]
if reformat:
s = '\n'.join(s)
s = textwrap.wrap(s, initial_indent=indent, subsequent_indent=indent)
else:
s = [indent + x for x in s]
return '\n'.join(s) + '\n' |
<SYSTEM_TASK:>
Generate a Python docstr for a given element in the AMQP
<END_TASK>
<USER_TASK:>
Description:
def generate_docstr(element, indent='', wrap=None):
"""
Generate a Python docstr for a given element in the AMQP
XML spec file. The element could be a class or method
The 'wrap' parameter is an optional chunk of text that's
added to the beginning and end of the resulting docstring.
""" |
    result = []
    # Leading free text of the element becomes the docstring summary.
    txt = element.text and element.text.rstrip()
    if txt:
        result.append(_reindent(txt, indent))
        result.append(indent)
    # <doc> and <rule> children become named / RULE: sections.
    for d in element.findall('doc') + element.findall('rule'):
        docval = ''.join(d.textlist()).rstrip()
        if not docval:
            continue
        reformat = True
        if 'name' in d.attrib:
            result.append(indent + d.attrib['name'].upper() + ':')
            result.append(indent)
            extra_indent = ' '
            if d.attrib['name'] == 'grammar':
                reformat = False # Don't want re-indenting to mess this up
        elif d.tag == 'rule':
            result.append(indent + 'RULE:')
            result.append(indent)
            extra_indent = ' '
        else:
            extra_indent = ''
        result.append(_reindent(docval, indent + extra_indent, reformat))
        result.append(indent)
    # <field> children are documented under a PARAMETERS: section,
    # recursing for each field's own docs.
    fields = element.findall('field')
    if fields:
        result.append(indent + 'PARAMETERS:')
        for f in fields:
            result.append(indent + ' ' + _fixup_field_name(f) + ': ' + _field_type(f))
            field_docs = generate_docstr(f, indent + ' ')
            if field_docs:
                result.append(indent)
                result.append(field_docs)
    result.append(indent)
    # NOTE(review): result always contains at least `indent` by this point,
    # so this branch looks unreachable — confirm before removing.
    if not result:
        return None
    if wrap is not None:
        result = [wrap] + result + [wrap]
    return '\n'.join(x.rstrip() for x in result) + '\n' |
<SYSTEM_TASK:>
Given an AMQP spec parsed into an xml.etree.ElemenTree,
<END_TASK>
<USER_TASK:>
Description:
def generate_module(spec, out):
"""
Given an AMQP spec parsed into an xml.etree.ElemenTree,
and a file-like 'out' object to write to, generate
the skeleton of a Python module.
""" |
#
# HACK THE SPEC so that 'access' is handled by 'channel' instead of 'connection'
#
for amqp_class in spec.findall('class'):
if amqp_class.attrib['name'] == 'access':
amqp_class.attrib['handler'] = 'channel'
#
# Build up some helper dictionaries
#
for domain in spec.findall('domain'):
domains[domain.attrib['name']] = domain.attrib['type']
for amqp_class in spec.findall('class'):
for amqp_method in amqp_class.findall('method'):
method_name_map[(amqp_class.attrib['name'], amqp_method.attrib['name'])] = \
(
amqp_class.attrib['index'],
amqp_method.attrib['index'],
amqp_class.attrib['handler'].capitalize() + '.' +
_fixup_method_name(amqp_class, amqp_method),
)
#### Actually generate output
for amqp_class in spec.findall('class'):
if amqp_class.attrib['handler'] == amqp_class.attrib['name']:
generate_class(spec, amqp_class, out)
out.write('_METHOD_MAP = {\n')
for amqp_class in spec.findall('class'):
print amqp_class.attrib
# for chassis in amqp_class.findall('chassis'):
# print ' ', chassis.attrib
for amqp_method in amqp_class.findall('method'):
# print ' ', amqp_method.attrib
# for chassis in amqp_method.findall('chassis'):
# print ' ', chassis.attrib
chassis = [x.attrib['name'] for x in amqp_method.findall('chassis')]
if 'client' in chassis:
out.write(" (%s, %s): (%s, %s._%s),\n" % (
amqp_class.attrib['index'],
amqp_method.attrib['index'],
amqp_class.attrib['handler'].capitalize(),
amqp_class.attrib['handler'].capitalize(),
_fixup_method_name(amqp_class, amqp_method)))
out.write('}\n\n')
out.write('_METHOD_NAME_MAP = {\n')
for amqp_class in spec.findall('class'):
for amqp_method in amqp_class.findall('method'):
out.write(" (%s, %s): '%s.%s',\n" % (
amqp_class.attrib['index'],
amqp_method.attrib['index'],
amqp_class.attrib['handler'].capitalize(),
_fixup_method_name(amqp_class, amqp_method)))
out.write('}\n') |
<SYSTEM_TASK:>
This method allows the server to send a non-fatal warning to
<END_TASK>
<USER_TASK:>
Description:
def _alert(self, args):
"""
This method allows the server to send a non-fatal warning to
the client. This is used for methods that are normally
asynchronous and thus do not have confirmations, and for which
the server may detect errors that need to be reported. Fatal
errors are handled as channel or connection exceptions; non-
fatal errors are sent through this method.
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
details: table
detailed information for warning
A set of fields that provide more information about
the problem. The meaning of these fields are defined
on a per-reply-code basis (TO BE DEFINED).
""" |
    # Read fields in wire order: reply_code (short), reply_text (shortstr),
    # details (table). Order must not change.
    reply_code = args.read_short()
    reply_text = args.read_shortstr()
    details = args.read_table()
    # Queue the alert for the application to consume.
    self.alerts.put((reply_code, reply_text, details)) |
<SYSTEM_TASK:>
request an access ticket
<END_TASK>
<USER_TASK:>
Description:
def access_request(self, realm, exclusive=False,
passive=False, active=False, write=False, read=False):
"""
request an access ticket
This method requests an access ticket for an access realm. The
server responds by granting the access ticket. If the client
does not have access rights to the requested realm this causes
a connection exception. Access tickets are a per-channel
resource.
RULE:
The realm name MUST start with either "/data" (for
application resources) or "/admin" (for server
administration resources). If the realm starts with any
other path, the server MUST raise a connection exception
with reply code 403 (access refused).
RULE:
The server MUST implement the /data realm and MAY
implement the /admin realm. The mapping of resources to
realms is not defined in the protocol - this is a server-
side configuration issue.
PARAMETERS:
realm: shortstr
name of requested realm
RULE:
If the specified realm is not known to the server,
the server must raise a channel exception with
reply code 402 (invalid path).
exclusive: boolean
request exclusive access
Request exclusive access to the realm. If the server
cannot grant this - because there are other active
tickets for the realm - it raises a channel exception.
passive: boolean
request passive access
Request message passive access to the specified access
realm. Passive access lets a client get information
about resources in the realm but not to make any
changes to them.
active: boolean
request active access
Request message active access to the specified access
realm. Acvtive access lets a client get create and
delete resources in the realm.
write: boolean
request write access
Request write access to the specified access realm.
Write access lets a client publish messages to all
exchanges in the realm.
read: boolean
request read access
Request read access to the specified access realm.
Read access lets a client consume messages from queues
in the realm.
The most recently requested ticket is used as the channel's
default ticket for any method that requires a ticket.
""" |
    args = AMQPWriter()
    # Field order must match the spec: realm, then the five option bits.
    args.write_shortstr(realm)
    args.write_bit(exclusive)
    args.write_bit(passive)
    args.write_bit(active)
    args.write_bit(write)
    args.write_bit(read)
    # (30, 10) = Channel.access_request
    self._send_method((30, 10), args)
    return self.wait(allowed_methods=[
        (30, 11), # Channel.access_request_ok
    ]) |
<SYSTEM_TASK:>
declare exchange, create if needed
<END_TASK>
<USER_TASK:>
Description:
def exchange_declare(self, exchange, type, passive=False, durable=False,
auto_delete=True, internal=False, nowait=False,
arguments=None, ticket=None):
"""
declare exchange, create if needed
This method creates an exchange if it does not already exist,
and if the exchange exists, verifies that it is of the correct
and expected class.
RULE:
The server SHOULD support a minimum of 16 exchanges per
virtual host and ideally, impose no limit except as
defined by available resources.
PARAMETERS:
exchange: shortstr
RULE:
Exchange names starting with "amq." are reserved
for predeclared and standardised exchanges. If
the client attempts to create an exchange starting
with "amq.", the server MUST raise a channel
exception with reply code 403 (access refused).
type: shortstr
exchange type
Each exchange belongs to one of a set of exchange
types implemented by the server. The exchange types
define the functionality of the exchange - i.e. how
messages are routed through it. It is not valid or
meaningful to attempt to change the type of an
existing exchange.
RULE:
If the exchange already exists with a different
type, the server MUST raise a connection exception
with a reply code 507 (not allowed).
RULE:
If the server does not support the requested
exchange type it MUST raise a connection exception
with a reply code 503 (command invalid).
passive: boolean
do not create exchange
If set, the server will not create the exchange. The
client can use this to check whether an exchange
exists without modifying the server state.
RULE:
If set, and the exchange does not already exist,
the server MUST raise a channel exception with
reply code 404 (not found).
durable: boolean
request a durable exchange
If set when creating a new exchange, the exchange will
be marked as durable. Durable exchanges remain active
when a server restarts. Non-durable exchanges
(transient exchanges) are purged if/when a server
restarts.
RULE:
The server MUST support both durable and transient
exchanges.
RULE:
The server MUST ignore the durable field if the
exchange already exists.
auto_delete: boolean
auto-delete when unused
If set, the exchange is deleted when all queues have
finished using it.
RULE:
The server SHOULD allow for a reasonable delay
between the point when it determines that an
exchange is not being used (or no longer used),
and the point when it deletes the exchange. At
the least it must allow a client to create an
exchange and then bind a queue to it, with a small
but non-zero delay between these two actions.
RULE:
The server MUST ignore the auto-delete field if
the exchange already exists.
internal: boolean
create internal exchange
If set, the exchange may not be used directly by
publishers, but only when bound to other exchanges.
Internal exchanges are used to construct wiring that
is not visible to applications.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
arguments: table
arguments for declaration
A set of arguments for the declaration. The syntax and
semantics of these arguments depends on the server
implementation. This field is ignored if passive is
True.
ticket: short
When a client defines a new exchange, this belongs to
the access realm of the ticket used. All further work
done with that exchange must be done with an access
ticket for the same realm.
RULE:
The client MUST provide a valid access ticket
giving "active" access to the realm in which the
exchange exists or will be created, or "passive"
access if the if-exists flag is set.
""" |
    # Avoid a mutable default: an empty table when no arguments are given.
    if arguments is None:
        arguments = {}
    args = AMQPWriter()
    # Use the channel's default ticket unless one was supplied.
    if ticket is not None:
        args.write_short(ticket)
    else:
        args.write_short(self.default_ticket)
    # Field order must match the spec definition of Exchange.Declare.
    args.write_shortstr(exchange)
    args.write_shortstr(type)
    args.write_bit(passive)
    args.write_bit(durable)
    args.write_bit(auto_delete)
    args.write_bit(internal)
    args.write_bit(nowait)
    args.write_table(arguments)
    # (40, 10) = Exchange.Declare
    self._send_method((40, 10), args)
    if not nowait:
        return self.wait(allowed_methods=[
            (40, 11), # Channel.exchange_declare_ok
        ]) |
<SYSTEM_TASK:>
delete an exchange
<END_TASK>
<USER_TASK:>
Description:
def exchange_delete(self, exchange, if_unused=False,
nowait=False, ticket=None):
"""
delete an exchange
This method deletes an exchange. When an exchange is deleted
all queue bindings on the exchange are cancelled.
PARAMETERS:
exchange: shortstr
RULE:
The exchange MUST exist. Attempting to delete a
non-existing exchange causes a channel exception.
if_unused: boolean
delete only if unused
If set, the server will only delete the exchange if it
has no queue bindings. If the exchange has queue
bindings the server does not delete it but raises a
channel exception instead.
RULE:
If set, the server SHOULD delete the exchange but
only if it has no queue bindings.
RULE:
If set, the server SHOULD raise a channel
exception if the exchange is in use.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
ticket: short
RULE:
The client MUST provide a valid access ticket
giving "active" access rights to the exchange's
access realm.
""" |
    args = AMQPWriter()
    # Use the channel's default ticket unless one was supplied.
    if ticket is not None:
        args.write_short(ticket)
    else:
        args.write_short(self.default_ticket)
    # Field order must match the spec definition of Exchange.Delete.
    args.write_shortstr(exchange)
    args.write_bit(if_unused)
    args.write_bit(nowait)
    # (40, 20) = Exchange.Delete
    self._send_method((40, 20), args)
    if not nowait:
        return self.wait(allowed_methods=[
            (40, 21), # Channel.exchange_delete_ok
        ]) |
<SYSTEM_TASK:>
bind queue to an exchange
<END_TASK>
<USER_TASK:>
Description:
def queue_bind(self, queue, exchange, routing_key='',
nowait=False, arguments=None, ticket=None):
"""
bind queue to an exchange
This method binds a queue to an exchange. Until a queue is
bound it will not receive any messages. In a classic
messaging model, store-and-forward queues are bound to a dest
exchange and subscription queues are bound to a dest_wild
exchange.
RULE:
A server MUST allow ignore duplicate bindings - that is,
two or more bind methods for a specific queue, with
identical arguments - without treating these as an error.
RULE:
If a bind fails, the server MUST raise a connection
exception.
RULE:
The server MUST NOT allow a durable queue to bind to a
transient exchange. If the client attempts this the server
MUST raise a channel exception.
RULE:
Bindings for durable queues are automatically durable and
the server SHOULD restore such bindings after a server
restart.
RULE:
If the client attempts to an exchange that was declared as
internal, the server MUST raise a connection exception
with reply code 530 (not allowed).
RULE:
The server SHOULD support at least 4 bindings per queue,
and ideally, impose no limit except as defined by
available resources.
PARAMETERS:
queue: shortstr
Specifies the name of the queue to bind. If the queue
name is empty, refers to the current queue for the
channel, which is the last declared queue.
RULE:
If the client did not previously declare a queue,
and the queue name in this method is empty, the
server MUST raise a connection exception with
reply code 530 (not allowed).
RULE:
If the queue does not exist the server MUST raise
a channel exception with reply code 404 (not
found).
exchange: shortstr
The name of the exchange to bind to.
RULE:
If the exchange does not exist the server MUST
raise a channel exception with reply code 404 (not
found).
routing_key: shortstr
message routing key
Specifies the routing key for the binding. The
routing key is used for routing messages depending on
the exchange configuration. Not all exchanges use a
routing key - refer to the specific exchange
documentation. If the routing key is empty and the
queue name is empty, the routing key will be the
current queue for the channel, which is the last
declared queue.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
arguments: table
arguments for binding
A set of arguments for the binding. The syntax and
semantics of these arguments depends on the exchange
class.
ticket: short
The client provides a valid access ticket giving
"active" access rights to the queue's access realm.
""" |
    # Avoid a mutable default: an empty table when no arguments are given.
    if arguments is None:
        arguments = {}
    args = AMQPWriter()
    # Use the channel's default ticket unless one was supplied.
    if ticket is not None:
        args.write_short(ticket)
    else:
        args.write_short(self.default_ticket)
    # Field order must match the spec definition of Queue.Bind.
    args.write_shortstr(queue)
    args.write_shortstr(exchange)
    args.write_shortstr(routing_key)
    args.write_bit(nowait)
    args.write_table(arguments)
    # (50, 20) = Queue.Bind
    self._send_method((50, 20), args)
    if not nowait:
        return self.wait(allowed_methods=[
            (50, 21), # Channel.queue_bind_ok
        ]) |
<SYSTEM_TASK:>
confirms a queue definition
<END_TASK>
<USER_TASK:>
Description:
def _queue_declare_ok(self, args):
"""
confirms a queue definition
This method confirms a Declare method and confirms the name of
the queue, essential for automatically-named queues.
PARAMETERS:
queue: shortstr
Reports the name of the queue. If the server generated
a queue name, this field contains that name.
message_count: long
number of messages in queue
Reports the number of messages in the queue, which
will be zero for newly-created queues.
consumer_count: long
number of consumers
Reports the number of active consumers for the queue.
Note that consumers can suspend activity
(Channel.Flow) in which case they do not appear in
this count.
""" |
# Decode the Declare-Ok reply fields in wire order:
# queue name (possibly server-generated), message count, consumer count.
queue = args.read_shortstr()
message_count = args.read_long()
consumer_count = args.read_long()
return queue, message_count, consumer_count |
<SYSTEM_TASK:>
start a queue consumer
<END_TASK>
<USER_TASK:>
Description:
def basic_consume(self, queue='', consumer_tag='', no_local=False,
no_ack=False, exclusive=False, nowait=False,
callback=None, ticket=None):
"""
start a queue consumer
This method asks the server to start a "consumer", which is a
transient request for messages from a specific queue.
Consumers last as long as the channel they were created on, or
until the client cancels them.
RULE:
The server SHOULD support at least 16 consumers per queue,
unless the queue was declared as private, and ideally,
impose no limit except as defined by available resources.
PARAMETERS:
queue: shortstr
Specifies the name of the queue to consume from. If
the queue name is null, refers to the current queue
for the channel, which is the last declared queue.
RULE:
If the client did not previously declare a queue,
and the queue name in this method is empty, the
server MUST raise a connection exception with
reply code 530 (not allowed).
consumer_tag: shortstr
Specifies the identifier for the consumer. The
consumer tag is local to a connection, so two clients
can use the same consumer tags. If this field is empty
the server will generate a unique tag.
RULE:
The tag MUST NOT refer to an existing consumer. If
the client attempts to create two consumers with
the same non-empty tag the server MUST raise a
connection exception with reply code 530 (not
allowed).
no_local: boolean
do not deliver own messages
If the no-local field is set the server will not send
messages to the client that published them.
no_ack: boolean
no acknowledgement needed
If this field is set the server does not expect
acknowledgments for messages. That is, when a message
is delivered to the client the server automatically and
silently acknowledges it on behalf of the client. This
functionality increases performance but at the cost of
reliability. Messages can get lost if a client dies
before it can deliver them to the application.
exclusive: boolean
request exclusive access
Request exclusive consumer access, meaning only this
consumer can access the queue.
RULE:
If the server cannot grant exclusive access to the
queue when asked, - because there are other
consumers active - it MUST raise a channel
exception with return code 403 (access refused).
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
callback: Python callable
function/method called with each delivered message
For each message delivered by the broker, the
callable will be called with a Message object
as the single argument. If no callable is specified,
messages are quietly discarded, no_ack should probably
be set to True in that case.
ticket: short
RULE:
The client MUST provide a valid access ticket
giving "read" access rights to the realm for the
queue.
""" |
# Serialize and send a Basic.Consume frame (class 60, method 20).
args = AMQPWriter()
# Use the default access ticket unless the caller provided one.
if ticket is not None:
args.write_short(ticket)
else:
args.write_short(self.default_ticket)
args.write_shortstr(queue)
args.write_shortstr(consumer_tag)
args.write_bit(no_local)
args.write_bit(no_ack)
args.write_bit(exclusive)
args.write_bit(nowait)
self._send_method((60, 20), args)
if not nowait:
# Consume-Ok returns the definitive tag (server-generated when
# the requested tag was empty).
consumer_tag = self.wait(allowed_methods=[
(60, 21), # Channel.basic_consume_ok
])
# Future deliveries for this tag are dispatched to `callback`.
self.callbacks[consumer_tag] = callback
return consumer_tag |
<SYSTEM_TASK:>
notify the client of a consumer message
<END_TASK>
<USER_TASK:>
Description:
def _basic_deliver(self, args, msg):
"""
notify the client of a consumer message
This method delivers a message to the client, via a consumer.
In the asynchronous message delivery model, the client starts
a consumer using the Consume method, then the server responds
with Deliver methods as and when messages arrive for that
consumer.
RULE:
The server SHOULD track the number of times a message has
been delivered to clients and when a message is
redelivered a certain number of times - e.g. 5 times -
without being acknowledged, the server SHOULD consider the
message to be unprocessable (possibly causing client
applications to abort), and move the message to a dead
letter queue.
PARAMETERS:
consumer_tag: shortstr
consumer tag
Identifier for the consumer, valid within the current
connection.
RULE:
The consumer tag is valid only within the channel
from which the consumer was created. I.e. a client
MUST NOT create a consumer in one channel and then
use it in another.
delivery_tag: longlong
server-assigned delivery tag
The server-assigned and channel-specific delivery tag
RULE:
The delivery tag is valid only within the channel
from which the message was received. I.e. a client
MUST NOT receive a message on one channel and then
acknowledge it on another.
RULE:
The server MUST NOT use a zero value for delivery
tags. Zero is reserved for client use, meaning "all
messages so far received".
redelivered: boolean
message is being redelivered
This indicates that the message has been previously
delivered to this or another client.
exchange: shortstr
Specifies the name of the exchange that the message
was originally published to.
routing_key: shortstr
Message routing key
Specifies the routing key name specified when the
message was published.
""" |
# Decode the Deliver frame fields in wire order.
consumer_tag = args.read_shortstr()
delivery_tag = args.read_longlong()
redelivered = args.read_bit()
exchange = args.read_shortstr()
routing_key = args.read_shortstr()
# Attach routing metadata to the message so the consumer callback can
# inspect it and acknowledge via delivery_tag.
msg.delivery_info = {
'channel': self,
'consumer_tag': consumer_tag,
'delivery_tag': delivery_tag,
'redelivered': redelivered,
'exchange': exchange,
'routing_key': routing_key,
}
# Messages for an unknown consumer tag are quietly discarded.
func = self.callbacks.get(consumer_tag, None)
if func is not None:
func(msg) |
<SYSTEM_TASK:>
direct access to a queue
<END_TASK>
<USER_TASK:>
Description:
def basic_get(self, queue='', no_ack=False, ticket=None):
"""
direct access to a queue
This method provides a direct access to the messages in a
queue using a synchronous dialogue that is designed for
specific types of application where synchronous functionality
is more important than performance.
PARAMETERS:
queue: shortstr
Specifies the name of the queue to consume from. If
the queue name is null, refers to the current queue
for the channel, which is the last declared queue.
RULE:
If the client did not previously declare a queue,
and the queue name in this method is empty, the
server MUST raise a connection exception with
reply code 530 (not allowed).
no_ack: boolean
no acknowledgement needed
If this field is set the server does not expect
acknowledgments for messages. That is, when a message
is delivered to the client the server automatically and
silently acknowledges it on behalf of the client. This
functionality increases performance but at the cost of
reliability. Messages can get lost if a client dies
before it can deliver them to the application.
ticket: short
RULE:
The client MUST provide a valid access ticket
giving "read" access rights to the realm for the
queue.
Non-blocking, returns a message object, or None.
""" |
# Serialize and send a Basic.Get frame (class 60, method 70), then wait
# synchronously for either a message (Get-Ok) or an empty reply.
args = AMQPWriter()
if ticket is not None:
args.write_short(ticket)
else:
args.write_short(self.default_ticket)
args.write_shortstr(queue)
args.write_bit(no_ack)
self._send_method((60, 70), args)
return self.wait(allowed_methods=[
(60, 71), # Channel.basic_get_ok
(60, 72), # Channel.basic_get_empty
]) |
<SYSTEM_TASK:>
publish a message
<END_TASK>
<USER_TASK:>
Description:
def basic_publish(self, msg, exchange='', routing_key='',
mandatory=False, immediate=False, ticket=None):
"""
publish a message
This method publishes a message to a specific exchange. The
message will be routed to queues as defined by the exchange
configuration and distributed to any active consumers when the
transaction, if any, is committed.
PARAMETERS:
exchange: shortstr
Specifies the name of the exchange to publish to. The
exchange name can be empty, meaning the default
exchange. If the exchange name is specified, and that
exchange does not exist, the server will raise a
channel exception.
RULE:
The server MUST accept a blank exchange name to
mean the default exchange.
RULE:
If the exchange was declared as an internal
exchange, the server MUST raise a channel
exception with a reply code 403 (access refused).
RULE:
The exchange MAY refuse basic content in which
case it MUST raise a channel exception with reply
code 540 (not implemented).
routing_key: shortstr
Message routing key
Specifies the routing key for the message. The
routing key is used for routing messages depending on
the exchange configuration.
mandatory: boolean
indicate mandatory routing
This flag tells the server how to react if the message
cannot be routed to a queue. If this flag is True, the
server will return an unroutable message with a Return
method. If this flag is False, the server silently
drops the message.
RULE:
The server SHOULD implement the mandatory flag.
immediate: boolean
request immediate delivery
This flag tells the server how to react if the message
cannot be routed to a queue consumer immediately. If
this flag is set, the server will return an
undeliverable message with a Return method. If this
flag is zero, the server will queue the message, but
with no guarantee that it will ever be consumed.
RULE:
The server SHOULD implement the immediate flag.
ticket: short
RULE:
The client MUST provide a valid access ticket
giving "write" access rights to the access realm
for the exchange.
""" |
# Serialize and send a Basic.Publish frame (class 60, method 40) with the
# message content attached.  Publish is asynchronous: no reply is awaited.
args = AMQPWriter()
if ticket is not None:
args.write_short(ticket)
else:
args.write_short(self.default_ticket)
args.write_shortstr(exchange)
args.write_shortstr(routing_key)
args.write_bit(mandatory)
args.write_bit(immediate)
self._send_method((60, 40), args, msg) |
<SYSTEM_TASK:>
return a failed message
<END_TASK>
<USER_TASK:>
Description:
def _basic_return(self, args, msg):
"""
return a failed message
This method returns an undeliverable message that was
published with the "immediate" flag set, or an unroutable
message published with the "mandatory" flag set. The reply
code and text provide information about the reason that the
message was undeliverable.
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
exchange: shortstr
Specifies the name of the exchange that the message
was originally published to.
routing_key: shortstr
Message routing key
Specifies the routing key name specified when the
message was published.
""" |
# Decode the Return frame fields in wire order and queue the failed
# message for the application to pick up from `returned_messages`.
reply_code = args.read_short()
reply_text = args.read_shortstr()
exchange = args.read_shortstr()
routing_key = args.read_shortstr()
self.returned_messages.put(
(reply_code, reply_text, exchange, routing_key, msg)
) |
<SYSTEM_TASK:>
Wait for a method from the server destined for
<END_TASK>
<USER_TASK:>
Description:
def _wait_method(self, channel_id, allowed_methods):
"""
Wait for a method from the server destined for
a particular channel.
""" |
#
# Check the channel's deferred methods
#
# First drain any method previously queued for this channel that matches;
# (20, 40) is Channel.Close, which must always be handled.
method_queue = self.channels[channel_id].method_queue
for queued_method in method_queue:
method_sig = queued_method[0]
if (allowed_methods is None) \
or (method_sig in allowed_methods) \
or (method_sig == (20, 40)):
# Removing while iterating is safe here because we return
# immediately afterwards.
method_queue.remove(queued_method)
return queued_method
#
# Nothing queued, need to wait for a method from the peer
#
while True:
channel, method_sig, args, content = \
self.method_reader.read_method()
if (channel == channel_id) \
and ((allowed_methods is None) \
or (method_sig in allowed_methods) \
or (method_sig == (20, 40))):
return method_sig, args, content
#
# Certain methods like basic_return should be dispatched
# immediately rather than being queued, even if they're not
# one of the 'allowed_methods' we're looking for.
#
if (channel != 0) and (method_sig in Channel._IMMEDIATE_METHODS):
self.channels[channel].dispatch_method(method_sig, args, content)
continue
#
# Not the channel and/or method we were looking for. Queue
# this method for later
#
self.channels[channel].method_queue.append((method_sig, args, content))
#
# If we just queued up a method for channel 0 (the Connection
# itself) it's probably a close method in reaction to some
# error, so deal with it right away.
#
if channel == 0:
self.wait() |
<SYSTEM_TASK:>
Fetch a Channel object identified by the numeric channel_id, or
<END_TASK>
<USER_TASK:>
Description:
def channel(self, channel_id=None):
"""
Fetch a Channel object identified by the numeric channel_id, or
create that object if it doesn't already exist.
""" |
# Reuse an existing Channel when one is registered under this id;
# otherwise construct a new one (Channel registers itself on creation).
if channel_id in self.channels:
return self.channels[channel_id]
return Channel(self, channel_id) |
<SYSTEM_TASK:>
request a connection close
<END_TASK>
<USER_TASK:>
Description:
def _close(self, args):
"""
request a connection close
This method indicates that the sender wants to close the
connection. This may be due to internal conditions (e.g. a
forced shut-down) or due to an error handling a specific
method, i.e. an exception. When a close is due to an
exception, the sender provides the class and method id of the
method which caused the exception.
RULE:
After sending this method any received method except the
Close-OK method MUST be discarded.
RULE:
The peer sending this method MAY use a counter or timeout
to detect failure of the other peer to respond correctly
with the Close-OK method.
RULE:
When a server receives the Close method from a client it
MUST delete all server-side resources associated with the
client's context. A client CANNOT reconnect to a context
after sending or receiving a Close method.
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
class_id: short
failing method class
When the close is provoked by a method exception, this
is the class of the method.
method_id: short
failing method ID
When the close is provoked by a method exception, this
is the ID of the method.
""" |
# Decode the server's Connection.Close fields, acknowledge with
# Close-Ok, then surface the failure to the caller as an exception.
reply_code = args.read_short()
reply_text = args.read_shortstr()
class_id = args.read_short()
method_id = args.read_short()
self._x_close_ok()
raise AMQPConnectionException(reply_code, reply_text, (class_id, method_id)) |
<SYSTEM_TASK:>
open connection to virtual host
<END_TASK>
<USER_TASK:>
Description:
def _x_open(self, virtual_host, capabilities='', insist=False):
"""
open connection to virtual host
This method opens a connection to a virtual host, which is a
collection of resources, and acts to separate multiple
application domains within a server.
RULE:
The client MUST open the context before doing any work on
the connection.
PARAMETERS:
virtual_host: shortstr
virtual host name
The name of the virtual host to work with.
RULE:
If the server supports multiple virtual hosts, it
MUST enforce a full separation of exchanges,
queues, and all associated entities per virtual
host. An application, connected to a specific
virtual host, MUST NOT be able to access resources
of another virtual host.
RULE:
The server SHOULD verify that the client has
permission to access the specified virtual host.
RULE:
The server MAY configure arbitrary limits per
virtual host, such as the number of each type of
entity that may be used, per connection and/or in
total.
capabilities: shortstr
required capabilities
The client may specify a number of capability names,
delimited by spaces. The server can use this string
to decide how to process the client's connection request.
insist: boolean
insist on connecting to server
In a configuration with multiple load-sharing servers,
the server may respond to a Connection.Open method
with a Connection.Redirect. The insist option tells
the server that the client is insisting on a
connection to the specified server.
RULE:
When the client uses the insist option, the server
SHOULD accept the client connection unless it is
technically unable to do so.
""" |
# Serialize and send a Connection.Open frame (class 10, method 40), then
# wait for either Open-Ok or a Redirect to another server.
args = AMQPWriter()
args.write_shortstr(virtual_host)
args.write_shortstr(capabilities)
args.write_bit(insist)
self._send_method((10, 40), args)
return self.wait(allowed_methods=[
(10, 41), # Connection.open_ok
(10, 50), # Connection.redirect
]) |
<SYSTEM_TASK:>
signal that the connection is ready
<END_TASK>
<USER_TASK:>
Description:
def _open_ok(self, args):
"""
signal that the connection is ready
This method signals to the client that the connection is ready
for use.
PARAMETERS:
known_hosts: shortstr
""" |
# Record the server-advertised known-hosts list; returning None signals
# the wait loop that the connection handshake is complete.
self.known_hosts = args.read_shortstr()
AMQP_LOGGER.debug('Open OK! known_hosts [%s]' % self.known_hosts)
return None |
<SYSTEM_TASK:>
asks the client to use a different server
<END_TASK>
<USER_TASK:>
Description:
def _redirect(self, args):
"""
asks the client to use a different server
This method redirects the client to another server, based on
the requested virtual host and/or capabilities.
RULE:
When getting the Connection.Redirect method, the client
SHOULD reconnect to the host specified, and if that host
is not present, to any of the hosts specified in the
known-hosts list.
PARAMETERS:
host: shortstr
server to connect to
Specifies the server to connect to. This is an IP
address or a DNS name, optionally followed by a colon
and a port number. If no port number is specified, the
client should use the default port number for the
protocol.
known_hosts: shortstr
""" |
# Decode the redirect target and the updated known-hosts list, and hand
# the new host back to the caller so it can reconnect there.
host = args.read_shortstr()
self.known_hosts = args.read_shortstr()
AMQP_LOGGER.debug('Redirected to [%s], known_hosts [%s]' % (host, self.known_hosts))
return host |
<SYSTEM_TASK:>
start connection negotiation
<END_TASK>
<USER_TASK:>
Description:
def _start(self, args):
"""
start connection negotiation
This method starts the connection negotiation process by
telling the client the protocol version that the server
proposes, along with a list of security mechanisms which the
client can use for authentication.
RULE:
If the client cannot handle the protocol version suggested
by the server it MUST close the socket connection.
RULE:
The server MUST provide a protocol version that is lower
than or equal to that requested by the client in the
protocol header. If the server cannot support the
specified protocol it MUST NOT send this method, but MUST
close the socket connection.
PARAMETERS:
version_major: octet
protocol major version
The protocol major version that the server agrees to
use, which cannot be higher than the client's major
version.
version_minor: octet
protocol minor version
The protocol minor version that the server agrees to
use, which cannot be higher than the client's minor
version.
server_properties: table
server properties
mechanisms: longstr
available security mechanisms
A list of the security mechanisms that the server
supports, delimited by spaces. Currently ASL supports
these mechanisms: PLAIN.
locales: longstr
available message locales
A list of the message locales that the server
supports, delimited by spaces. The locale defines the
language in which the server will send reply texts.
RULE:
All servers MUST support at least the en_US
locale.
""" |
# Decode the server's Connection.Start fields in wire order.  The
# space-delimited mechanism and locale strings are split into lists.
self.version_major = args.read_octet()
self.version_minor = args.read_octet()
self.server_properties = args.read_table()
self.mechanisms = args.read_longstr().split(' ')
self.locales = args.read_longstr().split(' ')
AMQP_LOGGER.debug('Start from server, version: %d.%d, properties: %s, mechanisms: %s, locales: %s'
% (self.version_major, self.version_minor,
str(self.server_properties), self.mechanisms, self.locales)) |
<SYSTEM_TASK:>
select security mechanism and locale
<END_TASK>
<USER_TASK:>
Description:
def _x_start_ok(self, client_properties, mechanism, response, locale):
"""
select security mechanism and locale
This method selects a SASL security mechanism. ASL uses SASL
(RFC2222) to negotiate authentication and encryption.
PARAMETERS:
client_properties: table
client properties
mechanism: shortstr
selected security mechanism
A single security mechanisms selected by the client,
which must be one of those specified by the server.
RULE:
The client SHOULD authenticate using the highest-
level security profile it can handle from the list
provided by the server.
RULE:
The mechanism field MUST contain one of the
security mechanisms proposed by the server in the
Start method. If it doesn't, the server MUST close
the socket.
response: longstr
security response data
A block of opaque data passed to the security
mechanism. The contents of this data are defined by
the SASL security mechanism. For the PLAIN security
mechanism this is defined as a field table holding two
fields, LOGIN and PASSWORD.
locale: shortstr
selected message locale
A single message locale selected by the client, which
must be one of those specified by the server.
""" |
# Serialize and send a Connection.Start-Ok frame (class 10, method 11)
# carrying the chosen SASL mechanism, its response data, and the locale.
args = AMQPWriter()
args.write_table(client_properties)
args.write_shortstr(mechanism)
args.write_longstr(response)
args.write_shortstr(locale)
self._send_method((10, 11), args) |
<SYSTEM_TASK:>
propose connection tuning parameters
<END_TASK>
<USER_TASK:>
Description:
def _tune(self, args):
"""
propose connection tuning parameters
This method proposes a set of connection configuration values
to the client. The client can accept and/or adjust these.
PARAMETERS:
channel_max: short
proposed maximum channels
The maximum total number of channels that the server
allows per connection. Zero means that the server does
not impose a fixed limit, but the number of allowed
channels may be limited by available server resources.
frame_max: long
proposed maximum frame size
The largest frame size that the server proposes for
the connection. The client can negotiate a lower
value. Zero means that the server does not impose any
specific limit but may reject very large frames if it
cannot allocate resources for them.
RULE:
Until the frame-max has been negotiated, both
peers MUST accept frames of up to 4096 octets
large. The minimum non-zero value for the frame-
max field is 4096.
heartbeat: short
desired heartbeat delay
The delay, in seconds, of the connection heartbeat
that the server wants. Zero means the server does not
want a heartbeat.
""" |
# Accept the server's tuning values; a zero from the server means "no
# fixed limit", in which case we keep our own current setting.
self.channel_max = args.read_short() or self.channel_max
self.frame_max = args.read_long() or self.frame_max
# Propagate the negotiated frame size to the frame writer.
self.method_writer.frame_max = self.frame_max
self.heartbeat = args.read_short()
# Reply with Tune-Ok; heartbeat 0 declines heartbeats.
self._x_tune_ok(self.channel_max, self.frame_max, 0) |
<SYSTEM_TASK:>
Send a method for our channel.
<END_TASK>
<USER_TASK:>
Description:
def _send_method(self, method_sig, args=bytes(), content=None):
"""
Send a method for our channel.
""" |
# Accept either an AMQPWriter or raw bytes for the method arguments,
# then delegate framing to the connection's method writer.
if isinstance(args, AMQPWriter):
args = args.getvalue()
self.connection.method_writer.write_method(self.channel_id,
method_sig, args, content) |
<SYSTEM_TASK:>
Write out an AMQP frame.
<END_TASK>
<USER_TASK:>
Description:
def write_frame(self, frame_type, channel, payload):
"""
Write out an AMQP frame.
""" |
# Emit one wire frame: type octet, channel short, payload size long,
# payload bytes, then the 0xce frame-end octet (big-endian throughout).
size = len(payload)
self._write(pack('>BHI%dsB' % size,
frame_type, channel, size, payload, 0xce)) |
<SYSTEM_TASK:>
Wrap the socket in an SSL object, either the
<END_TASK>
<USER_TASK:>
Description:
def _setup_transport(self):
"""
Wrap the socket in an SSL object, either the
new Python 2.6 version, or the older Python 2.5 and
lower version.
""" |
# Prefer the Python 2.6+ ssl module (with optional user-provided
# wrap_socket options) and perform the handshake eagerly; otherwise fall
# back to the legacy socket.ssl() wrapper.
if HAVE_PY26_SSL:
if hasattr(self, 'sslopts'):
self.sslobj = ssl.wrap_socket(self.sock, **self.sslopts)
else:
self.sslobj = ssl.wrap_socket(self.sock)
self.sslobj.do_handshake()
else:
self.sslobj = socket.ssl(self.sock) |
<SYSTEM_TASK:>
Write a boolean value.
<END_TASK>
<USER_TASK:>
Description:
def write_bit(self, b):
"""
Write a boolean value.
""" |
# Normalize the value to 0/1.
if b:
b = 1
else:
b = 0
# Bits are packed least-significant-first into octets; start a new
# octet every 8 bits.
shift = self.bitcount % 8
if shift == 0:
self.bits.append(0)
self.bits[-1] |= (b << shift)
self.bitcount += 1 |
<SYSTEM_TASK:>
Generate a configuration file.
<END_TASK>
<USER_TASK:>
Description:
def init(name, description, bucket, timeout, memory, stages, requirements,
function, runtime, config_file, **kwargs):
"""Generate a configuration file.""" |
# Refuse to overwrite an existing configuration.
if os.path.exists(config_file):
raise RuntimeError('Please delete the old version {} if you want to '
'reconfigure your project.'.format(config_file))
# `function` is given as "module:app"; derive a default project name
# from the module, with dashes instead of underscores.
module, app = function.split(':')
if not name:
name = module.replace('_', '-')
if not re.match('^[a-zA-Z][-a-zA-Z0-9]*$', name):
raise ValueError('The name {} is invalid, only letters, numbers and '
'dashes are allowed.'.format(name))
# Generate a unique-ish S3 bucket name when none was provided.
if not bucket:
random_suffix = ''.join(
random.choice(string.ascii_uppercase + string.digits)
for n in range(8))
bucket = '{}-{}'.format(name, random_suffix)
stages = [s.strip() for s in stages.split(',')]
# Default the Lambda runtime to match the interpreter running this tool.
if runtime is None:
if sys.version_info[0] == 2: # pragma: no cover
runtime = 'python2.7'
else:
runtime = 'python3.6'
# generate slam.yaml
template_file = os.path.join(os.path.dirname(__file__),
'templates/slam.yaml')
with open(template_file) as f:
template = f.read()
template = render_template(template, name=name, description=description,
module=module, app=app, bucket=bucket,
timeout=timeout, memory=memory,
requirements=requirements, stages=stages,
devstage=stages[0], runtime=runtime)
with open(config_file, 'wt') as f:
f.write(template)
# plugins
config = _load_config(config_file)
# NOTE(review): this loop variable shadows the `name` parameter above;
# `name` is not used again afterwards, but renaming would be safer.
for name, plugin in plugins.items():
# write plugin documentation as a comment in config file
with open(config_file, 'at') as f:
f.write('\n\n# ' + (plugin.__doc__ or name).replace(
'\n', '\n# ') + '\n')
# Pass through only the kwargs this plugin's init declared interest in,
# and append any plugin-generated config as a YAML section.
if hasattr(plugin, 'init'):
arguments = {k: v for k, v in kwargs.items()
if k in getattr(plugin.init, '_argnames', [])}
plugin_config = plugin.init.func(config=config, **arguments)
if plugin_config:
with open(config_file, 'at') as f:
yaml.dump({name: plugin_config}, f,
default_flow_style=False)
print('The configuration file for your project has been generated. '
'Remember to add {} to source control.'.format(config_file)) |
<SYSTEM_TASK:>
Generate a handler.py file for the lambda function start up.
<END_TASK>
<USER_TASK:>
Description:
def _generate_lambda_handler(config, output='.slam/handler.py'):
"""Generate a handler.py file for the lambda function start up.""" |
# Determine what the start up code is. The default is to just run the
# function, but it can be overriden by a plugin such as wsgi for a more
# elaborated way to run the function.
run_function = _run_lambda_function
for name, plugin in plugins.items():
if name in config and hasattr(plugin, 'run_lambda_function'):
run_function = plugin.run_lambda_function
# Take the function's source minus its `def` line, to be spliced into
# the handler template verbatim.
run_code = ''.join(inspect.getsourcelines(run_function)[0][1:])
# generate handler.py
with open(os.path.join(os.path.dirname(__file__),
'templates/handler.py.template')) as f:
template = f.read()
# Embed the full config as compact JSON so the handler is self-contained.
template = render_template(template, module=config['function']['module'],
app=config['function']['app'],
run_lambda_function=run_code,
config_json=json.dumps(config,
separators=(',', ':')))
with open(output, 'wt') as f:
f.write(template + '\n') |
<SYSTEM_TASK:>
Deploy the project to the development stage.
<END_TASK>
<USER_TASK:>
Description:
def deploy(stage, lambda_package, no_lambda, rebuild_deps, config_file):
"""Deploy the project to the development stage.""" |
config = _load_config(config_file)
# Default to the project's development stage.
if stage is None:
stage = config['devstage']
s3 = boto3.client('s3')
cfn = boto3.client('cloudformation')
region = _get_aws_region()
# obtain previous deployment if it exists
previous_deployment = None
try:
previous_deployment = cfn.describe_stacks(
StackName=config['name'])['Stacks'][0]
except botocore.exceptions.ClientError:
# No stack yet: this is a first-time deployment.
pass
# build lambda package if required
built_package = False
new_package = True
if lambda_package is None and not no_lambda:
print("Building lambda package...")
lambda_package = _build(config, rebuild_deps=rebuild_deps)
built_package = True
elif lambda_package is None:
# preserve package from previous deployment
new_package = False
lambda_package = _get_from_stack(previous_deployment, 'Parameter',
'LambdaS3Key')
# create S3 bucket if it doesn't exist yet
bucket = config['aws']['s3_bucket']
_ensure_bucket_exists(s3, bucket, region)
# upload lambda package to S3 (the local filename doubles as the S3 key)
if new_package:
s3.upload_file(lambda_package, bucket, lambda_package)
if built_package:
# we created the package, so now that is on S3 we can delete it
os.remove(lambda_package)
# prepare cloudformation template
template_body = get_cfn_template(config)
parameters = [
{'ParameterKey': 'LambdaS3Bucket', 'ParameterValue': bucket},
{'ParameterKey': 'LambdaS3Key', 'ParameterValue': lambda_package},
]
# Carry forward each stage's published version; the stage being deployed
# always points at $LATEST.
stages = list(config['stage_environments'].keys())
stages.sort()
for s in stages:
param = s.title() + 'Version'
if s != stage:
v = _get_from_stack(previous_deployment, 'Parameter', param) \
if previous_deployment else '$LATEST'
v = v or '$LATEST'
else:
v = '$LATEST'
parameters.append({'ParameterKey': param, 'ParameterValue': v})
# run the cloudformation template
if previous_deployment is None:
print('Deploying {}:{}...'.format(config['name'], stage))
cfn.create_stack(StackName=config['name'], TemplateBody=template_body,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM'])
waiter = cfn.get_waiter('stack_create_complete')
else:
print('Updating {}:{}...'.format(config['name'], stage))
cfn.update_stack(StackName=config['name'], TemplateBody=template_body,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM'])
waiter = cfn.get_waiter('stack_update_complete')
# wait for cloudformation to do its thing
try:
waiter.wait(StackName=config['name'])
except botocore.exceptions.ClientError:
# the update failed, so we remove the lambda package from S3
if built_package:
s3.delete_object(Bucket=bucket, Key=lambda_package)
raise
else:
if previous_deployment and new_package:
# the update succeeded, so it is safe to delete the lambda package
# used by the previous deployment
old_pkg = _get_from_stack(previous_deployment, 'Parameter',
'LambdaS3Key')
s3.delete_object(Bucket=bucket, Key=old_pkg)
# we are done, show status info and exit
_print_status(config) |
<SYSTEM_TASK:>
Publish a version of the project to a stage.
<END_TASK>
<USER_TASK:>
Description:
def publish(version, stage, config_file):
"""Publish a version of the project to a stage.""" |
config = _load_config(config_file)
cfn = boto3.client('cloudformation')
# `version` may be a stage name (publish that stage's version) or a
# numeric Lambda version; default is the development stage.
if version is None:
version = config['devstage']
elif version not in config['stage_environments'].keys() and \
not version.isdigit():
raise ValueError('Invalid version. Use a stage name or a numeric '
'version number.')
if version == stage:
raise ValueError('Cannot deploy a stage into itself.')
# obtain previous deployment
try:
previous_deployment = cfn.describe_stacks(
StackName=config['name'])['Stacks'][0]
except botocore.exceptions.ClientError:
raise RuntimeError('This project has not been deployed yet.')
# preserve package from previous deployment
bucket = _get_from_stack(previous_deployment, 'Parameter',
'LambdaS3Bucket')
lambda_package = _get_from_stack(previous_deployment, 'Parameter',
'LambdaS3Key')
# prepare cloudformation template
template_body = get_cfn_template(config)
parameters = [
{'ParameterKey': 'LambdaS3Bucket', 'ParameterValue': bucket},
{'ParameterKey': 'LambdaS3Key', 'ParameterValue': lambda_package},
]
stages = list(config['stage_environments'].keys())
stages.sort()
# Keep other stages pinned to their current versions; only the target
# stage gets the requested version.
for s in stages:
param = s.title() + 'Version'
if s != stage:
v = _get_from_stack(previous_deployment, 'Parameter', param) \
if previous_deployment else '$LATEST'
v = v or '$LATEST'
else:
if version.isdigit():
# explicit version number
v = version
else:
# publish version from a stage
v = _get_from_stack(previous_deployment, 'Parameter',
version.title() + 'Version')
if v == '$LATEST':
# publish a new version from $LATEST
lmb = boto3.client('lambda')
v = lmb.publish_version(FunctionName=_get_from_stack(
previous_deployment, 'Output', 'FunctionArn'))[
'Version']
parameters.append({'ParameterKey': param, 'ParameterValue': v})
# run the cloudformation template
print('Publishing {}:{} to {}...'.format(config['name'], version, stage))
cfn.update_stack(StackName=config['name'], TemplateBody=template_body,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM'])
waiter = cfn.get_waiter('stack_update_complete')
# wait for cloudformation to do its thing
try:
waiter.wait(StackName=config['name'])
except botocore.exceptions.ClientError:
raise
# we are done, show status info and exit
_print_status(config) |
<SYSTEM_TASK:>
find any installed plugins and register them.
<END_TASK>
<USER_TASK:>
Description:
def register_plugins():
    """Find any installed slam plugins and register them.

    Plugins are discovered through the ``slam_plugins`` entry point group.
    A plugin may extend the ``init`` command by exposing an ``init``
    attribute carrying ``_arguments``/``_argnames`` metadata.
    """
    if not pkg_resources:  # pragma: no cover
        return
    for entry_point in pkg_resources.iter_entry_points('slam_plugins'):
        plugin = entry_point.load()
        plugin_init = getattr(plugin, 'init', None)
        # merge the plugin's init options into the main init command
        if plugin_init is not None and hasattr(plugin_init, '_arguments'):
            for arg_spec in plugin_init._arguments:
                init.parser.add_argument(*arg_spec[0], **arg_spec[1])
            init._arguments += plugin_init._arguments
            init._argnames += plugin_init._argnames
        plugins[entry_point.name] = plugin
<SYSTEM_TASK:>
Synchronizing the component states with AVS
<END_TASK>
<USER_TASK:>
Description:
def synchronise_device_state(self, device_state, authentication_headers):
    """
    Synchronizing the component states with AVS

    Components state must be synchronised with AVS after establishing the
    downchannel stream in order to create a persistent connection with AVS.
    Note that currently this function is paying lip-service synchronising
    the device state: the device state is hard-coded.

    Args:
        device_state: context payload describing the device's components.
        authentication_headers: dict of auth headers merged into the request.
    """
    # SynchronizeState event envelope; messageId is deliberately empty here.
    payload = {
        'context': device_state,
        'event': {
            'header': {
                'namespace': 'System',
                'name': 'SynchronizeState',
                'messageId': ''
            },
            'payload': {}
        }
    }
    # Only the JSON metadata part is sent; the fixed 'boundary' string keeps
    # the multipart framing predictable.
    multipart_data = MultipartEncoder(
        fields=[
            (
                'metadata', (
                    'metadata',
                    json.dumps(payload),
                    'application/json',
                    {'Content-Disposition': "form-data; name='metadata'"}
                )
            ),
        ],
        boundary='boundary'
    )
    headers = {
        **authentication_headers,
        'Content-Type': multipart_data.content_type
    }
    # NOTE(review): the events endpoint is requested with 'GET' here while
    # send_audio_file uses 'POST' against the same path -- confirm that a
    # GET with a body is intentional.
    stream_id = self.connection.request(
        'GET',
        '/v20160207/events',
        body=multipart_data,
        headers=headers,
    )
    response = self.connection.get_response(stream_id)
    # AVS answers 204 No Content (nothing to say) or 200 OK.
    assert response.status in [http.client.NO_CONTENT, http.client.OK]
<SYSTEM_TASK:>
Send audio to AVS
<END_TASK>
<USER_TASK:>
Description:
def send_audio_file(
    self, audio_file, device_state, authentication_headers,
    dialog_request_id, distance_profile, audio_format
):
    """
    Send audio to AVS

    The file-like object are steaming uploaded for improved latency.

    Args:
        audio_file: file-like object with the captured audio; streamed as
            the 'audio' multipart field.
        device_state: context payload describing the device's components.
        authentication_headers: dict of auth headers merged into the request.
        dialogRequestId / distance_profile / audio_format: forwarded into
            the Recognize event header/payload.

    Returns:
        bytes -- wav audio bytes returned from AVS
    """
    # SpeechRecognizer.Recognize event envelope.
    payload = {
        'context': device_state,
        'event': {
            'header': {
                'namespace': 'SpeechRecognizer',
                'name': 'Recognize',
                'messageId': self.generate_message_id(),
                'dialogRequestId': dialog_request_id,
            },
            'payload': {
                'profile': distance_profile,
                'format': audio_format
            }
        }
    }
    # Two multipart fields: JSON request metadata plus the raw audio stream.
    # NOTE(review): 'application/json;' carries a trailing semicolon --
    # presumably intentional for AVS; confirm before normalising.
    multipart_data = MultipartEncoder(
        fields=[
            (
                'request', (
                    'request',
                    json.dumps(payload),
                    'application/json;',
                    {'Content-Disposition': "form-data; name='request'"}
                ),
            ),
            (
                'audio', (
                    'audio',
                    audio_file,
                    'application/octet-stream',
                    {'Content-Disposition': "form-data; name='audio'"}
                )
            ),
        ],
        boundary='boundary',
    )
    headers = {
        **authentication_headers,
        'Content-Type': multipart_data.content_type
    }
    stream_id = self.connection.request(
        'POST',
        '/v20160207/events',
        headers=headers,
        body=multipart_data,
    )
    response = self.connection.get_response(stream_id)
    return self.parse_response(response)
<SYSTEM_TASK:>
Retrieve the access token from AVS.
<END_TASK>
<USER_TASK:>
Description:
def retrieve_api_token(self):
    """
    Retrieve the access token from AVS.

    This function is memoized, so the value returned by the function will
    be remembered and returned by subsequent calls until the memo expires.
    This is because the access token lasts for one hour, then a new token
    needs to be requested.

    Decorators:
        helpers.expiring_memo

    Returns:
        str -- The access token for communicating with AVS

    Raises:
        requests.HTTPError: if the token endpoint returns an error status.
    """
    payload = self.oauth2_manager.get_access_token_params(
        refresh_token=self.refresh_token
    )
    response = requests.post(
        self.oauth2_manager.access_token_url, json=payload
    )
    response.raise_for_status()
    # response.json() honours the charset advertised by the server, unlike
    # manually re-decoding via json.loads(response.text).
    return response.json()['access_token']
<SYSTEM_TASK:>
if arguments are given, adds a hash of the args to the key.
<END_TASK>
<USER_TASK:>
Description:
def _prepare_key(key, *args, **kwargs):
"""
if arguments are given, adds a hash of the args to the key.
""" |
if not args and not kwargs:
return key
items = sorted(kwargs.items())
hashable_args = (args, tuple(items))
args_key = hashlib.md5(pickle.dumps(hashable_args)).hexdigest()
return "%s/args:%s" % (key, args_key) |
<SYSTEM_TASK:>
Set up dual logging to console and to logfile.
<END_TASK>
<USER_TASK:>
Description:
def setup(logdir='log'):
    """Set up dual logging to console and to logfile.

    When this function is called, it first creates the given directory. It
    then creates a logfile and passes all log messages to come to it. The
    logfile name encodes the date and time when it was created, for example
    "20181115-153559.log". All messages with a log level of at least
    "WARNING" are also forwarded to the console.

    Args:
        logdir: path of the directory where to store the log files. Both a
            relative or an absolute path may be specified. If a relative
            path is specified, it is interpreted relative to the working
            directory. If no directory is given, the logs are written to a
            folder called "log" in the working directory.
    """
    # Create the root logger.
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    # Validate the given directory.
    logdir = os.path.normpath(logdir)
    # Create a folder for the logfiles. exist_ok avoids the race between an
    # existence check and the creation (and a crash on concurrent setup).
    os.makedirs(logdir, exist_ok=True)
    # Construct the logfile name from the current timestamp.
    timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
    logfile = os.path.join(logdir, timestamp + '.log')
    # Set up logging to the logfile (rotated at 10 MiB, 100 backups kept).
    filehandler = logging.handlers.RotatingFileHandler(
        filename=logfile,
        maxBytes=10 * 1024 * 1024,
        backupCount=100)
    filehandler.setLevel(logging.DEBUG)
    fileformatter = logging.Formatter(
        '%(asctime)s %(levelname)-8s: %(message)s')
    filehandler.setFormatter(fileformatter)
    logger.addHandler(filehandler)
    # Set up logging to the console (warnings and above only).
    streamhandler = logging.StreamHandler()
    streamhandler.setLevel(logging.WARNING)
    streamformatter = logging.Formatter('%(levelname)s: %(message)s')
    streamhandler.setFormatter(streamformatter)
    logger.addHandler(streamhandler)
<SYSTEM_TASK:>
Search specific project files and extract versions to check.
<END_TASK>
<USER_TASK:>
Description:
def get_versions() -> FileVersionResult:
    """
    Search specific project files and extract versions to check.

    :return: A FileVersionResult object for reporting.
    """
    versions_discovered = OrderedDict()
    tally = Counter()
    for version_obj in version_objects:
        found = version_obj.get_version()
        versions_discovered[version_obj.key_name] = found
        tally.update([found])
    # All sources agree when exactly one distinct version was seen.
    uniform = len(tally) == 1
    version_str = next(iter(tally)) if uniform else None
    return FileVersionResult(
        uniform=uniform,
        version_details=versions_discovered,
        version_result=version_str,
    )
<SYSTEM_TASK:>
Takes any number of strings, and returns the first one
<END_TASK>
<USER_TASK:>
Description:
def supportedChars(*tests):
    """
    Takes any number of strings, and returns the first one
    the terminal encoding supports. If none are supported
    it returns '?' the length of the first string.
    """
    # sys.stdout may have encoding set to None (piped output) or lack the
    # attribute entirely (stdout replaced by a StringIO); falling back to
    # UTF-8 avoids str.encode raising TypeError/AttributeError.
    encoding = getattr(sys.stdout, 'encoding', None) or 'utf-8'
    for test in tests:
        try:
            test.encode(encoding)
            return test
        except UnicodeEncodeError:
            pass
    return '?' * len(tests[0])
<SYSTEM_TASK:>
Build a cached dict with settings.INSTALLED_APPS as keys
<END_TASK>
<USER_TASK:>
Description:
def app_templates_dirs(self):
    """
    Build a cached dict keyed by both the name and the label of each
    installed application, mapping to that application's existing
    'templates' directory.
    """
    dirs_by_app = OrderedDict()
    for app_config in apps.get_app_configs():
        candidate = os.path.join(
            getattr(app_config, 'path', '/'), 'templates')
        if not os.path.isdir(candidate):
            continue
        candidate = upath(candidate)
        # register under both identifiers so lookups by either work
        dirs_by_app[app_config.name] = candidate
        dirs_by_app[app_config.label] = candidate
    return dirs_by_app
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.