text_prompt | code_prompt |
---|---|
<SYSTEM_TASK:>
Least squares fitting in numpy with cross-validation.
<END_TASK>
<USER_TASK:>
Description:
def np_lst_sq_xval(vecMdl, aryFuncChnk, aryIdxTrn, aryIdxTst):
"""Least squares fitting in numpy with cross-validation.
""" |
varNumXval = aryIdxTrn.shape[-1]
varNumVoxChnk = aryFuncChnk.shape[-1]
# pre-allocate ary to collect cross-validation
# error for every xval fold
aryResXval = np.empty((varNumVoxChnk,
varNumXval),
dtype=np.float32)
# loop over cross-validation folds
for idxXval in range(varNumXval):
# Get pRF time course models for trn and tst:
vecMdlTrn = vecMdl[aryIdxTrn[:, idxXval], :]
vecMdlTst = vecMdl[aryIdxTst[:, idxXval], :]
# Get functional data for trn and tst:
aryFuncChnkTrn = aryFuncChnk[
aryIdxTrn[:, idxXval], :]
aryFuncChnkTst = aryFuncChnk[
aryIdxTst[:, idxXval], :]
# Numpy linalg.lstsq is used to calculate the
# parameter estimates of the current model:
vecTmpPe = np.linalg.lstsq(vecMdlTrn,
aryFuncChnkTrn,
rcond=-1)[0]
# calculate model prediction time course
aryMdlPrdTc = np.dot(vecMdlTst, vecTmpPe)
# calculate residual sum of squares between
# test data and model prediction time course
aryResXval[:, idxXval] = np.sum(
(np.subtract(aryFuncChnkTst,
aryMdlPrdTc))**2, axis=0)
return aryResXval |
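A minimal usage sketch for the cross-validated least-squares routine above, assuming numpy is available and the function is in scope; the array sizes and fold indices below are invented for illustration:

```python
import numpy as np

# 100 time points, 2 model predictors, 50 voxels, 2 folds (all invented)
vecMdl = np.random.randn(100, 2).astype(np.float32)
aryFuncChnk = np.random.randn(100, 50).astype(np.float32)
# Each column holds the sample indices belonging to one fold
aryIdxTrn = np.stack([np.arange(0, 50), np.arange(50, 100)], axis=1)
aryIdxTst = np.stack([np.arange(50, 100), np.arange(0, 50)], axis=1)

aryResXval = np_lst_sq_xval(vecMdl, aryFuncChnk, aryIdxTrn, aryIdxTst)
print(aryResXval.shape)  # (50, 2): residual sum of squares per voxel and fold
```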
<SYSTEM_TASK:>
Return a list of states.
<END_TASK>
<USER_TASK:>
Description:
async def _raw_state_data(self) -> list:
"""Return a list of states.""" |
data = await self._request('get', 'states')
return [
location for location in data
if location['name'] != 'United States'
] |
<SYSTEM_TASK:>
Decorates a function to be registered as a resource route.
<END_TASK>
<USER_TASK:>
Description:
def resource(self, uri, methods=frozenset({'GET'}), **kwargs):
"""
Decorates a function to be registered as a resource route.
:param uri: path of the URL
:param methods: list or tuple of methods allowed
:param host:
:param strict_slashes:
:param stream:
:param version:
:param name: user defined route name for url_for
:param filters: List of callable that will filter request and
response data
:param validators: List of callable added to the filter list.
:return: A decorated function
""" |
def decorator(f):
if kwargs.get('stream'):
f.is_stream = kwargs['stream']
self.add_resource(f, uri=uri, methods=methods, **kwargs)
return decorator |
<SYSTEM_TASK:>
Register a resource route.
<END_TASK>
<USER_TASK:>
Description:
def add_resource(self, handler, uri, methods=frozenset({'GET'}),
**kwargs):
"""
Register a resource route.
:param handler: function or class instance
:param uri: path of the URL
:param methods: list or tuple of methods allowed
:param host:
:param strict_slashes:
:param version:
:param name: user defined route name for url_for
:param filters: List of callable that will filter request and
response data
:param validators: List of callable added to the filter list.
:return: function or class instance
""" |
sanic_args = ('host', 'strict_slashes', 'version', 'name')
view_kwargs = dict((k, v) for k, v in kwargs.items()
if k in sanic_args)
filters = kwargs.get('filters', self.default_filters)
validators = kwargs.get('validators', [])
filter_list = list(filters) + list(validators)
filter_options = {
'filter_list': filter_list,
'handler': handler,
'uri': uri,
'methods': methods
}
filter_options.update(kwargs)
handler = self.init_filters(filter_list, filter_options)(handler)
return self.add_route(handler=handler, uri=uri, methods=methods,
**view_kwargs) |
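A hypothetical illustration of how the two methods above might be used together; the `api` object and the handler are invented here and not taken from the source:

```python
# `api` stands for whatever object exposes resource()/add_resource()
# (a Sanic-style blueprint or extension); purely illustrative.
@api.resource('/users', methods=frozenset({'GET', 'POST'}), validators=[])
async def list_users(request):
    return {'users': []}

# Equivalent explicit registration without the decorator:
# api.add_resource(list_users, uri='/users', methods=frozenset({'GET', 'POST'}))
```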
<SYSTEM_TASK:>
Find all commits between two commit SHAs.
<END_TASK>
<USER_TASK:>
Description:
def get_commits(repo_dir, old_commit, new_commit, hide_merges=True):
"""Find all commits between two commit SHAs.""" |
repo = Repo(repo_dir)
commits = repo.iter_commits(rev="{0}..{1}".format(old_commit, new_commit))
if hide_merges:
return [x for x in commits if not x.summary.startswith("Merge ")]
else:
return list(commits) |
<SYSTEM_TASK:>
Read OSA role information at a particular commit.
<END_TASK>
<USER_TASK:>
Description:
def get_roles(osa_repo_dir, commit, role_requirements):
"""Read OSA role information at a particular commit.""" |
repo = Repo(osa_repo_dir)
checkout(repo, commit)
log.info("Looking for file {f} in repo {r}".format(r=osa_repo_dir,
f=role_requirements))
filename = "{0}/{1}".format(osa_repo_dir, role_requirements)
with open(filename, 'r') as f:
roles_yaml = yaml.safe_load(f)
return normalize_yaml(roles_yaml) |
<SYSTEM_TASK:>
Normalize the YAML from project and role lookups.
<END_TASK>
<USER_TASK:>
Description:
def normalize_yaml(yaml):
"""Normalize the YAML from project and role lookups.
These are returned as a list of tuples.
""" |
if isinstance(yaml, list):
# Normalize the roles YAML data
normalized_yaml = [(x['name'], x['src'], x.get('version', 'HEAD'))
for x in yaml]
else:
# Extract the project names from the roles YAML and create a list of
# tuples.
projects = [x[:-9] for x in yaml.keys() if x.endswith('git_repo')]
normalized_yaml = []
for project in projects:
repo_url = yaml['{0}_git_repo'.format(project)]
commit_sha = yaml['{0}_git_install_branch'.format(project)]
normalized_yaml.append((project, repo_url, commit_sha))
return normalized_yaml |
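A small sketch of the two input shapes the function above accepts; the role names, URLs, and SHAs are made up for illustration:

```python
# Role-style YAML (a list of dicts) and project-style YAML (a flat dict),
# both invented for this example.
roles_yaml = [
    {'name': 'apt_package_pinning',
     'src': 'https://example.org/apt_package_pinning',
     'version': 'master'},
]
projects_yaml = {
    'nova_git_repo': 'https://example.org/openstack/nova',
    'nova_git_install_branch': 'abc1234',
}

print(normalize_yaml(roles_yaml))
# [('apt_package_pinning', 'https://example.org/apt_package_pinning', 'master')]
print(normalize_yaml(projects_yaml))
# [('nova', 'https://example.org/openstack/nova', 'abc1234')]
```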
<SYSTEM_TASK:>
Post the report to a GitHub Gist and return the URL of the gist.
<END_TASK>
<USER_TASK:>
Description:
def post_gist(report_data, old_sha, new_sha):
"""Post the report to a GitHub Gist and return the URL of the gist.""" |
payload = {
"description": ("Changes in OpenStack-Ansible between "
"{0} and {1}".format(old_sha, new_sha)),
"public": True,
"files": {
"osa-diff-{0}-{1}.rst".format(old_sha, new_sha): {
"content": report_data
}
}
}
url = "https://api.github.com/gists"
r = requests.post(url, data=json.dumps(payload))
response = r.json()
return response['html_url'] |
<SYSTEM_TASK:>
Prepare the storage directory.
<END_TASK>
<USER_TASK:>
Description:
def prepare_storage_dir(storage_directory):
"""Prepare the storage directory.""" |
storage_directory = os.path.expanduser(storage_directory)
if not os.path.exists(storage_directory):
os.mkdir(storage_directory)
return storage_directory |
<SYSTEM_TASK:>
Reset repository and optionally update it.
<END_TASK>
<USER_TASK:>
Description:
def repo_pull(repo_dir, repo_url, fetch=False):
"""Reset repository and optionally update it.""" |
# Make sure the repository is reset to the master branch.
repo = Repo(repo_dir)
repo.git.clean("-df")
repo.git.reset("--hard")
repo.git.checkout("master")
repo.head.reset(index=True, working_tree=True)
# Compile the refspec appropriately to ensure
# that if the repo is from github it includes
# all the refs needed, including PR's.
refspec_list = [
"+refs/heads/*:refs/remotes/origin/*",
"+refs/heads/*:refs/heads/*",
"+refs/tags/*:refs/tags/*"
]
if "github.com" in repo_url:
refspec_list.extend([
"+refs/pull/*:refs/remotes/origin/pr/*",
"+refs/heads/*:refs/remotes/origin/*"])
# Only get the latest updates if requested.
if fetch:
repo.git.fetch(["-u", "-v", "-f",
repo_url,
refspec_list])
return repo |
<SYSTEM_TASK:>
Clone the repo if it doesn't exist already, otherwise update it.
<END_TASK>
<USER_TASK:>
Description:
def update_repo(repo_dir, repo_url, fetch=False):
"""Clone the repo if it doesn't exist already, otherwise update it.""" |
repo_exists = os.path.exists(repo_dir)
if not repo_exists:
log.info("Cloning repo {}".format(repo_url))
repo = repo_clone(repo_dir, repo_url)
# Make sure the repo is properly prepared
# and has all the refs required
log.info("Fetching repo {} (fetch: {})".format(repo_url, fetch))
repo = repo_pull(repo_dir, repo_url, fetch)
return repo |
<SYSTEM_TASK:>
Test if a commit is valid for the repository.
<END_TASK>
<USER_TASK:>
Description:
def validate_commits(repo_dir, commits):
"""Test if a commit is valid for the repository.""" |
log.debug("Validating {c} exist in {r}".format(c=commits, r=repo_dir))
repo = Repo(repo_dir)
for commit in commits:
try:
commit = repo.commit(commit)
except Exception:
msg = ("Commit {commit} could not be found in repo {repo}. "
"You may need to pass --update to fetch the latest "
"updates to the git repositories stored on "
"your local computer.".format(repo=repo_dir, commit=commit))
raise exceptions.InvalidCommitException(msg)
return True |
<SYSTEM_TASK:>
Check if commit range is valid. Flip it if needed.
<END_TASK>
<USER_TASK:>
Description:
def validate_commit_range(repo_dir, old_commit, new_commit):
"""Check if commit range is valid. Flip it if needed.""" |
# Are there any commits between the two commits that were provided?
try:
commits = get_commits(repo_dir, old_commit, new_commit)
except Exception:
commits = []
if len(commits) == 0:
# The user might have gotten their commits out of order. Let's flip
# the order of the commits and try again.
try:
commits = get_commits(repo_dir, new_commit, old_commit)
except Exception:
commits = []
if len(commits) == 0:
# Okay, so there really are no commits between the two commits
# provided by the user. :)
msg = ("The commit range {0}..{1} is invalid for {2}."
"You may need to use the --update option to fetch the "
"latest updates to the git repositories stored on your "
"local computer.".format(old_commit, new_commit, repo_dir))
raise exceptions.InvalidCommitRangeException(msg)
else:
return 'flip'
return True |
<SYSTEM_TASK:>
Get release notes between the two revisions.
<END_TASK>
<USER_TASK:>
Description:
def get_release_notes(osa_repo_dir, osa_old_commit, osa_new_commit):
"""Get release notes between the two revisions.""" |
repo = Repo(osa_repo_dir)
# Get a list of tags, sorted
tags = repo.git.tag().split('\n')
tags = sorted(tags, key=LooseVersion)
# Currently major tags are being printed after rc and
# b tags. We need to fix the list so that major
# tags are printed before rc and b releases
tags = _fix_tags_list(tags)
# Find the closest tag from a given SHA
# The tag found here is the tag that was cut
# either on or before the given SHA
checkout(repo, osa_old_commit)
old_tag = repo.git.describe()
# If the SHA given is between two release tags, then
# 'git describe' will return a tag in form of
# <tag>-<commitNum>-<sha>. For example:
# 14.0.2-3-g6931e26
# Since reno does not support this format, we need to
# strip away the commit number and sha bits.
if '-' in old_tag:
old_tag = old_tag[0:old_tag.index('-')]
# Get the nearest tag associated with the new commit
checkout(repo, osa_new_commit)
new_tag = repo.git.describe()
if '-' in new_tag:
nearest_new_tag = new_tag[0:new_tag.index('-')]
else:
nearest_new_tag = new_tag
# Truncate the tags list to only include versions
# between old_sha and new_sha. The latest release
# is not included in this list. That version will be
# printed separately in the following step.
tags = tags[tags.index(old_tag):tags.index(nearest_new_tag)]
release_notes = ""
# Checkout the new commit, then run reno to get the latest
# releasenotes that have been created or updated between
# the latest release and this new commit.
repo.git.checkout(osa_new_commit, '-f')
reno_report_command = ['reno',
'report',
'--earliest-version',
nearest_new_tag]
reno_report_p = subprocess.Popen(reno_report_command,
cwd=osa_repo_dir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
reno_output = reno_report_p.communicate()[0].decode('UTF-8')
release_notes += reno_output
# We want to start with the latest packaged release first, so
# the tags list is reversed
for version in reversed(tags):
# If version is an rc or b tag, and it has a major
# release tag, then skip it. There is no need to print
# release notes for an rc or b release unless we are
# comparing shas between two rc or b releases.
repo.git.checkout(version, '-f')
# We are outputting one version at a time here
reno_report_command = ['reno',
'report',
'--branch',
version,
'--earliest-version',
version]
reno_report_p = subprocess.Popen(reno_report_command,
cwd=osa_repo_dir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
reno_output = reno_report_p.communicate()[0].decode('UTF-8')
# We need to ensure the output includes the version we are concerned
# about.
# This is due to https://bugs.launchpad.net/reno/+bug/1670173
if version in reno_output:
release_notes += reno_output
# Clean up "Release Notes" title. We don't need this title for
# each tagged release.
release_notes = release_notes.replace(
"=============\nRelease Notes\n=============",
""
)
# Replace headers that contain '=' with '~' to comply with osa-differ's
# formatting
release_notes = re.sub('===+', _equal_to_tilde, release_notes)
# Replace headers that contain '-' with '#' to comply with osa-differ's
# formatting
release_notes = re.sub('---+', _dash_to_num, release_notes)
return release_notes |
<SYSTEM_TASK:>
This method provides the functionality of adding text appropriately
<END_TASK>
<USER_TASK:>
Description:
def append_new_text(destination, text, join_str=None):
"""
This method provides the functionality of adding text appropriately
underneath the destination node. This will be either to the destination's
text attribute or to the tail attribute of the last child.
""" |
if join_str is None:
join_str = ' '
if len(destination) > 0: # Destination has children
last = destination[-1]
if last.tail is None: # Last child has no tail
last.tail = text
else: # Last child has a tail
last.tail = join_str.join([last.tail, text])
else: # Destination has no children
if destination.text is None: # Destination has no text
destination.text = text
else: # Destination has a text
destination.text = join_str.join([destination.text, text]) |
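A minimal sketch of the two cases handled above, assuming lxml is installed and append_new_text is in scope:

```python
from lxml import etree

# Destination with a child: the text lands on the last child's tail
dest = etree.fromstring('<p>start<b>bold</b></p>')
append_new_text(dest, 'tail text')
print(etree.tostring(dest))  # b'<p>start<b>bold</b>tail text</p>'

# Destination without children: the text is joined onto .text
empty = etree.fromstring('<p>start</p>')
append_new_text(empty, 'more', join_str=' ')
print(etree.tostring(empty))  # b'<p>start more</p>'
```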
<SYSTEM_TASK:>
Compared to xml.dom.minidom, lxml's treatment of text as .text and .tail
<END_TASK>
<USER_TASK:>
Description:
def append_all_below(destination, source, join_str=None):
"""
Compared to xml.dom.minidom, lxml's treatment of text as .text and .tail
attributes of elements is an oddity. It can even be a little frustrating
when one is attempting to copy everything underneath some element to
another element; one has to write in extra code to handle the text. This
method provides the functionality of adding everything underneath the
source element, in preserved order, to the destination element.
""" |
if join_str is None:
join_str = ' '
if source.text is not None: # If source has text
if len(destination) == 0: # Destination has no children
if destination.text is None: # Destination has no text
destination.text = source.text
else: # Destination has a text
destination.text = join_str.join([destination.text, source.text])
else: # Destination has children
#Select last child
last = destination[-1]
if last.tail is None: # Last child has no tail
last.tail = source.text
else: # Last child has a tail
last.tail = join_str.join([last.tail, source.text])
for each_child in source:
destination.append(deepcopy(each_child)) |
<SYSTEM_TASK:>
This method will remove all attributes of any provided element.
<END_TASK>
<USER_TASK:>
Description:
def remove_all_attributes(element, exclude=None):
"""
This method will remove all attributes of any provided element.
A list of strings may be passed to the keyword-argument "exclude", which
will serve as a list of attributes which will not be removed.
""" |
if exclude is None:
exclude = []
for k in element.attrib.keys():
if k not in exclude:
element.attrib.pop(k) |
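A quick sketch of the exclusion behaviour, assuming lxml is installed and the function above is in scope:

```python
from lxml import etree

el = etree.fromstring('<p id="p1" class="intro" style="color:red">text</p>')
remove_all_attributes(el, exclude=['id'])
print(etree.tostring(el))  # b'<p id="p1">text</p>'
```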
<SYSTEM_TASK:>
Renames the attributes of the element. Accepts the element and a dictionary
<END_TASK>
<USER_TASK:>
Description:
def rename_attributes(element, attrs):
"""
Renames the attributes of the element. Accepts the element and a dictionary
of string values. The keys are the original names, and their values will be
the altered names. This method treats all attributes as optional and will
not fail on missing attributes.
""" |
for name in attrs.keys():
if name not in element.attrib:
continue
else:
element.attrib[attrs[name]] = element.attrib.pop(name) |
<SYSTEM_TASK:>
A simple way to replace one element node with another.
<END_TASK>
<USER_TASK:>
Description:
def replace(old, new):
"""
A simple way to replace one element node with another.
""" |
parent = old.getparent()
parent.replace(old, new) |
<SYSTEM_TASK:>
A simple way to insert a new element node before the old element node among
<END_TASK>
<USER_TASK:>
Description:
def insert_before(old, new):
"""
A simple way to insert a new element node before the old element node among
its siblings.
""" |
parent = old.getparent()
parent.insert(parent.index(old), new) |
<SYSTEM_TASK:>
Converts the node received to a comment, in place, and will also return the
<END_TASK>
<USER_TASK:>
Description:
def comment(node):
"""
Converts the node received to a comment, in place, and will also return the
comment element.
""" |
parent = node.parentNode
comment = node.ownerDocument.createComment(node.toxml())
parent.replaceChild(comment, node)
return comment |
<SYSTEM_TASK:>
Converts the comment node received to a non-commented element, in place,
<END_TASK>
<USER_TASK:>
Description:
def uncomment(comment):
"""
Converts the comment node received to a non-commented element, in place,
and will return the new node.
This may fail, primarily due to special characters within the comment that
the xml parser is unable to handle. If it fails, this method will log an
error and return None
""" |
parent = comment.parentNode
# html.unescape() replaces the HTMLParser.unescape() method removed in Python 3.9
data = html.unescape(comment.data)
try:
node = minidom.parseString(data).firstChild
except xml.parsers.expat.ExpatError: # Could not parse!
log.error('Could not uncomment node due to parsing error!')
return None
else:
parent.replaceChild(node, comment)
return node |
<SYSTEM_TASK:>
A handy way to serialize an element to text.
<END_TASK>
<USER_TASK:>
Description:
def serialize(element, strip=False):
"""
A handy way to serialize an element to text.
""" |
text = etree.tostring(element, method='text', encoding='utf-8')
if strip:
text = text.strip()
return str(text, encoding='utf-8') |
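A short usage sketch (assumes lxml is installed and serialize is in scope):

```python
from lxml import etree

el = etree.fromstring('<p>  Hello <b>world</b>  </p>')
print(serialize(el))              # '  Hello world  '
print(serialize(el, strip=True))  # 'Hello world'
```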
<SYSTEM_TASK:>
Make data filter definition consistent.
<END_TASK>
<USER_TASK:>
Description:
def _homogenize_data_filter(dfilter):
"""
Make data filter definition consistent.
Create a tuple where first element is the row filter and the second element
is the column filter
""" |
if isinstance(dfilter, tuple) and (len(dfilter) == 1):
dfilter = (dfilter[0], None)
if (dfilter is None) or (dfilter == (None, None)) or (dfilter == (None,)):
dfilter = (None, None)
elif isinstance(dfilter, dict):
dfilter = (dfilter, None)
elif isinstance(dfilter, (list, str)) or (
isinstance(dfilter, int) and (not isinstance(dfilter, bool))
):
dfilter = (None, dfilter if isinstance(dfilter, list) else [dfilter])
elif isinstance(dfilter[0], dict) or (
(dfilter[0] is None) and (not isinstance(dfilter[1], dict))
):
pass
else:
dfilter = (dfilter[1], dfilter[0])
return dfilter |
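A few illustrative inputs and the normalized (row_filter, column_filter) tuples they produce; the column names are invented:

```python
print(_homogenize_data_filter(None))                  # (None, None)
print(_homogenize_data_filter({'Ctrl': 1}))           # ({'Ctrl': 1}, None)
print(_homogenize_data_filter('Ref'))                 # (None, ['Ref'])
print(_homogenize_data_filter(('Ref', {'Ctrl': 1})))  # ({'Ctrl': 1}, 'Ref')
```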
<SYSTEM_TASK:>
Convert to float if object is a float string.
<END_TASK>
<USER_TASK:>
Description:
def _tofloat(obj):
"""Convert to float if object is a float string.""" |
if "inf" in obj.lower().strip():
return obj
try:
return int(obj)
except ValueError:
try:
return float(obj)
except ValueError:
return obj |
<SYSTEM_TASK:>
Validate that all columns in filter are in header.
<END_TASK>
<USER_TASK:>
Description:
def _validate_rfilter(self, rfilter, letter="d"):
"""Validate that all columns in filter are in header.""" |
if letter == "d":
pexdoc.exh.addai(
"dfilter",
(
(not self._has_header)
and any([not isinstance(item, int) for item in rfilter.keys()])
),
)
else:
pexdoc.exh.addai(
"rfilter",
(
(not self._has_header)
and any([not isinstance(item, int) for item in rfilter.keys()])
),
)
for key in rfilter:
self._in_header(key)
rfilter[key] = (
[rfilter[key]] if isinstance(rfilter[key], str) else rfilter[key]
) |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def dsort(self, order):
r"""
Sort rows.
:param order: Sort order
:type order: :ref:`CsvColFilter`
.. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pcsv.csv_file.CsvFile.dsort
:raises:
* RuntimeError (Argument \`order\` is not valid)
* RuntimeError (Invalid column specification)
* ValueError (Column *[column_identifier]* not found)
.. [[[end]]]
""" |
# Make order conforming to a list of dictionaries
order = order if isinstance(order, list) else [order]
norder = [{item: "A"} if not isinstance(item, dict) else item for item in order]
# Verify that all columns exist in file
self._in_header([list(item.keys())[0] for item in norder])
# Get column indexes
clist = []
for nitem in norder:
for key, value in nitem.items():
clist.append(
(
key
if isinstance(key, int)
else self._header_upper.index(key.upper()),
value.upper() == "D",
)
)
# From the Python documentation:
# "Starting with Python 2.3, the sort() method is guaranteed to be
# stable. A sort is stable if it guarantees not to change the
# relative order of elements that compare equal - this is helpful
# for sorting in multiple passes (for example, sort by department,
# then by salary grade)."
# This means that the sorts have to be done from "minor" column to
# "major" column
for (cindex, rvalue) in reversed(clist):
fpointer = operator.itemgetter(cindex)
self._data.sort(key=fpointer, reverse=rvalue) |
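A standalone sketch of the minor-to-major stable-sort idea used above, with invented rows:

```python
import operator

rows = [[2, 'b'], [1, 'a'], [2, 'a'], [1, 'b']]
# (column index, descending?) pairs: column 0 ascending (major), then column 1 descending (minor)
spec = [(0, False), (1, True)]
for cindex, reverse in reversed(spec):
    rows.sort(key=operator.itemgetter(cindex), reverse=reverse)
print(rows)  # [[1, 'b'], [1, 'a'], [2, 'b'], [2, 'a']]
```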
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def header(self, filtered=False):
r"""
Return data header.
When the raw (input) data is used the data header is a list of the
comma-separated values file header if the file is loaded with header
(each list item is a column header) or a list of column numbers if the
file is loaded without header (column zero is the leftmost column).
When filtered data is used the data header is the active column filter,
if any, otherwise it is the same as the raw (input) data header
:param filtered: Flag that indicates whether the raw (input) data
should be used (False) or whether filtered data
should be used (True)
:type filtered: boolean
:rtype: list of strings or integers
.. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pcsv.csv_file.CsvFile.header
:raises: RuntimeError (Argument \`filtered\` is not valid)
.. [[[end]]]
""" |
return (
self._header
if (not filtered) or (filtered and self._cfilter is None)
else self._cfilter
) |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def replace(self, rdata, filtered=False):
r"""
Replace data.
:param rdata: Replacement data
:type rdata: list of lists
:param filtered: Filtering type
:type filtered: :ref:`CsvFiltered`
.. [[[cog cog.out(exobj.get_sphinx_autodoc(width=63)) ]]]
.. Auto-generated exceptions documentation for
.. pcsv.csv_file.CsvFile.replace
:raises:
* RuntimeError (Argument \`filtered\` is not valid)
* RuntimeError (Argument \`rdata\` is not valid)
* ValueError (Number of columns mismatch between input and
replacement data)
* ValueError (Number of rows mismatch between input and
replacement data)
.. [[[end]]]
""" |
# pylint: disable=R0914
rdata_ex = pexdoc.exh.addai("rdata")
rows_ex = pexdoc.exh.addex(
ValueError, "Number of rows mismatch between input and replacement data"
)
cols_ex = pexdoc.exh.addex(
ValueError, "Number of columns mismatch between input and replacement data"
)
rdata_ex(any([len(item) != len(rdata[0]) for item in rdata]))
# Use all columns if no specification has been given
cfilter = (
self._cfilter if filtered in [True, "B", "b", "C", "c"] else self._header
)
# Verify column names, has to be done before getting data
col_num = len(self._data[0]) - 1
odata = self._apply_filter(filtered)
cfilter = (
self._cfilter if filtered in [True, "B", "b", "C", "c"] else self._header
)
col_index = [
self._header_upper.index(col_id.upper())
if isinstance(col_id, str)
else col_id
for col_id in cfilter
]
rows_ex(len(odata) != len(rdata))
cols_ex(len(odata[0]) != len(rdata[0]))
df_tuples = self._format_rfilter(self._rfilter)
rnum = 0
for row in self._data:
if (not filtered) or (
filtered
and all([row[col_num] in col_value for col_num, col_value in df_tuples])
):
for col_num, new_data in zip(col_index, rdata[rnum]):
row[col_num] = new_data
rnum = rnum + 1 |
<SYSTEM_TASK:>
Returns the list of documents found on the collection
<END_TASK>
<USER_TASK:>
Description:
def list(self, request):
"""
Returns the list of documents found on the collection
""" |
pipeline = [{'$match': request.args.pop('match', {})}]
sort = request.args.pop('sort', {})
if sort:
pipeline.append({'$sort': sort})
project = request.args.pop('project', {})
if project:
pipeline.append({'$project': project})
return Response(serialize(self.collection.aggregate(pipeline))) |
<SYSTEM_TASK:>
Creates a new document based on the given data
<END_TASK>
<USER_TASK:>
Description:
def create(self, request):
"""
Creates a new document based on the given data
""" |
document = self.collection(request.json)
document.created_at = datetime.utcnow()
document.updated_at = document.created_at
created = document.insert()
return Response(
response=serialize(created),
status=(
201 if not all(
key in created for key in [
'error_code', 'error_type', 'error_message'
]
) else 400
)
) |
<SYSTEM_TASK:>
Returns the document containing the given _id or 404
<END_TASK>
<USER_TASK:>
Description:
def retrieve(self, request, _id):
"""
Returns the document containing the given _id or 404
""" |
_id = deserialize(_id)
retrieved = self.collection.find_one({'_id': _id})
if retrieved:
return Response(serialize(retrieved))
else:
return Response(
response=serialize(
DocumentNotFoundError(self.collection.__name__, _id)
),
status=400
) |
<SYSTEM_TASK:>
Updates the document with the given _id using the given data
<END_TASK>
<USER_TASK:>
Description:
def update(self, request, _id):
"""
Updates the document with the given _id using the given data
""" |
_id = deserialize(_id)
to_update = self.collection.find_one({'_id': _id})
if to_update:
document = self.collection(dict(to_update, **request.json))
document.updated_at = datetime.utcnow()
updated = document.update()
return Response(
response=serialize(updated),
status=(
200 if not all(
key in updated for key in [
'error_code', 'error_type', 'error_message'
]
) else 400
)
)
else:
return Response(
response=serialize(
DocumentNotFoundError(self.collection.__name__, _id)
),
status=400
) |
<SYSTEM_TASK:>
Deletes the document with the given _id if it exists
<END_TASK>
<USER_TASK:>
Description:
def delete(self, request, _id):
"""
Deletes the document with the given _id if it exists
""" |
_id = deserialize(_id)
to_delete = self.collection.get({'_id': _id})
if to_delete:
deleted = to_delete.delete()
return Response(
response=serialize(deleted),
status=(
200 if not all(
key in deleted for key in [
'error_code', 'error_type', 'error_message'
]
) else 400
)
)
else:
return Response(
response=serialize(
DocumentNotFoundError(self.collection.__name__, _id)
),
status=404
) |
<SYSTEM_TASK:>
Starts the twisted reactor if it is not running already.
<END_TASK>
<USER_TASK:>
Description:
def _ensure_reactor_running():
"""
Starts the twisted reactor if it is not running already.
The reactor is started in a new daemon-thread.
Has to perform dirty hacks so that twisted can register
signals even if it is not running in the main-thread.
""" |
if not reactor.running:
# Some of the `signal` API can only be called
# from the main-thread. So we do a dirty workaround.
#
# `signal.signal()` and `signal.set_wakeup_fd()`
# are temporarily monkey-patched while the reactor is
# starting.
#
# The patched functions record the invocations in
# `signal_registrations`.
#
# Once the reactor is started, the main-thread
# is used to playback the recorded invocations.
signal_registrations = []
# do the monkey patching
def signal_capture(*args, **kwargs):
signal_registrations.append((orig_signal, args, kwargs))
def set_wakeup_fd_capture(*args, **kwargs):
signal_registrations.append((orig_set_wakeup_fd, args, kwargs))
orig_signal = signal.signal
signal.signal = signal_capture
orig_set_wakeup_fd = signal.set_wakeup_fd
signal.set_wakeup_fd = set_wakeup_fd_capture
# start the reactor in a daemon-thread
reactor_thread = threading.Thread(target=reactor.run, name="reactor")
reactor_thread.daemon = True
reactor_thread.start()
while not reactor.running:
time.sleep(0.01)
# Give the reactor a moment to register the signals.
# Apparently the 'running' flag is set before that.
time.sleep(0.01)
# Undo the monkey-patching
signal.signal = orig_signal
signal.set_wakeup_fd = orig_set_wakeup_fd
# Playback the recorded calls
for func, args, kwargs in signal_registrations:
func(*args, **kwargs) |
<SYSTEM_TASK:>
Convert the given list of parameters to a JSON object.
<END_TASK>
<USER_TASK:>
Description:
def save_list(key, *values):
"""Convert the given list of parameters to a JSON object.
JSON object is of the form:
{ key: [values[0], values[1], ... ] },
where values represent the given list of parameters.
""" |
return json.dumps({key: [_get_json(value) for value in values]}) |
<SYSTEM_TASK:>
Convert given progress to a JSON object.
<END_TASK>
<USER_TASK:>
Description:
def progress(progress):
"""Convert given progress to a JSON object.
Check that progress can be represented as float between 0 and 1 and
return it in JSON of the form:
{"proc.progress": progress}
""" |
if isinstance(progress, int) or isinstance(progress, float):
progress = float(progress)
else:
try:
progress = float(json.loads(progress))
except (TypeError, ValueError):
return warning("Progress must be a float.")
if not 0 <= progress <= 1:
return warning("Progress must be a float between 0 and 1.")
return json.dumps({'proc.progress': progress}) |
<SYSTEM_TASK:>
Prepend the given parameter with ``export``
<END_TASK>
<USER_TASK:>
Description:
def export_file(file_path):
"""Prepend the given parameter with ``export``""" |
if not os.path.isfile(file_path):
return error("Referenced file does not exist: '{}'.".format(file_path))
return "export {}".format(file_path) |
<SYSTEM_TASK:>
calculate mean squared error. Assumes that axis=0 is time
<END_TASK>
<USER_TASK:>
Description:
def calcMse(predTst, yTest, axis=0):
"""calculate mean squared error. Assumes that axis=0 is time
Parameters
----------
predTst : np.array, predicted response for yTest
yTest : np.array, actually observed response for yTest
Returns
-------
aryFunc : np.array
MSE
""" |
return np.mean((yTest - predTst) ** 2, axis=axis) |
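A tiny usage sketch with made-up data (assumes numpy is imported as np and calcMse is in scope):

```python
import numpy as np

yTest = np.array([[1.0, 2.0], [3.0, 4.0]])
predTst = np.array([[1.5, 2.0], [2.5, 4.0]])
print(calcMse(predTst, yTest))  # [0.25 0.  ] -> per-column MSE over the time axis
```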
<SYSTEM_TASK:>
Detects reverts that occur in a sequence of revisions. Note that,
<END_TASK>
<USER_TASK:>
Description:
def detect(checksum_revisions, radius=defaults.RADIUS):
"""
Detects reverts that occur in a sequence of revisions. Note that,
`revision` data meta will simply be returned in the case of a revert.
This function serves as a convenience wrapper around calls to
:class:`mwreverts.Detector`'s :func:`~mwreverts.Detector.process`
method.
:Parameters:
checksum_revisions : `iterable` ( (checksum, revision) )
an iterable over tuples of checksum and revision meta data
radius : int
a positive integer indicating the maximum revision distance that a
revert can span.
:Return:
an iterator over :class:`mwreverts.Revert`
:Example:
>>> import mwreverts
>>>
>>> checksum_revisions = [
... ("aaa", {'rev_id': 1}),
... ("bbb", {'rev_id': 2}),
... ("aaa", {'rev_id': 3}),
... ("ccc", {'rev_id': 4})
... ]
>>>
>>> list(mwreverts.detect(checksum_revisions))
[Revert(reverting={'rev_id': 3},
reverteds=[{'rev_id': 2}],
reverted_to={'rev_id': 1})]
""" |
revert_detector = Detector(radius)
for checksum, revision in checksum_revisions:
revert = revert_detector.process(checksum, revision)
if revert is not None:
yield revert |
<SYSTEM_TASK:>
Ingests an article and processes it for metadata and elements to provide
<END_TASK>
<USER_TASK:>
Description:
def process(self, article):
"""
Ingests an article and processes it for metadata and elements to provide
proper references in the EPUB spine.
This method may only be called once unless the Package was instantiated
in collection mode using ``Package(collection=True)``. It places entries
in an internal spine list for the Main Content Document, the
Bibliographic Content Document (if there are ref elements in Back), and
the Tables Content Document (if there are table elements). It then
employs the publisher specific methods for extracting article metadata
using the article's publisher attribute (an instance of a Publisher
class).
Parameters
----------
article : openaccess_epub.article.Article instance
An article to be included in the EPUB, to be processed for metadata
and appropriate content document references.
""" |
if self.article is not None and not self.collection:
log.warning('Could not process additional article. Package only \
handles one article unless collection mode is set.')
return False
if article.publisher is None:
log.error('''Package cannot be generated for an Article \
without a publisher!''')
return
self.article = article
self.article_doi = self.article.doi.split('/')[1]
self.all_dois.append(self.article.doi)
#Analyze the article to add entries to the spine
dash_doi = self.article_doi.replace('.', '-')
#Entry for the main content document
main_idref = 'main-{0}-xhtml'.format(dash_doi)
self.spine_list.append(spine_item(main_idref, True))
#Entry for the biblio content document
biblio_idref = 'biblio-{0}-xhtml'.format(dash_doi)
if self.article.root.xpath('./back/ref-list/ref'):
self.spine_list.append(spine_item(biblio_idref, True))
#Entry for the tables content document
tables_idref = 'tables-{0}-xhtml'.format(dash_doi)
if self.article.publisher.has_out_of_flow_tables():
self.spine_list.append(spine_item(tables_idref, False))
self.acquire_metadata() |
<SYSTEM_TASK:>
Handles the acquisition of metadata for both collection mode and single
<END_TASK>
<USER_TASK:>
Description:
def acquire_metadata(self):
"""
Handles the acquisition of metadata for both collection mode and single
mode, uses the metadata methods belonging to the article's publisher
attribute.
""" |
#For space economy
publisher = self.article.publisher
if self.collection: # collection mode metadata gathering
pass
else: # single mode metadata gathering
self.pub_id = publisher.package_identifier()
self.title = publisher.package_title()
for date in publisher.package_date():
self.dates.add(date)
#Common metadata gathering
for lang in publisher.package_language():
self.languages.add(lang) # languages
for contributor in publisher.package_contributors(): # contributors
self.contributors.add(contributor)
self.publishers.add(publisher.package_publisher()) # publisher names
desc = publisher.package_description()
if desc is not None:
self.descriptions.add(desc)
for subj in publisher.package_subject():
self.subjects.add(subj) # subjects
#Rights
art_rights = publisher.package_rights()
self.rights.add(art_rights)
if art_rights not in self.rights_associations:
self.rights_associations[art_rights] = [self.article.doi]
else:
self.rights_associations[art_rights].append(self.article.doi) |
<SYSTEM_TASK:>
An iterator through the files in a location which yields item elements
<END_TASK>
<USER_TASK:>
Description:
def file_manifest(self, location):
"""
An iterator through the files in a location which yields item elements
suitable for insertion into the package manifest.
""" |
#Maps file extensions to mimetypes
mimetypes = {'.jpg': 'image/jpeg',
'.jpeg': 'image/jpeg',
'.xml': 'application/xhtml+xml',
'.png': 'image/png',
'.css': 'text/css',
'.ncx': 'application/x-dtbncx+xml',
'.gif': 'image/gif',
'.tif': 'image/tiff',
'.pdf': 'application/pdf',
'.xhtml': 'application/xhtml+xml',
'.ttf': 'application/vnd.ms-opentype',
'.otf': 'application/vnd.ms-opentype'}
current_dir = os.getcwd()
os.chdir(location)
for dirpath, _dirnames, filenames in os.walk('.'):
dirpath = dirpath[2:] # A means to avoid dirpath prefix of './'
for fn in filenames:
fn_ext = os.path.splitext(fn)[-1]
item = etree.Element('item')
#Here we set three attributes: href, media-type, and id
if not dirpath:
item.attrib['href'] = fn
else:
item.attrib['href'] = '/'.join([dirpath, fn])
item.attrib['media-type'] = mimetypes[fn_ext]
#Special handling for common image types
if fn_ext in ['.jpg', '.png', '.tif', '.jpeg']:
#the following lines assume we are using the convention
#where the article doi is prefixed by 'images-'
item.attrib['id'] = '-'.join([dirpath[7:],
fn.replace('.', '-')])
else:
item.attrib['id'] = fn.replace('.', '-')
yield item
os.chdir(current_dir) |
<SYSTEM_TASK:>
Returns an appropriate Name and File-As-Name for a contrib element.
<END_TASK>
<USER_TASK:>
Description:
def get_contrib_names(self, contrib):
"""
Returns an appropriate Name and File-As-Name for a contrib element.
This code was refactored out of nav_contributors and
package_contributors to provide a single definition point for a common
job. This is a useful utility that may be well-employed for other
publishers as well.
""" |
collab = contrib.find('collab')
anon = contrib.find('anonymous')
if collab is not None:
proper_name = serialize(collab, strip=True)
file_as_name = proper_name
elif anon is not None:
proper_name = 'Anonymous'
file_as_name = proper_name
else:
name = contrib.find('name')
surname = name.find('surname').text
given = name.find('given-names')
if given is not None:
if given.text: # Sometimes these tags are empty
proper_name = ' '.join([surname, given.text])
#File-as name is <surname>, <given-initial-char>
file_as_name = ', '.join([surname, given.text[0]])
else:
proper_name = surname
file_as_name = proper_name
else:
proper_name = surname
file_as_name = proper_name
return proper_name, file_as_name |
<SYSTEM_TASK:>
Given an Article class instance, this is responsible for returning an
<END_TASK>
<USER_TASK:>
Description:
def package_description(self):
"""
Given an Article class instance, this is responsible for returning an
article description. For this method I have taken the approach of
serializing the article's first abstract, if it has one. This results
in 0 or 1 descriptions per article.
""" |
abstract = self.article.root.xpath('./front/article-meta/abstract')
return serialize(abstract[0], strip=True) if abstract else None |
<SYSTEM_TASK:>
Makes the Article Title for the Heading.
<END_TASK>
<USER_TASK:>
Description:
def heading_title(self):
"""
Makes the Article Title for the Heading.
Metadata element, content derived from FrontMatter
""" |
art_title = self.article.root.xpath('./front/article-meta/title-group/article-title')[0]
article_title = deepcopy(art_title)
article_title.tag = 'h1'
article_title.attrib['id'] = 'title'
article_title.attrib['class'] = 'article-title'
return article_title |
<SYSTEM_TASK:>
Constructs the Authors content for the Heading. This should display
<END_TASK>
<USER_TASK:>
Description:
def make_heading_authors(self, authors):
"""
Constructs the Authors content for the Heading. This should display
directly after the Article Title.
Metadata element, content derived from FrontMatter
""" |
author_element = etree.Element('h3', {'class': 'authors'})
#Construct content for the author element
first = True
for author in authors:
if first:
first = False
else:
append_new_text(author_element, ',', join_str='')
collab = author.find('collab')
anon = author.find('anon')
if collab is not None:
append_all_below(author_element, collab)
elif anon is not None: # If anonymous, just add "Anonymous"
append_new_text(author_element, 'Anonymous')
else: # Author is neither Anonymous or a Collaboration
author_name, _ = self.get_contrib_names(author)
append_new_text(author_element, author_name)
#TODO: Handle author footnote references, also put footnotes in the ArticleInfo
#Example: journal.pbio.0040370.xml
first = True
for xref in author.xpath("./xref[@ref-type='corresp' or @ref-type='aff']"):
_sup = xref.find('sup')
sup_text = all_text(_sup) if _sup is not None else ''
auth_sup = etree.SubElement(author_element, 'sup')
sup_link = etree.SubElement(auth_sup,
'a',
{'href': self.main_fragment.format(xref.attrib['rid'])})
sup_link.text = sup_text
if first:
first = False
else:
append_new_text(auth_sup, ', ', join_str='')
#for xref in author.findall('xref'):
#if xref.attrs['ref-type'] in ['corresp', 'aff']:
#try:
#sup_element = xref.sup[0].node
#except IndexError:
#sup_text = ''
#else:
#sup_text = all_text(sup_element)
#new_sup = etree.SubElement(author_element, 'sup')
#sup_link = etree.SubElement(new_sup, 'a')
#sup_link.attrib['href'] = self.main_fragment.format(xref.attrs['rid'])
#sup_link.text = sup_text
#if first:
#first = False
#else:
#new_sup.text = ','
return author_element |
<SYSTEM_TASK:>
Makes the content for the Author Affiliations, displays after the
<END_TASK>
<USER_TASK:>
Description:
def make_heading_affiliations(self, heading_div):
"""
Makes the content for the Author Affiliations, displays after the
Authors segment in the Heading.
Metadata element, content derived from FrontMatter
""" |
#Get all of the aff element tuples from the metadata
affs = self.article.root.xpath('./front/article-meta/aff')
#Create a list of all those pertaining to the authors
author_affs = [i for i in affs if 'aff' in i.attrib['id']]
#Count them, used for formatting
if len(author_affs) == 0:
return None
else:
affs_list = etree.SubElement(heading_div,
'ul',
{'id': 'affiliations',
'class': 'simple'})
for aff in author_affs:
#Create a span element to accept extracted content
aff_item = etree.SubElement(affs_list, 'li')
aff_item.attrib['id'] = aff.attrib['id']
#Get the first label node and the first addr-line node
label = aff.find('label')
addr_line = aff.find('addr-line')
if label is not None:
bold = etree.SubElement(aff_item, 'b')
bold.text = all_text(label) + ' '
if addr_line is not None:
append_new_text(aff_item, all_text(addr_line))
else:
append_new_text(aff_item, all_text(aff)) |
<SYSTEM_TASK:>
An article may contain data for various kinds of abstracts. This method
<END_TASK>
<USER_TASK:>
Description:
def make_heading_abstracts(self, heading_div):
"""
An article may contain data for various kinds of abstracts. This method
works on those that are included in the Heading. This is displayed
after the Authors and Affiliations.
Metadata element, content derived from FrontMatter
""" |
for abstract in self.article.root.xpath('./front/article-meta/abstract'):
#Make a copy of the abstract
abstract_copy = deepcopy(abstract)
abstract_copy.tag = 'div'
#Abstracts are a rather diverse bunch, keep an eye on them!
title_text = abstract_copy.xpath('./title[1]/text()')
for title in abstract_copy.findall('.//title'):
remove(title)
#Create a header for the abstract
abstract_header = etree.Element('h2')
remove_all_attributes(abstract_copy)
#Set the header text and abstract id according to abstract type
abstract_type = abstract.attrib.get('abstract-type')
log.debug('Handling abstract with abstract-type="{0}"'.format(abstract_type))
if abstract_type == 'summary':
abstract_header.text = 'Author Summary'
abstract_copy.attrib['id'] = 'author-summary'
elif abstract_type == 'editors-summary':
abstract_header.text = 'Editors\' Summary'
abstract_copy.attrib['id'] = 'editor-summary'
elif abstract_type == 'synopsis':
abstract_header.text = 'Synopsis'
abstract_copy.attrib['id'] = 'synopsis'
elif abstract_type == 'alternate':
#Right now, these will only be included if there is a title to
#give it
if title_text:
abstract_header.text = title_text[0]
abstract_copy.attrib['id'] = 'alternate'
else:
continue
elif abstract_type is None:
abstract_header.text = 'Abstract'
abstract_copy.attrib['id'] = 'abstract'
elif abstract_type == 'toc': # We don't include these
continue
else: # Warn about these, then skip
log.warning('No handling for abstract-type="{0}"'.format(abstract_type))
continue
#abstract_header.text = abstract_type
#abstract_copy.attrib['id'] = abstract_type
heading_div.append(abstract_header)
heading_div.append(abstract_copy) |
<SYSTEM_TASK:>
Creates the element for declaring Funding in the article info.
<END_TASK>
<USER_TASK:>
Description:
def make_article_info_funding(self, article_info_div):
"""
Creates the element for declaring Funding in the article info.
""" |
funding_group = self.article.root.xpath('./front/article-meta/funding-group')
if funding_group:
funding_div = etree.SubElement(article_info_div,
'div',
{'id': 'funding'})
funding_b = etree.SubElement(funding_div, 'b')
funding_b.text = 'Funding: '
#As far as I can tell, PLoS only uses one funding-statement
funding_statement = funding_group[0].find('funding-statement')
append_all_below(funding_div, funding_statement) |
<SYSTEM_TASK:>
Creates the element for declaring competing interests in the article
<END_TASK>
<USER_TASK:>
Description:
def make_article_info_competing_interests(self, article_info_div):
"""
Creates the element for declaring competing interests in the article
info.
""" |
#Check for author-notes
con_expr = "./front/article-meta/author-notes/fn[@fn-type='conflict']"
conflict = self.article.root.xpath(con_expr)
if not conflict:
return
conflict_div = etree.SubElement(article_info_div,
'div',
{'id': 'conflict'})
b = etree.SubElement(conflict_div, 'b')
b.text = 'Competing Interests: '
fn_p = conflict[0].find('p')
if fn_p is not None:
append_all_below(conflict_div, fn_p) |
<SYSTEM_TASK:>
Articles generally provide a first contact, typically an email address
<END_TASK>
<USER_TASK:>
Description:
def make_article_info_correspondences(self, article_info_div):
"""
Articles generally provide a first contact, typically an email address
for one of the authors. This will supply that content.
""" |
corresps = self.article.root.xpath('./front/article-meta/author-notes/corresp')
if corresps:
corresp_div = etree.SubElement(article_info_div,
'div',
{'id': 'correspondence'})
for corresp in corresps:
sub_div = etree.SubElement(corresp_div,
'div',
{'id': corresp.attrib['id']})
append_all_below(sub_div, corresp) |
<SYSTEM_TASK:>
Glossaries are a fairly common item in papers for PLoS, but it also
<END_TASK>
<USER_TASK:>
Description:
def make_back_glossary(self, body):
"""
Glossaries are a fairly common item in papers for PLoS, but it also
seems that they are rarely incorporated into the PLoS web-site or PDF
formats. They are included in the ePub output however because they are
helpful and because we can.
""" |
for glossary in self.article.root.xpath('./back/glossary'):
gloss_copy = deepcopy(glossary)
gloss_copy.tag = 'div'
gloss_copy.attrib['class'] = 'back-glossary'
body.append(gloss_copy) |
<SYSTEM_TASK:>
Extract or extended quoted passage from another work, usually made
<END_TASK>
<USER_TASK:>
Description:
def convert_disp_quote_elements(self):
"""
Extract or extended quoted passage from another work, usually made
typographically distinct from surrounding text
<disp-quote> elements have a relatively complex content model, but PLoS
appears to employ either <p>s or <list>s.
""" |
for disp_quote in self.main.getroot().findall('.//disp-quote'):
if disp_quote.getparent().tag == 'p':
elevate_element(disp_quote)
disp_quote.tag = 'div'
disp_quote.attrib['class'] = 'disp-quote' |
<SYSTEM_TASK:>
This function will render a formatted URL for accessing the PLoS' server
<END_TASK>
<USER_TASK:>
Description:
def fetch_single_representation(self, item_xlink_href):
"""
This function will render a formatted URL for accessing the PLoS' server
SingleRepresentation of an object.
""" |
#A dict of URLs for PLoS subjournals
journal_urls = {'pgen': 'http://www.plosgenetics.org/article/{0}',
'pcbi': 'http://www.ploscompbiol.org/article/{0}',
'ppat': 'http://www.plospathogens.org/article/{0}',
'pntd': 'http://www.plosntds.org/article/{0}',
'pmed': 'http://www.plosmedicine.org/article/{0}',
'pbio': 'http://www.plosbiology.org/article/{0}',
'pone': 'http://www.plosone.org/article/{0}',
'pctr': 'http://clinicaltrials.ploshubs.org/article/{0}'}
#Identify subjournal name for base URl
subjournal_name = self.article.doi.split('.')[2]
base_url = journal_urls[subjournal_name]
#Compose the address for fetchSingleRepresentation
resource = 'fetchSingleRepresentation.action?uri=' + item_xlink_href
return base_url.format(resource) |
<SYSTEM_TASK:>
A song, poem, or verse
<END_TASK>
<USER_TASK:>
Description:
def convert_verse_group_elements(self):
"""
A song, poem, or verse
Implementor’s Note: No attempt has been made to retain the look or
visual form of the original poetry.
This unusual element, <verse-group> is used to convey poetry and is
recursive in nature (it may contain further <verse-group> elements).
Examples of these tags are sparse, so it remains difficult to ensure
full implementation. This method will attempt to handle the label,
title, and subtitle elements correctly, while converting <verse-lines>
to italicized lines.
""" |
for verse_group in self.main.getroot().findall('.//verse-group'):
#Find some possible sub elements for the heading
label = verse_group.find('label')
title = verse_group.find('title')
subtitle = verse_group.find('subtitle')
#Modify the verse-group element
verse_group.tag = 'div'
verse_group.attrib['id'] = 'verse-group'
#Create a title for the verse_group
if label is not None or title is not None or subtitle is not None:
new_verse_title = etree.Element('b')
#Insert it at the beginning
verse_group.insert(0, new_verse_title)
#Induct the title elements into the new title
if label is not None:
append_all_below(new_verse_title, label)
remove(label)
if title is not None:
append_all_below(new_verse_title, title)
remove(title)
if subtitle is not None:
append_all_below(new_verse_title, subtitle)
remove(subtitle)
for verse_line in verse_group.findall('verse-line'):
verse_line.tag = 'p'
verse_line.attrib['class'] = 'verse-line' |
<SYSTEM_TASK:>
A sequence of two or more items, which may or may not be ordered.
<END_TASK>
<USER_TASK:>
Description:
def convert_list_elements(self):
"""
A sequence of two or more items, which may or may not be ordered.
The <list> element has an optional <label> element and optional <title>
element, followed by one or more <list-item> elements. This element
is recursive as the <list-item> elements may contain further <list> or
<def-list> elements. Much of the potential complexity in dealing with
lists comes from this recursion.
""" |
#I have yet to gather many examples of this element, and may have to
#write a recursive method for the processing of lists depending on how
#PLoS produces their XML, for now this method is ignorant of nesting
#TODO: prefix-words, one possible solution would be to have this method
#edit the CSS to provide formatting support for arbitrary prefixes...
#This is a block level element, so elevate it if found in p
for list_el in self.main.getroot().findall('.//list'):
if list_el.getparent().tag == 'p':
elevate_element(list_el)
#list_el is used instead of list (list is reserved)
for list_el in self.main.getroot().findall('.//list'):
if 'list-type' not in list_el.attrib:
list_el_type = 'order'
else:
list_el_type = list_el.attrib['list-type']
#Unordered lists
if list_el_type in ['', 'bullet', 'simple']:
list_el.tag = 'ul'
#CSS must be used to recognize the class and suppress bullets
if list_el_type == 'simple':
list_el.attrib['class'] = 'simple'
#Ordered lists
else:
list_el.tag = 'ol'
list_el.attrib['class'] = list_el_type
#Convert the list-item element tags to 'li'
for list_item in list_el.findall('list-item'):
list_item.tag = 'li'
remove_all_attributes(list_el, exclude=['id', 'class']) |
<SYSTEM_TASK:>
Some paths must be made absolute, this will attempt to convert them.
<END_TASK>
<USER_TASK:>
Description:
def absolute_path(user_path):
"""
Some paths must be made absolute, this will attempt to convert them.
""" |
if os.path.isabs(user_path):
return unix_path_coercion(user_path)
else:
try:
return openaccess_epub.utils.evaluate_relative_path(relative=user_path)
except Exception:
raise ValidationError('This path could not be rendered as absolute') |
<SYSTEM_TASK:>
Returns the function, if any, that encloses a given location.
<END_TASK>
<USER_TASK:>
Description:
def encloses(self,
location: FileLocation
) -> Optional[FunctionDesc]:
"""
Returns the function, if any, that encloses a given location.
""" |
for func in self.in_file(location.filename):
if location in func.location:
return func
return None |
<SYSTEM_TASK:>
Returns an iterator over all of the functions definitions that are
<END_TASK>
<USER_TASK:>
Description:
def in_file(self, filename: str) -> Iterator[FunctionDesc]:
"""
Returns an iterator over all of the functions definitions that are
contained within a given file.
""" |
yield from self.__filename_to_functions.get(filename, []) |
<SYSTEM_TASK:>
Try to load config, to load other journal locations
<END_TASK>
<USER_TASK:>
Description:
def parse_config(args):
"""
Try to load config, to load other journal locations
Otherwise, return default location
Returns journal location
""" |
# Try user config or return default location early
config_path = path.expanduser(args.config_file)
if not path.exists(config_path):
# Complain if they provided a non-existent config
if args.config_file != DEFAULT_JOURNAL_RC:
print("journal: error: config file '" + args.config_file + "' not found")
sys.exit()
else:
# If no config file, use default journal location
return DEFAULT_JOURNAL
# If we get here, assume valid config file
config = ConfigParser.SafeConfigParser({
'journal':{'default':'__journal'},
'__journal':{'location':DEFAULT_JOURNAL}
})
config.read(config_path)
journal_location = config.get(config.get('journal', 'default'), 'location')
if args.journal:
journal_location = config.get(args.journal, 'location')
return journal_location |
<SYSTEM_TASK:>
args
<END_TASK>
<USER_TASK:>
Description:
def record_entries(journal_location, entries):
"""
args
entries - list of entries to record
""" |
check_journal_dest(journal_location)
current_date = datetime.datetime.today()
date_header = current_date.strftime("%a %H:%M:%S %Y-%m-%d") + "\n"
with open(build_journal_path(journal_location, current_date), "a") as date_file:
entry_output = date_header
# old style
# for entry in entries:
# entry_output += "-" + entry + "\n"
# new style
entry_output += '-' + ' '.join(entries) + "\n"
entry_output += "\n"
date_file.write(entry_output) |
<SYSTEM_TASK:>
args
<END_TASK>
<USER_TASK:>
Description:
def get_entry(journal_location, date):
"""
args
date - date object
returns entry text or None if entry doesn't exist
""" |
if not isinstance(date, datetime.date):
return None
try:
with open(build_journal_path(journal_location, date), "r") as entry_file:
return entry_file.read()
except IOError:
return None |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def replace(
fname1,
fname2,
dfilter1,
dfilter2,
has_header1=True,
has_header2=True,
frow1=0,
frow2=0,
ofname=None,
ocols=None,
):
r"""
Replace data in one file with data from another file.
:param fname1: Name of the input comma-separated values file, the file
that contains the columns to be replaced
:type fname1: FileNameExists_
:param fname2: Name of the replacement comma-separated values file, the
file that contains the replacement data
:type fname2: FileNameExists_
:param dfilter1: Row and/or column filter for the input file
:type dfilter1: :ref:`CsvDataFilter`
:param dfilter2: Row and/or column filter for the replacement file
:type dfilter2: :ref:`CsvDataFilter`
:param has_header1: Flag that indicates whether the input comma-separated
values file has column headers in its first line (True)
or not (False)
:type has_header1: boolean
:param has_header2: Flag that indicates whether the replacement
comma-separated values file has column headers in its
first line (True) or not (False)
:type has_header2: boolean
:param frow1: Input comma-separated values file first data row (starting
from 1). If 0 the row where data starts is auto-detected as
the first row that has a number (integer or float) in at
least one of its columns
:type frow1: NonNegativeInteger_
:param frow2: Replacement comma-separated values file first data row
(starting from 1). If 0 the row where data starts is
auto-detected as the first row that has a number (integer or
float) in at least one of its columns
:type frow2: NonNegativeInteger_
:param ofname: Name of the output comma-separated values file, the file
that will contain the input file data but with some columns
replaced with data from the replacement file. If None the
input file is replaced "in place"
:type ofname: FileName_
:param ocols: Names of the replaced columns in the output comma-separated
values file. If None the column names in the input file are
used if **has_header1** is True, otherwise no header is used
:type ocols: list or None
.. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for pcsv.replace.replace
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \`dfilter1\` is not valid)
* RuntimeError (Argument \`dfilter2\` is not valid)
* RuntimeError (Argument \`fname1\` is not valid)
* RuntimeError (Argument \`fname2\` is not valid)
* RuntimeError (Argument \`frow1\` is not valid)
* RuntimeError (Argument \`frow2\` is not valid)
* RuntimeError (Argument \`ocols\` is not valid)
* RuntimeError (Argument \`ofname\` is not valid)
* RuntimeError (Column headers are not unique in file *[fname]*)
* RuntimeError (File *[fname]* has no valid data)
* RuntimeError (File *[fname]* is empty)
* RuntimeError (Invalid column specification)
* RuntimeError (Number of input and output columns are different)
* RuntimeError (Number of input and replacement columns are
different)
* ValueError (Column *[column_identifier]* not found)
* ValueError (Number of rows mismatch between input and replacement
data)
.. [[[end]]]
""" |
# pylint: disable=R0913,R0914
irmm_ex = pexdoc.exh.addex(
RuntimeError, "Number of input and replacement columns are different"
)
iomm_ex = pexdoc.exh.addex(
RuntimeError, "Number of input and output columns are different"
)
# Read and validate input data
iobj = CsvFile(fname=fname1, dfilter=dfilter1, has_header=has_header1, frow=frow1)
# Read and validate replacement data
robj = CsvFile(fname=fname2, dfilter=dfilter2, has_header=has_header2, frow=frow2)
# Assign output data structure
ofname = fname1 if ofname is None else ofname
icfilter = iobj.header() if iobj.cfilter is None else iobj.cfilter
rcfilter = robj.header() if robj.cfilter is None else robj.cfilter
ocols = icfilter if ocols is None else ocols
# Miscellaneous data validation
irmm_ex(len(icfilter) != len(rcfilter))
iomm_ex(len(icfilter) != len(ocols))
# Replace data
iobj.replace(rdata=robj.data(filtered=True), filtered=True)
iheader_upper = [
item.upper() if isinstance(item, str) else item for item in iobj.header()
]
icfilter_index = [
iheader_upper.index(item.upper() if isinstance(item, str) else item)
for item in icfilter
]
# Create new header
orow = []
if has_header1:
for col_num, idata in enumerate(iobj.header()):
orow.append(
ocols[icfilter_index.index(col_num)]
if col_num in icfilter_index
else idata
)
# Write (new) file
iobj.write(fname=ofname, header=orow if orow else False, append=False) |
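A minimal usage sketch (the file and column names here are hypothetical):
# Replace the 'Temp' column of data.csv with the 'TempC' column of fixes.csv,
# writing the result to out.csv instead of overwriting data.csv in place.
replace('data.csv', 'fixes.csv', dfilter1=['Temp'], dfilter2=['TempC'], ofname='out.csv')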
<SYSTEM_TASK:>
Normalized SPM HRF function from sum of two gamma PDFs
<END_TASK>
<USER_TASK:>
Description:
def spmt(t, peak_delay=6, under_delay=16, peak_disp=1, under_disp=1,
p_u_ratio=6):
"""Normalized SPM HRF function from sum of two gamma PDFs
Parameters
----------
t : array-like
vector of times at which to sample HRF
Returns
-------
hrf : array
vector length ``len(t)`` of samples from HRF at times `t`
Notes
-----
[1] This is the canonical HRF function as used in SPM. It
has the following defaults:
- delay of response (relative to onset) : 6s
- delay of undershoot (relative to onset) : 16s
- dispersion of response : 1s
- dispersion of undershoot : 1s
- ratio of response to undershoot : 6s
- onset : 0s
- length of kernel : 32s
References:
-----
[1] http://nipy.org/
[2] https://github.com/fabianp/hrf_estimation
""" |
return spm_hrf_compat(t, peak_delay=peak_delay, under_delay=under_delay,
peak_disp=peak_disp, under_disp=under_disp,
p_u_ratio=p_u_ratio, normalize=True) |
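For illustration, the HRF can be sampled on a fine time grid (assuming numpy is imported as np, as elsewhere in this module):
t = np.arange(0, 32, 0.1)   # 32 s kernel sampled every 100 ms
hrf = spmt(t)               # peaks around 5-6 s, with a late undershoot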
<SYSTEM_TASK:>
SPM canonical HRF dispersion derivative, values for time values `t`
<END_TASK>
<USER_TASK:>
Description:
def ddspmt(t, peak_delay=6, under_delay=16, peak_disp=1, under_disp=1,
p_u_ratio=6):
""" SPM canonical HRF dispersion derivative, values for time values `t`
Parameters
----------
t : array-like
vector of times at which to sample HRF
Returns
-------
hrf : array
vector length ``len(t)`` of samples from HRF at times `t`
Notes
-----
[1] This is the canonical HRF dispersion derivative function as used in SPM
[2] It is the numerical difference between the HRF sampled at time `t`, and
values at `t` for another HRF shape with a small change in the peak
dispersion parameter (``peak_disp`` in func:`spm_hrf_compat`).
References:
-----
[1] http://nipy.org/
[2] https://github.com/fabianp/hrf_estimation
""" |
# Shift the peak dispersion by a small amount and take the numerical
# difference with the HRF built from the passed parameters.
_spm_dd_func = partial(spmt, peak_delay=peak_delay,
under_delay=under_delay,
under_disp=under_disp, p_u_ratio=p_u_ratio,
peak_disp=peak_disp + 0.01)
return (spmt(t, peak_delay=peak_delay, under_delay=under_delay,
peak_disp=peak_disp, under_disp=under_disp,
p_u_ratio=p_u_ratio) - _spm_dd_func(t)) / 0.01
<SYSTEM_TASK:>
Creation of condition time courses in temporally upsampled space.
<END_TASK>
<USER_TASK:>
Description:
def create_boxcar(aryCnd, aryOns, aryDrt, varTr, varNumVol,
aryExclCnd=None, varTmpOvsmpl=1000.):
"""
Creation of condition time courses in temporally upsampled space.
Parameters
----------
aryCnd : np.array
1D array with condition identifiers (every condition has its own int)
aryOns : np.array, same len as aryCnd
1D array with condition onset times in seconds.
aryDrt : np.array, same len as aryCnd
1D array with condition durations of different conditions in seconds.
varTr : float, positive
Time to repeat (TR) of the (fMRI) experiment.
varNumVol : float, positive
Number of volumes of the (fMRI) data.
aryExclCnd : array
1D array containing condition identifiers for conditions to be excluded
varTmpOvsmpl : float, positive
Factor by which the time courses should be temporally upsampled.
Returns
-------
aryBxCrOut : np.array, float16
Condition time courses in temporally upsampled space.
References:
-----
[1] https://github.com/fabianp/hrf_estimation
""" |
if aryExclCnd is not None:
for cond in aryExclCnd:
aryOns = aryOns[aryCnd != cond]
aryDrt = aryDrt[aryCnd != cond]
aryCnd = aryCnd[aryCnd != cond]
resolution = varTr / float(varTmpOvsmpl)
aryCnd = np.asarray(aryCnd)
aryOns = np.asarray(aryOns, dtype=float)
unique_conditions = np.sort(np.unique(aryCnd))
boxcar = []
for c in unique_conditions:
tmp = np.zeros(int(varNumVol * varTr/resolution))
onset_c = aryOns[aryCnd == c]
duration_c = aryDrt[aryCnd == c]
onset_idx = np.round(onset_c / resolution).astype(int)
duration_idx = np.round(duration_c / resolution).astype(int)
aux = np.arange(int(varNumVol * varTr/resolution))
for start, dur in zip(onset_idx, duration_idx):
lgc = np.logical_and(aux >= start, aux < start + dur)
tmp = tmp + lgc
assert np.all(np.less(tmp, 2))
boxcar.append(tmp)
aryBxCrOut = np.array(boxcar).T
if aryBxCrOut.shape[1] == 1:
aryBxCrOut = np.squeeze(aryBxCrOut)
return aryBxCrOut.astype('float16') |
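A small example of the expected call pattern (condition labels, onsets and durations are made up):
aryCnd = np.array([1, 2, 1])
aryOns = np.array([0.0, 10.0, 20.0])   # onsets in seconds
aryDrt = np.array([2.0, 2.0, 2.0])     # durations in seconds
aryBxCr = create_boxcar(aryCnd, aryOns, aryDrt, varTr=2.0, varNumVol=15)
# aryBxCr.shape == (15000, 2): 15 volumes * 2 s TR at 1000x oversampling, 2 conditions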
<SYSTEM_TASK:>
Creates a dictionary with the summarized information in job_data, input_files and input_directories
<END_TASK>
<USER_TASK:>
Description:
def create_inputs_to_reference(job_data, input_files, input_directories):
"""
Creates a dictionary with the summarized information in job_data, input_files and input_directories
:param job_data: The job data specifying input parameters other than files and directories.
:param input_files: A dictionary describing the input files.
:param input_directories: A dictionary describing the input directories.
:return: A summarized dictionary containing information about all given inputs.
""" |
return {**deepcopy(job_data), **deepcopy(input_files), **deepcopy(input_directories)} |
<SYSTEM_TASK:>
Splits a given string at a given separator or list of separators.
<END_TASK>
<USER_TASK:>
Description:
def split_all(reference, sep):
"""
Splits a given string at a given separator or list of separators.
:param reference: The reference to split.
:param sep: Separator string or list of separator strings.
:return: A list of split strings
""" |
parts = partition_all(reference, sep)
return [p for p in parts if p not in sep] |
<SYSTEM_TASK:>
Returns the attributes in demand of the input file.
<END_TASK>
<USER_TASK:>
Description:
def _resolve_file(attributes, input_file, input_identifier, input_reference):
"""
Returns the attributes in demand of the input file.
:param attributes: A list of attributes to get from the input_file.
:param input_file: The file from which to get the attributes.
:param input_identifier: The input identifier of the given file.
:param input_reference: The reference string
:return: The attribute in demand
""" |
if input_file['isArray']:
raise InvalidInputReference('Input References to Arrays of input files are currently not supported.\n'
'"{}" is an array of files and can not be resolved for input references:'
'\n{}'.format(input_identifier, input_reference))
single_file = input_file['files'][0]
try:
return _get_dict_element(single_file, attributes)
except KeyError:
raise InvalidInputReference('Could not get attributes "{}" from input file "{}", needed in input reference:'
'\n{}'.format(attributes, input_identifier, input_reference)) |
<SYSTEM_TASK:>
Returns the attributes in demand of the input directory.
<END_TASK>
<USER_TASK:>
Description:
def _resolve_directory(attributes, input_directory, input_identifier, input_reference):
"""
Returns the attributes in demand of the input directory.
:param attributes: A list of attributes to get from the input directory.
:param input_directory: The directory from which to get the attributes.
:param input_identifier: The input identifier of the given directory.
:param input_reference: The reference string
:return: The attribute in demand
""" |
if input_directory['isArray']:
raise InvalidInputReference('Input References to Arrays of input directories are currently not supported.\n'
'input directory "{}" is an array of directories and can not be resolved for input '
'references:\n{}'.format(input_identifier, input_reference))
single_directory = input_directory['directories'][0]
try:
return _get_dict_element(single_directory, attributes)
except KeyError:
raise InvalidInputReference('Could not get attributes "{}" from input directory "{}", needed in input '
'reference:\n{}'.format(attributes, input_identifier, input_reference)) |
<SYSTEM_TASK:>
Replaces a given input_reference by a string extracted from inputs_to_reference.
<END_TASK>
<USER_TASK:>
Description:
def resolve_input_reference(reference, inputs_to_reference):
"""
Replaces a given input_reference by a string extracted from inputs_to_reference.
:param reference: The input reference to resolve.
:param inputs_to_reference: A dictionary containing information about the given inputs.
:raise InvalidInputReference: If the given input reference could not be resolved.
:return: A string which is the resolved input reference.
""" |
if not reference.startswith('{}inputs.'.format(INPUT_REFERENCE_START)):
raise InvalidInputReference('An input reference must have the following form '
'"$(inputs.<input_name>[.<attribute>])".\n'
'The invalid reference is: "{}"'.format(reference))
# remove "$(inputs." and ")"
reference = reference[2:-1]
parts = split_all(reference, ATTRIBUTE_SEPARATOR_SYMBOLS)
if len(parts) < 2:
raise InvalidInputReference('InputReference should at least contain "$(inputs.identifier)". The following input '
'reference does not comply with it:\n{}'.format(reference))
elif parts[0] != "inputs":
raise InvalidInputReference('InputReference should at least contain "$(inputs.identifier)". The following input'
' reference does not comply with it:\n$({})'.format(reference))
else:
input_identifier = parts[1]
input_to_reference = inputs_to_reference.get(input_identifier)
if input_to_reference is None:
raise InvalidInputReference('Input identifier "{}" not found in inputs, but needed in input reference:\n{}'
.format(input_identifier, reference))
elif isinstance(input_to_reference, dict):
if 'files' in input_to_reference:
return _resolve_file(parts[2:], input_to_reference, input_identifier, reference)
elif 'directories' in input_to_reference:
return _resolve_directory(parts[2:], input_to_reference, input_identifier, reference)
else:
raise InvalidInputReference('Unknown input type for input identifier "{}"'.format(input_identifier))
else:
if len(parts) > 2:
raise InvalidInputReference('Attribute "{}" of input reference "{}" could not be resolved'
.format(parts[2], reference))
else:
return parts[1] |
<SYSTEM_TASK:>
Resolves input references given in the string to_resolve by using the inputs_to_reference.
<END_TASK>
<USER_TASK:>
Description:
def resolve_input_references(to_resolve, inputs_to_reference):
"""
Resolves input references given in the string to_resolve by using the inputs_to_reference.
See http://www.commonwl.org/user_guide/06-params/index.html for more information.
Example:
"$(inputs.my_file.nameroot).md" -> "filename.md"
:param to_resolve: The path to match
:param inputs_to_reference: Inputs which are used to resolve input references like $(inputs.my_input_file.basename).
:return: A string in which the input references are replaced with actual values.
""" |
splitted = split_input_references(to_resolve)
result = []
for part in splitted:
if is_input_reference(part):
result.append(str(resolve_input_reference(part, inputs_to_reference)))
else:
result.append(part)
return ''.join(result) |
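An illustrative call, assuming the file description uses the 'isArray'/'files' layout the resolver expects (keys such as 'nameroot' are part of that assumption):
inputs = {'my_file': {'isArray': False, 'files': [{'basename': 'report.csv', 'nameroot': 'report'}]}}
resolve_input_references('$(inputs.my_file.nameroot).md', inputs)   # -> 'report.md'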
<SYSTEM_TASK:>
Try to compile a string into a Django template
<END_TASK>
<USER_TASK:>
Description:
def TemplateValidator(value):
"""Try to compile a string into a Django template""" |
try:
Template(value)
except Exception as e:
raise ValidationError(
_("Cannot compile template (%(exception)s)"),
params={"exception": e}
) |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def merge(
fname1,
fname2,
dfilter1=None,
dfilter2=None,
has_header1=True,
has_header2=True,
frow1=0,
frow2=0,
ofname=None,
ocols=None,
):
r"""
Merge two comma-separated values files.
Data columns from the second file are appended after data columns from the
first file. Empty values in columns are used if the files have different
number of rows
:param fname1: Name of the first comma-separated values file, the file
whose columns appear first in the output file
:type fname1: FileNameExists_
:param fname2: Name of the second comma-separated values file, the file
whose columns appear last in the output file
:type fname2: FileNameExists_
:param dfilter1: Row and/or column filter for the first file. If None no
data filtering is done on the file
:type dfilter1: :ref:`CsvDataFilter` or None
:param dfilter2: Row and/or column filter for the second file. If None no
data filtering is done on the file
:type dfilter2: :ref:`CsvDataFilter` or None
:param has_header1: Flag that indicates whether the first comma-separated
values file has column headers in its first line (True)
or not (False)
:type has_header1: boolean
:param has_header2: Flag that indicates whether the second comma-separated
values file has column headers in its first line (True)
or not (False)
:type has_header2: boolean
:param frow1: First comma-separated values file first data row (starting
from 1). If 0 the row where data starts is auto-detected as
the first row that has a number (integer or float) in at
least one of its columns
:type frow1: NonNegativeInteger_
:param frow2: Second comma-separated values file first data row (starting
from 1). If 0 the row where data starts is auto-detected as
the first row that has a number (integer of float) in at
least one of its columns
:type frow2: NonNegativeInteger_
:param ofname: Name of the output comma-separated values file, the file
that will contain the data from the first and second files.
If None the first file is replaced "in place"
:type ofname: FileName_ or None
:param ocols: Column names of the output comma-separated values file.
If None the column names in the first and second files are
used if **has_header1** and/or **has_header2** are True. The
column labels :code:`'Column [column_number]'` are used when
one of the two files does not have a header, where
:code:`[column_number]` is an integer representing the column
number (column 0 is the leftmost column). No header is used
if **has_header1** and **has_header2** are False
:type ocols: list or None
.. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for pcsv.merge.merge
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \`dfilter1\` is not valid)
* RuntimeError (Argument \`dfilter2\` is not valid)
* RuntimeError (Argument \`fname1\` is not valid)
* RuntimeError (Argument \`fname2\` is not valid)
* RuntimeError (Argument \`frow1\` is not valid)
* RuntimeError (Argument \`frow2\` is not valid)
* RuntimeError (Argument \`ocols\` is not valid)
* RuntimeError (Argument \`ofname\` is not valid)
* RuntimeError (Column headers are not unique in file *[fname]*)
* RuntimeError (Combined columns in data files and output columns are
different)
* RuntimeError (File *[fname]* has no valid data)
* RuntimeError (File *[fname]* is empty)
* RuntimeError (Invalid column specification)
* ValueError (Column *[column_identifier]* not found)
.. [[[end]]]
""" |
# pylint: disable=R0913,R0914
iomm_ex = pexdoc.exh.addex(
RuntimeError, "Combined columns in data files and output columns are different"
)
# Read and validate file 1
obj1 = CsvFile(fname=fname1, dfilter=dfilter1, has_header=has_header1, frow=frow1)
# Read and validate file 2
obj2 = CsvFile(fname=fname2, dfilter=dfilter2, has_header=has_header2, frow=frow2)
# Assign output data structure
ofname = fname1 if ofname is None else ofname
cfilter1 = obj1.header() if obj1.cfilter is None else obj1.cfilter
cfilter2 = obj2.header() if obj2.cfilter is None else obj2.cfilter
# Create new header
cols1 = len(cfilter1)
cols2 = len(cfilter2)
if (ocols is None) and has_header1 and has_header2:
ocols = [cfilter1 + cfilter2]
elif (ocols is None) and has_header1 and (not has_header2):
ocols = [
cfilter1
+ [
"Column {0}".format(item)
for item in range(cols1 + 1, cols1 + cols2 + 1)
]
]
elif (ocols is None) and (not has_header1) and has_header2:
ocols = [["Column {0}".format(item) for item in range(1, cols1 + 1)] + cfilter2]
elif ocols is None:
ocols = []
else:
iomm_ex(cols1 + cols2 != len(ocols))
ocols = [ocols]
# Even out rows
delta = obj1.rows(filtered=True) - obj2.rows(filtered=True)
data1 = obj1.data(filtered=True)
data2 = obj2.data(filtered=True)
if delta > 0:
row = [cols2 * [None]]
data2 += delta * row
elif delta < 0:
row = [cols1 * [None]]
data1 += abs(delta) * row
data = ocols
for item1, item2 in zip(data1, data2):
data.append(item1 + item2)
write(fname=ofname, data=data, append=False) |
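A hedged usage sketch (file and column names are hypothetical):
# Append the 'Pressure' column of right.csv after the 'Time' and 'Temp'
# columns of left.csv, writing the merged table to combined.csv.
merge('left.csv', 'right.csv', dfilter1=['Time', 'Temp'], dfilter2=['Pressure'], ofname='combined.csv')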
<SYSTEM_TASK:>
Scrapes the given URL to a page in JSON format to obtain the documentation for the LBRY API
<END_TASK>
<USER_TASK:>
Description:
def get_lbry_api_function_docs(url=LBRY_API_RAW_JSON_URL):
""" Scrapes the given URL to a page in JSON format to obtain the documentation for the LBRY API
:param str url: URL to the documentation we need to obtain,
pybry.constants.LBRY_API_RAW_JSON_URL by default
:return: List of functions retrieved from the `url` given
:rtype: list
""" |
try:
# Grab the page content
docs_page = urlopen(url)
# Read the contents of the actual url we grabbed and decode them into UTF-8
contents = docs_page.read().decode("utf-8")
# Return the contents loaded as JSON
return loads(contents)
# If fetching or parsing fails, print the error and fall through to return an empty list
except URLError as UE:
print(UE)
except Exception as E:
print(E)
return [] |
<SYSTEM_TASK:>
Generates the body for the given function
<END_TASK>
<USER_TASK:>
Description:
def generate_method_definition(func):
""" Generates the body for the given function
:param dict func: dict of a JSON-Formatted function as defined by the API docs
:return: A String containing the definition for the function as it should be written in code
:rtype: str
""" |
indent = 4
# initial definition
method_definition = (" " * indent) + "def " + func["name"]
# Split the parameters into required and optional lists, preserving
# the order in which they were given
params_required = [
param for param in func["arguments"] if param["is_required"]
]
params_optional = [
param for param in func["arguments"]
if not param["is_required"]
]
# Open the parameter definitions
method_definition += "(self, "
for param in params_required:
# Put the parameter into the queue
method_definition += param["name"]
method_definition += ", "
for param in params_optional:
method_definition += param["name"]
# Default methods not required
method_definition += "=None, "
# Peel off the final ", " and close off the parameter definition
method_definition = method_definition.rstrip(", ") + "):\n"
indent += 4
# re-indent
method_definition += " " * indent
# Begin with description.
method_definition += '"""' + func["description"]
# re-indent
method_definition += "\n\n" + " " * indent
# Go through each parameter and insert description & type hint
for param in params_required + params_optional:
# Add the type
method_definition += ":param " + DTYPE_MAPPING[param["type"].lower()]
# Add the name
method_definition += " " + param["name"] + ": "
# Add the description
method_definition += param["description"]
# Add optionality & reindent
method_definition += "\n" if param[
"is_required"] else " (Optional)\n"
method_definition += " " * indent
open_index = func["returns"].find('(')
close_index = func["returns"].find(
')', (open_index if open_index > -1 else 0))
func["returns"] = func["returns"].replace("\t", " " * 4)
return_string = func["returns"].replace("\n", "")
if open_index < close_index and func["returns"][
open_index + 1:close_index] in DTYPE_MAPPING:
method_definition += ":rtype: " + DTYPE_MAPPING[
func["returns"][open_index + 1:close_index]]
func["returns"] = func["returns"].replace(
func["returns"][open_index:close_index + 1], "")
method_definition += "\n" + " " * indent
method_definition += ":return: " + return_string
for i in range(0, len(return_string) + 1, 80 - (indent + 2)):
method_definition += return_string[i:i + (
80 - (indent + 2))] + "\n" + " " * indent
# Close it off & reindent
method_definition += '"""' + "\n" + " " * indent
# Create the params map
params_map = "__params_map = {"
# Save the indent
params_indent, num_params = len(
params_map), len(params_required) + len(params_optional)
# Append the map to the method_definition
method_definition += params_map
# Go through the required parameters first
for i, param in enumerate(params_required + params_optional):
# append the methods to the map
method_definition += "'" + param["name"] + "': " + param["name"]
if not param["is_required"]:
method_definition += " if " + param["name"] + " is not None else None"
# add commas or ending bracket if needed & reindent correctly
method_definition += ",\n" + " " * indent + ' ' * params_indent if i + 1 < num_params else ""
method_definition += '}\n\n' + ' ' * indent
method_definition += "return self.make_request(SERVER_ADDRESS, '" + func["name"] + "', " \
+ params_map.rstrip(" = {") + ", timeout=self.timeout)\n\n"
return method_definition |
<SYSTEM_TASK:>
Generates the actual functions for lbryd_api.py based on lbry's documentation
<END_TASK>
<USER_TASK:>
Description:
def generate_lbryd_wrapper(url=LBRY_API_RAW_JSON_URL, read_file=__LBRYD_BASE_FPATH__, write_file=LBRYD_FPATH):
""" Generates the actual functions for lbryd_api.py based on lbry's documentation
:param str url: URL to the documentation we need to obtain,
pybry.constants.LBRY_API_RAW_JSON_URL by default
:param str read_file: This is the path to the file from which we will be reading
:param str write_file: Path from project root to the file we'll be writing to.
""" |
functions = get_lbry_api_function_docs(url)
# Open the actual file for appending
with open(write_file, 'w') as lbry_file:
lbry_file.write("# This file was generated at build time using the generator function\n")
lbry_file.write("# You may edit but do so with caution\n")
with open(read_file, 'r') as template:
header = template.read()
lbry_file.write(header)
# Iterate through all the functions we retrieved
for func in functions:
method_definition = generate_method_definition(func)
# Write to file
lbry_file.write(method_definition)
try:
from yapf.yapflib.yapf_api import FormatFile
# Now we should format the file using the yapf formatter
FormatFile(write_file, in_place=True)
except ImportError as IE:
print("[Warning]: yapf is not installed, so the generated code will not follow an easy-to-read standard")
print(IE) |
<SYSTEM_TASK:>
Load nii file.
<END_TASK>
<USER_TASK:>
Description:
def load_nii(strPathIn, varSzeThr=5000.0):
"""
Load nii file.
Parameters
----------
strPathIn : str
Path to nii file to load.
varSzeThr : float
If the nii file is larger than this threshold (in MB), the file is
loaded volume-by-volume in order to prevent memory overflow. Default
threshold is 5000 MB.
Returns
-------
aryNii : np.array
Array containing nii data. 32 bit floating point precision.
objHdr : header object
Header of nii file.
aryAff : np.array
Array containing 'affine', i.e. information about spatial positioning
of nii data.
Notes
-----
If the nii file is larger than the specified threshold (`varSzeThr`), the
file is loaded volume-by-volume in order to prevent memory overflow. The
reason for this is that nibabel imports data at float64 precision, which
can lead to a memory overflow even for relatively small files.
""" |
# Load nii file (this does not load the data into memory yet):
objNii = nb.load(strPathIn)
# Get size of nii file:
varNiiSze = os.path.getsize(strPathIn)
# Convert to MB:
varNiiSze = np.divide(float(varNiiSze), 1000000.0)
# Load volume-by-volume or all at once, depending on file size:
if np.greater(varNiiSze, float(varSzeThr)):
# Load large nii file
print(('---------Large file size ('
+ str(np.around(varNiiSze))
+ ' MB), reading volume-by-volume'))
# Get image dimensions:
tplSze = objNii.shape
# Create empty array for nii data:
aryNii = np.zeros(tplSze, dtype=np.float32)
# Loop through volumes:
for idxVol in range(tplSze[3]):
aryNii[..., idxVol] = np.asarray(
objNii.dataobj[..., idxVol]).astype(np.float32)
else:
# Load small nii file
# Load nii file (this doesn't load the data into memory yet):
objNii = nb.load(strPathIn)
# Load data into array:
aryNii = np.asarray(objNii.dataobj).astype(np.float32)
# Get headers:
objHdr = objNii.header
# Get 'affine':
aryAff = objNii.affine
# Output nii data (as numpy array), header, and 'affine':
return aryNii, objHdr, aryAff |
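Typical call (the file name is hypothetical):
aryNii, objHdr, aryAff = load_nii('func.nii.gz')
# aryNii is float32; files larger than varSzeThr MB are read volume-by-volume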
<SYSTEM_TASK:>
Load result parameters from multiple nii files, with optional mask.
<END_TASK>
<USER_TASK:>
Description:
def load_res_prm(lstFunc, lstFlsMsk=None):
"""Load result parameters from multiple nii files, with optional mask.
Parameters
----------
lstFunc : list,
list of str with file names of 3D or 4D nii files
lstFlsMsk : list, optional
list of str with paths to 3D nii files that can act as mask/s
Returns
-------
lstPrmAry : list
The list will contain as many numpy arrays as masks were provided.
Each array is 2D with shape [nr voxel in mask, nr nii files in lstFunc]
objHdr : header object
Header of nii file.
aryAff : np.array
Array containing 'affine', i.e. information about spatial positioning
of nii data.
""" |
# load parameter/functional maps into a list
lstPrm = []
for ind, path in enumerate(lstFunc):
aryFnc = load_nii(path)[0].astype(np.float32)
if aryFnc.ndim == 3:
lstPrm.append(aryFnc)
# handle cases where nii array is 4D, in this case split arrays up in
# 3D arrays and append those
elif aryFnc.ndim == 4:
for indAx in range(aryFnc.shape[-1]):
lstPrm.append(aryFnc[..., indAx])
# load mask/s if available
if lstFlsMsk is not None:
lstMsk = [None] * len(lstFlsMsk)
for ind, path in enumerate(lstFlsMsk):
aryMsk = load_nii(path)[0].astype(bool)
lstMsk[ind] = aryMsk
else:
print('------------No masks were provided')
if lstFlsMsk is None:
# if no mask was provided we just flatten all parameter array in list
# and return resulting list
lstPrmAry = [ary.flatten() for ary in lstPrm]
else:
# if masks are available, we loop over masks and then over parameter
# maps to extract selected voxels and parameters
lstPrmAry = [None] * len(lstFlsMsk)
for indLst, aryMsk in enumerate(lstMsk):
# prepare array that will hold parameter values of selected voxels
aryPrmSel = np.empty((np.sum(aryMsk), len(lstPrm)),
dtype=np.float32)
# loop over different parameter maps
for indAry, aryPrm in enumerate(lstPrm):
# get voxels specific to this mask
aryPrmSel[:, indAry] = aryPrm[aryMsk, ...]
# put array away in list, if only one parameter map was provided
# the output will be squeezed
lstPrmAry[indLst] = aryPrmSel
# also get header object and affine array
# we simply take it for the first functional nii file, cause that is the
# only file that has to be provided by necessity
objHdr, aryAff = load_nii(lstFunc[0])[1:]
return lstPrmAry, objHdr, aryAff |
<SYSTEM_TASK:>
Remap coordinates from cartesian to polar
<END_TASK>
<USER_TASK:>
Description:
def map_crt_to_pol(aryXCrds, aryYrds):
"""Remap coordinates from cartesian to polar
Parameters
----------
aryXCrds : 1D numpy array
Array with x coordinate values.
aryYrds : 1D numpy array
Array with y coordinate values.
Returns
-------
aryTht : 1D numpy array
Angle of coordinates
aryRad : 1D numpy array
Radius of coordinates.
""" |
aryRad = np.sqrt(aryXCrds**2+aryYrds**2)
aryTht = np.arctan2(aryYrds, aryXCrds)
return aryTht, aryRad |
<SYSTEM_TASK:>
Remap coordinates from polar to cartesian
<END_TASK>
<USER_TASK:>
Description:
def map_pol_to_crt(aryTht, aryRad):
"""Remap coordinates from polar to cartesian
Parameters
----------
aryTht : 1D numpy array
Angle of coordinates
aryRad : 1D numpy array
Radius of coordinates.
Returns
-------
aryXCrds : 1D numpy array
Array with x coordinate values.
aryYrds : 1D numpy array
Array with y coordinate values.
""" |
aryXCrds = aryRad * np.cos(aryTht)
aryYrds = aryRad * np.sin(aryTht)
return aryXCrds, aryYrds |
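The two mappings invert each other, for example:
aryTht, aryRad = map_crt_to_pol(np.array([1.0, 0.0]), np.array([0.0, 1.0]))
aryX, aryY = map_pol_to_crt(aryTht, aryRad)   # recovers (1, 0) and (0, 1) up to floating point error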
<SYSTEM_TASK:>
Return index of nearest expected polar angle.
<END_TASK>
<USER_TASK:>
Description:
def find_near_pol_ang(aryEmpPlrAng, aryExpPlrAng):
"""Return index of nearest expected polar angle.
Parameters
----------
aryEmpPlrAng : 1D numpy array
Empirically found polar angle estimates
aryExpPlrAng : 1D numpy array
Theoretically expected polar angle estimates
Returns
-------
vecIdx : 1D numpy array
Indices of nearest theoretically expected polar angle.
vecDst : 1D numpy array
Distances to nearest theoretically expected polar angle.
""" |
dist = np.abs(np.subtract(aryEmpPlrAng[:, None],
aryExpPlrAng[None, :]))
return np.argmin(dist, axis=-1), np.min(dist, axis=-1) |
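For example, matching two empirical angles against eight equally spaced expected angles:
aryExp = np.linspace(0.0, 2.0 * np.pi, 8, endpoint=False)
vecIdx, vecDst = find_near_pol_ang(np.array([0.1, 3.0]), aryExp)
# vecIdx holds indices into aryExp, vecDst the corresponding absolute angular distances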
<SYSTEM_TASK:>
Remap values in an array from one range to another.
<END_TASK>
<USER_TASK:>
Description:
def rmp_rng(aryVls, varNewMin, varNewMax, varOldThrMin=None,
varOldAbsMax=None):
"""Remap values in an array from one range to another.
Parameters
----------
aryVls : 1D numpy array
Array with values that need to be remapped.
varNewMin : float
Desired minimum value of new, remapped array.
varNewMax : float
Desired maximum value of new, remapped array.
varOldThrMin : float
Theoretical minimum of old distribution. Can be specified if this
theoretical minimum does not occur in empirical distribution but
should be considered nonetheless.
varOldAbsMax : float
Theoretical maximum of old distribution. Can be specified if this
theoretical maximum does not occur in empirical distribution but
should be considered nonetheless.
Returns
-------
aryVls : 1D numpy array
Array with remapped values.
""" |
if varOldThrMin is None:
varOldMin = aryVls.min()
else:
varOldMin = varOldThrMin
if varOldAbsMax is None:
varOldMax = aryVls.max()
else:
varOldMax = varOldAbsMax
aryNewVls = np.empty((aryVls.shape), dtype=aryVls.dtype)
for ind, val in enumerate(aryVls):
aryNewVls[ind] = (((val - varOldMin) * (varNewMax - varNewMin)) /
(varOldMax - varOldMin)) + varNewMin
return aryNewVls |
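A quick illustration of the remapping:
rmp_rng(np.array([0.0, 5.0, 10.0]), 0.0, 1.0)   # -> array([0. , 0.5, 1. ])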
<SYSTEM_TASK:>
Remap x, y, sigma parameters from degrees to pixel.
<END_TASK>
<USER_TASK:>
Description:
def rmp_deg_pixel_xys(vecX, vecY, vecPrfSd, tplPngSize,
varExtXmin, varExtXmax, varExtYmin, varExtYmax):
"""Remap x, y, sigma parameters from degrees to pixel.
Parameters
----------
vecX : 1D numpy array
Array with possible x parameters in degree
vecY : 1D numpy array
Array with possible y parameters in degree
vecPrfSd : 1D numpy array
Array with possible sd parameters in degree
tplPngSize : tuple, 2
Pixel dimensions of the visual space in pixel (width, height).
varExtXmin : float
Extent of visual space from centre in negative x-direction (width)
varExtXmax : float
Extent of visual space from centre in positive x-direction (width)
varExtYmin : float
Extent of visual space from centre in negative y-direction (height)
varExtYmax : float
Extent of visual space from centre in positive y-direction (height)
Returns
-------
vecX : 1D numpy array
Array with possible x parameters in pixel
vecY : 1D numpy array
Array with possible y parameters in pixel
vecPrfSd : 1D numpy array
Array with possible sd parameters in pixel
""" |
# Remap modelled x-positions of the pRFs:
vecXpxl = rmp_rng(vecX, 0.0, (tplPngSize[0] - 1), varOldThrMin=varExtXmin,
varOldAbsMax=varExtXmax)
# Remap modelled y-positions of the pRFs:
vecYpxl = rmp_rng(vecY, 0.0, (tplPngSize[1] - 1), varOldThrMin=varExtYmin,
varOldAbsMax=varExtYmax)
# We calculate the scaling factor from degrees of visual angle to
# pixels separately for the x- and the y-directions (the two should
# be the same).
varDgr2PixX = np.divide(tplPngSize[0], (varExtXmax - varExtXmin))
varDgr2PixY = np.divide(tplPngSize[1], (varExtYmax - varExtYmin))
# Check whether varDgr2PixX and varDgr2PixY are similar:
strErrMsg = 'ERROR. The ratio of X and Y dimensions in ' + \
'stimulus space (in degrees of visual angle) and the ' + \
'ratio of X and Y dimensions in the upsampled visual space' + \
' do not agree'
assert 0.5 > np.absolute((varDgr2PixX - varDgr2PixY)), strErrMsg
# Convert prf sizes from degrees of visual angles to pixel
vecPrfSdpxl = np.multiply(vecPrfSd, varDgr2PixX)
# Return new values in column stack.
# Since values are now in pixel, they should be integer
return np.column_stack((vecXpxl, vecYpxl, vecPrfSdpxl)).astype(np.int32) |
<SYSTEM_TASK:>
Spatially convolve input with 2D Gaussian model.
<END_TASK>
<USER_TASK:>
Description:
def cnvl_2D_gauss(idxPrc, aryMdlParamsChnk, arySptExpInf, tplPngSize, queOut,
strCrd='crt'):
"""Spatially convolve input with 2D Gaussian model.
Parameters
----------
idxPrc : int
Process ID of the process calling this function (for CPU
multi-threading). In GPU version, this parameter is 0 (just one thread
on CPU).
aryMdlParamsChnk : 2d numpy array, shape [n_models, n_model_params]
Array with the model parameter combinations for this chunk.
arySptExpInf : 3d numpy array, shape [n_x_pix, n_y_pix, n_conditions]
All spatial conditions stacked along the last axis.
tplPngSize : tuple, 2.
Pixel dimensions of the visual space (width, height).
queOut : multiprocessing.queues.Queue
Queue to put the results on. If this is None, the user is not running
multiprocessing but is just calling the function
strCrd : string, either 'crt' or 'pol'
Whether model parameters are provided in cartesian or polar coordinates
Returns
-------
data : 2d numpy array, shape [n_models, n_conditions]
Convolved data (pRF model responses for each condition).
Reference
---------
[1]
""" |
# Number of combinations of model parameters in the current chunk:
varChnkSze = aryMdlParamsChnk.shape[0]
# Number of conditions / time points of the input data
varNumLstAx = arySptExpInf.shape[-1]
# Output array with results of convolution:
aryOut = np.zeros((varChnkSze, varNumLstAx))
# Loop through combinations of model parameters:
for idxMdl in range(0, varChnkSze):
# Spatial parameters of current model:
if strCrd == 'pol':
# Position was given in polar coordinates
varTmpEcc = aryMdlParamsChnk[idxMdl, 0]
varTmpPlrAng = aryMdlParamsChnk[idxMdl, 1]
# Convert from polar to cartesian coordinates
varTmpX = varTmpEcc * np.cos(varTmpPlrAng) + tplPngSize[0]/2.
varTmpY = varTmpEcc * np.sin(varTmpPlrAng) + tplPngSize[1]/2.
elif strCrd == 'crt':
varTmpX = aryMdlParamsChnk[idxMdl, 0]
varTmpY = aryMdlParamsChnk[idxMdl, 1]
# Standard deviation does not depend on coordinate system
varTmpSd = aryMdlParamsChnk[idxMdl, 2]
# Create pRF model (2D):
aryGauss = crt_2D_gauss(tplPngSize[0],
tplPngSize[1],
varTmpX,
varTmpY,
varTmpSd)
# Multiply pixel-time courses with Gaussian pRF models:
aryCndTcTmp = np.multiply(arySptExpInf, aryGauss[:, :, None])
# Calculate sum across x- and y-dimensions - the 'area under the
# Gaussian surface'.
aryCndTcTmp = np.sum(aryCndTcTmp, axis=(0, 1))
# Put model time courses into function's output with 2d Gaussian
# arrray:
aryOut[idxMdl, :] = aryCndTcTmp
if queOut is None:
# if user is not using multiprocessing, return the array directly
return aryOut
else:
# Put column with the indices of model-parameter-combinations into the
# output array (in order to be able to put the pRF model time courses
# into the correct order after the parallelised function):
lstOut = [idxPrc,
aryOut]
# Put output to queue:
queOut.put(lstOut) |
<SYSTEM_TASK:>
Process a new revision and detect a revert if it occurred. Note that
<END_TASK>
<USER_TASK:>
Description:
def process(self, checksum, revision=None):
"""
Process a new revision and detect a revert if it occurred. Note that
you can pass whatever you like as `revision` and it will be returned in
the case that a revert occurs.
:Parameters:
checksum : str
Any identity-machable string-based hash of revision content
revision : `mixed`
Revision metadata. Note that any data will just be returned
in the case of a revert.
:Returns:
a :class:`~mwreverts.Revert` if one occured or `None`
""" |
revert = None
if checksum in self: # potential revert
reverteds = list(self.up_to(checksum))
if len(reverteds) > 0: # If no reverted revisions, this is a noop
revert = Revert(revision, reverteds, self[checksum])
self.insert(checksum, revision)
return revert |
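A hedged sketch of how this method might be used, assuming it lives on a detector class (here instantiated as Detector()) that also provides insert, up_to and __contains__ over a bounded history of checksums:
detector = Detector()
detector.process('aaa', {'rev_id': 1})
detector.process('bbb', {'rev_id': 2})
revert = detector.process('aaa', {'rev_id': 3})
# revert is not None here: it bundles the reverting revision ({'rev_id': 3}),
# the reverted revisions ([{'rev_id': 2}]) and the revision reverted to ({'rev_id': 1})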
<SYSTEM_TASK:>
Prepare pRF model time courses.
<END_TASK>
<USER_TASK:>
Description:
def prep_models(aryPrfTc, varSdSmthTmp=2.0, lgcPrint=True):
"""
Prepare pRF model time courses.
Parameters
----------
aryPrfTc : np.array
4D numpy array with pRF time course models, with following dimensions:
`aryPrfTc[x-position, y-position, SD, volume]`.
varSdSmthTmp : float
Extent of temporal smoothing that is applied to functional data and
pRF time course models, [SD of Gaussian kernel, in seconds]. If `zero`,
no temporal smoothing is applied.
lgcPrint : boolean
Whether print statements should be executed.
Returns
-------
aryPrfTc : np.array
4D numpy array with prepared pRF time course models, same
dimensions as input (`aryPrfTc[x-position, y-position, SD, volume]`).
""" |
if lgcPrint:
print('------Prepare pRF time course models')
# Define temporal smoothing of pRF time course models
def funcSmthTmp(aryPrfTc, varSdSmthTmp, lgcPrint=True):
"""Apply temporal smoothing to fMRI data & pRF time course models.
Parameters
----------
aryPrfTc : np.array
4D numpy array with pRF time course models, with following
dimensions: `aryPrfTc[x-position, y-position, SD, volume]`.
varSdSmthTmp : float, positive
Extent of temporal smoothing that is applied to functional data and
pRF time course models, [SD of Gaussian kernel, in seconds]. If
`zero`, no temporal smoothing is applied.
lgcPrint : boolean
Whether print statements should be executed.
Returns
-------
aryPrfTc : np.array
4D numpy array with prepared pRF time course models, same dimension
as input (`aryPrfTc[x-position, y-position, SD, volume]`).
"""
# adjust the input, if necessary, such that input is 2D, with last
# dim time
tplInpShp = aryPrfTc.shape
aryPrfTc = aryPrfTc.reshape((-1, aryPrfTc.shape[-1]))
# For the filtering to perform well at the ends of the time series, we
# set the method to 'nearest' and place a volume with mean intensity
# (over time) at the beginning and at the end.
aryPrfTcMean = np.mean(aryPrfTc, axis=-1, keepdims=True).reshape(-1, 1)
aryPrfTc = np.concatenate((aryPrfTcMean, aryPrfTc, aryPrfTcMean),
axis=-1)
# In the input data, time goes from left to right. Therefore, we apply
# the filter along axis=1.
aryPrfTc = gaussian_filter1d(aryPrfTc.astype('float32'), varSdSmthTmp,
axis=-1, order=0, mode='nearest',
truncate=4.0)
# Remove mean-intensity volumes at the beginning and at the end:
aryPrfTc = aryPrfTc[..., 1:-1]
# Output array:
return aryPrfTc.reshape(tplInpShp).astype('float16')
# Perform temporal smoothing of pRF time course models
if 0.0 < varSdSmthTmp:
if lgcPrint:
print('---------Temporal smoothing on pRF time course models')
print('------------SD tmp smooth is: ' + str(varSdSmthTmp))
aryPrfTc = funcSmthTmp(aryPrfTc, varSdSmthTmp)
# Z-score the prf time course models
if lgcPrint:
print('---------Zscore the pRF time course models')
# De-mean the prf time course models:
aryPrfTc = np.subtract(aryPrfTc, np.mean(aryPrfTc, axis=-1)[..., None])
# Standardize the prf time course models:
# In order to avoid division by zero, only divide those voxels with a
# standard deviation greater than zero:
aryTmpStd = np.std(aryPrfTc, axis=-1)
aryTmpLgc = np.greater(aryTmpStd, np.array([0.0]))
aryPrfTc[aryTmpLgc, :] = np.divide(aryPrfTc[aryTmpLgc, :],
aryTmpStd[aryTmpLgc, None])
return aryPrfTc |
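Typical call on an existing model array (aryPrfTc as described above):
aryPrfTc = prep_models(aryPrfTc, varSdSmthTmp=2.0)
# afterwards every model time course is temporally smoothed and z-scored along time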
<SYSTEM_TASK:>
what's the value of a style at the current stack level
<END_TASK>
<USER_TASK:>
Description:
def get(self,style):
""" what's the value of a style at the current stack level""" |
level = len(self.stack) -1
while level >= 0:
if style in self.stack[level]:
return self.stack[level][style]
else:
level = level - 1
return None |
<SYSTEM_TASK:>
converts a value to the attribute's type
<END_TASK>
<USER_TASK:>
Description:
def enforce_type(self, attr, val):
"""converts a value to the attribute's type""" |
if attr not in self.types:
return utfstr(val)
elif self.types[attr] == 'int':
return int(float(val))
elif self.types[attr] == 'float':
return float(val)
else:
return utfstr(val) |
<SYSTEM_TASK:>
overrides style values at the current stack level
<END_TASK>
<USER_TASK:>
Description:
def set(self, style={}):
"""overrides style values at the current stack level""" |
_style = {}
for attr in style:
if attr in self.cmds and not style[attr] in self.cmds[attr]:
print('WARNING: ESC/POS PRINTING: ignoring invalid value: ' + utfstr(style[attr]) + ' for style: ' + utfstr(attr))
else:
self.stack[-1][attr] = self.enforce_type(attr, style[attr]) |
<SYSTEM_TASK:>
converts the current style to an escpos command string
<END_TASK>
<USER_TASK:>
Description:
def to_escpos(self):
""" converts the current style to an escpos command string """ |
cmd = ''
ordered_cmds = sorted(self.cmds, key=lambda x: self.cmds[x]['_order'])
for style in ordered_cmds:
cmd += self.cmds[style][self.get(style)]
return cmd |
<SYSTEM_TASK:>
starts an inline entity with an optional style definition
<END_TASK>
<USER_TASK:>
Description:
def start_inline(self,stylestack=None):
""" starts an inline entity with an optional style definition """ |
self.stack.append('inline')
if self.dirty:
self.escpos._raw(' ')
if stylestack:
self.style(stylestack) |
<SYSTEM_TASK:>
starts a block entity with an optional style definition
<END_TASK>
<USER_TASK:>
Description:
def start_block(self,stylestack=None):
""" starts a block entity with an optional style definition """ |
if self.dirty:
self.escpos._raw('\n')
self.dirty = False
self.stack.append('block')
if stylestack:
self.style(stylestack) |
<SYSTEM_TASK:>
puts a string of text in the entity keeping the whitespace intact
<END_TASK>
<USER_TASK:>
Description:
def pre(self,text):
""" puts a string of text in the entity keeping the whitespace intact """ |
if text:
self.escpos.text(text)
self.dirty = True |
<SYSTEM_TASK:>
puts text in the entity. Whitespace and newlines are stripped to single spaces.
<END_TASK>
<USER_TASK:>
Description:
def text(self,text):
""" puts text in the entity. Whitespace and newlines are stripped to single spaces. """ |
if text:
text = utfstr(text)
text = text.strip()
text = re.sub(r'\s+', ' ', text)
if text:
self.dirty = True
self.escpos.text(text) |
<SYSTEM_TASK:>
Check and fix the size of the image to 32 bits
<END_TASK>
<USER_TASK:>
Description:
def _check_image_size(self, size):
""" Check and fix the size of the image to 32 bits """ |
if size % 32 == 0:
return (0, 0)
else:
image_border = 32 - (size % 32)
if (image_border % 2) == 0:
return (image_border / 2, image_border / 2)
else:
return (image_border / 2, (image_border / 2) + 1) |
<SYSTEM_TASK:>
Parse image and prepare it to a printable format
<END_TASK>
<USER_TASK:>
Description:
def _convert_image(self, im):
""" Parse image and prepare it to a printable format """ |
pixels = []
pix_line = ""
im_left = ""
im_right = ""
switch = 0
img_size = [ 0, 0 ]
if im.size[0] > 512:
print("WARNING: Image is wider than 512 and could be truncated at print time ")
if im.size[1] > 255:
raise ImageSizeError()
im_border = self._check_image_size(im.size[0])
for i in range(im_border[0]):
im_left += "0"
for i in range(im_border[1]):
im_right += "0"
for y in range(im.size[1]):
img_size[1] += 1
pix_line += im_left
img_size[0] += im_border[0]
for x in range(im.size[0]):
img_size[0] += 1
RGB = im.getpixel((x, y))
im_color = (RGB[0] + RGB[1] + RGB[2])
im_pattern = "1X0"
pattern_len = len(im_pattern)
switch = (switch - 1 ) * (-1)
for x in range(pattern_len):
if im_color <= (255 * 3 / pattern_len * (x+1)):
if im_pattern[x] == "X":
pix_line += "%d" % switch
else:
pix_line += im_pattern[x]
break
elif im_color > (255 * 3 / pattern_len * pattern_len) and im_color <= (255 * 3):
pix_line += im_pattern[-1]
break
pix_line += im_right
img_size[0] += im_border[1]
return (pix_line, img_size) |
<SYSTEM_TASK:>
Print QR Code for the provided string
<END_TASK>
<USER_TASK:>
Description:
def qr(self,text):
""" Print QR Code for the provided string """ |
qr_code = qrcode.QRCode(version=4, box_size=4, border=1)
qr_code.add_data(text)
qr_code.make(fit=True)
qr_img = qr_code.make_image()
im = qr_img._img.convert("RGB")
# Convert the RGB image into a printable image
self._convert_image(im) |
<SYSTEM_TASK:>
Set text properties
<END_TASK>
<USER_TASK:>
Description:
def set(self, align='left', font='a', type='normal', width=1, height=1):
""" Set text properties """ |
# Align
if align.upper() == "CENTER":
self._raw(TXT_ALIGN_CT)
elif align.upper() == "RIGHT":
self._raw(TXT_ALIGN_RT)
elif align.upper() == "LEFT":
self._raw(TXT_ALIGN_LT)
# Font
if font.upper() == "B":
self._raw(TXT_FONT_B)
else: # DEFAULT FONT: A
self._raw(TXT_FONT_A)
# Type
if type.upper() == "B":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL_OFF)
elif type.upper() == "U":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL_ON)
elif type.upper() == "U2":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL2_ON)
elif type.upper() == "BU":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL_ON)
elif type.upper() == "BU2":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL2_ON)
elif type.upper() == "NORMAL":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL_OFF)
# Width
if width == 2 and height != 2:
self._raw(TXT_NORMAL)
self._raw(TXT_2WIDTH)
elif height == 2 and width != 2:
self._raw(TXT_NORMAL)
self._raw(TXT_2HEIGHT)
elif height == 2 and width == 2:
self._raw(TXT_2WIDTH)
self._raw(TXT_2HEIGHT)
else: # DEFAULT SIZE: NORMAL
self._raw(TXT_NORMAL) |