text_prompt (string, 157 to 13.1k characters) | code_prompt (string, 7 to 19.8k characters, may be empty) |
---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def drilldown_tree(self, session=None, json=False, json_fields=None):
""" This method generate a branch from a tree, begining with current node. For example: node7.drilldown_tree() .. code:: level Nested sets example _______________|_________|_________ | | | | | | 2 2(2)5 6(4)11 | 12(7)21 | | ^ | ^ | 3 3(3)4 7(5)8 9(6)10 | 13(8)16 17(10)20 | | | | | 4 | 14(9)15 18(11)19 | | | Example in tests: * :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_drilldown_tree` """ |
if not session:
session = object_session(self)
return self.get_tree(
session,
json=json,
json_fields=json_fields,
query=self._drilldown_query
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def path_to_root(self, session=None, order=desc):
"""Generate path from a leaf or intermediate node to the root. For example: node11.path_to_root() .. code:: level Nested sets example 1 | 1(1)22 | ________|______|_____________________ | | | | | | | ------+--------- | | 2 2(2)5 6(4)11 | -- 12(7)21 | | ^ | / \ | 3 3(3)4 7(5)8 9(6)10 ---/---- \ | 13(8)16 | 17(10)20 | | | | | 4 14(9)15 | 18(11)19 | | | """ |
table = self.__class__
query = self._base_query_obj(session=session)
query = query.filter(table.is_ancestor_of(self, inclusive=True))
return self._base_order(query, order=order) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rebuild_tree(cls, session, tree_id):
""" This method rebuid tree. Args: session (:mod:`sqlalchemy.orm.session.Session`):
SQLAlchemy session tree_id (int or str):
id of tree Example: * :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_rebuild` """ |
session.query(cls).filter_by(tree_id=tree_id)\
.update({cls.left: 0, cls.right: 0, cls.level: 0})
top = session.query(cls).filter_by(parent_id=None)\
.filter_by(tree_id=tree_id).one()
top.left = left = 1
top.right = right = 2
top.level = level = cls.get_default_level()
def recursive(children, left, right, level):
level = level + 1
for i, node in enumerate(children):
same_level_right = children[i - 1].right
left = left + 1
if i > 0:
left = left + 1
if same_level_right:
left = same_level_right + 1
right = left + 1
node.left = left
node.right = right
parent = node.parent
j = 0
while parent:
parent.right = right + 1 + j
parent = parent.parent
j += 1
node.level = level
recursive(node.children, left, right, level)
recursive(top.children, left, right, level) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rebuild(cls, session, tree_id=None):
""" This function rebuid tree. Args: session (:mod:`sqlalchemy.orm.session.Session`):
SQLAlchemy session Kwargs: tree_id (int or str):
id of tree, default None Example: * :mod:`sqlalchemy_mptt.tests.TestTree.test_rebuild` """ |
trees = session.query(cls).filter_by(parent_id=None)
if tree_id:
trees = trees.filter_by(tree_id=tree_id)
for tree in trees:
cls.rebuild_tree(session, tree.tree_id) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dx(mt, x):
""" Returns the number of dying at begining of age x """ |
end_x_val = mt.lx.index(0)
if x < end_x_val:
return mt.lx[x] - mt.lx[x + 1]
else:
return 0.0 |
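As a sanity check on the convention above (dx counts the deaths between ages x and x + 1), here is a minimal sketch with a toy mortality table; the ToyTable namedtuple is only a stand-in for the real table object, which is assumed to expose an lx list terminated by 0.
from collections import namedtuple

ToyTable = namedtuple('ToyTable', ['lx'])      # stand-in for the real mortality-table object
mt_toy = ToyTable(lx=[1000, 990, 975, 0])      # lx column, closed by a terminal 0

assert dx(mt_toy, 0) == 10     # 1000 - 990 deaths between ages 0 and 1
assert dx(mt_toy, 5) == 0.0    # past the end of the table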
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Sx(mt, x):
""" Return the Sx """ |
n = len(mt.Nx)
sum1 = 0
for j in range(x, n):
k = mt.Nx[j]
sum1 += k
return sum1 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Cx(mt, x):
""" Return the Cx """ |
return ((1 / (1 + mt.i)) ** (x + 1)) * mt.dx[x] * ((1 + mt.i) ** 0.5) |
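In commutation-function notation this line computes, with the annual discount factor v = 1/(1+i):

C_x = v^{\,x+1}\, d_x\, (1+i)^{1/2}

where the factor (1+i)^{1/2} is the usual half-year adjustment for deaths assumed to occur, on average, in the middle of the year.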
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Mx(mt, x):
""" Return the Mx """ |
n = len(mt.Cx)
sum1 = 0
for j in range(x, n):
k = mt.Cx[j]
sum1 += k
return sum1 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def qAx(mt, x, q):
""" This function evaluates the APV of a geometrically increasing annual annuity-due """ |
q = float(q)
j = (mt.i - q) / (1 + q)
mtj = Actuarial(nt=mt.nt, i=j)
return Ax(mtj, x) |
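The adjusted rate j is the standard device for geometric growth: valuing benefits that grow by a factor (1+q) per year at interest rate i is equivalent to valuing level benefits at rate j, since (1+i)/(1+q) = 1 + (i-q)/(1+q). A small worked example (the numbers are illustrative only):
i, q = 0.06, 0.02
j = (i - q) / (1 + q)
print(round(j, 6))   # 0.039216, the same as 1.06 / 1.02 - 1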
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _meanvalueattr(self,v):
""" find new position of vertex v according to adjacency in prevlayer. position is given by the mean value of adjacent positions. experiments show that meanvalue heuristic performs better than median. """ |
sug = self.layout
if not self.prevlayer(): return sug.grx[v].bar
bars = [sug.grx[x].bar for x in self._neighbors(v)]
return sug.grx[v].bar if len(bars)==0 else float(sum(bars))/len(bars) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def draw(self,N=1.5):
"""compute every node coordinates after converging to optimal ordering by N rounds, and finally perform the edge routing. """ |
while N>0.5:
for (l,mvmt) in self.ordering_step():
pass
N = N-1
if N>0:
for (l,mvmt) in self.ordering_step(oneway=True):
pass
self.setxy()
self.draw_edges() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setrank(self,v):
"""set rank value for vertex v and add it to the corresponding layer. The Layer is created if it is the first vertex with this rank. """ |
assert self.dag
r=max([self.grx[x].rank for x in v.N(-1)]+[-1])+1
self.grx[v].rank=r
# add it to its layer:
try:
self.layers[r].append(v)
except IndexError:
assert r==len(self.layers)
self.layers.append(Layer([v])) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dummyctrl(self,r,ctrl):
"""creates a DummyVertex at rank r inserted in the ctrl dict of the associated edge and layer. Arguments: r (int):
rank value ctrl (dict):
the edge's control vertices Returns: DummyVertex : the created DummyVertex. """ |
dv = DummyVertex(r)
dv.view.w,dv.view.h=self.dw,self.dh
self.grx[dv] = dv
dv.ctrl = ctrl
ctrl[r] = dv
self.layers[r].append(dv)
return dv |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setdummies(self,e):
"""creates and defines all needed dummy vertices for edge e. """ |
v0,v1 = e.v
r0,r1 = self.grx[v0].rank,self.grx[v1].rank
if r0>r1:
assert e in self.alt_e
v0,v1 = v1,v0
r0,r1 = r1,r0
if (r1-r0)>1:
# "dummy vertices" are stored in the edge ctrl dict,
# keyed by their rank in layers.
ctrl=self.ctrls[e]={}
ctrl[r0]=v0
ctrl[r1]=v1
for r in xrange(r0+1,r1):
self.dummyctrl(r,ctrl) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _coord_vertical_alignment(self):
"""performs vertical alignment according to current dirvh internal state. """ |
dirh,dirv = self.dirh,self.dirv
g = self.grx
for l in self.layers[::-dirv]:
if not l.prevlayer(): continue
r=None
for vk in l[::dirh]:
for m in l._medianindex(vk):
# take the median node in dirv layer:
um = l.prevlayer()[m]
# if vk is "free" align it with um's root
if g[vk].align is vk:
if dirv==1: vpair = (vk,um)
else: vpair = (um,vk)
# if vk<->um link is used for alignment
if (vpair not in self.conflicts) and \
(r==None or dirh*r<dirh*m):
g[um].align = vk
g[vk].root = g[um].root
g[vk].align = g[vk].root
r = m |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_png(varNumVol, strPathPng, tplVslSpcSze=(200, 200), varStrtIdx=0, varZfill=3):
""" Load PNGs with stimulus information for pRF model creation. Parameters varNumVol : int Number of PNG files. strPathPng : str Parent directory of PNG files. PNG files need to be organsied in numerical order (e.g. `file_001.png`, `file_002.png`, etc.). tplVslSpcSze : tuple Pixel size (x, y) at which PNGs are sampled. In case of large PNGs it is useful to sample at a lower than the original resolution. varStrtIdx : int Start index of PNG files. For instance, `varStrtIdx = 0` if the name of the first PNG file is `file_000.png`, or `varStrtIdx = 1` if it is `file_001.png`. varZfill : int Zero padding of PNG file names. For instance, `varStrtIdx = 3` if the name of PNG files is `file_007.png`, or `varStrtIdx = 4` if it is `file_0007.png`. Returns ------- aryPngData : np.array 3D Numpy array with the following structure: aryPngData[x-pixel-index, y-pixel-index, PngNumber] Notes ----- Part of py_pRF_mapping library. """ |
# Create list of png files to load:
lstPngPaths = [None] * varNumVol
for idx01 in range(0, varNumVol):
lstPngPaths[idx01] = (strPathPng +
str(idx01 + varStrtIdx).zfill(varZfill) +
'.png')
# The png data will be saved in a numpy array of the following order:
# aryPngData[x-pixel, y-pixel, PngNumber].
aryPngData = np.zeros((tplVslSpcSze[0],
tplVslSpcSze[1],
varNumVol))
# Open first image in order to check dimensions (greyscale or RGB, i.e. 2D
# or 3D).
objIm = Image.open(lstPngPaths[0])
aryTest = np.array(objIm.resize((objIm.size[0], objIm.size[1]),
Image.ANTIALIAS))
varNumDim = aryTest.ndim
del(aryTest)
# Loop trough PNG files:
for idx01 in range(0, varNumVol):
# Old version of reading images with scipy
# aryPngData[:, :, idx01] = sp.misc.imread(lstPngPaths[idx01])[:, :, 0]
# aryPngData[:, :, idx01] = sp.misc.imread(lstPngPaths[idx01])[:, :]
# Load & resize image:
objIm = Image.open(lstPngPaths[idx01])
objIm = objIm.resize((tplVslSpcSze[0],
tplVslSpcSze[1]),
resample=Image.NEAREST)
# Casting of array depends on dimensionality (greyscale or RGB, i.e. 2D
# or 3D):
if varNumDim == 2:
aryPngData[:, :, idx01] = np.array(objIm.resize(
(objIm.size[0], objIm.size[1]), Image.ANTIALIAS))[:, :]
elif varNumDim == 3:
aryPngData[:, :, idx01] = np.array(objIm.resize(
(objIm.size[0], objIm.size[1]), Image.ANTIALIAS))[:, :, 0]
else:
# Error message:
strErrMsg = ('ERROR: PNG files for model creation need to be RGB '
+ 'or greyscale.')
raise ValueError(strErrMsg)
# Convert RGB values (0 to 255) to integer ones and zeros:
aryPngData = (aryPngData > 200).astype(np.int8)
return aryPngData |
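A hypothetical call, assuming 400 frames named /data/stim/frame_000.png onwards and a Pillow version that still provides Image.ANTIALIAS (which the function uses); the path, frame count and sampling size are placeholders:
aryPngData = load_png(varNumVol=400,
                      strPathPng='/data/stim/frame_',
                      tplVslSpcSze=(128, 128),
                      varStrtIdx=0,
                      varZfill=3)
print(aryPngData.shape)   # (128, 128, 400), with binarised (0/1) pixel values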
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_ev_txt(strPthEv):
"""Load information from event text file. Parameters input1 : str Path to event text file Returns ------- aryEvTxt : 2d numpy array, shape [n_measurements, 3] Array with info about conditions: type, onset, duration Notes ----- Part of py_pRF_mapping library. """ |
aryEvTxt = np.loadtxt(strPthEv, dtype='float', comments='#', delimiter=' ',
skiprows=0, usecols=(0, 1, 2))
return aryEvTxt |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def adjust_status(info: dict) -> dict: """Apply status mapping to a raw API result.""" |
modified_info = deepcopy(info)
modified_info.update({
'level':
get_nearest_by_numeric_key(STATUS_MAP, int(info['level'])),
'level2':
STATUS_MAP[99] if info['level2'] is None else
get_nearest_by_numeric_key(STATUS_MAP, int(info['level2']))
})
return modified_info |
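get_nearest_by_numeric_key and STATUS_MAP are defined elsewhere in the library; as an assumption (not the library's actual code), the helper can be read as returning the value whose integer key lies closest to the requested level:
def get_nearest_by_numeric_key(data: dict, key: int):
    """Assumed behaviour: return the value whose numeric key is nearest to ``key``."""
    return data[min(data, key=lambda k: abs(k - key))]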
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def status_by_state(self, state: str) -> dict: """Return the CDC status for the specified state.""" |
data = await self.raw_cdc_data()
try:
info = next((v for k, v in data.items() if state in k))
except StopIteration:
return {}
return adjust_status(info) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def brief_exception_text(exception, secret_values):
""" Returns the Exception class and the message of the exception as string. :param exception: The exception to format :param secret_values: Values to hide in output """ |
exception_text = _hide_secret_values(str(exception), secret_values)
return '[{}]\n{}'.format(type(exception).__name__, exception_text) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def print_exception(exception, secret_values=None):
""" Prints the exception message and the name of the exception class to stderr. :param exception: The exception to print :param secret_values: Values to hide in output """ |
print(brief_exception_text(exception, secret_values), file=sys.stderr) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def insert(self, **kwargs):
""" Saves the Document to the database if it is valid. Returns errors otherwise. """ |
if self.is_valid:
before = self.before_insert()
if before:
return before
try:
self._document['_id'] = self.insert_one(self._document)
self.after_insert()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get('err', 'PyMongoError.')
),
operation='insert', collection=type(self).__name__,
document=self._document,
)
return self._errors |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self, **kwargs):
""" Updates the document with the given _id saved in the collection if it is valid. Returns errors otherwise. """ |
if self.is_valid:
if '_id' in self._document:
to_update = self.find_one({'_id': self._id})
if to_update:
before = self.before_update(old=to_update)
if before:
return before
try:
self.replace_one({'_id': self._id}, self._document)
self.after_update(old=to_update)
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='update', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
return self._errors |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete(self, **kwargs):
""" Deletes the document if it is saved in the collection. """ |
if self.is_valid:
if '_id' in self._document:
to_delete = self.find_one({'_id': self._id})
if to_delete:
before = self.before_delete()
if before:
return before
try:
self.delete_one({'_id': self._id})
self.after_delete()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='delete', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_one(cls, filter=None, *args, **kwargs):
""" Returns one document dict if one passes the filter. Returns None otherwise. """ |
return cls.collection.find_one(filter, *args, **kwargs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find(cls, *args, **kwargs):
""" Returns all document dicts that pass the filter """ |
return list(cls.collection.find(*args, **kwargs)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def aggregate(cls, pipeline=None, **kwargs):
""" Returns the document dicts returned from the Aggregation Pipeline """ |
return list(cls.collection.aggregate(pipeline or [], **kwargs)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def insert_many(cls, documents, ordered=True):
""" Inserts a list of documents into the Collection and returns their _ids """ |
return cls.collection.insert_many(documents, ordered).inserted_ids |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_one(cls, filter, update, upsert=False):
""" Updates a document that passes the filter with the update value Will upsert a new document if upsert=True and no document is filtered """ |
return cls.collection.update_one(filter, update, upsert).raw_result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_many(cls, filter, update, upsert=False):
""" Updates all documents that pass the filter with the update value Will upsert a new document if upsert=True and no document is filtered """ |
return cls.collection.update_many(filter, update, upsert).raw_result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def replace_one(cls, filter, replacement, upsert=False):
""" Replaces a document that passes the filter. Will upsert a new document if upsert=True and no document is filtered """ |
return cls.collection.replace_one(
filter, replacement, upsert
).raw_result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(cls, filter=None, **kwargs):
""" Returns a Document if any document is filtered, returns None otherwise """ |
document = cls(cls.find_one(filter, **kwargs))
return document if document.document else None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def documents(cls, filter=None, **kwargs):
""" Returns a list of Documents if any document is filtered """ |
documents = [cls(document) for document in cls.find(filter, **kwargs)]
return [document for document in documents if document.document] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def in_file(self, fn: str) -> Iterator[Statement]: """ Returns an iterator over all of the statements belonging to a file. """ |
yield from self.__file_to_statements.get(fn, []) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def at_line(self, line: FileLine) -> Iterator[Statement]: """ Returns an iterator over all of the statements located at a given line. """ |
num = line.num
for stmt in self.in_file(line.filename):
if stmt.location.start.line == num:
yield stmt |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def wrap(text, width=70, **kwargs):
"""Wrap multiple paragraphs of text, returning a list of wrapped lines. Reformat the multiple paragraphs 'text' so they fit in lines of no more than 'width' columns, and return a list of wrapped lines. By default, tabs in 'text' are expanded with string.expandtabs(), and all other whitespace characters (including newline) are converted to space. See ParagraphWrapper class for available keyword args to customize wrapping behaviour. """ |
w = ParagraphWrapper(width=width, **kwargs)
return w.wrap(text) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fill(text, width=70, **kwargs):
"""Fill multiple paragraphs of text, returning a new string. Reformat multiple paragraphs in 'text' to fit in lines of no more than 'width' columns, and return a new string containing the entire wrapped text. As with wrap(), tabs are expanded and other whitespace characters converted to space. See ParagraphWrapper class for available keyword args to customize wrapping behaviour. """ |
w = ParagraphWrapper(width=width, **kwargs)
return w.fill(text) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prepare_outdir(outdir):
""" Creates the output directory if not existing. If outdir is None or if no output_files are provided nothing happens. :param outdir: The output directory to create. """ |
if outdir:
outdir = os.path.expanduser(outdir)
if not os.path.isdir(outdir):
try:
os.makedirs(outdir)
except os.error as e:
raise JobExecutionError('Failed to create outdir "{}".\n{}'.format(outdir, str(e))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def model_node(**kwargs):
""" Decorates a ``schematics.Model`` class to add it as a field of type ``schematic.types.ModelType``. Keyword arguments are passed to ``schematic.types.ModelType``. Example: .. code-block:: python :emphasize-lines: 8,13 from schematics import Model, types from rafter.contrib.schematics.helpers import model_node class MyModel(Model):
name = types.StringType() @model_node() class options(Model):
status = types.IntType() # With arguments and another name @model_node(serialized_name='extra', required=True) class _extra(Model):
test = types.StringType() """ |
kwargs.setdefault('default', {})
def decorator(model):
return types.ModelType(model, **kwargs)
return decorator |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_file_read_only(file_path):
""" Removes the write permissions for the given file for owner, groups and others. :param file_path: The file whose privileges are revoked. :raise FileNotFoundError: If the given file does not exist. """ |
old_permissions = os.stat(file_path).st_mode
os.chmod(file_path, old_permissions & ~WRITE_PERMISSIONS) |
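WRITE_PERMISSIONS is a module-level constant defined elsewhere; a reasonable definition (an assumption, not necessarily the module's own) combines the owner, group and other write bits from the stat module:
import stat

# Assumed definition: all three write bits, so `& ~WRITE_PERMISSIONS` clears them.
WRITE_PERMISSIONS = stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH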
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def status_by_zip(self, zip_code: str) -> dict: """Get symptom data for the provided ZIP code.""" |
try:
location = next((
d for d in await self.user_reports()
if d['zip'] == zip_code))
except StopIteration:
return {}
return await self.status_by_coordinates(
float(location['latitude']), float(location['longitude'])) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def print_request(request):
""" Prints a prepared request to give the user info as to what they're sending :param request.PreparedRequest request: PreparedRequest object to be printed :return: Nothing """ |
print('{}\n{}\n{}\n\n{}'.format(
'-----------START-----------',
request.method + ' ' + request.url,
'\n'.join('{}: {}'.format(k, v) for k, v in request.headers.items()),
request.body,
)) |
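A PreparedRequest for inspection can be obtained from the requests library; the URL and payload below are placeholders:
import requests

req = requests.Request('POST', 'https://example.com/api',
                       headers={'Content-Type': 'application/json'},
                       data='{"key": "value"}')
print_request(req.prepare())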
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filter_validate_schemas(get_response, params):
""" This filter validates input data against the resource's ``request_schema`` and fill the request's ``validated`` dict. Data from ``request.params`` and ``request.body`` (when the request body is of a form type) will be converted using the schema in order to get proper lists or unique values. .. important:: The request validation is only effective when a ``request_schema`` has been provided by the resource definition. """ |
request_schema = params.get('request_schema')
if request_schema is None:
return get_response
def _convert_params(schema, data):
for sc in schema.fields.values():
name = sc.serialized_name or sc.name
val = data.getlist(name)
if val is None:
continue
if len(val) == 1 and not isinstance(sc, ListType):
val = val[0]
data[name] = val
async def decorated_filter(request, *args, **kwargs):
data = {
'headers': CIDict(request.headers),
'path': request.app.router.get(request)[2],
'params': RequestParameters(request.args),
'body': {}
}
if request.body:
# Get body if we have something there
if request.form:
data['body'] = RequestParameters(request.form)
else:
# will raise 400 if cannot parse json
data['body'] = deepcopy(request.json)
if hasattr(request_schema, 'body') and request.form:
_convert_params(request_schema.body, data['body'])
if hasattr(request_schema, 'params') and data['params']:
_convert_params(request_schema.params, data['params'])
# Now, validate the whole thing
try:
model = request_schema(data, strict=False, validate=False)
model.validate()
request.validated = model.to_native()
except BaseError as e:
raise ValidationErrors(e.to_primitive())
return await get_response(request, *args, **kwargs)
return decorated_filter |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_EPUB(parsed_article, output_directory, input_path, image_directory, config_module=None, epub_version=None, batch=False):
""" Standard workflow for creating an EPUB document. make_EPUB is used to produce an EPUB file from a parsed article. In addition to the article it also requires a path to the appropriate image directory which it will insert into the EPUB file, as well the output directory location for the EPUB file. Parameters article : openaccess_epub.article.Article instance `article` is an Article instance for the XML document to be converted to EPUB. output_directory : str `output_directory` is a string path to the directory in which the EPUB will be produced. The name of the directory will be used as the EPUB's filename. input_path : str `input_path` is a string absolute path to the input XML file, used to locate input-relative images. image_directory : str `image_directory` is a string path indicating an explicit image directory. If supplied, other image input methods will not be used. config_module : config module, optional `config_module` is a pre-loaded config module for OpenAccess_EPUB; if not used then this function will load the global config file. Might be useful in certain cases to dynamically alter configuration. epub_version : {None, 2, 3} `epub_version` dictates which version of EPUB to be created. An error will be raised if the specified version is not supported for the publisher. If left to the default, the created version will defer to the publisher default version. batch : bool, optional `batch` indicates that batch creation is being used (such as with the `oaepub batch` command). In this case, directory conflicts will be automatically resolved (in favor of keeping previous data, skipping creation of EPUB). Returns False in the case of a fatal error, True if successful. """ |
#command_log.info('Creating {0}.epub'.format(output_directory))
if config_module is None:
config_module = openaccess_epub.utils.load_config_module()
if epub_version not in (None, 2, 3):
log.error('Invalid EPUB version: {0}'.format(epub_version))
raise ValueError('Invalid EPUB version. Should be 2 or 3')
if epub_version is None:
epub_version = parsed_article.publisher.epub_default
#Handle directory output conflicts
if os.path.isdir(output_directory):
if batch: # No user prompt, default to protect previous data
log.error('Directory conflict during batch conversion, skipping.')
return False
else: # User prompting
openaccess_epub.utils.dir_exists(output_directory)
else:
try:
os.makedirs(output_directory)
except OSError as err:
if err.errno != 17:
log.exception('Unable to recursively create output directories')
#Copy over the basic epub directory
make_epub_base(output_directory)
#Get the images, if possible, fail gracefully if not
success = openaccess_epub.utils.images.get_images(output_directory,
image_directory,
input_path,
config_module,
parsed_article)
if not success:
log.critical('Images for the article were not located! Aborting!')
return False
#Instantiate Navigation and Package
epub_nav = Navigation()
epub_package = Package()
#Process the article for navigation and package info
epub_nav.process(parsed_article)
epub_package.process(parsed_article)
#Render the content using publisher-specific methods
parsed_article.publisher.render_content(output_directory, epub_version)
if epub_version == 2:
epub_nav.render_EPUB2(output_directory)
epub_package.render_EPUB2(output_directory)
elif epub_version == 3:
epub_nav.render_EPUB3(output_directory)
epub_package.render_EPUB3(output_directory)
#Zip the directory into EPUB
epub_zip(output_directory)
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_epub_base(location):
""" Creates the base structure for an EPUB file in a specified location. This function creates constant components for the structure of the EPUB in a specified directory location. Parameters location : str A path string to a local directory in which the EPUB is to be built """ |
log.info('Making EPUB base files in {0}'.format(location))
with open(os.path.join(location, 'mimetype'), 'w') as out: # mimetype file
out.write('application/epub+zip')
#Create EPUB and META-INF directories
os.mkdir(os.path.join(location, 'META-INF'))
os.mkdir(os.path.join(location, 'EPUB'))
os.mkdir(os.path.join(location, 'EPUB', 'css'))
with open(os.path.join(location, 'META-INF', 'container.xml'), 'w') as out:
out.write('''\
<?xml version="1.0" encoding="UTF-8"?>
<container version="1.0" xmlns="urn:oasis:names:tc:opendocument:xmlns:container">
<rootfiles>
<rootfile full-path="EPUB/package.opf" media-type="application/oebps-package+xml"/>
</rootfiles>
</container>''')
with open(os.path.join(location, 'EPUB', 'css', 'default.css') ,'wb') as out:
out.write(bytes(DEFAULT_CSS, 'UTF-8')) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def epub_zip(outdirect):
""" Zips up the input file directory into an EPUB file. """ |
def recursive_zip(zipf, directory, folder=None):
if folder is None:
folder = ''
for item in os.listdir(directory):
if os.path.isfile(os.path.join(directory, item)):
zipf.write(os.path.join(directory, item),
os.path.join(directory, item))
elif os.path.isdir(os.path.join(directory, item)):
recursive_zip(zipf, os.path.join(directory, item),
os.path.join(folder, item))
log.info('Zipping up the directory {0}'.format(outdirect))
epub_filename = outdirect + '.epub'
epub = zipfile.ZipFile(epub_filename, 'w')
current_dir = os.getcwd()
os.chdir(outdirect)
epub.write('mimetype')
log.info('Recursively zipping META-INF and EPUB')
for item in os.listdir('.'):
if item == 'mimetype':
continue
recursive_zip(epub, item)
os.chdir(current_dir)
epub.close() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _write_int(fname, data, append=True):
"""Write data to CSV file with validation.""" |
# pylint: disable=W0705
data_ex = pexdoc.exh.addex(ValueError, "There is no data to save to file")
fos_ex = pexdoc.exh.addex(
OSError, "File *[fname]* could not be created: *[reason]*"
)
data_ex((len(data) == 0) or ((len(data) == 1) and (len(data[0]) == 0)))
try:
pmisc.make_dir(fname)
mode = "w" if append is False else "a"
if sys.hexversion < 0x03000000: # pragma: no cover, no branch
with open(fname, mode) as file_handle:
csv.writer(file_handle, delimiter=",").writerows(data)
else: # pragma: no cover
with open(fname, mode, newline="") as file_handle:
csv.writer(file_handle, delimiter=",").writerows(data)
except (IOError, OSError) as eobj:
fos_ex(True, _MF("fname", fname, "reason", eobj.strerror)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _input_directory_description(input_identifier, arg_item, input_dir):
""" Produces a directory description. A directory description is a dictionary containing the following information. - 'path': An array containing the paths to the specified directories. - 'debugInfo': A field to possibly provide debug information. - 'found': A boolean that indicates, if the directory exists in the local filesystem. - 'listing': A listing that shows which files are in the given directory. This could be None. :param input_identifier: The input identifier in the cwl description file :param arg_item: The corresponding job information :param input_dir: TODO :return: A directory description :raise DirectoryError: If the given directory does not exist or is not a directory. """ |
description = {
'path': None,
'found': False,
'debugInfo': None,
'listing': None,
'basename': None
}
try:
path = location(input_identifier, arg_item)
if input_dir and not os.path.isabs(path):
path = os.path.join(os.path.expanduser(input_dir), path)
description['path'] = path
if not os.path.exists(path):
raise DirectoryError('path does not exist')
if not os.path.isdir(path):
raise DirectoryError('path is not a directory')
description['listing'] = arg_item.get('listing')
description['basename'] = os.path.basename(path)
description['found'] = True
except:
description['debugInfo'] = exception_format()
return description |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _check_input_directory_listing(base_directory, listing):
""" Raises an DirectoryError if files or directories, given in the listing, could not be found in the local filesystem. :param base_directory: The path to the directory to check :param listing: A listing given as dictionary :raise DirectoryError: If the given base directory does not contain all of the subdirectories and subfiles given in the listing. """ |
for sub in listing:
path = os.path.join(base_directory, sub['basename'])
if sub['class'] == 'File':
if not os.path.isfile(path):
raise DirectoryError('File \'{}\' not found but specified in listing.'.format(path))
if sub['class'] == 'Directory':
if not os.path.isdir(path):
raise DirectoryError('Directory \'{}\' not found but specified in listing'.format(path))
sub_listing = sub.get('listing')
if sub_listing:
_check_input_directory_listing(path, sub_listing) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_cwl_type(cwl_type_string):
""" Parses cwl type information from a cwl type string. Examples: - "File[]" -> {'type': 'File', 'isArray': True, 'isOptional': False} - "int?" -> {'type': 'int', 'isArray': False, 'isOptional': True} :param cwl_type_string: The cwl type string to extract information from :return: A dictionary containing information about the parsed cwl type string """ |
is_optional = cwl_type_string.endswith('?')
if is_optional:
cwl_type_string = cwl_type_string[:-1]
is_array = cwl_type_string.endswith('[]')
if is_array:
cwl_type_string = cwl_type_string[:-2]
return {'type': cwl_type_string, 'isArray': is_array, 'isOptional': is_optional} |
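The docstring examples can be checked directly against the implementation:
assert parse_cwl_type('File[]') == {'type': 'File', 'isArray': True, 'isOptional': False}
assert parse_cwl_type('int?') == {'type': 'int', 'isArray': False, 'isOptional': True}
assert parse_cwl_type('string') == {'type': 'string', 'isArray': False, 'isOptional': False}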
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cwl_input_directories(cwl_data, job_data, input_dir=None):
""" Searches for Directories and in the cwl data and produces a dictionary containing input file information. :param cwl_data: The cwl data as dictionary :param job_data: The job data as dictionary :param input_dir: TODO :return: Returns the a dictionary containing information about input files. The keys of this dictionary are the input/output identifiers of the files specified in the cwl description. The corresponding value is a dictionary again with the following keys and values: - 'isOptional': A bool indicating whether this input directory is optional - 'isArray': A bool indicating whether this could be a list of directories - 'files': A list of input file descriptions A input file description is a dictionary containing the following information - 'path': The path to the specified directory - 'debugInfo': A field to possibly provide debug information """ |
results = {}
for input_identifier, input_data in cwl_data['inputs'].items():
cwl_type = parse_cwl_type(input_data['type'])
(is_optional, is_array, cwl_type) = itemgetter('isOptional', 'isArray', 'type')(cwl_type)
if cwl_type == 'Directory':
result = {
'isOptional': is_optional,
'isArray': is_array,
'directories': None
}
if input_identifier in job_data:
arg = job_data[input_identifier]
if is_array:
result['directories'] = [_input_directory_description(input_identifier, i, input_dir) for i in arg]
else:
result['directories'] = [_input_directory_description(input_identifier, arg, input_dir)]
results[input_identifier] = result
return results |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cwl_output_files(cwl_data, inputs_to_reference, output_dir=None):
""" Returns a dictionary containing information about the output files given in cwl_data. :param cwl_data: The cwl data from where to extract the output file information. :param inputs_to_reference: Inputs which are used to resolve input references. :param output_dir: Path to the directory where output files are expected. :return: A dictionary containing information about every output file. """ |
results = {}
for key, val in cwl_data['outputs'].items():
cwl_type = parse_cwl_type(val['type'])
(is_optional, is_array, cwl_type) = itemgetter('isOptional', 'isArray', 'type')(cwl_type)
if not cwl_type == 'File':
continue
result = {
'isOptional': is_optional,
'path': None,
'size': None,
'debugInfo': None
}
glob_path = os.path.expanduser(val['outputBinding']['glob'])
if output_dir and not os.path.isabs(glob_path):
glob_path = os.path.join(os.path.expanduser(output_dir), glob_path)
glob_path = resolve_input_references(glob_path, inputs_to_reference)
matches = glob(glob_path)
try:
if len(matches) != 1:
raise FileError('glob path "{}" does not match exactly one file'.format(glob_path))
file_path = matches[0]
result['path'] = file_path
if not os.path.isfile(file_path):
raise FileError('path is not a file')
result['size'] = os.path.getsize(file_path) / (1024 * 1024)
except:
result['debugInfo'] = exception_format()
results[key] = result
return results |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read(self, length=-1):
""" Reads from the FIFO. Reads as much data as possible from the FIFO up to the specified length. If the length argument is negative or ommited all data currently available in the FIFO will be read. If there is no data available in the FIFO an empty string is returned. Args: length: The amount of data to read from the FIFO. Defaults to -1. """ |
if 0 <= length < len(self):
newpos = self.pos + length
data = self.buf[self.pos:newpos]
self.pos = newpos
self.__discard()
return data
data = self.buf[self.pos:]
self.clear()
return data |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def readuntil(self, token, size=0):
""" Reads data from the FIFO until a token is encountered. If no token is encountered as much data is read from the FIFO as possible keeping in mind that the FIFO must retain enough data to perform matches for the token across writes. Args: token: The token to read until. size: The minimum amount of data that should be left in the FIFO. This is only used if it is greater than the length of the token. When ommited this value will default to the length of the token. Returns: A tuple of (found, data) where found is a boolean indicating whether the token was found, and data is all the data that could be read from the FIFO. Note: When a token is found the token is also read from the buffer and returned in the data. """ |
self.__append()
i = self.buf.find(token, self.pos)
if i < 0:
index = max(len(token) - 1, size)
newpos = max(len(self.buf) - index, self.pos)
data = self.buf[self.pos:newpos]
self.pos = newpos
self.__discard()
return False, data
newpos = i + len(token)
data = self.buf[self.pos:newpos]
self.pos = newpos
self.__discard()
return True, data |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def peekline(self):
""" Peeks a line into the FIFO. Perfroms the same function as readline() without removing data from the FIFO. See readline() for further information. """ |
self.__append()
i = self.buf.find(self.eol, self.pos)
if i < 0:
return ''
newpos = i + len(self.eol)
return self.buf[self.pos:newpos] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def peekuntil(self, token, size=0):
""" Peeks for token into the FIFO. Performs the same function as readuntil() without removing data from the FIFO. See readuntil() for further information. """ |
self.__append()
i = self.buf.find(token, self.pos)
if i < 0:
index = max(len(token) - 1, size)
newpos = max(len(self.buf) - index, self.pos)
return False, self.buf[self.pos:newpos]
newpos = i + len(token)
return True, self.buf[self.pos:newpos] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def revdocs2reverts(rev_docs, radius=defaults.RADIUS, use_sha1=False, resort=False, verbose=False):
""" Converts a sequence of page-partitioned revision documents into a sequence of reverts. :Params: rev_docs : `iterable` ( `dict` ) a page-partitioned sequence of revision documents radius : `int` The maximum number of revisions that a revert can reference. use_sha1 : `bool` Use the sha1 field as the checksum for comparison. resort : `bool` If True, re-sort the revisions of each page. verbose : `bool` Print dots and stuff """ |
page_rev_docs = groupby(rev_docs, lambda rd: rd.get('page'))
for page_doc, rev_docs in page_rev_docs:
if verbose:
sys.stderr.write(page_doc.get('title') + ": ")
sys.stderr.flush()
if resort:
if verbose:
sys.stderr.write("(sorting) ")
sys.stderr.flush()
rev_docs = sorted(
rev_docs, key=lambda r: (r.get('timestamp'), r.get('id')))
detector = Detector(radius=radius)
for rev_doc in rev_docs:
if not use_sha1 and 'text' not in rev_doc:
logger.warn("Skipping {0}: 'text' field not found in {0}"
.format(rev_doc['id'], rev_doc))
continue
if use_sha1:
checksum = rev_doc.get('sha1') or DummyChecksum()
elif 'text' in rev_doc:
text_bytes = bytes(rev_doc['text'], 'utf8', 'replace')
checksum = hashlib.sha1(text_bytes).digest()
revert = detector.process(checksum, rev_doc)
if revert:
yield revert.to_json()
if verbose:
sys.stderr.write("r")
sys.stderr.flush()
else:
if verbose:
sys.stderr.write(".")
sys.stderr.flush()
if verbose:
sys.stderr.write("\n")
sys.stderr.flush() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def spm_hrf_compat(t, peak_delay=6, under_delay=16, peak_disp=1, under_disp=1, p_u_ratio=6, normalize=True, ):
""" SPM HRF function from sum of two gamma PDFs This function is designed to be partially compatible with SPMs `spm_hrf.m` function. The SPN HRF is a *peak* gamma PDF (with location `peak_delay` and dispersion `peak_disp`), minus an *undershoot* gamma PDF (with location `under_delay` and dispersion `under_disp`, and divided by the `p_u_ratio`). Parameters t : array-like vector of times at which to sample HRF peak_delay : float, optional delay of peak peak_disp : float, optional width (dispersion) of peak under_delay : float, optional delay of undershoot under_disp : float, optional width (dispersion) of undershoot p_u_ratio : float, optional peak to undershoot ratio. Undershoot divided by this value before subtracting from peak. normalize : {True, False}, optional If True, divide HRF values by their sum before returning. SPM does this by default. Returns ------- hrf : array vector length ``len(t)`` of samples from HRF at times `t` Notes ----- See ``spm_hrf.m`` in the SPM distribution. """ |
if len([v for v in [peak_delay, peak_disp, under_delay, under_disp]
if v <= 0]):
raise ValueError("delays and dispersions must be > 0")
# gamma.pdf only defined for t > 0
hrf = np.zeros(t.shape, dtype=np.float)
pos_t = t[t > 0]
peak = sps.gamma.pdf(pos_t,
peak_delay / peak_disp,
loc=0,
scale=peak_disp)
undershoot = sps.gamma.pdf(pos_t,
under_delay / under_disp,
loc=0,
scale=under_disp)
hrf[t > 0] = peak - undershoot / p_u_ratio
if not normalize:
return hrf
return hrf / np.max(hrf) |
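A minimal sampling sketch (assuming numpy is available as np and a NumPy version in which np.float, used above, still exists):
import numpy as np

t = np.arange(0, 32, 0.1)                    # sample the HRF over 32 s at 0.1 s steps
hrf = spm_hrf_compat(t)
print(round(float(t[np.argmax(hrf)]), 1))    # time of the response peak, in the 5-6 s range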
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dsort(fname, order, has_header=True, frow=0, ofname=None):
r""" Sort file data. :param fname: Name of the comma-separated values file to sort :type fname: FileNameExists_ :param order: Sort order :type order: :ref:`CsvColFilter` :param has_header: Flag that indicates whether the comma-separated values file to sort has column headers in its first line (True) or not (False) :type has_header: boolean :param frow: First data row (starting from 1). If 0 the row where data starts is auto-detected as the first row that has a number (integer of float) in at least one of its columns :type frow: NonNegativeInteger_ :param ofname: Name of the output comma-separated values file, the file that will contain the sorted data. If None the sorting is done "in place" :type ofname: FileName_ or None .. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]] .. Auto-generated exceptions documentation for pcsv.dsort.dsort :raises: * OSError (File *[fname]* could not be found) * RuntimeError (Argument \`fname\` is not valid) * RuntimeError (Argument \`frow\` is not valid) * RuntimeError (Argument \`has_header\` is not valid) * RuntimeError (Argument \`ofname\` is not valid) * RuntimeError (Argument \`order\` is not valid) * RuntimeError (Column headers are not unique in file *[fname]*) * RuntimeError (File *[fname]* has no valid data) * RuntimeError (File *[fname]* is empty) * RuntimeError (Invalid column specification) * ValueError (Column *[column_identifier]* not found) .. [[[end]]] """ |
ofname = fname if ofname is None else ofname
obj = CsvFile(fname=fname, has_header=has_header, frow=frow)
obj.dsort(order)
obj.write(fname=ofname, header=has_header, append=False) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def call(cls, method, params=None, timeout=600):
""" Makes a Call to the LBRY API :param str method: Method to call from the LBRY API. See the full list of methods at https://lbryio.github.io/lbry/cli/ :param dict params: Parameters to give the method selected :param float timeout: The number of seconds to wait for a connection until we time out; 600 By Default. :raises LBRYException: If the request returns an error when calling the API :return: A Python `dict` object containing the data requested from the API :rtype: dict """ |
params = [] if params is None else params
return cls.make_request(SERVER_ADDRESS, method, params, timeout=timeout) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_document(self, titlestring):
""" This method may be used to create a new document for writing as xml to the OPS subdirectory of the ePub structure. """ |
#root = etree.XML('''<?xml version="1.0"?>\
#<!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.1//EN' 'http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd'>\
#<html xml:lang="en-US" xmlns="http://www.w3.org/1999/xhtml" xmlns:ops="http://www.idpf.org/2007/ops">\
#</html>''')
root = etree.XML('''<?xml version="1.0"?>\
<!DOCTYPE html>\
<html xmlns="http://www.w3.org/1999/xhtml">\
</html>''')
document = etree.ElementTree(root)
html = document.getroot()
head = etree.SubElement(html, 'head')
etree.SubElement(html, 'body')
title = etree.SubElement(head, 'title')
title.text = titlestring
#The href for the css stylesheet is a standin, can be overwritten
etree.SubElement(head,
'link',
{'href': 'css/default.css',
'rel': 'stylesheet',
'type': 'text/css'})
return document |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_document(self, name, document):
""" This function will write a document to an XML file. """ |
with open(name, 'wb') as out:
out.write(etree.tostring(document,
encoding='utf-8',
pretty_print=True)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format_date_string(self, date_tuple):
""" Receives a date_tuple object, and outputs a string for placement in the article content. """ |
months = ['', 'January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November', 'December']
date_string = ''
if date_tuple.season:
return '{0}, {1}'.format(date_tuple.season, date_tuple.year)
else:
if not date_tuple.month and not date_tuple.day:
return '{0}'.format(date_tuple.year)
if date_tuple.month:
date_string += months[int(date_tuple.month)]
if date_tuple.day:
date_string += ' ' + date_tuple.day
return ', '.join([date_string, date_tuple.year]) |
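A sketch of the expected input, using a purely illustrative namedtuple in place of the parser's real date tuple type:
from collections import namedtuple

DateTuple = namedtuple('DateTuple', ['season', 'year', 'month', 'day'])
dt = DateTuple(season=None, year='2013', month='7', day='16')
# Calling format_date_string(dt) on a publisher instance would yield 'July 16, 2013'.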
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def has_out_of_flow_tables(self):
""" Returns True if the article has out-of-flow tables, indicates separate tables document. This method is used to indicate whether rendering this article's content will result in the creation of out-of-flow HTML tables. This method has a base class implementation representing a common logic; if an article has a graphic(image) representation of a table then the HTML representation will be placed out-of-flow if it exists, if there is no graphic(image) represenation then the HTML representation will be placed in-flow. Returns ------- bool True if there are out-of-flow HTML tables, False otherwise """ |
if self.article.body is None:
return False
for table_wrap in self.article.body.findall('.//table-wrap'):
graphic = table_wrap.xpath('./graphic | ./alternatives/graphic')
table = table_wrap.xpath('./table | ./alternatives/table')
if graphic and table:
return True
return False |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process(self, article):
""" Ingests an Article to create navigation structures and parse global metadata. """ |
if self.article is not None and not self.collection:
log.warning('Could not process additional article. Navigation only \
handles one article unless collection mode is set.')
return False
if article.publisher is None:
log.error('''Navigation cannot be generated for an Article \
without a publisher!''')
return
self.article = article
self.article_doi = self.article.doi.split('/')[1]
self.all_dois.append(self.article.doi)
if self.collection:
pass
else:
self.title = self.article.publisher.nav_title()
for author in self.article.publisher.nav_contributors():
self.contributors.add(author)
#Analyze the structure of the article to create internal mapping
self.map_navigation() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def map_navigation(self):
""" This is a wrapper for depth-first recursive analysis of the article """ |
#All articles should have titles
title_id = 'titlepage-{0}'.format(self.article_doi)
title_label = self.article.publisher.nav_title()
title_source = 'main.{0}.xhtml#title'.format(self.article_doi)
title_navpoint = navpoint(title_id, title_label, self.play_order,
title_source, [])
self.nav.append(title_navpoint)
#When processing a collection of articles, we will want all subsequent
#navpoints for this article to be located under the title
if self.collection:
nav_insertion = title_navpoint.children
else:
nav_insertion = self.nav
#If the article has a body, we'll need to parse it for navigation
if self.article.body is not None:
#Here is where we invoke the recursive parsing!
for nav_pt in self.recursive_article_navmap(self.article.body):
nav_insertion.append(nav_pt)
#Add a navpoint to the references if appropriate
if self.article.root.xpath('./back/ref'):
ref_id = 'references-{0}'.format(self.article_doi)
ref_label = 'References'
ref_source = 'biblio.{0}.xhtml#references'.format(self.article_doi)
ref_navpoint = navpoint(ref_id, ref_label, self.play_order,
ref_source, [])
nav_insertion.append(ref_navpoint) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def recursive_article_navmap(self, src_element, depth=0, first=True):
""" This function recursively traverses the content of an input article to add the correct elements to the NCX file's navMap and Lists. """ |
if depth > self.nav_depth:
self.nav_depth = depth
navpoints = []
tagnames = ['sec', 'fig', 'table-wrap']
for child in src_element:
try:
tagname = child.tag
except AttributeError:
continue
else:
if tagname not in tagnames:
continue
#Safely handle missing id attributes
if 'id' not in child.attrib:
child.attrib['id'] = self.auto_id
#If in collection mode, we'll prepend the article DOI to avoid
#collisions
if self.collection:
child_id = '-'.join([self.article_doi,
child.attrib['id']])
else:
child_id = child.attrib['id']
#Attempt to infer the correct text as a label
#Skip the element if we cannot
child_title = child.find('title')
if child_title is None:
continue # If there is no immediate title, skip this element
label = element_methods.all_text(child_title)
if not label:
continue # If no text in the title, skip this element
source = 'main.{0}.xhtml#{1}'.format(self.article_doi,
child.attrib['id'])
if tagname == 'sec':
children = self.recursive_article_navmap(child, depth=depth + 1)
navpoints.append(navpoint(child_id,
label,
self.play_order,
source,
children))
#figs and table-wraps do not have children
elif tagname == 'fig': # Add navpoints to list_of_figures
self.figures_list.append(navpoint(child.attrib['id'],
label,
None,
source,
[]))
elif tagname == 'table-wrap': # Add navpoints to list_of_tables
self.tables_list.append(navpoint(child.attrib['id'],
label,
None,
source,
[]))
return navpoints |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def funcConvPar(aryDm, vecHrf, varNumVol):
""" Function for convolution of pixel-wise 'design matrix' with HRF model. """ |
# In order to avoid an artefact at the end of the time series, we have to
# concatenate an empty array to both the design matrix and the HRF model
# before convolution.
aryDm = np.concatenate((aryDm, np.zeros((aryDm.shape[0], 100))), axis=1)
vecHrf = np.concatenate((vecHrf, np.zeros((100,))))
aryDmConv = np.empty((aryDm.shape[0], varNumVol))
for idx in range(0, aryDm.shape[0]):
vecDm = aryDm[idx, :]
# Convolve design matrix with HRF model:
aryDmConv[idx, :] = np.convolve(vecDm, vecHrf,
mode='full')[:varNumVol]
return aryDmConv |
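A minimal usage sketch for the convolution helper above; the design matrix, HRF kernel and volume count are illustrative toy values, not taken from the original pipeline:

import numpy as np

# Toy pixel-wise design matrix: 3 pixels, 50 volumes, one short 'on' block.
aryDm = np.zeros((3, 50))
aryDm[:, 10:20] = 1.0
# Stand-in HRF kernel (a real analysis would use e.g. a double-gamma HRF).
vecHrf = np.exp(-np.arange(15) / 4.0)
aryDmConv = funcConvPar(aryDm, vecHrf, varNumVol=50)
print(aryDmConv.shape)  # (3, 50)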
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def funcNrlTcMotPred(idxPrc, varPixX, varPixY, NrlMdlChunk, varNumTP, aryBoxCar, path, varNumNrlMdls, varNumMtDrctn, varPar, queOut):  # aryCond
""" Function for creating neural time course models. This function should be used to create neural models if different predictors for every motion direction are included. """ |
# # if hd5 method is used: open file for reading
# filename = 'aryBoxCar' + str(idxPrc) + '.hdf5'
# hdf5_path = os.path.join(path, filename)
# fileH = tables.openFile(hdf5_path, mode='r')
# Output array with pRF model time courses at all modelled standard
# deviations for current pixel position:
aryOut = np.empty((len(NrlMdlChunk), varNumTP, varNumMtDrctn),
dtype='float32')
# Prepare status indicator if this is the first of the parallel processes:
if idxPrc == 1:
# We create a status indicator for the time consuming pRF model finding
# algorithm. Number of steps of the status indicator:
varStsStpSze = 20
# Number of pRF models to fit:
varNumLoops = varNumNrlMdls/varPar
# Vector with pRF values at which to give status feedback:
vecStatus = np.linspace(0,
varNumLoops,
num=(varStsStpSze+1),
endpoint=True)
vecStatus = np.ceil(vecStatus)
vecStatus = vecStatus.astype(int)
# Vector with corresponding percentage values at which to give status
# feedback:
vecStatusPrc = np.linspace(0,
100,
num=(varStsStpSze+1),
endpoint=True)
vecStatusPrc = np.ceil(vecStatusPrc)
vecStatusPrc = vecStatusPrc.astype(int)
# Counter for status indicator:
varCntSts01 = 0
varCntSts02 = 0
# Loop through all Gauss parameters that are in this chunk
for idx, NrlMdlTrpl in enumerate(NrlMdlChunk):
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 1:
# Status indicator:
if varCntSts02 == vecStatus[varCntSts01]:
# Prepare status message:
strStsMsg = ('---------Progress: ' +
str(vecStatusPrc[varCntSts01]) +
' % --- ' +
str(vecStatus[varCntSts01]) +
' loops out of ' +
str(varNumLoops))
print(strStsMsg)
# Only increment counter if the last value has not been
# reached yet:
if varCntSts01 < varStsStpSze:
varCntSts01 = varCntSts01 + int(1)
# x pos of Gauss model: NrlMdlTrpl[0]
# y pos of Gauss model: NrlMdlTrpl[1]
# std of Gauss model: NrlMdlTrpl[2]
# index of tng crv model: NrlMdlTrpl[3]
varTmpX = int(np.around(NrlMdlTrpl[0], 0))
varTmpY = int(np.around(NrlMdlTrpl[1], 0))
# Create pRF model (2D):
aryGauss = funcGauss2D(varPixX,
varPixY,
varTmpX,
varTmpY,
NrlMdlTrpl[2])
# Multiply pixel-wise box car model with Gaussian pRF models:
aryNrlTcTmp = np.multiply(aryBoxCar, aryGauss[:, :, None, None])
# Calculate sum across x- and y-dimensions - the 'area under the
# Gaussian surface'. This is essentially an unscaled version of the
# neural time course model (i.e. not yet scaled for the size of
# the pRF).
aryNrlTcTmp = np.sum(aryNrlTcTmp, axis=(0, 1))
# Normalise the nrl time course model to the size of the pRF. This
# gives us the ratio of 'activation' of the pRF at each time point,
# or, in other words, the neural time course model.
aryNrlTcTmp = np.divide(aryNrlTcTmp,
np.sum(aryGauss, axis=(0, 1)))
# Put model time courses into the function's output array:
aryOut[idx, :, :] = aryNrlTcTmp
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 1:
# Increment status indicator counter:
varCntSts02 = varCntSts02 + 1
# Output list:
lstOut = [idxPrc,
aryOut,
]
queOut.put(lstOut) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def funcFindPrf(idxPrc, aryFuncChnk, aryPrfTc, aryMdls, queOut):
""" Function for finding best pRF model for voxel time course. This function should be used if there is only one predictor. """ |
# Number of voxels to be fitted in this chunk:
varNumVoxChnk = aryFuncChnk.shape[0]
# Number of volumes:
varNumVol = aryFuncChnk.shape[1]
# Vectors for pRF finding results [number-of-voxels times one]:
vecBstXpos = np.zeros(varNumVoxChnk)
vecBstYpos = np.zeros(varNumVoxChnk)
vecBstSd = np.zeros(varNumVoxChnk)
# vecBstR2 = np.zeros(varNumVoxChnk)
    # Vector for the best (i.e. lowest) residual sum of squares. For each model
    # fit, the residuals are compared against this vector and updated wherever
    # they are lower than the best-fitting solution so far. We initialise with
    # an arbitrarily high value
vecBstRes = np.add(np.zeros(varNumVoxChnk),
100000.0)
# We reshape the voxel time courses, so that time goes down the column,
# i.e. from top to bottom.
aryFuncChnk = aryFuncChnk.T
# Constant term for the model:
vecConst = np.ones((varNumVol), dtype=np.float32)
# Change type to float 32:
aryFuncChnk = aryFuncChnk.astype(np.float32)
aryPrfTc = aryPrfTc.astype(np.float32)
# Number of pRF models to fit:
varNumMdls = len(aryMdls)
# Prepare status indicator if this is the first of the parallel processes:
if idxPrc == 0:
# We create a status indicator for the time consuming pRF model finding
# algorithm. Number of steps of the status indicator:
varStsStpSze = 20
# Vector with pRF values at which to give status feedback:
vecStatPrf = np.linspace(0,
varNumMdls,
num=(varStsStpSze+1),
endpoint=True)
vecStatPrf = np.ceil(vecStatPrf)
vecStatPrf = vecStatPrf.astype(int)
# Vector with corresponding percentage values at which to give status
# feedback:
vecStatPrc = np.linspace(0,
100,
num=(varStsStpSze+1),
endpoint=True)
vecStatPrc = np.ceil(vecStatPrc)
vecStatPrc = vecStatPrc.astype(int)
# Counter for status indicator:
varCntSts01 = 0
varCntSts02 = 0
# Loop through pRF models:
for idxMdls in range(0, varNumMdls):
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 0:
# Status indicator:
if varCntSts02 == vecStatPrf[varCntSts01]:
# Prepare status message:
strStsMsg = ('---------Progress: ' +
str(vecStatPrc[varCntSts01]) +
' % --- ' +
str(vecStatPrf[varCntSts01]) +
' pRF models out of ' +
str(varNumMdls))
print(strStsMsg)
# Only increment counter if the last value has not been
# reached yet:
if varCntSts01 < varStsStpSze:
varCntSts01 = varCntSts01 + int(1)
# Current pRF time course model:
vecMdlTc = aryPrfTc[idxMdls, :].flatten()
# We create a design matrix including the current pRF time
# course model, and a constant term:
aryDsgn = np.vstack([vecMdlTc,
vecConst]).T
# Calculation of the ratio of the explained variance (R square)
# for the current model for all voxel time courses.
# print('------------np.linalg.lstsq on pRF: ' +
# str(idxX) +
# 'x ' +
# str(idxY) +
# 'y ' +
# str(idxSd) +
# 'z --- START')
# varTmeTmp01 = time.time()
# Change type to float32:
# aryDsgn = aryDsgn.astype(np.float32)
# Calculate the least-squares solution for all voxels:
vecTmpRes = np.linalg.lstsq(aryDsgn, aryFuncChnk)[1]
# varTmeTmp02 = time.time()
# varTmeTmp03 = np.around((varTmeTmp02 - varTmeTmp01),
# decimals=2)
# print('------------np.linalg.lstsq on pRF: ' +
# str(idxX) +
# 'x ' +
# str(idxY) +
# 'y ' +
# str(idxSd) +
# 'z --- DONE elapsed time: ' +
# str(varTmeTmp03) +
# 's')
# Check whether current residuals are lower than previously
# calculated ones:
vecLgcTmpRes = np.less(vecTmpRes, vecBstRes)
# Replace best x and y position values, and SD values.
vecBstXpos[vecLgcTmpRes] = aryMdls[idxMdls][0]
vecBstYpos[vecLgcTmpRes] = aryMdls[idxMdls][1]
vecBstSd[vecLgcTmpRes] = aryMdls[idxMdls][2]
# Replace best residual values:
vecBstRes[vecLgcTmpRes] = vecTmpRes[vecLgcTmpRes]
# varTmeTmp04 = time.time()
# varTmeTmp05 = np.around((varTmeTmp04 - varTmeTmp02),
# decimals=2)
# print('------------selection of best-fitting pRF model: ' +
# str(idxX) +
# 'x ' +
# str(idxY) +
# 'y ' +
# str(idxSd) +
# 'z --- elapsed time: ' +
# str(varTmeTmp05) +
# 's')
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 0:
# Increment status indicator counter:
varCntSts02 = varCntSts02 + 1
# After finding the best fitting model for each voxel, we still have to
# calculate the coefficient of determination (R-squared) for each voxel. We
# start by calculating the total sum of squares (i.e. the deviation of the
# data from the mean). The mean of each time course:
vecFuncMean = np.mean(aryFuncChnk, axis=0)
# Deviation from the mean for each datapoint:
vecFuncDev = np.subtract(aryFuncChnk, vecFuncMean[None, :])
# Sum of squares:
vecSsTot = np.sum(np.power(vecFuncDev,
2.0),
axis=0)
# Coefficient of determination:
vecBstR2 = np.subtract(1.0,
np.divide(vecBstRes,
vecSsTot))
# Output list:
lstOut = [idxPrc,
vecBstXpos,
vecBstYpos,
vecBstSd,
vecBstR2]
queOut.put(lstOut) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def concatenate( fname1, fname2, dfilter1=None, dfilter2=None, has_header1=True, has_header2=True, frow1=0, frow2=0, ofname=None, ocols=None, ):
r""" Concatenate two comma-separated values file. Data rows from the second file are appended at the end of the data rows from the first file :param fname1: Name of the first comma-separated values file, the file whose data appears first in the output file :type fname1: FileNameExists_ :param fname2: Name of the second comma-separated values file, the file whose data appears last in the output file :type fname2: FileNameExists_ :param dfilter1: Row and/or column filter for the first file. If None no data filtering is done on the file :type dfilter1: :ref:`CsvDataFilter` or None :param dfilter2: Row and/or column filter for the second file. If None no data filtering is done on the file :type dfilter2: :ref:`CsvDataFilter` or None :param has_header1: Flag that indicates whether the first comma-separated values file has column headers in its first line (True) or not (False) :type has_header1: boolean :param has_header2: Flag that indicates whether the second comma-separated values file has column headers in its first line (True) or not (False) :type has_header2: boolean :param frow1: First comma-separated values file first data row (starting from 1). If 0 the row where data starts is auto-detected as the first row that has a number (integer of float) in at least one of its columns :type frow1: NonNegativeInteger_ :param frow2: Second comma-separated values file first data row (starting from 1). If 0 the row where data starts is auto-detected as the first row that has a number (integer of float) in at least one of its columns :type frow2: NonNegativeInteger_ :param ofname: Name of the output comma-separated values file, the file that will contain the data from the first and second files. If None the first file is replaced "in place" :type ofname: FileName_ or None :param ocols: Column names of the output comma-separated values file. If None the column names in the first file are used if **has_header1** is True or the column names in the second files are used if **has_header1** is False and **has_header2** is True, otherwise no header is used :type ocols: list or None .. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]] .. Auto-generated exceptions documentation for .. pcsv.concatenate.concatenate :raises: * OSError (File *[fname]* could not be found) * RuntimeError (Argument \`dfilter1\` is not valid) * RuntimeError (Argument \`dfilter2\` is not valid) * RuntimeError (Argument \`fname1\` is not valid) * RuntimeError (Argument \`fname2\` is not valid) * RuntimeError (Argument \`frow1\` is not valid) * RuntimeError (Argument \`frow2\` is not valid) * RuntimeError (Argument \`ocols\` is not valid) * RuntimeError (Argument \`ofname\` is not valid) * RuntimeError (Column headers are not unique in file *[fname]*) * RuntimeError (File *[fname]* has no valid data) * RuntimeError (File *[fname]* is empty) * RuntimeError (Files have different number of columns) * RuntimeError (Invalid column specification) * RuntimeError (Number of columns in data files and output columns are different) * ValueError (Column *[column_identifier]* not found) .. [[[end]]] """ |
# pylint: disable=R0913,R0914
iro = pexdoc.exh.addex(RuntimeError, "Files have different number of columns")
iom = pexdoc.exh.addex(
RuntimeError, "Number of columns in data files and output columns are different"
)
# Read and validate file 1
obj1 = CsvFile(fname=fname1, dfilter=dfilter1, has_header=has_header1, frow=frow1)
# Read and validate file 2
obj2 = CsvFile(fname=fname2, dfilter=dfilter2, has_header=has_header2, frow=frow2)
# Assign output data structure
ofname = fname1 if ofname is None else ofname
# Create new header
if (ocols is None) and has_header1:
ocols = [obj1.header()] if obj1.cfilter is None else [obj1.cfilter]
elif (ocols is None) and has_header2:
ocols = [obj2.header()] if obj2.cfilter is None else [obj2.cfilter]
elif ocols is None:
ocols = []
else:
iom((obj1.cfilter is not None) and (len(obj1.cfilter) != len(ocols)))
ocols = [ocols]
# Miscellaneous data validation
iro(_C(obj1.cfilter, obj2.cfilter) and (len(obj1.cfilter) != len(obj2.cfilter)))
# Write final output
data = ocols + obj1.data(filtered=True) + obj2.data(filtered=True)
write(fname=ofname, data=data, append=False) |
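A hedged usage sketch for the function above; the file names are illustrative and both files are assumed to exist with the same column layout:

# Append the data rows of 'second.csv' to those of 'first.csv' and write the
# combined result to 'combined.csv'.
concatenate('first.csv', 'second.csv', ofname='combined.csv')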
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def move_images_to_cache(source, destination):
""" Handles the movement of images to the cache. Must be helpful if it finds that the folder for this article already exists. """ |
if os.path.isdir(destination):
log.debug('Cached images for this article already exist')
return
else:
log.debug('Cache location: {0}'.format(destination))
try:
shutil.copytree(source, destination)
except:
log.exception('Images could not be moved to cache')
else:
        log.info('Moved images to cache: {0}'.format(destination))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def explicit_images(images, image_destination, rootname, config):
""" The method used to handle an explicitly defined image directory by the user as a parsed argument. """ |
log.info('Explicit image directory specified: {0}'.format(images))
if '*' in images:
images = images.replace('*', rootname)
log.debug('Wildcard expansion for image directory: {0}'.format(images))
try:
shutil.copytree(images, image_destination)
except:
#The following is basically a recipe for log.exception() but with a
#CRITICAL level if the execution should be killed immediately
#log.critical('Unable to copy from indicated directory', exc_info=True)
log.exception('Unable to copy from indicated directory')
return False
else:
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def input_relative_images(input_path, image_destination, rootname, config):
""" The method used to handle Input-Relative image inclusion. """ |
log.debug('Looking for input relative images')
input_dirname = os.path.dirname(input_path)
for path in config.input_relative_images:
if '*' in path:
path = path.replace('*', rootname)
log.debug('Wildcard expansion for image directory: {0}'.format(path))
images = os.path.normpath(os.path.join(input_dirname, path))
if os.path.isdir(images):
log.info('Input-Relative image directory found: {0}'.format(images))
shutil.copytree(images, image_destination)
return True
return False |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_images(output_directory, explicit, input_path, config, parsed_article):
""" Main logic controller for the placement of images into the output directory Controlling logic for placement of the appropriate imager files into the EPUB directory. This function interacts with interface arguments as well as the local installation config.py file. These may change behavior of this function in terms of how it looks for images relative to the input, where it finds explicit images, whether it will attempt to download images, and whether successfully downloaded images will be stored in the cache. Parameters output_directory : str The directory path where the EPUB is being constructed/output explicit : str A directory path to a user specified directory of images. Allows * wildcard expansion. input_path : str The absolute path to the input XML file. config : config module The imported configuration module parsed_article : openaccess_epub.article.Article object The Article instance for the article being converted to EPUB """ |
#Split the DOI
journal_doi, article_doi = parsed_article.doi.split('/')
log.debug('journal-doi : {0}'.format(journal_doi))
log.debug('article-doi : {0}'.format(article_doi))
#Get the rootname for wildcard expansion
rootname = utils.file_root_name(input_path)
#Specify where to place the images in the output
img_dir = os.path.join(output_directory,
'EPUB',
'images-{0}'.format(article_doi))
log.info('Using {0} as image directory target'.format(img_dir))
#Construct path to cache for article
article_cache = os.path.join(config.image_cache, journal_doi, article_doi)
#Use manual image directory, explicit images
if explicit:
success = explicit_images(explicit, img_dir, rootname, config)
if success and config.use_image_cache:
move_images_to_cache(img_dir, article_cache)
#Explicit images prevents all other image methods
return success
#Input-Relative import, looks for any one of the listed options
if config.use_input_relative_images:
#Prevents other image methods only if successful
if input_relative_images(input_path, img_dir, rootname, config):
if config.use_image_cache:
move_images_to_cache(img_dir, article_cache)
return True
#Use cache for article if it exists
if config.use_image_cache:
#Prevents other image methods only if successful
if image_cache(article_cache, img_dir):
return True
#Download images from Internet
if config.use_image_fetching:
os.mkdir(img_dir)
if journal_doi == '10.3389':
fetch_frontiers_images(article_doi, img_dir)
if config.use_image_cache:
move_images_to_cache(img_dir, article_cache)
return True
elif journal_doi == '10.1371':
success = fetch_plos_images(article_doi, img_dir, parsed_article)
if success and config.use_image_cache:
move_images_to_cache(img_dir, article_cache)
return success
else:
log.error('Fetching images for this publisher is not supported!')
return False
return False |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_image_cache(img_cache):
""" Initiates the image cache if it does not exist """ |
log.info('Initiating the image cache at {0}'.format(img_cache))
if not os.path.isdir(img_cache):
utils.mkdir_p(img_cache)
utils.mkdir_p(os.path.join(img_cache, '10.1371'))
utils.mkdir_p(os.path.join(img_cache, '10.3389')) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fetch_plos_images(article_doi, output_dir, document):
""" Fetch the images for a PLoS article from the internet. PLoS images are known through the inspection of <graphic> and <inline-graphic> elements. The information in these tags are then parsed into appropriate URLs for downloading. """ |
log.info('Processing images for {0}...'.format(article_doi))
#A dict of URLs for PLoS subjournals
journal_urls = {'pgen': 'http://www.plosgenetics.org/article/{0}',
'pcbi': 'http://www.ploscompbiol.org/article/{0}',
'ppat': 'http://www.plospathogens.org/article/{0}',
'pntd': 'http://www.plosntds.org/article/{0}',
'pmed': 'http://www.plosmedicine.org/article/{0}',
'pbio': 'http://www.plosbiology.org/article/{0}',
'pone': 'http://www.plosone.org/article/{0}',
'pctr': 'http://clinicaltrials.ploshubs.org/article/{0}'}
#Identify subjournal name for base URL
subjournal_name = article_doi.split('.')[1]
base_url = journal_urls[subjournal_name]
#Acquire <graphic> and <inline-graphic> xml elements
graphics = document.document.getroot().findall('.//graphic')
graphics += document.document.getroot().findall('.//inline-graphic')
#Begin to download
log.info('Downloading images, this may take some time...')
for graphic in graphics:
nsmap = document.document.getroot().nsmap
xlink_href = graphic.attrib['{' + nsmap['xlink'] + '}' + 'href']
#Equations are handled a bit differently than the others
#Here we decide that an image name starting with "e" is an equation
if xlink_href.split('.')[-1].startswith('e'):
resource = 'fetchObject.action?uri=' + xlink_href + '&representation=PNG'
else:
resource = xlink_href + '/largerimage'
full_url = base_url.format(resource)
try:
image = urllib.request.urlopen(full_url)
except urllib.error.HTTPError as e:
if e.code == 503: # Server overload error
time.sleep(1) # Wait a second
try:
image = urllib.request.urlopen(full_url)
except:
return False # Happened twice, give up
else:
log.error('urllib.error.HTTPError {0}'.format(e.code))
return False
else:
img_name = xlink_href.split('.')[-1] + '.png'
img_path = os.path.join(output_dir, img_name)
with open(img_path, 'wb') as output:
output.write(image.read())
log.info('Downloaded image {0}'.format(img_name))
log.info('Done downloading images')
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_nearest_by_numeric_key(data: dict, key: int) -> Any: """Return the dict element whose numeric key is closest to a target.""" |
return data.get(key, data[min(data.keys(), key=lambda k: abs(k - key))]) |
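A short usage sketch for the lookup helper above, with an illustrative threshold table:

thresholds = {0: 'calm', 10: 'breezy', 25: 'windy', 40: 'stormy'}
get_nearest_by_numeric_key(thresholds, 27)  # -> 'windy' (25 is the closest key)
get_nearest_by_numeric_key(thresholds, 10)  # -> 'breezy' (exact match)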
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resource(self, uri, methods=frozenset({'GET'}), host=None, strict_slashes=None, stream=False, version=None, name=None, **kwargs):
""" Create a blueprint resource route from a decorated function. :param uri: endpoint at which the route will be accessible. :param methods: list of acceptable HTTP methods. :param host: :param strict_slashes: :param version: :param name: user defined route name for url_for :return: function or class instance Accepts any keyword argument that will be passed to the app resource. """ |
if strict_slashes is None:
strict_slashes = self.strict_slashes
def decorator(handler):
self.resources.append((
FutureRoute(handler, uri, methods, host, strict_slashes,
stream, version, name),
kwargs))
return handler
return decorator |
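A hedged usage sketch; 'api' is a hypothetical blueprint-like instance exposing the resource() decorator defined above, and the handler follows Sanic-style conventions:

@api.resource('/status', methods=frozenset({'GET'}), version=1)
async def status_handler(request):
    # Return value handling depends on the surrounding framework.
    return {'ok': True}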
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_resource(self, handler, uri, methods=frozenset({'GET'}), host=None, strict_slashes=None, version=None, name=None, **kwargs):
""" Create a blueprint resource route from a function. :param uri: endpoint at which the route will be accessible. :param methods: list of acceptable HTTP methods. :param host: :param strict_slashes: :param version: :param name: user defined route name for url_for :return: function or class instance Accepts any keyword argument that will be passed to the app resource. """ |
self.resource(uri=uri, methods=methods, host=host,
strict_slashes=strict_slashes, version=version,
name=name, **kwargs)(handler) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_db():
""" Returns the connection to the database using the settings. This function should not be called outside of this file. Use db instead. """ |
from .settings import settings
mongo = settings.MONGODB
if 'URI' in mongo and mongo['URI']:
uri = mongo['URI']
else:
uri = 'mongodb://'
if all(mongo.get(key) for key in ('USERNAME', 'PASSWORD')):
uri += '{0}:{1}@'.format(mongo['USERNAME'], mongo['PASSWORD'])
if 'HOSTS' in mongo and mongo['HOSTS']:
uri += ','.join(
'{0}:{1}'.format(host, port)
            for (host, port) in zip(mongo['HOSTS'], mongo['PORTS'])
)
else:
uri += '{0}:{1}'.format(mongo['HOST'], mongo.get('PORT', 27017))
uri += '/' + mongo['DATABASE']
if 'OPTIONS' in mongo and mongo['OPTIONS']:
uri += '?{0}'.format('&'.join(mongo['OPTIONS']))
client = ConnectionFailureProxy(MongoClient(uri, connect=False))
database = client[parse_uri(uri)['database']]
return database |
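For illustration only, a MONGODB settings block shaped like the following (all values hypothetical) would make the function above build the URI mongodb://user:pass@db1:27017,db2:27018/mydb:

MONGODB = {
    'URI': None,
    'USERNAME': 'user',
    'PASSWORD': 'pass',
    'HOSTS': ['db1', 'db2'],
    'PORTS': [27017, 27018],
    'DATABASE': 'mydb',
    'OPTIONS': [],
}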
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plos_doi_to_xmlurl(doi_string):
""" Attempts to resolve a PLoS DOI into a URL path to the XML file. """ |
#Create URL to request DOI resolution from http://dx.doi.org
doi_url = 'http://dx.doi.org/{0}'.format(doi_string)
log.debug('DOI URL: {0}'.format(doi_url))
#Open the page, follow the redirect
try:
resolved_page = urllib.request.urlopen(doi_url)
except urllib.error.URLError as err:
print('Unable to resolve DOI URL, or could not connect')
raise err
else:
#Given the redirection, attempt to shape new request for PLoS servers
resolved_address = resolved_page.geturl()
log.debug('DOI resolved to {0}'.format(resolved_address))
parsed = urllib.parse.urlparse(resolved_address)
xml_url = '{0}://{1}'.format(parsed.scheme, parsed.netloc)
xml_url += '/article/fetchObjectAttachment.action?uri='
xml_path = parsed.path.replace(':', '%3A').replace('/', '%2F')
xml_path = xml_path.split('article%2F')[1]
xml_url += '{0}{1}'.format(xml_path, '&representation=XML')
log.debug('Shaped PLoS request for XML {0}'.format(xml_url))
#Return this url to the calling function
return xml_url |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def doi_input(doi_string, download=True):
""" This method accepts a DOI string and attempts to download the appropriate xml file. If successful, it returns a path to that file. As with all URL input types, the success of this method depends on supporting per-publisher conventions and will fail on unsupported publishers """ |
log.debug('DOI Input - {0}'.format(doi_string))
    doi_string = doi_string[4:]  # Strip the leading 'doi:' prefix from the input string
if '10.1371' in doi_string: # Corresponds to PLoS
log.debug('DOI string shows PLoS')
xml_url = plos_doi_to_xmlurl(doi_string)
else:
log.critical('DOI input for this publisher is not supported')
sys.exit('This publisher is not yet supported by OpenAccess_EPUB')
return url_input(xml_url, download) |

<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def url_input(url_string, download=True):
""" This method expects a direct URL link to an xml file. It will apply no modifications to the received URL string, so ensure good input. """ |
log.debug('URL Input - {0}'.format(url_string))
try:
open_xml = urllib.request.urlopen(url_string)
except urllib.error.URLError as err:
print('utils.input.url_input received a bad URL, or could not connect')
raise err
else:
#Employ a quick check on the mimetype of the link
if not open_xml.headers['Content-Type'] == 'text/xml':
sys.exit('URL request does not appear to be XML')
filename = open_xml.headers['Content-Disposition'].split('\"')[1]
if download:
with open(filename, 'wb') as xml_file:
xml_file.write(open_xml.read())
return openaccess_epub.utils.file_root_name(filename) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def frontiersZipInput(zip_path, output_prefix, download=None):
""" This method provides support for Frontiers production using base zipfiles as the input for ePub creation. It expects a valid pathname for one of the two zipfiles, and that both zipfiles are present in the same directory. """ |
log.debug('frontiersZipInput called')
#If there is a problem with the input, it should clearly describe the issue
pathname, pathext = os.path.splitext(zip_path)
path, name = os.path.split(pathname)
if not pathext == '.zip': # Checks for a path to zipfile
log.error('Pathname provided does not end with .zip')
print('Invalid file path: Does not have a zip extension.')
sys.exit(1)
#Construct the pair of zipfile pathnames
file_root = name.split('-r')[0]
zipname1 = "{0}-r{1}.zip".format(file_root, '1')
zipname2 = "{0}-r{1}.zip".format(file_root, '2')
#Construct the pathnames for output
output = os.path.join(output_prefix, file_root)
if os.path.isdir(output):
shutil.rmtree(output) # Delete previous output
output_meta = os.path.join(output, 'META-INF')
images_output = os.path.join(output, 'EPUB', 'images')
with zipfile.ZipFile(os.path.join(path, zipname1), 'r') as xml_zip:
zip_dir = '{0}-r1'.format(file_root)
xml = '/'.join([zip_dir, '{0}.xml'.format(file_root)])
try:
xml_zip.extract(xml)
except KeyError:
log.critical('There is no item {0} in the zipfile'.format(xml))
sys.exit('There is no item {0} in the zipfile'.format(xml))
else:
if not os.path.isdir(output_meta):
os.makedirs(output_meta)
shutil.copy(xml, os.path.join(output_meta))
os.remove(xml)
os.rmdir(zip_dir)
with zipfile.ZipFile(os.path.join(path, zipname2), 'r') as image_zip:
zip_dir = '{0}-r2'.format(file_root)
for i in image_zip.namelist():
if 'image_m' in i:
image_zip.extract(i)
if not os.path.isdir(images_output):
os.makedirs(images_output)
unzipped_images = os.path.join(zip_dir, 'images', 'image_m')
for i in os.listdir(unzipped_images):
shutil.copy(os.path.join(unzipped_images, i), images_output)
shutil.rmtree(zip_dir)
return file_root |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _red_listing_validation(key, listing):
""" Raises an RedValidationError, if the given listing does not comply with cwl_job_listing_schema. If listing is None or an empty list, no exception is thrown. :param key: The input key to build an error message if needed. :param listing: The listing to validate :raise RedValidationError: If the given listing does not comply with cwl_job_listing_schema """ |
if listing:
try:
jsonschema.validate(listing, cwl_job_listing_schema)
except ValidationError as e:
raise RedValidationError('REDFILE listing of input "{}" does not comply with jsonschema: {}'
.format(key, e.context)) |
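A hedged example call; the listing below assumes cwl_job_listing_schema accepts standard CWL-style File/Directory entries with 'class' and 'basename' keys:

listing = [
    {'class': 'File', 'basename': 'data.csv'},
    {'class': 'Directory', 'basename': 'subdir', 'listing': [
        {'class': 'File', 'basename': 'notes.txt'},
    ]},
]
_red_listing_validation('my_input', listing)  # raises RedValidationError only on schema violations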
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def red_get_mount_connectors(red_data, ignore_outputs):
""" Returns a list of mounting connectors :param red_data: The red data to be searched :param ignore_outputs: If outputs should be ignored :return: A list of connectors with active mount option. """ |
keys = []
batches = red_data.get('batches')
inputs = red_data.get('inputs')
if batches:
for batch in batches:
keys.extend(red_get_mount_connectors_from_inputs(batch['inputs']))
elif inputs:
keys.extend(red_get_mount_connectors_from_inputs(inputs))
if not ignore_outputs:
outputs = red_data.get('outputs')
if batches:
for batch in batches:
batch_outputs = batch.get('outputs')
if batch_outputs:
keys.extend(red_get_mount_connectors_from_outputs(batch_outputs))
elif outputs:
keys.extend(red_get_mount_connectors_from_outputs(outputs))
return keys |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cleanup(connector_manager, red_data, tmp_dir):
""" Invokes the cleanup functions for all inputs. """ |
for key, arg in red_data['inputs'].items():
val = arg
if isinstance(arg, list):
for index, i in enumerate(arg):
if not isinstance(i, dict):
continue
# connector_class should be one of 'File' or 'Directory'
connector_class = i['class']
input_key = '{}_{}'.format(key, index)
path = os.path.join(tmp_dir, input_key)
connector_data = i['connector']
internal = {URL_SCHEME_IDENTIFIER: path}
if connector_class == 'File':
connector_manager.receive_cleanup(connector_data, input_key, internal)
elif connector_class == 'Directory':
connector_manager.receive_directory_cleanup(connector_data, input_key, internal)
elif isinstance(arg, dict):
# connector_class should be one of 'File' or 'Directory'
connector_class = arg['class']
input_key = key
path = os.path.join(tmp_dir, input_key)
connector_data = val['connector']
internal = {URL_SCHEME_IDENTIFIER: path}
if connector_class == 'File':
connector_manager.receive_cleanup(connector_data, input_key, internal)
elif connector_class == 'Directory':
connector_manager.receive_directory_cleanup(connector_data, input_key, internal)
try:
os.rmdir(tmp_dir)
except (OSError, FileNotFoundError):
# Maybe, raise a warning here, because not all connectors have cleaned up their contents correctly.
pass |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _execute_connector(connector_command, top_level_argument, *file_contents, listing=None):
""" Executes a connector by executing the given connector_command. The content of args will be the content of the files handed to the connector cli. :param connector_command: The connector command to execute. :param top_level_argument: The top level command line argument for the connector cli. (Like 'receive' or 'send_validate') :param file_contents: A dict of information handed over to the connector cli. :param listing: A listing to provide to the connector cli. Will be ignored if None. :return: A tuple containing the return code of the connector and the stderr of the command as str. """ |
# create temp_files for every file_content
temp_files = []
for file_content in file_contents:
if file_content is None:
continue
tmp_file = tempfile.NamedTemporaryFile('w')
json.dump(file_content, tmp_file)
tmp_file.flush()
temp_files.append(tmp_file)
tmp_listing_file = None
if listing:
tmp_listing_file = tempfile.NamedTemporaryFile('w')
json.dump(listing, tmp_listing_file)
tmp_listing_file.flush()
command = [connector_command, top_level_argument]
command.extend([t.name for t in temp_files])
if tmp_listing_file:
command.append('--listing {}'.format(tmp_listing_file.name))
result = execute(' '.join(command))
# close temp_files
for temp_file in temp_files:
temp_file.close()
if tmp_listing_file:
tmp_listing_file.close()
return result['returnCode'], result['stdErr'] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def directory_listing_content_check(directory_path, listing):
""" Checks if a given listing is present under the given directory path. :param directory_path: The path to the base directory :param listing: The listing to check :return: None if no errors could be found, otherwise a string describing the error """ |
if listing:
for sub in listing:
path = os.path.join(directory_path, sub['basename'])
if sub['class'] == 'File':
if not os.path.isfile(path):
return 'listing contains "{}" but this file could not be found on disk.'.format(path)
            elif sub['class'] == 'Directory':
                if not os.path.isdir(path):
                    return 'listing contains "{}" but this directory could not be found on disk'.format(path)
                sub_listing = sub.get('listing')
                if sub_listing:
                    # Recurse into the sub-listing, but keep checking the
                    # remaining entries if the sub-listing validates.
                    error = ConnectorManager.directory_listing_content_check(path, sub_listing)
                    if error is not None:
                        return error
return None |
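A short usage sketch for the check above; the base directory and listing contents are illustrative:

listing = [
    {'class': 'File', 'basename': 'README.md'},
    {'class': 'Directory', 'basename': 'src', 'listing': [
        {'class': 'File', 'basename': 'main.py'},
    ]},
]
error = ConnectorManager.directory_listing_content_check('/tmp/job_input', listing)
if error is not None:
    print(error)  # describes the first missing file or directory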
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def match_gpus(available_devices, requirements):
""" Determines sufficient GPUs for the given requirements and returns a list of GPUDevices. If there aren't sufficient GPUs a InsufficientGPUException is thrown. :param available_devices: A list of GPUDevices :param requirements: A list of GPURequirements :return: A list of sufficient devices """ |
if not requirements:
return []
if not available_devices:
raise InsufficientGPUError("No GPU devices available, but {} devices required.".format(len(requirements)))
available_devices = available_devices.copy()
used_devices = []
for req in requirements:
dev = search_device(req, available_devices)
if dev:
used_devices.append(dev)
available_devices.remove(dev)
else:
raise InsufficientGPUError("Not all GPU requirements could be fulfilled.")
return used_devices |
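A hedged usage sketch; the GPUDevice constructor shown here is a guess, since the matching code above only relies on the 'vram' attribute of each device:

devices = [GPUDevice(vram=8192), GPUDevice(vram=4096)]  # hypothetical constructor
requirements = [GPURequirement(min_vram=6000)]
chosen = match_gpus(devices, requirements)  # selects the 8192 MB device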
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_gpu_requirements(gpus_reqs):
""" Extracts the GPU from a dictionary requirements as list of GPURequirements. :return: A list of GPURequirements """ |
requirements = []
if gpus_reqs:
if type(gpus_reqs) is dict:
count = gpus_reqs.get('count')
if count:
for i in range(count):
requirements.append(GPURequirement())
elif type(gpus_reqs) is list:
for gpu_req in gpus_reqs:
requirements.append(GPURequirement(min_vram=gpu_req['minVram']))
return requirements
else:
# If no requirements are supplied
return [] |
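Two illustrative inputs for the extraction function above, matching the dict and list branches respectively:

get_gpu_requirements({'count': 2})           # -> two default GPURequirement objects
get_gpu_requirements([{'minVram': 4096}])    # -> [GPURequirement(min_vram=4096)]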
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_nvidia_environment_variables(environment, gpu_ids):
""" Updates a dictionary containing environment variables to setup Nvidia-GPUs. :param environment: The environment variables to update :param gpu_ids: A list of GPU ids """ |
if gpu_ids:
nvidia_visible_devices = ""
for gpu_id in gpu_ids:
nvidia_visible_devices += "{},".format(gpu_id)
environment["NVIDIA_VISIBLE_DEVICES"] = nvidia_visible_devices |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_sufficient(self, device):
""" Returns whether the device is sufficient for this requirement. :param device: A GPUDevice instance. :type device: GPUDevice :return: True if the requirement is fulfilled otherwise False """ |
sufficient = True
if (self.min_vram is not None) and (device.vram < self.min_vram):
sufficient = False
return sufficient |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cache_location():
    '''Cross-platform placement of cached files''' |
plat = platform.platform()
log.debug('Platform read as: {0}'.format(plat))
if plat.startswith('Windows'):
log.debug('Windows platform detected')
return os.path.join(os.environ['APPDATA'], 'OpenAccess_EPUB')
elif plat.startswith('Darwin'):
log.debug('Mac platform detected')
elif plat.startswith('Linux'):
log.debug('Linux platform detected')
else:
log.warning('Unhandled platform for cache_location')
#This code is written for Linux and Mac, don't expect success for others
path = os.path.expanduser('~')
if path == '~':
path = os.path.expanduser('~user')
if path == '~user':
log.critical('Could not resolve the correct cache location')
sys.exit('Could not resolve the correct cache location')
cache_loc = os.path.join(path, '.OpenAccess_EPUB')
log.debug('Cache located: {0}'.format(cache_loc))
return cache_loc |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def evaluate_relative_path(working=os.getcwd(), relative=''):
""" This function receives two strings representing system paths. The first is the working directory and it should be an absolute path. The second is the relative path and it should not be absolute. This function will render an OS-appropriate absolute path, which is the normalized path from working to relative. """ |
return os.path.normpath(os.path.join(working, relative)) |
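An illustrative call on a POSIX system (paths are hypothetical):

evaluate_relative_path('/home/user/project', '../data/input.xml')
# -> '/home/user/data/input.xml'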
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_absolute_path(some_path):
""" This function will return an appropriate absolute path for the path it is given. If the input is absolute, it will return unmodified; if the input is relative, it will be rendered as relative to the current working directory. """ |
if os.path.isabs(some_path):
return some_path
else:
return evaluate_relative_path(os.getcwd(), some_path) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def file_root_name(name):
""" Returns the root name of a file from a full file path. It will not raise an error if the result is empty, but an warning will be issued. """ |
base = os.path.basename(name)
root = os.path.splitext(base)[0]
if not root:
warning = 'file_root_name returned an empty root name from \"{0}\"'
log.warning(warning.format(name))
return root |
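Two illustrative calls for the helper above:

file_root_name('/path/to/article.xml')  # -> 'article'
file_root_name('archive.tar.gz')        # -> 'archive.tar' (only the last extension is stripped)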
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def files_with_ext(extension, directory='.', recursive=False):
""" Generator function that will iterate over all files in the specified directory and return a path to the files which possess a matching extension. You should include the period in your extension, and matching is not case sensitive: '.xml' will also match '.XML' and vice versa. An empty string passed to extension will match extensionless files. """ |
if recursive:
log.info('Recursively searching {0} for files with extension "{1}"'.format(directory, extension))
for dirname, subdirnames, filenames in os.walk(directory):
for filename in filenames:
filepath = os.path.join(dirname, filename)
_root, ext = os.path.splitext(filepath)
if extension.lower() == ext.lower():
yield filepath
else:
log.info('Looking in {0} for files with extension: "{1}"'.format(directory, extension))
for name in os.listdir(directory):
filepath = os.path.join(directory, name)
if not os.path.isfile(filepath): # Skip non-files
continue
_root, ext = os.path.splitext(filepath)
if extension.lower() == ext.lower():
yield filepath |
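A short usage sketch; the directory name is illustrative:

for xml_file in files_with_ext('.xml', directory='input', recursive=True):
    print(xml_file)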
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def epubcheck(epubname, config=None):
""" This method takes the name of an epub file as an argument. This name is the input for the java execution of a locally installed epubcheck-.jar. The location of this .jar file is configured in config.py. """ |
if config is None:
config = load_config_module()
r, e = os.path.splitext(epubname)
if not e:
log.warning('Missing file extension, appending ".epub"')
e = '.epub'
epubname = r + e
elif not e == '.epub':
log.warning('File does not have ".epub" extension, appending it')
epubname += '.epub'
subprocess.call(['java', '-jar', config.epubcheck_jarfile, epubname]) |