<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set(self, instance, value, **kw):
""" Set Analyses to an AR :param instance: Analysis Request :param value: Single AS UID or a list of dictionaries containing AS UIDs :param kw: Additional keyword parameters passed to the field """ |
if not isinstance(value, (list, tuple)):
value = [value]
uids = []
for item in value:
uid = None
if isinstance(item, dict):
uid = item.get("uid")
if api.is_uid(item):
uid = item
if uid is None:
logger.warn("Could extract UID of value")
continue
uids.append(uid)
analyses = map(api.get_object_by_uid, uids)
self._set(instance, analyses, **kw) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set(self, instance, value, **kw):
# noqa """Set the value of the uid reference field """ |
ref = []
# The value is an UID
if api.is_uid(value):
ref.append(value)
# The value is a dictionary, get the UIDs.
if u.is_dict(value):
ref = ref.append(value.get("uid"))
# The value is already an object
if api.is_at_content(value):
ref.append(value)
# The value is a list
if u.is_list(value):
for item in value:
# uid
if api.is_uid(item):
ref.append(item)
# dict (catalog query)
elif u.is_dict(item):
# If there is a UID in the dict, just use it.
uid = item.get('uid', None)
if uid:
ref.append(uid)
# Handle non multi valued fields
if not self.multi_valued:
if len(ref) > 1:
raise ValueError("Multiple values given for single valued "
"field {}".format(repr(self.field)))
else:
ref = ref[0]
return self._set(instance, ref, **kw) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_dict(self):
""" extract the data of the content and return it as a dictionary """ |
# 1. extract the schema fields
data = self.extract_fields()
# 2. include custom key-value pairs listed in the mapping dictionary
for key, attr in self.attributes.iteritems():
if key in self.ignore:
continue # skip ignores
# fetch the mapped attribute
value = getattr(self.context, attr, None)
if value is None:
value = getattr(self, attr, None)
# handle function calls
if callable(value):
value = value()
# map the value to the given key from the mapping
data[key] = api.to_json_value(self.context, key, value)
return data |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_fields(self):
"""Extract the given fieldnames from the object :returns: Schema name/value mapping :rtype: dict """ |
# get the proper data manager for the object
dm = IDataManager(self.context)
# filter out ignored fields
fieldnames = filter(lambda name: name not in self.ignore, self.keys)
# schema mapping
out = dict()
for fieldname in fieldnames:
try:
# get the field value with the data manager
fieldvalue = dm.json_data(fieldname)
# https://github.com/collective/plone.jsonapi.routes/issues/52
# -> skip restricted fields
except Unauthorized:
logger.debug("Skipping restricted field '%s'" % fieldname)
continue
except ValueError:
logger.debug("Skipping invalid field '%s'" % fieldname)
continue
out[fieldname] = api.to_json_value(self.context, fieldname, fieldvalue)
return out |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _x_get_physical_path(self):
"""Generate the physical path """ |
path = self.context.getPath()
portal_path = api.get_path(api.get_portal())
if portal_path not in path:
return "{}/{}".format(portal_path, path)
return path |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def snake2ucamel(value):
"""Casts a snake_case string to an UpperCamelCase string.""" |
UNDER, LETTER, OTHER = object(), object(), object()
def group_key_function(char):
if char == "_":
return UNDER
if char in string.ascii_letters:
return LETTER
return OTHER
def process_group(idx, key, chars):
if key is LETTER:
return "".join([chars[0].upper()] + chars[1:])
if key is OTHER \
or len(chars) != 1 \
or idx in [0, last] \
or LETTER not in (groups[idx-1][1], groups[idx+1][1]):
return "".join(chars)
return ""
raw_groups_gen = itertools.groupby(value, key=group_key_function)
groups = [(idx, key, list(group_gen))
for idx, (key, group_gen) in enumerate(raw_groups_gen)]
last = len(groups) - 1
return "".join(itertools.starmap(process_group, groups)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ucamel_method(func):
""" Decorator to ensure the given snake_case method is also written in UpperCamelCase in the given namespace. That was mainly written to avoid confusion when using wxPython and its UpperCamelCaseMethods. """ |
frame_locals = inspect.currentframe().f_back.f_locals
frame_locals[snake2ucamel(func.__name__)] = func
return func |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_plain_text(fname, encoding="utf-8"):
"""Reads a file as a list of strings.""" |
with io.open(fname, encoding=encoding) as f:
result = list(f)
if result:
if result[-1][-1:] == "\n":
result.append("\n")
else:
result[-1] += "\n"
return [line[:-1] for line in result]
return [] |
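An illustrative round trip (with a hypothetical temp file) showing the trailing-newline handling: a file that ends with "\n" yields a final empty entry, while one that does not ends cleanly:

import io
import os
import tempfile

fname = os.path.join(tempfile.mkdtemp(), "demo.txt")  # hypothetical path
with io.open(fname, "w", encoding="utf-8") as f:
    f.write(u"first\nsecond\n")
print(read_plain_text(fname))  # -> [u'first', u'second', u'']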
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def maxcardsearch(A, ve = None):
""" Maximum cardinality search ordering of a sparse chordal matrix. Returns the maximum cardinality search ordering of a symmetric chordal matrix :math:`A`. Only the lower triangular part of :math:`A` is accessed. The maximum cardinality search ordering is a perfect elimination ordering in the factorization :math:`PAP^T = LL^T`. The optional argument `ve` is the index of the last vertex to be eliminated (the default value is n-1). :param A: :py:class:`spmatrix` :param ve: integer between 0 and `A.size[0]`-1 (optional) """ |
n = A.size[0]
assert A.size[1] == n, "A must be a square matrix"
assert type(A) is spmatrix, "A must be a sparse matrix"
if ve is None:
ve = n-1
else:
assert type(ve) is int and 0<=ve<n,\
"ve must be an integer between 0 and A.size[0]-1"
As = symmetrize(A)
cp,ri,_ = As.CCS
# permutation vector
p = matrix(0,(n,1))
# weight array
w = matrix(0,(n,1))
max_w = 0
S = [list(range(ve))+list(range(ve+1,n))+[ve]] + [[] for i in range(n-1)]
for i in range(n-1,-1,-1):
while True:
if len(S[max_w]) > 0:
v = S[max_w].pop()
if w[v] >= 0: break
else:
max_w -= 1
p[i] = v
w[v] = -1 # set w[v] = -1 to mark that node v has been numbered
# increase weights for all unnumbered neighbors
for r in ri[cp[v]:cp[v+1]]:
if w[r] >= 0:
w[r] += 1
S[w[r]].append(r) # bump r up to S[w[r]]
max_w = max(max_w,w[r])
return p |
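A minimal usage sketch, assuming cvxopt is installed and that maxcardsearch (and the symmetrize helper it calls) are importable from this module's package. A tridiagonal pattern is chordal, so the returned ordering is a perfect elimination ordering:

from cvxopt import spmatrix

n = 5
# lower triangle of a tridiagonal (hence chordal) sparsity pattern
I = list(range(n)) + list(range(1, n))
J = list(range(n)) + list(range(n - 1))
A = spmatrix(1.0, I, J, (n, n))
p = maxcardsearch(A)
print(list(p))  # a permutation of 0..n-1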
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _setup_logging():
"""Setup logging to log to nowhere by default. For details, see: http://docs.python.org/3/howto/logging.html#library-config Internal function. """ |
import logging
logger = logging.getLogger('spotify-connect')
handler = logging.NullHandler()
logger.addHandler(handler) |
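The NullHandler above only keeps the library silent by default; an application that wants log output opts in explicitly with standard logging configuration:

import logging

# enable the library's debug logging from the application side
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('spotify-connect').setLevel(logging.DEBUG)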
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serialized(f):
"""Decorator that serializes access to all decorated functions. The decorator acquires pyspotify's single global lock while calling any wrapped function. It is used to serialize access to: - All calls to functions on :attr:`spotify.lib`. - All code blocks working on pointers returned from functions on :attr:`spotify.lib`. - All code blocks working on other internal data structures in pyspotify. Together this is what makes pyspotify safe to use from multiple threads and enables convenient features like the :class:`~spotify.EventLoop`. Internal function. """ |
import functools
@functools.wraps(f)
def wrapper(*args, **kwargs):
with _lock:
return f(*args, **kwargs)
if not hasattr(wrapper, '__wrapped__'):
# Workaround for Python < 3.2
wrapper.__wrapped__ = f
return wrapper |
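A standalone sketch of the same pattern, with a module-level lock standing in for pyspotify's global _lock (the sketch assumes serialized and _lock live in the same namespace):

import threading

_lock = threading.RLock()

@serialized
def touch_shared_state():
    # the body runs with _lock held, so concurrent callers are serialized
    pass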
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_pocketmod_pages(elements, page_edge_bottom=True, first_page_vertical=True):
""" Creates one or more managers that wraps the given elements into one or more Pocket Mod-style page sets. Each manager in the list that is returned corresponds to one page. This imposer is designed to work with portrait oriented content pages, laid out onto a landscape oriented page. Arguments: ``elements`` The elements to lay out. PocketMod uses sheets with 8 pages on them, but you can pass in fewer elements - additional space will be left blank. The number of pages output is just the ceiling of the number of pages passed in divided by 8. ``page_edge_bottom`` If true the pages should be arranged so that, when folded, the bottom of each page touches the edge of the sheet of paper. This is normal, because the edge of the paper is where a normal printer blank-margin is located, and the bottom edge of a page usually has the largest margin. ``first_page_vertical`` If true then the fold on the first page will be vertical. Each 'page' in the book has either a fold on the outside edge or on one of the two horizontal edges (the top edge if page_edge_bottom is set, the bottom otherwise). The horizontal fold keeps the page together more strongly, so is normally used for the first page. The original Pocket Mod software has the first page with a horizontal fold. The returned page layouts can be given to a PageLM for rendering onto individual pages of output. This method isn't a layout manager in its own right. """ |
pages = {
(False, False):[2,3,4,5,1,8,7,6],
(False, True):[4,5,6,7,3,2,1,8],
(True, False):[5,4,3,2,6,7,8,1],
(True, True):[7,6,5,4,8,1,2,3]
}[page_edge_bottom, first_page_vertical]
output = []
num_pages = len(elements)
for index in range(0, num_pages, 8):
sglm = grid.SimpleGridLM(4, 2)
for cell_index, cell in enumerate(pages):
if index + cell - 1 < num_pages:
element = elements[index+cell-1]
if (cell_index > 3) != page_edge_bottom:
element = transform.RotateLM(2, element)
sglm.add_element(element)
else:
sglm.add_element(None)
output.append(sglm)
return output |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def close(self):
""" Close the internal epoll file descriptor if it isn't closed :raises OSError: If the underlying ``close(2)`` fails. The error message matches those found in the manual page. """ |
with self._close_lock:
epfd = self._epfd
if epfd >= 0:
self._epfd = -1
close(epfd) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fromfd(cls, fd):
""" Create a new epoll object from a given file descriptor :param fd: A pre-made file descriptor obtained from ``epoll_create(2)`` or ``epoll_create1(2)`` :raises ValueError: If fd is not a valid file descriptor :returns: A new epoll object .. note:: If the passed descriptor is incorrect then various methods will fail and raise OSError with an appropriate message. """ |
if fd < 0:
_err_closed()
self = cls.__new__(cls)
object.__init__(self)
self._epfd = fd
return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register(self, fd, eventmask=None):
""" Register a new descriptor :param fd: The descriptor to register. :param eventmask: Bit-mask of events that will be monitored. By default EPOLLIN, EPOLLOUT and EPOLLPRI are used. Note that EPOLLHUP is implicit and doesn't need to be provided. :raises ValueError: If :meth:`closed()` is True :raises OSError: If the underlying ``epoll_ctl(2)`` fails. The error message matches those found in the manual page. """ |
if self._epfd < 0:
_err_closed()
if eventmask is None:
eventmask = EPOLLIN | EPOLLOUT | EPOLLPRI
ev = epoll_event()
ev.events = eventmask
ev.data.fd = fd
epoll_ctl(self._epfd, EPOLL_CTL_ADD, fd, byref(ev)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unregister(self, fd):
""" Unregister a previously registered descriptor :param fd: The descriptor to unregister :raises ValueError: If :meth:`closed()` is True :raises OSError: If the underlying ``epoll_ctl(2)`` fails. The error message matches those found in the manual page. .. note:: For feature parity with Python 3.4, unlike what ``epoll_ctl(2)`` would do, we are silently ignoring ``EBADF`` which is raised if """ |
if self._epfd < 0:
_err_closed()
ev = epoll_event()
try:
epoll_ctl(self._epfd, EPOLL_CTL_DEL, fd, byref(ev))
except OSError as exc:
# Allow fd to be closed, matching Python 3.4
if exc.errno != EBADF:
raise |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def modify(self, fd, eventmask):
""" Change the bit-mask of events associated with a previously-registered descriptor. :param fd: The descriptor to modify. :param eventmask: New bit-mask of events that will be monitored. :raises ValueError: If :meth:`closed()` is True :raises OSError: If the underlying ``epoll_ctl(2)`` fails. The error message matches those found in the manual page. """ |
if self._epfd < 0:
_err_closed()
ev = epoll_event()
ev.events = eventmask
ev.data.fd = fd
epoll_ctl(self._epfd, EPOLL_CTL_MOD, fd, byref(ev)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def poll(self, timeout=-1, maxevents=-1):
""" Poll for events :param timeout: The amount of seconds to wait for events before giving up. The default value, -1, represents infinity. Note that unlike the underlying ``epoll_wait()`` timeout is a fractional number representing **seconds**. :param maxevents: The maximum number of events to report. The default is a reasonably-sized maximum, identical to the one selected by Python 3.4. :returns: A list of (fd, events) that were reported or an empty list if the timeout elapsed. :raises ValueError: If :meth:`closed()` is True :raises OSError: If the underlying ``epoll_wait(2)`` fails. The error message matches those found in the manual page. """ |
if self._epfd < 0:
_err_closed()
if timeout != -1:
# 1000 because epoll_wait(2) uses milliseconds
timeout = int(timeout * 1000)
if maxevents == -1:
maxevents = FD_SETSIZE - 1
events = (epoll_event * maxevents)()
num_events = epoll_wait(
self._epfd, cast(byref(events), POINTER(epoll_event)),
maxevents, timeout)
return [(events[i].data.fd, events[i].events)
for i in range(num_events)] |
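A hedged usage sketch of the class these methods belong to; the Epoll constructor name is an assumption (the real class presumably wraps epoll_create1(2)), and EPOLLIN is the module-level constant used above:

import os

r, w = os.pipe()
ep = Epoll()                 # hypothetical constructor name
ep.register(r, EPOLLIN)
os.write(w, b"ping")
print(ep.poll(timeout=1.0))  # -> [(r, EPOLLIN)] once data arrives
ep.unregister(r)
ep.close()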
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(self, schema_str, xml_valid):
"""Compare the valid information on an xml from given schema. :param str schema_str: content string from schema file. :param str xml_valid: content string from xml file. :returns: If it is Valid or Not. :rtype: bool """ |
# TODO: be able to get doc for error given an xsd.
# Changed path to allow XSDs that import other XSDs from the same
# library, so the SAT page is not queried each time a new XML is
# generated.
with change_path():
path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'templates')
os.chdir(path)
schema_root = etree.parse(StringIO(schema_str))
schema = etree.XMLSchema(schema_root)
try:
tree = etree.parse(StringIO(xml_valid.encode('UTF-8')))
schema.assertValid(tree)
except etree.DocumentInvalid as ups:
self.ups = ups
finally:
if self.ups:
self.valid = False
else:
self.valid = True
return self.valid |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_xml(self):
"""Set document xml just rendered already validated against xsd to be signed. :params boolean debug_mode: Either if you want the rendered template to be saved either it is valid or not with the given schema. :returns boolean: Either was valid or not the generated document. """ |
cached = StringIO()
document = u''
try:
document = self.template.render(inv=self)
except UndefinedError as ups:
self.ups = ups
# TODO: The cleanup should be called here, 'Just before the validation'.
valid = self.validate(self.schema, document)
self.document = document
if valid:
document = etree.XML(document)
self.document = etree.tostring(document,
pretty_print=True,
xml_declaration=True,
encoding='utf-8')
# TODO: When the document is generated, this should not fail either.
# Caching just when valid then.
cached.write(self.document is not None and self.document or u'')
cached.seek(0)
self.document_path = cached |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_record(uid=None):
"""Get a single record """ |
obj = None
if uid is not None:
obj = get_object_by_uid(uid)
else:
obj = get_object_by_request()
if obj is None:
fail(404, "No object found")
complete = req.get_complete(default=_marker)
if complete is _marker:
complete = True
items = make_items_for([obj], complete=complete)
return u.first(items) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_batched(portal_type=None, uid=None, endpoint=None, **kw):
"""Get batched results """ |
# fetch the catalog results
results = get_search_results(portal_type=portal_type, uid=uid, **kw)
# fetch the batch params from the request
size = req.get_batch_size()
start = req.get_batch_start()
# check for existing complete flag
complete = req.get_complete(default=_marker)
if complete is _marker:
# if the uid is given, get the complete information set
complete = uid and True or False
# return a batched record
return get_batch(results, size, start, endpoint=endpoint,
complete=complete) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_info(brain_or_object, endpoint=None, complete=False):
"""Extract the data from the catalog brain or object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :param endpoint: The named URL endpoint for the root of the items :type endpoint: str/unicode :param complete: Flag to wake up the object and fetch all data :type complete: bool :returns: Data mapping for the object/catalog brain :rtype: dict """ |
# also extract the brain data for objects
if not is_brain(brain_or_object):
brain_or_object = get_brain(brain_or_object)
if brain_or_object is None:
logger.warn("Couldn't find/fetch brain of {}".format(brain_or_object))
return {}
complete = True
# When querying uid catalog we have to be sure that we skip the objects
# used to relate two or more objects
if is_relationship_object(brain_or_object):
logger.warn("Skipping relationship object {}".format(repr(brain_or_object)))
return {}
# extract the data from the initial object with the proper adapter
info = IInfo(brain_or_object).to_dict()
# update with url info (always included)
url_info = get_url_info(brain_or_object, endpoint)
info.update(url_info)
# include the parent url info
parent = get_parent_info(brain_or_object)
info.update(parent)
# add the complete data of the object if requested
# -> requires to wake up the object if it is a catalog brain
if complete:
# ensure we have a full content object
obj = api.get_object(brain_or_object)
# get the compatible adapter
adapter = IInfo(obj)
# update the data set with the complete information
info.update(adapter.to_dict())
# update the data set with the workflow information
# -> only possible if `?complete=yes&workflow=yes`
if req.get_workflow(False):
info.update(get_workflow_info(obj))
# # add sharing data if the user requested it
# # -> only possible if `?complete=yes`
# if req.get_sharing(False):
# sharing = get_sharing_info(obj)
# info.update({"sharing": sharing})
return info |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_parent_info(brain_or_object, endpoint=None):
"""Generate url information for the parent object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :param endpoint: The named URL endpoint for the root of the items :type endpoint: str/unicode :returns: URL information mapping :rtype: dict """ |
# special case for the portal object
if is_root(brain_or_object):
return {}
# get the parent object
parent = get_parent(brain_or_object)
portal_type = get_portal_type(parent)
resource = portal_type_to_resource(portal_type)
# fall back if no endpoint specified
if endpoint is None:
endpoint = get_endpoint(parent)
return {
"parent_id": get_id(parent),
"parent_uid": get_uid(parent),
"parent_url": url_for(endpoint, resource=resource, uid=get_uid(parent))
} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_children_info(brain_or_object, complete=False):
"""Generate data items of the contained contents :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :param complete: Flag to wake up the object and fetch all data :type complete: bool :returns: info mapping of contained content items :rtype: list """ |
# fetch the contents (if folderish)
children = get_contents(brain_or_object)
def extract_data(brain_or_object):
return get_info(brain_or_object, complete=complete)
items = map(extract_data, children)
return {
"children_count": len(items),
"children": items
} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_file_info(obj, fieldname, default=None):
"""Extract file data from a file field :param obj: Content object :type obj: ATContentType/DexterityContentType :param fieldname: Schema name of the field :type fieldname: str/unicode :returns: File data mapping :rtype: dict """ |
# extract the file field from the object if omitted
field = get_field(obj, fieldname)
# get the value with the fieldmanager
fm = IFieldManager(field)
# return None if we have no file data
if fm.get_size(obj) == 0:
return None
out = {
"content_type": fm.get_content_type(obj),
"filename": fm.get_filename(obj),
"download": fm.get_download_url(obj),
}
# only return file data if requested (?filedata=yes)
if req.get_filedata(False):
data = fm.get_data(obj)
out["data"] = data.encode("base64")
return out |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_workflow_info(brain_or_object, endpoint=None):
"""Generate workflow information of the assigned workflows :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :param endpoint: The named URL endpoint for the root of the items :type endpoint: str/unicode :returns: Workflows info :rtype: dict """ |
# ensure we have a full content object
obj = get_object(brain_or_object)
# get the portal workflow tool
wf_tool = get_tool("portal_workflow")
# the assigned workflows of this object
workflows = wf_tool.getWorkflowsFor(obj)
# no workflows assigned -> return
if not workflows:
return []
def to_transition_info(transition):
""" return the transition information
"""
return {
"title": transition["title"],
"value": transition["id"],
"display": transition["description"],
"url": transition["url"],
}
def to_review_history_info(review_history):
""" return the transition information
"""
converted = DT2dt(review_history.get('time')).\
strftime("%Y-%m-%d %H:%M:%S")
review_history['time'] = converted
return review_history
out = []
for workflow in workflows:
# get the status info of the current state (dictionary)
info = wf_tool.getStatusOf(workflow.getId(), obj)
if info is None:
continue
# get the current review_status
review_state = info.get("review_state", None)
inactive_state = info.get("inactive_state", None)
cancellation_state = info.get("cancellation_state", None)
worksheetanalysis_review_state = info.get("worksheetanalysis_review_state", None)
state = review_state or \
inactive_state or \
cancellation_state or \
worksheetanalysis_review_state
if state is None:
logger.warn("No state variable found for {} -> {}".format(
repr(obj), info))
continue
# get the wf status object
status_info = workflow.states[state]
# get the title of the current status
status = status_info.title
# get the transition informations
transitions = map(to_transition_info, wf_tool.getTransitionsFor(obj))
# get the review history
rh = map(to_review_history_info,
workflow.getInfoFor(obj, 'review_history', ''))
out.append({
"workflow": workflow.getId(),
"status": status,
"review_state": state,
"transitions": transitions,
"review_history": rh,
})
return {"workflow_info": out} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search(**kw):
"""Search the catalog adapter :returns: Catalog search results :rtype: iterable """ |
portal = get_portal()
catalog = ICatalog(portal)
catalog_query = ICatalogQuery(catalog)
query = catalog_query.make_query(**kw)
return catalog(query) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_search_results(portal_type=None, uid=None, **kw):
"""Search the catalog and return the results :returns: Catalog search results :rtype: iterable """ |
# If we have a UID, return the object immediately
if uid is not None:
logger.info("UID '%s' found, returning the object immediately" % uid)
return u.to_list(get_object_by_uid(uid))
# allow searching for the Plone Site by portal_type
include_portal = False
if u.to_string(portal_type) == "Plone Site":
include_portal = True
# The request may contain a list of portal_types, e.g.
# `?portal_type=Document&portal_type=Plone Site`
if "Plone Site" in u.to_list(req.get("portal_type")):
include_portal = True
# Build and execute a catalog query
results = search(portal_type=portal_type, uid=uid, **kw)
if include_portal:
results = list(results) + u.to_list(get_portal())
return results |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_brain(brain_or_object):
"""Return a ZCatalog brain for the object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: True if the object is a catalog brain :rtype: bool """ |
if is_brain(brain_or_object):
return brain_or_object
if is_root(brain_or_object):
return brain_or_object
# fetch the brain by UID
uid = get_uid(brain_or_object)
uc = get_tool("uid_catalog")
results = uc({"UID": uid}) or search(query={'UID': uid})
if len(results) == 0:
return None
if len(results) > 1:
fail(500, "More than one object with UID={} found in portal_catalog".format(uid))
return results[0] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_field(brain_or_object, name, default=None):
"""Return the named field """ |
fields = get_fields(brain_or_object)
return fields.get(name, default) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_behaviors(brain_or_object):
"""Iterate over all behaviors that are assigned to the object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: Behaviors :rtype: list """ |
obj = get_object(brain_or_object)
if not is_dexterity_content(obj):
fail(400, "Only Dexterity contents can have assigned behaviors")
assignable = IBehaviorAssignable(obj, None)
if not assignable:
return {}
out = {}
for behavior in assignable.enumerateBehaviors():
for name, field in getFields(behavior.interface).items():
out[name] = field
return out |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_path(path):
"""Checks if the passed in path is a valid Path within the portal :param path: The path to check :type uid: string :return: True if the path is a valid path within the portal :rtype: bool """ |
if not isinstance(path, basestring):
return False
portal_path = get_path(get_portal())
if not path.startswith(portal_path):
return False
obj = get_object_by_path(path)
if obj is None:
return False
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_json_value(obj, fieldname, value=_marker, default=None):
"""JSON save value encoding :param obj: Content object :type obj: ATContentType/DexterityContentType :param fieldname: Schema name of the field :type fieldname: str/unicode :param value: The field value :type value: depends on the field type :returns: JSON encoded field value :rtype: field dependent """ |
# This function bridges the value of the field to a probably more complex
# JSON structure to return to the client.
# extract the value from the object if omitted
if value is _marker:
value = IDataManager(obj).json_data(fieldname)
# convert objects
if isinstance(value, ImplicitAcquisitionWrapper):
return get_url_info(value)
# check if the value is callable
if callable(value):
value = value()
# convert dates
if is_date(value):
return to_iso_date(value)
# check if the value is JSON serializable
if not is_json_serializable(value):
logger.warn("Output {} is not JSON serializable".format(repr(value)))
return default
return value |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_date(thing):
"""Checks if the given thing represents a date :param thing: The object to check if it is a date :type thing: arbitrary object :returns: True if we have a date object :rtype: bool """ |
# known date types
date_types = (datetime.datetime,
datetime.date,
DateTime)
return isinstance(thing, date_types) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_iso_date(date, default=None):
"""ISO representation for the date object :param date: A date object :type field: datetime/DateTime :returns: The ISO format of the date :rtype: str """ |
# not a date
if not is_date(date):
return default
# handle Zope DateTime objects
if isinstance(date, (DateTime)):
return date.ISO8601()
# handle python datetime objects
return date.isoformat() |
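A simple illustration with stdlib datetimes (Zope DateTime objects take the ISO8601() branch above; anything else falls back to the default):

import datetime

print(to_iso_date(datetime.date(2020, 1, 31)))   # -> '2020-01-31'
print(to_iso_date("not a date", default="n/a"))  # -> 'n/a'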
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_contents(brain_or_object):
"""Lookup folder contents for this object. :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: List of contained contents :rtype: list/Products.ZCatalog.Lazy.LazyMap """ |
# Nothing to do if the object is contentish
if not is_folderish(brain_or_object):
return []
# Returning objects (not brains) to make sure we do not miss any child.
# It may happen when children belong to different catalogs and not
# found on 'portal_catalog'.
ret = filter(lambda obj: api.is_object(obj),
api.get_object(brain_or_object).objectValues())
return ret |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_resource_mapping():
"""Map resources used in the routes to portal types :returns: Mapping of resource->portal_type :rtype: dict """ |
portal_types = get_portal_types()
resources = map(portal_type_to_resource, portal_types)
return dict(zip(resources, portal_types)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resource_to_portal_type(resource):
"""Converts a resource to a portal type :param resource: Resource name as it is used in the content route :type name: string :returns: Portal type name :rtype: string """ |
if resource is None:
return None
resource_mapping = get_resource_mapping()
portal_type = resource_mapping.get(resource.lower())
if portal_type is None:
logger.warn("Could not map the resource '{}' "
"to any known portal type".format(resource))
return portal_type |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_container_for(portal_type):
"""Returns the single holding container object of this content type :param portal_type: The portal type requested :type portal_type: string :returns: Folderish container where the portal type can be created :rtype: AT content object """ |
container_paths = config.CONTAINER_PATHS_FOR_PORTAL_TYPES
container_path = container_paths.get(portal_type)
if container_path is None:
return None
portal_path = get_path(get_portal())
return get_object_by_path("/".join([portal_path, container_path])) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_endpoint(brain_or_object, default=DEFAULT_ENDPOINT):
"""Calculate the endpoint for this object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: Endpoint for this object :rtype: string """ |
portal_type = get_portal_type(brain_or_object)
resource = portal_type_to_resource(portal_type)
# Try to get the right namespaced endpoint
endpoints = router.DefaultRouter.view_functions.keys()
if resource in endpoints:
return resource # exact match
endpoint_candidates = filter(lambda e: e.endswith(resource), endpoints)
if len(endpoint_candidates) == 1:
# only return the namespaced endpoint, if we have an exact match
return endpoint_candidates[0]
return default |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_object_by_record(record):
"""Find an object by a given record Inspects request the record to locate an object :param record: A dictionary representation of an object :type record: dict :returns: Found Object or None :rtype: object """ |
# nothing to do here
if not record:
return None
if record.get("uid"):
return get_object_by_uid(record["uid"])
if record.get("path"):
return get_object_by_path(record["path"])
if record.get("parent_path") and record.get("id"):
path = "/".join([record["parent_path"], record["id"]])
return get_object_by_path(path)
logger.warn("get_object_by_record::No object found! record='%r'" % record)
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_object_by_path(path):
"""Find an object by a given physical path :param path: The physical path of the object to find :type path: string :returns: Found Object or None :rtype: object """ |
# nothing to do here
if not isinstance(path, basestring):
return None
# path must be a string
path = str(path)
portal = get_portal()
portal_path = get_path(portal)
if path == portal_path:
return portal
if path.startswith(portal_path):
segments = path.split("/")
path = "/".join(segments[2:])
try:
return portal.restrictedTraverse(str(path))
except (KeyError, AttributeError):
fail(404, "No object could be found at {}".format(str(path))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_member_ids():
"""Return all member ids of the portal. """ |
pm = get_tool("portal_membership")
member_ids = pm.listMemberIds()
# Filter out None member ids (it should not be possible, but be safe)
return filter(lambda x: x, member_ids) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_objects(uid=None):
"""Find the object by its UID 1. get the object from the given uid 2. fetch objects specified in the request parameters 3. fetch objects located in the request body :param uid: The UID of the object to find :type uid: string :returns: List of found objects :rtype: list """ |
# The list of found objects
objects = []
# get the object by the given uid or try to find it by the request
# parameters
obj = get_object_by_uid(uid) or get_object_by_request()
if obj:
objects.append(obj)
else:
# no uid -> go through the record items
records = req.get_request_data()
for record in records:
# try to get the object by the given record
obj = get_object_by_record(record)
# no object found for this record
if obj is None:
continue
objects.append(obj)
return objects |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_target_container(portal_type, record):
"""Locates a target container for the given portal_type and record :param record: The dictionary representation of a content object :type record: dict :returns: folder which contains the object :rtype: object """ |
portal_type = portal_type or record.get("portal_type")
container = get_container_for(portal_type)
if container:
return container
parent_uid = record.pop("parent_uid", None)
parent_path = record.pop("parent_path", None)
target = None
# Try to find the target object
if parent_uid:
target = get_object_by_uid(parent_uid)
elif parent_path:
target = get_object_by_path(parent_path)
else:
fail(404, "No target UID/PATH information found")
if not target:
fail(404, "No target container found")
return target |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_object(container, portal_type, **data):
"""Creates an object slug :returns: The new created content object :rtype: object """ |
if "id" in data:
# always omit the id as senaite LIMS generates a proper one
id = data.pop("id")
logger.warn("Passed in ID '{}' omitted! Senaite LIMS "
"generates a proper ID for you" .format(id))
try:
# Special case for ARs
# => return immediately w/o update
if portal_type == "AnalysisRequest":
obj = create_analysisrequest(container, **data)
# Omit values which are already set through the helper
data = u.omit(data, "SampleType", "Analyses")
# Set the container as the client, as the AR lives in it
data["Client"] = container
# Standard content creation
else:
# we want just a minimum viable object and set the data later
obj = api.create(container, portal_type)
# obj = api.create(container, portal_type, **data)
except Unauthorized:
fail(401, "You are not allowed to create this content")
# Update the object with the given data, but omit the id
try:
update_object_with_data(obj, data)
except APIError:
# Failure in creation process, delete the invalid object
container.manage_delObjects(obj.id)
# reraise the error
raise
return obj |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_analysisrequest(container, **data):
"""Create a minimun viable AnalysisRequest :param container: A single folderish catalog brain or content object :type container: ATContentType/DexterityContentType/CatalogBrain """ |
container = get_object(container)
request = req.get_request()
# we need to resolve the SampleType to a full object
sample_type = data.get("SampleType", None)
if sample_type is None:
fail(400, "Please provide a SampleType")
# TODO We should handle the same values as in the DataManager for this field
# (UID, path, objects, dictionaries ...)
results = search(portal_type="SampleType", title=sample_type)
values = {
"Analyses": data.get("Analyses", []),
"SampleType": results and get_object(results[0]) or None,
}
return create_ar(container, request, values) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_object_with_data(content, record):
"""Update the content with the record data :param content: A single folderish catalog brain or content object :type content: ATContentType/DexterityContentType/CatalogBrain :param record: The data to update :type record: dict :returns: The updated content object :rtype: object :raises: APIError, :class:`~plone.jsonapi.routes.exceptions.APIError` """ |
# ensure we have a full content object
content = get_object(content)
# get the proper data manager
dm = IDataManager(content)
if dm is None:
fail(400, "Update for this object is not allowed")
# Iterate through record items
for k, v in record.items():
try:
success = dm.set(k, v, **record)
except Unauthorized:
fail(401, "Not allowed to set the field '%s'" % k)
except ValueError, exc:
fail(400, str(exc))
if not success:
logger.warn("update_object_with_data::skipping key=%r", k)
continue
logger.debug("update_object_with_data::field %r updated", k)
# Validate the entire content object
invalid = validate_object(content, record)
if invalid:
fail(400, u.to_json(invalid))
# do a wf transition
if record.get("transition", None):
t = record.get("transition")
logger.debug(">>> Do Transition '%s' for Object %s", t, content.getId())
do_transition_for(content, t)
# reindex the object
content.reindexObject()
return content |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_object(brain_or_object, data):
"""Validate the entire object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :param data: The sharing dictionary as returned from the API :type data: dict :returns: invalidity status :rtype: dict """ |
obj = get_object(brain_or_object)
# Call the validator of AT Content Types
if is_at_content(obj):
return obj.validate(data=data)
return {} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def deactivate_object(brain_or_object):
"""Deactivate the given object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: Nothing :rtype: None """ |
obj = get_object(brain_or_object)
# we do not want to delete the site root!
if is_root(obj):
fail(401, "Deactivating the Portal is not allowed")
try:
do_transition_for(brain_or_object, "deactivate")
except Unauthorized:
fail(401, "Not allowed to deactivate object '%s'" % obj.getId()) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_settings_by_keyword(keyword=None):
"""Get the settings associated to the specified keyword or, if keyword is None, get all the settings. :param keyword: settings to be retrieved :return: dictionary with the settings plus a key to identify from which keyword where retrieved. """ |
settings = []
if keyword is None:
# iterate over all the schemas to return all settings
for key, ischemas in CONTROLPANEL_INTERFACE_MAPPING.items():
settings_from_ifaces = map(get_settings_from_interface, ischemas)
settings_from_key = {k: v for d in settings_from_ifaces for k, v in d.items()}
settings.append({key: settings_from_key,
"api_url": url_for("senaite.jsonapi.v1.settings", key=key)})
return settings
# if keyword has value then get only the settings associated to the key
settings_from_ifaces = map(get_settings_from_interface, CONTROLPANEL_INTERFACE_MAPPING[keyword])
settings_from_key = {k: v for d in settings_from_ifaces for k, v in d.items()}
settings.append({keyword: settings_from_key,
"api_url": url_for("senaite.jsonapi.v1.settings", key=keyword)})
return settings |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_settings_from_interface(iface):
"""Get the configuration settings associated to a list of schema interfaces :param iface: The schema interface from which we want to get its fields :return: Dictionary with iface name as key and as value a dictionary with the setting names (keys) linked to that schema and its values. """ |
settings = {}
schema_id = iface.getName()
settings[schema_id] = {}
schema = getAdapter(api.get_portal(), iface)
for setting in getFieldNames(iface):
value = getattr(schema, setting, None)
if is_json_serializable(value):
settings[schema_id][setting] = value
return settings |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calculate_columns(self):
"""Assuming the number of rows is constant, work out the best number of columns to use.""" |
self.cols = int(math.ceil(len(self.elements) / float(self.rows))) |
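The ceiling division in isolation, for the record: for example, 10 elements laid out over 3 fixed rows need 4 columns:

import math

print(int(math.ceil(10 / float(3))))  # -> 4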
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_rule(self, start_col, start_row, end_col, end_row, width=0.5, color=(0,0,0)):
"""Adds a rule to the grid. The row and column numbers are those on the top left of the corresponding cell in the grid. So if the grid is 10x10, then the right hand edge of the grid will be column 10, and the bottom will be column 10. In other words there is one more rule-row and column than there are cell rows and columns.""" |
self.rules.append(
(start_col, start_row, end_col, end_row, width, color)
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compile_dimension_size(self, base_index, array, property, sized_elements):
"""Build one set of col widths or row heights.""" |
sort_index = base_index + 2
sized_elements.sort(key=lambda x: x[sort_index])
for element_data in sized_elements:
start, end = element_data[base_index], element_data[sort_index]
end += start
element, size = element_data[4:6]
# Find the total current size of the set
set_size = sum(array[start:end]) + (end-start-1)*self.margin
# Work out the extra space we need
extra_space_needed = getattr(size, property) - set_size
if extra_space_needed < 0: continue
# Distribute it among the entries
extra_space_each = extra_space_needed / (end-start)
for index in range(start, end):
array[index] += extra_space_each |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_minimum_size(self, data):
"""Finds the minimum size of the grid.""" |
# Get a list of elements with their sizes, so we don't have to
# recalculate them each time.
sized_elements = [
(col, row, cols, rows, element, element.get_minimum_size(data))
for col, row, cols, rows, element in self.elements
]
# Create the heights and widths for each cell.
self.col_widths = [0] * self.cols
self.row_heights = [0] * self.rows
self._compile_dimension_size(0, self.col_widths, 'x', sized_elements)
self._compile_dimension_size(1, self.row_heights, 'y', sized_elements)
# The final size is the total width and height
om = 2*self.outside_margin
return datatypes.Point(
sum(self.col_widths) + (self.cols-1)*self.margin + om,
sum(self.row_heights) + (self.rows-1)*self.margin + om
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render(self, rect, data):
"""Draws the cells in grid.""" |
size = self.get_minimum_size(data)
# Find how much extra space we have.
extra_width = rect.w - size.x
extra_height = rect.h - size.y
# Distribute the extra space into the correct rows and columns.
if self.scaling_col is None or not 0 <= self.scaling_col < self.cols:
width_per_col = extra_width / float(self.cols)
col_widths = [
width + width_per_col
for width in self.col_widths
]
else:
col_widths = self.col_widths[:]
col_widths[self.scaling_col] += extra_width
if self.scaling_row is None or not 0 <= self.scaling_row < self.rows:
height_per_row = extra_height / float(self.rows)
row_heights = [
height + height_per_row
for height in self.row_heights
]
else:
row_heights = self.row_heights[:]
row_heights[self.scaling_row] += extra_height
# Find the (start, end) positions of each row and column.
col_xs = []
last_x = rect.left + self.outside_margin
for width in col_widths:
col_xs.append((last_x, last_x + width))
last_x += width + self.margin
row_ys = []
last_y = rect.top - self.outside_margin
for height in row_heights:
row_ys.append((last_y, last_y - height))
last_y -= height + self.margin
# Now we can loop over the elements and have them rendered.
for col, row, cols, rows, element in self.elements:
x_start = col_xs[col][0]
y_start = row_ys[row][0]
x_end = col_xs[col+cols-1][1]
y_end = row_ys[row+rows-1][1]
element.render(datatypes.Rectangle(
x_start, y_end, x_end-x_start, y_start-y_end
), data)
# And finally we can draw the rules
def _get_value(array, index, sign):
"""Returns the value of the index in the given array, where
the array (like col_xs and row_ys), consists of start-end pairs
of values."""
if index <= 0:
# Special case, it is the start of the first range
return array[0][0]-self.outside_margin*sign
elif index >= len(array):
# Special case, it is the end of the last range
return array[-1][1]+self.outside_margin*sign
else:
# Otherwise it is the blend of a start and end.
return (array[index-1][1] + array[index][0])*0.5
for start_col, start_row, end_col, end_row, width, color in self.rules:
x_start = _get_value(col_xs, start_col, 1)
y_start = _get_value(row_ys, start_row, -1)
x_end = _get_value(col_xs, end_col, 1)
y_end = _get_value(row_ys, end_row, -1)
data['output'].line(
x_start, y_start, x_end, y_end,
stroke=color,
stroke_width=width
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_block(name, data, newline="\n"):
""" First block in a list of one line strings containing reStructuredText data. The result is as a joined string with the given newline, or a line generator if it's None. The BLOCK_START and BLOCK_END delimiters are selected with the given name and aren't included in the result. """ |
lines = itertools.dropwhile(not_eq(BLOCK_START % name), data)
gen = itertools.takewhile(not_eq(BLOCK_END % name), tail(lines))
return gen if newline is None else newline.join(gen) |
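A self-contained sketch of the behaviour; the not_eq/tail helpers and the delimiter formats below are assumptions standing in for this module's own definitions:

import itertools

BLOCK_START = ".. %s"       # assumed delimiter format
BLOCK_END = ".. %s end"     # assumed delimiter format

def not_eq(value):          # stand-in helper: predicate "not equal to value"
    return lambda el: el != value

def tail(iterable):         # stand-in helper: skip the first item
    it = iter(iterable)
    next(it, None)
    return it

data = ["intro", ".. summary", "first line", "second line",
        ".. summary end", "outro"]
print(get_block("summary", data))  # -> "first line\nsecond line"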
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def all_but_blocks(names, data, newline="\n", remove_empty_next=True, remove_comments=True):
""" Multiline string from a list of strings data, removing every block with any of the given names, as well as their delimiters. Removes the empty lines after BLOCK_END when ``remove_empty_next`` is True. Returns a joined string with the given newline, or a line generator if it's None. If desired, this function use ``commentless`` internally to remove the remaining comments. """ |
@allow_implicit_stop
def remove_blocks(name, iterable):
start, end = BLOCK_START % name, BLOCK_END % name
it = iter(iterable)
while True:
line = next(it)
while line != start:
yield line
line = next(it)
it = tail(itertools.dropwhile(not_eq(end), it))
if remove_empty_next:
it = itertools.dropwhile(lambda el: not el.strip(), it)
if isinstance(names, str):
names = [names]
processors = [functools.partial(remove_blocks, name) for name in names]
if remove_comments:
processors.append(commentless)
gen = functools.reduce(lambda result, func: func(result),
processors, data)
return gen if newline is None else newline.join(gen) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def commentless(data):
""" Generator that removes from a list of strings the double dot reStructuredText comments and its contents based on indentation, removing trailing empty lines after each comment as well. """ |
it = iter(data)
while True:
line = next(it)
while ":" in line or not line.lstrip().startswith(".."):
yield line
line = next(it)
indent = indent_size(line)
it = itertools.dropwhile(lambda el: indent_size(el) > indent
or not el.strip(), it) |
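An illustrative run; indent_size is this module's helper, for which lambda line: len(line) - len(line.lstrip()) would be a minimal stand-in, and the generator relies on the implicit StopIteration to finish (as on Python 2):

data = ["Title", "=====", "", ".. a comment", "   comment body",
        "", "Real text"]
print(list(commentless(data)))  # -> ['Title', '=====', '', 'Real text']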
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __tdfs(j, k, head, next, post, stack):
""" Depth-first search and postorder of a tree rooted at node j. """ |
top = 0
stack[0] = j
while (top >= 0):
p = stack[top]
i = head[p]
if i == -1:
top -= 1
post[k] = p
k += 1
else:
head[p] = next[i]
top += 1
stack[top] = i
return k |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def post_order(parent):
""" Post order a forest. """ |
n = len(parent)
k = 0
p = matrix(0,(n,1))
head = matrix(-1,(n,1))
next = matrix(0,(n,1))
stack = matrix(0,(n,1))
for j in range(n-1,-1,-1):
if (parent[j] == j): continue
next[j] = head[parent[j]]
head[parent[j]] = j
for j in range(n):
if (parent[j] != j): continue
k = __tdfs(j, k, head, next, p, stack)
return p |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def etree(A):
""" Compute elimination tree from upper triangle of A. """ |
assert isinstance(A,spmatrix), "A must be a sparse matrix"
assert A.size[0] == A.size[1], "A must be a square matrix"
n = A.size[0]
cp,ri,_ = A.CCS
parent = matrix(0,(n,1))
w = matrix(0,(n,1))
for k in range(n):
parent[k] = k
w[k] = -1
for p in range(cp[k],cp[k+1]):
i = ri[p]
while ((not i == -1) and (i < k)):
inext = w[i]
w[i] = k
if inext == -1: parent[i] = k
i = inext
return parent |
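A small illustration, assuming cvxopt: for an arrow pattern whose upper triangle ties every column to the last one, every node's parent is the last vertex:

from cvxopt import spmatrix

n = 4
# upper-triangular pattern: the diagonal plus the last column
I = list(range(n)) + list(range(n - 1))
J = list(range(n)) + [n - 1] * (n - 1)
A = spmatrix(1.0, I, J, (n, n))
print(list(etree(A)))  # -> [3, 3, 3, 3]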
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __leaf(i, j, first, maxfirst, prevleaf, ancestor):
""" Determine if j is leaf of i'th row subtree. """ |
jleaf = 0
if i<=j or first[j] <= maxfirst[i]: return -1, jleaf
maxfirst[i] = first[j]
jprev = prevleaf[i]
prevleaf[i] = j
if jprev == -1: jleaf = 1
else: jleaf = 2
if jleaf == 1: return i, jleaf
q = jprev
while q != ancestor[q]: q = ancestor[q]
s = jprev
while s != q:
sparent = ancestor[s]
ancestor[s] = q
s = sparent
return q, jleaf |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def counts(A, parent, post):
""" Compute column counts. """ |
n = A.size[0]
colcount = matrix(0,(n,1))
ancestor = matrix(range(n),(n,1))
maxfirst = matrix(-1,(n,1))
prevleaf = matrix(-1,(n,1))
first = matrix(-1,(n,1))
for k in range(n):
j = post[k]
if first[j] == -1:
colcount[j] = 1
else:
colcount[j] = 0
while j != -1 and first[j] == -1:
first[j] = k
j = parent[j]
cp,ri,_ = A.CCS
for k in range(n):
j = post[k]
if parent[j] != j:
colcount[parent[j]] -= 1
for p in range(cp[j],cp[j+1]):
i = ri[p]
if i <= j: continue
q, jleaf = __leaf(i, j, first, maxfirst, prevleaf, ancestor)
if jleaf >= 1: colcount[j] += 1
if jleaf == 2: colcount[q] -= 1
if parent[j] != j: ancestor[j] = parent[j]
for j in range(n):
if parent[j] != j: colcount[parent[j]] += colcount[j]
return colcount |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pothen_sun(par, post, colcount):
""" Find supernodes and supernodal etree. ARGUMENTS par parent array post array with post ordering colcount array with column counts RETURNS snpar supernodal parent structure flag integer vector of length n; if flag[i] < 0, then -flag[i] is the degree of the supernode with repr. vertex i; if flag[i] >= 0, then flag[i] is the repr. vertex to which node i belongs. """ |
n = len(par)
flag = matrix(-1, (n, 1))
snpar = matrix(-1, (n, 1))
snodes = n
ch = {}
for j in post:
if par[j] in ch: ch[par[j]].append(j)
else: ch[par[j]] = [j]
mdeg = colcount[j] - 1
if par[j] != j:
if mdeg == colcount[par[j]] and flag[par[j]] == -1:
# par[j] not assigned to supernode
snodes -= 1
if flag[j] < 0: # j is a repr. vertex
flag[par[j]] = j
flag[j] -= 1
else: # j is not a repr. vertex
flag[par[j]] = flag[j]
flag[flag[j]] -= 1
else:
if flag[j] < 0: snpar[j] = j
else: snpar[flag[j]] = flag[j]
if flag[j] < 0: k = j
else: k = flag[j]
if j in ch:
for i in ch[j]:
if flag[i] < 0: l = i
else: l = flag[i]
if not l == k: snpar[l] = k
repr = matrix([i for i in range(n) if flag[i] < 0])
deg = matrix([-flag[i] for i in range(n) if flag[i] < 0])
# renumber etree with number of supernodes
sn = matrix(-1, (n+1, 1))
for k, r in enumerate(repr): sn[r] = k
snpar = sn[snpar[repr]]
return snpar, flag |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def supernodes(par, post, colcount):
""" Find supernodes. ARGUMENTS par parent array post array with post ordering colcount array with column counts RETURNS snode array with supernodes; snode[snptr[k]:snptr[k+1]] contains the indices of supernode k snptr pointer array; snptr[k] is the index of the representative vertex of supernode k in the snode array snpar supernodal parent structure """ |
snpar, flag = pothen_sun(par, post, colcount)
n = len(par)
N = len(snpar)
snode = matrix(0, (n,1))
snptr = matrix(0, (N+1,1))
slist = [[] for i in range(n)]
for i in range(n):
f = flag[i]
if f < 0:
slist[i].append(i)
else:
slist[f].append(i)
k = 0; j = 0
for i,sl in enumerate(slist):
nsl = len(sl)
if nsl > 0:
snode[k:k+nsl] = matrix(sl)
snptr[j+1] = snptr[j] + nsl
k += nsl
j += 1
return snode, snptr, snpar |
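Taken together, these helpers form the usual symbolic-analysis pipeline; a sketch, reusing the matrix ``A`` from the ``etree`` example above:

par = etree(A)                    # elimination tree
post = post_order(par)            # postorder of the tree
cc = counts(A, par, post)         # column counts of the Cholesky factor
snode, snptr, snpar = supernodes(par, post, cc)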
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def amalgamate(colcount, snode, snptr, snpar, snpost, merge_function):
""" Supernodal amalgamation. amalgamate(colcount, snode, snptr, snpar, snpost, merge_function) PURPOSE Iterates over the clique tree in topological order and greedily merges a supernode with its parent if merge_function(|J_{par(k)}|, |J_k|, |N_{par(k)}|, |N_k|) returns True. ARGUMENTS colcount vector with column counts snode vector with supernodes snptr vector with offsets snpar vector with supernodal parent indices snpost vector with supernodal post ordering merge_function function RETURNS colcount vector with amalgamated column counts snode vector with amalgamated supernodes snptr vector with amalgamated offsets snpar vector with amalgamated supernodal parent indices snpost vector with amalgamated supernodal post ordering """ |
N = len(snpost)
ch = {}
for j in snpost:
if snpar[j] in ch: ch[snpar[j]].append(j)
else: ch[snpar[j]] = [j]
snlist = [snode[snptr[k]:snptr[k+1]] for k in range(N)]
snpar_ = +snpar
colcount_ = +colcount
Ns = N
for k in snpost:
if snpar_[k] != k:
colk = colcount_[snlist[k][0]]
colp = colcount_[snlist[snpar_[k]][0]]
nk = len(snlist[k])
np = len(snlist[snpar_[k]])
if merge_function and merge_function(colp,colk,np,nk):
# merge supernode k and snpar[k]
snlist[snpar_[k]] = matrix(sorted(list(snlist[k]) + list(snlist[snpar_[k]])))
snlist[k] = None
colcount_[snlist[snpar_[k]][0]] = colp + nk
Ns -= 1
if k in ch:
for c in ch[k]:
snpar_[c] = snpar_[k]
ch[snpar_[k]] += ch[k]
snpar_[k] = k
L = [i for i,s in enumerate(snlist) if s is not None]
snptr_ = matrix(0,(len(L)+1,1))
snode_ = +snode
for i,l in enumerate(L):
snptr_[i+1] = snptr_[i] + len(snlist[l])
snode_[snptr_[i]:snptr_[i+1]] = snlist[l]
snpar_ = snpar_[L]
for i in range(len(snpar_)):
snpar_[i] = L.index(snpar_[i])
snpost_ = post_order(snpar_)
return colcount_, snode_, snptr_, snpar_, snpost_ |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def embed(A, colcount, snode, snptr, snpar, snpost):
""" Compute filled pattern. colptr, rowidx = embed(A, colcount, snode, snptr, snpar, snpost) PURPOSE Computes rowindices and column pointer for representative vertices in supernodes. ARGUMENTS A sparse matrix colcount vector with column counts snode vector with supernodes snptr vector with offsets snpar vector with supernodal parent indices snpost vector with supernodal post ordering RETURNS colptr vector with offsets rowidx vector with rowindices """ |
Alo = tril(A)
cp,ri,_ = Alo.CCS
N = len(snpar)
# colptr for compressed cholesky factor
colptr = matrix(0,(N+1,1))
for k in range(N):
colptr[k+1] = colptr[k] + colcount[snode[snptr[k]]]
rowidx = matrix(-1,(colptr[-1],1))
cnnz = matrix(0,(N,1))
# compute compressed sparse representation
for k in range(N):
p = snptr[k]
Nk = snptr[k+1]-p
nk = cp[snode[p]+1] - cp[snode[p]]
rowidx[colptr[k]:colptr[k]+nk] = ri[cp[snode[p]]:cp[snode[p]+1]]
cnnz[k] = nk
for i in range(1,Nk):
nk = cp[snode[p+i]+1]-cp[snode[p+i]]
cnnz[k] = lmerge(rowidx, ri, colptr[k], cp[snode[p+i]], cnnz[k], nk)
for k in snpost:
p = snptr[k]
Nk = snptr[k+1]-p
if snpar[k] != k:
cnnz[snpar[k]] = lmerge(rowidx,rowidx,colptr[snpar[k]], colptr[k]+Nk,cnnz[snpar[k]], cnnz[k]-Nk)
return colptr, rowidx |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def relative_idx(colptr, rowidx, snptr, snpar):
""" Compute relative indices of update matrices in frontal matrix of parent. """ |
relptr = matrix(0, (len(snptr),1))
relidx = matrix(-1, (colptr[-1],1))
def lfind(a,b):
# positions of the elements of a within b; assumes both are sorted ascending
i = 0
ret = +a  # unary + copies a cvxopt matrix
for k in range(len(a)):
while a[k] != b[i]: i += 1
ret[k] = i
i += 1
return ret
for k in range(len(snpar)):
p = snpar[k]
relptr[k+1] = relptr[k]
if p != -1:
nk = snptr[k+1] - snptr[k]
relptr[k+1] += colptr[k+1] - colptr[k] - nk
relidx[relptr[k]:relptr[k+1]] = lfind(rowidx[colptr[k]+nk:colptr[k+1]], rowidx[colptr[p]:colptr[p+1]])
return relptr, relidx[:relptr[k+1]] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def peo(A, p):
""" Checks whether an ordering is a perfect elmimination order. Returns `True` if the permutation :math:`p` is a perfect elimination order for a Cholesky factorization :math:`PAP^T = LL^T`. Only the lower triangular part of :math:`A` is accessed. :param A: :py:class:`spmatrix` :param p: :py:class:`matrix` or :class:`list` of length `A.size[0]` """ |
n = A.size[0]
assert type(A) == spmatrix, "A must be a sparse matrix"
assert A.size[1] == n, "A must be a square matrix"
assert len(p) == n, "length of p must be equal to the order of A"
if isinstance(p, list): p = matrix(p)
As = symmetrize(A)
cp,ri,_ = As.CCS
# compute inverse permutation array
ip = matrix(0,(n,1))
ip[p] = matrix(range(n),(n,1))
# test set inclusion
for k in range(n):
v = p[k] # next vertex to be eliminated
# indices of neighbors that correspond to strictly lower triangular elements in reordered pattern
r = set([rj for rj in ri[cp[v]:cp[v+1]] if ip[rj] > k])
for rj in r:
if not r.issubset(set(ri[cp[rj]:cp[rj+1]])): return False
return True |
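A usage sketch (``symmetrize`` is a module helper assumed to be available):

from cvxopt import spmatrix
A = spmatrix([2.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0],
             [0, 0, 1, 1, 2, 2, 2],
             [0, 2, 1, 2, 0, 1, 2])
print(peo(A, [0, 1, 2]))  # -> True: the natural order is perfect for this pattern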
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nnz(self):
""" Returns the number of lower-triangular nonzeros. """ |
nnz = 0
for k in range(len(self.snpost)):
nn = self.snptr[k+1]-self.snptr[k]
na = self.relptr[k+1]-self.relptr[k]
nnz += nn*(nn+1)//2 + nn*na  # integer division keeps the count an int on Python 3
return nnz |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cliques(self, reordered = True):
""" Returns a list of cliques """ |
if reordered:
return [list(self.snrowidx[self.sncolptr[k]:self.sncolptr[k+1]]) for k in range(self.Nsn)]
else:
return [list(self.__p[self.snrowidx[self.sncolptr[k]:self.sncolptr[k+1]]]) for k in range(self.Nsn)] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def separators(self, reordered = True):
""" Returns a list of separator sets """ |
if reordered:
return [list(self.snrowidx[self.sncolptr[k]+self.snptr[k+1]-self.snptr[k]:self.sncolptr[k+1]]) for k in range(self.Nsn)]
else:
return [list(self.__p[self.snrowidx[self.sncolptr[k]+self.snptr[k+1]-self.snptr[k]:self.sncolptr[k+1]]]) for k in range(self.Nsn)] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def supernodes(self, reordered = True):
""" Returns a list of supernode sets """ |
if reordered:
return [list(self.snode[self.snptr[k]:self.snptr[k+1]]) for k in range(self.Nsn)]
else:
return [list(self.__p[self.snode[self.snptr[k]:self.snptr[k+1]]]) for k in range(self.Nsn)] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def diag(self, reordered = True):
""" Returns a vector with the diagonal elements of the matrix. """ |
sncolptr = self.symb.sncolptr
snptr = self.symb.snptr
snode = self.symb.snode
blkptr = self.symb.blkptr
D = matrix(0.0,(self.symb.n,1))
for k in range(self.symb.Nsn):
nn = snptr[k+1]-snptr[k]
w = sncolptr[k+1]-sncolptr[k]
for i in range(nn): D[snode[snptr[k]+i]] = self.blkval[blkptr[k]+i*w+i]
if reordered: return D
else: return D[self.symb.ip] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_deep_link_url(self, data=None, alias=None, type=0, duration=None, identity=None, tags=None, campaign=None, feature=None, channel=None, stage=None, skip_api_call=False):
""" Creates a deep linking url See the URL https://dev.branch.io/references/http_api/#creating-a-deep-linking-url You can also use this method to bulk create deep link by setting "skip_api_call=True" and using the parameters returned by the method as an array and call "create_deep_linking_urls" :return: params or the response """ |
url = "/v1/url"
method = "POST"
params = {}
# Check Params
self._check_param("data", data, params, type=dict)
self._check_param("alias", alias, params, type=(binary_type, text_type))
self._check_param("type", type, params, type=int, lte=2, gte=0)
self._check_param("duration", duration, params, type=int)
self._check_param("identity", identity, params, type=(binary_type, text_type), max_length=127)
self._check_param("tags", tags, params, type=list, sub_type=(binary_type, text_type), sub_max_length=64)
self._check_param("campaign", campaign, params, type=(binary_type, text_type), max_length=128)
self._check_param("feature", feature, params, type=(binary_type, text_type), max_length=128)
self._check_param("channel", channel, params, type=(binary_type, text_type), max_length=128)
self._check_param("stage", stage, params, type=(binary_type, text_type), max_length=128)
if skip_api_call is True:
return params
else:
self._check_param("branch_key", self.branch_key, params, optional=False, type=(binary_type, text_type))
return self.make_api_call(method, url, json_params=params) |
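A hypothetical usage sketch; the ``Client`` class name and its constructor are assumptions, but the bulk pattern follows the docstring above:

client = Client(branch_key="key_live_xxx")  # hypothetical client wrapper
response = client.create_deep_link_url(
    data={"$deeplink_path": "promo/summer"},
    channel="email",
    campaign="summer_sale",
)
# Bulk variant: collect params without hitting the API, then send them together
params = [client.create_deep_link_url(channel=c, skip_api_call=True)
          for c in ("email", "sms")]
urls = client.create_deep_linking_urls(params)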
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_deep_linking_urls(self, url_params):
""" Bulk Creates Deep Linking URLs See the URL https://dev.branch.io/references/http_api/#bulk-creating-deep-linking-urls :return: The response """ |
url = "/v1/url/bulk/%s" % self.branch_key
method = "POST"
# Checks params
self._check_param(value=url_params, type=list, sub_type=dict, optional=False)
return self.make_api_call(method, url, json_params=url_params) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_enum(lib_prefix, enum_prefix=''):
"""Class decorator for automatically adding enum values. The values are read directly from the :attr:`spotify.lib` CFFI wrapper around libspotify. All values starting with ``lib_prefix`` are added. The ``lib_prefix`` is stripped from the name. Optionally, ``enum_prefix`` can be specified to add a prefix to all the names. """ |
def wrapper(cls):
for attr in dir(lib):
if attr.startswith(lib_prefix):
name = attr.replace(lib_prefix, enum_prefix)
cls.add(name, getattr(lib, attr))
return cls
return wrapper |
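A hypothetical sketch: if ``spotify.lib`` defined constants such as ``SP_CONNECTION_STATE_LOGGED_IN``, the decorator would surface them as class attributes (the base class must provide the ``add`` classmethod shown further below):

@make_enum('SP_CONNECTION_STATE_')
class ConnectionState(IntEnum):  # an add()-aware, int-valued base class, assumed
    pass
# ConnectionState.LOGGED_IN would then equal lib.SP_CONNECTION_STATE_LOGGED_IN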
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_bytes(value):
"""Converts bytes, unicode, and C char arrays to bytes. Unicode strings are encoded to UTF-8. """ |
if isinstance(value, text_type):
return value.encode('utf-8')
elif isinstance(value, ffi.CData):
return ffi.string(value)
elif isinstance(value, binary_type):
return value
else:
raise ValueError('Value must be text, bytes, or char[]') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_bytes_or_none(value):
"""Converts C char arrays to bytes and C NULL values to None.""" |
if value == ffi.NULL:
return None
elif isinstance(value, ffi.CData):
return ffi.string(value)
else:
raise ValueError('Value must be char[] or NULL') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_unicode(value):
"""Converts bytes, unicode, and C char arrays to unicode strings. Bytes and C char arrays are decoded from UTF-8. """ |
if isinstance(value, ffi.CData):
return ffi.string(value).decode('utf-8')
elif isinstance(value, binary_type):
return value.decode('utf-8')
elif isinstance(value, text_type):
return value
else:
raise ValueError('Value must be text, bytes, or char[]') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_unicode_or_none(value):
"""Converts C char arrays to unicode and C NULL values to None. C char arrays are decoded from UTF-8. """ |
if value == ffi.NULL:
return None
elif isinstance(value, ffi.CData):
return ffi.string(value).decode('utf-8')
else:
raise ValueError('Value must be char[] or NULL') |
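A quick sketch of the converters on plain Python values (``ffi`` is the module-level CFFI instance these helpers already use):

assert to_bytes(u'æøå') == b'\xc3\xa6\xc3\xb8\xc3\xa5'
assert to_unicode(b'\xc3\xa6\xc3\xb8\xc3\xa5') == u'æøå'
assert to_bytes_or_none(ffi.NULL) is None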
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def on(self, event, listener, *user_args):
"""Register a ``listener`` to be called on ``event``. The listener will be called with any extra arguments passed to :meth:`emit` first, and then the extra arguments passed to :meth:`on` last. If the listener function returns :class:`False`, it is removed and will not be called the next time the ``event`` is emitted. """ |
self._listeners[event].append(
_Listener(callback=listener, user_args=user_args)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def off(self, event=None, listener=None):
"""Remove a ``listener`` that was to be called on ``event``. If ``listener`` is :class:`None`, all listeners for the given ``event`` will be removed. If ``event`` is :class:`None`, all listeners for all events on this object will be removed. """ |
if event is None:
events = self._listeners.keys()
else:
events = [event]
for event in events:
if listener is None:
self._listeners[event] = []
else:
self._listeners[event] = [
l for l in self._listeners[event]
if l.callback != listener] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def emit(self, event, *event_args):
"""Call the registered listeners for ``event``. The listeners will be called with any extra arguments passed to :meth:`emit` first, and then the extra arguments passed to :meth:`on` """ |
listeners = self._listeners[event][:]
for listener in listeners:
args = list(event_args) + list(listener.user_args)
result = listener.callback(*args)
if result is False:
self.off(event, listener.callback) |
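A sketch of the on/emit contract; it assumes these methods live on an ``EventEmitter`` class whose ``_listeners`` is a ``collections.defaultdict(list)`` of ``_Listener`` records:

emitter = EventEmitter()
def once(msg, tag):
    print(tag, msg)
    return False                  # returning False unregisters the listener
emitter.on('ready', once, 'first:')
emitter.emit('ready', 'hello')    # prints "first: hello"
emitter.emit('ready', 'again')    # nothing: the listener removed itself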
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def num_listeners(self, event=None):
"""Return the number of listeners for ``event``. Return the total number of listeners for all events on this object if ``event`` is :class:`None`. """ |
if event is not None:
return len(self._listeners[event])
else:
return sum(len(l) for l in self._listeners.values()) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def call(self, event, *event_args):
"""Call the single registered listener for ``event``. The listener will be called with any extra arguments passed to :meth:`call` first, and then the extra arguments passed to :meth:`on` Raises :exc:`AssertionError` if there is none or multiple listeners for ``event``. Returns the listener's return value on success. """ |
# XXX It would be a lot better for debugging if this error was raised
# when registering the second listener instead of when the event is
# emitted.
assert self.num_listeners(event) == 1, (
'Expected exactly 1 event listener, found %d listeners' %
self.num_listeners(event))
listener = self._listeners[event][0]
args = list(event_args) + list(listener.user_args)
return listener.callback(*args) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add(cls, name, value):
"""Add a name-value pair to the enumeration.""" |
attr = cls(value)
attr._name = name
setattr(cls, name, attr) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_minimum_size(self, data):
""" Minimum height is the total height + margins, minimum width is the largest width. """ |
min_width = 0
height = 0
for element in self.elements:
size = element.get_minimum_size(data)
min_width = max(min_width, size.x)
height += size.y
height += (len(self.elements)-1)*self.margin
return datatypes.Point(min_width, height) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_minimum_size(self, data):
"""Minimum width is the total width + margins, minimum height is the largest height.""" |
width = 0
min_height = 0
for element in self.elements:
size = element.get_minimum_size(data)
min_height = max(min_height, size.y)
width += size.x
width += (len(self.elements)-1)*self.margin
return datatypes.Point(width, min_height) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render(self, rect, data):
"""Draws the columns.""" |
num_elements = len(self.elements)
col_width = (rect.w-self.margin*(num_elements-1)) / float(num_elements)
x = rect.x
for element in self.elements:
if element is not None:
element.render(datatypes.Rectangle(
x, rect.y, col_width, rect.h
), data)
x += col_width + self.margin |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_minimum_size(self, data):
"""The minimum height is the number of rows multiplied by the tallest row.""" |
min_width = 0
min_height = 0
for element in self.elements:
size = (
datatypes.Point(0, 0) if element is None
else element.get_minimum_size(data)
)
min_height = max(min_height, size.y)
min_width = max(min_width, size.x)
num_elements = len(self.elements)
height = min_height * num_elements + self.margin * (num_elements-1)
return datatypes.Point(min_width, height) |
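For example, three elements whose tallest is 20 units high, laid out with a 5-unit margin, give a minimum height of 20*3 + 5*2 = 70.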
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_import(self, name, compilation, rule):
""" Re-implementation of the core Sass import mechanism, which looks for files using the staticfiles storage and staticfiles finders. """ |
original_path = PurePath(name)
search_exts = list(compilation.compiler.dynamic_extensions)
if original_path.suffix and original_path.suffix in search_exts:
basename = original_path.stem
else:
basename = original_path.name
if original_path.is_absolute():
# Remove the beginning slash
search_path = original_path.relative_to('/').parent
elif rule.source_file.origin:
search_path = rule.source_file.origin
# NOTE: PurePath objects are always truthy, so this branch always runs
if original_path.parent:
search_path = os.path.normpath(str(search_path / original_path.parent))
else:
search_path = original_path.parent
for prefix, suffix in product(('_', ''), search_exts):
filename = PurePath(prefix + basename + suffix)
full_filename, storage = get_file_and_storage(str(search_path / filename))
if full_filename:
with storage.open(full_filename) as f:
return SourceFile.from_file(f, origin=search_path, relpath=filename) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parser_help_text(help_text):
"""Takes the help text supplied as a doc string and extraxts the description and any param arguments.""" |
if help_text is None:
return None, {}
main_text = ''
params_help = {}
for line in help_text.splitlines():
line = line.strip()
match = re.search(r':\s*param\s*(?P<param>\w+)\s*:(?P<help>.*)$', line)
if match:
params_help[match.group('param')] = match.group('help').strip()
else:
main_text += line + ' '
main_text = main_text.strip()
return main_text, params_help |
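A sketch of what the parse produces:

doc = """Add two numbers.

:param x: the first addend
:param y: the second addend
"""
main, params = parser_help_text(doc)
# main == 'Add two numbers.'
# params == {'x': 'the first addend', 'y': 'the second addend'}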
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calculate_default_type(arg, has_default, default_value, params_help):
"""This function looks at the default value and returns the type that should be supplied to the parser""" |
positional = True
arg_params = {}
arg_name = arg
# Check to see if we have help text for this argument
try:
arg_params['help'] = params_help[arg_name]
except KeyError:
pass
# If we have a default value, then this is not positional
if has_default:
positional = False
# Special case when a base type is supplied
if default_value in (int, float):
positional = True
# For boolean options, change the action
if default_value is True:
arg_params['action'] = 'store_false'
elif default_value is False:
arg_params['action'] = 'store_true'
# Finally, check if the default value is an integer or a float
# and set the arg type on the item
if type(default_value) in (int, float):
arg_params['type'] = type(default_value)
# Update the arg_name
if positional:
if arg_name.startswith('_'):
arg_params['nargs'] = '?'
arg_params['default'] = None
arg_params['metavar'] = arg_name.lstrip('_')
#arg_name = arg_name.lstrip('_')
else:
arg_params['default'] = default_value
if len(arg_name) == 1:
arg_name = '-' + arg_name
else:
arg_name = '--' + arg_name
return arg_name, arg_params |
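A sketch of the mapping from defaults to argparse settings:

print(calculate_default_type('verbose', True, False, {}))
# -> ('--verbose', {'action': 'store_true', 'default': False})
print(calculate_default_type('count', True, 3, {}))
# -> ('--count', {'type': int, 'default': 3})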
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def function_parser(function, parser):
"""This function parses a function and adds its arguments to the supplied parser""" |
# Store the function pointer on the parser for later use
parser.set_defaults(func=function)
# Get the help text and parse it for params
help_text = inspect.getdoc(function)
main_text, params_help = parser_help_text(help_text)
# Get the function information
# NOTE: inspect.getargspec was removed in Python 3.11; inspect.getfullargspec is the modern replacement
args, varargs, keywords, defaults = inspect.getargspec(function)
if args is None:
args = []
if defaults is None:
defaults = []
# If the function is a class method, it will have a self that needs to be removed
if len(args) and args[0] == 'self':
args.pop(0)
# Work out whether the argument has a default by subtracting the length
# of the default args from the number of arguments
num_required_args = len(args) - len(defaults)
for idx, arg in enumerate(args):
if idx < num_required_args:
arg_name, arg_params = calculate_default_type(arg, False, None, params_help)
else:
default_value = defaults[idx - num_required_args]
arg_name, arg_params = calculate_default_type(arg, True, default_value, params_help)
parser.add_argument(arg_name, **arg_params) |
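A usage sketch (runs as-is on Python versions that still ship ``inspect.getargspec``, i.e. before 3.11):

import argparse

def greet(name, shout=False):
    """Greet someone.

    :param name: who to greet
    :param shout: print in upper case
    """
    msg = 'Hello, %s!' % name
    print(msg.upper() if shout else msg)

parser = argparse.ArgumentParser()
function_parser(greet, parser)
ns = parser.parse_args(['World', '--shout'])
ns.func(ns.name, shout=ns.shout)  # -> HELLO, WORLD!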
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def urijoin(base, ref, strict=False):
"""Convert a URI reference relative to a base URI to its target URI string. """ |
if isinstance(base, type(ref)):
return urisplit(base).transform(ref, strict).geturi()
elif isinstance(base, bytes):
return urisplit(base.decode()).transform(ref, strict).geturi()
else:
return urisplit(base).transform(ref.decode(), strict).geturi() |
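Resolution follows RFC 3986 reference semantics; for example:

print(urijoin('http://example.com/a/b', 'c'))     # -> http://example.com/a/c
print(urijoin('http://example.com/a/b', '../x'))  # -> http://example.com/x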
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def send_response(self, response):
"""Send a unicode object as reply to the most recently-issued command """ |
response_bytes = response.encode(config.CODEC)
log.debug("About to send reponse: %r", response_bytes)
self.socket.send(response_bytes) |