<SYSTEM_TASK:> Decorator to ensure the given snake_case method is also written in <END_TASK> <USER_TASK:> Description: def ucamel_method(func): """ Decorator to ensure the given snake_case method is also written in UpperCamelCase in the given namespace. That was mainly written to avoid confusion when using wxPython and its UpperCamelCaseMethods. """
    frame_locals = inspect.currentframe().f_back.f_locals
    frame_locals[snake2ucamel(func.__name__)] = func
    return func
<SYSTEM_TASK:> Reads a file as a list of strings. <END_TASK> <USER_TASK:> Description: def read_plain_text(fname, encoding="utf-8"): """Reads a file as a list of strings."""
    with io.open(fname, encoding=encoding) as f:
        result = list(f)
    if result:
        if result[-1][-1:] == "\n":
            result.append("\n")
        else:
            result[-1] += "\n"
        return [line[:-1] for line in result]
    return []
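For illustration, a minimal round trip of the function above (the file name is hypothetical):

# Sketch of how read_plain_text might be used; "notes.txt" is a
# hypothetical file. Every returned string has its newline stripped,
# and a file that ends with "\n" yields a trailing empty string.
lines = read_plain_text("notes.txt")
for number, line in enumerate(lines, 1):
    print("%3d: %s" % (number, line))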
<SYSTEM_TASK:> Maximum cardinality search ordering of a sparse chordal matrix. <END_TASK> <USER_TASK:> Description: def maxcardsearch(A, ve = None): """ Maximum cardinality search ordering of a sparse chordal matrix. Returns the maximum cardinality search ordering of a symmetric chordal matrix :math:`A`. Only the lower triangular part of :math:`A` is accessed. The maximum cardinality search ordering is a perfect elimination ordering in the factorization :math:`PAP^T = LL^T`. The optional argument `ve` is the index of the last vertex to be eliminated (the default value is n-1). :param A: :py:class:`spmatrix` :param ve: integer between 0 and `A.size[0]`-1 (optional) """
    n = A.size[0]
    assert A.size[1] == n, "A must be a square matrix"
    assert type(A) is spmatrix, "A must be a sparse matrix"
    if ve is None:
        ve = n-1
    else:
        assert type(ve) is int and 0<=ve<n,\
            "ve must be an integer between 0 and A.size[0]-1"
    As = symmetrize(A)
    cp,ri,_ = As.CCS

    # permutation vector
    p = matrix(0,(n,1))

    # weight array
    w = matrix(0,(n,1))
    max_w = 0
    S = [list(range(ve))+list(range(ve+1,n))+[ve]] + [[] for i in range(n-1)]

    for i in range(n-1,-1,-1):
        while True:
            if len(S[max_w]) > 0:
                v = S[max_w].pop()
                if w[v] >= 0:
                    break
            else:
                max_w -= 1
        p[i] = v
        w[v] = -1   # set w[v] = -1 to mark that node v has been numbered

        # increase weights for all unnumbered neighbors
        for r in ri[cp[v]:cp[v+1]]:
            if w[r] >= 0:
                w[r] += 1
                S[w[r]].append(r)        # bump r up to S[w[r]]
                max_w = max(max_w,w[r])
    return p
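A small usage sketch, assuming cvxopt is installed; a tridiagonal pattern is chordal since its graph is a path, so the returned ordering should pass the perfect-elimination check:

# Lower triangle of a 4x4 tridiagonal matrix (diagonal + subdiagonal).
from cvxopt import spmatrix
A = spmatrix(1.0,
             [0, 1, 1, 2, 2, 3, 3],
             [0, 0, 1, 1, 2, 2, 3], (4, 4))
p = maxcardsearch(A)
assert peo(A, p)  # p is a perfect elimination ordering for A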
<SYSTEM_TASK:> Decorator that serializes access to all decorated functions. <END_TASK> <USER_TASK:> Description: def serialized(f): """Decorator that serializes access to all decorated functions. The decorator acquires pyspotify's single global lock while calling any wrapped function. It is used to serialize access to: - All calls to functions on :attr:`spotify.lib`. - All code blocks working on pointers returned from functions on :attr:`spotify.lib`. - All code blocks working on other internal data structures in pyspotify. Together this is what makes pyspotify safe to use from multiple threads and enables convenient features like the :class:`~spotify.EventLoop`. Internal function. """
    import functools

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        with _lock:
            return f(*args, **kwargs)
    if not hasattr(wrapper, '__wrapped__'):
        # Workaround for Python < 3.2
        wrapper.__wrapped__ = f
    return wrapper
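A minimal sketch of applying such a decorator, assuming a module-level _lock (e.g. a threading.RLock) like the one the wrapper above references:

import threading

_lock = threading.RLock()  # the global lock the decorator acquires

@serialized
def load_session(config):
    # The body runs with _lock held, so concurrent callers are serialized.
    return config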
<SYSTEM_TASK:> Creates one or more managers that wraps the given elements into <END_TASK> <USER_TASK:> Description: def get_pocketmod_pages(elements, page_edge_bottom=True, first_page_vertical=True): """ Creates one or more managers that wraps the given elements into one or more Pocket Mod-style page sets. Each manager in the list that is returned corresponds to one page. This imposer is designed to work with portrait oriented content pages, laid out onto a landscape oriented page. Arguments: ``elements`` The elements to lay out. PocketMod uses sheets with 8 pages on them, but you can pass in fewer elements - additional space will be left blank. The number of pages output is just the ceiling of the number of pages passed in divided by 8. ``page_edge_bottom`` If true the pages should be arranged so that, when folded, the bottom of each page touches the edge of the sheet of paper. This is normal, because the edge of the paper is where a normal printer blank-margin is located, and the bottom edge of a page usually has the largest margin. ``first_page_vertical`` If true then the fold on the first page will be vertical. Each 'page' in the book has either a fold on the outside edge or on one of the two horizontal edges (the top edge if page_edge_bottom is set, the bottom otherwise). The horizontal fold keeps the page together more strongly, so is normally used for the first page. The original Pocket Mod software has the first page with a horizontal fold. The returned page layouts can be given to a PageLM for rendering onto individual pages of output. This method isn't a layout manager in its own right. """
    pages = {
        (False, False):[2,3,4,5,1,8,7,6],
        (False, True):[4,5,6,7,3,2,1,8],
        (True, False):[5,4,3,2,6,7,8,1],
        (True, True):[7,6,5,4,8,1,2,3]
        }[page_edge_bottom, first_page_vertical]

    output = []
    num_pages = len(elements)
    for index in range(0, num_pages, 8):
        sglm = grid.SimpleGridLM(4, 2)
        for cell_index, cell in enumerate(pages):
            if index + cell - 1 < num_pages:
                element = elements[index+cell-1]
                if (cell_index > 3) != page_edge_bottom:
                    element = transform.RotateLM(2, element)
                sglm.add_element(element)
            else:
                sglm.add_element(None)
        output.append(sglm)
    return output
<SYSTEM_TASK:> Close the internal epoll file descriptor if it isn't closed <END_TASK> <USER_TASK:> Description: def close(self): """ Close the internal epoll file descriptor if it isn't closed :raises OSError: If the underlying ``close(2)`` fails. The error message matches those found in the manual page. """
    with self._close_lock:
        epfd = self._epfd
        if epfd >= 0:
            self._epfd = -1
            close(epfd)
<SYSTEM_TASK:> Create a new epoll object from a given file descriptor <END_TASK> <USER_TASK:> Description: def fromfd(cls, fd): """ Create a new epoll object from a given file descriptor :param fd: A pre-made file descriptor obtained from ``epoll_create(2)`` or ``epoll_create1(2)`` :raises ValueError: If fd is not a valid file descriptor :returns: A new epoll object .. note:: If the passed descriptor is incorrect then various methods will fail and raise OSError with an appropriate message. """
    if fd < 0:
        _err_closed()
    # NOTE: __new__ must be passed the class itself
    self = cls.__new__(cls)
    object.__init__(self)
    self._epfd = fd
    return self
<SYSTEM_TASK:> Register a new descriptor <END_TASK> <USER_TASK:> Description: def register(self, fd, eventmask=None): """ Register a new descriptor :param fd: The descriptor to register. :param eventmask: Bit-mask of events that will be monitored. By default EPOLLIN, EPOLLOUT and EPOLLPRI are used. Note that EPOLLHUP is implicit and doesn't need to be provided. :raises ValueError: If :meth:`closed()` is True :raises OSError: If the underlying ``epoll_ctl(2)`` fails. The error message matches those found in the manual page. """
    if self._epfd < 0:
        _err_closed()
    if eventmask is None:
        eventmask = EPOLLIN | EPOLLOUT | EPOLLPRI
    ev = epoll_event()
    ev.events = eventmask
    ev.data.fd = fd
    epoll_ctl(self._epfd, EPOLL_CTL_ADD, fd, byref(ev))
<SYSTEM_TASK:> Unregister a previously registered descriptor <END_TASK> <USER_TASK:> Description: def unregister(self, fd): """ Unregister a previously registered descriptor :param fd: The descriptor to unregister :raises ValueError: If :meth:`closed()` is True :raises OSError: If the underlying ``epoll_ctl(2)`` fails. The error message matches those found in the manual page. .. note:: For feature parity with Python 3.4, unlike what ``epoll_ctl(2)`` would do, we are silently ignoring ``EBADF`` which is raised if the descriptor was already closed. """
    if self._epfd < 0:
        _err_closed()
    ev = epoll_event()
    try:
        epoll_ctl(self._epfd, EPOLL_CTL_DEL, fd, byref(ev))
    except OSError as exc:
        # Allow fd to be closed, matching Python 3.4
        if exc.errno != EBADF:
            raise
<SYSTEM_TASK:> Change the bit-mask of events associated with a previously-registered <END_TASK> <USER_TASK:> Description: def modify(self, fd, eventmask): """ Change the bit-mask of events associated with a previously-registered descriptor. :param fd: The descriptor to modify. :param eventmask: New bit-mask of events that will be monitored. :raises ValueError: If :meth:`closed()` is True :raises OSError: If the underlying ``epoll_ctl(2)`` fails. The error message matches those found in the manual page. """
    if self._epfd < 0:
        _err_closed()
    ev = epoll_event()
    ev.events = eventmask
    ev.data.fd = fd
    epoll_ctl(self._epfd, EPOLL_CTL_MOD, fd, byref(ev))
<SYSTEM_TASK:> Poll for events <END_TASK> <USER_TASK:> Description: def poll(self, timeout=-1, maxevents=-1): """ Poll for events :param timeout: The amount of seconds to wait for events before giving up. The default value, -1, represents infinity. Note that unlike the underlying ``epoll_wait()`` timeout is a fractional number representing **seconds**. :param maxevents: The maximum number of events to report. The default is a reasonably-sized maximum, identical to the one selected by Python 3.4. :returns: A list of (fd, events) that were reported or an empty list if the timeout elapsed. :raises ValueError: If :meth:`closed()` is True :raises OSError: If the underlying ``epoll_wait(2)`` fails. The error message matches those found in the manual page. """
    if self._epfd < 0:
        _err_closed()
    if timeout != -1:
        # 1000 because epoll_wait(2) uses milliseconds
        timeout = int(timeout * 1000)
    if maxevents == -1:
        maxevents = FD_SETSIZE - 1
    events = (epoll_event * maxevents)()
    num_events = epoll_wait(
        self._epfd, cast(byref(events), POINTER(epoll_event)),
        maxevents, timeout)
    return [(events[i].data.fd, events[i].events)
            for i in range(num_events)]
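Putting register/poll/unregister together, a minimal event-loop sketch might look as follows, assuming the class can be instantiated directly as epoll() and that sock is a connected socket:

# Hypothetical usage of the epoll wrapper defined above.
ep = epoll()
ep.register(sock.fileno(), EPOLLIN)
try:
    while True:
        # Wait up to one second; an empty list means the timeout elapsed.
        for fd, events in ep.poll(timeout=1.0):
            if events & EPOLLIN:
                data = sock.recv(4096)
finally:
    ep.unregister(sock.fileno())
    ep.close()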
<SYSTEM_TASK:> Compare the valid information on an xml from given schema. <END_TASK> <USER_TASK:> Description: def validate(self, schema_str, xml_valid): """Compare the valid information on an xml from given schema. :param str schema_str: content string from schema file. :param str xml_valid: content string from xml file. :returns: If it is Valid or Not. :rtype: bool """
    # TODO: be able to get doc for error given an xsd.
    # The path is changed so that XSDs imported by other XSDs in the same
    # library can be resolved locally, instead of calling the SAT page each
    # time a new XML is generated.
    with change_path():
        path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), 'templates')
        os.chdir(path)
        schema_root = etree.parse(StringIO(schema_str))
        schema = etree.XMLSchema(schema_root)
    try:
        tree = etree.parse(StringIO(xml_valid.encode('UTF-8')))
        schema.assertValid(tree)
    except etree.DocumentInvalid as ups:
        self.ups = ups
    finally:
        if self.ups:
            self.valid = False
        else:
            self.valid = True
        return self.valid
<SYSTEM_TASK:> Set document xml just rendered already <END_TASK> <USER_TASK:> Description: def set_xml(self): """Set document xml just rendered already validated against xsd to be signed. :params boolean debug_mode: Either if you want the rendered template to be saved either it is valid or not with the given schema. :returns boolean: Either was valid or not the generated document. """
    cached = StringIO()
    document = u''
    try:
        document = self.template.render(inv=self)
    except UndefinedError as ups:
        self.ups = ups
    # TODO: Here should be called the cleanup 'Just before the validation'.
    valid = self.validate(self.schema, document)
    self.document = document
    if valid:
        document = etree.XML(document)
        self.document = etree.tostring(document,
                                       pretty_print=True,
                                       xml_declaration=True,
                                       encoding='utf-8')
        # TODO: When the document is generated, this should not fail either.
        # Caching just when valid then.
        cached.write(self.document is not None and self.document or u'')
        cached.seek(0)
        self.document_path = cached
<SYSTEM_TASK:> Get a single record <END_TASK> <USER_TASK:> Description: def get_record(uid=None): """Get a single record """
    obj = None
    if uid is not None:
        obj = get_object_by_uid(uid)
    else:
        obj = get_object_by_request()
    if obj is None:
        fail(404, "No object found")
    complete = req.get_complete(default=_marker)
    if complete is _marker:
        complete = True
    items = make_items_for([obj], complete=complete)
    return u.first(items)
<SYSTEM_TASK:> Extract the data from the catalog brain or object <END_TASK> <USER_TASK:> Description: def get_info(brain_or_object, endpoint=None, complete=False): """Extract the data from the catalog brain or object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :param endpoint: The named URL endpoint for the root of the items :type endpoint: str/unicode :param complete: Flag to wake up the object and fetch all data :type complete: bool :returns: Data mapping for the object/catalog brain :rtype: dict """
    # also extract the brain data for objects
    if not is_brain(brain_or_object):
        brain_or_object = get_brain(brain_or_object)
        if brain_or_object is None:
            logger.warn("Couldn't find/fetch brain of {}".format(
                brain_or_object))
            return {}
        complete = True

    # When querying uid catalog we have to be sure that we skip the objects
    # used to relate two or more objects
    if is_relationship_object(brain_or_object):
        logger.warn("Skipping relationship object {}".format(
            repr(brain_or_object)))
        return {}

    # extract the data from the initial object with the proper adapter
    info = IInfo(brain_or_object).to_dict()

    # update with url info (always included)
    url_info = get_url_info(brain_or_object, endpoint)
    info.update(url_info)

    # include the parent url info
    parent = get_parent_info(brain_or_object)
    info.update(parent)

    # add the complete data of the object if requested
    # -> requires to wake up the object if it is a catalog brain
    if complete:
        # ensure we have a full content object
        obj = api.get_object(brain_or_object)
        # get the compatible adapter
        adapter = IInfo(obj)
        # update the data set with the complete information
        info.update(adapter.to_dict())

        # update the data set with the workflow information
        # -> only possible if `?complete=yes&workflow=yes`
        if req.get_workflow(False):
            info.update(get_workflow_info(obj))

        # # add sharing data if the user requested it
        # # -> only possible if `?complete=yes`
        # if req.get_sharing(False):
        #     sharing = get_sharing_info(obj)
        #     info.update({"sharing": sharing})

    return info
<SYSTEM_TASK:> Generate url information for the parent object <END_TASK> <USER_TASK:> Description: def get_parent_info(brain_or_object, endpoint=None): """Generate url information for the parent object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :param endpoint: The named URL endpoint for the root of the items :type endpoint: str/unicode :returns: URL information mapping :rtype: dict """
    # special case for the portal object
    if is_root(brain_or_object):
        return {}

    # get the parent object
    parent = get_parent(brain_or_object)
    portal_type = get_portal_type(parent)
    resource = portal_type_to_resource(portal_type)

    # fall back if no endpoint specified
    if endpoint is None:
        endpoint = get_endpoint(parent)

    return {
        "parent_id": get_id(parent),
        "parent_uid": get_uid(parent),
        "parent_url": url_for(endpoint, resource=resource,
                              uid=get_uid(parent))
    }
<SYSTEM_TASK:> Generate data items of the contained contents <END_TASK> <USER_TASK:> Description: def get_children_info(brain_or_object, complete=False): """Generate data items of the contained contents :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :param complete: Flag to wake up the object and fetch all data :type complete: bool :returns: info mapping of contained content items :rtype: list """
    # fetch the contents (if folderish)
    children = get_contents(brain_or_object)

    def extract_data(brain_or_object):
        return get_info(brain_or_object, complete=complete)

    items = map(extract_data, children)

    return {
        "children_count": len(items),
        "children": items
    }
<SYSTEM_TASK:> Extract file data from a file field <END_TASK> <USER_TASK:> Description: def get_file_info(obj, fieldname, default=None): """Extract file data from a file field :param obj: Content object :type obj: ATContentType/DexterityContentType :param fieldname: Schema name of the field :type fieldname: str/unicode :returns: File data mapping :rtype: dict """
    # extract the file field from the object if omitted
    field = get_field(obj, fieldname)

    # get the value with the fieldmanager
    fm = IFieldManager(field)

    # return None if we have no file data
    if fm.get_size(obj) == 0:
        return None

    out = {
        "content_type": fm.get_content_type(obj),
        "filename": fm.get_filename(obj),
        "download": fm.get_download_url(obj),
    }

    # only return the file data if requested (?filedata=yes)
    if req.get_filedata(False):
        data = fm.get_data(obj)
        out["data"] = data.encode("base64")

    return out
<SYSTEM_TASK:> Generate workflow information of the assigned workflows <END_TASK> <USER_TASK:> Description: def get_workflow_info(brain_or_object, endpoint=None): """Generate workflow information of the assigned workflows :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :param endpoint: The named URL endpoint for the root of the items :type endpoint: str/unicode :returns: Workflows info :rtype: dict """
    # ensure we have a full content object
    obj = get_object(brain_or_object)

    # get the portal workflow tool
    wf_tool = get_tool("portal_workflow")

    # the assigned workflows of this object
    workflows = wf_tool.getWorkflowsFor(obj)

    # no workflows assigned -> return
    if not workflows:
        return []

    def to_transition_info(transition):
        """ return the transition information """
        return {
            "title": transition["title"],
            "value": transition["id"],
            "display": transition["description"],
            "url": transition["url"],
        }

    def to_review_history_info(review_history):
        """ return the review history information """
        converted = DT2dt(review_history.get('time')).\
            strftime("%Y-%m-%d %H:%M:%S")
        review_history['time'] = converted
        return review_history

    out = []

    for workflow in workflows:

        # get the status info of the current state (dictionary)
        info = wf_tool.getStatusOf(workflow.getId(), obj)
        if info is None:
            continue

        # get the current review_status
        review_state = info.get("review_state", None)
        inactive_state = info.get("inactive_state", None)
        cancellation_state = info.get("cancellation_state", None)
        worksheetanalysis_review_state = info.get(
            "worksheetanalysis_review_state", None)

        state = review_state or \
            inactive_state or \
            cancellation_state or \
            worksheetanalysis_review_state

        if state is None:
            logger.warn("No state variable found for {} -> {}".format(
                repr(obj), info))
            continue

        # get the wf status object
        status_info = workflow.states[state]

        # get the title of the current status
        status = status_info.title

        # get the transition information
        transitions = map(to_transition_info,
                          wf_tool.getTransitionsFor(obj))

        # get the review history
        rh = map(to_review_history_info,
                 workflow.getInfoFor(obj, 'review_history', ''))

        out.append({
            "workflow": workflow.getId(),
            "status": status,
            "review_state": state,
            "transitions": transitions,
            "review_history": rh,
        })

    return {"workflow_info": out}
<SYSTEM_TASK:> Search the catalog and return the results <END_TASK> <USER_TASK:> Description: def get_search_results(portal_type=None, uid=None, **kw): """Search the catalog and return the results :returns: Catalog search results :rtype: iterable """
    # If we have a UID, return the object immediately
    if uid is not None:
        logger.info("UID '%s' found, returning the object immediately" % uid)
        return u.to_list(get_object_by_uid(uid))

    # allow searching for the Plone Site with portal_type
    include_portal = False
    if u.to_string(portal_type) == "Plone Site":
        include_portal = True

    # The request may contain a list of portal_types, e.g.
    # `?portal_type=Document&portal_type=Plone Site`
    if "Plone Site" in u.to_list(req.get("portal_type")):
        include_portal = True

    # Build and execute a catalog query
    results = search(portal_type=portal_type, uid=uid, **kw)

    if include_portal:
        results = list(results) + u.to_list(get_portal())

    return results
<SYSTEM_TASK:> Return the named field <END_TASK> <USER_TASK:> Description: def get_field(brain_or_object, name, default=None): """Return the named field """
    fields = get_fields(brain_or_object)
    return fields.get(name, default)
<SYSTEM_TASK:> Iterate over all behaviors that are assigned to the object <END_TASK> <USER_TASK:> Description: def get_behaviors(brain_or_object): """Iterate over all behaviors that are assigned to the object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: Behaviors :rtype: dict """
    obj = get_object(brain_or_object)
    if not is_dexterity_content(obj):
        fail(400, "Only Dexterity contents can have assigned behaviors")
    assignable = IBehaviorAssignable(obj, None)
    if not assignable:
        return {}
    out = {}
    for behavior in assignable.enumerateBehaviors():
        for name, field in getFields(behavior.interface).items():
            out[name] = field
    return out
<SYSTEM_TASK:> Checks if the passed in path is a valid Path within the portal <END_TASK> <USER_TASK:> Description: def is_path(path): """Checks if the passed in path is a valid Path within the portal :param path: The path to check :type path: string :return: True if the path is a valid path within the portal :rtype: bool """
    if not isinstance(path, basestring):
        return False
    portal_path = get_path(get_portal())
    if not path.startswith(portal_path):
        return False
    obj = get_object_by_path(path)
    if obj is None:
        return False
    return True
<SYSTEM_TASK:> JSON save value encoding <END_TASK> <USER_TASK:> Description: def to_json_value(obj, fieldname, value=_marker, default=None): """JSON save value encoding :param obj: Content object :type obj: ATContentType/DexterityContentType :param fieldname: Schema name of the field :type fieldname: str/unicode :param value: The field value :type value: depends on the field type :returns: JSON encoded field value :rtype: field dependent """
    # This function bridges the value of the field to a probably more complex
    # JSON structure to return to the client.

    # extract the value from the object if omitted
    if value is _marker:
        value = IDataManager(obj).json_data(fieldname)

    # convert objects
    if isinstance(value, ImplicitAcquisitionWrapper):
        return get_url_info(value)

    # check if the value is callable
    if callable(value):
        value = value()

    # convert dates
    if is_date(value):
        return to_iso_date(value)

    # check if the value is JSON serializable
    if not is_json_serializable(value):
        logger.warn("Output {} is not JSON serializable".format(repr(value)))
        return default

    return value
<SYSTEM_TASK:> Checks if the given thing represents a date <END_TASK> <USER_TASK:> Description: def is_date(thing): """Checks if the given thing represents a date :param thing: The object to check if it is a date :type thing: arbitrary object :returns: True if we have a date object :rtype: bool """
    # known date types
    date_types = (datetime.datetime,
                  datetime.date,
                  DateTime)
    return isinstance(thing, date_types)
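A quick check of the three accepted types (DateTime here is Zope's class; the standard-library cases are shown):

import datetime

assert is_date(datetime.date(2020, 1, 1))
assert is_date(datetime.datetime.now())
assert not is_date("2020-01-01")  # plain strings are not dates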
<SYSTEM_TASK:> Map resources used in the routes to portal types <END_TASK> <USER_TASK:> Description: def get_resource_mapping(): """Map resources used in the routes to portal types :returns: Mapping of resource->portal_type :rtype: dict """
    portal_types = get_portal_types()
    resources = map(portal_type_to_resource, portal_types)
    return dict(zip(resources, portal_types))
<SYSTEM_TASK:> Converts a resource to a portal type <END_TASK> <USER_TASK:> Description: def resource_to_portal_type(resource): """Converts a resource to a portal type :param resource: Resource name as it is used in the content route :type name: string :returns: Portal type name :rtype: string """
    if resource is None:
        return None
    resource_mapping = get_resource_mapping()
    portal_type = resource_mapping.get(resource.lower())
    if portal_type is None:
        logger.warn("Could not map the resource '{}' "
                    "to any known portal type".format(resource))
    return portal_type
<SYSTEM_TASK:> Returns the single holding container object of this content type <END_TASK> <USER_TASK:> Description: def get_container_for(portal_type): """Returns the single holding container object of this content type :param portal_type: The portal type requested :type portal_type: string :returns: Folderish container where the portal type can be created :rtype: AT content object """
    container_paths = config.CONTAINER_PATHS_FOR_PORTAL_TYPES
    container_path = container_paths.get(portal_type)
    if container_path is None:
        return None
    portal_path = get_path(get_portal())
    return get_object_by_path("/".join([portal_path, container_path]))
<SYSTEM_TASK:> Calculate the endpoint for this object <END_TASK> <USER_TASK:> Description: def get_endpoint(brain_or_object, default=DEFAULT_ENDPOINT): """Calculate the endpoint for this object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: Endpoint for this object :rtype: string """
    portal_type = get_portal_type(brain_or_object)
    resource = portal_type_to_resource(portal_type)

    # Try to get the right namespaced endpoint
    endpoints = router.DefaultRouter.view_functions.keys()
    if resource in endpoints:
        return resource  # exact match
    endpoint_candidates = filter(lambda e: e.endswith(resource), endpoints)
    if len(endpoint_candidates) == 1:
        # only return the namespaced endpoint, if we have an exact match
        return endpoint_candidates[0]

    return default
<SYSTEM_TASK:> Find an object by a given record <END_TASK> <USER_TASK:> Description: def get_object_by_record(record): """Find an object by a given record Inspects the record to locate an object :param record: A dictionary representation of an object :type record: dict :returns: Found Object or None :rtype: object """
    # nothing to do here
    if not record:
        return None

    if record.get("uid"):
        return get_object_by_uid(record["uid"])
    if record.get("path"):
        return get_object_by_path(record["path"])
    if record.get("parent_path") and record.get("id"):
        path = "/".join([record["parent_path"], record["id"]])
        return get_object_by_path(path)

    logger.warn("get_object_by_record::No object found! record='%r'" % record)
    return None
<SYSTEM_TASK:> Find an object by a given physical path <END_TASK> <USER_TASK:> Description: def get_object_by_path(path): """Find an object by a given physical path :param path: The physical path of the object to find :type path: string :returns: Found Object or None :rtype: object """
    # nothing to do here
    if not isinstance(path, basestring):
        return None

    # path must be a string
    path = str(path)

    portal = get_portal()
    portal_path = get_path(portal)

    if path == portal_path:
        return portal

    if path.startswith(portal_path):
        segments = path.split("/")
        path = "/".join(segments[2:])

    try:
        return portal.restrictedTraverse(str(path))
    except (KeyError, AttributeError):
        fail(404, "No object could be found at {}".format(str(path)))
<SYSTEM_TASK:> Return all member ids of the portal. <END_TASK> <USER_TASK:> Description: def get_member_ids(): """Return all member ids of the portal. """
    pm = get_tool("portal_membership")
    member_ids = pm.listMemberIds()
    # How can it be possible to get member ids with None?
    return filter(lambda x: x, member_ids)
<SYSTEM_TASK:> Find the object by its UID <END_TASK> <USER_TASK:> Description: def find_objects(uid=None): """Find the object by its UID 1. get the object from the given uid 2. fetch objects specified in the request parameters 3. fetch objects located in the request body :param uid: The UID of the object to find :type uid: string :returns: List of found objects :rtype: list """
    # The objects to cut
    objects = []

    # get the object by the given uid or try to find it by the request
    # parameters
    obj = get_object_by_uid(uid) or get_object_by_request()

    if obj:
        objects.append(obj)
    else:
        # no uid -> go through the record items
        records = req.get_request_data()
        for record in records:
            # try to get the object by the given record
            obj = get_object_by_record(record)

            # no object found for this record
            if obj is None:
                continue
            objects.append(obj)

    return objects
<SYSTEM_TASK:> Locates a target container for the given portal_type and record <END_TASK> <USER_TASK:> Description: def find_target_container(portal_type, record): """Locates a target container for the given portal_type and record :param record: The dictionary representation of a content object :type record: dict :returns: folder which contains the object :rtype: object """
    portal_type = portal_type or record.get("portal_type")
    container = get_container_for(portal_type)
    if container:
        return container

    parent_uid = record.pop("parent_uid", None)
    parent_path = record.pop("parent_path", None)

    target = None

    # Try to find the target object
    if parent_uid:
        target = get_object_by_uid(parent_uid)
    elif parent_path:
        target = get_object_by_path(parent_path)
    else:
        fail(404, "No target UID/PATH information found")

    if not target:
        fail(404, "No target container found")

    return target
<SYSTEM_TASK:> Creates an object slug <END_TASK> <USER_TASK:> Description: def create_object(container, portal_type, **data): """Creates an object slug :returns: The new created content object :rtype: object """
if "id" in data: # always omit the id as senaite LIMS generates a proper one id = data.pop("id") logger.warn("Passed in ID '{}' omitted! Senaite LIMS " "generates a proper ID for you" .format(id)) try: # Special case for ARs # => return immediately w/o update if portal_type == "AnalysisRequest": obj = create_analysisrequest(container, **data) # Omit values which are already set through the helper data = u.omit(data, "SampleType", "Analyses") # Set the container as the client, as the AR lives in it data["Client"] = container # Standard content creation else: # we want just a minimun viable object and set the data later obj = api.create(container, portal_type) # obj = api.create(container, portal_type, **data) except Unauthorized: fail(401, "You are not allowed to create this content") # Update the object with the given data, but omit the id try: update_object_with_data(obj, data) except APIError: # Failure in creation process, delete the invalid object container.manage_delObjects(obj.id) # reraise the error raise return obj
<SYSTEM_TASK:> Update the content with the record data <END_TASK> <USER_TASK:> Description: def update_object_with_data(content, record): """Update the content with the record data :param content: A single folderish catalog brain or content object :type content: ATContentType/DexterityContentType/CatalogBrain :param record: The data to update :type record: dict :returns: The updated content object :rtype: object :raises: APIError, :class:`~plone.jsonapi.routes.exceptions.APIError` """
    # ensure we have a full content object
    content = get_object(content)

    # get the proper data manager
    dm = IDataManager(content)

    if dm is None:
        fail(400, "Update for this object is not allowed")

    # Iterate through record items
    for k, v in record.items():
        try:
            success = dm.set(k, v, **record)
        except Unauthorized:
            fail(401, "Not allowed to set the field '%s'" % k)
        except ValueError, exc:
            fail(400, str(exc))

        if not success:
            logger.warn("update_object_with_data::skipping key=%r", k)
            continue

        logger.debug("update_object_with_data::field %r updated", k)

    # Validate the entire content object
    invalid = validate_object(content, record)
    if invalid:
        fail(400, u.to_json(invalid))

    # do a wf transition
    if record.get("transition", None):
        t = record.get("transition")
        logger.debug(">>> Do Transition '%s' for Object %s",
                     t, content.getId())
        do_transition_for(content, t)

    # reindex the object
    content.reindexObject()
    return content
<SYSTEM_TASK:> Validate the entire object <END_TASK> <USER_TASK:> Description: def validate_object(brain_or_object, data): """Validate the entire object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :param data: The sharing dictionary as returned from the API :type data: dict :returns: invalidity status :rtype: dict """
    obj = get_object(brain_or_object)

    # Call the validator of AT Content Types
    if is_at_content(obj):
        return obj.validate(data=data)

    return {}
<SYSTEM_TASK:> Get the settings associated to the specified keyword or, if <END_TASK> <USER_TASK:> Description: def get_settings_by_keyword(keyword=None): """Get the settings associated to the specified keyword or, if keyword is None, get all the settings. :param keyword: settings to be retrieved :return: dictionary with the settings plus a key to identify from which keyword where retrieved. """
    settings = []
    if keyword is None:
        # iterate over all the schemas to return all settings
        for key, ischemas in CONTROLPANEL_INTERFACE_MAPPING.items():
            settings_from_ifaces = map(get_settings_from_interface, ischemas)
            settings_from_key = {k: v for d in settings_from_ifaces
                                 for k, v in d.items()}
            settings.append({key: settings_from_key,
                             "api_url": url_for(
                                 "senaite.jsonapi.v1.settings", key=key)})
        return settings
    # if keyword has value then get only the settings associated to the key
    settings_from_ifaces = map(get_settings_from_interface,
                               CONTROLPANEL_INTERFACE_MAPPING[keyword])
    settings_from_key = {k: v for d in settings_from_ifaces
                         for k, v in d.items()}
    settings.append({keyword: settings_from_key,
                     "api_url": url_for("senaite.jsonapi.v1.settings",
                                        key=keyword)})
    return settings
<SYSTEM_TASK:> Get the configuration settings associated to a list of schema <END_TASK> <USER_TASK:> Description: def get_settings_from_interface(iface): """Get the configuration settings associated to a list of schema interfaces :param iface: The schema interface from which we want to get its fields :return: Dictionary with iface name as key and as value a dictionary with the setting names (keys) linked to that schema and its values. """
    settings = {}
    schema_id = iface.getName()
    settings[schema_id] = {}
    schema = getAdapter(api.get_portal(), iface)
    for setting in getFieldNames(iface):
        value = getattr(schema, setting, None)
        if is_json_serializable(value):
            settings[schema_id][setting] = value
    return settings
<SYSTEM_TASK:> Assuming the number of rows is constant, work out the best <END_TASK> <USER_TASK:> Description: def calculate_columns(self): """Assuming the number of rows is constant, work out the best number of columns to use."""
    self.cols = int(math.ceil(len(self.elements) / float(self.rows)))
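A worked instance of the ceiling division above: 10 elements in 3 rows need 4 columns, since the last column may be only partially filled.

import math

elements, rows = 10, 3
cols = int(math.ceil(elements / float(rows)))  # ceil(3.33...) -> 4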
<SYSTEM_TASK:> Adds a rule to the grid. The row and column numbers are <END_TASK> <USER_TASK:> Description: def add_rule(self, start_col, start_row, end_col, end_row, width=0.5, color=(0,0,0)): """Adds a rule to the grid. The row and column numbers are those on the top left of the corresponding cell in the grid. So if the grid is 10x10, then the right hand edge of the grid will be column 10, and the bottom will be row 10. In other words there is one more rule-row and rule-column than there are cell rows and columns."""
    self.rules.append(
        (start_col, start_row, end_col, end_row, width, color)
        )
<SYSTEM_TASK:> Build one set of col widths or row heights. <END_TASK> <USER_TASK:> Description: def _compile_dimension_size(self, base_index, array, property, sized_elements): """Build one set of col widths or row heights."""
    sort_index = base_index + 2
    sized_elements.sort(key=lambda x: x[sort_index])
    for element_data in sized_elements:
        start, end = element_data[base_index], element_data[sort_index]
        end += start
        element, size = element_data[4:6]

        # Find the total current size of the set
        set_size = sum(array[start:end]) + (end-start-1)*self.margin

        # Work out the extra space we need
        extra_space_needed = getattr(size, property) - set_size
        if extra_space_needed < 0:
            continue

        # Distribute it among the entries
        extra_space_each = extra_space_needed / (end-start)
        for index in range(start, end):
            array[index] += extra_space_each
<SYSTEM_TASK:> Finds the minimum size of the grid. <END_TASK> <USER_TASK:> Description: def get_minimum_size(self, data): """Finds the minimum size of the grid."""
    # Get a list of elements with their sizes, so we don't have to
    # recalculate that each time.
    sized_elements = [
        (col, row, cols, rows, element, element.get_minimum_size(data))
        for col, row, cols, rows, element in self.elements
        ]

    # Create the heights and widths for each cell.
    self.col_widths = [0] * self.cols
    self.row_heights = [0] * self.rows
    self._compile_dimension_size(0, self.col_widths, 'x', sized_elements)
    self._compile_dimension_size(1, self.row_heights, 'y', sized_elements)

    # The final size is the total width and height
    om = 2*self.outside_margin
    return datatypes.Point(
        sum(self.col_widths) + (self.cols-1)*self.margin + om,
        sum(self.row_heights) + (self.rows-1)*self.margin + om
        )
<SYSTEM_TASK:> Draws the cells in grid. <END_TASK> <USER_TASK:> Description: def render(self, rect, data): """Draws the cells in grid."""
    size = self.get_minimum_size(data)

    # Find how much extra space we have.
    extra_width = rect.w - size.x
    extra_height = rect.h - size.y

    # Distribute the extra space into the correct rows and columns.
    if self.scaling_col is None or not 0 <= self.scaling_col < self.cols:
        width_per_col = extra_width / float(self.cols)
        col_widths = [
            width + width_per_col
            for width in self.col_widths
            ]
    else:
        col_widths = self.col_widths[:]
        col_widths[self.scaling_col] += extra_width

    if self.scaling_row is None or not 0 <= self.scaling_row < self.rows:
        height_per_row = extra_height / float(self.rows)
        row_heights = [
            height + height_per_row
            for height in self.row_heights
            ]
    else:
        row_heights = self.row_heights[:]
        row_heights[self.scaling_row] += extra_height

    # Find the (start, end) positions of each row and column.
    col_xs = []
    last_x = rect.left + self.outside_margin
    for width in col_widths:
        col_xs.append((last_x, last_x + width))
        last_x += width + self.margin
    row_ys = []
    last_y = rect.top - self.outside_margin
    for height in row_heights:
        row_ys.append((last_y, last_y - height))
        last_y -= height + self.margin

    # Now we can loop over the elements and have them rendered.
    for col, row, cols, rows, element in self.elements:
        x_start = col_xs[col][0]
        y_start = row_ys[row][0]
        x_end = col_xs[col+cols-1][1]
        y_end = row_ys[row+rows-1][1]
        element.render(datatypes.Rectangle(
            x_start, y_end, x_end-x_start, y_start-y_end
            ), data)

    # And finally we can draw the rules
    def _get_value(array, index, sign):
        """Returns the value of the index in the given array, where the
        array (like col_xs and row_ys) consists of start-end pairs of
        values."""
        if index <= 0:
            # Special case, it is the start of the first range
            return array[0][0]-self.outside_margin*sign
        elif index >= len(array):
            # Special case, it is the end of the last range
            return array[-1][1]+self.outside_margin*sign
        else:
            # Otherwise it is the blend of a start and end.
            return (array[index-1][1] + array[index][0])*0.5

    for start_col, start_row, end_col, end_row, width, color in self.rules:
        x_start = _get_value(col_xs, start_col, 1)
        y_start = _get_value(row_ys, start_row, -1)
        x_end = _get_value(col_xs, end_col, 1)
        y_end = _get_value(row_ys, end_row, -1)
        data['output'].line(
            x_start, y_start, x_end, y_end,
            stroke=color,
            stroke_width=width
            )
<SYSTEM_TASK:> First block in a list of one line strings containing <END_TASK> <USER_TASK:> Description: def get_block(name, data, newline="\n"): """ First block in a list of one line strings containing reStructuredText data. The result is as a joined string with the given newline, or a line generator if it's None. The BLOCK_START and BLOCK_END delimiters are selected with the given name and aren't included in the result. """
    lines = itertools.dropwhile(not_eq(BLOCK_START % name), data)
    gen = itertools.takewhile(not_eq(BLOCK_END % name), tail(lines))
    return gen if newline is None else newline.join(gen)
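A sketch of the expected delimiter convention. The BLOCK_START/BLOCK_END templates below are hypothetical stand-ins for illustration; the real ones live alongside get_block in its module:

# Hypothetical delimiter templates, assumed here for the example only.
BLOCK_START = ".. %s start"
BLOCK_END = ".. %s end"

data = [
    ".. summary start",
    "First line of the summary.",
    "Second line.",
    ".. summary end",
]
# get_block("summary", data) would return the lines between the
# delimiters, joined: "First line of the summary.\nSecond line."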
<SYSTEM_TASK:> Multiline string from a list of strings data, removing every <END_TASK> <USER_TASK:> Description: def all_but_blocks(names, data, newline="\n", remove_empty_next=True, remove_comments=True): """ Multiline string from a list of strings data, removing every block with any of the given names, as well as their delimiters. Removes the empty lines after BLOCK_END when ``remove_empty_next`` is True. Returns a joined string with the given newline, or a line generator if it's None. If desired, this function use ``commentless`` internally to remove the remaining comments. """
    @allow_implicit_stop
    def remove_blocks(name, iterable):
        start, end = BLOCK_START % name, BLOCK_END % name
        it = iter(iterable)
        while True:
            line = next(it)
            while line != start:
                yield line
                line = next(it)
            it = tail(itertools.dropwhile(not_eq(end), it))
            if remove_empty_next:
                it = itertools.dropwhile(lambda el: not el.strip(), it)
    if isinstance(names, str):
        names = [names]
    processors = [functools.partial(remove_blocks, name) for name in names]
    if remove_comments:
        processors.append(commentless)
    gen = functools.reduce(lambda result, func: func(result),
                           processors, data)
    return gen if newline is None else newline.join(gen)
<SYSTEM_TASK:> Generator that removes from a list of strings the double dot <END_TASK> <USER_TASK:> Description: def commentless(data): """ Generator that removes from a list of strings the double dot reStructuredText comments and its contents based on indentation, removing trailing empty lines after each comment as well. """
    it = iter(data)
    while True:
        line = next(it)
        while ":" in line or not line.lstrip().startswith(".."):
            yield line
            line = next(it)
        indent = indent_size(line)
        it = itertools.dropwhile(
            lambda el: indent_size(el) > indent or not el.strip(), it)
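A behavior sketch, assuming pre-PEP 479 generator semantics (the bare next() calls end the generator on exhaustion, as in the Python versions this code targets); indent_size is the module's helper for measuring leading whitespace:

source = [
    "Kept line",
    ".. a comment",
    "   still part of the comment",
    "",
    "Also kept",
]
# The ".." marker, its indented body, and the trailing blank line are
# all dropped, leaving: "Kept line", "Also kept".
print("\n".join(commentless(source)))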
<SYSTEM_TASK:> Depth-first search and postorder of a tree rooted at node j. <END_TASK> <USER_TASK:> Description: def __tdfs(j, k, head, next, post, stack): """ Depth-first search and postorder of a tree rooted at node j. """
    top = 0
    stack[0] = j
    while (top >= 0):
        p = stack[top]
        i = head[p]
        if i == -1:
            top -= 1
            post[k] = p
            k += 1
        else:
            head[p] = next[i]
            top += 1
            stack[top] = i
    return k
<SYSTEM_TASK:> Compute elimination tree from upper triangle of A. <END_TASK> <USER_TASK:> Description: def etree(A): """ Compute elimination tree from upper triangle of A. """
    assert isinstance(A,spmatrix), "A must be a sparse matrix"
    assert A.size[0] == A.size[1], "A must be a square matrix"

    n = A.size[0]
    cp,ri,_ = A.CCS
    parent = matrix(0,(n,1))
    w = matrix(0,(n,1))

    for k in range(n):
        parent[k] = k
        w[k] = -1
        for p in range(cp[k],cp[k+1]):
            i = ri[p]
            while ((not i == -1) and (i < k)):
                inext = w[i]
                w[i] = k
                if inext == -1:
                    parent[i] = k
                i = inext

    return parent
<SYSTEM_TASK:> Determine if j is leaf of i'th row subtree. <END_TASK> <USER_TASK:> Description: def __leaf(i, j, first, maxfirst, prevleaf, ancestor): """ Determine if j is leaf of i'th row subtree. """
    jleaf = 0
    if i<=j or first[j] <= maxfirst[i]:
        return -1, jleaf
    maxfirst[i] = first[j]
    jprev = prevleaf[i]
    prevleaf[i] = j
    if jprev == -1:
        jleaf = 1
    else:
        jleaf = 2
    if jleaf == 1:
        return i, jleaf
    q = jprev
    while q != ancestor[q]:
        q = ancestor[q]
    s = jprev
    while s != q:
        sparent = ancestor[s]
        ancestor[s] = q
        s = sparent
    return q, jleaf
<SYSTEM_TASK:> Compute column counts. <END_TASK> <USER_TASK:> Description: def counts(A, parent, post): """ Compute column counts. """
    n = A.size[0]
    colcount = matrix(0,(n,1))
    ancestor = matrix(range(n),(n,1))
    maxfirst = matrix(-1,(n,1))
    prevleaf = matrix(-1,(n,1))
    first = matrix(-1,(n,1))

    for k in range(n):
        j = post[k]
        if first[j] == -1:
            colcount[j] = 1
        else:
            colcount[j] = 0
        while j != -1 and first[j] == -1:
            first[j] = k
            j = parent[j]

    cp,ri,_ = A.CCS

    for k in range(n):
        j = post[k]
        if parent[j] != j:
            colcount[parent[j]] -= 1
        for p in range(cp[j],cp[j+1]):
            i = ri[p]
            if i <= j:
                continue
            q, jleaf = __leaf(i, j, first, maxfirst, prevleaf, ancestor)
            if jleaf >= 1:
                colcount[j] += 1
            if jleaf == 2:
                colcount[q] -= 1
        if parent[j] != j:
            ancestor[j] = parent[j]

    for j in range(n):
        if parent[j] != j:
            colcount[parent[j]] += colcount[j]

    return colcount
<SYSTEM_TASK:> Find supernodes and supernodal etree. <END_TASK> <USER_TASK:> Description: def pothen_sun(par, post, colcount): """ Find supernodes and supernodal etree. ARGUMENTS par parent array post array with post ordering colcount array with column counts RETURNS snpar supernodal parent structure flag integer vector of length n; if flag[i] < 0, then -flag[i] is the degree of the supernode with repr. vertex i; if flag[i] >= 0, then flag[i] is the repr. vertex to which node i belongs. """
    n = len(par)
    flag = matrix(-1, (n, 1))
    snpar = matrix(-1, (n, 1))
    snodes = n
    ch = {}

    for j in post:

        if par[j] in ch:
            ch[par[j]].append(j)
        else:
            ch[par[j]] = [j]

        mdeg = colcount[j] - 1

        if par[j] != j:
            if mdeg == colcount[par[j]] and flag[par[j]] == -1:
                # par[j] not assigned to supernode
                snodes -= 1
                if flag[j] < 0:   # j is a repr. vertex
                    flag[par[j]] = j
                    flag[j] -= 1
                else:             # j is not a repr. vertex
                    flag[par[j]] = flag[j]
                    flag[flag[j]] -= 1
        else:
            if flag[j] < 0:
                snpar[j] = j
            else:
                snpar[flag[j]] = flag[j]

        if flag[j] < 0:
            k = j
        else:
            k = flag[j]

        if j in ch:
            for i in ch[j]:
                if flag[i] < 0:
                    l = i
                else:
                    l = flag[i]
                if not l == k:
                    snpar[l] = k

    repr = matrix([i for i in range(n) if flag[i] < 0])
    deg = matrix([-flag[i] for i in range(n) if flag[i] < 0])

    # renumber etree with number of supernodes
    sn = matrix(-1, (n+1, 1))
    for k, r in enumerate(repr):
        sn[r] = k
    snpar = sn[snpar[repr]]

    return snpar, flag
<SYSTEM_TASK:> Find supernodes. <END_TASK> <USER_TASK:> Description: def supernodes(par, post, colcount): """ Find supernodes. ARGUMENTS par parent array post array with post ordering colcount array with column counts RETURNS snode array with supernodes; snode[snptr[k]:snptr[k+1]] contains the indices of supernode k snptr pointer array; snptr[k] is the index of the representative vertex of supernode k in the snode array snpar supernodal parent structure """
    snpar, flag = pothen_sun(par, post, colcount)
    n = len(par)
    N = len(snpar)

    snode = matrix(0, (n,1))
    snptr = matrix(0, (N+1,1))

    slist = [[] for i in range(n)]
    for i in range(n):
        f = flag[i]
        if f < 0:
            slist[i].append(i)
        else:
            slist[f].append(i)

    k = 0; j = 0
    for i,sl in enumerate(slist):
        nsl = len(sl)
        if nsl > 0:
            snode[k:k+nsl] = matrix(sl)
            snptr[j+1] = snptr[j] + nsl
            k += nsl
            j += 1

    return snode, snptr, snpar
<SYSTEM_TASK:> Supernodal amalgamation. <END_TASK> <USER_TASK:> Description: def amalgamate(colcount, snode, snptr, snpar, snpost, merge_function): """ Supernodal amalgamation. colcount, snode, snptr, snpar, snpost = ... amalgamate(colcount, snode, snptr, snpar, snpost, merge_function) PURPOSE Iterates over the clique tree in topological order and greedily merges a supernode with its parent if merge_function(|J_{par(k)}|, |J_k|, |N_{par(k)}|, |N_k|) returns True. ARGUMENTS colcount vector with column counts snode vector with supernodes snptr vector with offsets snpar vector with supernodal parent indices snpost vector with supernodal post ordering merge_function function RETURNS colcount vector with amalgamated column counts snode vector with amalgamated supernodes snptr vector with amalgamated offsets snpar vector with amalgamated supernodal parent indices snpost vector with amalgamated supernodal post ordering """
    N = len(snpost)
    ch = {}
    for j in snpost:
        if snpar[j] in ch:
            ch[snpar[j]].append(j)
        else:
            ch[snpar[j]] = [j]

    snlist = [snode[snptr[k]:snptr[k+1]] for k in range(N)]

    snpar_ = +snpar
    colcount_ = +colcount
    Ns = N

    for k in snpost:
        if snpar_[k] != k:
            colk = colcount_[snlist[k][0]]
            colp = colcount_[snlist[snpar_[k]][0]]
            nk = len(snlist[k])
            np = len(snlist[snpar_[k]])
            if merge_function and merge_function(colp,colk,np,nk):
                # merge supernode k and snpar[k]
                snlist[snpar_[k]] = matrix(sorted(list(snlist[k]) +
                                                  list(snlist[snpar_[k]])))
                snlist[k] = None
                colcount_[snlist[snpar_[k]][0]] = colp + nk
                Ns -= 1
                if k in ch:
                    for c in ch[k]:
                        snpar_[c] = snpar_[k]
                    ch[snpar_[k]] += ch[k]
                snpar_[k] = k

    L = [i for i,s in enumerate(snlist) if s is not None]
    snptr_ = matrix(0,(len(L)+1,1))
    snode_ = +snode
    for i,l in enumerate(L):
        snptr_[i+1] = snptr_[i] + len(snlist[l])
        snode_[snptr_[i]:snptr_[i+1]] = snlist[l]

    snpar_ = snpar_[L]
    for i in range(len(snpar_)):
        snpar_[i] = L.index(snpar_[i])
    snpost_ = post_order(snpar_)

    return colcount_, snode_, snptr_, snpar_, snpost_
<SYSTEM_TASK:> Compute filled pattern. <END_TASK> <USER_TASK:> Description: def embed(A, colcount, snode, snptr, snpar, snpost): """ Compute filled pattern. colptr, rowidx = embed(A, colcount, snode, snptr, snpar, snpost) PURPOSE Computes rowindices and column pointer for representative vertices in supernodes. ARGUMENTS A sparse matrix colcount vector with column counts snode vector with supernodes snptr vector with offsets snpar vector with supernodal parent indices snpost vector with supernodal post ordering RETURNS colptr vector with offsets rowidx vector with rowindices """
    Alo = tril(A)
    cp,ri,_ = Alo.CCS
    N = len(snpar)

    # colptr for compressed cholesky factor
    colptr = matrix(0,(N+1,1))
    for k in range(N):
        colptr[k+1] = colptr[k] + colcount[snode[snptr[k]]]
    rowidx = matrix(-1,(colptr[-1],1))
    cnnz = matrix(0,(N,1))

    # compute compressed sparse representation
    for k in range(N):
        p = snptr[k]
        Nk = snptr[k+1]-p
        nk = cp[snode[p]+1] - cp[snode[p]]
        rowidx[colptr[k]:colptr[k]+nk] = ri[cp[snode[p]]:cp[snode[p]+1]]
        cnnz[k] = nk
        for i in range(1,Nk):
            nk = cp[snode[p+i]+1]-cp[snode[p+i]]
            cnnz[k] = lmerge(rowidx, ri, colptr[k],
                             cp[snode[p+i]], cnnz[k], nk)

    for k in snpost:
        p = snptr[k]
        Nk = snptr[k+1]-p
        if snpar[k] != k:
            cnnz[snpar[k]] = lmerge(rowidx, rowidx, colptr[snpar[k]],
                                    colptr[k]+Nk, cnnz[snpar[k]],
                                    cnnz[k]-Nk)

    return colptr, rowidx
<SYSTEM_TASK:> Compute relative indices of update matrices in frontal matrix of parent. <END_TASK> <USER_TASK:> Description: def relative_idx(colptr, rowidx, snptr, snpar): """ Compute relative indices of update matrices in frontal matrix of parent. """
    relptr = matrix(0, (len(snptr),1))
    relidx = matrix(-1, (colptr[-1],1))

    def lfind(a,b):
        i = 0
        ret = +a
        for k in range(len(a)):
            while a[k] != b[i]:
                i += 1
            ret[k] = i
            i += 1
        return ret

    for k in range(len(snpar)):
        p = snpar[k]
        relptr[k+1] = relptr[k]
        if p != -1:
            nk = snptr[k+1] - snptr[k]
            relptr[k+1] += colptr[k+1] - colptr[k] - nk
            relidx[relptr[k]:relptr[k+1]] = lfind(
                rowidx[colptr[k]+nk:colptr[k+1]],
                rowidx[colptr[p]:colptr[p+1]])

    return relptr, relidx[:relptr[k+1]]
<SYSTEM_TASK:> Checks whether an ordering is a perfect elimination order. <END_TASK> <USER_TASK:> Description: def peo(A, p): """ Checks whether an ordering is a perfect elimination order. Returns `True` if the permutation :math:`p` is a perfect elimination order for a Cholesky factorization :math:`PAP^T = LL^T`. Only the lower triangular part of :math:`A` is accessed. :param A: :py:class:`spmatrix` :param p: :py:class:`matrix` or :class:`list` of length `A.size[0]` """
    n = A.size[0]
    assert type(A) == spmatrix, "A must be a sparse matrix"
    assert A.size[1] == n, "A must be a square matrix"
    assert len(p) == n, "length of p must be equal to the order of A"
    if isinstance(p, list):
        p = matrix(p)

    As = symmetrize(A)
    cp,ri,_ = As.CCS

    # compute inverse permutation array
    ip = matrix(0,(n,1))
    ip[p] = matrix(range(n),(n,1))

    # test set inclusion
    for k in range(n):
        v = p[k]   # next vertex to be eliminated

        # indices of neighbors that correspond to strictly lower
        # triangular elements in reordered pattern
        r = set([rj for rj in ri[cp[v]:cp[v+1]] if ip[rj] > k])

        for rj in r:
            if not r.issubset(set(ri[cp[rj]:cp[rj+1]])):
                return False

    return True
<SYSTEM_TASK:> Returns the number of lower-triangular nonzeros. <END_TASK> <USER_TASK:> Description: def nnz(self): """ Returns the number of lower-triangular nonzeros. """
    nnz = 0
    for k in range(len(self.snpost)):
        nn = self.snptr[k+1]-self.snptr[k]
        na = self.relptr[k+1]-self.relptr[k]
        nnz += nn*(nn+1)/2 + nn*na
    return nnz
<SYSTEM_TASK:> Returns a list of cliques <END_TASK> <USER_TASK:> Description: def cliques(self, reordered = True): """ Returns a list of cliques """
    if reordered:
        return [list(self.snrowidx[self.sncolptr[k]:self.sncolptr[k+1]])
                for k in range(self.Nsn)]
    else:
        return [list(self.__p[self.snrowidx[self.sncolptr[k]:
                                            self.sncolptr[k+1]]])
                for k in range(self.Nsn)]
<SYSTEM_TASK:> Returns a list of separator sets <END_TASK> <USER_TASK:> Description: def separators(self, reordered = True): """ Returns a list of separator sets """
    if reordered:
        return [list(self.snrowidx[self.sncolptr[k] +
                                   self.snptr[k+1]-self.snptr[k]:
                                   self.sncolptr[k+1]])
                for k in range(self.Nsn)]
    else:
        return [list(self.__p[self.snrowidx[self.sncolptr[k] +
                                            self.snptr[k+1]-self.snptr[k]:
                                            self.sncolptr[k+1]]])
                for k in range(self.Nsn)]
<SYSTEM_TASK:> Returns a list of supernode sets <END_TASK> <USER_TASK:> Description: def supernodes(self, reordered = True): """ Returns a list of supernode sets """
    if reordered:
        return [list(self.snode[self.snptr[k]:self.snptr[k+1]])
                for k in range(self.Nsn)]
    else:
        return [list(self.__p[self.snode[self.snptr[k]:self.snptr[k+1]]])
                for k in range(self.Nsn)]
<SYSTEM_TASK:> Returns a vector with the diagonal elements of the matrix. <END_TASK> <USER_TASK:> Description: def diag(self, reordered = True): """ Returns a vector with the diagonal elements of the matrix. """
    sncolptr = self.symb.sncolptr
    snptr = self.symb.snptr
    snode = self.symb.snode
    blkptr = self.symb.blkptr

    D = matrix(0.0,(self.symb.n,1))
    for k in range(self.symb.Nsn):
        nn = snptr[k+1]-snptr[k]
        w = sncolptr[k+1]-sncolptr[k]
        for i in range(nn):
            D[snode[snptr[k]+i]] = self.blkval[blkptr[k]+i*w+i]
    if reordered:
        return D
    else:
        return D[self.symb.ip]
<SYSTEM_TASK:> Creates a deep linking url <END_TASK> <USER_TASK:> Description: def create_deep_link_url(self, data=None, alias=None, type=0, duration=None, identity=None, tags=None, campaign=None, feature=None, channel=None, stage=None, skip_api_call=False): """ Creates a deep linking url See the URL https://dev.branch.io/references/http_api/#creating-a-deep-linking-url You can also use this method to bulk create deep links by setting "skip_api_call=True", collecting the parameters returned by each call into an array, and passing that array to "create_deep_linking_urls" :return: params or the response """
url = "/v1/url" method = "POST" params = {} # Check Params self._check_param("data", data, params, type=dict) self._check_param("alias", alias, params, type=(binary_type, text_type)) self._check_param("type", type, params, type=int, lte=2, gte=0) self._check_param("duration", duration, params, type=int) self._check_param("identity", identity, params, type=(binary_type, text_type), max_length=127) self._check_param("tags", tags, params, type=list, sub_type=(binary_type, text_type), sub_max_length=64) self._check_param("campaign", campaign, params, type=(binary_type, text_type), max_length=128) self._check_param("feature", feature, params, type=(binary_type, text_type), max_length=128) self._check_param("channel", channel, params, type=(binary_type, text_type), max_length=128) self._check_param("stage", stage, params, type=(binary_type, text_type), max_length=128) if skip_api_call is True: return params else: self._check_param("branch_key", self.branch_key, params, optional=False, type=(binary_type, text_type)) return self.make_api_call(method, url, json_params=params)
<SYSTEM_TASK:> Class decorator for automatically adding enum values. <END_TASK> <USER_TASK:> Description: def make_enum(lib_prefix, enum_prefix=''): """Class decorator for automatically adding enum values. The values are read directly from the :attr:`spotify.lib` CFFI wrapper around libspotify. All values starting with ``lib_prefix`` are added. The ``lib_prefix`` is stripped from the name. Optionally, ``enum_prefix`` can be specified to add a prefix to all the names. """
    def wrapper(cls):
        for attr in dir(lib):
            if attr.startswith(lib_prefix):
                name = attr.replace(lib_prefix, enum_prefix)
                cls.add(name, getattr(lib, attr))
        return cls
    return wrapper
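A usage sketch, assuming an IntEnum base class that provides the add classmethod shown later in this file and a libspotify constant prefix:

# With lib exposing e.g. SP_CONNECTION_STATE_LOGGED_IN, the decorator
# would add a ConnectionState.LOGGED_IN attribute, and so on.
@make_enum('SP_CONNECTION_STATE_')
class ConnectionState(IntEnum):
    pass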
<SYSTEM_TASK:> Converts bytes, unicode, and C char arrays to bytes. <END_TASK> <USER_TASK:> Description: def to_bytes(value): """Converts bytes, unicode, and C char arrays to bytes. Unicode strings are encoded to UTF-8. """
    if isinstance(value, text_type):
        return value.encode('utf-8')
    elif isinstance(value, ffi.CData):
        return ffi.string(value)
    elif isinstance(value, binary_type):
        return value
    else:
        raise ValueError('Value must be text, bytes, or char[]')
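A quick round trip of the text and bytes branches (the CData branch needs a live FFI object, so it is omitted here):

# Unicode text is UTF-8 encoded; bytes pass through unchanged.
assert to_bytes(u'\xe6\xf8\xe5') == b'\xc3\xa6\xc3\xb8\xc3\xa5'
assert to_bytes(b'raw') == b'raw'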
<SYSTEM_TASK:> Converts bytes, unicode, and C char arrays to unicode strings. <END_TASK> <USER_TASK:> Description: def to_unicode(value): """Converts bytes, unicode, and C char arrays to unicode strings. Bytes and C char arrays are decoded from UTF-8. """
    if isinstance(value, ffi.CData):
        return ffi.string(value).decode('utf-8')
    elif isinstance(value, binary_type):
        return value.decode('utf-8')
    elif isinstance(value, text_type):
        return value
    else:
        raise ValueError('Value must be text, bytes, or char[]')
<SYSTEM_TASK:> Converts C char arrays to unicode and C NULL values to None. <END_TASK> <USER_TASK:> Description: def to_unicode_or_none(value): """Converts C char arrays to unicode and C NULL values to None. C char arrays are decoded from UTF-8. """
if value == ffi.NULL: return None elif isinstance(value, ffi.CData): return ffi.string(value).decode('utf-8') else: raise ValueError('Value must be char[] or NULL')
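A quick round trip through the three helpers above, assuming the module's ffi, text_type, and binary_type bindings as in pyspotify:

raw = to_bytes(u'æøå')            # b'\xc3\xa6\xc3\xb8\xc3\xa5' -- UTF-8 encoded
text = to_unicode(raw)            # u'æøå'
c_str = ffi.new('char[]', raw)    # C char array via cffi
to_unicode(c_str)                 # u'æøå' -- char[] is decoded too
to_unicode_or_none(ffi.NULL)      # None -- C NULL maps to None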
<SYSTEM_TASK:> Register a ``listener`` to be called on ``event``. <END_TASK> <USER_TASK:> Description: def on(self, event, listener, *user_args): """Register a ``listener`` to be called on ``event``. The listener will be called with any extra arguments passed to :meth:`emit` first, and then the extra arguments passed to :meth:`on` last. If the listener function returns :class:`False`, it is removed and will not be called the next time the ``event`` is emitted. """
self._listeners[event].append( _Listener(callback=listener, user_args=user_args))
<SYSTEM_TASK:> Remove a ``listener`` that was to be called on ``event``. <END_TASK> <USER_TASK:> Description: def off(self, event=None, listener=None): """Remove a ``listener`` that was to be called on ``event``. If ``listener`` is :class:`None`, all listeners for the given ``event`` will be removed. If ``event`` is :class:`None`, all listeners for all events on this object will be removed. """
if event is None: events = self._listeners.keys() else: events = [event] for event in events: if listener is None: self._listeners[event] = [] else: self._listeners[event] = [ l for l in self._listeners[event] if l.callback != listener]
<SYSTEM_TASK:> Call the registered listeners for ``event``. <END_TASK> <USER_TASK:> Description: def emit(self, event, *event_args): """Call the registered listeners for ``event``. The listeners will be called with any extra arguments passed to :meth:`emit` first, and then the extra arguments passed to :meth:`on` last. """
listeners = self._listeners[event][:] for listener in listeners: args = list(event_args) + list(listener.user_args) result = listener.callback(*args) if result is False: self.off(event, listener.callback)
<SYSTEM_TASK:> Return the number of listeners for ``event``. <END_TASK> <USER_TASK:> Description: def num_listeners(self, event=None): """Return the number of listeners for ``event``. Return the total number of listeners for all events on this object if ``event`` is :class:`None`. """
if event is not None: return len(self._listeners[event]) else: return sum(len(l) for l in self._listeners.values())
<SYSTEM_TASK:> Call the single registered listener for ``event``. <END_TASK> <USER_TASK:> Description: def call(self, event, *event_args): """Call the single registered listener for ``event``. The listener will be called with any extra arguments passed to :meth:`call` first, and then the extra arguments passed to :meth:`on` last. Raises :exc:`AssertionError` if there are zero or multiple listeners for ``event``. Returns the listener's return value on success. """
# XXX It would be a lot better for debugging if this error was raised # when registering the second listener instead of when the event is # emitted. assert self.num_listeners(event) == 1, ( 'Expected exactly 1 event listener, found %d listeners' % self.num_listeners(event)) listener = self._listeners[event][0] args = list(event_args) + list(listener.user_args) return listener.callback(*args)
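Taken together, on/off/emit/num_listeners/call form a small event-emitter protocol. A self-contained usage sketch, assuming the enclosing class can be instantiated directly (pyspotify names it EventEmitter):

emitter = EventEmitter()

def on_track_changed(track, source):   # event args first, user args last
    print('now playing %s via %s' % (track, source))
    return False                       # returning False unsubscribes the listener

emitter.on('track_changed', on_track_changed, 'radio')  # 'radio' is a user arg
emitter.emit('track_changed', 'Song A')    # prints, then removes the listener
emitter.num_listeners('track_changed')     # 0 -- the listener removed itself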
<SYSTEM_TASK:> Add a name-value pair to the enumeration. <END_TASK> <USER_TASK:> Description: def add(cls, name, value): """Add a name-value pair to the enumeration."""
attr = cls(value) attr._name = name setattr(cls, name, attr)
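This add classmethod is the hook that make_enum (shown earlier) drives for each matching constant. A self-contained sketch with a stand-in lib namespace, assuming make_enum's lib reference resolves to the stand-in below; the int-subclass base is an assumption standing in for pyspotify's enum helper:

import types

# Stand-in for the real spotify.lib CFFI namespace.
lib = types.SimpleNamespace(SP_CONNECTION_STATE_LOGGED_OUT=0,
                            SP_CONNECTION_STATE_LOGGED_IN=1)

class IntEnum(int):
    """Minimal enum base exposing the add() classmethod above."""
    _name = None
    def __repr__(self):
        return '<%s.%s: %d>' % (type(self).__name__, self._name, int(self))
    @classmethod
    def add(cls, name, value):
        attr = cls(value)
        attr._name = name
        setattr(cls, name, attr)

@make_enum('SP_CONNECTION_STATE_')
class ConnectionState(IntEnum):
    pass

ConnectionState.LOGGED_IN   # <ConnectionState.LOGGED_IN: 1>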
<SYSTEM_TASK:> Minimum height is the total height + margins, minimum width <END_TASK> <USER_TASK:> Description: def get_minimum_size(self, data): """ Minimum height is the total height + margins, minimum width is the largest width. """
min_width = 0 height = 0 for element in self.elements: size = element.get_minimum_size(data) min_width = max(min_width, size.x) height += size.y height += (len(self.elements)-1)*self.margin return datatypes.Point(min_width, height)
<SYSTEM_TASK:> Minimum width is the total width + margins, minimum height <END_TASK> <USER_TASK:> Description: def get_minimum_size(self, data): """Minimum width is the total width + margins, minimum height is the largest height."""
width = 0 min_height = 0 for element in self.elements: size = element.get_minimum_size(data) min_height = max(min_height, size.y) width += size.x width += (len(self.elements)-1)*self.margin return datatypes.Point(width, min_height)
<SYSTEM_TASK:> Draws the columns. <END_TASK> <USER_TASK:> Description: def render(self, rect, data): """Draws the columns."""
num_elements = len(self.elements) col_width = (rect.w-self.margin*(num_elements-1)) / float(num_elements) x = rect.x for element in self.elements: if element is not None: element.render(datatypes.Rectangle( x, rect.y, col_width, rect.h ), data) x += col_width + self.margin
<SYSTEM_TASK:> The minimum height is the number of rows multiplied by the <END_TASK> <USER_TASK:> Description: def get_minimum_size(self, data): """The minimum height is the number of rows multiplied by the height of the tallest row, plus margins."""
min_width = 0 min_height = 0 for element in self.elements: size = ( datatypes.Point(0, 0) if element is None else element.get_minimum_size(data) ) min_height = max(min_height, size.y) min_width = max(min_width, size.x) num_elements = len(self.elements) height = min_height * num_elements + self.margin * (num_elements-1) return datatypes.Point(min_width, height)
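Since all of these managers share the element/get_minimum_size protocol, the composition can be sketched with a stub element; the VerticalLM name and its margin/elements constructor are assumptions about the surrounding layout library, not confirmed here:

class FixedElement(object):
    """Stub element reporting a constant minimum size."""
    def __init__(self, w, h):
        self._size = datatypes.Point(w, h)
    def get_minimum_size(self, data):
        return self._size

# Hypothetical: stack a 100x20 and an 80x40 element with a 6pt margin.
stack = VerticalLM(margin=6,
                   elements=[FixedElement(100, 20), FixedElement(80, 40)])
stack.get_minimum_size(None)   # Point(100, 66) -- widest width, summed heights + margin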
<SYSTEM_TASK:> Re-implementation of the core Sass import mechanism, which looks for <END_TASK> <USER_TASK:> Description: def handle_import(self, name, compilation, rule): """ Re-implementation of the core Sass import mechanism, which looks for files using the staticfiles storage and staticfiles finders. """
original_path = PurePath(name) search_exts = list(compilation.compiler.dynamic_extensions) if original_path.suffix and original_path.suffix in search_exts: basename = original_path.stem else: basename = original_path.name if original_path.is_absolute(): # Remove the beginning slash search_path = original_path.relative_to('/').parent elif rule.source_file.origin: search_path = rule.source_file.origin if original_path.parent: search_path = os.path.normpath(str(search_path / original_path.parent)) else: search_path = original_path.parent for prefix, suffix in product(('_', ''), search_exts): filename = PurePath(prefix + basename + suffix) full_filename, storage = get_file_and_storage(str(search_path / filename)) if full_filename: with storage.open(full_filename) as f: return SourceFile.from_file(f, origin=search_path, relpath=filename)
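The probe order in the loop above (underscore-prefixed Sass partials before plain names, across every candidate extension) is easy to see in isolation:

from itertools import product
from pathlib import PurePath

basename, search_exts = 'variables', ['.scss', '.sass']
candidates = [PurePath(prefix + basename + suffix)
              for prefix, suffix in product(('_', ''), search_exts)]
# -> _variables.scss, _variables.sass, variables.scss, variables.sass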
<SYSTEM_TASK:> Takes the help text supplied as a doc string and extracts the <END_TASK> <USER_TASK:> Description: def parser_help_text(help_text): """Takes the help text supplied as a doc string and extracts the description and any param arguments."""
if help_text is None: return None, {} main_text = '' params_help = {} for line in help_text.splitlines(): line = line.strip() match = re.search(r':\s*param\s*(?P<param>\w+)\s*:(?P<help>.*)$', line) if match: params_help[match.group('param')] = match.group('help').strip() else: main_text += line + ' ' main_text = main_text.strip() return main_text, params_help
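For example (return values shown in comments):

description, params = parser_help_text(
    """Upload a file to the server.

    :param path: local file to upload
    :param retries: number of attempts before giving up
    """)
# description == 'Upload a file to the server.'
# params == {'path': 'local file to upload',
#            'retries': 'number of attempts before giving up'}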
<SYSTEM_TASK:> This function looks at the default value and returns the type that <END_TASK> <USER_TASK:> Description: def calculate_default_type(arg, has_default, default_value, params_help): """This function looks at the default value and returns the argument name and the keyword arguments (type, action, default, help) that should be supplied to the parser"""
positional = True arg_params = {} arg_name = arg # Check to see if we have help text for this argument try: arg_params['help'] = params_help[arg_name] except KeyError: pass # If we have a default value, then this is not positional if has_default: positional = False # Special case when a base type is supplied if default_value in (int, float): positional = True # For boolean options, change the action if default_value is True: arg_params['action'] = 'store_false' elif default_value is False: arg_params['action'] = 'store_true' # Finally, check if the default value is an integer or a float # and set the arg type on the item if type(default_value) in (int, float): arg_params['type'] = type(default_value) # Update the arg_name if positional: if arg_name.startswith('_'): arg_params['nargs'] = '?' arg_params['default'] = None arg_params['metavar'] = arg_name.lstrip('_') #arg_name = arg_name.lstrip('_') else: arg_params['default'] = default_value if len(arg_name) == 1: arg_name = '-' + arg_name else: arg_name = '--' + arg_name return arg_name, arg_params
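The three main cases (no default, boolean default, numeric default) map out as follows:

calculate_default_type('path', False, None, {})
# ('path', {})  -- required, so it stays positional

calculate_default_type('verbose', True, False, {})
# ('--verbose', {'action': 'store_true', 'default': False})

calculate_default_type('retries', True, 3, {})
# ('--retries', {'type': int, 'default': 3})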
<SYSTEM_TASK:> This function parses a function and adds its arguments to the supplied parser <END_TASK> <USER_TASK:> Description: def function_parser(function, parser): """This function parses a function and adds its arguments to the supplied parser"""
# Store the function pointer on the parser for later use parser.set_defaults(func=function) # Get the help text and parse it for params help_text = inspect.getdoc(function) main_text, params_help = parser_help_text(help_text) # Get the function information args, varargs, keywords, defaults = inspect.getargspec(function) if args is None: args = [] if defaults is None: defaults = [] # If the function is a class method, it will have a self that needs to be removed if len(args) and args[0] == 'self': args.pop(0) # Work out whether the argument has a default by subtracting the length # of the default args from the number of arguments num_required_args = len(args) - len(defaults) for idx, arg in enumerate(args): if idx < num_required_args: arg_name, arg_params = calculate_default_type(arg, False, None, params_help) else: default_value = defaults[idx - num_required_args] arg_name, arg_params = calculate_default_type(arg, True, default_value, params_help) parser.add_argument(arg_name, **arg_params)
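End to end, the two helpers above turn an ordinary function into a CLI entry point:

import argparse

def greet(name, shout=False, times=1):
    """Print a greeting.

    :param name: who to greet
    :param times: how many times to repeat it
    """
    for _ in range(times):
        print(('HELLO %s' if shout else 'hello %s') % name)

parser = argparse.ArgumentParser()
function_parser(greet, parser)
args = parser.parse_args(['world', '--shout', '--times', '2'])
args.func(args.name, args.shout, args.times)   # prints 'HELLO world' twice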
<SYSTEM_TASK:> Convert a URI reference relative to a base URI to its target URI <END_TASK> <USER_TASK:> Description: def urijoin(base, ref, strict=False): """Convert a URI reference relative to a base URI to its target URI string. """
if isinstance(base, type(ref)): return urisplit(base).transform(ref, strict).geturi() elif isinstance(base, bytes): return urisplit(base.decode()).transform(ref, strict).geturi() else: return urisplit(base).transform(ref.decode(), strict).geturi()
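Resolution follows RFC 3986; results shown in comments:

urijoin('http://example.com/a/b/c', 'd')         # 'http://example.com/a/b/d'
urijoin('http://example.com/a/b/c', '../g')      # 'http://example.com/a/g'
urijoin('http://example.com/a/b/c', '//other/')  # 'http://other/'
urijoin(b'http://example.com/', b'path')         # b'http://example.com/path'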
<SYSTEM_TASK:> Send a unicode object as reply to the most recently-issued command <END_TASK> <USER_TASK:> Description: def send_response(self, response): """Send a unicode object as reply to the most recently-issued command """
response_bytes = response.encode(config.CODEC)
        log.debug("About to send response: %r", response_bytes)
        self.socket.send(response_bytes)
<SYSTEM_TASK:> Break a multi word command up into an action and its parameters <END_TASK> <USER_TASK:> Description: def parse_command(self, command): """Break a multi word command up into an action and its parameters """
words = shlex.split(command.lower()) return words[0], words[1:]
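Because the whole command is lower-cased before shlex splits it, quoted parameters stay together as single words but lose their case:

self.parse_command('PLAY "My Song.mp3" loud')
# ('play', ['my song.mp3', 'loud'])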
<SYSTEM_TASK:> Pass a command along with its params to a suitable handler <END_TASK> <USER_TASK:> Description: def dispatch(self, command): """Pass a command along with its params to a suitable handler If the command is blank, succeed silently If the command has no handler, succeed silently If the handler raises an exception, fail with the exception message """
log.info("Dispatch on %s", command) if not command: return "OK" action, params = self.parse_command(command) log.debug("Action = %s, Params = %s", action, params) try: function = getattr(self, "do_" + action, None) if function: function(*params) return "OK" except KeyboardInterrupt: raise except Exception as exc: log.exception("Problem executing action %s", action) return "ERROR: %s" % exc
<SYSTEM_TASK:> Pass a command directly to the current output processor <END_TASK> <USER_TASK:> Description: def do_output(self, *args): """Pass a command directly to the current output processor """
if args: action, params = args[0], args[1:] log.debug("Pass %s directly to output with %s", action, params) function = getattr(self.output, "do_" + action, None) if function: function(*params)
<SYSTEM_TASK:> Generate a series of interval lengths, in ms, which <END_TASK> <USER_TASK:> Description: def intervals_ms(self, timeout_ms): """Generate a series of interval lengths, in ms, which will add up to the number of ms in timeout_ms. If timeout_ms is None, keep returning intervals forever. """
if timeout_ms is config.FOREVER: while True: yield self.try_length_ms else: whole_intervals, part_interval = divmod(timeout_ms, self.try_length_ms) for _ in range(whole_intervals): yield self.try_length_ms yield part_interval
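For example, assuming try_length_ms is 1000 (the actual value lives on the instance):

list(self.intervals_ms(2500))   # [1000, 1000, 500]
list(self.intervals_ms(800))    # [800] -- divmod yields no whole intervals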
<SYSTEM_TASK:> Check for socket activity and either return what's <END_TASK> <USER_TASK:> Description: def _receive_with_timeout(self, socket, timeout_s, use_multipart=False): """Check for socket activity and either return what's received on the socket or time out if timeout_s expires without anything on the socket. This is implemented in loops of self.try_length_ms milliseconds to allow Ctrl-C handling to take place. """
if timeout_s is config.FOREVER: timeout_ms = config.FOREVER else: timeout_ms = int(1000 * timeout_s) poller = zmq.Poller() poller.register(socket, zmq.POLLIN) ms_so_far = 0 try: for interval_ms in self.intervals_ms(timeout_ms): sockets = dict(poller.poll(interval_ms)) ms_so_far += interval_ms if socket in sockets: if use_multipart: return socket.recv_multipart() else: return socket.recv() else: raise core.SocketTimedOutError(timeout_s) except KeyboardInterrupt: raise core.SocketInterruptedError(ms_so_far / 1000.0)
<SYSTEM_TASK:> Get boxes and lines using labels as id. <END_TASK> <USER_TASK:> Description: def get_boxes_and_lines(ax, labels): """Get boxes and lines using labels as id."""
labels_u, labels_u_line = get_labels(labels)
    boxes = ax.findobj(mpl.text.Annotation)
    lines = ax.findobj(mpl.lines.Line2D)
    lineid_boxes = []
    lineid_lines = []
    for box in boxes:
        if box.get_label() not in labels_u:
            # This box was either not added by lineid_plot or has no label.
            continue
        lineid_boxes.append(box)
    for line in lines:
        if line.get_label() not in labels_u_line:
            # This line was either not added by lineid_plot or has no label.
            continue
        lineid_lines.append(line)
    return lineid_boxes, lineid_lines
<SYSTEM_TASK:> Color text boxes. <END_TASK> <USER_TASK:> Description: def color_text_boxes(ax, labels, colors, color_arrow=True): """Color text boxes. Instead of this function, one can pass annotate_kwargs and plot_kwargs to plot_line_ids function. """
assert len(labels) == len(colors), \
        "Equal no. of colors and labels must be given"
    boxes = ax.findobj(mpl.text.Annotation)
    box_labels = lineid_plot.unique_labels(labels)
    for box in boxes:
        l = box.get_label()
        try:
            loc = box_labels.index(l)
        except ValueError:
            continue  # No changes for this box
        box.set_color(colors[loc])
        if color_arrow:
            box.arrow_patch.set_color(colors[loc])
    ax.figure.canvas.draw()
<SYSTEM_TASK:> Color lines. <END_TASK> <USER_TASK:> Description: def color_lines(ax, labels, colors): """Color lines. Instead of this function, one can pass annotate_kwargs and plot_kwargs to plot_line_ids function. """
assert len(labels) == len(colors), \
        "Equal no. of colors and labels must be given"
    lines = ax.findobj(mpl.lines.Line2D)
    line_labels = [i + "_line" for i in lineid_plot.unique_labels(labels)]
    for line in lines:
        l = line.get_label()
        try:
            loc = line_labels.index(l)
        except ValueError:
            continue  # No changes for this line
        line.set_color(colors[loc])
    ax.figure.canvas.draw()
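A sketch of the intended workflow: draw labelled lines with lineid_plot, then recolor them with the two helpers above. The plot_line_ids call reflects lineid_plot's documented usage and is assumed, not taken from this source:

import numpy as np
import matplotlib.pyplot as plt
import lineid_plot

wave = np.linspace(4000, 4400, 200)
flux = np.random.normal(1.0, 0.01, 200)
line_wave = [4101.76, 4340.47]
labels = ['H-delta', 'H-gamma']

fig, ax = lineid_plot.plot_line_ids(wave, flux, line_wave, labels)
color_text_boxes(ax, labels, ['red', 'blue'])
color_lines(ax, labels, ['red', 'blue'])
plt.show()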
<SYSTEM_TASK:> Returns the tool-specific dict or None if it does not exist for the defined tool <END_TASK> <USER_TASK:> Description: def get_tool_definition(self, target): """ Returns the tool-specific dict, or None if it does not exist for the defined tool """
if target not in self.targets_mcu_list: logging.debug("Target not found in definitions") return None mcu_record = self.targets.get_mcu_record(target) if self.mcus.get_mcu_record(target) is None else self.mcus.get_mcu_record(target) try: return mcu_record['tool_specific'][self.tool] except KeyError: return None
<SYSTEM_TASK:> Returns True if target is supported by definitions <END_TASK> <USER_TASK:> Description: def is_supported(self, target): """ Returns True if target is supported by definitions """
if target.lower() not in self.targets_mcu_list:
            logging.debug("Target not found in definitions")
            return False
        mcu_record = self.targets.get_mcu_record(target) if self.mcus.get_mcu_record(target) is None else self.mcus.get_mcu_record(target)
        # Tool-specific options define which tools a target supports.
        # TODO: we might create a list of what a tool requires
        if self.tool:
            # A specific tool was requested -- check whether the target defines it.
            try:
                return self.tool in mcu_record['tool_specific']
            except (TypeError, KeyError):
                return False
        else:
            # No tool requested: supporting the generic (mcu) part is enough.
            return True
<SYSTEM_TASK:> Returns other names for given real key or alias ``name``. <END_TASK> <USER_TASK:> Description: def aliases_of(self, name): """ Returns other names for given real key or alias ``name``. If given a real key, returns its aliases. If given an alias, returns the real key it points to, plus any other aliases of that real key. (The given alias itself is not included in the return value.) """
names = [] key = name # self.aliases keys are aliases, not realkeys. Easy test to see if we # should flip around to the POV of a realkey when given an alias. if name in self.aliases: key = self.aliases[name] # Ensure the real key shows up in output. names.append(key) # 'key' is now a realkey, whose aliases are all keys whose value is # itself. Filter out the original name given. names.extend([ k for k, v in six.iteritems(self.aliases) if v == key and k != name ]) return names
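The alias/realkey symmetry is easiest to see on a tiny stand-in object that carries only the aliases mapping; the real host class is not named in this source:

class _Stub(object):
    aliases = {'bw': 'black', 'noir': 'black'}
    aliases_of = aliases_of   # borrow the method above

s = _Stub()
s.aliases_of('black')   # ['bw', 'noir'] -- all aliases of a real key
s.aliases_of('bw')      # ['black', 'noir'] -- real key first, then sibling aliases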
<SYSTEM_TASK:> Remove an existing fragment component from a URI reference string. <END_TASK> <USER_TASK:> Description: def uridefrag(uristring): """Remove an existing fragment component from a URI reference string. """
if isinstance(uristring, bytes): parts = uristring.partition(b'#') else: parts = uristring.partition(u'#') return DefragResult(parts[0], parts[2] if parts[1] else None)
<SYSTEM_TASK:> Return the recombined version of the original URI as a string. <END_TASK> <USER_TASK:> Description: def geturi(self): """Return the recombined version of the original URI as a string."""
fragment = self.fragment if fragment is None: return self.uri elif isinstance(fragment, bytes): return self.uri + b'#' + fragment else: return self.uri + u'#' + fragment
<SYSTEM_TASK:> Return the decoded fragment identifier, or `default` if the <END_TASK> <USER_TASK:> Description: def getfragment(self, default=None, encoding='utf-8', errors='strict'): """Return the decoded fragment identifier, or `default` if the original URI did not contain a fragment component. """
fragment = self.fragment if fragment is not None: return uridecode(fragment, encoding, errors) else: return default
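Round-trip behavior of the three pieces above:

d = uridefrag('http://example.com/page#section-2')
d.uri             # 'http://example.com/page'
d.fragment        # 'section-2'
d.getfragment()   # 'section-2' (percent-decoded)
d.geturi()        # 'http://example.com/page#section-2' -- lossless round trip
uridefrag('http://example.com/page').fragment   # None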
<SYSTEM_TASK:> Click web element. <END_TASK> <USER_TASK:> Description: def click(self, locator, params=None, timeout=None): """ Click web element. :param locator: locator tuple or WebElement instance :param params: (optional) locator parameters :param timeout: (optional) time to wait for element :return: None """
self._click(locator, params, timeout)
<SYSTEM_TASK:> Alt-click web element. <END_TASK> <USER_TASK:> Description: def alt_click(self, locator, params=None, timeout=None): """ Alt-click web element. :param locator: locator tuple or WebElement instance :param params: (optional) locator parameters :param timeout: (optional) time to wait for element :return: None """
self._click(locator, params, timeout, Keys.ALT)
<SYSTEM_TASK:> Shift-click web element. <END_TASK> <USER_TASK:> Description: def shift_click(self, locator, params=None, timeout=None): """ Shift-click web element. :param locator: locator tuple or WebElement instance :param params: (optional) locator parameters :param timeout: (optional) time to wait for element :return: None """
self._click(locator, params, timeout, Keys.SHIFT)