Dataset columns:

    repo              stringlengths 7-54
    path              stringlengths 4-192
    url               stringlengths 87-284
    code              stringlengths 78-104k
    code_tokens       sequence
    docstring         stringlengths 1-46.9k
    docstring_tokens  sequence
    language          stringclasses (1 value)
    partition         stringclasses (3 values)
repo: mitsei/dlkit
path: dlkit/json_/assessment/mixins.py
url: https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/mixins.py#L641-L646
code:
    def get_responses(self):
        """Gets list of the latest responses"""
        response_list = []
        for question_map in self._my_map['questions']:
            response_list.append(self._get_response_from_question_map(question_map))
        return ResponseList(response_list)
[ "def", "get_responses", "(", "self", ")", ":", "response_list", "=", "[", "]", "for", "question_map", "in", "self", ".", "_my_map", "[", "'questions'", "]", ":", "response_list", ".", "append", "(", "self", ".", "_get_response_from_question_map", "(", "question_map", ")", ")", "return", "ResponseList", "(", "response_list", ")" ]
docstring: Gets list of the latest responses
language: python
partition: train
repo: markovmodel/PyEMMA
path: pyemma/coordinates/transform/_tica_base.py
url: https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/transform/_tica_base.py#L117-L133
code:
    def _transform_array(self, X):
        r"""Projects the data onto the dominant independent components.

        Parameters
        ----------
        X : ndarray(n, m)
            the input data

        Returns
        -------
        Y : ndarray(n,)
            the projected data
        """
        X_meanfree = X - self.mean
        Y = np.dot(X_meanfree, self.eigenvectors[:, 0:self.dimension()])
        return Y.astype(self.output_type())
[ "def", "_transform_array", "(", "self", ",", "X", ")", ":", "X_meanfree", "=", "X", "-", "self", ".", "mean", "Y", "=", "np", ".", "dot", "(", "X_meanfree", ",", "self", ".", "eigenvectors", "[", ":", ",", "0", ":", "self", ".", "dimension", "(", ")", "]", ")", "return", "Y", ".", "astype", "(", "self", ".", "output_type", "(", ")", ")" ]
r"""Projects the data onto the dominant independent components. Parameters ---------- X : ndarray(n, m) the input data Returns ------- Y : ndarray(n,) the projected data
[ "r", "Projects", "the", "data", "onto", "the", "dominant", "independent", "components", "." ]
python
train
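The projection above is just mean-centering followed by a matrix product against the leading eigenvectors. A minimal standalone sketch of that step with NumPy; the random mean and orthonormal basis are stand-ins for what PyEMMA's TICA solver would normally provide:

    import numpy as np

    rng = np.random.default_rng(0)
    X = rng.normal(size=(1000, 5))            # (n frames, m features)
    mean = X.mean(axis=0)                     # stand-in for self.mean
    eigenvectors, _ = np.linalg.qr(rng.normal(size=(5, 5)))  # stand-in basis
    dimension = 2                             # keep the dominant components

    Y = np.dot(X - mean, eigenvectors[:, :dimension])
    print(Y.shape)                            # -> (1000, 2)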
repo: RudolfCardinal/pythonlib
path: cardinal_pythonlib/sqlalchemy/orm_inspect.py
url: https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/orm_inspect.py#L140-L226
code:
    def walk_orm_tree(obj,
                      debug: bool = False,
                      seen: Set = None,
                      skip_relationships_always: List[str] = None,
                      skip_relationships_by_tablename: Dict[str, List[str]] = None,
                      skip_all_relationships_for_tablenames: List[str] = None,
                      skip_all_objects_for_tablenames: List[str] = None) \
            -> Generator[object, None, None]:
        """
        Starting with a SQLAlchemy ORM object, this function walks a
        relationship tree, yielding each of the objects once.

        To skip relationships by name, put the attribute name(s) in
        ``skip_relationships_always``. To skip by table name, pass
        ``skip_relationships_by_tablename`` as e.g.

        .. code-block:: python

            {'sometable': ['attr1_to_skip', 'attr2_to_skip']}

        Args:
            obj: the SQLAlchemy ORM object to walk
            debug: be verbose
            seen: usually ``None``, but can be a set of objects marked as
                "already seen"; if an object is in this set, it is skipped
            skip_relationships_always: relationships are skipped if the
                relationship has a name in this (optional) list
            skip_relationships_by_tablename: optional dictionary mapping
                table names (keys) to relationship attribute names (values);
                if the "related table"/"relationship attribute" pair are in
                this dictionary, the relationship is skipped
            skip_all_relationships_for_tablenames: relationships are skipped
                if the related table has a name in this (optional) list
            skip_all_objects_for_tablenames: if the object belongs to a table
                whose name is in this (optional) list, the object is skipped

        Yields:
            SQLAlchemy ORM objects (including the starting object)
        """
        # http://docs.sqlalchemy.org/en/latest/faq/sessions.html#faq-walk-objects
        skip_relationships_always = skip_relationships_always or []  # type: List[str]  # noqa
        skip_relationships_by_tablename = skip_relationships_by_tablename or {}  # type: Dict[str, List[str]]  # noqa
        skip_all_relationships_for_tablenames = skip_all_relationships_for_tablenames or []  # type: List[str]  # noqa
        skip_all_objects_for_tablenames = skip_all_objects_for_tablenames or []  # type: List[str]  # noqa
        stack = [obj]
        if seen is None:
            seen = set()
        while stack:
            obj = stack.pop(0)
            if obj in seen:
                continue
            tablename = obj.__tablename__
            if tablename in skip_all_objects_for_tablenames:
                continue
            seen.add(obj)
            if debug:
                log.debug("walk: yielding {!r}", obj)
            yield obj
            insp = inspect(obj)  # type: InstanceState
            for relationship in insp.mapper.relationships:  # type: RelationshipProperty  # noqa
                attrname = relationship.key
                # Skip?
                if attrname in skip_relationships_always:
                    continue
                if tablename in skip_all_relationships_for_tablenames:
                    continue
                if (tablename in skip_relationships_by_tablename and
                        attrname in skip_relationships_by_tablename[tablename]):
                    continue
                # Process relationship
                if debug:
                    log.debug("walk: following relationship {}", relationship)
                related = getattr(obj, attrname)
                if debug and related:
                    log.debug("walk: queueing {!r}", related)
                if relationship.uselist:
                    stack.extend(related)
                elif related is not None:
                    stack.append(related)
[ "def", "walk_orm_tree", "(", "obj", ",", "debug", ":", "bool", "=", "False", ",", "seen", ":", "Set", "=", "None", ",", "skip_relationships_always", ":", "List", "[", "str", "]", "=", "None", ",", "skip_relationships_by_tablename", ":", "Dict", "[", "str", ",", "List", "[", "str", "]", "]", "=", "None", ",", "skip_all_relationships_for_tablenames", ":", "List", "[", "str", "]", "=", "None", ",", "skip_all_objects_for_tablenames", ":", "List", "[", "str", "]", "=", "None", ")", "->", "Generator", "[", "object", ",", "None", ",", "None", "]", ":", "# http://docs.sqlalchemy.org/en/latest/faq/sessions.html#faq-walk-objects", "skip_relationships_always", "=", "skip_relationships_always", "or", "[", "]", "# type: List[str] # noqa", "skip_relationships_by_tablename", "=", "skip_relationships_by_tablename", "or", "{", "}", "# type: Dict[str, List[str]] # noqa", "skip_all_relationships_for_tablenames", "=", "skip_all_relationships_for_tablenames", "or", "[", "]", "# type: List[str] # noqa", "skip_all_objects_for_tablenames", "=", "skip_all_objects_for_tablenames", "or", "[", "]", "# type: List[str] # noqa", "stack", "=", "[", "obj", "]", "if", "seen", "is", "None", ":", "seen", "=", "set", "(", ")", "while", "stack", ":", "obj", "=", "stack", ".", "pop", "(", "0", ")", "if", "obj", "in", "seen", ":", "continue", "tablename", "=", "obj", ".", "__tablename__", "if", "tablename", "in", "skip_all_objects_for_tablenames", ":", "continue", "seen", ".", "add", "(", "obj", ")", "if", "debug", ":", "log", ".", "debug", "(", "\"walk: yielding {!r}\"", ",", "obj", ")", "yield", "obj", "insp", "=", "inspect", "(", "obj", ")", "# type: InstanceState", "for", "relationship", "in", "insp", ".", "mapper", ".", "relationships", ":", "# type: RelationshipProperty # noqa", "attrname", "=", "relationship", ".", "key", "# Skip?", "if", "attrname", "in", "skip_relationships_always", ":", "continue", "if", "tablename", "in", "skip_all_relationships_for_tablenames", ":", "continue", "if", "(", "tablename", "in", "skip_relationships_by_tablename", "and", "attrname", "in", "skip_relationships_by_tablename", "[", "tablename", "]", ")", ":", "continue", "# Process relationship", "if", "debug", ":", "log", ".", "debug", "(", "\"walk: following relationship {}\"", ",", "relationship", ")", "related", "=", "getattr", "(", "obj", ",", "attrname", ")", "if", "debug", "and", "related", ":", "log", ".", "debug", "(", "\"walk: queueing {!r}\"", ",", "related", ")", "if", "relationship", ".", "uselist", ":", "stack", ".", "extend", "(", "related", ")", "elif", "related", "is", "not", "None", ":", "stack", ".", "append", "(", "related", ")" ]
docstring:
    Starting with a SQLAlchemy ORM object, this function walks a relationship
    tree, yielding each of the objects once.

    To skip relationships by name, put the attribute name(s) in
    ``skip_relationships_always``. To skip by table name, pass
    ``skip_relationships_by_tablename`` as e.g.

    .. code-block:: python

        {'sometable': ['attr1_to_skip', 'attr2_to_skip']}

    Args:
        obj: the SQLAlchemy ORM object to walk
        debug: be verbose
        seen: usually ``None``, but can be a set of objects marked as
            "already seen"; if an object is in this set, it is skipped
        skip_relationships_always: relationships are skipped if the
            relationship has a name in this (optional) list
        skip_relationships_by_tablename: optional dictionary mapping table
            names (keys) to relationship attribute names (values); if the
            "related table"/"relationship attribute" pair are in this
            dictionary, the relationship is skipped
        skip_all_relationships_for_tablenames: relationships are skipped if
            the related table has a name in this (optional) list
        skip_all_objects_for_tablenames: if the object belongs to a table
            whose name is in this (optional) list, the object is skipped

    Yields:
        SQLAlchemy ORM objects (including the starting object)
language: python
partition: train
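A hypothetical usage sketch; the model, relationship, and table names below are invented for illustration, and only the walk_orm_tree signature comes from the record above:

    # Walk everything reachable from a Parent instance, but never follow
    # the 'owner' relationship on the 'parent' table and never yield rows
    # from the 'audit_log' table.
    for node in walk_orm_tree(
            some_parent,
            skip_relationships_by_tablename={'parent': ['owner']},
            skip_all_objects_for_tablenames=['audit_log']):
        print(node.__tablename__, node)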
repo: widdowquinn/pyani
path: pyani/run_multiprocessing.py
url: https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_multiprocessing.py#L51-L65
code:
    def populate_cmdsets(job, cmdsets, depth):
        """Creates a list of sets containing jobs at different depths of
        the dependency tree.

        This is a recursive function (is there something quicker in the
        itertools module?) that descends each 'root' job in turn,
        populating each
        """
        if len(cmdsets) < depth:
            cmdsets.append(set())
        cmdsets[depth-1].add(job.command)
        if len(job.dependencies) == 0:
            return cmdsets
        for j in job.dependencies:
            cmdsets = populate_cmdsets(j, cmdsets, depth+1)
        return cmdsets
[ "def", "populate_cmdsets", "(", "job", ",", "cmdsets", ",", "depth", ")", ":", "if", "len", "(", "cmdsets", ")", "<", "depth", ":", "cmdsets", ".", "append", "(", "set", "(", ")", ")", "cmdsets", "[", "depth", "-", "1", "]", ".", "add", "(", "job", ".", "command", ")", "if", "len", "(", "job", ".", "dependencies", ")", "==", "0", ":", "return", "cmdsets", "for", "j", "in", "job", ".", "dependencies", ":", "cmdsets", "=", "populate_cmdsets", "(", "j", ",", "cmdsets", ",", "depth", "+", "1", ")", "return", "cmdsets" ]
docstring:
    Creates a list of sets containing jobs at different depths of the
    dependency tree.

    This is a recursive function (is there something quicker in the
    itertools module?) that descends each 'root' job in turn, populating
    each
language: python
partition: train
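A small worked example of how the depth sets fill in. The Job stand-in is hypothetical and only mimics the two attributes (command, dependencies) the function actually touches:

    from collections import namedtuple

    # Minimal stand-in for pyani's job objects.
    Job = namedtuple('Job', ['command', 'dependencies'])

    leaf = Job('blastn -query a.fna -subject b.fna', [])
    root = Job('makeblastdb -in b.fna', [leaf])

    cmdsets = populate_cmdsets(root, [], 1)
    # cmdsets[0] == {'makeblastdb -in b.fna'}                  (depth 1)
    # cmdsets[1] == {'blastn -query a.fna -subject b.fna'}     (depth 2)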
repo: bharadwajyarlagadda/bingmaps
path: bingmaps/apiservices/trafficincidents.py
url: https://github.com/bharadwajyarlagadda/bingmaps/blob/6bb3cdadfb121aaff96704509cedff2710a62b6d/bingmaps/apiservices/trafficincidents.py#L300-L321
code:
    def lane_info(self):
        """Retrieves the lane info of the incident/incidents from the
        output response

        Returns:
            lane_info(namedtuple): List of named tuples of lane info of
            the incident/incidents
        """
        resource_list = self.traffic_incident()
        lane_info = namedtuple('lane_info', 'lane_info')
        if len(resource_list) == 1 and resource_list[0] is None:
            return None
        else:
            try:
                return [lane_info(resource['lane'])
                        for resource in resource_list]
            except (KeyError, TypeError):
                try:
                    return [lane_info(resource['LaneInfo'])
                            for resource in resource_list]
                except KeyError:
                    return None
[ "def", "lane_info", "(", "self", ")", ":", "resource_list", "=", "self", ".", "traffic_incident", "(", ")", "lane_info", "=", "namedtuple", "(", "'lane_info'", ",", "'lane_info'", ")", "if", "len", "(", "resource_list", ")", "==", "1", "and", "resource_list", "[", "0", "]", "is", "None", ":", "return", "None", "else", ":", "try", ":", "return", "[", "lane_info", "(", "resource", "[", "'lane'", "]", ")", "for", "resource", "in", "resource_list", "]", "except", "(", "KeyError", ",", "TypeError", ")", ":", "try", ":", "return", "[", "lane_info", "(", "resource", "[", "'LaneInfo'", "]", ")", "for", "resource", "in", "resource_list", "]", "except", "KeyError", ":", "return", "None" ]
docstring:
    Retrieves the lane info of the incident/incidents from the output
    response

    Returns:
        lane_info(namedtuple): List of named tuples of lane info of the
        incident/incidents
language: python
partition: train
repo: roboogle/gtkmvc3
path: gtkmvco/gtkmvc3/support/metaclasses.py
url: https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/gtkmvc3/support/metaclasses.py#L628-L718
code:
    def get_setter(cls, prop_name,  # @NoSelf
                   user_setter=None, setter_takes_name=False,
                   user_getter=None, getter_takes_name=False):
        """The setter follows the rules of the getter. First search for
        property variable, then logical custom setter. If no setter is
        found, None is returned (i.e. the property is read-only.)"""
        has_prop_variable = cls.has_prop_attribute(prop_name)
        # WARNING! These are deprecated
        has_specific_setter = hasattr(cls, SET_PROP_NAME %
                                      {'prop_name': prop_name})
        has_general_setter = hasattr(cls, SET_GENERIC_NAME)

        if not (has_prop_variable or
                has_specific_setter or
                has_general_setter or
                user_setter):
            return None

        if has_prop_variable:
            if has_specific_setter or user_setter:
                logger.warning("In class %s.%s ignoring custom logical "
                               "setter for property '%s' as a "
                               "corresponding attribute exists" %
                               (cls.__module__, cls.__name__, prop_name))
            user_setter = user_getter = None
            setter_takes_name = getter_takes_name = False
        else:
            if user_setter:
                pass
            else:
                if has_specific_setter:
                    def __setter(self, val):
                        _setter = getattr(self, SET_PROP_NAME %
                                          {'prop_name': prop_name})
                        _setter(val)
                        return
                    user_setter = __setter
                    # user_setter = getattr(cls, SET_PROP_NAME % \
                    #                       {'prop_name' : prop_name})
                    setter_takes_name = False
                else:
                    assert has_general_setter
                    def __setter(self, name, val):
                        _setter = getattr(self, SET_GENERIC_NAME)
                        _setter(name, val)
                        return
                    user_setter = __setter
                    # user_setter = getattr(cls, SET_GENERIC_NAME)
                    setter_takes_name = True

        # the final setter is a combination of a basic setter, and
        # the getter (see how inner_{getter,setter} are used in
        # _setter below)
        _inner_setter = PropertyMeta.get_setter(cls, prop_name,
                                                user_setter, setter_takes_name,
                                                user_getter, getter_takes_name)
        _inner_getter = type(cls).get_getter(cls, prop_name,
                                             user_getter, getter_takes_name)

        def _setter(self, val):
            curr_frame = len(self._notify_stack)
            if prop_name not in self._notify_stack:
                self._notify_stack.append(prop_name)
                old = _inner_getter(self)
                new = type(self).create_value(prop_name, val, self)
                # to track dependencies
                olds = self.__before_property_value_change__(prop_name) if \
                    self._has_observer() else ()
                self._notify_stack.extend(map(operator.itemgetter(1), olds))
                # this is the unique place where the value is set:
                _inner_setter(self, new)
                if type(self).check_value_change(old, new):
                    self._reset_property_notification(prop_name, old)
                    self.notify_property_value_change(prop_name, old, val)
                # to notify dependencies
                self.__after_property_value_change__(prop_name, olds)
            del self._notify_stack[curr_frame:]
        return _setter
[ "def", "get_setter", "(", "cls", ",", "prop_name", ",", "# @NoSelf", "user_setter", "=", "None", ",", "setter_takes_name", "=", "False", ",", "user_getter", "=", "None", ",", "getter_takes_name", "=", "False", ")", ":", "has_prop_variable", "=", "cls", ".", "has_prop_attribute", "(", "prop_name", ")", "# WARNING! These are deprecated", "has_specific_setter", "=", "hasattr", "(", "cls", ",", "SET_PROP_NAME", "%", "{", "'prop_name'", ":", "prop_name", "}", ")", "has_general_setter", "=", "hasattr", "(", "cls", ",", "SET_GENERIC_NAME", ")", "if", "not", "(", "has_prop_variable", "or", "has_specific_setter", "or", "has_general_setter", "or", "user_setter", ")", ":", "return", "None", "if", "has_prop_variable", ":", "if", "has_specific_setter", "or", "user_setter", ":", "logger", ".", "warning", "(", "\"In class %s.%s ignoring custom logical \"", "\"setter for property '%s' as a \"", "\"corresponding attribute exists\"", "%", "(", "cls", ".", "__module__", ",", "cls", ".", "__name__", ",", "prop_name", ")", ")", "user_setter", "=", "user_getter", "=", "None", "setter_takes_name", "=", "getter_takes_name", "=", "False", "else", ":", "if", "user_setter", ":", "pass", "else", ":", "if", "has_specific_setter", ":", "def", "__setter", "(", "self", ",", "val", ")", ":", "_setter", "=", "getattr", "(", "self", ",", "SET_PROP_NAME", "%", "{", "'prop_name'", ":", "prop_name", "}", ")", "_setter", "(", "val", ")", "return", "user_setter", "=", "__setter", "#user_setter = getattr(cls, SET_PROP_NAME % \\", "# {'prop_name' : prop_name})", "setter_takes_name", "=", "False", "else", ":", "assert", "has_general_setter", "def", "__setter", "(", "self", ",", "name", ",", "val", ")", ":", "_setter", "=", "getattr", "(", "self", ",", "SET_GENERIC_NAME", ")", "_setter", "(", "name", ",", "val", ")", "return", "user_setter", "=", "__setter", "#user_setter = getattr(cls, SET_GENERIC_NAME)", "setter_takes_name", "=", "True", "# the final setter is a combination of a basic setter, and", "# the getter (see how inner_{getter,setter} are used in", "# _setter below)", "_inner_setter", "=", "PropertyMeta", ".", "get_setter", "(", "cls", ",", "prop_name", ",", "user_setter", ",", "setter_takes_name", ",", "user_getter", ",", "getter_takes_name", ")", "_inner_getter", "=", "type", "(", "cls", ")", ".", "get_getter", "(", "cls", ",", "prop_name", ",", "user_getter", ",", "getter_takes_name", ")", "def", "_setter", "(", "self", ",", "val", ")", ":", "curr_frame", "=", "len", "(", "self", ".", "_notify_stack", ")", "if", "prop_name", "not", "in", "self", ".", "_notify_stack", ":", "self", ".", "_notify_stack", ".", "append", "(", "prop_name", ")", "old", "=", "_inner_getter", "(", "self", ")", "new", "=", "type", "(", "self", ")", ".", "create_value", "(", "prop_name", ",", "val", ",", "self", ")", "# to track dependencies", "olds", "=", "self", ".", "__before_property_value_change__", "(", "prop_name", ")", "if", "self", ".", "_has_observer", "(", ")", "else", "(", ")", "self", ".", "_notify_stack", ".", "extend", "(", "map", "(", "operator", ".", "itemgetter", "(", "1", ")", ",", "olds", ")", ")", "# this is the unique place where the value is set:", "_inner_setter", "(", "self", ",", "new", ")", "if", "type", "(", "self", ")", ".", "check_value_change", "(", "old", ",", "new", ")", ":", "self", ".", "_reset_property_notification", "(", "prop_name", ",", "old", ")", "self", ".", "notify_property_value_change", "(", "prop_name", ",", "old", ",", "val", ")", "# to notify dependencies", "self", ".", 
"__after_property_value_change__", "(", "prop_name", ",", "olds", ")", "del", "self", ".", "_notify_stack", "[", "curr_frame", ":", "]", "return", "_setter" ]
docstring:
    The setter follows the rules of the getter. First search for property
    variable, then logical custom setter. If no setter is found, None is
    returned (i.e. the property is read-only.)
language: python
partition: train
repo: cloudendpoints/endpoints-management-python
path: endpoints_management/control/money.py
url: https://github.com/cloudendpoints/endpoints-management-python/blob/ec3c4a330ae9d65738861ce6df4dd6c3cb9f7731/endpoints_management/control/money.py#L40-L60
code:
    def check_valid(money):
        """Determine if an instance of `Money` is valid.

        Args:
            money (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`):
                the instance to test

        Raises:
            ValueError: if the money instance is invalid
        """
        if not isinstance(money, sc_messages.Money):
            raise ValueError(u'Inputs should be of type %s' % (sc_messages.Money,))
        currency = money.currencyCode
        if not currency or len(currency) != 3:
            raise ValueError(_MSG_3_LETTERS_LONG)
        units = money.units
        nanos = money.nanos
        if ((units > 0) and (nanos < 0)) or ((units < 0) and (nanos > 0)):
            raise ValueError(_MSG_UNITS_NANOS_MISMATCH)
        if abs(nanos) > MAX_NANOS:
            raise ValueError(_MSG_NANOS_OOB)
[ "def", "check_valid", "(", "money", ")", ":", "if", "not", "isinstance", "(", "money", ",", "sc_messages", ".", "Money", ")", ":", "raise", "ValueError", "(", "u'Inputs should be of type %s'", "%", "(", "sc_messages", ".", "Money", ",", ")", ")", "currency", "=", "money", ".", "currencyCode", "if", "not", "currency", "or", "len", "(", "currency", ")", "!=", "3", ":", "raise", "ValueError", "(", "_MSG_3_LETTERS_LONG", ")", "units", "=", "money", ".", "units", "nanos", "=", "money", ".", "nanos", "if", "(", "(", "units", ">", "0", ")", "and", "(", "nanos", "<", "0", ")", ")", "or", "(", "(", "units", "<", "0", ")", "and", "(", "nanos", ">", "0", ")", ")", ":", "raise", "ValueError", "(", "_MSG_UNITS_NANOS_MISMATCH", ")", "if", "abs", "(", "nanos", ")", ">", "MAX_NANOS", ":", "raise", "ValueError", "(", "_MSG_NANOS_OOB", ")" ]
docstring:
    Determine if an instance of `Money` is valid.

    Args:
        money (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`):
            the instance to test

    Raises:
        ValueError: if the money instance is invalid
language: python
partition: train
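The units/nanos rule encodes fractional amounts with matching signs. A sketch of that convention using a plain stand-in class; note the real check also insists on the generated sc_messages.Money type, so this stand-in would not pass the isinstance test:

    class FakeMoney:
        """Hypothetical stand-in with the three fields check_valid reads."""
        def __init__(self, currencyCode, units, nanos):
            self.currencyCode = currencyCode
            self.units = units
            self.nanos = nanos

    # -1.75 USD: units and nanos carry the same sign, so the sign rule passes.
    ok = FakeMoney('USD', -1, -750000000)
    # Opposite signs would raise ValueError(_MSG_UNITS_NANOS_MISMATCH).
    bad = FakeMoney('USD', -1, 750000000)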
repo: twilio/twilio-python
path: twilio/rest/sync/v1/service/document/document_permission.py
url: https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/sync/v1/service/document/document_permission.py#L333-L348
code:
    def _proxy(self):
        """
        Generate an instance context for the instance; the context is
        capable of performing various actions. All instance actions are
        proxied to the context.

        :returns: DocumentPermissionContext for this DocumentPermissionInstance
        :rtype: twilio.rest.sync.v1.service.document.document_permission.DocumentPermissionContext
        """
        if self._context is None:
            self._context = DocumentPermissionContext(
                self._version,
                service_sid=self._solution['service_sid'],
                document_sid=self._solution['document_sid'],
                identity=self._solution['identity'],
            )
        return self._context
[ "def", "_proxy", "(", "self", ")", ":", "if", "self", ".", "_context", "is", "None", ":", "self", ".", "_context", "=", "DocumentPermissionContext", "(", "self", ".", "_version", ",", "service_sid", "=", "self", ".", "_solution", "[", "'service_sid'", "]", ",", "document_sid", "=", "self", ".", "_solution", "[", "'document_sid'", "]", ",", "identity", "=", "self", ".", "_solution", "[", "'identity'", "]", ",", ")", "return", "self", ".", "_context" ]
docstring:
    Generate an instance context for the instance; the context is capable of
    performing various actions. All instance actions are proxied to the
    context.

    :returns: DocumentPermissionContext for this DocumentPermissionInstance
    :rtype: twilio.rest.sync.v1.service.document.document_permission.DocumentPermissionContext
language: python
partition: train
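The _proxy method above is ordinary lazy initialization: build the context once on first access, then reuse it. A generic sketch of the same pattern, independent of the Twilio classes:

    class LazyContext:
        def __init__(self, factory):
            self._factory = factory      # callable that builds the context
            self._context = None

        @property
        def context(self):
            if self._context is None:    # first access: build and cache
                self._context = self._factory()
            return self._context

    proxy = LazyContext(lambda: object())
    assert proxy.context is proxy.context    # same cached instance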
repo: pyrogram/pyrogram
path: pyrogram/client/methods/messages/forward_messages.py
url: https://github.com/pyrogram/pyrogram/blob/e7258a341ba905cfa86264c22040654db732ec1c/pyrogram/client/methods/messages/forward_messages.py#L27-L129
code:
    def forward_messages(self,
                         chat_id: Union[int, str],
                         from_chat_id: Union[int, str],
                         message_ids: Iterable[int],
                         disable_notification: bool = None,
                         as_copy: bool = False,
                         remove_caption: bool = False) -> "pyrogram.Messages":
        """Use this method to forward messages of any kind.

        Args:
            chat_id (``int`` | ``str``):
                Unique identifier (int) or username (str) of the target chat.
                For your personal cloud (Saved Messages) you can simply use
                "me" or "self". For a contact that exists in your Telegram
                address book you can use his phone number (str).

            from_chat_id (``int`` | ``str``):
                Unique identifier (int) or username (str) of the source chat
                where the original message was sent. For your personal cloud
                (Saved Messages) you can simply use "me" or "self". For a
                contact that exists in your Telegram address book you can use
                his phone number (str).

            message_ids (``iterable``):
                A list of Message identifiers in the chat specified in
                *from_chat_id* or a single message id. Iterators and
                Generators are also accepted.

            disable_notification (``bool``, *optional*):
                Sends the message silently. Users will receive a notification
                with no sound.

            as_copy (``bool``, *optional*):
                Pass True to forward messages without the forward header
                (i.e.: send a copy of the message content). Defaults to False.

            remove_caption (``bool``, *optional*):
                If set to True and *as_copy* is enabled as well, media
                captions are not preserved when copying the message. Has no
                effect if *as_copy* is not enabled. Defaults to False.

        Returns:
            On success and in case *message_ids* was an iterable, the
            returned value will be a list of the forwarded :obj:`Messages
            <pyrogram.Message>` even if a list contains just one element,
            otherwise if *message_ids* was an integer, the single forwarded
            :obj:`Message <pyrogram.Message>` is returned.

        Raises:
            :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC
            error.
        """
        is_iterable = not isinstance(message_ids, int)
        message_ids = list(message_ids) if is_iterable else [message_ids]

        if as_copy:
            forwarded_messages = []
            for chunk in [message_ids[i:i + 200] for i in range(0, len(message_ids), 200)]:
                messages = self.get_messages(chat_id=from_chat_id, message_ids=chunk)  # type: pyrogram.Messages
                for message in messages.messages:
                    forwarded_messages.append(
                        message.forward(
                            chat_id,
                            disable_notification=disable_notification,
                            as_copy=True,
                            remove_caption=remove_caption
                        )
                    )
            return pyrogram.Messages(
                client=self,
                total_count=len(forwarded_messages),
                messages=forwarded_messages
            ) if is_iterable else forwarded_messages[0]
        else:
            r = self.send(
                functions.messages.ForwardMessages(
                    to_peer=self.resolve_peer(chat_id),
                    from_peer=self.resolve_peer(from_chat_id),
                    id=message_ids,
                    silent=disable_notification or None,
                    random_id=[self.rnd_id() for _ in message_ids]
                )
            )
            forwarded_messages = []
            users = {i.id: i for i in r.users}
            chats = {i.id: i for i in r.chats}
            for i in r.updates:
                if isinstance(i, (types.UpdateNewMessage, types.UpdateNewChannelMessage)):
                    forwarded_messages.append(
                        pyrogram.Message._parse(self, i.message, users, chats)
                    )
            return pyrogram.Messages(
                client=self,
                total_count=len(forwarded_messages),
                messages=forwarded_messages
            ) if is_iterable else forwarded_messages[0]
[ "def", "forward_messages", "(", "self", ",", "chat_id", ":", "Union", "[", "int", ",", "str", "]", ",", "from_chat_id", ":", "Union", "[", "int", ",", "str", "]", ",", "message_ids", ":", "Iterable", "[", "int", "]", ",", "disable_notification", ":", "bool", "=", "None", ",", "as_copy", ":", "bool", "=", "False", ",", "remove_caption", ":", "bool", "=", "False", ")", "->", "\"pyrogram.Messages\"", ":", "is_iterable", "=", "not", "isinstance", "(", "message_ids", ",", "int", ")", "message_ids", "=", "list", "(", "message_ids", ")", "if", "is_iterable", "else", "[", "message_ids", "]", "if", "as_copy", ":", "forwarded_messages", "=", "[", "]", "for", "chunk", "in", "[", "message_ids", "[", "i", ":", "i", "+", "200", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "message_ids", ")", ",", "200", ")", "]", ":", "messages", "=", "self", ".", "get_messages", "(", "chat_id", "=", "from_chat_id", ",", "message_ids", "=", "chunk", ")", "# type: pyrogram.Messages", "for", "message", "in", "messages", ".", "messages", ":", "forwarded_messages", ".", "append", "(", "message", ".", "forward", "(", "chat_id", ",", "disable_notification", "=", "disable_notification", ",", "as_copy", "=", "True", ",", "remove_caption", "=", "remove_caption", ")", ")", "return", "pyrogram", ".", "Messages", "(", "client", "=", "self", ",", "total_count", "=", "len", "(", "forwarded_messages", ")", ",", "messages", "=", "forwarded_messages", ")", "if", "is_iterable", "else", "forwarded_messages", "[", "0", "]", "else", ":", "r", "=", "self", ".", "send", "(", "functions", ".", "messages", ".", "ForwardMessages", "(", "to_peer", "=", "self", ".", "resolve_peer", "(", "chat_id", ")", ",", "from_peer", "=", "self", ".", "resolve_peer", "(", "from_chat_id", ")", ",", "id", "=", "message_ids", ",", "silent", "=", "disable_notification", "or", "None", ",", "random_id", "=", "[", "self", ".", "rnd_id", "(", ")", "for", "_", "in", "message_ids", "]", ")", ")", "forwarded_messages", "=", "[", "]", "users", "=", "{", "i", ".", "id", ":", "i", "for", "i", "in", "r", ".", "users", "}", "chats", "=", "{", "i", ".", "id", ":", "i", "for", "i", "in", "r", ".", "chats", "}", "for", "i", "in", "r", ".", "updates", ":", "if", "isinstance", "(", "i", ",", "(", "types", ".", "UpdateNewMessage", ",", "types", ".", "UpdateNewChannelMessage", ")", ")", ":", "forwarded_messages", ".", "append", "(", "pyrogram", ".", "Message", ".", "_parse", "(", "self", ",", "i", ".", "message", ",", "users", ",", "chats", ")", ")", "return", "pyrogram", ".", "Messages", "(", "client", "=", "self", ",", "total_count", "=", "len", "(", "forwarded_messages", ")", ",", "messages", "=", "forwarded_messages", ")", "if", "is_iterable", "else", "forwarded_messages", "[", "0", "]" ]
docstring:
    Use this method to forward messages of any kind.

    Args:
        chat_id (``int`` | ``str``):
            Unique identifier (int) or username (str) of the target chat.
            For your personal cloud (Saved Messages) you can simply use
            "me" or "self". For a contact that exists in your Telegram
            address book you can use his phone number (str).

        from_chat_id (``int`` | ``str``):
            Unique identifier (int) or username (str) of the source chat
            where the original message was sent. For your personal cloud
            (Saved Messages) you can simply use "me" or "self". For a
            contact that exists in your Telegram address book you can use
            his phone number (str).

        message_ids (``iterable``):
            A list of Message identifiers in the chat specified in
            *from_chat_id* or a single message id. Iterators and Generators
            are also accepted.

        disable_notification (``bool``, *optional*):
            Sends the message silently. Users will receive a notification
            with no sound.

        as_copy (``bool``, *optional*):
            Pass True to forward messages without the forward header (i.e.:
            send a copy of the message content). Defaults to False.

        remove_caption (``bool``, *optional*):
            If set to True and *as_copy* is enabled as well, media captions
            are not preserved when copying the message. Has no effect if
            *as_copy* is not enabled. Defaults to False.

    Returns:
        On success and in case *message_ids* was an iterable, the returned
        value will be a list of the forwarded :obj:`Messages
        <pyrogram.Message>` even if a list contains just one element,
        otherwise if *message_ids* was an integer, the single forwarded
        :obj:`Message <pyrogram.Message>` is returned.

    Raises:
        :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC
        error.
language: python
partition: train
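A hypothetical call, assuming an initialized and started pyrogram Client named app; the chat identifiers are invented for illustration:

    msgs = app.forward_messages(
        chat_id="me",               # forward into Saved Messages
        from_chat_id="some_channel",
        message_ids=[10, 11, 12],   # iterable, so a Messages object comes back
        as_copy=True,               # strip the forward header
        remove_caption=True,        # only honored because as_copy=True
    )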
repo: Parsl/parsl
path: parsl/channels/ssh/ssh.py
url: https://github.com/Parsl/parsl/blob/d7afb3bc37f50dcf224ae78637944172edb35dac/parsl/channels/ssh/ssh.py#L229-L243
code:
    def isdir(self, path):
        """Return true if the path refers to an existing directory.

        Parameters
        ----------
        path : str
            Path of directory on the remote side to check.
        """
        result = True
        try:
            self.sftp_client.lstat(path)
        except FileNotFoundError:
            result = False
        return result
[ "def", "isdir", "(", "self", ",", "path", ")", ":", "result", "=", "True", "try", ":", "self", ".", "sftp_client", ".", "lstat", "(", "path", ")", "except", "FileNotFoundError", ":", "result", "=", "False", "return", "result" ]
docstring:
    Return true if the path refers to an existing directory.

    Parameters
    ----------
    path : str
        Path of directory on the remote side to check.
language: python
partition: valid
repo: OpenHumans/open-humans-api
path: ohapi/api.py
url: https://github.com/OpenHumans/open-humans-api/blob/ca2a28cf5d55cfdae13dd222ba58c25565bdb86e/ohapi/api.py#L27-L60
code:
    def oauth2_auth_url(redirect_uri=None, client_id=None, base_url=OH_BASE_URL):
        """
        Returns an OAuth2 authorization URL for a project, given a Client ID.

        This function constructs an authorization URL for a user to follow.
        The user will be redirected to authorize Open Humans data for our
        external application. An OAuth2 project on Open Humans is required
        for this to work properly. To learn more about Open Humans OAuth2
        projects, go to:
        https://www.openhumans.org/direct-sharing/oauth2-features/

        :param redirect_uri: Defaults to `None`. If provided, it is appended
            to the returned URL.
        :param client_id: Defaults to `None`, but is mandatory for the final
            URL to work. It uniquely identifies a given OAuth2 project.
        :param base_url: The base URL; defaults to
            `https://www.openhumans.org`.
        """
        if not client_id:
            client_id = os.getenv('OHAPI_CLIENT_ID')
            if not client_id:
                raise SettingsError(
                    "Client ID not provided! Provide client_id as a parameter, "
                    "or set OHAPI_CLIENT_ID in your environment.")
        params = OrderedDict([
            ('client_id', client_id),
            ('response_type', 'code'),
        ])
        if redirect_uri:
            params['redirect_uri'] = redirect_uri
        auth_url = urlparse.urljoin(
            base_url, '/direct-sharing/projects/oauth2/authorize/?{}'.format(
                urlparse.urlencode(params)))
        return auth_url
[ "def", "oauth2_auth_url", "(", "redirect_uri", "=", "None", ",", "client_id", "=", "None", ",", "base_url", "=", "OH_BASE_URL", ")", ":", "if", "not", "client_id", ":", "client_id", "=", "os", ".", "getenv", "(", "'OHAPI_CLIENT_ID'", ")", "if", "not", "client_id", ":", "raise", "SettingsError", "(", "\"Client ID not provided! Provide client_id as a parameter, \"", "\"or set OHAPI_CLIENT_ID in your environment.\"", ")", "params", "=", "OrderedDict", "(", "[", "(", "'client_id'", ",", "client_id", ")", ",", "(", "'response_type'", ",", "'code'", ")", ",", "]", ")", "if", "redirect_uri", ":", "params", "[", "'redirect_uri'", "]", "=", "redirect_uri", "auth_url", "=", "urlparse", ".", "urljoin", "(", "base_url", ",", "'/direct-sharing/projects/oauth2/authorize/?{}'", ".", "format", "(", "urlparse", ".", "urlencode", "(", "params", ")", ")", ")", "return", "auth_url" ]
docstring:
    Returns an OAuth2 authorization URL for a project, given a Client ID.

    This function constructs an authorization URL for a user to follow.
    The user will be redirected to authorize Open Humans data for our
    external application. An OAuth2 project on Open Humans is required for
    this to work properly. To learn more about Open Humans OAuth2 projects,
    go to: https://www.openhumans.org/direct-sharing/oauth2-features/

    :param redirect_uri: Defaults to `None`. If provided, it is appended to
        the returned URL.
    :param client_id: Defaults to `None`, but is mandatory for the final URL
        to work. It uniquely identifies a given OAuth2 project.
    :param base_url: The base URL; defaults to `https://www.openhumans.org`.
language: python
partition: train
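A usage sketch; the client ID and redirect URI below are placeholders:

    url = oauth2_auth_url(
        redirect_uri='https://example.com/callback',
        client_id='MY_CLIENT_ID',
    )
    # url now looks like:
    # https://www.openhumans.org/direct-sharing/projects/oauth2/authorize/
    #     ?client_id=MY_CLIENT_ID&response_type=code&redirect_uri=...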
repo: saltstack/salt
path: salt/modules/azurearm_network.py
url: https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/azurearm_network.py#L2598-L2631
code:
    def route_table_get(name, resource_group, **kwargs):
        '''
        .. versionadded:: 2019.2.0

        Get details about a specific route table.

        :param name: The name of the route table to query.

        :param resource_group: The resource group name assigned to the
            route table.

        CLI Example:

        .. code-block:: bash

            salt-call azurearm_network.route_table_get test-rt-table testgroup

        '''
        expand = kwargs.get('expand')

        netconn = __utils__['azurearm.get_client']('network', **kwargs)
        try:
            table = netconn.route_tables.get(
                route_table_name=name,
                resource_group_name=resource_group,
                expand=expand
            )
            result = table.as_dict()
        except CloudError as exc:
            __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
            result = {'error': str(exc)}

        return result
[ "def", "route_table_get", "(", "name", ",", "resource_group", ",", "*", "*", "kwargs", ")", ":", "expand", "=", "kwargs", ".", "get", "(", "'expand'", ")", "netconn", "=", "__utils__", "[", "'azurearm.get_client'", "]", "(", "'network'", ",", "*", "*", "kwargs", ")", "try", ":", "table", "=", "netconn", ".", "route_tables", ".", "get", "(", "route_table_name", "=", "name", ",", "resource_group_name", "=", "resource_group", ",", "expand", "=", "expand", ")", "result", "=", "table", ".", "as_dict", "(", ")", "except", "CloudError", "as", "exc", ":", "__utils__", "[", "'azurearm.log_cloud_error'", "]", "(", "'network'", ",", "str", "(", "exc", ")", ",", "*", "*", "kwargs", ")", "result", "=", "{", "'error'", ":", "str", "(", "exc", ")", "}", "return", "result" ]
docstring:
    .. versionadded:: 2019.2.0

    Get details about a specific route table.

    :param name: The name of the route table to query.

    :param resource_group: The resource group name assigned to the route
        table.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_network.route_table_get test-rt-table testgroup
language: python
partition: train
repo: wal-e/wal-e
path: wal_e/pep3143daemon/daemon.py
url: https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/pep3143daemon/daemon.py#L162-L179
code:
    def _files_preserve(self):
        """ create a set of protected files

        create a set of files, based on self.files_preserve and
        self.stdin, self.stdout and self.stderr, that should not get
        closed while daemonizing.

        :return: set
        """
        result = set()
        files = [] if not self.files_preserve else self.files_preserve
        files.extend([self.stdin, self.stdout, self.stderr])
        for item in files:
            if hasattr(item, 'fileno'):
                result.add(item.fileno())
            if isinstance(item, int):
                result.add(item)
        return result
[ "def", "_files_preserve", "(", "self", ")", ":", "result", "=", "set", "(", ")", "files", "=", "[", "]", "if", "not", "self", ".", "files_preserve", "else", "self", ".", "files_preserve", "files", ".", "extend", "(", "[", "self", ".", "stdin", ",", "self", ".", "stdout", ",", "self", ".", "stderr", "]", ")", "for", "item", "in", "files", ":", "if", "hasattr", "(", "item", ",", "'fileno'", ")", ":", "result", ".", "add", "(", "item", ".", "fileno", "(", ")", ")", "if", "isinstance", "(", "item", ",", "int", ")", ":", "result", ".", "add", "(", "item", ")", "return", "result" ]
docstring:
    create a set of protected files

    create a set of files, based on self.files_preserve and self.stdin,
    self.stdout and self.stderr, that should not get closed while
    daemonizing.

    :return: set
language: python
partition: train
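The loop accepts a mix of file-like objects (resolved via fileno()) and raw integer descriptors. A standalone illustration of that normalization step:

    import sys

    items = [sys.stdout, 7]          # a file object and a bare descriptor
    fds = set()
    for item in items:
        if hasattr(item, 'fileno'):
            fds.add(item.fileno())
        if isinstance(item, int):
            fds.add(item)
    print(fds)                       # e.g. {1, 7}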
repo: quantopian/pgcontents
path: pgcontents/query.py
url: https://github.com/quantopian/pgcontents/blob/ed36268b7917332d16868208e1e565742a8753e1/pgcontents/query.py#L311-L323
code:
    def _select_file(user_id, api_path, fields, limit):
        """
        Return a SELECT statement that returns the latest N versions of a
        file.
        """
        query = select(fields).where(
            _file_where(user_id, api_path),
        ).order_by(
            _file_creation_order(),
        )
        if limit is not None:
            query = query.limit(limit)
        return query
[ "def", "_select_file", "(", "user_id", ",", "api_path", ",", "fields", ",", "limit", ")", ":", "query", "=", "select", "(", "fields", ")", ".", "where", "(", "_file_where", "(", "user_id", ",", "api_path", ")", ",", ")", ".", "order_by", "(", "_file_creation_order", "(", ")", ",", ")", "if", "limit", "is", "not", "None", ":", "query", "=", "query", ".", "limit", "(", "limit", ")", "return", "query" ]
docstring: Return a SELECT statement that returns the latest N versions of a file.
language: python
partition: test
repo: DataDog/integrations-core
path: datadog_checks_dev/datadog_checks/dev/tooling/release.py
url: https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/datadog_checks_dev/datadog_checks/dev/tooling/release.py#L94-L108
code:
    def update_agent_requirements(req_file, check, newline):
        """
        Replace the requirements line for the given check
        """
        package_name = get_package_name(check)
        lines = read_file_lines(req_file)

        for i, line in enumerate(lines):
            current_package_name = line.split('==')[0]
            if current_package_name == package_name:
                lines[i] = '{}\n'.format(newline)
                break

        write_file_lines(req_file, sorted(lines))
[ "def", "update_agent_requirements", "(", "req_file", ",", "check", ",", "newline", ")", ":", "package_name", "=", "get_package_name", "(", "check", ")", "lines", "=", "read_file_lines", "(", "req_file", ")", "for", "i", ",", "line", "in", "enumerate", "(", "lines", ")", ":", "current_package_name", "=", "line", ".", "split", "(", "'=='", ")", "[", "0", "]", "if", "current_package_name", "==", "package_name", ":", "lines", "[", "i", "]", "=", "'{}\\n'", ".", "format", "(", "newline", ")", "break", "write_file_lines", "(", "req_file", ",", "sorted", "(", "lines", ")", ")" ]
docstring: Replace the requirements line for the given check
language: python
partition: train
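A before/after sketch of one call; the datadog-<check> package naming and the exact behavior of get_package_name are assumptions here, not confirmed by the record:

    # requirements file before:   datadog-nginx==1.0.0
    #
    # update_agent_requirements('agent_requirements.in', 'nginx',
    #                           'datadog-nginx==1.1.0')
    #
    # requirements file after (rewritten with lines sorted):
    #                             datadog-nginx==1.1.0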
repo: jmbeach/KEP.py
path: src/keppy/register.py
url: https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/register.py#L12-L15
code:
    def next_addr(addr, i):
        """Gets address after the current + i"""
        str_addr = pad_zeroes(str(int_addr(addr) + i), len(addr[1:]))
        return addr[0] + str_addr
[ "def", "next_addr", "(", "addr", ",", "i", ")", ":", "str_addr", "=", "pad_zeroes", "(", "str", "(", "int_addr", "(", "addr", ")", "+", "i", ")", ",", "len", "(", "addr", "[", "1", ":", "]", ")", ")", "return", "addr", "[", "0", "]", "+", "str_addr" ]
docstring: Gets address after the current + i
language: python
partition: train
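A runnable version of the address arithmetic, with hypothetical stand-ins for the module's int_addr and pad_zeroes helpers (their real definitions are not shown in the record):

    def int_addr(addr):
        """Stand-in: numeric part after the leading letter, e.g. 'K0005' -> 5."""
        return int(addr[1:])

    def pad_zeroes(s, width):
        """Stand-in: left-pad with zeroes to the given width."""
        return s.zfill(width)

    def next_addr(addr, i):
        """Gets address after the current + i"""
        str_addr = pad_zeroes(str(int_addr(addr) + i), len(addr[1:]))
        return addr[0] + str_addr

    print(next_addr('K0005', 3))    # -> 'K0008'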
repo: Kortemme-Lab/klab
path: klab/chainsequence.py
url: https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/chainsequence.py#L100-L145
code:
    def replace_seqres(self, pdb, update_atoms = True):
        """Replace SEQRES lines with a new sequence, optionally removing
        mutated sidechains"""
        newpdb = PDB()
        inserted_seqres = False
        entries_before_seqres = set(["HEADER", "OBSLTE", "TITLE", "CAVEAT",
                                     "COMPND", "SOURCE", "KEYWDS", "EXPDTA",
                                     "AUTHOR", "REVDAT", "SPRSDE", "JRNL",
                                     "REMARK", "DBREF", "SEQADV"])
        mutated_resids = {}
        if update_atoms:
            old_seqs = ChainSequences()
            chainresnums = old_seqs.parse_atoms(pdb)
            assert self.keys() == old_seqs.keys()
            for chain in self.keys():
                assert len(self[chain]) == len(old_seqs[chain])
                for i in xrange(len(self[chain])):
                    if self[chain][i] != old_seqs[chain][i]:
                        resid = chain + chainresnums[chain][i]
                        mutated_resids[resid] = self[chain][i]
        for line in pdb.lines:
            entry = line[0:6]
            if (not inserted_seqres) and entry not in entries_before_seqres:
                inserted_seqres = True
                newpdb.lines += self.seqres_lines()
            if update_atoms and entry == "ATOM  ":
                resid = line[21:27]
                atom = line[12:16].strip()
                if not mutated_resids.has_key(resid):
                    newpdb.lines += [line]
                else:
                    newpdb.lines += [line[:17] + mutated_resids[resid] + line[20:]]
            elif entry != "SEQRES":
                newpdb.lines += [line]
        if update_atoms:
            newpdb.remove_nonbackbone_atoms(mutated_resids.keys())
        return newpdb
[ "def", "replace_seqres", "(", "self", ",", "pdb", ",", "update_atoms", "=", "True", ")", ":", "newpdb", "=", "PDB", "(", ")", "inserted_seqres", "=", "False", "entries_before_seqres", "=", "set", "(", "[", "\"HEADER\"", ",", "\"OBSLTE\"", ",", "\"TITLE\"", ",", "\"CAVEAT\"", ",", "\"COMPND\"", ",", "\"SOURCE\"", ",", "\"KEYWDS\"", ",", "\"EXPDTA\"", ",", "\"AUTHOR\"", ",", "\"REVDAT\"", ",", "\"SPRSDE\"", ",", "\"JRNL\"", ",", "\"REMARK\"", ",", "\"DBREF\"", ",", "\"SEQADV\"", "]", ")", "mutated_resids", "=", "{", "}", "if", "update_atoms", ":", "old_seqs", "=", "ChainSequences", "(", ")", "chainresnums", "=", "old_seqs", ".", "parse_atoms", "(", "pdb", ")", "assert", "self", ".", "keys", "(", ")", "==", "old_seqs", ".", "keys", "(", ")", "for", "chain", "in", "self", ".", "keys", "(", ")", ":", "assert", "len", "(", "self", "[", "chain", "]", ")", "==", "len", "(", "old_seqs", "[", "chain", "]", ")", "for", "i", "in", "xrange", "(", "len", "(", "self", "[", "chain", "]", ")", ")", ":", "if", "self", "[", "chain", "]", "[", "i", "]", "!=", "old_seqs", "[", "chain", "]", "[", "i", "]", ":", "resid", "=", "chain", "+", "chainresnums", "[", "chain", "]", "[", "i", "]", "mutated_resids", "[", "resid", "]", "=", "self", "[", "chain", "]", "[", "i", "]", "for", "line", "in", "pdb", ".", "lines", ":", "entry", "=", "line", "[", "0", ":", "6", "]", "if", "(", "not", "inserted_seqres", ")", "and", "entry", "not", "in", "entries_before_seqres", ":", "inserted_seqres", "=", "True", "newpdb", ".", "lines", "+=", "self", ".", "seqres_lines", "(", ")", "if", "update_atoms", "and", "entry", "==", "\"ATOM \"", ":", "resid", "=", "line", "[", "21", ":", "27", "]", "atom", "=", "line", "[", "12", ":", "16", "]", ".", "strip", "(", ")", "if", "not", "mutated_resids", ".", "has_key", "(", "resid", ")", ":", "newpdb", ".", "lines", "+=", "[", "line", "]", "else", ":", "newpdb", ".", "lines", "+=", "[", "line", "[", ":", "17", "]", "+", "mutated_resids", "[", "resid", "]", "+", "line", "[", "20", ":", "]", "]", "elif", "entry", "!=", "\"SEQRES\"", ":", "newpdb", ".", "lines", "+=", "[", "line", "]", "if", "update_atoms", ":", "newpdb", ".", "remove_nonbackbone_atoms", "(", "mutated_resids", ".", "keys", "(", ")", ")", "return", "newpdb" ]
docstring: Replace SEQRES lines with a new sequence, optionally removing mutated sidechains
language: python
partition: train
repo: sphinx-gallery/sphinx-gallery
path: sphinx_gallery/downloads.py
url: https://github.com/sphinx-gallery/sphinx-gallery/blob/b0c1f6701bf3f4cef238757e1105cf3686b5e674/sphinx_gallery/downloads.py#L81-L95
code:
    def list_downloadable_sources(target_dir):
        """Returns a list of Python source files in target_dir

        Parameters
        ----------
        target_dir : str
            path to the directory where Python source files are

        Returns
        -------
        list
            list of paths to all Python source files in `target_dir`
        """
        return [os.path.join(target_dir, fname)
                for fname in os.listdir(target_dir)
                if fname.endswith('.py')]
[ "def", "list_downloadable_sources", "(", "target_dir", ")", ":", "return", "[", "os", ".", "path", ".", "join", "(", "target_dir", ",", "fname", ")", "for", "fname", "in", "os", ".", "listdir", "(", "target_dir", ")", "if", "fname", ".", "endswith", "(", "'.py'", ")", "]" ]
docstring:
    Returns a list of Python source files in target_dir

    Parameters
    ----------
    target_dir : str
        path to the directory where Python source files are

    Returns
    -------
    list
        list of paths to all Python source files in `target_dir`
language: python
partition: train
repo: reingart/pyafipws
path: padron.py
url: https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/padron.py#L279-L286
code:
    def ConsultarDomicilios(self, nro_doc, tipo_doc=80, cat_iva=None):
        "Looks up the addresses, returns the count, and sets the list"
        self.cursor.execute("SELECT direccion FROM domicilio WHERE "
                            " tipo_doc=? AND nro_doc=? ORDER BY id ",
                            [tipo_doc, nro_doc])
        filas = self.cursor.fetchall()
        self.domicilios = [fila['direccion'] for fila in filas]
        return len(filas)
[ "def", "ConsultarDomicilios", "(", "self", ",", "nro_doc", ",", "tipo_doc", "=", "80", ",", "cat_iva", "=", "None", ")", ":", "self", ".", "cursor", ".", "execute", "(", "\"SELECT direccion FROM domicilio WHERE \"", "\" tipo_doc=? AND nro_doc=? ORDER BY id \"", ",", "[", "tipo_doc", ",", "nro_doc", "]", ")", "filas", "=", "self", ".", "cursor", ".", "fetchall", "(", ")", "self", ".", "domicilios", "=", "[", "fila", "[", "'direccion'", "]", "for", "fila", "in", "filas", "]", "return", "len", "(", "filas", ")" ]
docstring: Looks up the addresses, returns the count, and sets the list
language: python
partition: train
repo: dev-platypus/platyutil
path: python/platyutil/system.py
url: https://github.com/dev-platypus/platyutil/blob/5f3dadbdc2445e71755fb09d6020641c77d13c47/python/platyutil/system.py#L50-L135
code:
    def systemCall(cmd, sh=True, log=None):
        '''Fancy magic version of os.system'''
        if log is None:
            log = logging

        log.debug('System call [sh:%s]: %s'
                  % (sh, cmd))
        out = []
        proc = None
        poller = None
        outBuf = ['']
        errBuf = ['']

        def pollOutput():
            '''
            Read, log and store output (if any) from processes pipes.
            '''
            removeChars = '\r\n'
            # collect fds with new output
            fds = [entry[0] for entry in poller.poll()]
            if proc.stdout.fileno() in fds:
                while True:
                    try:
                        tmp = proc.stdout.read(100)
                    except IOError:
                        break
                    outBuf[0] += tmp
                    while '\n' in outBuf[0]:
                        line, _, outBuf[0] = outBuf[0].partition('\n')
                        log.debug(line)
                        out.append(line + '\n')
                    if not tmp:
                        break
            if proc.stderr.fileno() in fds:
                while True:
                    try:
                        tmp = proc.stderr.read(100)
                    except IOError:
                        break
                    errBuf[0] += tmp
                    while '\n' in errBuf[0]:
                        line, _, errBuf[0] = errBuf[0].partition('\n')
                        log.warning(line)
                    if not tmp:
                        break

        while True:
            if proc is None:
                # create and start process
                proc = Popen(cmd,
                             stdin=PIPE,
                             stdout=PIPE,
                             stderr=PIPE,
                             shell=sh)

                # create poll select
                poller = select.poll()

                flags = fcntl.fcntl(proc.stdout, fcntl.F_GETFL)
                fcntl.fcntl(proc.stdout, fcntl.F_SETFL, flags | os.O_NONBLOCK)
                flags = fcntl.fcntl(proc.stderr, fcntl.F_GETFL)
                fcntl.fcntl(proc.stderr, fcntl.F_SETFL, flags | os.O_NONBLOCK)

                # register pipes to polling
                poller.register(proc.stdout, select.POLLIN)
                poller.register(proc.stderr, select.POLLIN)

            pollOutput()

            if proc.poll() is not None:
                # proc finished
                break

        # poll once after the process ended to collect all the missing output
        pollOutput()

        # check return code
        if proc.returncode != 0:
            raise RuntimeError(
                CalledProcessError(proc.returncode, cmd, ''.join(out)))

        return ''.join(out)
[ "def", "systemCall", "(", "cmd", ",", "sh", "=", "True", ",", "log", "=", "None", ")", ":", "if", "log", "is", "None", ":", "log", "=", "logging", "log", ".", "debug", "(", "'System call [sh:%s]: %s'", "%", "(", "sh", ",", "cmd", ")", ")", "out", "=", "[", "]", "proc", "=", "None", "poller", "=", "None", "outBuf", "=", "[", "''", "]", "errBuf", "=", "[", "''", "]", "def", "pollOutput", "(", ")", ":", "'''\n Read, log and store output (if any) from processes pipes.\n '''", "removeChars", "=", "'\\r\\n'", "# collect fds with new output", "fds", "=", "[", "entry", "[", "0", "]", "for", "entry", "in", "poller", ".", "poll", "(", ")", "]", "if", "proc", ".", "stdout", ".", "fileno", "(", ")", "in", "fds", ":", "while", "True", ":", "try", ":", "tmp", "=", "proc", ".", "stdout", ".", "read", "(", "100", ")", "except", "IOError", ":", "break", "outBuf", "[", "0", "]", "+=", "tmp", "while", "'\\n'", "in", "outBuf", "[", "0", "]", ":", "line", ",", "_", ",", "outBuf", "[", "0", "]", "=", "outBuf", "[", "0", "]", ".", "partition", "(", "'\\n'", ")", "log", ".", "debug", "(", "line", ")", "out", ".", "append", "(", "line", "+", "'\\n'", ")", "if", "not", "tmp", ":", "break", "if", "proc", ".", "stderr", ".", "fileno", "(", ")", "in", "fds", ":", "while", "True", ":", "try", ":", "tmp", "=", "proc", ".", "stderr", ".", "read", "(", "100", ")", "except", "IOError", ":", "break", "errBuf", "[", "0", "]", "+=", "tmp", "while", "'\\n'", "in", "errBuf", "[", "0", "]", ":", "line", ",", "_", ",", "errBuf", "[", "0", "]", "=", "errBuf", "[", "0", "]", ".", "partition", "(", "'\\n'", ")", "log", ".", "warning", "(", "line", ")", "if", "not", "tmp", ":", "break", "while", "True", ":", "if", "proc", "is", "None", ":", "# create and start process", "proc", "=", "Popen", "(", "cmd", ",", "stdin", "=", "PIPE", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ",", "shell", "=", "sh", ")", "# create poll select", "poller", "=", "select", ".", "poll", "(", ")", "flags", "=", "fcntl", ".", "fcntl", "(", "proc", ".", "stdout", ",", "fcntl", ".", "F_GETFL", ")", "fcntl", ".", "fcntl", "(", "proc", ".", "stdout", ",", "fcntl", ".", "F_SETFL", ",", "flags", "|", "os", ".", "O_NONBLOCK", ")", "flags", "=", "fcntl", ".", "fcntl", "(", "proc", ".", "stderr", ",", "fcntl", ".", "F_GETFL", ")", "fcntl", ".", "fcntl", "(", "proc", ".", "stderr", ",", "fcntl", ".", "F_SETFL", ",", "flags", "|", "os", ".", "O_NONBLOCK", ")", "# register pipes to polling", "poller", ".", "register", "(", "proc", ".", "stdout", ",", "select", ".", "POLLIN", ")", "poller", ".", "register", "(", "proc", ".", "stderr", ",", "select", ".", "POLLIN", ")", "pollOutput", "(", ")", "if", "proc", ".", "poll", "(", ")", "is", "not", "None", ":", "# proc finished", "break", "# poll once after the process ended to collect all the missing output", "pollOutput", "(", ")", "# check return code", "if", "proc", ".", "returncode", "!=", "0", ":", "raise", "RuntimeError", "(", "CalledProcessError", "(", "proc", ".", "returncode", ",", "cmd", ",", "''", ".", "join", "(", "out", ")", ")", ")", "return", "''", ".", "join", "(", "out", ")" ]
docstring: Fancy magic version of os.system
language: python
partition: train
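A hypothetical call; note the function reads the pipes as text strings, which matches the Python 2 environment this module was written for (under Python 3 the reads would return bytes):

    # stdout lines are logged at DEBUG and collected into the return value;
    # stderr lines are logged as warnings but not returned.
    output = systemCall('echo hello && echo oops >&2')
    assert output == 'hello\n'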
repo: log2timeline/plaso
path: plaso/filters/file_entry.py
url: https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/filters/file_entry.py#L232-L244
code:
    def Matches(self, file_entry):
        """Compares the file entry against the filter.

        Args:
          file_entry (dfvfs.FileEntry): file entry to compare.

        Returns:
          bool: True if the file entry matches the filter.
        """
        if not self._names or not file_entry.IsFile():
            return False
        return file_entry.name.lower() in self._names
[ "def", "Matches", "(", "self", ",", "file_entry", ")", ":", "if", "not", "self", ".", "_names", "or", "not", "file_entry", ".", "IsFile", "(", ")", ":", "return", "False", "return", "file_entry", ".", "name", ".", "lower", "(", ")", "in", "self", ".", "_names" ]
docstring:
    Compares the file entry against the filter.

    Args:
      file_entry (dfvfs.FileEntry): file entry to compare.

    Returns:
      bool: True if the file entry matches the filter.
language: python
partition: train
repo: nicolargo/glances
path: glances/plugins/glances_gpu.py
url: https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_gpu.py#L134-L217
code:
    def msg_curse(self, args=None, max_width=None):
        """Return the dict to display in the curse interface."""
        # Init the return message
        ret = []

        # Only process if stats exist, not empty (issue #871) and plugin not disabled
        if not self.stats or (self.stats == []) or self.is_disable():
            return ret

        # Check if all GPU have the same name
        same_name = all(s['name'] == self.stats[0]['name'] for s in self.stats)

        # gpu_stats contain the first GPU in the list
        gpu_stats = self.stats[0]

        # Header
        header = ''
        if len(self.stats) > 1:
            header += '{} '.format(len(self.stats))
        if same_name:
            header += '{} {}'.format('GPU', gpu_stats['name'])
        else:
            header += '{}'.format('GPU')
        msg = header[:17]
        ret.append(self.curse_add_line(msg, "TITLE"))

        # Build the string message
        if len(self.stats) == 1 or args.meangpu:
            # GPU stat summary or mono GPU
            # New line
            ret.append(self.curse_new_line())
            # GPU PROC
            try:
                mean_proc = sum(s['proc'] for s in self.stats if s is not None) / len(self.stats)
            except TypeError:
                mean_proc_msg = '{:>4}'.format('N/A')
            else:
                mean_proc_msg = '{:>3.0f}%'.format(mean_proc)
            if len(self.stats) > 1:
                msg = '{:13}'.format('proc mean:')
            else:
                msg = '{:13}'.format('proc:')
            ret.append(self.curse_add_line(msg))
            ret.append(self.curse_add_line(
                mean_proc_msg,
                self.get_views(item=gpu_stats[self.get_key()], key='proc', option='decoration')))
            # New line
            ret.append(self.curse_new_line())
            # GPU MEM
            try:
                mean_mem = sum(s['mem'] for s in self.stats if s is not None) / len(self.stats)
            except TypeError:
                mean_mem_msg = '{:>4}'.format('N/A')
            else:
                mean_mem_msg = '{:>3.0f}%'.format(mean_mem)
            if len(self.stats) > 1:
                msg = '{:13}'.format('mem mean:')
            else:
                msg = '{:13}'.format('mem:')
            ret.append(self.curse_add_line(msg))
            ret.append(self.curse_add_line(
                mean_mem_msg,
                self.get_views(item=gpu_stats[self.get_key()], key='mem', option='decoration')))
        else:
            # Multi GPU
            for gpu_stats in self.stats:
                # New line
                ret.append(self.curse_new_line())
                # GPU ID + PROC + MEM
                id_msg = '{}'.format(gpu_stats['gpu_id'])
                try:
                    proc_msg = '{:>3.0f}%'.format(gpu_stats['proc'])
                except ValueError:
                    proc_msg = '{:>4}'.format('N/A')
                try:
                    mem_msg = '{:>3.0f}%'.format(gpu_stats['mem'])
                except ValueError:
                    mem_msg = '{:>4}'.format('N/A')
                msg = '{}: {} mem: {}'.format(id_msg, proc_msg, mem_msg)
                ret.append(self.curse_add_line(msg))

        return ret
[ "def", "msg_curse", "(", "self", ",", "args", "=", "None", ",", "max_width", "=", "None", ")", ":", "# Init the return message", "ret", "=", "[", "]", "# Only process if stats exist, not empty (issue #871) and plugin not disabled", "if", "not", "self", ".", "stats", "or", "(", "self", ".", "stats", "==", "[", "]", ")", "or", "self", ".", "is_disable", "(", ")", ":", "return", "ret", "# Check if all GPU have the same name", "same_name", "=", "all", "(", "s", "[", "'name'", "]", "==", "self", ".", "stats", "[", "0", "]", "[", "'name'", "]", "for", "s", "in", "self", ".", "stats", ")", "# gpu_stats contain the first GPU in the list", "gpu_stats", "=", "self", ".", "stats", "[", "0", "]", "# Header", "header", "=", "''", "if", "len", "(", "self", ".", "stats", ")", ">", "1", ":", "header", "+=", "'{} '", ".", "format", "(", "len", "(", "self", ".", "stats", ")", ")", "if", "same_name", ":", "header", "+=", "'{} {}'", ".", "format", "(", "'GPU'", ",", "gpu_stats", "[", "'name'", "]", ")", "else", ":", "header", "+=", "'{}'", ".", "format", "(", "'GPU'", ")", "msg", "=", "header", "[", ":", "17", "]", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "\"TITLE\"", ")", ")", "# Build the string message", "if", "len", "(", "self", ".", "stats", ")", "==", "1", "or", "args", ".", "meangpu", ":", "# GPU stat summary or mono GPU", "# New line", "ret", ".", "append", "(", "self", ".", "curse_new_line", "(", ")", ")", "# GPU PROC", "try", ":", "mean_proc", "=", "sum", "(", "s", "[", "'proc'", "]", "for", "s", "in", "self", ".", "stats", "if", "s", "is", "not", "None", ")", "/", "len", "(", "self", ".", "stats", ")", "except", "TypeError", ":", "mean_proc_msg", "=", "'{:>4}'", ".", "format", "(", "'N/A'", ")", "else", ":", "mean_proc_msg", "=", "'{:>3.0f}%'", ".", "format", "(", "mean_proc", ")", "if", "len", "(", "self", ".", "stats", ")", ">", "1", ":", "msg", "=", "'{:13}'", ".", "format", "(", "'proc mean:'", ")", "else", ":", "msg", "=", "'{:13}'", ".", "format", "(", "'proc:'", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "mean_proc_msg", ",", "self", ".", "get_views", "(", "item", "=", "gpu_stats", "[", "self", ".", "get_key", "(", ")", "]", ",", "key", "=", "'proc'", ",", "option", "=", "'decoration'", ")", ")", ")", "# New line", "ret", ".", "append", "(", "self", ".", "curse_new_line", "(", ")", ")", "# GPU MEM", "try", ":", "mean_mem", "=", "sum", "(", "s", "[", "'mem'", "]", "for", "s", "in", "self", ".", "stats", "if", "s", "is", "not", "None", ")", "/", "len", "(", "self", ".", "stats", ")", "except", "TypeError", ":", "mean_mem_msg", "=", "'{:>4}'", ".", "format", "(", "'N/A'", ")", "else", ":", "mean_mem_msg", "=", "'{:>3.0f}%'", ".", "format", "(", "mean_mem", ")", "if", "len", "(", "self", ".", "stats", ")", ">", "1", ":", "msg", "=", "'{:13}'", ".", "format", "(", "'mem mean:'", ")", "else", ":", "msg", "=", "'{:13}'", ".", "format", "(", "'mem:'", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "mean_mem_msg", ",", "self", ".", "get_views", "(", "item", "=", "gpu_stats", "[", "self", ".", "get_key", "(", ")", "]", ",", "key", "=", "'mem'", ",", "option", "=", "'decoration'", ")", ")", ")", "else", ":", "# Multi GPU", "for", "gpu_stats", "in", "self", ".", "stats", ":", "# New line", "ret", ".", "append", "(", "self", ".", "curse_new_line", "(", ")", ")", "# 
GPU ID + PROC + MEM", "id_msg", "=", "'{}'", ".", "format", "(", "gpu_stats", "[", "'gpu_id'", "]", ")", "try", ":", "proc_msg", "=", "'{:>3.0f}%'", ".", "format", "(", "gpu_stats", "[", "'proc'", "]", ")", "except", "ValueError", ":", "proc_msg", "=", "'{:>4}'", ".", "format", "(", "'N/A'", ")", "try", ":", "mem_msg", "=", "'{:>3.0f}%'", ".", "format", "(", "gpu_stats", "[", "'mem'", "]", ")", "except", "ValueError", ":", "mem_msg", "=", "'{:>4}'", ".", "format", "(", "'N/A'", ")", "msg", "=", "'{}: {} mem: {}'", ".", "format", "(", "id_msg", ",", "proc_msg", ",", "mem_msg", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "return", "ret" ]
Return the dict to display in the curse interface.
[ "Return", "the", "dict", "to", "display", "in", "the", "curse", "interface", "." ]
python
train
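One detail worth noting in msg_curse is the mean-with-fallback pattern: summing per-GPU values raises TypeError when a stat is None, and the code renders 'N/A' instead. A standalone sketch of that pattern, with made-up stats:

stats = [{'proc': 35.0}, {'proc': None}]  # invented per-GPU stats

try:
    # A None value makes the sum raise TypeError, just as in msg_curse.
    mean_proc = sum(s['proc'] for s in stats) / len(stats)
except TypeError:
    mean_proc_msg = '{:>4}'.format('N/A')
else:
    mean_proc_msg = '{:>3.0f}%'.format(mean_proc)

print(mean_proc_msg)  # ' N/A'
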
pantsbuild/pants
src/python/pants/backend/jvm/tasks/jar_publish.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/jvm/tasks/jar_publish.py#L499-L516
def _ivy_jvm_options(self, repo):
    """Get the JVM options for ivy authentication, if needed."""
    # Get authentication for the publish repo if needed.
    if not repo.get('auth'):
      # No need to copy here, as this list isn't modified by the caller.
      return self._jvm_options

    # Create a copy of the options, so that the modification is appropriately transient.
    jvm_options = copy(self._jvm_options)

    user = repo.get('username')
    password = repo.get('password')
    if user and password:
      jvm_options.append('-Dlogin={}'.format(user))
      jvm_options.append('-Dpassword={}'.format(password))
    else:
      raise TaskError('Unable to publish to {}. {}'
                      .format(repo.get('resolver'), repo.get('help', '')))
    return jvm_options
[ "def", "_ivy_jvm_options", "(", "self", ",", "repo", ")", ":", "# Get authentication for the publish repo if needed.", "if", "not", "repo", ".", "get", "(", "'auth'", ")", ":", "# No need to copy here, as this list isn't modified by the caller.", "return", "self", ".", "_jvm_options", "# Create a copy of the options, so that the modification is appropriately transient.", "jvm_options", "=", "copy", "(", "self", ".", "_jvm_options", ")", "user", "=", "repo", ".", "get", "(", "'username'", ")", "password", "=", "repo", ".", "get", "(", "'password'", ")", "if", "user", "and", "password", ":", "jvm_options", ".", "append", "(", "'-Dlogin={}'", ".", "format", "(", "user", ")", ")", "jvm_options", ".", "append", "(", "'-Dpassword={}'", ".", "format", "(", "password", ")", ")", "else", ":", "raise", "TaskError", "(", "'Unable to publish to {}. {}'", ".", "format", "(", "repo", ".", "get", "(", "'resolver'", ")", ",", "repo", ".", "get", "(", "'help'", ",", "''", ")", ")", ")", "return", "jvm_options" ]
Get the JVM options for ivy authentication, if needed.
[ "Get", "the", "JVM", "options", "for", "ivy", "authentication", "if", "needed", "." ]
python
train
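The method appends -Dlogin/-Dpassword system properties only when the repo requires auth, copying the base options so the credentials stay local to one call. A hedged sketch of the same flow with plain dicts and lists; the repo contents are invented, and pants' TaskError is replaced by ValueError to keep it dependency-free:

from copy import copy

base_jvm_options = ['-Xmx1g']
repo = {'auth': True, 'username': 'deploy', 'password': 's3cret'}  # invented example

def ivy_jvm_options(repo, jvm_options):
    if not repo.get('auth'):
        return jvm_options           # sharing is safe: the caller never mutates it
    opts = copy(jvm_options)         # copy so the credentials stay transient
    user, password = repo.get('username'), repo.get('password')
    if not (user and password):
        raise ValueError('Unable to publish to {}.'.format(repo.get('resolver')))
    opts.append('-Dlogin={}'.format(user))
    opts.append('-Dpassword={}'.format(password))
    return opts

print(ivy_jvm_options(repo, base_jvm_options))
# ['-Xmx1g', '-Dlogin=deploy', '-Dpassword=s3cret']
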
bitcraft/PyTMX
pytmx/pytmx.py
https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L568-L596
def get_tile_properties(self, x, y, layer):
        """ Return the tile properties for this location

        :param x: x coordinate
        :param y: y coordinate
        :param layer: layer number
        :rtype: python dict if found, otherwise None
        """
        try:
            assert (x >= 0 and y >= 0 and layer >= 0)
        except AssertionError:
            raise ValueError

        try:
            gid = self.layers[int(layer)].data[int(y)][int(x)]
        except (IndexError, ValueError):
            msg = "Coords: ({0},{1}) in layer {2} is invalid."
            logger.debug(msg.format(x, y, layer))
            raise Exception
        else:
            try:
                return self.tile_properties[gid]
            except (IndexError, ValueError):
                msg = "Coords: ({0},{1}) in layer {2} has invalid GID: {3}"
                logger.debug(msg.format(x, y, layer, gid))
                raise Exception
            except KeyError:
                return None
[ "def", "get_tile_properties", "(", "self", ",", "x", ",", "y", ",", "layer", ")", ":", "try", ":", "assert", "(", "x", ">=", "0", "and", "y", ">=", "0", "and", "layer", ">=", "0", ")", "except", "AssertionError", ":", "raise", "ValueError", "try", ":", "gid", "=", "self", ".", "layers", "[", "int", "(", "layer", ")", "]", ".", "data", "[", "int", "(", "y", ")", "]", "[", "int", "(", "x", ")", "]", "except", "(", "IndexError", ",", "ValueError", ")", ":", "msg", "=", "\"Coords: ({0},{1}) in layer {2} is invalid.\"", "logger", ".", "debug", "(", "msg", ".", "format", "(", "x", ",", "y", ",", "layer", ")", ")", "raise", "Exception", "else", ":", "try", ":", "return", "self", ".", "tile_properties", "[", "gid", "]", "except", "(", "IndexError", ",", "ValueError", ")", ":", "msg", "=", "\"Coords: ({0},{1}) in layer {2} has invalid GID: {3}\"", "logger", ".", "debug", "(", "msg", ".", "format", "(", "x", ",", "y", ",", "layer", ",", "gid", ")", ")", "raise", "Exception", "except", "KeyError", ":", "return", "None" ]
Return the tile properties for this location

:param x: x coordinate
:param y: y coordinate
:param layer: layer number
:rtype: python dict if found, otherwise None
[ "Return", "the", "tile", "image", "GID", "for", "this", "location" ]
python
train
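Note the lookup order above: bad coordinates or layers raise, while a valid GID with no registered properties returns None. A small sketch of that last branch; the tile_properties contents are invented:

# Sketch of the GID -> properties lookup; the mapping below is invented.
tile_properties = {1: {'kind': 'grass'}, 2: {'kind': 'water'}}

def properties_for_gid(gid):
    try:
        return tile_properties[gid]
    except KeyError:
        # A GID with no custom properties yields None, as in pytmx.
        return None

assert properties_for_gid(2) == {'kind': 'water'}
assert properties_for_gid(99) is None
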
zqfang/GSEApy
gseapy/enrichr.py
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/enrichr.py#L98-L126
def parse_genelists(self):
        """parse gene list"""
        if isinstance(self.gene_list, list):
            genes = self.gene_list
        elif isinstance(self.gene_list, pd.DataFrame):
            # input type is bed file
            if self.gene_list.shape[1] >= 3:
                genes = self.gene_list.iloc[:, :3].apply(lambda x: "\t".join([str(i) for i in x]), axis=1).tolist()
            # input type with weight values
            elif self.gene_list.shape[1] == 2:
                genes = self.gene_list.apply(lambda x: ",".join([str(i) for i in x]), axis=1).tolist()
            else:
                genes = self.gene_list.squeeze().tolist()
        elif isinstance(self.gene_list, pd.Series):
            genes = self.gene_list.squeeze().tolist()
        else:
            # get gene lists or bed file, or gene list with weighted values.
            genes = []
            with open(self.gene_list) as f:
                for gene in f:
                    genes.append(gene.strip())

        self._isezid = all(map(self._is_entrez_id, genes))
        if self._isezid:
            self._gls = set(map(int, self._gls))
        else:
            self._gls = genes

        return '\n'.join(genes)
[ "def", "parse_genelists", "(", "self", ")", ":", "if", "isinstance", "(", "self", ".", "gene_list", ",", "list", ")", ":", "genes", "=", "self", ".", "gene_list", "elif", "isinstance", "(", "self", ".", "gene_list", ",", "pd", ".", "DataFrame", ")", ":", "# input type is bed file", "if", "self", ".", "gene_list", ".", "shape", "[", "1", "]", ">=", "3", ":", "genes", "=", "self", ".", "gene_list", ".", "iloc", "[", ":", ",", ":", "3", "]", ".", "apply", "(", "lambda", "x", ":", "\"\\t\"", ".", "join", "(", "[", "str", "(", "i", ")", "for", "i", "in", "x", "]", ")", ",", "axis", "=", "1", ")", ".", "tolist", "(", ")", "# input type with weight values", "elif", "self", ".", "gene_list", ".", "shape", "[", "1", "]", "==", "2", ":", "genes", "=", "self", ".", "gene_list", ".", "apply", "(", "lambda", "x", ":", "\",\"", ".", "join", "(", "[", "str", "(", "i", ")", "for", "i", "in", "x", "]", ")", ",", "axis", "=", "1", ")", ".", "tolist", "(", ")", "else", ":", "genes", "=", "self", ".", "gene_list", ".", "squeeze", "(", ")", ".", "tolist", "(", ")", "elif", "isinstance", "(", "self", ".", "gene_list", ",", "pd", ".", "Series", ")", ":", "genes", "=", "self", ".", "gene_list", ".", "squeeze", "(", ")", ".", "tolist", "(", ")", "else", ":", "# get gene lists or bed file, or gene list with weighted values.", "genes", "=", "[", "]", "with", "open", "(", "self", ".", "gene_list", ")", "as", "f", ":", "for", "gene", "in", "f", ":", "genes", ".", "append", "(", "gene", ".", "strip", "(", ")", ")", "self", ".", "_isezid", "=", "all", "(", "map", "(", "self", ".", "_is_entrez_id", ",", "genes", ")", ")", "if", "self", ".", "_isezid", ":", "self", ".", "_gls", "=", "set", "(", "map", "(", "int", ",", "self", ".", "_gls", ")", ")", "else", ":", "self", ".", "_gls", "=", "genes", "return", "'\\n'", ".", "join", "(", "genes", ")" ]
parse gene list
[ "parse", "gene", "list" ]
python
test
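parse_genelists normalizes several input shapes into one newline-joined string: a DataFrame with three or more columns is treated as BED (tab-joined), a two-column one as weighted genes (comma-joined). A small sketch of just those two DataFrame branches; requires pandas, and the example frames are invented:

import pandas as pd

bed = pd.DataFrame([['chr1', 100, 200], ['chr2', 300, 400]])  # >= 3 columns -> BED
weighted = pd.DataFrame([['TP53', 1.5], ['EGFR', 0.7]])       # 2 columns -> gene,weight

bed_lines = bed.iloc[:, :3].apply(lambda x: "\t".join(str(i) for i in x), axis=1).tolist()
weight_lines = weighted.apply(lambda x: ",".join(str(i) for i in x), axis=1).tolist()

print("\n".join(bed_lines))     # chr1<TAB>100<TAB>200 ...
print("\n".join(weight_lines))  # TP53,1.5 ...
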
MarcMeszaros/envitro
envitro/docker.py
https://github.com/MarcMeszaros/envitro/blob/19e925cd152c08d4db8126542afed35188cafff4/envitro/docker.py#L64-L86
def protocol(alias_name, default=None, allow_none=False):
    """Get the protocol from the docker link alias or return the default.

    Args:
        alias_name: The docker link alias
        default: The default value if the link isn't available
        allow_none: If the return value can be `None` (i.e. optional)

    Examples:
        Assuming a Docker link was created with ``docker --link postgres:db``
        and the resulting environment variable is ``DB_PORT=tcp://172.17.0.82:5432``.

        >>> envitro.docker.protocol('DB')
        tcp
    """
    warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2)
    try:
        return _split_docker_link(alias_name)[0]
    except KeyError as err:
        if default or allow_none:
            return default
        else:
            raise err
[ "def", "protocol", "(", "alias_name", ",", "default", "=", "None", ",", "allow_none", "=", "False", ")", ":", "warnings", ".", "warn", "(", "'Will be removed in v1.0'", ",", "DeprecationWarning", ",", "stacklevel", "=", "2", ")", "try", ":", "return", "_split_docker_link", "(", "alias_name", ")", "[", "0", "]", "except", "KeyError", "as", "err", ":", "if", "default", "or", "allow_none", ":", "return", "default", "else", ":", "raise", "err" ]
Get the protocol from the docker link alias or return the default.

Args:
    alias_name: The docker link alias
    default: The default value if the link isn't available
    allow_none: If the return value can be `None` (i.e. optional)

Examples:
    Assuming a Docker link was created with ``docker --link postgres:db``
    and the resulting environment variable is ``DB_PORT=tcp://172.17.0.82:5432``.

    >>> envitro.docker.protocol('DB')
    tcp
[ "Get", "the", "protocol", "from", "the", "docker", "link", "alias", "or", "return", "the", "default", "." ]
python
train
sixty-north/cosmic-ray
src/cosmic_ray/cloning.py
https://github.com/sixty-north/cosmic-ray/blob/c654e074afbb7b7fcbc23359083c1287c0d3e991/src/cosmic_ray/cloning.py#L95-L109
def replace_variables(self, text):
        """Replace variable placeholders in `text` with values from the virtual env.

        The variables are:

        - {python-executable}

        Args:
            text: The text to do replacement in.

        Returns: The text after replacement.
        """
        variables = {
            'python-executable': str(self._venv_path / 'bin' / 'python')
        }
        return text.format(**variables)
[ "def", "replace_variables", "(", "self", ",", "text", ")", ":", "variables", "=", "{", "'python-executable'", ":", "str", "(", "self", ".", "_venv_path", "/", "'bin'", "/", "'python'", ")", "}", "return", "text", ".", "format", "(", "*", "*", "variables", ")" ]
Replace variable placeholders in `text` with values from the virtual env.

The variables are:

- {python-executable}

Args:
    text: The text to do replacement in.

Returns: The text after replacement.
[ "Replace", "variable", "placeholders", "in", "text", "with", "values", "from", "the", "virtual", "env", "." ]
python
train
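replace_variables is just str.format over a fixed mapping, so any unknown placeholder in the template raises KeyError. A minimal sketch; the venv path is invented:

from pathlib import Path

venv_path = Path('/tmp/venv')  # invented path, for illustration only
variables = {'python-executable': str(venv_path / 'bin' / 'python')}

template = 'run {python-executable} -m pytest'
print(template.format(**variables))  # run /tmp/venv/bin/python -m pytest
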
PyCQA/pylint
pylint/extensions/_check_docs_utils.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/extensions/_check_docs_utils.py#L60-L81
def get_setters_property(node):
    """Get the property node for the given setter node.

    :param node: The node to get the property for.
    :type node: astroid.FunctionDef

    :rtype: astroid.FunctionDef or None
    :returns: The node relating to the property of the given setter node,
        or None if one could not be found.
    """
    property_ = None

    property_name = get_setters_property_name(node)
    class_node = utils.node_frame_class(node)
    if property_name and class_node:
        class_attrs = class_node.getattr(node.name)
        for attr in class_attrs:
            if utils.decorated_with_property(attr):
                property_ = attr
                break

    return property_
[ "def", "get_setters_property", "(", "node", ")", ":", "property_", "=", "None", "property_name", "=", "get_setters_property_name", "(", "node", ")", "class_node", "=", "utils", ".", "node_frame_class", "(", "node", ")", "if", "property_name", "and", "class_node", ":", "class_attrs", "=", "class_node", ".", "getattr", "(", "node", ".", "name", ")", "for", "attr", "in", "class_attrs", ":", "if", "utils", ".", "decorated_with_property", "(", "attr", ")", ":", "property_", "=", "attr", "break", "return", "property_" ]
Get the property node for the given setter node.

:param node: The node to get the property for.
:type node: astroid.FunctionDef

:rtype: astroid.FunctionDef or None
:returns: The node relating to the property of the given setter node,
    or None if one could not be found.
[ "Get", "the", "property", "node", "for", "the", "given", "setter", "node", "." ]
python
test
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/parallel/client/asyncresult.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/client/asyncresult.py#L289-L322
def timedelta(self, start, end, start_key=min, end_key=max):
        """compute the difference between two sets of timestamps

        The default behavior is to use the earliest of the first
        and the latest of the second list, but this can be changed
        by passing a different start_key or end_key.

        Parameters
        ----------

        start : one or more datetime objects (e.g. ar.submitted)
        end : one or more datetime objects (e.g. ar.received)
        start_key : callable
            Function to call on `start` to extract the relevant
            entry [default: min]
        end_key : callable
            Function to call on `end` to extract the relevant
            entry [default: max]

        Returns
        -------

        dt : float
            The time elapsed (in seconds) between the two selected timestamps.
        """
        if not isinstance(start, datetime):
            # handle single_result AsyncResults, where ar.stamp is single object,
            # not a list
            start = start_key(start)
        if not isinstance(end, datetime):
            # handle single_result AsyncResults, where ar.stamp is single object,
            # not a list
            end = end_key(end)
        return _total_seconds(end - start)
[ "def", "timedelta", "(", "self", ",", "start", ",", "end", ",", "start_key", "=", "min", ",", "end_key", "=", "max", ")", ":", "if", "not", "isinstance", "(", "start", ",", "datetime", ")", ":", "# handle single_result AsyncResults, where ar.stamp is single object,", "# not a list", "start", "=", "start_key", "(", "start", ")", "if", "not", "isinstance", "(", "end", ",", "datetime", ")", ":", "# handle single_result AsyncResults, where ar.stamp is single object,", "# not a list", "end", "=", "end_key", "(", "end", ")", "return", "_total_seconds", "(", "end", "-", "start", ")" ]
compute the difference between two sets of timestamps

The default behavior is to use the earliest of the first
and the latest of the second list, but this can be changed
by passing a different start_key or end_key.

Parameters
----------

start : one or more datetime objects (e.g. ar.submitted)
end : one or more datetime objects (e.g. ar.received)
start_key : callable
    Function to call on `start` to extract the relevant
    entry [default: min]
end_key : callable
    Function to call on `end` to extract the relevant
    entry [default: max]

Returns
-------

dt : float
    The time elapsed (in seconds) between the two selected timestamps.
[ "compute", "the", "difference", "between", "two", "sets", "of", "timestamps", "The", "default", "behavior", "is", "to", "use", "the", "earliest", "of", "the", "first", "and", "the", "latest", "of", "the", "second", "list", "but", "this", "can", "be", "changed", "by", "passing", "a", "different", "Parameters", "----------", "start", ":", "one", "or", "more", "datetime", "objects", "(", "e", ".", "g", ".", "ar", ".", "submitted", ")", "end", ":", "one", "or", "more", "datetime", "objects", "(", "e", ".", "g", ".", "ar", ".", "received", ")", "start_key", ":", "callable", "Function", "to", "call", "on", "start", "to", "extract", "the", "relevant", "entry", "[", "defalt", ":", "min", "]", "end_key", ":", "callable", "Function", "to", "call", "on", "end", "to", "extract", "the", "relevant", "entry", "[", "default", ":", "max", "]", "Returns", "-------", "dt", ":", "float", "The", "time", "elapsed", "(", "in", "seconds", ")", "between", "the", "two", "selected", "timestamps", "." ]
python
test
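The helper reduces lists of datetimes to single endpoints (min of the starts, max of the ends) before subtracting. A standalone sketch of the same computation, with invented timestamps:

from datetime import datetime

submitted = [datetime(2024, 1, 1, 12, 0, 0), datetime(2024, 1, 1, 12, 0, 5)]
received = [datetime(2024, 1, 1, 12, 0, 30), datetime(2024, 1, 1, 12, 1, 0)]

def elapsed(start, end, start_key=min, end_key=max):
    # Lists are reduced to one endpoint each; single datetimes pass through.
    if not isinstance(start, datetime):
        start = start_key(start)
    if not isinstance(end, datetime):
        end = end_key(end)
    return (end - start).total_seconds()

print(elapsed(submitted, received))  # 60.0: earliest submit to latest receive
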
markfinger/assembla
assembla/api.py
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L471-L481
def user(self, extra_params=None):
        """
        The User currently assigned to the Ticket
        """
        if self.get('assigned_to_id', None):
            users = self.space.users(
                id=self['assigned_to_id'],
                extra_params=extra_params
            )
            if users:
                return users[0]
[ "def", "user", "(", "self", ",", "extra_params", "=", "None", ")", ":", "if", "self", ".", "get", "(", "'assigned_to_id'", ",", "None", ")", ":", "users", "=", "self", ".", "space", ".", "users", "(", "id", "=", "self", "[", "'assigned_to_id'", "]", ",", "extra_params", "=", "extra_params", ")", "if", "users", ":", "return", "users", "[", "0", "]" ]
The User currently assigned to the Ticket
[ "The", "User", "currently", "assigned", "to", "the", "Ticket" ]
python
train
log2timeline/plaso
plaso/cli/pinfo_tool.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/pinfo_tool.py#L657-L703
def ParseOptions(self, options):
    """Parses the options.

    Args:
      options (argparse.Namespace): command line arguments.

    Raises:
      BadConfigOption: if the options are invalid.
    """
    self._ParseInformationalOptions(options)

    self._verbose = getattr(options, 'verbose', False)

    self._output_filename = getattr(options, 'write', None)

    argument_helper_names = ['process_resources', 'storage_file']
    helpers_manager.ArgumentHelperManager.ParseOptions(
        options, self, names=argument_helper_names)

    # TODO: move check into _CheckStorageFile.
    if not self._storage_file_path:
      raise errors.BadConfigOption('Missing storage file option.')

    if not os.path.isfile(self._storage_file_path):
      raise errors.BadConfigOption(
          'No such storage file: {0:s}.'.format(self._storage_file_path))

    compare_storage_file_path = self.ParseStringOption(
        options, 'compare_storage_file')
    if compare_storage_file_path:
      if not os.path.isfile(compare_storage_file_path):
        raise errors.BadConfigOption(
            'No such storage file: {0:s}.'.format(compare_storage_file_path))

      self._compare_storage_file_path = compare_storage_file_path
      self.compare_storage_information = True

    self._output_format = self.ParseStringOption(options, 'output_format')

    if self._output_filename:
      if os.path.exists(self._output_filename):
        raise errors.BadConfigOption(
            'Output file already exists: {0:s}.'.format(self._output_filename))
      output_file_object = open(self._output_filename, 'wb')
      self._output_writer = tools.FileObjectOutputWriter(output_file_object)

    self._EnforceProcessMemoryLimit(self._process_memory_limit)
[ "def", "ParseOptions", "(", "self", ",", "options", ")", ":", "self", ".", "_ParseInformationalOptions", "(", "options", ")", "self", ".", "_verbose", "=", "getattr", "(", "options", ",", "'verbose'", ",", "False", ")", "self", ".", "_output_filename", "=", "getattr", "(", "options", ",", "'write'", ",", "None", ")", "argument_helper_names", "=", "[", "'process_resources'", ",", "'storage_file'", "]", "helpers_manager", ".", "ArgumentHelperManager", ".", "ParseOptions", "(", "options", ",", "self", ",", "names", "=", "argument_helper_names", ")", "# TODO: move check into _CheckStorageFile.", "if", "not", "self", ".", "_storage_file_path", ":", "raise", "errors", ".", "BadConfigOption", "(", "'Missing storage file option.'", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "self", ".", "_storage_file_path", ")", ":", "raise", "errors", ".", "BadConfigOption", "(", "'No such storage file: {0:s}.'", ".", "format", "(", "self", ".", "_storage_file_path", ")", ")", "compare_storage_file_path", "=", "self", ".", "ParseStringOption", "(", "options", ",", "'compare_storage_file'", ")", "if", "compare_storage_file_path", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "compare_storage_file_path", ")", ":", "raise", "errors", ".", "BadConfigOption", "(", "'No such storage file: {0:s}.'", ".", "format", "(", "compare_storage_file_path", ")", ")", "self", ".", "_compare_storage_file_path", "=", "compare_storage_file_path", "self", ".", "compare_storage_information", "=", "True", "self", ".", "_output_format", "=", "self", ".", "ParseStringOption", "(", "options", ",", "'output_format'", ")", "if", "self", ".", "_output_filename", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "_output_filename", ")", ":", "raise", "errors", ".", "BadConfigOption", "(", "'Output file already exists: {0:s}.'", ".", "format", "(", "self", ".", "_output_filename", ")", ")", "output_file_object", "=", "open", "(", "self", ".", "_output_filename", ",", "'wb'", ")", "self", ".", "_output_writer", "=", "tools", ".", "FileObjectOutputWriter", "(", "output_file_object", ")", "self", ".", "_EnforceProcessMemoryLimit", "(", "self", ".", "_process_memory_limit", ")" ]
Parses the options.

Args:
  options (argparse.Namespace): command line arguments.

Raises:
  BadConfigOption: if the options are invalid.
[ "Parses", "the", "options", "." ]
python
train
trailofbits/manticore
manticore/native/cpu/x86.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/x86.py#L3345-L3352
def JNG(cpu, target):
        """
        Jumps short if not greater.

        :param cpu: current CPU.
        :param target: destination operand.
        """
        cpu.PC = Operators.ITEBV(cpu.address_bit_size,
                                 Operators.OR(cpu.ZF, cpu.SF != cpu.OF),
                                 target.read(), cpu.PC)
[ "def", "JNG", "(", "cpu", ",", "target", ")", ":", "cpu", ".", "PC", "=", "Operators", ".", "ITEBV", "(", "cpu", ".", "address_bit_size", ",", "Operators", ".", "OR", "(", "cpu", ".", "ZF", ",", "cpu", ".", "SF", "!=", "cpu", ".", "OF", ")", ",", "target", ".", "read", "(", ")", ",", "cpu", ".", "PC", ")" ]
Jumps short if not greater.

:param cpu: current CPU.
:param target: destination operand.
[ "Jumps", "short", "if", "not", "greater", "." ]
python
valid
wummel/patool
patoolib/programs/arc.py
https://github.com/wummel/patool/blob/d7e64d9fd60faaa4b3f824bd97c43ce59b185c40/patoolib/programs/arc.py#L26-L34
def list_arc (archive, compression, cmd, verbosity, interactive):
    """List an ARC archive."""
    cmdlist = [cmd]
    if verbosity > 1:
        cmdlist.append('v')
    else:
        cmdlist.append('l')
    cmdlist.append(archive)
    return cmdlist
[ "def", "list_arc", "(", "archive", ",", "compression", ",", "cmd", ",", "verbosity", ",", "interactive", ")", ":", "cmdlist", "=", "[", "cmd", "]", "if", "verbosity", ">", "1", ":", "cmdlist", ".", "append", "(", "'v'", ")", "else", ":", "cmdlist", ".", "append", "(", "'l'", ")", "cmdlist", ".", "append", "(", "archive", ")", "return", "cmdlist" ]
List an ARC archive.
[ "List", "a", "ARC", "archive", "." ]
python
train
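The builder only assembles an argv list; patool runs it later. A condensed check of both verbosity branches, mirroring the calling convention of the record above:

def list_arc(archive, compression, cmd, verbosity, interactive):
    # Condensed version of the branch above: 'v' for verbose, 'l' otherwise.
    cmdlist = [cmd]
    cmdlist.append('v' if verbosity > 1 else 'l')
    cmdlist.append(archive)
    return cmdlist

print(list_arc('data.arc', None, 'arc', verbosity=0, interactive=False))
# ['arc', 'l', 'data.arc']
print(list_arc('data.arc', None, 'arc', verbosity=2, interactive=False))
# ['arc', 'v', 'data.arc']
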
rsalmei/clearly
clearly/event_core/streaming_dispatcher.py
https://github.com/rsalmei/clearly/blob/fd784843d13f0fed28fc192565bec3668f1363f4/clearly/event_core/streaming_dispatcher.py#L67-L76
def __stop(self):  # pragma: no cover
        """Stops the background engine."""
        if not self.dispatcher_thread:
            return

        logger.info('Stopping dispatcher')
        self.running = False  # graceful shutdown
        self.dispatcher_thread.join()
        self.dispatcher_thread = None
[ "def", "__stop", "(", "self", ")", ":", "# pragma: no cover", "if", "not", "self", ".", "dispatcher_thread", ":", "return", "logger", ".", "info", "(", "'Stopping dispatcher'", ")", "self", ".", "running", "=", "False", "# graceful shutdown", "self", ".", "dispatcher_thread", ".", "join", "(", ")", "self", ".", "dispatcher_thread", "=", "None" ]
Stops the background engine.
[ "Stops", "the", "background", "engine", "." ]
python
train
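The shutdown sequence above (flip a flag, join, clear the handle) is a common graceful-stop pattern for polling threads. A self-contained sketch, assuming the worker checks the flag once per loop iteration:

import threading
import time

class Engine:
    def __init__(self):
        self.running = True
        self.thread = threading.Thread(target=self._work)
        self.thread.start()

    def _work(self):
        while self.running:          # flag checked every iteration
            time.sleep(0.05)

    def stop(self):
        if not self.thread:
            return
        self.running = False         # request graceful shutdown
        self.thread.join()           # wait for the loop to notice and exit
        self.thread = None

engine = Engine()
engine.stop()
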
joke2k/faker
faker/providers/date_time/__init__.py
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/providers/date_time/__init__.py#L1905-L1941
def time_series(
            self,
            start_date='-30d',
            end_date='now',
            precision=None,
            distrib=None,
            tzinfo=None):
        """
        Returns a generator yielding tuples of ``(<datetime>, <value>)``.

        The data points will start at ``start_date``, and be at every time interval
        specified by ``precision``.
        ``distrib`` is a callable that accepts ``<datetime>`` and returns ``<value>``
        """
        start_date = self._parse_date_time(start_date, tzinfo=tzinfo)
        end_date = self._parse_date_time(end_date, tzinfo=tzinfo)

        if end_date < start_date:
            raise ValueError("`end_date` must be greater than `start_date`.")

        if precision is None:
            precision = (end_date - start_date) / 30

        precision = self._parse_timedelta(precision)
        if distrib is None:
            def distrib(dt):
                return self.generator.random.uniform(0, precision)  # noqa

        if not callable(distrib):
            raise ValueError(
                "`distrib` must be a callable. Got {} instead.".format(distrib))

        datapoint = start_date
        while datapoint < end_date:
            dt = timestamp_to_datetime(datapoint, tzinfo)
            datapoint += precision
            yield (dt, distrib(dt))
[ "def", "time_series", "(", "self", ",", "start_date", "=", "'-30d'", ",", "end_date", "=", "'now'", ",", "precision", "=", "None", ",", "distrib", "=", "None", ",", "tzinfo", "=", "None", ")", ":", "start_date", "=", "self", ".", "_parse_date_time", "(", "start_date", ",", "tzinfo", "=", "tzinfo", ")", "end_date", "=", "self", ".", "_parse_date_time", "(", "end_date", ",", "tzinfo", "=", "tzinfo", ")", "if", "end_date", "<", "start_date", ":", "raise", "ValueError", "(", "\"`end_date` must be greater than `start_date`.\"", ")", "if", "precision", "is", "None", ":", "precision", "=", "(", "end_date", "-", "start_date", ")", "/", "30", "precision", "=", "self", ".", "_parse_timedelta", "(", "precision", ")", "if", "distrib", "is", "None", ":", "def", "distrib", "(", "dt", ")", ":", "return", "self", ".", "generator", ".", "random", ".", "uniform", "(", "0", ",", "precision", ")", "# noqa", "if", "not", "callable", "(", "distrib", ")", ":", "raise", "ValueError", "(", "\"`distrib` must be a callable. Got {} instead.\"", ".", "format", "(", "distrib", ")", ")", "datapoint", "=", "start_date", "while", "datapoint", "<", "end_date", ":", "dt", "=", "timestamp_to_datetime", "(", "datapoint", ",", "tzinfo", ")", "datapoint", "+=", "precision", "yield", "(", "dt", ",", "distrib", "(", "dt", ")", ")" ]
Returns a generator yielding tuples of ``(<datetime>, <value>)``.

The data points will start at ``start_date``, and be at every time interval
specified by ``precision``.
``distrib`` is a callable that accepts ``<datetime>`` and returns ``<value>``
[ "Returns", "a", "generator", "yielding", "tuples", "of", "(", "<datetime", ">", "<value", ">", ")", "." ]
python
train
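Called through Faker, time_series yields (datetime, value) pairs; precision defaults to one thirtieth of the window. A hedged usage sketch that assumes the faker package is installed; the exact set of accepted precision types may vary by version:

from datetime import timedelta
from faker import Faker

fake = Faker()
# Hourly points over the last day; values come from the default uniform distrib.
for dt, value in fake.time_series(start_date='-1d', end_date='now',
                                  precision=timedelta(hours=1)):
    print(dt, value)
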
veeti/decent
decent/validators.py
https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/validators.py#L63-L73
def Default(default):
    """
    Creates a validator callable that replaces ``None`` with the specified
    default value.
    """
    @wraps(Default)
    def built(value):
        if value == None:
            return default
        return value
    return built
[ "def", "Default", "(", "default", ")", ":", "@", "wraps", "(", "Default", ")", "def", "built", "(", "value", ")", ":", "if", "value", "==", "None", ":", "return", "default", "return", "value", "return", "built" ]
Creates a validator callable that replaces ``None`` with the specified default value.
[ "Creates", "a", "validator", "callable", "that", "replaces", "None", "with", "the", "specified", "default", "value", "." ]
python
train
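Default only rewrites None, so falsy-but-present values like 0 or '' pass through untouched. A self-contained usage sketch of the validator factory:

from functools import wraps

def Default(default):
    @wraps(Default)
    def built(value):
        if value is None:  # only None triggers the default
            return default
        return value
    return built

port = Default(8080)
assert port(None) == 8080
assert port(0) == 0        # falsy values other than None are preserved
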
boakley/robotframework-hub
rfhub/kwdb.py
https://github.com/boakley/robotframework-hub/blob/f3dc7562fe6218a7b8d7aac7b9ef234e1a573f7c/rfhub/kwdb.py#L377-L414
def search(self, pattern="*", mode="both"):
        """Perform a pattern-based search on keyword names and documentation

        The pattern matching is insensitive to case. The function
        returns a list of tuples of the form library_id, library_name,
        keyword_name, keyword_synopsis, sorted by library id,
        library name, and then keyword name

        If a pattern begins with "name:", only the keyword names will
        be searched. Otherwise, the pattern is searched for in both
        the name and keyword documentation.

        You can limit the search to a single library by specifying
        "in:" followed by the name of the library or resource
        file. For example, "screenshot in:Selenium2Library" will only
        search for the word 'screenshot' in the Selenium2Library.
        """
        pattern = self._glob_to_sql(pattern)

        COND = "(keyword.name like ? OR keyword.doc like ?)"
        args = [pattern, pattern]
        if mode == "name":
            COND = "(keyword.name like ?)"
            args = [pattern,]

        sql = """SELECT collection.collection_id, collection.name, keyword.name, keyword.doc
                 FROM collection_table as collection
                 JOIN keyword_table as keyword
                 WHERE collection.collection_id == keyword.collection_id
                 AND %s
                 ORDER by collection.collection_id, collection.name, keyword.name
              """ % COND

        cursor = self._execute(sql, args)
        result = [(row[0], row[1], row[2], row[3].strip().split("\n")[0])
                  for row in cursor.fetchall()]
        return list(set(result))
[ "def", "search", "(", "self", ",", "pattern", "=", "\"*\"", ",", "mode", "=", "\"both\"", ")", ":", "pattern", "=", "self", ".", "_glob_to_sql", "(", "pattern", ")", "COND", "=", "\"(keyword.name like ? OR keyword.doc like ?)\"", "args", "=", "[", "pattern", ",", "pattern", "]", "if", "mode", "==", "\"name\"", ":", "COND", "=", "\"(keyword.name like ?)\"", "args", "=", "[", "pattern", ",", "]", "sql", "=", "\"\"\"SELECT collection.collection_id, collection.name, keyword.name, keyword.doc\n FROM collection_table as collection\n JOIN keyword_table as keyword\n WHERE collection.collection_id == keyword.collection_id\n AND %s\n ORDER by collection.collection_id, collection.name, keyword.name\n \"\"\"", "%", "COND", "cursor", "=", "self", ".", "_execute", "(", "sql", ",", "args", ")", "result", "=", "[", "(", "row", "[", "0", "]", ",", "row", "[", "1", "]", ",", "row", "[", "2", "]", ",", "row", "[", "3", "]", ".", "strip", "(", ")", ".", "split", "(", "\"\\n\"", ")", "[", "0", "]", ")", "for", "row", "in", "cursor", ".", "fetchall", "(", ")", "]", "return", "list", "(", "set", "(", "result", ")", ")" ]
Perform a pattern-based search on keyword names and documentation

The pattern matching is insensitive to case. The function
returns a list of tuples of the form library_id, library_name,
keyword_name, keyword_synopsis, sorted by library id,
library name, and then keyword name

If a pattern begins with "name:", only the keyword names will
be searched. Otherwise, the pattern is searched for in both
the name and keyword documentation.

You can limit the search to a single library by specifying
"in:" followed by the name of the library or resource
file. For example, "screenshot in:Selenium2Library" will only
search for the word 'screenshot' in the Selenium2Library.
[ "Perform", "a", "pattern", "-", "based", "search", "on", "keyword", "names", "and", "documentation" ]
python
train
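The search leans on _glob_to_sql to turn a user-facing glob into a SQL LIKE pattern. A hedged sketch of what such a translation typically looks like; this is an assumption for illustration, not rfhub's actual helper:

# Hypothetical glob -> LIKE translation; rfhub's real _glob_to_sql may differ.
def glob_to_sql(pattern):
    return pattern.replace('*', '%').replace('?', '_')

assert glob_to_sql('screen*') == 'screen%'
assert glob_to_sql('*shot?') == '%shot_'
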
ahopkins/sanic-jwt
example/custom_authentication_cls_complex.py
https://github.com/ahopkins/sanic-jwt/blob/fca7750499c8cedde823d778512f613777fb5282/example/custom_authentication_cls_complex.py#L65-L132
def _verify(
        self,
        request,
        return_payload=False,
        verify=True,
        raise_missing=False,
        request_args=None,
        request_kwargs=None,
        *args,
        **kwargs
    ):
        """
        If there is a "permakey", then we will verify the token by checking the
        database. Otherwise, just do the normal verification.

        Typically, any method that begins with an underscore in sanic-jwt should
        not be touched. In this case, we are trying to break the rules a bit to
        handle a unique use case: handle both expirable and non-expirable tokens.
        """
        if "permakey" in request.headers:
            # Extract the permakey from the headers
            permakey = request.headers.get("permakey")

            # In production, probably should have some exception handling here
            # in case the permakey is an empty string or some other bad value
            payload = self._decode(permakey, verify=verify)

            # Sometimes, the application will call _verify(...return_payload=True)
            # So, let's make sure to handle this scenario.
            if return_payload:
                return payload

            # Retrieve the user from the database
            user_id = payload.get("user_id", None)
            user = userid_table.get(user_id)

            # If we cannot find a user, then this method should return
            # is_valid == False
            # reason == some text for why
            # status == some status code, probably a 401
            if not user_id or not user:
                is_valid = False
                reason = "No user found"
                status = 401
            else:
                # After finding a user, make sure the permakey matches,
                # or else return a bad status or some other error.
                # In production, both this scenario, and the above "No user found"
                # scenario should return an identical message and status code.
                # This is to prevent your application accidentally
                # leaking information about the existence or non-existence of users.
                is_valid = user.permakey == permakey
                reason = None if is_valid else "Permakey mismatch"
                status = 200 if is_valid else 401

            return is_valid, status, reason
        else:
            return super()._verify(
                request=request,
                return_payload=return_payload,
                verify=verify,
                raise_missing=raise_missing,
                request_args=request_args,
                request_kwargs=request_kwargs,
                *args,
                **kwargs
            )
[ "def", "_verify", "(", "self", ",", "request", ",", "return_payload", "=", "False", ",", "verify", "=", "True", ",", "raise_missing", "=", "False", ",", "request_args", "=", "None", ",", "request_kwargs", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "\"permakey\"", "in", "request", ".", "headers", ":", "# Extract the permakey from the headers", "permakey", "=", "request", ".", "headers", ".", "get", "(", "\"permakey\"", ")", "# In production, probably should have some exception handling Here", "# in case the permakey is an empty string or some other bad value", "payload", "=", "self", ".", "_decode", "(", "permakey", ",", "verify", "=", "verify", ")", "# Sometimes, the application will call _verify(...return_payload=True)", "# So, let's make sure to handle this scenario.", "if", "return_payload", ":", "return", "payload", "# Retrieve the user from the database", "user_id", "=", "payload", ".", "get", "(", "\"user_id\"", ",", "None", ")", "user", "=", "userid_table", ".", "get", "(", "user_id", ")", "# If wer cannot find a user, then this method should return", "# is_valid == False", "# reason == some text for why", "# status == some status code, probably a 401", "if", "not", "user_id", "or", "not", "user", ":", "is_valid", "=", "False", "reason", "=", "\"No user found\"", "status", "=", "401", "else", ":", "# After finding a user, make sure the permakey matches,", "# or else return a bad status or some other error.", "# In production, both this scenario, and the above \"No user found\"", "# scenario should return an identical message and status code.", "# This is to prevent your application accidentally", "# leaking information about the existence or non-existence of users.", "is_valid", "=", "user", ".", "permakey", "==", "permakey", "reason", "=", "None", "if", "is_valid", "else", "\"Permakey mismatch\"", "status", "=", "200", "if", "is_valid", "else", "401", "return", "is_valid", ",", "status", ",", "reason", "else", ":", "return", "super", "(", ")", ".", "_verify", "(", "request", "=", "request", ",", "return_payload", "=", "return_payload", ",", "verify", "=", "verify", ",", "raise_missing", "=", "raise_missing", ",", "request_args", "=", "request_args", ",", "request_kwargs", "=", "request_kwargs", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
If there is a "permakey", then we will verify the token by checking the database. Otherwise, just do the normal verification. Typically, any method that begins with an underscore in sanic-jwt should not be touched. In this case, we are trying to break the rules a bit to handle a unique use case: handle both expirable and non-expirable tokens.
[ "If", "there", "is", "a", "permakey", "then", "we", "will", "verify", "the", "token", "by", "checking", "the", "database", ".", "Otherwise", "just", "do", "the", "normal", "verification", "." ]
python
train
cmcqueen/simplerandom
python/python2/simplerandom/iterators/_iterators_py.py
https://github.com/cmcqueen/simplerandom/blob/3f19ffdfeaa8256986adf7173f08c1c719164d01/python/python2/simplerandom/iterators/_iterators_py.py#L40-L59
def _repeat_iter(input_iter):
    """Iterate over the input iter values. Then repeat the last value
    indefinitely. This is useful to repeat seed values when an insufficient
    number of seeds are provided.

    E.g. KISS(1) effectively becomes KISS(1, 1, 1, 1), rather than (if we
    just used default values) KISS(1, default-value, default-value, default-value)

    It is better to repeat the last seed value, rather than just using
    default values. Given two generators seeded with an insufficient number
    of seeds, repeating the last seed value means their states are more
    different from each other, with less correlation between their
    generated outputs.
    """
    last_value = None
    for value in input_iter:
        last_value = value
        yield value
    if last_value is not None:
        while True:
            yield last_value
[ "def", "_repeat_iter", "(", "input_iter", ")", ":", "last_value", "=", "None", "for", "value", "in", "input_iter", ":", "last_value", "=", "value", "yield", "value", "if", "last_value", "is", "not", "None", ":", "while", "True", ":", "yield", "last_value" ]
Iterate over the input iter values. Then repeat the last value
indefinitely. This is useful to repeat seed values when an insufficient
number of seeds are provided.

E.g. KISS(1) effectively becomes KISS(1, 1, 1, 1), rather than (if we
just used default values) KISS(1, default-value, default-value, default-value)

It is better to repeat the last seed value, rather than just using
default values. Given two generators seeded with an insufficient number
of seeds, repeating the last seed value means their states are more
different from each other, with less correlation between their
generated outputs.
[ "Iterate", "over", "the", "input", "iter", "values", ".", "Then", "repeat", "the", "last", "value", "indefinitely", ".", "This", "is", "useful", "to", "repeat", "seed", "values", "when", "an", "insufficient", "number", "of", "seeds", "are", "provided", "." ]
python
train
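A quick demonstration of the seed-repeating iterator from the record above:

from itertools import islice

def _repeat_iter(input_iter):
    last_value = None
    for value in input_iter:
        last_value = value
        yield value
    if last_value is not None:
        while True:
            yield last_value

# Two seeds padded out to four: KISS(1, 2) effectively becomes KISS(1, 2, 2, 2).
print(list(islice(_repeat_iter(iter([1, 2])), 4)))  # [1, 2, 2, 2]
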
bram85/topydo
topydo/lib/Filter.py
https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/lib/Filter.py#L232-L250
def compare_operands(self, p_operand1, p_operand2):
        """
        Returns True if conditional constructed from both operands and
        self.operator is valid. Returns False otherwise.
        """
        if self.operator == '<':
            return p_operand1 < p_operand2
        elif self.operator == '<=':
            return p_operand1 <= p_operand2
        elif self.operator == '=':
            return p_operand1 == p_operand2
        elif self.operator == '>=':
            return p_operand1 >= p_operand2
        elif self.operator == '>':
            return p_operand1 > p_operand2
        elif self.operator == '!':
            return p_operand1 != p_operand2

        return False
[ "def", "compare_operands", "(", "self", ",", "p_operand1", ",", "p_operand2", ")", ":", "if", "self", ".", "operator", "==", "'<'", ":", "return", "p_operand1", "<", "p_operand2", "elif", "self", ".", "operator", "==", "'<='", ":", "return", "p_operand1", "<=", "p_operand2", "elif", "self", ".", "operator", "==", "'='", ":", "return", "p_operand1", "==", "p_operand2", "elif", "self", ".", "operator", "==", "'>='", ":", "return", "p_operand1", ">=", "p_operand2", "elif", "self", ".", "operator", "==", "'>'", ":", "return", "p_operand1", ">", "p_operand2", "elif", "self", ".", "operator", "==", "'!'", ":", "return", "p_operand1", "!=", "p_operand2", "return", "False" ]
Returns True if conditional constructed from both operands and self.operator is valid. Returns False otherwise.
[ "Returns", "True", "if", "conditional", "constructed", "from", "both", "operands", "and", "self", ".", "operator", "is", "valid", ".", "Returns", "False", "otherwise", "." ]
python
train
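An operator-module lookup table can express the same dispatch more compactly. A sketch of that alternative; this is not topydo's code, which keeps the explicit if/elif chain above:

import operator

# Table-driven alternative to the if/elif chain; unknown operators fall to False.
OPS = {
    '<': operator.lt, '<=': operator.le, '=': operator.eq,
    '>=': operator.ge, '>': operator.gt, '!': operator.ne,
}

def compare_operands(op, a, b):
    fn = OPS.get(op)
    return fn(a, b) if fn else False

assert compare_operands('<=', 3, 3)
assert not compare_operands('~', 1, 2)  # unknown operator -> False
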
cedricbonhomme/Stegano
stegano/tools.py
https://github.com/cedricbonhomme/Stegano/blob/502e6303791d348e479290c22108551ba3be254f/stegano/tools.py#L96-L103
def binary2base64(binary_file: str) -> str:
    """Convert a binary file (OGG, executable, etc.) to a
    printable string.
    """
    # Use mode = "rb" to read binary file
    with open(binary_file, "rb") as bin_file:
        encoded_string = base64.b64encode(bin_file.read())
    return encoded_string.decode()
[ "def", "binary2base64", "(", "binary_file", ":", "str", ")", "->", "str", ":", "# Use mode = \"rb\" to read binary file", "with", "open", "(", "binary_file", ",", "\"rb\"", ")", "as", "bin_file", ":", "encoded_string", "=", "base64", ".", "b64encode", "(", "bin_file", ".", "read", "(", ")", ")", "return", "encoded_string", ".", "decode", "(", ")" ]
Convert a binary file (OGG, executable, etc.) to a printable string.
[ "Convert", "a", "binary", "file", "(", "OGG", "executable", "etc", ".", ")", "to", "a", "printable", "string", "." ]
python
train
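binary2base64 pairs with base64.b64decode for the reverse direction. A round-trip sketch using a temporary file (the payload bytes are arbitrary):

import base64
import tempfile

payload = b'\x00\x01\xffOGG'  # arbitrary binary payload
with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(payload)
    path = tmp.name

# Same encode step as binary2base64, then decode to prove the round trip.
with open(path, 'rb') as bin_file:
    encoded = base64.b64encode(bin_file.read()).decode()

assert base64.b64decode(encoded) == payload
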
rosenbrockc/fortpy
fortpy/elements.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/elements.py#L1507-L1513
def unpickle(self, parent):
        """Sets the parent pointer references for the module *and* all of its
        child classes that also have pointer references."""
        self.parent = parent
        self._unpickle_collection(self.members)
        self._unpickle_collection(self.executables)
        self._unpickle_collection(self.types)
[ "def", "unpickle", "(", "self", ",", "parent", ")", ":", "self", ".", "parent", "=", "parent", "self", ".", "_unpickle_collection", "(", "self", ".", "members", ")", "self", ".", "_unpickle_collection", "(", "self", ".", "executables", ")", "self", ".", "_unpickle_collection", "(", "self", ".", "types", ")" ]
Sets the parent pointer references for the module *and* all of its child classes that also have pointer references.
[ "Sets", "the", "parent", "pointer", "references", "for", "the", "module", "*", "and", "*", "all", "of", "its", "child", "classes", "that", "also", "have", "pointer", "references", "." ]
python
train
whiteclover/dbpy
samples/orm.py
https://github.com/whiteclover/dbpy/blob/3d9ce85f55cfb39cced22081e525f79581b26b3a/samples/orm.py#L46-L54
def find(self, uid):
        """Find and load the user from database by uid(user id)"""
        data = (db.select(self.table).select('username', 'email', 'real_name',
                'password', 'bio', 'status', 'role', 'uid').
                condition('uid', uid).execute()
                )
        if data:
            logger.info('data %s', data)
            return self.load(data[0], self.model)
[ "def", "find", "(", "self", ",", "uid", ")", ":", "data", "=", "(", "db", ".", "select", "(", "self", ".", "table", ")", ".", "select", "(", "'username'", ",", "'email'", ",", "'real_name'", ",", "'password'", ",", "'bio'", ",", "'status'", ",", "'role'", ",", "'uid'", ")", ".", "condition", "(", "'uid'", ",", "uid", ")", ".", "execute", "(", ")", ")", "if", "data", ":", "logger", ".", "info", "(", "'data %s'", ",", "data", ")", "return", "self", ".", "load", "(", "data", "[", "0", "]", ",", "self", ".", "model", ")" ]
Find and load the user from database by uid(user id)
[ "Find", "and", "load", "the", "user", "from", "database", "by", "uid", "(", "user", "id", ")" ]
python
train
lsbardel/python-stdnet
stdnet/odm/models.py
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/models.py#L97-L102
def clear_cache_fields(self):
        '''Set cache fields to ``None``. Check :attr:`Field.as_cache`
        for information regarding fields which are considered cache.'''
        for field in self._meta.scalarfields:
            if field.as_cache:
                setattr(self, field.name, None)
[ "def", "clear_cache_fields", "(", "self", ")", ":", "for", "field", "in", "self", ".", "_meta", ".", "scalarfields", ":", "if", "field", ".", "as_cache", ":", "setattr", "(", "self", ",", "field", ".", "name", ",", "None", ")" ]
Set cache fields to ``None``. Check :attr:`Field.as_cache` for information regarding fields which are considered cache.
[ "Set", "cache", "fields", "to", "None", ".", "Check", ":", "attr", ":", "Field", ".", "as_cache", "for", "information", "regarding", "fields", "which", "are", "considered", "cache", "." ]
python
train
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/mwcc.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/mwcc.py#L155-L194
def generate(env):
    """Add Builders and construction variables for the mwcc to an Environment."""
    import SCons.Defaults
    import SCons.Tool

    set_vars(env)

    static_obj, shared_obj = SCons.Tool.createObjBuilders(env)

    for suffix in CSuffixes:
        static_obj.add_action(suffix, SCons.Defaults.CAction)
        shared_obj.add_action(suffix, SCons.Defaults.ShCAction)

    for suffix in CXXSuffixes:
        static_obj.add_action(suffix, SCons.Defaults.CXXAction)
        shared_obj.add_action(suffix, SCons.Defaults.ShCXXAction)

    env['CCCOMFLAGS'] = '$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -nolink -o $TARGET $SOURCES'

    env['CC'] = 'mwcc'
    env['CCCOM'] = '$CC $CFLAGS $CCFLAGS $CCCOMFLAGS'

    env['CXX'] = 'mwcc'
    env['CXXCOM'] = '$CXX $CXXFLAGS $CCCOMFLAGS'

    env['SHCC'] = '$CC'
    env['SHCCFLAGS'] = '$CCFLAGS'
    env['SHCFLAGS'] = '$CFLAGS'
    env['SHCCCOM'] = '$SHCC $SHCFLAGS $SHCCFLAGS $CCCOMFLAGS'

    env['SHCXX'] = '$CXX'
    env['SHCXXFLAGS'] = '$CXXFLAGS'
    env['SHCXXCOM'] = '$SHCXX $SHCXXFLAGS $CCCOMFLAGS'

    env['CFILESUFFIX'] = '.c'
    env['CXXFILESUFFIX'] = '.cpp'

    env['CPPDEFPREFIX'] = '-D'
    env['CPPDEFSUFFIX'] = ''
    env['INCPREFIX'] = '-I'
    env['INCSUFFIX'] = ''
[ "def", "generate", "(", "env", ")", ":", "import", "SCons", ".", "Defaults", "import", "SCons", ".", "Tool", "set_vars", "(", "env", ")", "static_obj", ",", "shared_obj", "=", "SCons", ".", "Tool", ".", "createObjBuilders", "(", "env", ")", "for", "suffix", "in", "CSuffixes", ":", "static_obj", ".", "add_action", "(", "suffix", ",", "SCons", ".", "Defaults", ".", "CAction", ")", "shared_obj", ".", "add_action", "(", "suffix", ",", "SCons", ".", "Defaults", ".", "ShCAction", ")", "for", "suffix", "in", "CXXSuffixes", ":", "static_obj", ".", "add_action", "(", "suffix", ",", "SCons", ".", "Defaults", ".", "CXXAction", ")", "shared_obj", ".", "add_action", "(", "suffix", ",", "SCons", ".", "Defaults", ".", "ShCXXAction", ")", "env", "[", "'CCCOMFLAGS'", "]", "=", "'$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -nolink -o $TARGET $SOURCES'", "env", "[", "'CC'", "]", "=", "'mwcc'", "env", "[", "'CCCOM'", "]", "=", "'$CC $CFLAGS $CCFLAGS $CCCOMFLAGS'", "env", "[", "'CXX'", "]", "=", "'mwcc'", "env", "[", "'CXXCOM'", "]", "=", "'$CXX $CXXFLAGS $CCCOMFLAGS'", "env", "[", "'SHCC'", "]", "=", "'$CC'", "env", "[", "'SHCCFLAGS'", "]", "=", "'$CCFLAGS'", "env", "[", "'SHCFLAGS'", "]", "=", "'$CFLAGS'", "env", "[", "'SHCCCOM'", "]", "=", "'$SHCC $SHCFLAGS $SHCCFLAGS $CCCOMFLAGS'", "env", "[", "'SHCXX'", "]", "=", "'$CXX'", "env", "[", "'SHCXXFLAGS'", "]", "=", "'$CXXFLAGS'", "env", "[", "'SHCXXCOM'", "]", "=", "'$SHCXX $SHCXXFLAGS $CCCOMFLAGS'", "env", "[", "'CFILESUFFIX'", "]", "=", "'.c'", "env", "[", "'CXXFILESUFFIX'", "]", "=", "'.cpp'", "env", "[", "'CPPDEFPREFIX'", "]", "=", "'-D'", "env", "[", "'CPPDEFSUFFIX'", "]", "=", "''", "env", "[", "'INCPREFIX'", "]", "=", "'-I'", "env", "[", "'INCSUFFIX'", "]", "=", "''" ]
Add Builders and construction variables for the mwcc to an Environment.
[ "Add", "Builders", "and", "construction", "variables", "for", "the", "mwcc", "to", "an", "Environment", "." ]
python
train
tyarkoni/pliers
pliers/utils/base.py
https://github.com/tyarkoni/pliers/blob/5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b/pliers/utils/base.py#L45-L52
def batch_iterable(l, n):
    ''' Chunks iterable into n sized batches
    Solution from: http://stackoverflow.com/questions/1915170/split-a-generator-iterable-every-n-items-in-python-splitevery'''
    i = iter(l)
    piece = list(islice(i, n))
    while piece:
        yield piece
        piece = list(islice(i, n))
[ "def", "batch_iterable", "(", "l", ",", "n", ")", ":", "i", "=", "iter", "(", "l", ")", "piece", "=", "list", "(", "islice", "(", "i", ",", "n", ")", ")", "while", "piece", ":", "yield", "piece", "piece", "=", "list", "(", "islice", "(", "i", ",", "n", ")", ")" ]
Chunks iterable into n sized batches Solution from: http://stackoverflow.com/questions/1915170/split-a-generator-iterable-every-n-items-in-python-splitevery
[ "Chunks", "iterable", "into", "n", "sized", "batches", "Solution", "from", ":", "http", ":", "//", "stackoverflow", ".", "com", "/", "questions", "/", "1915170", "/", "split", "-", "a", "-", "generator", "-", "iterable", "-", "every", "-", "n", "-", "items", "-", "in", "-", "python", "-", "splitevery" ]
python
train
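batch_iterable works on any iterable, including generators, and the final batch may be short:

from itertools import islice

def batch_iterable(l, n):
    i = iter(l)
    piece = list(islice(i, n))
    while piece:
        yield piece
        piece = list(islice(i, n))

print(list(batch_iterable(range(7), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]
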
robotools/fontParts
Lib/fontParts/base/info.py
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/info.py#L257-L286
def interpolate(self, factor, minInfo, maxInfo,
                    round=True, suppressError=True):
        """
        Interpolate all pairs between minInfo and maxInfo.
        The interpolation occurs on a 0 to 1.0 range where minInfo
        is located at 0 and maxInfo is located at 1.0.

        factor is the interpolation value. It may be less than 0
        and greater than 1.0. It may be a number (integer, float)
        or a tuple of two numbers. If it is a tuple, the first
        number indicates the x factor and the second number
        indicates the y factor.

        round indicates if the result should be rounded to integers.

        suppressError indicates if incompatible data should be ignored
        or if an error should be raised when such incompatibilities are found.
        """
        factor = normalizers.normalizeInterpolationFactor(factor)
        if not isinstance(minInfo, BaseInfo):
            raise TypeError(("Interpolation to an instance of %r can not be "
                             "performed from an instance of %r.") %
                            (self.__class__.__name__, minInfo.__class__.__name__))
        if not isinstance(maxInfo, BaseInfo):
            raise TypeError(("Interpolation to an instance of %r can not be "
                             "performed from an instance of %r.") %
                            (self.__class__.__name__, maxInfo.__class__.__name__))
        round = normalizers.normalizeBoolean(round)
        suppressError = normalizers.normalizeBoolean(suppressError)
        self._interpolate(factor, minInfo, maxInfo,
                          round=round, suppressError=suppressError)
[ "def", "interpolate", "(", "self", ",", "factor", ",", "minInfo", ",", "maxInfo", ",", "round", "=", "True", ",", "suppressError", "=", "True", ")", ":", "factor", "=", "normalizers", ".", "normalizeInterpolationFactor", "(", "factor", ")", "if", "not", "isinstance", "(", "minInfo", ",", "BaseInfo", ")", ":", "raise", "TypeError", "(", "(", "\"Interpolation to an instance of %r can not be \"", "\"performed from an instance of %r.\"", ")", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "minInfo", ".", "__class__", ".", "__name__", ")", ")", "if", "not", "isinstance", "(", "maxInfo", ",", "BaseInfo", ")", ":", "raise", "TypeError", "(", "(", "\"Interpolation to an instance of %r can not be \"", "\"performed from an instance of %r.\"", ")", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "maxInfo", ".", "__class__", ".", "__name__", ")", ")", "round", "=", "normalizers", ".", "normalizeBoolean", "(", "round", ")", "suppressError", "=", "normalizers", ".", "normalizeBoolean", "(", "suppressError", ")", "self", ".", "_interpolate", "(", "factor", ",", "minInfo", ",", "maxInfo", ",", "round", "=", "round", ",", "suppressError", "=", "suppressError", ")" ]
Interpolate all pairs between minInfo and maxInfo.
The interpolation occurs on a 0 to 1.0 range where minInfo
is located at 0 and maxInfo is located at 1.0.

factor is the interpolation value. It may be less than 0
and greater than 1.0. It may be a number (integer, float)
or a tuple of two numbers. If it is a tuple, the first
number indicates the x factor and the second number
indicates the y factor.

round indicates if the result should be rounded to integers.

suppressError indicates if incompatible data should be ignored
or if an error should be raised when such incompatibilities are found.
[ "Interpolate", "all", "pairs", "between", "minInfo", "and", "maxInfo", ".", "The", "interpolation", "occurs", "on", "a", "0", "to", "1", ".", "0", "range", "where", "minInfo", "is", "located", "at", "0", "and", "maxInfo", "is", "located", "at", "1", ".", "0", "." ]
python
train
IvanMalison/okcupyd
okcupyd/messaging.py
https://github.com/IvanMalison/okcupyd/blob/46f4eaa9419098f6c299738ce148af55c64deb64/okcupyd/messaging.py#L133-L156
def content(self):
        """
        :returns: The text body of the message.
        """
        # The code that follows is obviously pretty disgusting.
        # It seems like it might be impossible to completely replicate
        # the text of the original message if it has trailing whitespace
        message = self._content_xpb.one_(self._message_element)
        first_line = message.text
        if message.text[:2] == ' ':
            first_line = message.text[2:]
        else:
            log.debug("message did not have expected leading whitespace")
        subsequent_lines = ''.join([
            html.tostring(child, encoding='unicode').replace('<br>', '\n')
            for child in message.iterchildren()
        ])
        message_text = first_line + subsequent_lines
        if len(message_text) > 0 and message_text[-1] == ' ':
            message_text = message_text[:-1]
        else:
            log.debug("message did not have expected trailing whitespace")
        return message_text
[ "def", "content", "(", "self", ")", ":", "# The code that follows is obviously pretty disgusting.", "# It seems like it might be impossible to completely replicate", "# the text of the original message if it has trailing whitespace", "message", "=", "self", ".", "_content_xpb", ".", "one_", "(", "self", ".", "_message_element", ")", "first_line", "=", "message", ".", "text", "if", "message", ".", "text", "[", ":", "2", "]", "==", "' '", ":", "first_line", "=", "message", ".", "text", "[", "2", ":", "]", "else", ":", "log", ".", "debug", "(", "\"message did not have expected leading whitespace\"", ")", "subsequent_lines", "=", "''", ".", "join", "(", "[", "html", ".", "tostring", "(", "child", ",", "encoding", "=", "'unicode'", ")", ".", "replace", "(", "'<br>'", ",", "'\\n'", ")", "for", "child", "in", "message", ".", "iterchildren", "(", ")", "]", ")", "message_text", "=", "first_line", "+", "subsequent_lines", "if", "len", "(", "message_text", ")", ">", "0", "and", "message_text", "[", "-", "1", "]", "==", "' '", ":", "message_text", "=", "message_text", "[", ":", "-", "1", "]", "else", ":", "log", ".", "debug", "(", "\"message did not have expected leading whitespace\"", ")", "return", "message_text" ]
:returns: The text body of the message.
[ ":", "returns", ":", "The", "text", "body", "of", "the", "message", "." ]
python
train
Zsailer/kubeconf
kubeconf/kubeconf.py
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L274-L291
def add_exec_to_user( self, name, env, command, args, **attrs ): """Add an exec option to your user.""" # Add exec option. exec_options = { 'command': command, 'env': env, 'args': args, } exec_options.update(attrs) # Add exec to user. self.add_to_user(name=name, exec=exec_options)
[ "def", "add_exec_to_user", "(", "self", ",", "name", ",", "env", ",", "command", ",", "args", ",", "*", "*", "attrs", ")", ":", "# Add exec option.", "exec_options", "=", "{", "'command'", ":", "command", ",", "'env'", ":", "env", ",", "'args'", ":", "args", ",", "}", "exec_options", ".", "update", "(", "attrs", ")", "# Add exec to user.", "self", ".", "add_to_user", "(", "name", "=", "name", ",", "exec", "=", "exec_options", ")" ]
Add an exec option to your user.
[ "Add", "an", "exec", "option", "to", "your", "user", "." ]
python
train
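A hedged usage sketch for the add_exec_to_user record above; the constructor arguments, user name, and exec payload are all made-up, and KubeConf is only assumed to expose the method as shown:

from kubeconf import KubeConf

conf = KubeConf(path='~/.kube/config')   # hypothetical constructor argument
conf.open()                              # assumed to load the config file
conf.add_exec_to_user(
    name='demo-user',                    # made-up user entry
    env=[{'name': 'AWS_PROFILE', 'value': 'demo'}],
    command='aws-iam-authenticator',
    args=['token', '-i', 'demo-cluster'],
)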
mwouts/jupytext
jupytext/cell_metadata.py
https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/cell_metadata.py#L98-L113
def update_metadata_from_rmd_options(name, value, metadata): """ Update metadata using the _BOOLEAN_OPTIONS_DICTIONARY mapping :param name: option name :param value: option value :param metadata: :return: """ for jupyter_option, rmd_option, rev in _BOOLEAN_OPTIONS_DICTIONARY: if name == rmd_option: try: metadata[jupyter_option] = _py_logical_values(value) != rev return True except RLogicalValueError: pass return False
[ "def", "update_metadata_from_rmd_options", "(", "name", ",", "value", ",", "metadata", ")", ":", "for", "jupyter_option", ",", "rmd_option", ",", "rev", "in", "_BOOLEAN_OPTIONS_DICTIONARY", ":", "if", "name", "==", "rmd_option", ":", "try", ":", "metadata", "[", "jupyter_option", "]", "=", "_py_logical_values", "(", "value", ")", "!=", "rev", "return", "True", "except", "RLogicalValueError", ":", "pass", "return", "False" ]
Update metadata using the _BOOLEAN_OPTIONS_DICTIONARY mapping :param name: option name :param value: option value :param metadata: :return:
[ "Update", "metadata", "using", "the", "_BOOLEAN_OPTIONS_DICTIONARY", "mapping", ":", "param", "name", ":", "option", "name", ":", "param", "value", ":", "option", "value", ":", "param", "metadata", ":", ":", "return", ":" ]
python
train
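The rev flag in the update_metadata_from_rmd_options record above is easiest to see with a standalone re-implementation; the option table below is illustrative, not the library's actual _BOOLEAN_OPTIONS_DICTIONARY:

_BOOLEAN_OPTIONS = [
    # (jupyter_option, rmd_option, reversed)
    ('hide_input', 'echo', True),    # echo=TRUE means *show* the input, hence reversed
    ('include', 'include', False),
]

def update_metadata(name, value, metadata):
    for jupyter_option, rmd_option, rev in _BOOLEAN_OPTIONS:
        if name == rmd_option:
            metadata[jupyter_option] = (value == 'TRUE') != rev
            return True
    return False

meta = {}
update_metadata('echo', 'FALSE', meta)
assert meta == {'hide_input': True}     # echo=FALSE hides the input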
gem/oq-engine
openquake/hmtk/strain/shift.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/strain/shift.py#L279-L367
def calculate_activity_rate(self, strain_data, cumulative=False, in_seconds=False): ''' Main function to calculate the activity rate (for each of the magnitudes in target_magnitudes) for all of the cells specified in the input strain model file :param strain_data: Strain model as an instance of :class: openquake.hmtk.strain.geodetic_strain.GeodeticStrain :param bool cumulative: Set to true if the cumulative rate is required, False for incremental :param bool in_seconds: Returns the activity rate in seconds (True) or else as an annual activity rate ''' self.strain = strain_data self.strain.target_magnitudes = self.target_magnitudes # Adjust strain rates from annual to seconds (SI) for key in STRAIN_VARIABLES: self.strain.data[key] = self.strain.data[key] / SECS_PER_YEAR if 'region' not in self.strain.data: raise ValueError('Cannot implment SHIFT methodology without ' 'definition of regionalisation') else: self._reclassify_Bird_regions_with_data() # Initially all seismicity rates assigned to background rate self.strain.seismicity_rate = np.tile( self.base_rate, [self.strain.get_number_observations(), 1]) regionalisation_zones = ( np.unique(self.strain.data['region'])).tolist() for region in regionalisation_zones: id0 = self.strain.data['region'] == region if b'IPL' in region: # For intra-plate seismicity everything is refered to # the background rate continue elif b'OSR_special_1' in region: # Special case 1 - normal and transform faulting calculated_rate = self.get_rate_osr_normal_transform( self.threshold_moment, id0) elif b'OSR_special_2' in region: # Special case 2 - convergent and transform faulting calculated_rate = self.get_rate_osr_convergent_transform( self.threshold_moment, id0) else: region = region.decode('utf-8') calculated_rate = \ self.regionalisation[region]['adjustment_factor'] * \ self.continuum_seismicity(self.threshold_moment, self.strain.data['e1h'][id0], self.strain.data['e2h'][id0], self.strain.data['err'][id0], self.regionalisation[region]) for jloc, iloc in enumerate(np.where(id0)[0]): # Where the calculated rate exceeds the base rate then becomes # calculated rate. In this version the magnitudes are treated # independently (i.e. if Rate(M < 7) > Base Rate (M < 7) but # Rate (M > 7) < Base Rate (M > 7) then returned Rate (M < 7) # = Rate (M < 7) and returned Rate (M > 7) = Base Rate (M > 7) id1 = calculated_rate[jloc] > self.base_rate self.strain.seismicity_rate[iloc, id1] = calculated_rate[jloc, id1] if not cumulative and self.number_magnitudes > 1: # Seismicity rates are currently cumulative - need to turn them # into discrete for iloc in range(0, self.number_magnitudes - 1): self.strain.seismicity_rate[:, iloc] = \ self.strain.seismicity_rate[:, iloc] -\ self.strain.seismicity_rate[:, iloc + 1] if not in_seconds: self.strain.seismicity_rate = self.strain.seismicity_rate * \ SECS_PER_YEAR for key in STRAIN_VARIABLES: self.strain.data[key] = self.strain.data[key] * SECS_PER_YEAR
[ "def", "calculate_activity_rate", "(", "self", ",", "strain_data", ",", "cumulative", "=", "False", ",", "in_seconds", "=", "False", ")", ":", "self", ".", "strain", "=", "strain_data", "self", ".", "strain", ".", "target_magnitudes", "=", "self", ".", "target_magnitudes", "# Adjust strain rates from annual to seconds (SI)", "for", "key", "in", "STRAIN_VARIABLES", ":", "self", ".", "strain", ".", "data", "[", "key", "]", "=", "self", ".", "strain", ".", "data", "[", "key", "]", "/", "SECS_PER_YEAR", "if", "'region'", "not", "in", "self", ".", "strain", ".", "data", ":", "raise", "ValueError", "(", "'Cannot implment SHIFT methodology without '", "'definition of regionalisation'", ")", "else", ":", "self", ".", "_reclassify_Bird_regions_with_data", "(", ")", "# Initially all seismicity rates assigned to background rate", "self", ".", "strain", ".", "seismicity_rate", "=", "np", ".", "tile", "(", "self", ".", "base_rate", ",", "[", "self", ".", "strain", ".", "get_number_observations", "(", ")", ",", "1", "]", ")", "regionalisation_zones", "=", "(", "np", ".", "unique", "(", "self", ".", "strain", ".", "data", "[", "'region'", "]", ")", ")", ".", "tolist", "(", ")", "for", "region", "in", "regionalisation_zones", ":", "id0", "=", "self", ".", "strain", ".", "data", "[", "'region'", "]", "==", "region", "if", "b'IPL'", "in", "region", ":", "# For intra-plate seismicity everything is refered to", "# the background rate", "continue", "elif", "b'OSR_special_1'", "in", "region", ":", "# Special case 1 - normal and transform faulting", "calculated_rate", "=", "self", ".", "get_rate_osr_normal_transform", "(", "self", ".", "threshold_moment", ",", "id0", ")", "elif", "b'OSR_special_2'", "in", "region", ":", "# Special case 2 - convergent and transform faulting", "calculated_rate", "=", "self", ".", "get_rate_osr_convergent_transform", "(", "self", ".", "threshold_moment", ",", "id0", ")", "else", ":", "region", "=", "region", ".", "decode", "(", "'utf-8'", ")", "calculated_rate", "=", "self", ".", "regionalisation", "[", "region", "]", "[", "'adjustment_factor'", "]", "*", "self", ".", "continuum_seismicity", "(", "self", ".", "threshold_moment", ",", "self", ".", "strain", ".", "data", "[", "'e1h'", "]", "[", "id0", "]", ",", "self", ".", "strain", ".", "data", "[", "'e2h'", "]", "[", "id0", "]", ",", "self", ".", "strain", ".", "data", "[", "'err'", "]", "[", "id0", "]", ",", "self", ".", "regionalisation", "[", "region", "]", ")", "for", "jloc", ",", "iloc", "in", "enumerate", "(", "np", ".", "where", "(", "id0", ")", "[", "0", "]", ")", ":", "# Where the calculated rate exceeds the base rate then becomes", "# calculated rate. In this version the magnitudes are treated", "# independently (i.e. 
if Rate(M < 7) > Base Rate (M < 7) but", "# Rate (M > 7) < Base Rate (M > 7) then returned Rate (M < 7)", "# = Rate (M < 7) and returned Rate (M > 7) = Base Rate (M > 7)", "id1", "=", "calculated_rate", "[", "jloc", "]", ">", "self", ".", "base_rate", "self", ".", "strain", ".", "seismicity_rate", "[", "iloc", ",", "id1", "]", "=", "calculated_rate", "[", "jloc", ",", "id1", "]", "if", "not", "cumulative", "and", "self", ".", "number_magnitudes", ">", "1", ":", "# Seismicity rates are currently cumulative - need to turn them", "# into discrete", "for", "iloc", "in", "range", "(", "0", ",", "self", ".", "number_magnitudes", "-", "1", ")", ":", "self", ".", "strain", ".", "seismicity_rate", "[", ":", ",", "iloc", "]", "=", "self", ".", "strain", ".", "seismicity_rate", "[", ":", ",", "iloc", "]", "-", "self", ".", "strain", ".", "seismicity_rate", "[", ":", ",", "iloc", "+", "1", "]", "if", "not", "in_seconds", ":", "self", ".", "strain", ".", "seismicity_rate", "=", "self", ".", "strain", ".", "seismicity_rate", "*", "SECS_PER_YEAR", "for", "key", "in", "STRAIN_VARIABLES", ":", "self", ".", "strain", ".", "data", "[", "key", "]", "=", "self", ".", "strain", ".", "data", "[", "key", "]", "*", "SECS_PER_YEAR" ]
Main function to calculate the activity rate (for each of the magnitudes in target_magnitudes) for all of the cells specified in the input strain model file :param strain_data: Strain model as an instance of :class: openquake.hmtk.strain.geodetic_strain.GeodeticStrain :param bool cumulative: Set to true if the cumulative rate is required, False for incremental :param bool in_seconds: Returns the activity rate in seconds (True) or else as an annual activity rate
[ "Main", "function", "to", "calculate", "the", "activity", "rate", "(", "for", "each", "of", "the", "magnitudes", "in", "target_magnitudes", ")", "for", "all", "of", "the", "cells", "specified", "in", "the", "input", "strain", "model", "file" ]
python
train
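The cumulative-to-incremental step at the end of calculate_activity_rate above is a first difference along the magnitude axis; a small numpy sketch with made-up rates for two cells and three magnitude bins:

import numpy as np

cum = np.array([[1.0e-4, 3.0e-5, 4.0e-6],    # made-up cumulative rates, M >= [5, 6, 7]
                [5.0e-5, 1.0e-5, 1.0e-6]])
inc = cum.copy()
for iloc in range(cum.shape[1] - 1):
    inc[:, iloc] = inc[:, iloc] - inc[:, iloc + 1]
# the last column stays as-is: it is already the rate of the largest magnitude bin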
singularityhub/sregistry-cli
sregistry/main/workers/tasks.py
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/workers/tasks.py#L73-L81
def post(url,data=None,return_json=True): '''post will use requests to post to a particular url ''' bot.debug("POST %s" %url) return call(url, headers=headers, func=requests.post, data=data, return_json=return_json)
[ "def", "post", "(", "url", ",", "data", "=", "None", ",", "return_json", "=", "True", ")", ":", "bot", ".", "debug", "(", "\"POST %s\"", "%", "url", ")", "return", "call", "(", "url", ",", "headers", "=", "headers", ",", "func", "=", "requests", ".", "post", ",", "data", "=", "data", ",", "return_json", "=", "return_json", ")" ]
post will use requests to post to a particular url
[ "post", "will", "use", "requests", "to", "get", "a", "particular", "url" ]
python
test
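A hedged usage sketch of the post wrapper above; the endpoint and payload are made-up:

response = post(
    'https://sregistry.example.com/api/container',   # hypothetical endpoint
    data={'name': 'busybox', 'tag': 'latest'},
    return_json=True,
)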
HumanCellAtlas/dcp-cli
hca/dss/upload_to_cloud.py
https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/dss/upload_to_cloud.py#L53-L102
def upload_to_cloud(file_handles, staging_bucket, replica, from_cloud=False): """ Upload files to cloud. :param file_handles: If from_cloud, file_handles is a aws s3 directory path to files with appropriate metadata uploaded. Else, a list of binary file_handles to upload. :param staging_bucket: The aws bucket to upload the files to. :param replica: The cloud replica to write to. One of 'aws', 'gc', or 'azure'. No functionality now. :return: a list of file uuids, key-names, and absolute file paths (local) for uploaded files """ s3 = boto3.resource("s3") file_uuids = [] key_names = [] abs_file_paths = [] if from_cloud: file_uuids, key_names = _copy_from_s3(file_handles[0], s3) else: destination_bucket = s3.Bucket(staging_bucket) for raw_fh in file_handles: file_size = os.path.getsize(raw_fh.name) multipart_chunksize = s3_multipart.get_s3_multipart_chunk_size(file_size) tx_cfg = TransferConfig(multipart_threshold=s3_multipart.MULTIPART_THRESHOLD, multipart_chunksize=multipart_chunksize) with ChecksummingBufferedReader(raw_fh, multipart_chunksize) as fh: file_uuid = str(uuid.uuid4()) key_name = "{}/{}".format(file_uuid, os.path.basename(fh.raw.name)) destination_bucket.upload_fileobj( fh, key_name, Config=tx_cfg, ExtraArgs={ 'ContentType': _mime_type(fh.raw.name), } ) sums = fh.get_checksums() metadata = { "hca-dss-s3_etag": sums["s3_etag"], "hca-dss-sha1": sums["sha1"], "hca-dss-sha256": sums["sha256"], "hca-dss-crc32c": sums["crc32c"], } s3.meta.client.put_object_tagging(Bucket=destination_bucket.name, Key=key_name, Tagging=dict(TagSet=encode_tags(metadata))) file_uuids.append(file_uuid) key_names.append(key_name) abs_file_paths.append(fh.raw.name) return file_uuids, key_names, abs_file_paths
[ "def", "upload_to_cloud", "(", "file_handles", ",", "staging_bucket", ",", "replica", ",", "from_cloud", "=", "False", ")", ":", "s3", "=", "boto3", ".", "resource", "(", "\"s3\"", ")", "file_uuids", "=", "[", "]", "key_names", "=", "[", "]", "abs_file_paths", "=", "[", "]", "if", "from_cloud", ":", "file_uuids", ",", "key_names", "=", "_copy_from_s3", "(", "file_handles", "[", "0", "]", ",", "s3", ")", "else", ":", "destination_bucket", "=", "s3", ".", "Bucket", "(", "staging_bucket", ")", "for", "raw_fh", "in", "file_handles", ":", "file_size", "=", "os", ".", "path", ".", "getsize", "(", "raw_fh", ".", "name", ")", "multipart_chunksize", "=", "s3_multipart", ".", "get_s3_multipart_chunk_size", "(", "file_size", ")", "tx_cfg", "=", "TransferConfig", "(", "multipart_threshold", "=", "s3_multipart", ".", "MULTIPART_THRESHOLD", ",", "multipart_chunksize", "=", "multipart_chunksize", ")", "with", "ChecksummingBufferedReader", "(", "raw_fh", ",", "multipart_chunksize", ")", "as", "fh", ":", "file_uuid", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "key_name", "=", "\"{}/{}\"", ".", "format", "(", "file_uuid", ",", "os", ".", "path", ".", "basename", "(", "fh", ".", "raw", ".", "name", ")", ")", "destination_bucket", ".", "upload_fileobj", "(", "fh", ",", "key_name", ",", "Config", "=", "tx_cfg", ",", "ExtraArgs", "=", "{", "'ContentType'", ":", "_mime_type", "(", "fh", ".", "raw", ".", "name", ")", ",", "}", ")", "sums", "=", "fh", ".", "get_checksums", "(", ")", "metadata", "=", "{", "\"hca-dss-s3_etag\"", ":", "sums", "[", "\"s3_etag\"", "]", ",", "\"hca-dss-sha1\"", ":", "sums", "[", "\"sha1\"", "]", ",", "\"hca-dss-sha256\"", ":", "sums", "[", "\"sha256\"", "]", ",", "\"hca-dss-crc32c\"", ":", "sums", "[", "\"crc32c\"", "]", ",", "}", "s3", ".", "meta", ".", "client", ".", "put_object_tagging", "(", "Bucket", "=", "destination_bucket", ".", "name", ",", "Key", "=", "key_name", ",", "Tagging", "=", "dict", "(", "TagSet", "=", "encode_tags", "(", "metadata", ")", ")", ")", "file_uuids", ".", "append", "(", "file_uuid", ")", "key_names", ".", "append", "(", "key_name", ")", "abs_file_paths", ".", "append", "(", "fh", ".", "raw", ".", "name", ")", "return", "file_uuids", ",", "key_names", ",", "abs_file_paths" ]
Upload files to cloud. :param file_handles: If from_cloud, file_handles is an aws s3 directory path to files with appropriate metadata uploaded. Else, a list of binary file_handles to upload. :param staging_bucket: The aws bucket to upload the files to. :param replica: The cloud replica to write to. One of 'aws', 'gc', or 'azure'. No functionality now. :return: a list of file uuids, key-names, and absolute file paths (local) for uploaded files
[ "Upload", "files", "to", "cloud", "." ]
python
train
quantopian/zipline
zipline/data/minute_bars.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L732-L760
def write_cols(self, sid, dts, cols, invalid_data_behavior='warn'): """ Write the OHLCV data for the given sid. If there is no bcolz ctable yet created for the sid, create it. If the length of the bcolz ctable is not exactly to the date before the first day provided, fill the ctable with 0s up to that date. Parameters ---------- sid : int The asset identifier for the data being written. dts : datetime64 array The dts corresponding to values in cols. cols : dict of str -> np.array dict of market data with the following characteristics. keys are ('open', 'high', 'low', 'close', 'volume') open : float64 high : float64 low : float64 close : float64 volume : float64|int64 """ if not all(len(dts) == len(cols[name]) for name in self.COL_NAMES): raise BcolzMinuteWriterColumnMismatch( "Length of dts={0} should match cols: {1}".format( len(dts), " ".join("{0}={1}".format(name, len(cols[name])) for name in self.COL_NAMES))) self._write_cols(sid, dts, cols, invalid_data_behavior)
[ "def", "write_cols", "(", "self", ",", "sid", ",", "dts", ",", "cols", ",", "invalid_data_behavior", "=", "'warn'", ")", ":", "if", "not", "all", "(", "len", "(", "dts", ")", "==", "len", "(", "cols", "[", "name", "]", ")", "for", "name", "in", "self", ".", "COL_NAMES", ")", ":", "raise", "BcolzMinuteWriterColumnMismatch", "(", "\"Length of dts={0} should match cols: {1}\"", ".", "format", "(", "len", "(", "dts", ")", ",", "\" \"", ".", "join", "(", "\"{0}={1}\"", ".", "format", "(", "name", ",", "len", "(", "cols", "[", "name", "]", ")", ")", "for", "name", "in", "self", ".", "COL_NAMES", ")", ")", ")", "self", ".", "_write_cols", "(", "sid", ",", "dts", ",", "cols", ",", "invalid_data_behavior", ")" ]
Write the OHLCV data for the given sid. If there is no bcolz ctable yet created for the sid, create it. If the length of the bcolz ctable is not exactly to the date before the first day provided, fill the ctable with 0s up to that date. Parameters ---------- sid : int The asset identifier for the data being written. dts : datetime64 array The dts corresponding to values in cols. cols : dict of str -> np.array dict of market data with the following characteristics. keys are ('open', 'high', 'low', 'close', 'volume') open : float64 high : float64 low : float64 close : float64 volume : float64|int64
[ "Write", "the", "OHLCV", "data", "for", "the", "given", "sid", ".", "If", "there", "is", "no", "bcolz", "ctable", "yet", "created", "for", "the", "sid", "create", "it", ".", "If", "the", "length", "of", "the", "bcolz", "ctable", "is", "not", "exactly", "to", "the", "date", "before", "the", "first", "day", "provided", "fill", "the", "ctable", "with", "0s", "up", "to", "that", "date", "." ]
python
train
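A hedged sketch of the calling convention the length check in write_cols enforces; the sid, timestamps, and prices are made-up, and writer stands in for an already-constructed minute-bar writer:

import numpy as np
import pandas as pd

dts = pd.date_range('2016-01-05 14:31', periods=3, freq='min').values
cols = {
    'open':   np.array([10.0, 10.1, 10.2]),
    'high':   np.array([10.2, 10.3, 10.4]),
    'low':    np.array([9.9, 10.0, 10.1]),
    'close':  np.array([10.1, 10.2, 10.3]),
    'volume': np.array([1000, 1500, 900], dtype=np.int64),
}
# every column must match len(dts), else BcolzMinuteWriterColumnMismatch is raised
writer.write_cols(sid=1, dts=dts, cols=cols)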
globality-corp/microcosm-postgres
microcosm_postgres/store.py
https://github.com/globality-corp/microcosm-postgres/blob/43dd793b1fc9b84e4056700f350e79e0df5ff501/microcosm_postgres/store.py#L256-L269
def _delete(self, *criterion): """ Delete a model by some criterion. Avoids race-condition check-then-delete logic by checking the count of affected rows. :raises `ModelNotFoundError` if the row cannot be deleted. """ with self.flushing(): count = self._query(*criterion).delete() if count == 0: raise ModelNotFoundError return True
[ "def", "_delete", "(", "self", ",", "*", "criterion", ")", ":", "with", "self", ".", "flushing", "(", ")", ":", "count", "=", "self", ".", "_query", "(", "*", "criterion", ")", ".", "delete", "(", ")", "if", "count", "==", "0", ":", "raise", "ModelNotFoundError", "return", "True" ]
Delete a model by some criterion. Avoids race-condition check-then-delete logic by checking the count of affected rows. :raises `ModelNotFoundError` if the row cannot be deleted.
[ "Delete", "a", "model", "by", "some", "criterion", "." ]
python
train
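The rowcount pattern in _delete above generalizes beyond this store; a minimal sketch with plain SQLAlchemy, where session and the Widget model are hypothetical:

count = session.query(Widget).filter(Widget.id == widget_id).delete()
session.flush()
if count == 0:
    # no check-then-delete race: the DELETE itself reports whether a row existed
    raise LookupError('no such widget')   # stand-in for ModelNotFoundError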
quantumlib/Cirq
cirq/schedules/schedule.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/schedules/schedule.py#L156-L174
def include(self, scheduled_operation: ScheduledOperation): """Adds a scheduled operation to the schedule. Args: scheduled_operation: The operation to add. Raises: ValueError: The operation collided with something already in the schedule. """ collisions = self.query(time=scheduled_operation.time, duration=scheduled_operation.duration, qubits=scheduled_operation.operation.qubits) if collisions: raise ValueError('Operation {} has collisions: {}'.format( scheduled_operation.operation, collisions)) self.scheduled_operations.add(scheduled_operation) self._max_duration = max(self._max_duration, scheduled_operation.duration)
[ "def", "include", "(", "self", ",", "scheduled_operation", ":", "ScheduledOperation", ")", ":", "collisions", "=", "self", ".", "query", "(", "time", "=", "scheduled_operation", ".", "time", ",", "duration", "=", "scheduled_operation", ".", "duration", ",", "qubits", "=", "scheduled_operation", ".", "operation", ".", "qubits", ")", "if", "collisions", ":", "raise", "ValueError", "(", "'Operation {} has collisions: {}'", ".", "format", "(", "scheduled_operation", ".", "operation", ",", "collisions", ")", ")", "self", ".", "scheduled_operations", ".", "add", "(", "scheduled_operation", ")", "self", ".", "_max_duration", "=", "max", "(", "self", ".", "_max_duration", ",", "scheduled_operation", ".", "duration", ")" ]
Adds a scheduled operation to the schedule. Args: scheduled_operation: The operation to add. Raises: ValueError: The operation collided with something already in the schedule.
[ "Adds", "a", "scheduled", "operation", "to", "the", "schedule", "." ]
python
train
ibm-watson-iot/iot-python
tmp/src/things/things.py
https://github.com/ibm-watson-iot/iot-python/blob/195f05adce3fba4ec997017e41e02ebd85c0c4cc/tmp/src/things/things.py#L860-L872
def deleteLogicalInterface(self, logicalInterfaceId): """ Deletes a logical interface. Parameters: logicalInterfaceId (string). Throws APIException on failure. """ req = ApiClient.oneLogicalInterfaceUrl % (self.host, "/draft", logicalInterfaceId) resp = requests.delete(req, auth=self.credentials, verify=self.verify) if resp.status_code == 204: self.logger.debug("logical interface deleted") else: raise ibmiotf.APIException(resp.status_code, "HTTP error deleting a logical interface", resp) return resp
[ "def", "deleteLogicalInterface", "(", "self", ",", "logicalInterfaceId", ")", ":", "req", "=", "ApiClient", ".", "oneLogicalInterfaceUrl", "%", "(", "self", ".", "host", ",", "\"/draft\"", ",", "logicalInterfaceId", ")", "resp", "=", "requests", ".", "delete", "(", "req", ",", "auth", "=", "self", ".", "credentials", ",", "verify", "=", "self", ".", "verify", ")", "if", "resp", ".", "status_code", "==", "204", ":", "self", ".", "logger", ".", "debug", "(", "\"logical interface deleted\"", ")", "else", ":", "raise", "ibmiotf", ".", "APIException", "(", "resp", ".", "status_code", ",", "\"HTTP error deleting a logical interface\"", ",", "resp", ")", "return", "resp" ]
Deletes a logical interface. Parameters: logicalInterfaceId (string). Throws APIException on failure.
[ "Deletes", "a", "logical", "interface", ".", "Parameters", ":", "logicalInterfaceId", "(", "string", ")", ".", "Throws", "APIException", "on", "failure", "." ]
python
test
InspectorMustache/base16-builder-python
pybase16_builder/updater.py
https://github.com/InspectorMustache/base16-builder-python/blob/586f1f87ee9f70696ab19c542af6ef55c6548a2e/pybase16_builder/updater.py#L72-L95
def git_clone_job_list(job_list): """Deal with all git clone jobs in $job_list.""" queue = Queue() for job in job_list: queue.put(job) if len(job_list) < 20: thread_num = len(job_list) else: thread_num = 20 threads = [] for _ in range(thread_num): thread = Thread(target=git_clone_worker, args=(queue, )) thread.start() threads.append(thread) queue.join() for _ in range(thread_num): queue.put(None) for thread in threads: thread.join()
[ "def", "git_clone_job_list", "(", "job_list", ")", ":", "queue", "=", "Queue", "(", ")", "for", "job", "in", "job_list", ":", "queue", ".", "put", "(", "job", ")", "if", "len", "(", "job_list", ")", "<", "20", ":", "thread_num", "=", "len", "(", "job_list", ")", "else", ":", "thread_num", "=", "20", "threads", "=", "[", "]", "for", "_", "in", "range", "(", "thread_num", ")", ":", "thread", "=", "Thread", "(", "target", "=", "git_clone_worker", ",", "args", "=", "(", "queue", ",", ")", ")", "thread", ".", "start", "(", ")", "threads", ".", "append", "(", "thread", ")", "queue", ".", "join", "(", ")", "for", "_", "in", "range", "(", "thread_num", ")", ":", "queue", ".", "put", "(", "None", ")", "for", "thread", "in", "threads", ":", "thread", ".", "join", "(", ")" ]
Deal with all git clone jobs in $job_list.
[ "Deal", "with", "all", "git", "clone", "jobs", "in", "$job_list", "." ]
python
train
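The matching worker is not shown in the git_clone_job_list record above; a minimal sketch of the None-sentinel pattern it relies on, with a made-up clone step:

import subprocess

def git_clone_worker(queue):
    while True:
        job = queue.get()
        if job is None:            # sentinel: one is queued per worker thread
            break
        try:
            subprocess.run(['git', 'clone', job])   # assumes each job is a repo URL
        finally:
            queue.task_done()      # required for queue.join() to return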
juju/python-libjuju
juju/model.py
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/model.py#L328-L341
def safe_data(self): """The data dictionary for this entity. If this `ModelEntity` points to the dead state, it will raise `DeadEntityException`. """ if self.data is None: raise DeadEntityException( "Entity {}:{} is dead - its attributes can no longer be " "accessed. Use the .previous() method on this object to get " "a copy of the object at its previous state.".format( self.entity_type, self.entity_id)) return self.data
[ "def", "safe_data", "(", "self", ")", ":", "if", "self", ".", "data", "is", "None", ":", "raise", "DeadEntityException", "(", "\"Entity {}:{} is dead - its attributes can no longer be \"", "\"accessed. Use the .previous() method on this object to get \"", "\"a copy of the object at its previous state.\"", ".", "format", "(", "self", ".", "entity_type", ",", "self", ".", "entity_id", ")", ")", "return", "self", ".", "data" ]
The data dictionary for this entity. If this `ModelEntity` points to the dead state, it will raise `DeadEntityException`.
[ "The", "data", "dictionary", "for", "this", "entity", "." ]
python
train
shoebot/shoebot
lib/photobot/__init__.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/photobot/__init__.py#L57-L83
def layer(self, img, x=0, y=0, name=""): """Creates a new layer from file, Layer, PIL Image. If img is an image file or PIL Image object, Creates a new layer with the given image file. The image is positioned on the canvas at x, y. If img is a Layer, uses that layer's x and y position and name. """ from types import StringType if isinstance(img, Image.Image): img = img.convert("RGBA") self.layers.append(Layer(self, img, x, y, name)) return len(self.layers)-1 if isinstance(img, Layer): img.canvas = self self.layers.append(img) return len(self.layers)-1 if type(img) == StringType: img = Image.open(img) img = img.convert("RGBA") self.layers.append(Layer(self, img, x, y, name)) return len(self.layers)-1
[ "def", "layer", "(", "self", ",", "img", ",", "x", "=", "0", ",", "y", "=", "0", ",", "name", "=", "\"\"", ")", ":", "from", "types", "import", "StringType", "if", "isinstance", "(", "img", ",", "Image", ".", "Image", ")", ":", "img", "=", "img", ".", "convert", "(", "\"RGBA\"", ")", "self", ".", "layers", ".", "append", "(", "Layer", "(", "self", ",", "img", ",", "x", ",", "y", ",", "name", ")", ")", "return", "len", "(", "self", ".", "layers", ")", "-", "1", "if", "isinstance", "(", "img", ",", "Layer", ")", ":", "img", ".", "canvas", "=", "self", "self", ".", "layers", ".", "append", "(", "img", ")", "return", "len", "(", "self", ".", "layers", ")", "-", "1", "if", "type", "(", "img", ")", "==", "StringType", ":", "img", "=", "Image", ".", "open", "(", "img", ")", "img", "=", "img", ".", "convert", "(", "\"RGBA\"", ")", "self", ".", "layers", ".", "append", "(", "Layer", "(", "self", ",", "img", ",", "x", ",", "y", ",", "name", ")", ")", "return", "len", "(", "self", ".", "layers", ")", "-", "1" ]
Creates a new layer from file, Layer, PIL Image. If img is an image file or PIL Image object, Creates a new layer with the given image file. The image is positioned on the canvas at x, y. If img is a Layer, uses that layer's x and y position and name.
[ "Creates", "a", "new", "layer", "from", "file", "Layer", "PIL", "Image", ".", "If", "img", "is", "an", "image", "file", "or", "PIL", "Image", "object", "Creates", "a", "new", "layer", "with", "the", "given", "image", "file", ".", "The", "image", "is", "positioned", "on", "the", "canvas", "at", "x", "y", ".", "If", "img", "is", "a", "Layer", "uses", "that", "layer", "s", "x", "and", "y", "position", "and", "name", "." ]
python
valid
rorr73/LifeSOSpy
lifesospy/baseunit.py
https://github.com/rorr73/LifeSOSpy/blob/62360fbab2e90bf04d52b547093bdab2d4e389b4/lifesospy/baseunit.py#L285-L298
def start(self) -> None: """ Start monitoring the base unit. """ self._shutdown = False # Start listening (if server) / Open connection (if client) if isinstance(self._protocol, Server): self.create_task(self._async_listen) elif isinstance(self._protocol, Client): self.create_task(self._async_open) else: raise NotImplementedError
[ "def", "start", "(", "self", ")", "->", "None", ":", "self", ".", "_shutdown", "=", "False", "# Start listening (if server) / Open connection (if client)", "if", "isinstance", "(", "self", ".", "_protocol", ",", "Server", ")", ":", "self", ".", "create_task", "(", "self", ".", "_async_listen", ")", "elif", "isinstance", "(", "self", ".", "_protocol", ",", "Client", ")", ":", "self", ".", "create_task", "(", "self", ".", "_async_open", ")", "else", ":", "raise", "NotImplementedError" ]
Start monitoring the base unit.
[ "Start", "monitoring", "the", "base", "unit", "." ]
python
train
basho/riak-python-client
riak/transports/tcp/connection.py
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/tcp/connection.py#L105-L122
def _auth(self): """ Perform an authorization request against Riak returns True upon success, False otherwise Note: Riak will sleep for a short period of time upon a failed auth request/response to prevent denial of service attacks """ codec = PbufCodec() username = self._client._credentials.username password = self._client._credentials.password if not password: password = '' msg = codec.encode_auth(username, password) resp_code, _ = self._non_connect_send_recv_msg(msg) if resp_code == riak.pb.messages.MSG_CODE_AUTH_RESP: return True else: return False
[ "def", "_auth", "(", "self", ")", ":", "codec", "=", "PbufCodec", "(", ")", "username", "=", "self", ".", "_client", ".", "_credentials", ".", "username", "password", "=", "self", ".", "_client", ".", "_credentials", ".", "password", "if", "not", "password", ":", "password", "=", "''", "msg", "=", "codec", ".", "encode_auth", "(", "username", ",", "password", ")", "resp_code", ",", "_", "=", "self", ".", "_non_connect_send_recv_msg", "(", "msg", ")", "if", "resp_code", "==", "riak", ".", "pb", ".", "messages", ".", "MSG_CODE_AUTH_RESP", ":", "return", "True", "else", ":", "return", "False" ]
Perform an authorization request against Riak returns True upon success, False otherwise Note: Riak will sleep for a short period of time upon a failed auth request/response to prevent denial of service attacks
[ "Perform", "an", "authorization", "request", "against", "Riak", "returns", "True", "upon", "success", "False", "otherwise", "Note", ":", "Riak", "will", "sleep", "for", "a", "short", "period", "of", "time", "upon", "a", "failed", "auth", "request", "/", "response", "to", "prevent", "denial", "of", "service", "attacks" ]
python
train
lawsie/guizero
guizero/base.py
https://github.com/lawsie/guizero/blob/84c7f0b314fa86f9fc88eb11c9a0f6c4b57155e2/guizero/base.py#L77-L100
def _set_tk_config(self, keys, value): """ Sets the config on the widget's tk object :param string/List keys: The tk config key or a list of tk keys. :param variable value: The value to set. If the value is `None`, the config value will be reset to its default. """ # if a single key is passed, convert to list if isinstance(keys, str): keys = [keys] # loop through all the keys for key in keys: if key in self.tk.keys(): if value is None: # reset to default self.tk[key] = self._tk_defaults[key] else: self.tk[key] = value
[ "def", "_set_tk_config", "(", "self", ",", "keys", ",", "value", ")", ":", "# if a single key is passed, convert to list", "if", "isinstance", "(", "keys", ",", "str", ")", ":", "keys", "=", "[", "keys", "]", "# loop through all the keys", "for", "key", "in", "keys", ":", "if", "key", "in", "self", ".", "tk", ".", "keys", "(", ")", ":", "if", "value", "is", "None", ":", "# reset to default", "self", ".", "tk", "[", "key", "]", "=", "self", ".", "_tk_defaults", "[", "key", "]", "else", ":", "self", ".", "tk", "[", "key", "]", "=", "value" ]
Sets the config on the widget's tk object :param string/List keys: The tk config key or a list of tk keys. :param variable value: The value to set. If the value is `None`, the config value will be reset to its default.
[ "Gets", "the", "config", "from", "the", "widget", "s", "tk", "object" ]
python
train
kxxoling/flask-decorators
flask_decorators/__init__.py
https://github.com/kxxoling/flask-decorators/blob/e0bf4fc1a5260548063ef8b8adbb782151cd72cc/flask_decorators/__init__.py#L5-L17
def json_or_jsonp(func): """Wrap response in JSON or JSONP style""" @wraps(func) def _(*args, **kwargs): mimetype = 'application/javascript' callback = request.args.get('callback', None) if callback is None: content = func(*args, **kwargs) else: content = "%s(%s)" % (callback, func(*args, **kwargs)) return current_app.response_class(content, mimetype=mimetype) return _
[ "def", "json_or_jsonp", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "_", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "mimetype", "=", "'application/javascript'", "callback", "=", "request", ".", "args", ".", "get", "(", "'callback'", ",", "None", ")", "if", "callback", "is", "None", ":", "content", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "content", "=", "\"%s(%s)\"", "%", "(", "callback", ",", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")", "return", "current_app", ".", "response_class", "(", "content", ",", "mimetype", "=", "mimetype", ")", "return", "_" ]
Wrap response in JSON or JSONP style
[ "Wrap", "response", "in", "JSON", "or", "JSONP", "style" ]
python
train
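A hedged usage sketch of json_or_jsonp in a Flask app; the route and payload are made-up, and the wrapped view is expected to return an already-serialized JSON string:

from flask import Flask

app = Flask(__name__)

@app.route('/api/status')
@json_or_jsonp
def status():
    return '{"ok": true}'

# GET /api/status              -> {"ok": true}
# GET /api/status?callback=cb  -> cb({"ok": true})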
marrow/util
marrow/util/object.py
https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/object.py#L51-L65
def merge(s, t): """Merge dictionary t into s.""" for k, v in t.items(): if isinstance(v, dict): if k not in s: s[k] = v continue s[k] = merge(s[k], v) continue s[k] = v return s
[ "def", "merge", "(", "s", ",", "t", ")", ":", "for", "k", ",", "v", "in", "t", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "if", "k", "not", "in", "s", ":", "s", "[", "k", "]", "=", "v", "continue", "s", "[", "k", "]", "=", "merge", "(", "s", "[", "k", "]", ",", "v", ")", "continue", "s", "[", "k", "]", "=", "v", "return", "s" ]
Merge dictionary t into s.
[ "Merge", "dictionary", "t", "into", "s", "." ]
python
train
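A worked example of the recursive merge above; note that t's values win on conflicts, nested dicts are merged key-by-key, and s is mutated in place:

s = {'a': 1, 'nested': {'x': 1, 'y': 2}}
t = {'b': 3, 'nested': {'y': 20, 'z': 30}}
assert merge(s, t) == {'a': 1, 'b': 3,
                       'nested': {'x': 1, 'y': 20, 'z': 30}}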
SmokinCaterpillar/pypet
pypet/trajectory.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/trajectory.py#L1489-L1502
def f_load_skeleton(self): """Loads the full skeleton from the storage service. This needs to be done after a successful exploration in order to update the trajectory tree with all results and derived parameters from the individual single runs. This will only add empty results and derived parameters (i.e. the skeleton) and load annotations. """ self.f_load(self.v_name, as_new=False, load_parameters=pypetconstants.LOAD_SKELETON, load_derived_parameters=pypetconstants.LOAD_SKELETON, load_results=pypetconstants.LOAD_SKELETON, load_other_data=pypetconstants.LOAD_SKELETON, with_run_information=False)
[ "def", "f_load_skeleton", "(", "self", ")", ":", "self", ".", "f_load", "(", "self", ".", "v_name", ",", "as_new", "=", "False", ",", "load_parameters", "=", "pypetconstants", ".", "LOAD_SKELETON", ",", "load_derived_parameters", "=", "pypetconstants", ".", "LOAD_SKELETON", ",", "load_results", "=", "pypetconstants", ".", "LOAD_SKELETON", ",", "load_other_data", "=", "pypetconstants", ".", "LOAD_SKELETON", ",", "with_run_information", "=", "False", ")" ]
Loads the full skeleton from the storage service. This needs to be done after a successful exploration in order to update the trajectory tree with all results and derived parameters from the individual single runs. This will only add empty results and derived parameters (i.e. the skeleton) and load annotations.
[ "Loads", "the", "full", "skeleton", "from", "the", "storage", "service", "." ]
python
test
gbiggs/rtctree
rtctree/path.py
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/path.py#L28-L87
def parse_path(path): '''Parses an address into directory and port parts. The last segment of the address will be checked to see if it matches a port specification (i.e. contains a colon followed by text). This will be returned separately from the directory parts. If a leading / is given, that will be returned as the first directory component. All other / characters are removed. All leading / characters are condensed into a single leading /. Any path components that are . will be removed, as they just point to the previous path component. For example, '/localhost/.' will become '/localhost'. Any path components that are .. will be removed, along with the previous path component. If this renders the path empty, it will be replaced with '/'. Examples: >>> parse_path('localhost:30000/manager/comp0.rtc') (['localhost:30000', 'manager', 'comp0.rtc'], None) >>> parse_path('localhost/manager/comp0.rtc:in') (['localhost', 'manager', 'comp0.rtc'], 'in') >>> parse_path('/localhost/manager/comp0.rtc') (['/', 'localhost', 'manager', 'comp0.rtc'], None) >>> parse_path('/localhost/manager/comp0.rtc:in') (['/', 'localhost', 'manager', 'comp0.rtc'], 'in') >>> parse_path('manager/comp0.rtc') (['manager', 'comp0.rtc'], None) >>> parse_path('comp0.rtc') (['comp0.rtc'], None) ''' bits = path.lstrip('/').split('/') if not bits: raise exceptions.BadPathError(path) if bits[-1]: bits[-1], port = get_port(bits[-1]) else: port = None if path[0] == '/': bits = ['/'] + bits condensed_bits = [] for bit in bits: if bit == '.': continue if bit == '..': condensed_bits = condensed_bits[:-1] continue condensed_bits.append(bit) if not condensed_bits: condensed_bits = ['/'] return condensed_bits, port
[ "def", "parse_path", "(", "path", ")", ":", "bits", "=", "path", ".", "lstrip", "(", "'/'", ")", ".", "split", "(", "'/'", ")", "if", "not", "bits", ":", "raise", "exceptions", ".", "BadPathError", "(", "path", ")", "if", "bits", "[", "-", "1", "]", ":", "bits", "[", "-", "1", "]", ",", "port", "=", "get_port", "(", "bits", "[", "-", "1", "]", ")", "else", ":", "port", "=", "None", "if", "path", "[", "0", "]", "==", "'/'", ":", "bits", "=", "[", "'/'", "]", "+", "bits", "condensed_bits", "=", "[", "]", "for", "bit", "in", "bits", ":", "if", "bit", "==", "'.'", ":", "continue", "if", "bit", "==", "'..'", ":", "condensed_bits", "=", "condensed_bits", "[", ":", "-", "1", "]", "continue", "condensed_bits", ".", "append", "(", "bit", ")", "if", "not", "condensed_bits", ":", "condensed_bits", "=", "[", "'/'", "]", "return", "condensed_bits", ",", "port" ]
Parses an address into directory and port parts. The last segment of the address will be checked to see if it matches a port specification (i.e. contains a colon followed by text). This will be returned separately from the directory parts. If a leading / is given, that will be returned as the first directory component. All other / characters are removed. All leading / characters are condensed into a single leading /. Any path components that are . will be removed, as they just point to the previous path component. For example, '/localhost/.' will become '/localhost'. Any path components that are .. will be removed, along with the previous path component. If this renders the path empty, it will be replaced with '/'. Examples: >>> parse_path('localhost:30000/manager/comp0.rtc') (['localhost:30000', 'manager', 'comp0.rtc'], None) >>> parse_path('localhost/manager/comp0.rtc:in') (['localhost', 'manager', 'comp0.rtc'], 'in') >>> parse_path('/localhost/manager/comp0.rtc') (['/', 'localhost', 'manager', 'comp0.rtc'], None) >>> parse_path('/localhost/manager/comp0.rtc:in') (['/', 'localhost', 'manager', 'comp0.rtc'], 'in') >>> parse_path('manager/comp0.rtc') (['manager', 'comp0.rtc'], None) >>> parse_path('comp0.rtc') (['comp0.rtc'], None)
[ "Parses", "an", "address", "into", "directory", "and", "port", "parts", ".", "The", "last", "segment", "of", "the", "address", "will", "be", "checked", "to", "see", "if", "it", "matches", "a", "port", "specification", "(", "i", ".", "e", ".", "contains", "a", "colon", "followed", "by", "text", ")", ".", "This", "will", "be", "returned", "separately", "from", "the", "directory", "parts", ".", "If", "a", "leading", "/", "is", "given", "that", "will", "be", "returned", "as", "the", "first", "directory", "component", ".", "All", "other", "/", "characters", "are", "removed", ".", "All", "leading", "/", "characters", "are", "condensed", "into", "a", "single", "leading", "/", ".", "Any", "path", "components", "that", "are", ".", "will", "be", "removed", "as", "they", "just", "point", "to", "the", "previous", "path", "component", ".", "For", "example", "/", "localhost", "/", ".", "will", "become", "/", "localhost", ".", "Any", "path", "components", "that", "are", "..", "will", "be", "removed", "along", "with", "the", "previous", "path", "component", ".", "If", "this", "renders", "the", "path", "empty", "it", "will", "be", "replaced", "with", "/", ".", "Examples", ":", ">>>", "parse_path", "(", "localhost", ":", "30000", "/", "manager", "/", "comp0", ".", "rtc", ")", "(", "[", "localhost", ":", "30000", "manager", "comp0", ".", "rtc", "]", "None", ")", ">>>", "parse_path", "(", "localhost", "/", "manager", "/", "comp0", ".", "rtc", ":", "in", ")", "(", "[", "localhost", "manager", "comp0", ".", "rtc", "]", "in", ")", ">>>", "parse_path", "(", "/", "localhost", "/", "manager", "/", "comp0", ".", "rtc", ")", "(", "[", "/", "localhost", "manager", "comp0", ".", "rtc", "]", "None", ")", ">>>", "parse_path", "(", "/", "localhost", "/", "manager", "/", "comp0", ".", "rtc", ":", "in", ")", "(", "[", "/", "localhost", "manager", "comp0", ".", "rtc", "]", "in", ")", ">>>", "parse_path", "(", "manager", "/", "comp0", ".", "rtc", ")", "(", "[", "manager", "comp0", ".", "rtc", "]", "None", ")", ">>>", "parse_path", "(", "comp0", ".", "rtc", ")", "(", "[", "comp0", ".", "rtc", "]", "None", ")" ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/parallel/__init__.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/__init__.py#L48-L72
def bind_kernel(**kwargs): """Bind an Engine's Kernel to be used as a full IPython kernel. This allows a running Engine to be used simultaneously as a full IPython kernel with the QtConsole or other frontends. This function returns immediately. """ from IPython.zmq.ipkernel import IPKernelApp from IPython.parallel.apps.ipengineapp import IPEngineApp # first check for IPKernelApp, in which case this should be a no-op # because there is already a bound kernel if IPKernelApp.initialized() and isinstance(IPKernelApp._instance, IPKernelApp): return if IPEngineApp.initialized(): try: app = IPEngineApp.instance() except MultipleInstanceError: pass else: return app.bind_kernel(**kwargs) raise RuntimeError("bind_kernel must be called from an IPEngineApp instance")
[ "def", "bind_kernel", "(", "*", "*", "kwargs", ")", ":", "from", "IPython", ".", "zmq", ".", "ipkernel", "import", "IPKernelApp", "from", "IPython", ".", "parallel", ".", "apps", ".", "ipengineapp", "import", "IPEngineApp", "# first check for IPKernelApp, in which case this should be a no-op", "# because there is already a bound kernel", "if", "IPKernelApp", ".", "initialized", "(", ")", "and", "isinstance", "(", "IPKernelApp", ".", "_instance", ",", "IPKernelApp", ")", ":", "return", "if", "IPEngineApp", ".", "initialized", "(", ")", ":", "try", ":", "app", "=", "IPEngineApp", ".", "instance", "(", ")", "except", "MultipleInstanceError", ":", "pass", "else", ":", "return", "app", ".", "bind_kernel", "(", "*", "*", "kwargs", ")", "raise", "RuntimeError", "(", "\"bind_kernel be called from an IPEngineApp instance\"", ")" ]
Bind an Engine's Kernel to be used as a full IPython kernel. This allows a running Engine to be used simultaneously as a full IPython kernel with the QtConsole or other frontends. This function returns immediately.
[ "Bind", "an", "Engine", "s", "Kernel", "to", "be", "used", "as", "a", "full", "IPython", "kernel", ".", "This", "allows", "a", "running", "Engine", "to", "be", "used", "simultaneously", "as", "a", "full", "IPython", "kernel", "with", "the", "QtConsole", "or", "other", "frontends", ".", "This", "function", "returns", "immediately", "." ]
python
test
fermiPy/fermipy
fermipy/jobs/target_extension.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/target_extension.py#L53-L72
def run_analysis(self, argv): """Run this analysis""" args = self._parser.parse_args(argv) if not HAVE_ST: raise RuntimeError( "Trying to run fermipy analysis, but don't have ST") if is_not_null(args.roi_baseline): gta = GTAnalysis.create(args.roi_baseline, args.config) else: gta = GTAnalysis(args.config, logging={'verbosity': 3}, fileio={'workdir_regex': '\.xml$|\.npy$'}) gta.print_roi() test_source = args.target gta.sed(test_source, outfile='sed_%s.fits' % 'FL8Y', make_plots=True) gta.extension(test_source, make_plots=True) return gta
[ "def", "run_analysis", "(", "self", ",", "argv", ")", ":", "args", "=", "self", ".", "_parser", ".", "parse_args", "(", "argv", ")", "if", "not", "HAVE_ST", ":", "raise", "RuntimeError", "(", "\"Trying to run fermipy analysis, but don't have ST\"", ")", "if", "is_not_null", "(", "args", ".", "roi_baseline", ")", ":", "gta", "=", "GTAnalysis", ".", "create", "(", "args", ".", "roi_baseline", ",", "args", ".", "config", ")", "else", ":", "gta", "=", "GTAnalysis", "(", "args", ".", "config", ",", "logging", "=", "{", "'verbosity'", ":", "3", "}", ",", "fileio", "=", "{", "'workdir_regex'", ":", "'\\.xml$|\\.npy$'", "}", ")", "gta", ".", "print_roi", "(", ")", "test_source", "=", "args", ".", "target", "gta", ".", "sed", "(", "test_source", ",", "outfile", "=", "'sed_%s.fits'", "%", "'FL8Y'", ",", "make_plots", "=", "True", ")", "gta", ".", "extension", "(", "test_source", ",", "make_plots", "=", "True", ")", "return", "gta" ]
Run this analysis
[ "Run", "this", "analysis" ]
python
train
josuebrunel/myql
myql/contrib/table/base.py
https://github.com/josuebrunel/myql/blob/891bad29cc83a81b3f5ebc4d0401d6f2c22f119e/myql/contrib/table/base.py#L121-L142
def removeInput(self, key_id, input_type='key'): """Remove key (key, value, map) from Input key_id : id of the input element i.e <key id='artist' /> input_type : type of the input ; key, value or map """ root = self.etree t_inputs = root.find('inputs') if not t_inputs: return False keys = t_inputs.findall(input_type) key = [ key for key in keys if key.get('id') == key_id ] try: t_inputs.remove(key[0]) return True except (Exception,) as e: print(e) return False
[ "def", "removeInput", "(", "self", ",", "key_id", ",", "input_type", "=", "'key'", ")", ":", "root", "=", "self", ".", "etree", "t_inputs", "=", "root", ".", "find", "(", "'inputs'", ")", "if", "not", "t_inputs", ":", "return", "False", "keys", "=", "t_inputs", ".", "findall", "(", "input_type", ")", "key", "=", "[", "key", "for", "key", "in", "keys", "if", "key", ".", "get", "(", "'id'", ")", "==", "key_id", "]", "try", ":", "t_inputs", ".", "remove", "(", "key", "[", "0", "]", ")", "return", "True", "except", "(", "Exception", ",", ")", "as", "e", ":", "print", "(", "e", ")", "return", "False" ]
Remove key (key, value, map) from Input key_id : id of the input element i.e <key id='artist' /> input_type : type of the input ; key, value or map
[ "Remove", "key", "(", "key", "value", "map", ")", "from", "Input", "key_id", ":", "id", "of", "the", "input", "element", "i", ".", "e", "<key", "id", "=", "artist", "/", ">", "input_type", ":", "type", "of", "the", "input", ";", "key", "value", "or", "map" ]
python
train
markovmodel/PyEMMA
pyemma/util/annotators.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/util/annotators.py#L196-L228
def deprecated(*optional_message): """This is a decorator which can be used to mark functions as deprecated. It will result in a warning being emitted when the function is used. Parameters ---------- *optional_message : str an optional user level hint which should indicate which feature to use otherwise. """ def _deprecated(func, *args, **kw): filename, lineno = get_culprit() user_msg = 'Call to deprecated function "%s". Called from %s line %i. %s' \ % (func.__name__, filename, lineno, msg) warnings.warn_explicit( user_msg, category=PyEMMA_DeprecationWarning, filename=filename, lineno=lineno ) return func(*args, **kw) # add deprecation notice to func docstring: if len(optional_message) == 1 and callable(optional_message[0]): # this is the function itself, decorate! msg = "" return decorate(optional_message[0], _deprecated) else: # actually got a message (or empty parenthesis) msg = optional_message[0] if len(optional_message) > 0 else "" return decorator(_deprecated)
[ "def", "deprecated", "(", "*", "optional_message", ")", ":", "def", "_deprecated", "(", "func", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "filename", ",", "lineno", "=", "get_culprit", "(", ")", "user_msg", "=", "'Call to deprecated function \"%s\". Called from %s line %i. %s'", "%", "(", "func", ".", "__name__", ",", "filename", ",", "lineno", ",", "msg", ")", "warnings", ".", "warn_explicit", "(", "user_msg", ",", "category", "=", "PyEMMA_DeprecationWarning", ",", "filename", "=", "filename", ",", "lineno", "=", "lineno", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kw", ")", "# add deprecation notice to func docstring:", "if", "len", "(", "optional_message", ")", "==", "1", "and", "callable", "(", "optional_message", "[", "0", "]", ")", ":", "# this is the function itself, decorate!", "msg", "=", "\"\"", "return", "decorate", "(", "optional_message", "[", "0", "]", ",", "_deprecated", ")", "else", ":", "# actually got a message (or empty parenthesis)", "msg", "=", "optional_message", "[", "0", "]", "if", "len", "(", "optional_message", ")", ">", "0", "else", "\"\"", "return", "decorator", "(", "_deprecated", ")" ]
This is a decorator which can be used to mark functions as deprecated. It will result in a warning being emitted when the function is used. Parameters ---------- *optional_message : str an optional user level hint which should indicate which feature to use otherwise.
[ "This", "is", "a", "decorator", "which", "can", "be", "used", "to", "mark", "functions", "as", "deprecated", ".", "It", "will", "result", "in", "a", "warning", "being", "emitted", "when", "the", "function", "is", "used", "." ]
python
train
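A hedged usage sketch showing both decorator forms the branch at the bottom of deprecated distinguishes:

@deprecated
def old_api():
    pass

@deprecated("use new_api() instead")
def older_api():
    pass

older_api()   # warns with the call site's filename and line number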
mushkevych/scheduler
synergy/db/dao/job_dao.py
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/db/dao/job_dao.py#L56-L65
def get_by_id(self, process_name, db_id): """ method finds a single job record and returns it to the caller""" collection_name = self._get_job_collection_name(process_name) collection = self.ds.connection(collection_name) document = collection.find_one({'_id': ObjectId(db_id)}) if document is None: raise LookupError('MongoDB has no job record in collection {0} for {1}' .format(collection, db_id)) return Job.from_json(document)
[ "def", "get_by_id", "(", "self", ",", "process_name", ",", "db_id", ")", ":", "collection_name", "=", "self", ".", "_get_job_collection_name", "(", "process_name", ")", "collection", "=", "self", ".", "ds", ".", "connection", "(", "collection_name", ")", "document", "=", "collection", ".", "find_one", "(", "{", "'_id'", ":", "ObjectId", "(", "db_id", ")", "}", ")", "if", "document", "is", "None", ":", "raise", "LookupError", "(", "'MongoDB has no job record in collection {0} for {1}'", ".", "format", "(", "collection", ",", "db_id", ")", ")", "return", "Job", ".", "from_json", "(", "document", ")" ]
method finds a single job record and returns it to the caller
[ "method", "finds", "a", "single", "job", "record", "and", "returns", "it", "to", "the", "caller" ]
python
train
numenta/htmresearch
htmresearch/frameworks/rl/dqn.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/rl/dqn.py#L79-L100
def select_action(self, state): """ Select the best action for the given state using e-greedy exploration to minimize overfitting :return: tuple(action, value) """ value = 0 if self.steps < self.min_steps: action = np.random.randint(self.actions) else: self.eps = max(self.eps_end, self.eps * self.eps_decay) if random.random() < self.eps: action = np.random.randint(self.actions) else: self.local.eval() with torch.no_grad(): state = torch.tensor(state, device=self.device, dtype=torch.float).unsqueeze(0) Q = self.local(state) value, action = torch.max(Q, 1) return int(action), float(value)
[ "def", "select_action", "(", "self", ",", "state", ")", ":", "value", "=", "0", "if", "self", ".", "steps", "<", "self", ".", "min_steps", ":", "action", "=", "np", ".", "random", ".", "randint", "(", "self", ".", "actions", ")", "else", ":", "self", ".", "eps", "=", "max", "(", "self", ".", "eps_end", ",", "self", ".", "eps", "*", "self", ".", "eps_decay", ")", "if", "random", ".", "random", "(", ")", "<", "self", ".", "eps", ":", "action", "=", "np", ".", "random", ".", "randint", "(", "self", ".", "actions", ")", "else", ":", "self", ".", "local", ".", "eval", "(", ")", "with", "torch", ".", "no_grad", "(", ")", ":", "state", "=", "torch", ".", "tensor", "(", "state", ",", "device", "=", "self", ".", "device", ",", "dtype", "=", "torch", ".", "float", ")", ".", "unsqueeze", "(", "0", ")", "Q", "=", "self", ".", "local", "(", "state", ")", "value", ",", "action", "=", "torch", ".", "max", "(", "Q", ",", "1", ")", "return", "int", "(", "action", ")", ",", "float", "(", "value", ")" ]
Select the best action for the given state using e-greedy exploration to minimize overfitting :return: tuple(action, value)
[ "Select", "the", "best", "action", "for", "the", "given", "state", "using", "e", "-", "greedy", "exploration", "to", "minimize", "overfitting" ]
python
train
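The epsilon schedule in select_action above is multiplicative with a floor; a quick worked trace with made-up hyperparameters:

eps, eps_end, eps_decay = 1.0, 0.05, 0.9
trace = []
for _ in range(5):
    eps = max(eps_end, eps * eps_decay)
    trace.append(round(eps, 5))
# trace == [0.9, 0.81, 0.729, 0.6561, 0.59049]; eps never drops below eps_end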
mvn23/pyotgw
pyotgw/pyotgw.py
https://github.com/mvn23/pyotgw/blob/7612378ef4332b250176505af33e7536d6c9da78/pyotgw/pyotgw.py#L43-L103
async def connect(self, loop, port, baudrate=9600, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, connection_timeout=10, inactivity_timeout=5): """ Connect to Opentherm Gateway at @port. Initialize the parameters obtained from the PS= and PR= commands and returns the status dict with the obtained values. If called while connected, reconnect to the gateway. This method is a coroutine """ if self._connected: # We are actually reconnecting, cleanup first. _LOGGER.debug("Reconnecting to serial device on %s", port) if self._gpio_task: self._gpio_task.cancel() self._connected = False self._transport.close() await asyncio.sleep(3) self.loop = loop transport = None while transport is None: try: transport, protocol = ( await serial_asyncio.create_serial_connection( loop, otgw.protocol, port, baudrate, bytesize, parity, stopbits, connection_timeout)) except serial.serialutil.SerialException as e: if not self._conn_error: _LOGGER.error( "Could not connect to serial device on %s. " "Will keep trying. Reported error was: %s", port, e) self._conn_error = True transport = None await asyncio.sleep(5) self._conn_error = False _LOGGER.debug("Connected to serial device on %s", port) self._transport = transport self._protocol = protocol self.loop.create_task(self._protocol.set_update_cb(self._send_report)) if 0 < inactivity_timeout < 3: _LOGGER.error("Inactivity timeout too low. Should be at least 3 " "seconds, got %d", inactivity_timeout) if inactivity_timeout >= 3: async def reconnect(): """Reconnect to the OpenTherm Gateway.""" _LOGGER.debug("Scheduling reconnect...") await self.connect( loop, port, baudrate, bytesize, parity, stopbits, connection_timeout, inactivity_timeout) self.loop.create_task( self._protocol.setup_watchdog(reconnect, inactivity_timeout)) self._gpio_task = None self._connected = True await self.get_reports() await self.get_status() if (self._protocol.status.get(OTGW_GPIO_A) or self._protocol.status.get(OTGW_GPIO_B)): await self._poll_gpio(True) return dict(self._protocol.status)
[ "async", "def", "connect", "(", "self", ",", "loop", ",", "port", ",", "baudrate", "=", "9600", ",", "bytesize", "=", "serial", ".", "EIGHTBITS", ",", "parity", "=", "serial", ".", "PARITY_NONE", ",", "stopbits", "=", "serial", ".", "STOPBITS_ONE", ",", "connection_timeout", "=", "10", ",", "inactivity_timeout", "=", "5", ")", ":", "if", "self", ".", "_connected", ":", "# We are actually reconnecting, cleanup first.", "_LOGGER", ".", "debug", "(", "\"Reconnecting to serial device on %s\"", ",", "port", ")", "if", "self", ".", "_gpio_task", ":", "self", ".", "_gpio_task", ".", "cancel", "(", ")", "self", ".", "_connected", "=", "False", "self", ".", "_transport", ".", "close", "(", ")", "await", "asyncio", ".", "sleep", "(", "3", ")", "self", ".", "loop", "=", "loop", "transport", "=", "None", "while", "transport", "is", "None", ":", "try", ":", "transport", ",", "protocol", "=", "(", "await", "serial_asyncio", ".", "create_serial_connection", "(", "loop", ",", "otgw", ".", "protocol", ",", "port", ",", "baudrate", ",", "bytesize", ",", "parity", ",", "stopbits", ",", "connection_timeout", ")", ")", "except", "serial", ".", "serialutil", ".", "SerialException", "as", "e", ":", "if", "not", "self", ".", "_conn_error", ":", "_LOGGER", ".", "error", "(", "\"Could not connect to serial device on %s. \"", "\"Will keep trying. Reported error was: %s\"", ",", "port", ",", "e", ")", "self", ".", "_conn_error", "=", "True", "transport", "=", "None", "await", "asyncio", ".", "sleep", "(", "5", ")", "self", ".", "_conn_error", "=", "False", "_LOGGER", ".", "debug", "(", "\"Connected to serial device on %s\"", ",", "port", ")", "self", ".", "_transport", "=", "transport", "self", ".", "_protocol", "=", "protocol", "self", ".", "loop", ".", "create_task", "(", "self", ".", "_protocol", ".", "set_update_cb", "(", "self", ".", "_send_report", ")", ")", "if", "0", "<", "inactivity_timeout", "<", "3", ":", "_LOGGER", ".", "error", "(", "\"Inactivity timeout too low. Should be at least 3 \"", "\"seconds, got %d\"", ",", "inactivity_timeout", ")", "if", "inactivity_timeout", ">=", "3", ":", "async", "def", "reconnect", "(", ")", ":", "\"\"\"Reconnect to the OpenTherm Gateway.\"\"\"", "_LOGGER", ".", "debug", "(", "\"Scheduling reconnect...\"", ")", "await", "self", ".", "connect", "(", "loop", ",", "port", ",", "baudrate", ",", "bytesize", ",", "parity", ",", "stopbits", ",", "connection_timeout", ",", "inactivity_timeout", ")", "self", ".", "loop", ".", "create_task", "(", "self", ".", "_protocol", ".", "setup_watchdog", "(", "reconnect", ",", "inactivity_timeout", ")", ")", "self", ".", "_gpio_task", "=", "None", "self", ".", "_connected", "=", "True", "await", "self", ".", "get_reports", "(", ")", "await", "self", ".", "get_status", "(", ")", "if", "(", "self", ".", "_protocol", ".", "status", ".", "get", "(", "OTGW_GPIO_A", ")", "or", "self", ".", "_protocol", ".", "status", ".", "get", "(", "OTGW_GPIO_B", ")", ")", ":", "await", "self", ".", "_poll_gpio", "(", "True", ")", "return", "dict", "(", "self", ".", "_protocol", ".", "status", ")" ]
Connect to Opentherm Gateway at @port.
Initialize the parameters obtained from the PS= and PR=
commands and return the status dict with the obtained values.
If called while connected, reconnect to the gateway.

This method is a coroutine
[ "Connect", "to", "Opentherm", "Gateway", "at", "@port", ".", "Initialize", "the", "parameters", "obtained", "from", "the", "PS", "=", "and", "PR", "=", "commands", "and", "returns", "the", "status", "dict", "with", "the", "obtained", "values", ".", "If", "called", "while", "connected", "reconnect", "to", "the", "gateway", "." ]
python
train
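A hedged usage sketch for the coroutine above; the pyotgw entry-point class and the serial device path are assumptions based on typical usage, not taken from this record.

import asyncio
import pyotgw

async def main():
    gw = pyotgw.pyotgw()  # assumed constructor for the gateway wrapper
    # Returns the status dict populated from the PS=/PR= commands.
    status = await gw.connect(asyncio.get_event_loop(), '/dev/ttyUSB0')
    print(status)

asyncio.run(main())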
inspirehep/harvesting-kit
harvestingkit/utils.py
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/utils.py#L153-L157
def collapse_initials(name):
    """Remove the space between initials, e.g. T. A. --> T.A."""
    if len(name.split(".")) > 1:
        name = re.sub(r'([A-Z]\.)[\s\-]+(?=[A-Z]\.)', r'\1', name)
    return name
[ "def", "collapse_initials", "(", "name", ")", ":", "if", "len", "(", "name", ".", "split", "(", "\".\"", ")", ")", ">", "1", ":", "name", "=", "re", ".", "sub", "(", "r'([A-Z]\\.)[\\s\\-]+(?=[A-Z]\\.)'", ",", "r'\\1'", ",", "name", ")", "return", "name" ]
Remove the space between initials, e.g. T. A. --> T.A.
[ "Remove", "the", "space", "between", "initials", "eg", "T", ".", "A", ".", "--", ">", "T", ".", "A", "." ]
python
valid
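A quick check of the documented behaviour; the regex only fires when the name contains more than one period, and it also swallows hyphens between initials.

import re

def collapse_initials(name):
    if len(name.split(".")) > 1:
        name = re.sub(r'([A-Z]\.)[\s\-]+(?=[A-Z]\.)', r'\1', name)
    return name

print(collapse_initials("T. A. Edison"))   # -> T.A. Edison
print(collapse_initials("T.-A. Edison"))   # -> T.A. Edison (hyphen removed)
print(collapse_initials("Edison"))         # -> Edison (no period, left unchanged)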
GiulioRossetti/dynetx
dynetx/classes/dyndigraph.py
https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyndigraph.py#L872-L886
def has_predecessor(self, u, v, t=None): """Return True if node u has predecessor v at time t (optional). This is true if graph has the edge u<-v. Parameters ---------- u, v : nodes Nodes can be, for example, strings or numbers. Nodes must be hashable (and not None) Python objects. t : snapshot id (default=None) If None will be returned the presence of the interaction on the flattened graph. """ return self.has_interaction(v, u, t)
[ "def", "has_predecessor", "(", "self", ",", "u", ",", "v", ",", "t", "=", "None", ")", ":", "return", "self", ".", "has_interaction", "(", "v", ",", "u", ",", "t", ")" ]
Return True if node u has predecessor v at time t (optional). This is true if graph has the edge u<-v. Parameters ---------- u, v : nodes Nodes can be, for example, strings or numbers. Nodes must be hashable (and not None) Python objects. t : snapshot id (default=None) If None will be returned the presence of the interaction on the flattened graph.
[ "Return", "True", "if", "node", "u", "has", "predecessor", "v", "at", "time", "t", "(", "optional", ")", "." ]
python
train
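An illustrative call pattern, assuming dynetx's public DynDiGraph constructor and add_interaction method; only the directedness and the snapshot argument are being demonstrated.

import dynetx as dn

g = dn.DynDiGraph()
g.add_interaction(1, 2, t=0)           # directed interaction 1 -> 2 at snapshot 0

print(g.has_predecessor(2, 1, t=0))    # True: graph has the edge 2 <- 1 at t=0
print(g.has_predecessor(1, 2, t=0))    # False: direction matters
print(g.has_predecessor(2, 1))         # True on the flattened graph (t=None)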
Kozea/pygal
pygal/svg.py
https://github.com/Kozea/pygal/blob/5e25c98a59a0642eecd9fcc5dbfeeb2190fbb5e7/pygal/svg.py#L263-L300
def slice( self, serie_node, node, radius, small_radius, angle, start_angle, center, val, i, metadata ): """Draw a pie slice""" if angle == 2 * pi: angle = nearly_2pi if angle > 0: to = [ coord_abs_project(center, radius, start_angle), coord_abs_project(center, radius, start_angle + angle), coord_abs_project(center, small_radius, start_angle + angle), coord_abs_project(center, small_radius, start_angle) ] rv = self.node( node, 'path', d='M%s A%s 0 %d 1 %s L%s A%s 0 %d 0 %s z' % ( to[0], coord_dual(radius), int(angle > pi), to[1], to[2], coord_dual(small_radius), int(angle > pi), to[3] ), class_='slice reactive tooltip-trigger' ) else: rv = None x, y = coord_diff( center, coord_project((radius + small_radius) / 2, start_angle + angle / 2) ) self.graph._tooltip_data( node, val, x, y, "centered", self.graph._x_labels and self.graph._x_labels[i][0] ) if angle >= 0.3: # 0.3 radians is about 17 degrees self.graph._static_value(serie_node, val, x, y, metadata) return rv
[ "def", "slice", "(", "self", ",", "serie_node", ",", "node", ",", "radius", ",", "small_radius", ",", "angle", ",", "start_angle", ",", "center", ",", "val", ",", "i", ",", "metadata", ")", ":", "if", "angle", "==", "2", "*", "pi", ":", "angle", "=", "nearly_2pi", "if", "angle", ">", "0", ":", "to", "=", "[", "coord_abs_project", "(", "center", ",", "radius", ",", "start_angle", ")", ",", "coord_abs_project", "(", "center", ",", "radius", ",", "start_angle", "+", "angle", ")", ",", "coord_abs_project", "(", "center", ",", "small_radius", ",", "start_angle", "+", "angle", ")", ",", "coord_abs_project", "(", "center", ",", "small_radius", ",", "start_angle", ")", "]", "rv", "=", "self", ".", "node", "(", "node", ",", "'path'", ",", "d", "=", "'M%s A%s 0 %d 1 %s L%s A%s 0 %d 0 %s z'", "%", "(", "to", "[", "0", "]", ",", "coord_dual", "(", "radius", ")", ",", "int", "(", "angle", ">", "pi", ")", ",", "to", "[", "1", "]", ",", "to", "[", "2", "]", ",", "coord_dual", "(", "small_radius", ")", ",", "int", "(", "angle", ">", "pi", ")", ",", "to", "[", "3", "]", ")", ",", "class_", "=", "'slice reactive tooltip-trigger'", ")", "else", ":", "rv", "=", "None", "x", ",", "y", "=", "coord_diff", "(", "center", ",", "coord_project", "(", "(", "radius", "+", "small_radius", ")", "/", "2", ",", "start_angle", "+", "angle", "/", "2", ")", ")", "self", ".", "graph", ".", "_tooltip_data", "(", "node", ",", "val", ",", "x", ",", "y", ",", "\"centered\"", ",", "self", ".", "graph", ".", "_x_labels", "and", "self", ".", "graph", ".", "_x_labels", "[", "i", "]", "[", "0", "]", ")", "if", "angle", ">=", "0.3", ":", "# 0.3 radians is about 17 degrees", "self", ".", "graph", ".", "_static_value", "(", "serie_node", ",", "val", ",", "x", ",", "y", ",", "metadata", ")", "return", "rv" ]
Draw a pie slice
[ "Draw", "a", "pie", "slice" ]
python
train
ianclegg/winrmlib
winrmlib/shell.py
https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/shell.py#L77-L101
def run(self, command, arguments=(), console_mode_stdin=True, skip_cmd_shell=False):
    """Runs a command in the remote shell and returns its command id.

    :param command: The command to be executed
    :type command: str.
    :param arguments: A list of arguments to be passed to the command
    :type arguments: list.
    :returns: str -- the command id of the started command.
    :raises: AttributeError, KeyError

    iclegg: blocking i/o operations are slow, doesn't Python have a modern
    'async' mechanism rather than relying on 80's style callbacks?
    """
    logging.info('running command: ' + command)
    resource = ResourceLocator(CommandShell.ShellResource)
    resource.add_selector('ShellId', self.__shell_id)
    resource.add_option('WINRS_SKIP_CMD_SHELL', ['FALSE', 'TRUE'][bool(skip_cmd_shell)], True)
    resource.add_option('WINRS_CONSOLEMODE_STDIN', ['FALSE', 'TRUE'][bool(console_mode_stdin)], True)

    command = OrderedDict([('rsp:Command', command)])
    command['rsp:Arguments'] = list(arguments)

    response = self.session.command(resource, {'rsp:CommandLine': command})
    command_id = response['rsp:CommandResponse']['rsp:CommandId']
    logging.info('receive command: ' + command_id)
    return command_id
[ "def", "run", "(", "self", ",", "command", ",", "arguments", "=", "(", ")", ",", "console_mode_stdin", "=", "True", ",", "skip_cmd_shell", "=", "False", ")", ":", "logging", ".", "info", "(", "'running command: '", "+", "command", ")", "resource", "=", "ResourceLocator", "(", "CommandShell", ".", "ShellResource", ")", "resource", ".", "add_selector", "(", "'ShellId'", ",", "self", ".", "__shell_id", ")", "resource", ".", "add_option", "(", "'WINRS_SKIP_CMD_SHELL'", ",", "[", "'FALSE'", ",", "'TRUE'", "]", "[", "bool", "(", "skip_cmd_shell", ")", "]", ",", "True", ")", "resource", ".", "add_option", "(", "'WINRS_CONSOLEMODE_STDIN'", ",", "[", "'FALSE'", ",", "'TRUE'", "]", "[", "bool", "(", "console_mode_stdin", ")", "]", ",", "True", ")", "command", "=", "OrderedDict", "(", "[", "(", "'rsp:Command'", ",", "command", ")", "]", ")", "command", "[", "'rsp:Arguments'", "]", "=", "list", "(", "arguments", ")", "response", "=", "self", ".", "session", ".", "command", "(", "resource", ",", "{", "'rsp:CommandLine'", ":", "command", "}", ")", "command_id", "=", "response", "[", "'rsp:CommandResponse'", "]", "[", "'rsp:CommandId'", "]", "logging", ".", "info", "(", "'receive command: '", "+", "command_id", ")", "return", "command_id" ]
Runs a command in the remote shell and returns its command id.

:param command: The command to be executed
:type command: str.
:param arguments: A list of arguments to be passed to the command
:type arguments: list.
:returns: str -- the command id of the started command.
:raises: AttributeError, KeyError

iclegg: blocking i/o operations are slow, doesn't Python have a modern 'async' mechanism rather than relying on 80's style callbacks?
[ "This", "function", "does", "something", ".", ":", "param", "command", ":", "The", "command", "to", "be", "executed", ":", "type", "name", ":", "str", ".", ":", "param", "arguments", ":", "A", "list", "of", "arguments", "to", "be", "passed", "to", "the", "command", ":", "type", "state", ":", "str", ".", ":", "returns", ":", "int", "--", "the", "return", "code", ".", ":", "raises", ":", "AttributeError", "KeyError" ]
python
train
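A hedged sketch of driving the shell above; the CommandShell constructor arguments and the receive call are assumptions modelled on common WinRM client usage, not verified against this record.

from winrmlib.shell import CommandShell

# Endpoint and credentials are placeholders.
shell = CommandShell('http://host:5985/wsman', 'Administrator', 'password')
shell.open()                                    # assumed to create the remote shell
command_id = shell.run('ipconfig', ['/all'])    # returns the command id logged above
stdout, stderr, rc = shell.receive(command_id)  # assumed companion method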
larryng/narwal
narwal/reddit.py
https://github.com/larryng/narwal/blob/58c409a475c8ed865579a61d7010162ed8cef597/narwal/reddit.py#L459-L472
def edit(self, id_, text): """Login required. Sends POST to change selftext or comment text to ``text``. Returns :class:`things.Comment` or :class:`things.Link` object depending on what's being edited. Raises :class:`UnexpectedResponse` if neither is returned. URL: ``http://www.reddit.com/api/editusertext/`` :param id\_: full id of link or comment to edit :param text: new self or comment text """ data = dict(thing_id=id_, text=text) j = self.post('api', 'editusertext', data=data) try: return self._thingify(j['json']['data']['things'][0]) except Exception: raise UnexpectedResponse(j)
[ "def", "edit", "(", "self", ",", "id_", ",", "text", ")", ":", "data", "=", "dict", "(", "thing_id", "=", "id_", ",", "text", "=", "text", ")", "j", "=", "self", ".", "post", "(", "'api'", ",", "'editusertext'", ",", "data", "=", "data", ")", "try", ":", "return", "self", ".", "_thingify", "(", "j", "[", "'json'", "]", "[", "'data'", "]", "[", "'things'", "]", "[", "0", "]", ")", "except", "Exception", ":", "raise", "UnexpectedResponse", "(", "j", ")" ]
Login required. Sends POST to change selftext or comment text to ``text``. Returns :class:`things.Comment` or :class:`things.Link` object depending on what's being edited. Raises :class:`UnexpectedResponse` if neither is returned. URL: ``http://www.reddit.com/api/editusertext/`` :param id\_: full id of link or comment to edit :param text: new self or comment text
[ "Login", "required", ".", "Sends", "POST", "to", "change", "selftext", "or", "comment", "text", "to", "text", ".", "Returns", ":", "class", ":", "things", ".", "Comment", "or", ":", "class", ":", "things", ".", "Link", "object", "depending", "on", "what", "s", "being", "edited", ".", "Raises", ":", "class", ":", "UnexpectedResponse", "if", "neither", "is", "returned", ".", "URL", ":", "http", ":", "//", "www", ".", "reddit", ".", "com", "/", "api", "/", "editusertext", "/", ":", "param", "id", "\\", "_", ":", "full", "id", "of", "link", "or", "comment", "to", "edit", ":", "param", "text", ":", "new", "self", "or", "comment", "text" ]
python
train
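A usage sketch, assuming an authenticated narwal session; the constructor arguments and the thing id are placeholders (full ids use reddit's type prefixes, t1_ for comments and t3_ for links).

from narwal import Reddit

session = Reddit(user_agent='example-agent')   # constructor args assumed
session.login('username', 'password')
# Returns a things.Comment here because a t1_ id names a comment.
edited = session.edit('t1_abc123', 'updated comment text')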
javipalanca/spade
spade/trace.py
https://github.com/javipalanca/spade/blob/59942bd1a1edae4c807d06cabb178d5630cbf61b/spade/trace.py#L60-L71
def received(self, limit=None):
    """
    Returns all the events that have been received (excluding sent events), up to an optional limit

    Args:
        limit (int, optional): the max length of the events to return (Default value = None)

    Returns:
        list: a list of received events
    """
    return list(itertools.islice((itertools.filterfalse(lambda x: x[1].sent, self.store)), limit))[::-1]
[ "def", "received", "(", "self", ",", "limit", "=", "None", ")", ":", "return", "list", "(", "itertools", ".", "islice", "(", "(", "itertools", ".", "filterfalse", "(", "lambda", "x", ":", "x", "[", "1", "]", ".", "sent", ",", "self", ".", "store", ")", ")", ",", "limit", ")", ")", "[", ":", ":", "-", "1", "]" ]
Returns all the events that have been received (excluding sent events), up to an optional limit

Args:
    limit (int, optional): the max length of the events to return (Default value = None)

Returns:
    list: a list of received events
[ "Returns", "all", "the", "events", "that", "have", "been", "received", "(", "excluding", "sent", "events", ")", "until", "a", "limit", "if", "defined" ]
python
train
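The itertools pipeline above reads in three steps: filterfalse drops events whose .sent flag is set, islice applies the optional limit, and the trailing [::-1] reverses the selected slice (newest-first if the store is appended in order). A standalone sketch with namedtuples standing in for stored events:

import itertools
from collections import namedtuple

Event = namedtuple('Event', 'sent payload')
store = [(0, Event(True, 'a')), (1, Event(False, 'b')), (2, Event(False, 'c'))]

received = list(itertools.islice(
    itertools.filterfalse(lambda x: x[1].sent, store), 2))[::-1]
print(received)
# [(2, Event(sent=False, payload='c')), (1, Event(sent=False, payload='b'))]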
pandas-dev/pandas
pandas/io/formats/format.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/format.py#L592-L650
def to_string(self): """ Render a DataFrame to a console-friendly tabular output. """ from pandas import Series frame = self.frame if len(frame.columns) == 0 or len(frame.index) == 0: info_line = ('Empty {name}\nColumns: {col}\nIndex: {idx}' .format(name=type(self.frame).__name__, col=pprint_thing(frame.columns), idx=pprint_thing(frame.index))) text = info_line else: strcols = self._to_str_columns() if self.line_width is None: # no need to wrap around just print # the whole frame text = self.adj.adjoin(1, *strcols) elif (not isinstance(self.max_cols, int) or self.max_cols > 0): # need to wrap around text = self._join_multiline(*strcols) else: # max_cols == 0. Try to fit frame to terminal text = self.adj.adjoin(1, *strcols).split('\n') max_len = Series(text).str.len().max() # plus truncate dot col dif = max_len - self.w # '+ 1' to avoid too wide repr (GH PR #17023) adj_dif = dif + 1 col_lens = Series([Series(ele).apply(len).max() for ele in strcols]) n_cols = len(col_lens) counter = 0 while adj_dif > 0 and n_cols > 1: counter += 1 mid = int(round(n_cols / 2.)) mid_ix = col_lens.index[mid] col_len = col_lens[mid_ix] # adjoin adds one adj_dif -= (col_len + 1) col_lens = col_lens.drop(mid_ix) n_cols = len(col_lens) # subtract index column max_cols_adj = n_cols - self.index # GH-21180. Ensure that we print at least two. max_cols_adj = max(max_cols_adj, 2) self.max_cols_adj = max_cols_adj # Call again _chk_truncate to cut frame appropriately # and then generate string representation self._chk_truncate() strcols = self._to_str_columns() text = self.adj.adjoin(1, *strcols) self.buf.writelines(text) if self.should_show_dimensions: self.buf.write("\n\n[{nrows} rows x {ncols} columns]" .format(nrows=len(frame), ncols=len(frame.columns)))
[ "def", "to_string", "(", "self", ")", ":", "from", "pandas", "import", "Series", "frame", "=", "self", ".", "frame", "if", "len", "(", "frame", ".", "columns", ")", "==", "0", "or", "len", "(", "frame", ".", "index", ")", "==", "0", ":", "info_line", "=", "(", "'Empty {name}\\nColumns: {col}\\nIndex: {idx}'", ".", "format", "(", "name", "=", "type", "(", "self", ".", "frame", ")", ".", "__name__", ",", "col", "=", "pprint_thing", "(", "frame", ".", "columns", ")", ",", "idx", "=", "pprint_thing", "(", "frame", ".", "index", ")", ")", ")", "text", "=", "info_line", "else", ":", "strcols", "=", "self", ".", "_to_str_columns", "(", ")", "if", "self", ".", "line_width", "is", "None", ":", "# no need to wrap around just print", "# the whole frame", "text", "=", "self", ".", "adj", ".", "adjoin", "(", "1", ",", "*", "strcols", ")", "elif", "(", "not", "isinstance", "(", "self", ".", "max_cols", ",", "int", ")", "or", "self", ".", "max_cols", ">", "0", ")", ":", "# need to wrap around", "text", "=", "self", ".", "_join_multiline", "(", "*", "strcols", ")", "else", ":", "# max_cols == 0. Try to fit frame to terminal", "text", "=", "self", ".", "adj", ".", "adjoin", "(", "1", ",", "*", "strcols", ")", ".", "split", "(", "'\\n'", ")", "max_len", "=", "Series", "(", "text", ")", ".", "str", ".", "len", "(", ")", ".", "max", "(", ")", "# plus truncate dot col", "dif", "=", "max_len", "-", "self", ".", "w", "# '+ 1' to avoid too wide repr (GH PR #17023)", "adj_dif", "=", "dif", "+", "1", "col_lens", "=", "Series", "(", "[", "Series", "(", "ele", ")", ".", "apply", "(", "len", ")", ".", "max", "(", ")", "for", "ele", "in", "strcols", "]", ")", "n_cols", "=", "len", "(", "col_lens", ")", "counter", "=", "0", "while", "adj_dif", ">", "0", "and", "n_cols", ">", "1", ":", "counter", "+=", "1", "mid", "=", "int", "(", "round", "(", "n_cols", "/", "2.", ")", ")", "mid_ix", "=", "col_lens", ".", "index", "[", "mid", "]", "col_len", "=", "col_lens", "[", "mid_ix", "]", "# adjoin adds one", "adj_dif", "-=", "(", "col_len", "+", "1", ")", "col_lens", "=", "col_lens", ".", "drop", "(", "mid_ix", ")", "n_cols", "=", "len", "(", "col_lens", ")", "# subtract index column", "max_cols_adj", "=", "n_cols", "-", "self", ".", "index", "# GH-21180. Ensure that we print at least two.", "max_cols_adj", "=", "max", "(", "max_cols_adj", ",", "2", ")", "self", ".", "max_cols_adj", "=", "max_cols_adj", "# Call again _chk_truncate to cut frame appropriately", "# and then generate string representation", "self", ".", "_chk_truncate", "(", ")", "strcols", "=", "self", ".", "_to_str_columns", "(", ")", "text", "=", "self", ".", "adj", ".", "adjoin", "(", "1", ",", "*", "strcols", ")", "self", ".", "buf", ".", "writelines", "(", "text", ")", "if", "self", ".", "should_show_dimensions", ":", "self", ".", "buf", ".", "write", "(", "\"\\n\\n[{nrows} rows x {ncols} columns]\"", ".", "format", "(", "nrows", "=", "len", "(", "frame", ")", ",", "ncols", "=", "len", "(", "frame", ".", "columns", ")", ")", ")" ]
Render a DataFrame to a console-friendly tabular output.
[ "Render", "a", "DataFrame", "to", "a", "console", "-", "friendly", "tabular", "output", "." ]
python
train
raiden-network/raiden
tools/debugging/replay_wal.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/tools/debugging/replay_wal.py#L81-L95
def _address_rxp(self, addr):
    """ Create a regex string for addresses that matches several representations:
        - with(out) '0x' prefix
        - `pex` version
        This function takes care of maintaining additional lookup keys for substring matches.
        In case the given string is not an address, it returns the original string.
    """
    try:
        addr = to_checksum_address(addr)
        rxp = '(?:0x)?' + pex(address_checksum_and_decode(addr)) + f'(?:{addr.lower()[10:]})?'
        self._extra_keys[pex(address_checksum_and_decode(addr))] = addr.lower()
        self._extra_keys[addr[2:].lower()] = addr.lower()
    except ValueError:
        rxp = addr
    return rxp
[ "def", "_address_rxp", "(", "self", ",", "addr", ")", ":", "try", ":", "addr", "=", "to_checksum_address", "(", "addr", ")", "rxp", "=", "'(?:0x)?'", "+", "pex", "(", "address_checksum_and_decode", "(", "addr", ")", ")", "+", "f'(?:{addr.lower()[10:]})?'", "self", ".", "_extra_keys", "[", "pex", "(", "address_checksum_and_decode", "(", "addr", ")", ")", "]", "=", "addr", ".", "lower", "(", ")", "self", ".", "_extra_keys", "[", "addr", "[", "2", ":", "]", ".", "lower", "(", ")", "]", "=", "addr", ".", "lower", "(", ")", "except", "ValueError", ":", "rxp", "=", "addr", "return", "rxp" ]
Create a regex string for addresses that matches several representations:
- with(out) '0x' prefix
- `pex` version
This function takes care of maintaining additional lookup keys for substring matches.
In case the given string is not an address, it returns the original string.
[ "Create", "a", "regex", "string", "for", "addresses", "that", "matches", "several", "representations", ":", "-", "with", "(", "out", ")", "0x", "prefix", "-", "pex", "version", "This", "function", "takes", "care", "of", "maintaining", "additional", "lookup", "keys", "for", "substring", "matches", ".", "In", "case", "the", "given", "string", "is", "no", "address", "it", "returns", "the", "original", "string", "." ]
python
train
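What the generated pattern matches, sketched with pex() simplified to the first four address bytes so the snippet runs without raiden's helpers; the address itself is only an example.

import re

addr = '0xAb5801a7D398351b8bE11C439e05C5B3259aeC9B'   # example checksum address
pex8 = addr[2:10].lower()                             # simplified pex(): 8 hex chars
rxp = '(?:0x)?' + pex8 + f'(?:{addr.lower()[10:]})?'

print(bool(re.search(rxp, addr.lower())))   # True: full lowercase form matches
print(bool(re.search(rxp, pex8)))           # True: bare pex form matches too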
luckydonald/pytgbot
code_generation/output/pytgbot/bot.py
https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/code_generation/output/pytgbot/bot.py#L2055-L2087
def get_chat(self, chat_id): """ Use this method to get up to date information about the chat (current name of the user for one-on-one conversations, current username of a user, group or channel, etc.). Returns a Chat object on success. https://core.telegram.org/bots/api#getchat Parameters: :param chat_id: Unique identifier for the target chat or username of the target supergroup or channel (in the format @channelusername) :type chat_id: int | str|unicode Returns: :return: Returns a Chat object on success :rtype: pytgbot.api_types.receivable.peer.Chat """ assert_type_or_raise(chat_id, (int, unicode_type), parameter_name="chat_id") result = self.do("getChat", chat_id=chat_id) if self.return_python_objects: logger.debug("Trying to parse {data}".format(data=repr(result))) from pytgbot.api_types.receivable.peer import Chat try: return Chat.from_array(result) except TgApiParseException: logger.debug("Failed parsing as api_type Chat", exc_info=True) # end try # no valid parsing so far raise TgApiParseException("Could not parse result.") # See debug log for details! # end if return_python_objects return result
[ "def", "get_chat", "(", "self", ",", "chat_id", ")", ":", "assert_type_or_raise", "(", "chat_id", ",", "(", "int", ",", "unicode_type", ")", ",", "parameter_name", "=", "\"chat_id\"", ")", "result", "=", "self", ".", "do", "(", "\"getChat\"", ",", "chat_id", "=", "chat_id", ")", "if", "self", ".", "return_python_objects", ":", "logger", ".", "debug", "(", "\"Trying to parse {data}\"", ".", "format", "(", "data", "=", "repr", "(", "result", ")", ")", ")", "from", "pytgbot", ".", "api_types", ".", "receivable", ".", "peer", "import", "Chat", "try", ":", "return", "Chat", ".", "from_array", "(", "result", ")", "except", "TgApiParseException", ":", "logger", ".", "debug", "(", "\"Failed parsing as api_type Chat\"", ",", "exc_info", "=", "True", ")", "# end try", "# no valid parsing so far", "raise", "TgApiParseException", "(", "\"Could not parse result.\"", ")", "# See debug log for details!", "# end if return_python_objects", "return", "result" ]
Use this method to get up to date information about the chat (current name of the user for one-on-one conversations, current username of a user, group or channel, etc.). Returns a Chat object on success. https://core.telegram.org/bots/api#getchat Parameters: :param chat_id: Unique identifier for the target chat or username of the target supergroup or channel (in the format @channelusername) :type chat_id: int | str|unicode Returns: :return: Returns a Chat object on success :rtype: pytgbot.api_types.receivable.peer.Chat
[ "Use", "this", "method", "to", "get", "up", "to", "date", "information", "about", "the", "chat", "(", "current", "name", "of", "the", "user", "for", "one", "-", "on", "-", "one", "conversations", "current", "username", "of", "a", "user", "group", "or", "channel", "etc", ".", ")", ".", "Returns", "a", "Chat", "object", "on", "success", "." ]
python
train
uogbuji/amara3-xml
pylib/uxml/uxpath/functions.py
https://github.com/uogbuji/amara3-xml/blob/88c18876418cffc89bb85b4a3193e5002b6b39a6/pylib/uxml/uxpath/functions.py#L16-L22
def boolean_arg(ctx, obj): ''' Handles LiteralObjects as well as computable arguments ''' if hasattr(obj, 'compute'): obj = next(obj.compute(ctx), False) return to_boolean(obj)
[ "def", "boolean_arg", "(", "ctx", ",", "obj", ")", ":", "if", "hasattr", "(", "obj", ",", "'compute'", ")", ":", "obj", "=", "next", "(", "obj", ".", "compute", "(", "ctx", ")", ",", "False", ")", "return", "to_boolean", "(", "obj", ")" ]
Handles LiteralObjects as well as computable arguments
[ "Handles", "LiteralObjects", "as", "well", "as", "computable", "arguments" ]
python
test
fermiPy/fermipy
fermipy/jobs/slac_impl.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/slac_impl.py#L16-L27
def make_nfs_path(path):
    """Make an NFS version of a file path. This just puts /nfs at the beginning instead of /gpfs"""
    if os.path.isabs(path):
        fullpath = path
    else:
        fullpath = os.path.abspath(path)
    if len(fullpath) < 6:
        return fullpath
    if fullpath[0:6] == '/gpfs/':
        fullpath = fullpath.replace('/gpfs/', '/nfs/')
    return fullpath
[ "def", "make_nfs_path", "(", "path", ")", ":", "if", "os", ".", "path", ".", "isabs", "(", "path", ")", ":", "fullpath", "=", "path", "else", ":", "fullpath", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", "if", "len", "(", "fullpath", ")", "<", "6", ":", "return", "fullpath", "if", "fullpath", "[", "0", ":", "6", "]", "==", "'/gpfs/'", ":", "fullpath", "=", "fullpath", ".", "replace", "(", "'/gpfs/'", ",", "'/nfs/'", ")", "return", "fullpath" ]
Make an NFS version of a file path. This just puts /nfs at the beginning instead of /gpfs
[ "Make", "a", "nfs", "version", "of", "a", "file", "path", ".", "This", "just", "puts", "/", "nfs", "at", "the", "beginning", "instead", "of", "/", "gpfs" ]
python
train
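The substitution only rewrites absolute paths rooted at /gpfs/; everything else passes through after being made absolute. The example paths below are placeholders.

from fermipy.jobs.slac_impl import make_nfs_path

print(make_nfs_path('/gpfs/slac/g/ki/data/file.fits'))  # -> /nfs/slac/g/ki/data/file.fits
print(make_nfs_path('/tmp/file.fits'))                  # unchanged: no /gpfs/ prefix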
pioneers/python-grizzly
grizzly/__init__.py
https://github.com/pioneers/python-grizzly/blob/a6482c722d5712d6ebe12d48921815276c826c7f/grizzly/__init__.py#L100-L124
def get_all_ids(idVendor=GrizzlyUSB.ID_VENDOR, idProduct=GrizzlyUSB.ID_PRODUCT):
    """ Scans for grizzlies that have not been bound (constructed) and returns
    a list of their ids (motor numbers)."""
    all_dev = GrizzlyUSB.get_all_usb_devices(idVendor, idProduct)
    if len(all_dev) <= 0:
        raise usb.USBError("Could not find any GrizzlyBear device (idVendor=%d, idProduct=%d)" % (idVendor, idProduct))
    else:
        all_addresses = []
        # bound devices is a list of devices that are already busy.
        bound_devices = []
        for device in all_dev:
            internal_addr = GrizzlyUSB.get_device_address(device)

            if internal_addr == GrizzlyUSB.USB_DEVICE_ERROR:
                # device bound
                bound_devices.append(device)
            else:
                all_addresses.append(internal_addr)

        # we release all devices that we aren't using and aren't bound.
        for device in all_dev:
            if device not in bound_devices:
                usb.util.dispose_resources(device)

        return map(addr_to_id, all_addresses)
[ "def", "get_all_ids", "(", "idVendor", "=", "GrizzlyUSB", ".", "ID_VENDOR", ",", "idProduct", "=", "GrizzlyUSB", ".", "ID_PRODUCT", ")", ":", "all_dev", "=", "GrizzlyUSB", ".", "get_all_usb_devices", "(", "idVendor", ",", "idProduct", ")", "if", "len", "(", "all_dev", ")", "<=", "0", ":", "raise", "usb", ".", "USBError", "(", "\"Could not find any GrizzlyBear device (idVendor=%d, idProduct=%d)\"", "%", "(", "idVendor", ",", "idProduct", ")", ")", "else", ":", "all_addresses", "=", "[", "]", "# bound devices is a list of devices that are already busy.", "bound_devices", "=", "[", "]", "for", "device", "in", "all_dev", ":", "internal_addr", "=", "GrizzlyUSB", ".", "get_device_address", "(", "device", ")", "if", "internal_addr", "==", "GrizzlyUSB", ".", "USB_DEVICE_ERROR", ":", "# device bound", "bound_devices", ".", "append", "(", "device", ")", "else", ":", "all_addresses", ".", "append", "(", "internal_addr", ")", "# we release all devices that we aren't using and aren't bound.", "for", "device", "in", "all_dev", ":", "if", "device", "not", "in", "bound_devices", ":", "usb", ".", "util", ".", "dispose_resources", "(", "device", ")", "return", "map", "(", "addr_to_id", ",", "all_addresses", ")" ]
Scans for grizzlies that have not been bound (constructed) and returns a list of their ids (motor numbers).
[ "Scans", "for", "grizzlies", "that", "have", "not", "been", "bound", "or", "constructed", "and", "returns", "a", "list", "of", "their", "id", "s", "or", "motor", "number", "." ]
python
train
StackStorm/pybind
pybind/slxos/v17s_1_02/routing_system/interface/ve/ipv6/interface_ospfv3_conf/authentication/ipsec_auth_key_config/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/routing_system/interface/ve/ipv6/interface_ospfv3_conf/authentication/ipsec_auth_key_config/__init__.py#L250-L273
def _set_esp(self, v, load=False): """ Setter method for esp, mapped from YANG variable /routing_system/interface/ve/ipv6/interface_ospfv3_conf/authentication/ipsec_auth_key_config/esp (algorithm-type-esp) If this variable is read-only (config: false) in the source YANG file, then _set_esp is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_esp() directly. YANG Description: Specify Encapsulating Security Payload (ESP) as the protocol to provide packet-level security. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'NULL': {'value': 1}},), is_leaf=True, yang_name="esp", rest_name="esp", parent=self, choice=(u'ch-algorithm', u'ca-esp-algorithm'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify Encapsulating Security Payload (ESP)', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='algorithm-type-esp', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """esp must be of a type compatible with algorithm-type-esp""", 'defined-type': "brocade-ospfv3:algorithm-type-esp", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'NULL': {'value': 1}},), is_leaf=True, yang_name="esp", rest_name="esp", parent=self, choice=(u'ch-algorithm', u'ca-esp-algorithm'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify Encapsulating Security Payload (ESP)', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='algorithm-type-esp', is_config=True)""", }) self.__esp = t if hasattr(self, '_set'): self._set()
[ "def", "_set_esp", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "RestrictedClassType", "(", "base_type", "=", "unicode", ",", "restriction_type", "=", "\"dict_key\"", ",", "restriction_arg", "=", "{", "u'NULL'", ":", "{", "'value'", ":", "1", "}", "}", ",", ")", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"esp\"", ",", "rest_name", "=", "\"esp\"", ",", "parent", "=", "self", ",", "choice", "=", "(", "u'ch-algorithm'", ",", "u'ca-esp-algorithm'", ")", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Specify Encapsulating Security Payload (ESP)'", ",", "u'cli-incomplete-command'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-ospfv3'", ",", "defining_module", "=", "'brocade-ospfv3'", ",", "yang_type", "=", "'algorithm-type-esp'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"esp must be of a type compatible with algorithm-type-esp\"\"\"", ",", "'defined-type'", ":", "\"brocade-ospfv3:algorithm-type-esp\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'NULL': {'value': 1}},), is_leaf=True, yang_name=\"esp\", rest_name=\"esp\", parent=self, choice=(u'ch-algorithm', u'ca-esp-algorithm'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify Encapsulating Security Payload (ESP)', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='algorithm-type-esp', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__esp", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for esp, mapped from YANG variable /routing_system/interface/ve/ipv6/interface_ospfv3_conf/authentication/ipsec_auth_key_config/esp (algorithm-type-esp) If this variable is read-only (config: false) in the source YANG file, then _set_esp is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_esp() directly. YANG Description: Specify Encapsulating Security Payload (ESP) as the protocol to provide packet-level security.
[ "Setter", "method", "for", "esp", "mapped", "from", "YANG", "variable", "/", "routing_system", "/", "interface", "/", "ve", "/", "ipv6", "/", "interface_ospfv3_conf", "/", "authentication", "/", "ipsec_auth_key_config", "/", "esp", "(", "algorithm", "-", "type", "-", "esp", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_esp", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_esp", "()", "directly", "." ]
python
train
saltstack/salt
salt/modules/nxos.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nxos.py#L407-L420
def show_run(**kwargs):
    '''
    Shortcut to run `show running-config` on the NX-OS device.

    .. code-block:: bash

        salt '*' nxos.cmd show_run
    '''
    command = 'show running-config'
    info = show(command, **kwargs)
    if isinstance(info, list):
        info = info[0]

    return info
[ "def", "show_run", "(", "*", "*", "kwargs", ")", ":", "command", "=", "'show running-config'", "info", "=", "''", "info", "=", "show", "(", "command", ",", "*", "*", "kwargs", ")", "if", "isinstance", "(", "info", ",", "list", ")", ":", "info", "=", "info", "[", "0", "]", "return", "info" ]
Shortcut to run `show running-config` on the NX-OS device. .. code-block:: bash salt '*' nxos.cmd show_run
[ "Shortcut", "to", "run", "show", "running", "-", "config", "on", "the", "NX", "-", "OS", "device", "." ]
python
train
santoshphilip/eppy
eppy/idd_helpers.py
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/idd_helpers.py#L29-L35
def folder2ver(folder): """get the version number from the E+ install folder""" ver = folder.split('EnergyPlus')[-1] ver = ver[1:] splitapp = ver.split('-') ver = '.'.join(splitapp) return ver
[ "def", "folder2ver", "(", "folder", ")", ":", "ver", "=", "folder", ".", "split", "(", "'EnergyPlus'", ")", "[", "-", "1", "]", "ver", "=", "ver", "[", "1", ":", "]", "splitapp", "=", "ver", ".", "split", "(", "'-'", ")", "ver", "=", "'.'", ".", "join", "(", "splitapp", ")", "return", "ver" ]
get the version number from the E+ install folder
[ "get", "the", "version", "number", "from", "the", "E", "+", "install", "folder" ]
python
train
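A worked example of the string surgery above; install folders are conventionally named like EnergyPlusV9-2-0, and the character immediately after the product name is stripped before the dashes become dots.

from eppy.idd_helpers import folder2ver

print(folder2ver('EnergyPlusV9-2-0'))                # -> 9.2.0
print(folder2ver('/Applications/EnergyPlus-8-9-0'))  # -> 8.9.0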
caseyjlaw/activegit
activegit/activegit.py
https://github.com/caseyjlaw/activegit/blob/2b4a0ee0fecf13345b5257130ba98b48f46e1098/activegit/activegit.py#L137-L141
def training_data(self):
    """ Returns data dictionary from training.pkl """

    with open(os.path.join(self.repopath, 'training.pkl'), 'rb') as f:
        data = pickle.load(f)
    return data.keys(), data.values()
[ "def", "training_data", "(", "self", ")", ":", "data", "=", "pickle", ".", "load", "(", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "repopath", ",", "'training.pkl'", ")", ")", ")", "return", "data", ".", "keys", "(", ")", ",", "data", ".", "values", "(", ")" ]
Returns data dictionary from training.pkl
[ "Returns", "data", "dictionary", "from", "training", ".", "pkl" ]
python
train
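A hedged round-trip sketch: the method expects a pickled dict named training.pkl inside the repository working copy; the key/value split into features and labels is inferred from the return statement, not documented in this record.

import os, pickle

repopath = '/tmp/activegit-demo'   # placeholder working copy
os.makedirs(repopath, exist_ok=True)
with open(os.path.join(repopath, 'training.pkl'), 'wb') as f:
    pickle.dump({'candidate1': 1, 'candidate2': 0}, f)
# An ActiveGit instance rooted at repopath would now return these
# keys and values from training_data.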
mabuchilab/QNET
src/qnet/algebra/core/circuit_algebra.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/core/circuit_algebra.py#L379-L402
def series_with_slh(self, other): """Series product with another :class:`SLH` object Args: other (SLH): An upstream SLH circuit. Returns: SLH: The combined system. """ new_S = self.S * other.S new_L = self.S * other.L + self.L def ImAdjoint(m): return (m.H - m) * (I / 2) delta = ImAdjoint(self.L.adjoint() * self.S * other.L) if isinstance(delta, Matrix): new_H = self.H + other.H + delta[0, 0] else: assert delta == 0 new_H = self.H + other.H return SLH(new_S, new_L, new_H)
[ "def", "series_with_slh", "(", "self", ",", "other", ")", ":", "new_S", "=", "self", ".", "S", "*", "other", ".", "S", "new_L", "=", "self", ".", "S", "*", "other", ".", "L", "+", "self", ".", "L", "def", "ImAdjoint", "(", "m", ")", ":", "return", "(", "m", ".", "H", "-", "m", ")", "*", "(", "I", "/", "2", ")", "delta", "=", "ImAdjoint", "(", "self", ".", "L", ".", "adjoint", "(", ")", "*", "self", ".", "S", "*", "other", ".", "L", ")", "if", "isinstance", "(", "delta", ",", "Matrix", ")", ":", "new_H", "=", "self", ".", "H", "+", "other", ".", "H", "+", "delta", "[", "0", ",", "0", "]", "else", ":", "assert", "delta", "==", "0", "new_H", "=", "self", ".", "H", "+", "other", ".", "H", "return", "SLH", "(", "new_S", ",", "new_L", ",", "new_H", ")" ]
Series product with another :class:`SLH` object Args: other (SLH): An upstream SLH circuit. Returns: SLH: The combined system.
[ "Series", "product", "with", "another", ":", "class", ":", "SLH", "object" ]
python
train
opendatateam/udata
udata/search/fields.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/fields.py#L41-L54
def obj_to_string(obj): '''Render an object into a unicode string if possible''' if not obj: return None elif isinstance(obj, bytes): return obj.decode('utf-8') elif isinstance(obj, basestring): return obj elif is_lazy_string(obj): return obj.value elif hasattr(obj, '__html__'): return obj.__html__() else: return str(obj)
[ "def", "obj_to_string", "(", "obj", ")", ":", "if", "not", "obj", ":", "return", "None", "elif", "isinstance", "(", "obj", ",", "bytes", ")", ":", "return", "obj", ".", "decode", "(", "'utf-8'", ")", "elif", "isinstance", "(", "obj", ",", "basestring", ")", ":", "return", "obj", "elif", "is_lazy_string", "(", "obj", ")", ":", "return", "obj", ".", "value", "elif", "hasattr", "(", "obj", ",", "'__html__'", ")", ":", "return", "obj", ".", "__html__", "(", ")", "else", ":", "return", "str", "(", "obj", ")" ]
Render an object into a unicode string if possible
[ "Render", "an", "object", "into", "a", "unicode", "string", "if", "possible" ]
python
train
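The branch order above matters: falsy values short-circuit to None, bytes are utf-8 decoded before the generic string check, and lazy strings are unwrapped before the str() fallback. A quick illustration, assuming the module's own imports are available:

from udata.search.fields import obj_to_string

print(obj_to_string(None))             # None   (falsy short-circuit)
print(obj_to_string(b'caf\xc3\xa9'))   # 'café' (bytes branch, utf-8 decode)
print(obj_to_string('plain'))          # 'plain' (string branch)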
mosdef-hub/foyer
foyer/smarts_graph.py
https://github.com/mosdef-hub/foyer/blob/9e39c71208fc01a6cc7b7cbe5a533c56830681d3/foyer/smarts_graph.py#L77-L94
def _add_label_edges(self): """Add edges between all atoms with the same atom_label in rings.""" labels = self.ast.select('atom_label') if not labels: return # We need each individual label and atoms with multiple ring labels # would yield e.g. the string '12' so split those up. label_digits = defaultdict(list) for label in labels: digits = list(label.tail[0]) for digit in digits: label_digits[digit].append(label.parent()) for label, (atom1, atom2) in label_digits.items(): atom1_idx = self._atom_indices[id(atom1)] atom2_idx = self._atom_indices[id(atom2)] self.add_edge(atom1_idx, atom2_idx)
[ "def", "_add_label_edges", "(", "self", ")", ":", "labels", "=", "self", ".", "ast", ".", "select", "(", "'atom_label'", ")", "if", "not", "labels", ":", "return", "# We need each individual label and atoms with multiple ring labels", "# would yield e.g. the string '12' so split those up.", "label_digits", "=", "defaultdict", "(", "list", ")", "for", "label", "in", "labels", ":", "digits", "=", "list", "(", "label", ".", "tail", "[", "0", "]", ")", "for", "digit", "in", "digits", ":", "label_digits", "[", "digit", "]", ".", "append", "(", "label", ".", "parent", "(", ")", ")", "for", "label", ",", "(", "atom1", ",", "atom2", ")", "in", "label_digits", ".", "items", "(", ")", ":", "atom1_idx", "=", "self", ".", "_atom_indices", "[", "id", "(", "atom1", ")", "]", "atom2_idx", "=", "self", ".", "_atom_indices", "[", "id", "(", "atom2", ")", "]", "self", ".", "add_edge", "(", "atom1_idx", ",", "atom2_idx", ")" ]
Add edges between all atoms with the same atom_label in rings.
[ "Add", "edges", "between", "all", "atoms", "with", "the", "same", "atom_label", "in", "rings", "." ]
python
train
maxalbert/tohu
tohu/v2/base.py
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/base.py#L141-L155
def add_new_reset_method(cls): """ Replace existing cls.reset() method with a new one which also calls reset() on any clones. """ orig_reset = cls.reset def new_reset(self, seed=None): logger.debug(f"Calling reset() on {self} (seed={seed})") orig_reset(self, seed) for c in self._dependent_generators: c.reset_dependent_generator(seed) return self cls.reset = new_reset
[ "def", "add_new_reset_method", "(", "cls", ")", ":", "orig_reset", "=", "cls", ".", "reset", "def", "new_reset", "(", "self", ",", "seed", "=", "None", ")", ":", "logger", ".", "debug", "(", "f\"Calling reset() on {self} (seed={seed})\"", ")", "orig_reset", "(", "self", ",", "seed", ")", "for", "c", "in", "self", ".", "_dependent_generators", ":", "c", ".", "reset_dependent_generator", "(", "seed", ")", "return", "self", "cls", ".", "reset", "=", "new_reset" ]
Replace existing cls.reset() method with a new one which also calls reset() on any clones.
[ "Replace", "existing", "cls", ".", "reset", "()", "method", "with", "a", "new", "one", "which", "also", "calls", "reset", "()", "on", "any", "clones", "." ]
python
train
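A reduced sketch of the patching pattern with a toy generator class; the logger and reset_dependent_generator are simplified away, but the wrapped reset cascading into dependents has the same shape.

class Gen:
    def __init__(self):
        self._dependent_generators = []
    def reset(self, seed=None):
        print('reset with seed', seed)

def patch_reset(cls):
    orig_reset = cls.reset
    def new_reset(self, seed=None):
        orig_reset(self, seed)
        for c in self._dependent_generators:   # cascade into clones
            c.reset(seed)
        return self
    cls.reset = new_reset

patch_reset(Gen)
parent, child = Gen(), Gen()
parent._dependent_generators.append(child)
parent.reset(seed=42)   # prints twice: parent first, then its clone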
ejeschke/ginga
ginga/rv/Control.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/Control.py#L281-L309
def call_local_plugin_method(self, chname, plugin_name, method_name, args, kwargs): """ Parameters ---------- chname : str The name of the channel containing the plugin. plugin_name : str The name of the local plugin containing the method to call. method_name : str The name of the method to call. args : list or tuple The positional arguments to the method kwargs : dict The keyword arguments to the method Returns ------- result : return value from calling the method """ channel = self.get_channel(chname) opmon = channel.opmon p_obj = opmon.get_plugin(plugin_name) method = getattr(p_obj, method_name) return self.gui_call(method, *args, **kwargs)
[ "def", "call_local_plugin_method", "(", "self", ",", "chname", ",", "plugin_name", ",", "method_name", ",", "args", ",", "kwargs", ")", ":", "channel", "=", "self", ".", "get_channel", "(", "chname", ")", "opmon", "=", "channel", ".", "opmon", "p_obj", "=", "opmon", ".", "get_plugin", "(", "plugin_name", ")", "method", "=", "getattr", "(", "p_obj", ",", "method_name", ")", "return", "self", ".", "gui_call", "(", "method", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Parameters ---------- chname : str The name of the channel containing the plugin. plugin_name : str The name of the local plugin containing the method to call. method_name : str The name of the method to call. args : list or tuple The positional arguments to the method kwargs : dict The keyword arguments to the method Returns ------- result : return value from calling the method
[ "Parameters", "----------", "chname", ":", "str", "The", "name", "of", "the", "channel", "containing", "the", "plugin", "." ]
python
train
Spinmob/spinmob
_pylab_tweaks.py
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/_pylab_tweaks.py#L210-L224
def differentiate_shown_data(neighbors=1, fyname=1, **kwargs): """ Differentiates the data visible on the specified axes using fun.derivative_fit() (if neighbors > 0), and derivative() otherwise. Modifies the visible data using manipulate_shown_data(**kwargs) """ if neighbors: def D(x,y): return _fun.derivative_fit(x,y,neighbors) else: def D(x,y): return _fun.derivative(x,y) if fyname==1: fyname = '$\\partial_{x(\\pm'+str(neighbors)+')}$' manipulate_shown_data(D, fxname=None, fyname=fyname, **kwargs)
[ "def", "differentiate_shown_data", "(", "neighbors", "=", "1", ",", "fyname", "=", "1", ",", "*", "*", "kwargs", ")", ":", "if", "neighbors", ":", "def", "D", "(", "x", ",", "y", ")", ":", "return", "_fun", ".", "derivative_fit", "(", "x", ",", "y", ",", "neighbors", ")", "else", ":", "def", "D", "(", "x", ",", "y", ")", ":", "return", "_fun", ".", "derivative", "(", "x", ",", "y", ")", "if", "fyname", "==", "1", ":", "fyname", "=", "'$\\\\partial_{x(\\\\pm'", "+", "str", "(", "neighbors", ")", "+", "')}$'", "manipulate_shown_data", "(", "D", ",", "fxname", "=", "None", ",", "fyname", "=", "fyname", ",", "*", "*", "kwargs", ")" ]
Differentiates the data visible on the specified axes using fun.derivative_fit() (if neighbors > 0), and derivative() otherwise. Modifies the visible data using manipulate_shown_data(**kwargs)
[ "Differentiates", "the", "data", "visible", "on", "the", "specified", "axes", "using", "fun", ".", "derivative_fit", "()", "(", "if", "neighbors", ">", "0", ")", "and", "derivative", "()", "otherwise", ".", "Modifies", "the", "visible", "data", "using", "manipulate_shown_data", "(", "**", "kwargs", ")" ]
python
train
niklasf/python-chess
chess/gaviota.py
https://github.com/niklasf/python-chess/blob/d91f986ca3e046b300a0d7d9ee2a13b07610fe1a/chess/gaviota.py#L2083-L2096
def open_tablebase_native(directory: PathLike, *, libgtb: Any = None, LibraryLoader: Any = ctypes.cdll) -> NativeTablebase: """ Opens a collection of tables for probing using libgtb. In most cases :func:`~chess.gaviota.open_tablebase()` should be used. Use this function only if you do not want to downgrade to pure Python tablebase probing. :raises: :exc:`RuntimeError` or :exc:`OSError` when libgtb can not be used. """ libgtb = libgtb or ctypes.util.find_library("gtb") or "libgtb.so.1.0.1" tables = NativeTablebase(LibraryLoader.LoadLibrary(libgtb)) tables.add_directory(directory) return tables
[ "def", "open_tablebase_native", "(", "directory", ":", "PathLike", ",", "*", ",", "libgtb", ":", "Any", "=", "None", ",", "LibraryLoader", ":", "Any", "=", "ctypes", ".", "cdll", ")", "->", "NativeTablebase", ":", "libgtb", "=", "libgtb", "or", "ctypes", ".", "util", ".", "find_library", "(", "\"gtb\"", ")", "or", "\"libgtb.so.1.0.1\"", "tables", "=", "NativeTablebase", "(", "LibraryLoader", ".", "LoadLibrary", "(", "libgtb", ")", ")", "tables", ".", "add_directory", "(", "directory", ")", "return", "tables" ]
Opens a collection of tables for probing using libgtb. In most cases :func:`~chess.gaviota.open_tablebase()` should be used. Use this function only if you do not want to downgrade to pure Python tablebase probing. :raises: :exc:`RuntimeError` or :exc:`OSError` when libgtb can not be used.
[ "Opens", "a", "collection", "of", "tables", "for", "probing", "using", "libgtb", "." ]
python
train
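Typical probing flow once the native handle is open, following python-chess's tablebase API; the directory is a placeholder and the position is a hypothetical KQK endgame.

import chess
import chess.gaviota

# 'data/gaviota' is a placeholder; the matching .gtb table files must exist there.
with chess.gaviota.open_tablebase_native('data/gaviota') as tables:
    board = chess.Board('8/8/8/8/8/8/4K1k1/4Q3 w - - 0 1')  # hypothetical position
    print(tables.probe_dtm(board))  # distance-to-mate for the side to move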
SBRG/ssbio
ssbio/core/protein.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L919-L935
def df_pdb_blast(self): """DataFrame: Get a dataframe of PDB BLAST results""" blast_results_pre_df = [] for p in self.get_experimental_structures(): for c in p.chains: if hasattr(c, 'blast_results'): # Summary dataframe infodict = p.get_dict_with_chain(chain=c.id)['blast_results'] infodict['pdb_id'] = p.id infodict['pdb_chain_id'] = c.id blast_results_pre_df.append(infodict) cols = ['pdb_id', 'pdb_chain_id', 'hit_score', 'hit_evalue', 'hit_percent_similar', 'hit_percent_ident', 'hit_percent_gaps', 'hit_num_ident', 'hit_num_similar', 'hit_num_gaps'] df = pd.DataFrame.from_records(blast_results_pre_df, columns=cols).set_index('pdb_id') return ssbio.utils.clean_df(df)
[ "def", "df_pdb_blast", "(", "self", ")", ":", "blast_results_pre_df", "=", "[", "]", "for", "p", "in", "self", ".", "get_experimental_structures", "(", ")", ":", "for", "c", "in", "p", ".", "chains", ":", "if", "hasattr", "(", "c", ",", "'blast_results'", ")", ":", "# Summary dataframe", "infodict", "=", "p", ".", "get_dict_with_chain", "(", "chain", "=", "c", ".", "id", ")", "[", "'blast_results'", "]", "infodict", "[", "'pdb_id'", "]", "=", "p", ".", "id", "infodict", "[", "'pdb_chain_id'", "]", "=", "c", ".", "id", "blast_results_pre_df", ".", "append", "(", "infodict", ")", "cols", "=", "[", "'pdb_id'", ",", "'pdb_chain_id'", ",", "'hit_score'", ",", "'hit_evalue'", ",", "'hit_percent_similar'", ",", "'hit_percent_ident'", ",", "'hit_percent_gaps'", ",", "'hit_num_ident'", ",", "'hit_num_similar'", ",", "'hit_num_gaps'", "]", "df", "=", "pd", ".", "DataFrame", ".", "from_records", "(", "blast_results_pre_df", ",", "columns", "=", "cols", ")", ".", "set_index", "(", "'pdb_id'", ")", "return", "ssbio", ".", "utils", ".", "clean_df", "(", "df", ")" ]
DataFrame: Get a dataframe of PDB BLAST results
[ "DataFrame", ":", "Get", "a", "dataframe", "of", "PDB", "BLAST", "results" ]
python
train
assemblerflow/flowcraft
flowcraft/generator/engine.py
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/engine.py#L1135-L1174
def _get_merged_params_string(self):
    """Returns the merged nextflow params string from a dictionary object.

    The params dict should be a set of key:value pairs with the parameter
    name, and the default parameter value::

        self.params = {
            "genomeSize": 2.1,
            "minCoverage": 15
        }

    The values are then added to the string as they are. For instance, a
    ``2.1`` float will appear as ``param = 2.1`` and a ``"'teste'"`` string
    will appear as ``param = 'teste'`` (note the preserved string quotes).

    Identical parameters in multiple processes will be merged into the same
    param.

    Returns
    -------
    str
        Nextflow params configuration string
    """

    params_temp = {}

    for p in self.processes:

        logger.debug("[{}] Adding parameters: {}".format(p.template,
                                                         p.params))
        for param, val in p.params.items():
            params_temp[param] = val["default"]

    config_str = "\n\t" + "\n\t".join([
        "{} = {}".format(param, val) for param, val in params_temp.items()
    ])

    return config_str
[ "def", "_get_merged_params_string", "(", "self", ")", ":", "params_temp", "=", "{", "}", "for", "p", "in", "self", ".", "processes", ":", "logger", ".", "debug", "(", "\"[{}] Adding parameters: {}\"", ".", "format", "(", "p", ".", "template", ",", "p", ".", "params", ")", ")", "for", "param", ",", "val", "in", "p", ".", "params", ".", "items", "(", ")", ":", "params_temp", "[", "param", "]", "=", "val", "[", "\"default\"", "]", "config_str", "=", "\"\\n\\t\"", "+", "\"\\n\\t\"", ".", "join", "(", "[", "\"{} = {}\"", ".", "format", "(", "param", ",", "val", ")", "for", "param", ",", "val", "in", "params_temp", ".", "items", "(", ")", "]", ")", "return", "config_str" ]
Returns the merged nextflow params string from a dictionary object.

The params dict should be a set of key:value pairs with the parameter
name, and the default parameter value::

    self.params = {
        "genomeSize": 2.1,
        "minCoverage": 15
    }

The values are then added to the string as they are. For instance, a
``2.1`` float will appear as ``param = 2.1`` and a ``"'teste'"`` string
will appear as ``param = 'teste'`` (note the preserved string quotes).

Identical parameters in multiple processes will be merged into the same
param.

Returns
-------
str
    Nextflow params configuration string
[ "Returns", "the", "merged", "nextflow", "params", "string", "from", "a", "dictionary", "object", "." ]
python
test
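The merge in isolation, with a throwaway params layout matching the docstring's shape; when two processes declare the same parameter, the later one wins because the dict entry is overwritten in iteration order.

processes_params = [
    {'genomeSize': {'default': 2.1}, 'minCoverage': {'default': 15}},
    {'minCoverage': {'default': 20}},   # same name: overwrites the earlier default
]

params_temp = {}
for params in processes_params:
    for param, val in params.items():
        params_temp[param] = val['default']

config_str = '\n\t' + '\n\t'.join(
    '{} = {}'.format(param, val) for param, val in params_temp.items())
print(config_str)
# 	genomeSize = 2.1
# 	minCoverage = 20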
pycontribs/pyrax
pyrax/base_identity.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/base_identity.py#L942-L955
def list_roles_for_user(self, user): """ ADMIN ONLY. Returns a list of roles for the specified user. Each role will be a 3-tuple, consisting of (role_id, role_name, role_description). """ user_id = utils.get_id(user) uri = "users/%s/roles" % user_id resp, resp_body = self.method_get(uri) if resp.status_code in (401, 403): raise exc.AuthorizationFailure("You are not authorized to list " "user roles.") roles = resp_body.get("roles") return roles
[ "def", "list_roles_for_user", "(", "self", ",", "user", ")", ":", "user_id", "=", "utils", ".", "get_id", "(", "user", ")", "uri", "=", "\"users/%s/roles\"", "%", "user_id", "resp", ",", "resp_body", "=", "self", ".", "method_get", "(", "uri", ")", "if", "resp", ".", "status_code", "in", "(", "401", ",", "403", ")", ":", "raise", "exc", ".", "AuthorizationFailure", "(", "\"You are not authorized to list \"", "\"user roles.\"", ")", "roles", "=", "resp_body", ".", "get", "(", "\"roles\"", ")", "return", "roles" ]
ADMIN ONLY. Returns a list of roles for the specified user. Each role will be a 3-tuple, consisting of (role_id, role_name, role_description).
[ "ADMIN", "ONLY", ".", "Returns", "a", "list", "of", "roles", "for", "the", "specified", "user", ".", "Each", "role", "will", "be", "a", "3", "-", "tuple", "consisting", "of", "(", "role_id", "role_name", "role_description", ")", "." ]
python
train