Dataset columns (type and observed value range):

    repo              string, length 7 to 54
    path              string, length 4 to 192
    url               string, length 87 to 284
    code              string, length 78 to 104k
    code_tokens       list
    docstring         string, length 1 to 46.9k
    docstring_tokens  list
    language          string, 1 distinct value
    partition         string, 3 distinct values
watson-developer-cloud/python-sdk
ibm_watson/discovery_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/discovery_v1.py#L5163-L5170
def _to_dict(self):
    """Return a json dictionary representing this model."""
    _dict = {}
    if hasattr(self, 'step') and self.step is not None:
        _dict['step'] = self.step
    if hasattr(self, 'snapshot') and self.snapshot is not None:
        _dict['snapshot'] = self.snapshot
    return _dict
Return a json dictionary representing this model.
python
train
LonamiWebs/Telethon
telethon/client/uploads.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/client/uploads.py#L295-L360
async def _send_album(self, entity, files, caption='',
                      progress_callback=None, reply_to=None,
                      parse_mode=(), silent=None):
    """Specialized version of .send_file for albums"""
    # We don't care if the user wants to avoid cache, we will use it
    # anyway. Why? The cached version will be exactly the same thing
    # we need to produce right now to send albums (uploadMedia), and
    # cache only makes a difference for documents where the user may
    # want the attributes used on them to change.
    #
    # In theory documents can be sent inside the albums but they appear
    # as different messages (not inside the album), and the logic to set
    # the attributes/avoid cache is already written in .send_file().
    entity = await self.get_input_entity(entity)
    if not utils.is_list_like(caption):
        caption = (caption,)

    captions = []
    for c in reversed(caption):  # Pop from the end (so reverse)
        captions.append(await self._parse_message_text(c or '', parse_mode))

    reply_to = utils.get_message_id(reply_to)

    # Need to upload the media first, but only if they're not cached yet
    media = []
    for file in files:
        # Albums want :tl:`InputMedia` which, in theory, includes
        # :tl:`InputMediaUploadedPhoto`. However using that will
        # make it `raise MediaInvalidError`, so we need to upload
        # it as media and then convert that to :tl:`InputMediaPhoto`.
        fh, fm, _ = await self._file_to_media(file)
        if isinstance(fm, types.InputMediaUploadedPhoto):
            r = await self(functions.messages.UploadMediaRequest(
                entity, media=fm
            ))
            self.session.cache_file(
                fh.md5, fh.size, utils.get_input_photo(r.photo))

            fm = utils.get_input_media(r.photo)

        if captions:
            caption, msg_entities = captions.pop()
        else:
            caption, msg_entities = '', None

        media.append(types.InputSingleMedia(
            fm,
            message=caption,
            entities=msg_entities
        ))

    # Now we can construct the multi-media request
    result = await self(functions.messages.SendMultiMediaRequest(
        entity, reply_to_msg_id=reply_to, multi_media=media, silent=silent
    ))

    # We never sent a `random_id` for the messages that resulted from
    # the request so we can't pair them up with the `Updates` that we
    # get from Telegram. However, the sent messages have a photo and
    # the photo IDs match with those we did send.
    #
    # Updates -> {_: message}
    messages = self._get_response_message(None, result, entity)

    # {_: message} -> {photo ID: message}
    messages = {m.photo.id: m for m in messages.values()}

    # Sent photo IDs -> messages
    return [messages[m.media.id.id] for m in media]
Specialized version of .send_file for albums
python
train
CenturyLinkCloud/clc-python-sdk
src/clc/APIv2/__init__.py
https://github.com/CenturyLinkCloud/clc-python-sdk/blob/f4dba40c627cb08dd4b7d0d277e8d67578010b05/src/clc/APIv2/__init__.py#L46-L53
def SetCredentials(api_username, api_passwd):
    """Establish API username and password associated with APIv2 commands."""
    global V2_API_USERNAME
    global V2_API_PASSWD
    global _V2_ENABLED
    _V2_ENABLED = True
    V2_API_USERNAME = api_username
    V2_API_PASSWD = api_passwd
Establish API username and password associated with APIv2 commands.
python
train
blackecho/Deep-Learning-TensorFlow
yadlt/core/model.py
https://github.com/blackecho/Deep-Learning-TensorFlow/blob/ddeb1f2848da7b7bee166ad2152b4afc46bb2086/yadlt/core/model.py#L107-L125
def get_parameters(self, params, graph=None):
    """Get the parameters of the model.

    :param params: dictionary of keys (str names) and values (tensors).
    :return: evaluated tensors in params
    """
    g = graph if graph is not None else self.tf_graph

    with g.as_default():
        with tf.Session() as self.tf_session:
            self.tf_saver.restore(self.tf_session, self.model_path)
            out = {}
            for par in params:
                if type(params[par]) == list:
                    for i, p in enumerate(params[par]):
                        out[par + '-' + str(i + 1)] = p.eval()
                else:
                    out[par] = params[par].eval()
            return out
Get the parameters of the model. :param params: dictionary of keys (str names) and values (tensors). :return: evaluated tensors in params
python
train
binux/pyspider
pyspider/scheduler/scheduler.py
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/scheduler/scheduler.py#L348-L370
def _check_task_done(self):
    '''Check status queue'''
    cnt = 0
    try:
        while True:
            task = self.status_queue.get_nowait()
            # check _on_get_info result here
            if task.get('taskid') == '_on_get_info' and 'project' in task and 'track' in task:
                if task['project'] not in self.projects:
                    continue
                project = self.projects[task['project']]
                project.on_get_info(task['track'].get('save') or {})
                logger.info(
                    '%s on_get_info %r', task['project'], task['track'].get('save', {})
                )
                continue
            elif not self.task_verify(task):
                continue
            self.on_task_status(task)
            cnt += 1
    except Queue.Empty:
        pass
    return cnt
Check status queue
python
train
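
The loop above is the standard drain-until-Empty pattern for non-blocking queue consumption. A minimal self-contained sketch of just that pattern, using the Python 3 queue module (pyspider imports it as Queue) and bare task dicts rather than pyspider internals:

import queue

def drain(status_queue):
    """Handle every item currently queued, without blocking."""
    cnt = 0
    try:
        while True:
            task = status_queue.get_nowait()  # raises queue.Empty once drained
            cnt += 1  # a real consumer would process `task` here
    except queue.Empty:
        pass
    return cnt

q = queue.Queue()
for i in range(3):
    q.put({'taskid': i})
print(drain(q))  # 3
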
ejeschke/ginga
ginga/web/pgw/ImageViewPg.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/web/pgw/ImageViewPg.py#L63-L68
def set_widget(self, canvas_w):
    """Call this method with the widget that will be used
    for the display.
    """
    self.logger.debug("set widget canvas_w=%s" % canvas_w)
    self.pgcanvas = canvas_w
Call this method with the widget that will be used for the display.
python
train
yunojuno-archive/django-package-monitor
package_monitor/models.py
https://github.com/yunojuno-archive/django-package-monitor/blob/534aa35ccfe187d2c55aeca0cb52b8278254e437/package_monitor/models.py#L130-L143
def update_from_pypi(self):
    """Call get_latest_version and then save the object."""
    package = pypi.Package(self.package_name)
    self.licence = package.licence()
    if self.is_parseable:
        self.latest_version = package.latest_version()
        self.next_version = package.next_version(self.current_version)
        self.diff_status = pypi.version_diff(self.current_version, self.latest_version)
    self.python_support = package.python_support()
    self.django_support = package.django_support()
    self.supports_py3 = package.supports_py3()
    self.checked_pypi_at = tz_now()
    self.save()
    return self
Call get_latest_version and then save the object.
python
train
ibelie/typy
typy/google/protobuf/text_format.py
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/text_format.py#L372-L390
def ParseLines(lines, message, allow_unknown_extension=False,
               allow_field_number=False):
    """Parses a text representation of a protocol message into a message.

    Args:
      lines: An iterable of lines of a message's text representation.
      message: A protocol buffer message to merge into.
      allow_unknown_extension: if True, skip over missing extensions and keep
        parsing
      allow_field_number: if True, both field number and field name are allowed.

    Returns:
      The same message passed as argument.

    Raises:
      ParseError: On text parsing problems.
    """
    parser = _Parser(allow_unknown_extension, allow_field_number)
    return parser.ParseLines(lines, message)
Parses a text representation of a protocol message into a message. Args: lines: An iterable of lines of a message's text representation. message: A protocol buffer message to merge into. allow_unknown_extension: if True, skip over missing extensions and keep parsing allow_field_number: if True, both field number and field name are allowed. Returns: The same message passed as argument. Raises: ParseError: On text parsing problems.
python
valid
python-wink/python-wink
src/pywink/devices/fan.py
https://github.com/python-wink/python-wink/blob/cf8bdce8c6518f30b91b23aa7aa32e89c2ce48da/src/pywink/devices/fan.py#L72-L83
def set_fan_direction(self, direction):
    """
    :param direction: a string one of ["forward", "reverse"]
    :return: nothing
    """
    desired_state = {"direction": direction}
    response = self.api_interface.set_device_state(self, {
        "desired_state": desired_state
    })
    self._update_state_from_response(response)
:param direction: a string one of ["forward", "reverse"] :return: nothing
python
train
bwohlberg/sporco
sporco/linalg.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/linalg.py#L826-L862
def lu_factor(A, rho, check_finite=True):
    r"""
    Compute LU factorisation of either :math:`A^T A + \rho I` or
    :math:`A A^T + \rho I`, depending on which matrix is smaller.

    Parameters
    ----------
    A : array_like
      Array :math:`A`
    rho : float
      Scalar :math:`\rho`
    check_finite : bool, optional (default True)
      Flag indicating whether the input array should be checked for Inf
      and NaN values

    Returns
    -------
    lu : ndarray
      Matrix containing U in its upper triangle, and L in its lower
      triangle, as returned by :func:`scipy.linalg.lu_factor`
    piv : ndarray
      Pivot indices representing the permutation matrix P, as returned
      by :func:`scipy.linalg.lu_factor`
    """

    N, M = A.shape
    # If N < M it is cheaper to factorise A*A^T + rho*I and then use the
    # matrix inversion lemma to compute the inverse of A^T*A + rho*I
    if N >= M:
        lu, piv = linalg.lu_factor(A.T.dot(A) +
                                   rho * np.identity(M, dtype=A.dtype),
                                   check_finite=check_finite)
    else:
        lu, piv = linalg.lu_factor(A.dot(A.T) +
                                   rho * np.identity(N, dtype=A.dtype),
                                   check_finite=check_finite)
    return lu, piv
r""" Compute LU factorisation of either :math:`A^T A + \rho I` or :math:`A A^T + \rho I`, depending on which matrix is smaller. Parameters ---------- A : array_like Array :math:`A` rho : float Scalar :math:`\rho` check_finite : bool, optional (default True) Flag indicating whether the input array should be checked for Inf and NaN values Returns ------- lu : ndarray Matrix containing U in its upper triangle, and L in its lower triangle, as returned by :func:`scipy.linalg.lu_factor` piv : ndarray Pivot indices representing the permutation matrix P, as returned by :func:`scipy.linalg.lu_factor`
python
train
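
The returned factorisation is typically reused with scipy.linalg.lu_solve to solve the regularised normal equations. A minimal sketch for the N >= M branch, with random data that is not part of the sporco source:

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
A = rng.standard_normal((20, 5))   # N >= M, so factorise A^T A + rho*I
b = rng.standard_normal(20)
rho = 0.1

lu, piv = linalg.lu_factor(A.T.dot(A) + rho * np.identity(5))
# Solve (A^T A + rho*I) x = A^T b, e.g. a ridge/ADMM-style subproblem
x = linalg.lu_solve((lu, piv), A.T.dot(b))
print(np.allclose((A.T @ A + rho * np.identity(5)) @ x, A.T @ b))  # True
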
tBuLi/symfit
symfit/core/fit.py
https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit.py#L1590-L1660
def eval_components(self, *args, **kwargs):
    """
    Numerically integrate the system of ODEs.

    :param args: Ordered arguments for the parameters and independent
        variables
    :param kwargs: Keyword arguments for the parameters and independent
        variables
    :return:
    """
    bound_arguments = self.__signature__.bind(*args, **kwargs)
    t_like = bound_arguments.arguments[self.independent_vars[0].name]

    # System of functions to be integrated
    f = lambda ys, t, *a: [c(t, *(list(ys) + list(a)))
                           for c in self._ncomponents]
    Dfun = lambda ys, t, *a: [[c(t, *(list(ys) + list(a))) for c in row]
                              for row in self._njacobian]

    initial_dependent = [self.initial[var] for var in self.dependent_vars]
    t_initial = self.initial[self.independent_vars[0]]  # Assuming there's only one

    # Check if the time-like data includes the initial value, because
    # integration should start there.
    try:
        t_like[0]
    except (TypeError, IndexError):  # Python scalar gives TypeError, numpy scalars IndexError
        t_like = np.array([t_like])  # Allow evaluation at one point.

    # The strategy is to split the time axis in a part above and below the
    # initial value, and to integrate those separately. At the end we
    # rejoin them. np.flip is needed because odeint wants the first point
    # to be t_initial and so t_smaller is a declining series.
    if t_initial in t_like:
        t_bigger = t_like[t_like >= t_initial]
        t_smaller = t_like[t_like <= t_initial][::-1]
    else:
        t_bigger = np.concatenate(
            (np.array([t_initial]), t_like[t_like > t_initial])
        )
        t_smaller = np.concatenate(
            (np.array([t_initial]), t_like[t_like < t_initial][::-1])
        )
    # Properly ordered time axis containing t_initial
    t_total = np.concatenate((t_smaller[::-1][:-1], t_bigger))

    ans_bigger = odeint(
        f,
        initial_dependent,
        t_bigger,
        args=tuple(
            bound_arguments.arguments[param.name] for param in self.params),
        Dfun=Dfun,
        *self.lsoda_args, **self.lsoda_kwargs
    )
    ans_smaller = odeint(
        f,
        initial_dependent,
        t_smaller,
        args=tuple(
            bound_arguments.arguments[param.name] for param in self.params),
        Dfun=Dfun,
        *self.lsoda_args, **self.lsoda_kwargs
    )

    ans = np.concatenate((ans_smaller[1:][::-1], ans_bigger))
    if t_initial in t_like:
        # The user also requested to know the value at t_initial, so keep it.
        return ans.T
    else:
        # The user didn't ask for the value at t_initial, so exclude it.
        # (t_total contains all the t-points used for the integration,
        # and so is t_like with t_initial inserted at the right position).
        return ans[t_total != t_initial].T
Numerically integrate the system of ODEs. :param args: Ordered arguments for the parameters and independent variables :param kwargs: Keyword arguments for the parameters and independent variables :return:
python
train
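
The split-and-rejoin strategy relies on scipy's odeint accepting a decreasing time array, which makes it integrate backwards from the first point. A standalone sketch of the same idea for dy/dt = -y with y(t_initial) = 1 (illustration only, not symfit code):

import numpy as np
from scipy.integrate import odeint

f = lambda y, t: -y
t_initial, y0 = 0.0, [1.0]
t_like = np.array([-1.0, -0.5, 0.5, 1.0])  # points on both sides of t_initial

t_bigger = np.concatenate(([t_initial], t_like[t_like > t_initial]))
t_smaller = np.concatenate(([t_initial], t_like[t_like < t_initial][::-1]))

ans_bigger = odeint(f, y0, t_bigger)    # forward in time
ans_smaller = odeint(f, y0, t_smaller)  # backward: t_smaller is decreasing
# Rejoin, dropping the duplicated t_initial rows
ans = np.concatenate((ans_smaller[1:][::-1], ans_bigger[1:]))
print(np.allclose(ans[:, 0], np.exp(-t_like), atol=1e-6))  # True
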
saltstack/salt
salt/cloud/clouds/qingcloud.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/qingcloud.py#L448-L473
def _show_normalized_node(full_node):
    '''
    Normalize the QingCloud instance data. Used by list_nodes()-related
    functions.
    '''
    public_ips = full_node.get('eip', [])
    if public_ips:
        public_ip = public_ips['eip_addr']
        public_ips = [public_ip, ]

    private_ips = []
    for vxnet in full_node.get('vxnets', []):
        private_ip = vxnet.get('private_ip', None)
        if private_ip:
            private_ips.append(private_ip)

    normalized_node = {
        'id': full_node['instance_id'],
        'image': full_node['image']['image_id'],
        'size': full_node['instance_type'],
        'state': full_node['status'],
        'private_ips': private_ips,
        'public_ips': public_ips,
    }

    return normalized_node
Normalize the QingCloud instance data. Used by list_nodes()-related functions.
python
train
blue-yonder/tsfresh
tsfresh/feature_extraction/feature_calculators.py
https://github.com/blue-yonder/tsfresh/blob/c72c9c574371cf7dd7d54e00a466792792e5d202/tsfresh/feature_extraction/feature_calculators.py#L277-L288
def has_duplicate_min(x):
    """
    Checks if the minimal value of x is observed more than once

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: bool
    """
    if not isinstance(x, (np.ndarray, pd.Series)):
        x = np.asarray(x)
    return np.sum(x == np.min(x)) >= 2
Checks if the minimal value of x is observed more than once :param x: the time series to calculate the feature of :type x: numpy.ndarray :return: the value of this feature :return type: bool
python
train
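
A quick usage check, assuming tsfresh is installed (the import path below is the record's own module path; plain lists are coerced to arrays by the function):

from tsfresh.feature_extraction.feature_calculators import has_duplicate_min

print(has_duplicate_min([2, 1, 1, 3]))  # True: the minimum 1 occurs twice
print(has_duplicate_min([2, 1, 3]))     # False: the minimum is unique
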
nitely/django-hooks
hooks/signalhook.py
https://github.com/nitely/django-hooks/blob/26ea2150c9be110e90b9ee60fbfd1065ac30ab1d/hooks/signalhook.py#L25-L35
def register(self, name):
    """
    Register a new hook. Not required (see :py:func:`.connect` method)

    :param str name: The hook name
    :return: Django signal
    :rtype: :py:class:`django.dispatch.Signal`
    """
    signal = Signal(providing_args=['args', 'kwargs'])
    self._registry[name] = signal
    return signal
Register a new hook. Not required (see :py:func:`.connect` method) :param str name: The hook name :return: Django signal :rtype: :py:class:`django.dispatch.Signal`
python
train
twisted/mantissa
xmantissa/product.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/product.py#L29-L40
def installProductOn(self, userstore):
    """
    Creates an Installation in this user store for our collection of
    powerups, and then installs those powerups on the user's store.
    """
    def install():
        i = Installation(store=userstore)
        i.types = self.types
        i.install()
    userstore.transact(install)
Creates an Installation in this user store for our collection of powerups, and then installs those powerups on the user's store.
python
train
osfclient/osfclient
osfclient/models/file.py
https://github.com/osfclient/osfclient/blob/44b9a87e8c1ae6b63cdecd27a924af3fc2bf94cf/osfclient/models/file.py#L7-L18
def copyfileobj(fsrc, fdst, total, length=16*1024):
    """Copy data from file-like object fsrc to file-like object fdst

    This is like shutil.copyfileobj but with a progressbar.
    """
    with tqdm(unit='bytes', total=total, unit_scale=True) as pbar:
        while 1:
            buf = fsrc.read(length)
            if not buf:
                break
            fdst.write(buf)
            pbar.update(len(buf))
Copy data from file-like object fsrc to file-like object fdst This is like shutil.copyfileobj but with a progressbar.
python
valid
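
A usage sketch, copying one local file to another with a progress bar. The file names are hypothetical placeholders; the import path is the record's own module path:

import os
from osfclient.models.file import copyfileobj

src_path, dst_path = 'input.bin', 'output.bin'
total = os.path.getsize(src_path)  # drives the progress bar's 100% mark
with open(src_path, 'rb') as fsrc, open(dst_path, 'wb') as fdst:
    copyfileobj(fsrc, fdst, total)
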
RPi-Distro/python-gpiozero
gpiozero/tools.py
https://github.com/RPi-Distro/python-gpiozero/blob/7b67374fd0c8c4fde5586d9bad9531f076db9c0c/gpiozero/tools.py#L574-L601
def post_periodic_filtered(values, repeat_after, block):
    """
    After every *repeat_after* items, blocks the next *block* items from
    *values*. Note that unlike :func:`pre_periodic_filtered`, *repeat_after*
    can't be 0. For example, to block every tenth item read from an ADC::

        from gpiozero import MCP3008
        from gpiozero.tools import post_periodic_filtered

        adc = MCP3008(channel=0)
        for value in post_periodic_filtered(adc, 9, 1):
            print(value)
    """
    values = _normalize(values)
    if repeat_after < 1:
        raise ValueError("repeat_after must be 1 or larger")
    if block < 1:
        raise ValueError("block must be 1 or larger")
    it = iter(values)
    try:
        while True:
            for _ in range(repeat_after):
                yield next(it)
            for _ in range(block):
                next(it)
    except StopIteration:
        pass
After every *repeat_after* items, blocks the next *block* items from *values*. Note that unlike :func:`pre_periodic_filtered`, *repeat_after* can't be 0. For example, to block every tenth item read from an ADC:: from gpiozero import MCP3008 from gpiozero.tools import post_periodic_filtered adc = MCP3008(channel=0) for value in post_periodic_filtered(adc, 9, 1): print(value)
python
train
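
The keep/skip cycle can be verified without any ADC hardware. A standalone sketch of the same generator logic, where the helper name every_nth_blocked is ours and gpiozero's _normalize plumbing is omitted:

def every_nth_blocked(values, repeat_after, block):
    # Same keep/skip cycle as post_periodic_filtered above,
    # minus the gpiozero-specific input normalisation.
    it = iter(values)
    try:
        while True:
            for _ in range(repeat_after):
                yield next(it)
            for _ in range(block):
                next(it)
    except StopIteration:
        pass

print(list(every_nth_blocked(range(12), 9, 1)))
# [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11] -- item 9 (every tenth) is dropped
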
demianbrecht/flask-canvas
flask_canvas.py
https://github.com/demianbrecht/flask-canvas/blob/07aa310c43f9386598cdfd5b163f94efa7808c85/flask_canvas.py#L56-L102
def _canvas_route(self, *args, **kwargs):
    """ Decorator for canvas route
    """
    def outer(view_fn):
        @self.route(*args, **kwargs)
        def inner(*args, **kwargs):
            fn_args = getargspec(view_fn)
            try:
                idx = fn_args.args.index(_ARG_KEY)
            except ValueError:
                idx = -1

            if idx > -1:
                if 'error' in flask_request.args:
                    return redirect('%s?error=%s' % (
                        self.config.get('CANVAS_ERROR_URI', '/'),
                        flask_request.args.get('error')))

                if 'signed_request' not in flask_request.form:
                    self.logger.error('signed_request not in request.form')
                    abort(403)

                try:
                    _, decoded_data = _decode_signed_user(
                        *flask_request.form['signed_request'].split('.'))
                except ValueError as e:
                    self.logger.error(e.message)
                    abort(403)

                if 'oauth_token' not in decoded_data:
                    app.logger.info('unauthorized user, redirecting')
                    return _authorize()

                user = User(**decoded_data)

                if not app.config.get('CANVAS_SKIP_AUTH_CHECK', False) \
                        and not user.has_permissions():
                    self.logger.info(
                        'user does not have the required permission set.')
                    return _authorize()

                self.logger.info('all required permissions have been granted')
                args = args[:idx - 1] + (user,) + args[idx:]

            return view_fn(*args, **kwargs)
        return inner
    return outer
Decorator for canvas route
python
train
benedictpaten/sonLib
bioio.py
https://github.com/benedictpaten/sonLib/blob/1decb75bb439b70721ec776f685ce98e25217d26/bioio.py#L875-L887
def fastaAlignmentWrite(columnAlignment, names, seqNo, fastaFile,
                        filter=lambda x: True):
    """
    Writes out column alignment to the given file in multi-fasta format.
    """
    fastaFile = open(fastaFile, 'w')
    columnAlignment = [i for i in columnAlignment if filter(i)]
    for seq in xrange(0, seqNo):
        fastaFile.write(">%s\n" % names[seq])
        for column in columnAlignment:
            fastaFile.write(column[seq])
        fastaFile.write("\n")
    fastaFile.close()
Writes out column alignment to the given file in multi-fasta format.
python
train
tensorpack/tensorpack
tensorpack/utils/nvml.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/utils/nvml.py#L92-L113
def memory(self):
    """Memory information in bytes

    Example:

        >>> print(ctx.device(0).memory())
        {'total': 4238016512L, 'used': 434831360L, 'free': 3803185152L}

    Returns:
        total/used/free memory in bytes
    """
    class GpuMemoryInfo(Structure):
        _fields_ = [
            ('total', c_ulonglong),
            ('free', c_ulonglong),
            ('used', c_ulonglong),
        ]

    c_memory = GpuMemoryInfo()
    _check_return(_NVML.get_function(
        "nvmlDeviceGetMemoryInfo")(self.hnd, byref(c_memory)))
    return {'total': c_memory.total, 'free': c_memory.free, 'used': c_memory.used}
Memory information in bytes Example: >>> print(ctx.device(0).memory()) {'total': 4238016512L, 'used': 434831360L, 'free': 3803185152L} Returns: total/used/free memory in bytes
python
train
wind-python/windpowerlib
windpowerlib/wind_turbine.py
https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/windpowerlib/wind_turbine.py#L126-L190
def fetch_turbine_data(self, fetch_curve, data_source):
    r"""
    Fetches data of the requested wind turbine.

    Method fetches nominal power as well as power coefficient curve or
    power curve from a data set provided in the OpenEnergy Database
    (oedb). You can also import your own power (coefficient) curves from a
    file. For that the wind speeds in m/s have to be in the first row and
    the corresponding power coefficient curve values or power curve values
    in W in a row where the first column contains the turbine name.
    See `example_power_curves.csv` and
    `example_power_coefficient_curves.csv` in example/data for the
    required form of a csv file (more columns can be added). See
    :py:func:`~.get_turbine_data_from_file` for an example reading data
    from a csv file.

    Parameters
    ----------
    fetch_curve : string
        Parameter to specify whether a power or power coefficient curve
        should be retrieved from the provided turbine data. Valid options
        are 'power_curve' and 'power_coefficient_curve'. Default: None.
    data_source : string
        Specifies whether turbine data (e.g. nominal power, power curve,
        power coefficient curve) is loaded from the OpenEnergy Database
        ('oedb') or from a csv file ('<path including file name>').
        Default: 'oedb'.

    Returns
    -------
    self

    Examples
    --------
    >>> from windpowerlib import wind_turbine
    >>> enerconE126 = {
    ...    'hub_height': 135,
    ...    'rotor_diameter': 127,
    ...    'name': 'E-126/4200',
    ...    'fetch_curve': 'power_coefficient_curve',
    ...    'data_source': 'oedb'}
    >>> e126 = wind_turbine.WindTurbine(**enerconE126)
    >>> print(e126.power_coefficient_curve['value'][5])
    0.44
    >>> print(e126.nominal_power)
    4200000.0

    """
    if data_source == 'oedb':
        curve_df, nominal_power = get_turbine_data_from_oedb(
            turbine_type=self.name, fetch_curve=fetch_curve)
    else:
        curve_df, nominal_power = get_turbine_data_from_file(
            turbine_type=self.name, file_=data_source)
    if fetch_curve == 'power_curve':
        self.power_curve = curve_df
    elif fetch_curve == 'power_coefficient_curve':
        self.power_coefficient_curve = curve_df
    else:
        raise ValueError("'{0}' is an invalid value. ".format(
            fetch_curve) + "`fetch_curve` must be " +
            "'power_curve' or 'power_coefficient_curve'.")
    if self.nominal_power is None:
        self.nominal_power = nominal_power
    return self
r""" Fetches data of the requested wind turbine. Method fetches nominal power as well as power coefficient curve or power curve from a data set provided in the OpenEnergy Database (oedb). You can also import your own power (coefficient) curves from a file. For that the wind speeds in m/s have to be in the first row and the corresponding power coefficient curve values or power curve values in W in a row where the first column contains the turbine name. See `example_power_curves.csv` and `example_power_coefficient_curves.csv` in example/data for the required form of a csv file (more columns can be added). See :py:func:`~.get_turbine_data_from_file` for an example reading data from a csv file. Parameters ---------- fetch_curve : string Parameter to specify whether a power or power coefficient curve should be retrieved from the provided turbine data. Valid options are 'power_curve' and 'power_coefficient_curve'. Default: None. data_source : string Specifies whether turbine data (e.g. nominal power, power curve, power coefficient curve) is loaded from the OpenEnergy Database ('oedb') or from a csv file ('<path including file name>'). Default: 'oedb'. Returns ------- self Examples -------- >>> from windpowerlib import wind_turbine >>> enerconE126 = { ... 'hub_height': 135, ... 'rotor_diameter': 127, ... 'name': 'E-126/4200', ... 'fetch_curve': 'power_coefficient_curve', ... 'data_source': 'oedb'} >>> e126 = wind_turbine.WindTurbine(**enerconE126) >>> print(e126.power_coefficient_curve['value'][5]) 0.44 >>> print(e126.nominal_power) 4200000.0
python
train
PMBio/limix-backup
limix/deprecated/utils/preprocess.py
https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/deprecated/utils/preprocess.py#L62-L86
def rankStandardizeNormal(X):
    """
    Gaussianize X: [samples x phenotypes]
    - each phenotype is converted to ranks and transformed back to normal
      using the inverse CDF
    """
    Is = X.argsort(axis=0)
    RV = SP.zeros_like(X)
    rank = SP.zeros_like(X)
    for i in range(X.shape[1]):
        x = X[:, i]
        i_nan = SP.isnan(x)
        if 0:
            Is = x.argsort()
            rank = SP.zeros_like(x)
            rank[Is] = SP.arange(X.shape[0])
            # add one to ensure nothing = 0
            rank += 1
        else:
            rank = st.rankdata(x[~i_nan])
        # divide by (N+1) which yields uniform [0,1]
        rank /= ((~i_nan).sum() + 1)
        # apply inverse gaussian cdf
        RV[~i_nan, i] = SP.sqrt(2) * special.erfinv(2 * rank - 1)
        RV[i_nan, i] = x[i_nan]
    return RV
Gaussianize X: [samples x phenotypes] - each phenotype is converted to ranks and transformed back to normal using the inverse CDF
python
train
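
The same rank-based inverse normal transform can be written against current scipy in a few lines. A sketch for a single 1-D phenotype that ignores the NaN handling above (our restatement, not the limix code):

import numpy as np
from scipy import stats, special

def rank_to_normal(x):
    rank = stats.rankdata(x)   # ranks 1..N
    u = rank / (len(x) + 1)    # uniform in (0, 1)
    # inverse Gaussian CDF via the inverse error function
    return np.sqrt(2) * special.erfinv(2 * u - 1)

x = np.array([10.0, 0.5, 3.2, 7.7])
print(rank_to_normal(x))  # monotone in x, approximately standard normal
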
peopledoc/workalendar
workalendar/core.py
https://github.com/peopledoc/workalendar/blob/d044d5dfc1709ec388db34dab583dd554cc66c4e/workalendar/core.py#L246-L256
def find_following_working_day(self, day):
    """Looks for the following working day, if not already a working day.

    **WARNING**: this function doesn't take into account the calendar
    holidays, only the days of the week and the weekend days parameters.
    """
    day = cleaned_date(day)
    while day.weekday() in self.get_weekend_days():
        day = day + timedelta(days=1)
    return day
Looks for the following working day, if not already a working day. **WARNING**: this function doesn't take into account the calendar holidays, only the days of the week and the weekend days parameters.
python
train
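
Stripped of the Calendar class, the method reduces to a short date walk. A minimal standalone sketch with Saturday/Sunday hard-coded as the weekend (workalendar's get_weekend_days is calendar-specific, so that choice is an assumption here):

from datetime import date, timedelta

WEEKEND_DAYS = (5, 6)  # Saturday, Sunday

def find_following_working_day(day):
    while day.weekday() in WEEKEND_DAYS:
        day += timedelta(days=1)
    return day

print(find_following_working_day(date(2024, 6, 8)))  # 2024-06-10 (a Monday)
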
celery/cell
cell/actors.py
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L422-L428
def call(self, method, args={}, retry=False, retry_policy=None,
         ticket=None, **props):
    """Send message to the same actor and return :class:`AsyncResult`."""
    ticket = ticket or uuid()
    reply_q = self.get_reply_queue(ticket)
    self.cast(method, args, declare=[reply_q], reply_to=ticket, **props)
    return self.AsyncResult(ticket, self)
Send message to the same actor and return :class:`AsyncResult`.
python
train
minhhoit/yacms
yacms/core/managers.py
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/managers.py#L390-L408
def url_map(self, for_user=None, **kwargs):
    """
    Returns a dictionary of urls mapped to Displayable subclass
    instances, including a fake homepage instance if none exists.
    Used in ``yacms.core.sitemaps``.
    """
    class Home:
        title = _("Home")
    home = Home()
    setattr(home, "get_absolute_url", home_slug)
    items = {home.get_absolute_url(): home}
    for model in apps.get_models():
        if issubclass(model, self.model):
            for item in (model.objects.published(for_user=for_user)
                         .filter(**kwargs)
                         .exclude(slug__startswith="http://")
                         .exclude(slug__startswith="https://")):
                items[item.get_absolute_url()] = item
    return items
Returns a dictionary of urls mapped to Displayable subclass instances, including a fake homepage instance if none exists. Used in ``yacms.core.sitemaps``.
python
train
Spinmob/spinmob
egg/_gui.py
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/egg/_gui.py#L439-L445
def set_column_stretch(self, column=0, stretch=10):
    """
    Sets the column stretch. Larger numbers mean it will expand more to
    fill space.
    """
    self._layout.setColumnStretch(column, stretch)
    return self
Sets the column stretch. Larger numbers mean it will expand more to fill space.
python
train
pandas-dev/pandas
pandas/core/strings.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/strings.py#L1085-L1176
def str_findall(arr, pat, flags=0):
    """
    Find all occurrences of pattern or regular expression in the
    Series/Index.

    Equivalent to applying :func:`re.findall` to all the elements in the
    Series/Index.

    Parameters
    ----------
    pat : str
        Pattern or regular expression.
    flags : int, default 0
        Flags from ``re`` module, e.g. `re.IGNORECASE` (default is 0, which
        means no flags).

    Returns
    -------
    Series/Index of lists of strings
        All non-overlapping matches of pattern or regular expression in each
        string of this Series/Index.

    See Also
    --------
    count : Count occurrences of pattern or regular expression in each string
        of the Series/Index.
    extractall : For each string in the Series, extract groups from all
        matches of regular expression and return a DataFrame with one row for
        each match and one column for each group.
    re.findall : The equivalent ``re`` function to all non-overlapping
        matches of pattern or regular expression in string, as a list of
        strings.

    Examples
    --------
    >>> s = pd.Series(['Lion', 'Monkey', 'Rabbit'])

    The search for the pattern 'Monkey' returns one match:

    >>> s.str.findall('Monkey')
    0          []
    1    [Monkey]
    2          []
    dtype: object

    On the other hand, the search for the pattern 'MONKEY' doesn't return any
    match:

    >>> s.str.findall('MONKEY')
    0    []
    1    []
    2    []
    dtype: object

    Flags can be added to the pattern or regular expression. For instance,
    to find the pattern 'MONKEY' ignoring the case:

    >>> import re
    >>> s.str.findall('MONKEY', flags=re.IGNORECASE)
    0          []
    1    [Monkey]
    2          []
    dtype: object

    When the pattern matches more than one string in the Series, all matches
    are returned:

    >>> s.str.findall('on')
    0    [on]
    1    [on]
    2      []
    dtype: object

    Regular expressions are supported too. For instance, the search for all
    the strings ending with the word 'on' is shown next:

    >>> s.str.findall('on$')
    0    [on]
    1      []
    2      []
    dtype: object

    If the pattern is found more than once in the same string, then a list of
    multiple strings is returned:

    >>> s.str.findall('b')
    0        []
    1        []
    2    [b, b]
    dtype: object
    """
    regex = re.compile(pat, flags=flags)
    return _na_map(regex.findall, arr)
Find all occurrences of pattern or regular expression in the Series/Index. Equivalent to applying :func:`re.findall` to all the elements in the Series/Index. Parameters ---------- pat : str Pattern or regular expression. flags : int, default 0 Flags from ``re`` module, e.g. `re.IGNORECASE` (default is 0, which means no flags). Returns ------- Series/Index of lists of strings All non-overlapping matches of pattern or regular expression in each string of this Series/Index. See Also -------- count : Count occurrences of pattern or regular expression in each string of the Series/Index. extractall : For each string in the Series, extract groups from all matches of regular expression and return a DataFrame with one row for each match and one column for each group. re.findall : The equivalent ``re`` function to all non-overlapping matches of pattern or regular expression in string, as a list of strings. Examples -------- >>> s = pd.Series(['Lion', 'Monkey', 'Rabbit']) The search for the pattern 'Monkey' returns one match: >>> s.str.findall('Monkey') 0 [] 1 [Monkey] 2 [] dtype: object On the other hand, the search for the pattern 'MONKEY' doesn't return any match: >>> s.str.findall('MONKEY') 0 [] 1 [] 2 [] dtype: object Flags can be added to the pattern or regular expression. For instance, to find the pattern 'MONKEY' ignoring the case: >>> import re >>> s.str.findall('MONKEY', flags=re.IGNORECASE) 0 [] 1 [Monkey] 2 [] dtype: object When the pattern matches more than one string in the Series, all matches are returned: >>> s.str.findall('on') 0 [on] 1 [on] 2 [] dtype: object Regular expressions are supported too. For instance, the search for all the strings ending with the word 'on' is shown next: >>> s.str.findall('on$') 0 [on] 1 [] 2 [] dtype: object If the pattern is found more than once in the same string, then a list of multiple strings is returned: >>> s.str.findall('b') 0 [] 1 [] 2 [b, b] dtype: object
[ "Find", "all", "occurrences", "of", "pattern", "or", "regular", "expression", "in", "the", "Series", "/", "Index", "." ]
python
train
Qiskit/qiskit-terra
qiskit/circuit/instructionset.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/circuit/instructionset.py#L45-L49
def inverse(self): """Invert all instructions.""" for index, instruction in enumerate(self.instructions): self.instructions[index] = instruction.inverse() return self
[ "def", "inverse", "(", "self", ")", ":", "for", "index", ",", "instruction", "in", "enumerate", "(", "self", ".", "instructions", ")", ":", "self", ".", "instructions", "[", "index", "]", "=", "instruction", ".", "inverse", "(", ")", "return", "self" ]
Invert all instructions.
[ "Invert", "all", "instructions", "." ]
python
test
sibirrer/lenstronomy
lenstronomy/LensModel/Profiles/dipole.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/Profiles/dipole.py#L85-L91
def com(self, center1_x, center1_y, center2_x, center2_y, Fm): """ :return: center of mass """ com_x = (Fm * center1_x + center2_x)/(Fm + 1.) com_y = (Fm * center1_y + center2_y)/(Fm + 1.) return com_x, com_y
[ "def", "com", "(", "self", ",", "center1_x", ",", "center1_y", ",", "center2_x", ",", "center2_y", ",", "Fm", ")", ":", "com_x", "=", "(", "Fm", "*", "center1_x", "+", "center2_x", ")", "/", "(", "Fm", "+", "1.", ")", "com_y", "=", "(", "Fm", "*", "center1_y", "+", "center2_y", ")", "/", "(", "Fm", "+", "1.", ")", "return", "com_x", ",", "com_y" ]
:return: center of mass
[ ":", "return", ":", "center", "of", "mass" ]
python
train
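The `com` helper above is a weighted mean of the two centers, with `Fm` acting as the relative weight of the first one. A minimal standalone sketch of the same formula (names are illustrative, not imported from lenstronomy):

    def com(center1_x, center1_y, center2_x, center2_y, Fm):
        # Weighted mean of the two centers; Fm weights the first center.
        com_x = (Fm * center1_x + center2_x) / (Fm + 1.0)
        com_y = (Fm * center1_y + center2_y) / (Fm + 1.0)
        return com_x, com_y

    # With Fm = 1 the two centers weigh equally, so the result is the midpoint.
    assert com(0.0, 0.0, 2.0, 4.0, 1.0) == (1.0, 2.0)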
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/dictandlist.py
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/dictandlist.py#L106-L121
def dict_of_lists_add(dictionary, key, value): # type: (DictUpperBound, Any, Any) -> None """Add value to a list in a dictionary by key Args: dictionary (DictUpperBound): Dictionary to which to add values key (Any): Key within dictionary value (Any): Value to add to list in dictionary Returns: None """ list_objs = dictionary.get(key, list()) list_objs.append(value) dictionary[key] = list_objs
[ "def", "dict_of_lists_add", "(", "dictionary", ",", "key", ",", "value", ")", ":", "# type: (DictUpperBound, Any, Any) -> None", "list_objs", "=", "dictionary", ".", "get", "(", "key", ",", "list", "(", ")", ")", "list_objs", ".", "append", "(", "value", ")", "dictionary", "[", "key", "]", "=", "list_objs" ]
Add value to a list in a dictionary by key Args: dictionary (DictUpperBound): Dictionary to which to add values key (Any): Key within dictionary value (Any): Value to add to list in dictionary Returns: None
[ "Add", "value", "to", "a", "list", "in", "a", "dictionary", "by", "key" ]
python
train
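A quick usage sketch of `dict_of_lists_add`, with the function body copied from the record above so it runs standalone:

    def dict_of_lists_add(dictionary, key, value):
        list_objs = dictionary.get(key, list())
        list_objs.append(value)
        dictionary[key] = list_objs

    d = {}
    dict_of_lists_add(d, 'a', 1)
    dict_of_lists_add(d, 'a', 2)
    dict_of_lists_add(d, 'b', 3)
    assert d == {'a': [1, 2], 'b': [3]}

The stdlib `collections.defaultdict(list)` gives the same behavior when you control the dictionary's construction; a helper like this is for the case where you receive a plain dict.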
epfl-lts2/pygsp
pygsp/graphs/graph.py
https://github.com/epfl-lts2/pygsp/blob/8ce5bde39206129287375af24fdbcd7edddca8c5/pygsp/graphs/graph.py#L687-L738
def d(self): r"""The degree (number of neighbors) of vertices. For undirected graphs, the degree of a vertex is the number of vertices it is connected to. For directed graphs, the degree is the average of the in and out degrees, where the in degree is the number of incoming edges, and the out degree the number of outgoing edges. In both cases, the degree of the vertex :math:`v_i` is the average between the number of non-zero values in the :math:`i`-th column (the in degree) and the :math:`i`-th row (the out degree) of the weighted adjacency matrix :attr:`W`. Examples -------- Undirected graph: >>> graph = graphs.Graph([ ... [0, 1, 0], ... [1, 0, 2], ... [0, 2, 0], ... ]) >>> print(graph.d) # Number of neighbors. [1 2 1] >>> print(graph.dw) # Weighted degree. [1 3 2] Directed graph: >>> graph = graphs.Graph([ ... [0, 1, 0], ... [0, 0, 2], ... [0, 2, 0], ... ]) >>> print(graph.d) # Number of neighbors. [0.5 1.5 1. ] >>> print(graph.dw) # Weighted degree. [0.5 2.5 2. ] """ if self._d is None: if not self.is_directed(): # Shortcut for undirected graphs. self._d = self.W.getnnz(axis=1) # axis=1 faster for CSR (https://stackoverflow.com/a/16391764) else: degree_in = self.W.getnnz(axis=0) degree_out = self.W.getnnz(axis=1) self._d = (degree_in + degree_out) / 2 return self._d
[ "def", "d", "(", "self", ")", ":", "if", "self", ".", "_d", "is", "None", ":", "if", "not", "self", ".", "is_directed", "(", ")", ":", "# Shortcut for undirected graphs.", "self", ".", "_d", "=", "self", ".", "W", ".", "getnnz", "(", "axis", "=", "1", ")", "# axis=1 faster for CSR (https://stackoverflow.com/a/16391764)", "else", ":", "degree_in", "=", "self", ".", "W", ".", "getnnz", "(", "axis", "=", "0", ")", "degree_out", "=", "self", ".", "W", ".", "getnnz", "(", "axis", "=", "1", ")", "self", ".", "_d", "=", "(", "degree_in", "+", "degree_out", ")", "/", "2", "return", "self", ".", "_d" ]
r"""The degree (number of neighbors) of vertices. For undirected graphs, the degree of a vertex is the number of vertices it is connected to. For directed graphs, the degree is the average of the in and out degrees, where the in degree is the number of incoming edges, and the out degree the number of outgoing edges. In both cases, the degree of the vertex :math:`v_i` is the average between the number of non-zero values in the :math:`i`-th column (the in degree) and the :math:`i`-th row (the out degree) of the weighted adjacency matrix :attr:`W`. Examples -------- Undirected graph: >>> graph = graphs.Graph([ ... [0, 1, 0], ... [1, 0, 2], ... [0, 2, 0], ... ]) >>> print(graph.d) # Number of neighbors. [1 2 1] >>> print(graph.dw) # Weighted degree. [1 3 2] Directed graph: >>> graph = graphs.Graph([ ... [0, 1, 0], ... [0, 0, 2], ... [0, 2, 0], ... ]) >>> print(graph.d) # Number of neighbors. [0.5 1.5 1. ] >>> print(graph.dw) # Weighted degree. [0.5 2.5 2. ]
[ "r", "The", "degree", "(", "number", "of", "neighbors", ")", "of", "vertices", "." ]
python
train
google/grr
grr/client/grr_response_client/vfs_handlers/sleuthkit.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/vfs_handlers/sleuthkit.py#L331-L357
def ListFiles(self, ext_attrs=None): """List all the files in the directory.""" del ext_attrs # Unused. if not self.IsDirectory(): raise IOError("%s is not a directory" % self.pathspec.CollapsePath()) for f in self.fd.as_directory(): try: name = _DecodeUTF8WithWarning(f.info.name.name) # Drop these useless entries. if name in [".", ".."] or name in self.BLACKLIST_FILES: continue # First we yield a standard response using the default attributes. yield self.MakeStatResponse(f, tsk_attribute=None, append_name=name) # Now send back additional named attributes for the ADS. for attribute in f: if attribute.info.type in [ pytsk3.TSK_FS_ATTR_TYPE_NTFS_DATA, pytsk3.TSK_FS_ATTR_TYPE_DEFAULT ]: if attribute.info.name: yield self.MakeStatResponse( f, append_name=name, tsk_attribute=attribute) except AttributeError: pass
[ "def", "ListFiles", "(", "self", ",", "ext_attrs", "=", "None", ")", ":", "del", "ext_attrs", "# Unused.", "if", "not", "self", ".", "IsDirectory", "(", ")", ":", "raise", "IOError", "(", "\"%s is not a directory\"", "%", "self", ".", "pathspec", ".", "CollapsePath", "(", ")", ")", "for", "f", "in", "self", ".", "fd", ".", "as_directory", "(", ")", ":", "try", ":", "name", "=", "_DecodeUTF8WithWarning", "(", "f", ".", "info", ".", "name", ".", "name", ")", "# Drop these useless entries.", "if", "name", "in", "[", "\".\"", ",", "\"..\"", "]", "or", "name", "in", "self", ".", "BLACKLIST_FILES", ":", "continue", "# First we yield a standard response using the default attributes.", "yield", "self", ".", "MakeStatResponse", "(", "f", ",", "tsk_attribute", "=", "None", ",", "append_name", "=", "name", ")", "# Now send back additional named attributes for the ADS.", "for", "attribute", "in", "f", ":", "if", "attribute", ".", "info", ".", "type", "in", "[", "pytsk3", ".", "TSK_FS_ATTR_TYPE_NTFS_DATA", ",", "pytsk3", ".", "TSK_FS_ATTR_TYPE_DEFAULT", "]", ":", "if", "attribute", ".", "info", ".", "name", ":", "yield", "self", ".", "MakeStatResponse", "(", "f", ",", "append_name", "=", "name", ",", "tsk_attribute", "=", "attribute", ")", "except", "AttributeError", ":", "pass" ]
List all the files in the directory.
[ "List", "all", "the", "files", "in", "the", "directory", "." ]
python
train
zsimic/runez
src/runez/config.py
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/config.py#L471-L482
def unitized(value, unit, base=DEFAULT_BASE): """ Args: value (int | float): Value to expand unit (str | unicode): Given unit (see UNITS) base (int): Base to use (usually 1024) Returns: Deduced value (example: "1k" becomes 1000) """ exponent = 0 if not unit else UNITS.index(unit) + 1 return int(value * (base ** exponent))
[ "def", "unitized", "(", "value", ",", "unit", ",", "base", "=", "DEFAULT_BASE", ")", ":", "exponent", "=", "0", "if", "not", "unit", "else", "UNITS", ".", "index", "(", "unit", ")", "+", "1", "return", "int", "(", "value", "*", "(", "base", "**", "exponent", ")", ")" ]
Args: value (int | float): Value to expand unit (str | unicode): Given unit (see UNITS) base (int): Base to use (usually 1024) Returns: Deduced value (example: "1k" becomes 1000)
[ "Args", ":", "value", "(", "int", "|", "float", ")", ":", "Value", "to", "expand", "unit", "(", "str", "|", "unicode", ")", ":", "Given", "unit", "(", "see", "UNITS", ")", "base", "(", "int", ")", ":", "Base", "to", "use", "(", "usually", "1024", ")" ]
python
train
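The deduction in `unitized` is `value * base ** (UNITS.index(unit) + 1)`. A standalone check, under the assumption that `UNITS` is the usual k/m/g/t ladder (the real list lives in runez.config and is not shown in this record):

    UNITS = ['k', 'm', 'g', 't']  # assumed unit ladder, for illustration only
    DEFAULT_BASE = 1024

    def unitized(value, unit, base=DEFAULT_BASE):
        exponent = 0 if not unit else UNITS.index(unit) + 1
        return int(value * (base ** exponent))

    assert unitized(1, 'k', base=1000) == 1000           # the docstring's "1k" -> 1000 example
    assert unitized(2, 'm', base=1024) == 2 * 1024 ** 2  # 2 MiB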
kubernetes-client/python
kubernetes/client/apis/core_v1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/core_v1_api.py#L14942-L14967
def patch_namespaced_event(self, name, namespace, body, **kwargs): """ partially update the specified Event This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_event(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Event (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. :return: V1Event If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_event_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_event_with_http_info(name, namespace, body, **kwargs) return data
[ "def", "patch_namespaced_event", "(", "self", ",", "name", ",", "namespace", ",", "body", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "patch_namespaced_event_with_http_info", "(", "name", ",", "namespace", ",", "body", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "patch_namespaced_event_with_http_info", "(", "name", ",", "namespace", ",", "body", ",", "*", "*", "kwargs", ")", "return", "data" ]
partially update the specified Event This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_event(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Event (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. :return: V1Event If the method is called asynchronously, returns the request thread.
[ "partially", "update", "the", "specified", "Event", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "thread", "=", "api", ".", "patch_namespaced_event", "(", "name", "namespace", "body", "async_req", "=", "True", ")", ">>>", "result", "=", "thread", ".", "get", "()" ]
python
train
MartinThoma/hwrt
hwrt/language_model/language_model.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/language_model/language_model.py#L157-L177
def load_model(): """ Load a n-gram language model for mathematics in ARPA format which gets shipped with hwrt. Returns ------- A NgramLanguageModel object """ logging.info("Load language model...") ngram_arpa_t = pkg_resources.resource_filename('hwrt', 'misc/ngram.arpa.tar.bz2') with tarfile.open(ngram_arpa_t, 'r:bz2') as tar: tarfolder = tempfile.mkdtemp() tar.extractall(path=tarfolder) ngram_arpa_f = os.path.join(tarfolder, 'ngram.arpa') with open(ngram_arpa_f) as f: content = f.read() ngram_model = NgramLanguageModel() ngram_model.load_from_arpa_str(content) return ngram_model
[ "def", "load_model", "(", ")", ":", "logging", ".", "info", "(", "\"Load language model...\"", ")", "ngram_arpa_t", "=", "pkg_resources", ".", "resource_filename", "(", "'hwrt'", ",", "'misc/ngram.arpa.tar.bz2'", ")", "with", "tarfile", ".", "open", "(", "ngram_arpa_t", ",", "'r:bz2'", ")", "as", "tar", ":", "tarfolder", "=", "tempfile", ".", "mkdtemp", "(", ")", "tar", ".", "extractall", "(", "path", "=", "tarfolder", ")", "ngram_arpa_f", "=", "os", ".", "path", ".", "join", "(", "tarfolder", ",", "'ngram.arpa'", ")", "with", "open", "(", "ngram_arpa_f", ")", "as", "f", ":", "content", "=", "f", ".", "read", "(", ")", "ngram_model", "=", "NgramLanguageModel", "(", ")", "ngram_model", ".", "load_from_arpa_str", "(", "content", ")", "return", "ngram_model" ]
Load a n-gram language model for mathematics in ARPA format which gets shipped with hwrt. Returns ------- A NgramLanguageModel object
[ "Load", "a", "n", "-", "gram", "language", "model", "for", "mathematics", "in", "ARPA", "format", "which", "gets", "shipped", "with", "hwrt", "." ]
python
train
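The pattern in `load_model` (extract a bundled .tar.bz2 into a scratch directory, then read one member) is reusable on its own. A hedged generic sketch with a hypothetical helper name; note that, like the original, it leaves the temporary directory behind:

    import os
    import tarfile
    import tempfile

    def read_member_from_tar_bz2(archive_path, member_name):
        # Extract the whole archive to a fresh temp dir, then read one file from it.
        with tarfile.open(archive_path, 'r:bz2') as tar:
            tarfolder = tempfile.mkdtemp()
            tar.extractall(path=tarfolder)
        with open(os.path.join(tarfolder, member_name)) as f:
            return f.read()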
marcomusy/vtkplotter
vtkplotter/utils.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/utils.py#L181-L198
def pointIsInTriangle(p, p1, p2, p3): """ Return True if a point is inside (or above/below) a triangle defined by 3 points in space. """ p = np.array(p) u = np.array(p2) - p1 v = np.array(p3) - p1 n = np.cross(u, v) w = p - p1 ln = np.dot(n, n) if not ln: return True # degenerate triangle gamma = (np.dot(np.cross(u, w), n)) / ln beta = (np.dot(np.cross(w, v), n)) / ln alpha = 1 - gamma - beta if 0 < alpha < 1 and 0 < beta < 1 and 0 < gamma < 1: return True return False
[ "def", "pointIsInTriangle", "(", "p", ",", "p1", ",", "p2", ",", "p3", ")", ":", "p", "=", "np", ".", "array", "(", "p", ")", "u", "=", "np", ".", "array", "(", "p2", ")", "-", "p1", "v", "=", "np", ".", "array", "(", "p3", ")", "-", "p1", "n", "=", "np", ".", "cross", "(", "u", ",", "v", ")", "w", "=", "p", "-", "p1", "ln", "=", "np", ".", "dot", "(", "n", ",", "n", ")", "if", "not", "ln", ":", "return", "True", "# degenerate triangle", "gamma", "=", "(", "np", ".", "dot", "(", "np", ".", "cross", "(", "u", ",", "w", ")", ",", "n", ")", ")", "/", "ln", "beta", "=", "(", "np", ".", "dot", "(", "np", ".", "cross", "(", "w", ",", "v", ")", ",", "n", ")", ")", "/", "ln", "alpha", "=", "1", "-", "gamma", "-", "beta", "if", "0", "<", "alpha", "<", "1", "and", "0", "<", "beta", "<", "1", "and", "0", "<", "gamma", "<", "1", ":", "return", "True", "return", "False" ]
Return True if a point is inside (or above/below) a triangle defined by 3 points in space.
[ "Return", "True", "if", "a", "point", "is", "inside", "(", "or", "above", "/", "below", ")", "a", "triangle", "defined", "by", "3", "points", "in", "space", "." ]
python
train
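`pointIsInTriangle` computes barycentric coordinates from cross products against the triangle normal, which is why a point directly above or below the triangle's plane also tests positive. A self-contained version with a small check (renamed to signal it is a sketch, not the vtkplotter export):

    import numpy as np

    def point_in_triangle(p, p1, p2, p3):
        p, p1 = np.asarray(p, float), np.asarray(p1, float)
        u = np.asarray(p2, float) - p1   # first edge
        v = np.asarray(p3, float) - p1   # second edge
        n = np.cross(u, v)               # triangle normal
        w = p - p1
        ln = np.dot(n, n)
        if not ln:
            return True  # degenerate triangle
        gamma = np.dot(np.cross(u, w), n) / ln
        beta = np.dot(np.cross(w, v), n) / ln
        alpha = 1 - gamma - beta
        return 0 < alpha < 1 and 0 < beta < 1 and 0 < gamma < 1

    tri = ([0, 0, 0], [1, 0, 0], [0, 1, 0])
    assert point_in_triangle([0.2, 0.2, 0], *tri)
    assert not point_in_triangle([2, 2, 0], *tri)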
BD2KGenomics/protect
attic/ProTECT.py
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L1099-L1109
def run_fusion_caller(job, star_bam, univ_options, fusion_options): """ This module will run a fusion caller on DNA bams. This module will be implemented in the future. This module corresponds to node 10 on the tree """ job.fileStore.logToMaster('Running FUSION on %s' % univ_options['patient']) fusion_file = job.fileStore.getLocalTempFile() output_file = job.fileStore.writeGlobalFile(fusion_file) return output_file
[ "def", "run_fusion_caller", "(", "job", ",", "star_bam", ",", "univ_options", ",", "fusion_options", ")", ":", "job", ".", "fileStore", ".", "logToMaster", "(", "'Running FUSION on %s'", "%", "univ_options", "[", "'patient'", "]", ")", "fusion_file", "=", "job", ".", "fileStore", ".", "getLocalTempFile", "(", ")", "output_file", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "fusion_file", ")", "return", "output_file" ]
This module will run a fusion caller on DNA bams. This module will be implemented in the future. This module corresponds to node 10 on the tree
[ "This", "module", "will", "run", "a", "fusion", "caller", "on", "DNA", "bams", ".", "This", "module", "will", "be", "implemented", "in", "the", "future", "." ]
python
train
PGower/PyCanvas
pycanvas/apis/calendar_events.py
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/calendar_events.py#L254-L286
def reserve_time_slot(self, id, cancel_existing=None, comments=None, participant_id=None): """ Reserve a time slot. Reserves a particular time slot and return the new reservation """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - participant_id """User or group id for whom you are making the reservation (depends on the participant type). Defaults to the current user (or user's candidate group).""" if participant_id is not None: data["participant_id"] = participant_id # OPTIONAL - comments """Comments to associate with this reservation""" if comments is not None: data["comments"] = comments # OPTIONAL - cancel_existing """Defaults to false. If true, cancel any previous reservation(s) for this participant and appointment group.""" if cancel_existing is not None: data["cancel_existing"] = cancel_existing self.logger.debug("POST /api/v1/calendar_events/{id}/reservations with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/calendar_events/{id}/reservations".format(**path), data=data, params=params, no_data=True)
[ "def", "reserve_time_slot", "(", "self", ",", "id", ",", "cancel_existing", "=", "None", ",", "comments", "=", "None", ",", "participant_id", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - id\r", "\"\"\"ID\"\"\"", "path", "[", "\"id\"", "]", "=", "id", "# OPTIONAL - participant_id\r", "\"\"\"User or group id for whom you are making the reservation (depends on the\r\n participant type). Defaults to the current user (or user's candidate group).\"\"\"", "if", "participant_id", "is", "not", "None", ":", "data", "[", "\"participant_id\"", "]", "=", "participant_id", "# OPTIONAL - comments\r", "\"\"\"Comments to associate with this reservation\"\"\"", "if", "comments", "is", "not", "None", ":", "data", "[", "\"comments\"", "]", "=", "comments", "# OPTIONAL - cancel_existing\r", "\"\"\"Defaults to false. If true, cancel any previous reservation(s) for this\r\n participant and appointment group.\"\"\"", "if", "cancel_existing", "is", "not", "None", ":", "data", "[", "\"cancel_existing\"", "]", "=", "cancel_existing", "self", ".", "logger", ".", "debug", "(", "\"POST /api/v1/calendar_events/{id}/reservations with query params: {params} and form data: {data}\"", ".", "format", "(", "params", "=", "params", ",", "data", "=", "data", ",", "*", "*", "path", ")", ")", "return", "self", ".", "generic_request", "(", "\"POST\"", ",", "\"/api/v1/calendar_events/{id}/reservations\"", ".", "format", "(", "*", "*", "path", ")", ",", "data", "=", "data", ",", "params", "=", "params", ",", "no_data", "=", "True", ")" ]
Reserve a time slot. Reserves a particular time slot and return the new reservation
[ "Reserve", "a", "time", "slot", ".", "Reserves", "a", "particular", "time", "slot", "and", "return", "the", "new", "reservation" ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/thread.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/thread.py#L1598-L1617
def disassemble_string(self, lpAddress, code): """ Disassemble instructions from a block of binary code. @type lpAddress: int @param lpAddress: Memory address where the code was read from. @type code: str @param code: Binary code to disassemble. @rtype: list of tuple( long, int, str, str ) @return: List of tuples. Each tuple represents an assembly instruction and contains: - Memory address of instruction. - Size of instruction in bytes. - Disassembly line of instruction. - Hexadecimal dump of instruction. """ aProcess = self.get_process() return aProcess.disassemble_string(lpAddress, code)
[ "def", "disassemble_string", "(", "self", ",", "lpAddress", ",", "code", ")", ":", "aProcess", "=", "self", ".", "get_process", "(", ")", "return", "aProcess", ".", "disassemble_string", "(", "lpAddress", ",", "code", ")" ]
Disassemble instructions from a block of binary code. @type lpAddress: int @param lpAddress: Memory address where the code was read from. @type code: str @param code: Binary code to disassemble. @rtype: list of tuple( long, int, str, str ) @return: List of tuples. Each tuple represents an assembly instruction and contains: - Memory address of instruction. - Size of instruction in bytes. - Disassembly line of instruction. - Hexadecimal dump of instruction.
[ "Disassemble", "instructions", "from", "a", "block", "of", "binary", "code", "." ]
python
train
jonathf/chaospy
chaospy/distributions/operators/sinh.py
https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/distributions/operators/sinh.py#L38-L41
def _pdf(self, x, dist, cache): """Probability density function.""" return evaluation.evaluate_density( dist, numpy.arcsinh(x), cache=cache)/numpy.sqrt(1+x*x)
[ "def", "_pdf", "(", "self", ",", "x", ",", "dist", ",", "cache", ")", ":", "return", "evaluation", ".", "evaluate_density", "(", "dist", ",", "numpy", ".", "arcsinh", "(", "x", ")", ",", "cache", "=", "cache", ")", "/", "numpy", ".", "sqrt", "(", "1", "+", "x", "*", "x", ")" ]
Probability density function.
[ "Probability", "density", "function", "." ]
python
train
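The expression in `_pdf` is the standard change of variables: if Y = sinh(X), then f_Y(y) = f_X(arcsinh(y)) / sqrt(1 + y^2), since d/dy arcsinh(y) = 1 / sqrt(1 + y^2). A numerical sanity check with a standard normal base distribution standing in for chaospy's `evaluate_density` (scipy assumed available):

    import numpy as np
    from scipy.stats import norm

    def sinh_pdf(y):
        # Density of Y = sinh(X) for X ~ N(0, 1).
        return norm.pdf(np.arcsinh(y)) / np.sqrt(1 + y * y)

    y = np.linspace(-50.0, 50.0, 200001)
    p = sinh_pdf(y)
    integral = np.sum(0.5 * (p[1:] + p[:-1]) * np.diff(y))  # trapezoid rule
    assert abs(integral - 1.0) < 1e-4  # the density integrates to ~1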
squidsoup/muddle.py
muddle/api.py
https://github.com/squidsoup/muddle.py/blob/f58c62e7d92b9ac24a16de007c0fbd6607b15687/muddle/api.py#L249-L266
def details(self): """ Returns details for given category :returns: category response object Example Usage:: >>> import muddle >>> muddle.category(10).details() """ params = {'wsfunction': 'core_course_get_categories', 'criteria[0][key]': 'id', 'criteria[0][value]': self.category_id} params.update(self.request_params) return requests.post(self.api_url, params=params, verify=False)
[ "def", "details", "(", "self", ")", ":", "params", "=", "{", "'wsfunction'", ":", "'core_course_get_categories'", ",", "'criteria[0][key]'", ":", "'id'", ",", "'criteria[0][value]'", ":", "self", ".", "category_id", "}", "params", ".", "update", "(", "self", ".", "request_params", ")", "return", "requests", ".", "post", "(", "self", ".", "api_url", ",", "params", "=", "params", ",", "verify", "=", "False", ")" ]
Returns details for given category :returns: category response object Example Usage:: >>> import muddle >>> muddle.category(10).details()
[ "Returns", "details", "for", "given", "category" ]
python
train
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/pdftex.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/pdftex.py#L71-L99
def generate(env): """Add Builders and construction variables for pdftex to an Environment.""" global PDFTeXAction if PDFTeXAction is None: PDFTeXAction = SCons.Action.Action('$PDFTEXCOM', '$PDFTEXCOMSTR') global PDFLaTeXAction if PDFLaTeXAction is None: PDFLaTeXAction = SCons.Action.Action("$PDFLATEXCOM", "$PDFLATEXCOMSTR") global PDFTeXLaTeXAction if PDFTeXLaTeXAction is None: PDFTeXLaTeXAction = SCons.Action.Action(PDFTeXLaTeXFunction, strfunction=SCons.Tool.tex.TeXLaTeXStrFunction) env.AppendUnique(LATEXSUFFIXES=SCons.Tool.LaTeXSuffixes) from . import pdf pdf.generate(env) bld = env['BUILDERS']['PDF'] bld.add_action('.tex', PDFTeXLaTeXAction) bld.add_emitter('.tex', SCons.Tool.tex.tex_pdf_emitter) # Add the epstopdf builder after the pdftex builder # so pdftex is the default for no source suffix pdf.generate2(env) SCons.Tool.tex.generate_common(env)
[ "def", "generate", "(", "env", ")", ":", "global", "PDFTeXAction", "if", "PDFTeXAction", "is", "None", ":", "PDFTeXAction", "=", "SCons", ".", "Action", ".", "Action", "(", "'$PDFTEXCOM'", ",", "'$PDFTEXCOMSTR'", ")", "global", "PDFLaTeXAction", "if", "PDFLaTeXAction", "is", "None", ":", "PDFLaTeXAction", "=", "SCons", ".", "Action", ".", "Action", "(", "\"$PDFLATEXCOM\"", ",", "\"$PDFLATEXCOMSTR\"", ")", "global", "PDFTeXLaTeXAction", "if", "PDFTeXLaTeXAction", "is", "None", ":", "PDFTeXLaTeXAction", "=", "SCons", ".", "Action", ".", "Action", "(", "PDFTeXLaTeXFunction", ",", "strfunction", "=", "SCons", ".", "Tool", ".", "tex", ".", "TeXLaTeXStrFunction", ")", "env", ".", "AppendUnique", "(", "LATEXSUFFIXES", "=", "SCons", ".", "Tool", ".", "LaTeXSuffixes", ")", "from", ".", "import", "pdf", "pdf", ".", "generate", "(", "env", ")", "bld", "=", "env", "[", "'BUILDERS'", "]", "[", "'PDF'", "]", "bld", ".", "add_action", "(", "'.tex'", ",", "PDFTeXLaTeXAction", ")", "bld", ".", "add_emitter", "(", "'.tex'", ",", "SCons", ".", "Tool", ".", "tex", ".", "tex_pdf_emitter", ")", "# Add the epstopdf builder after the pdftex builder ", "# so pdftex is the default for no source suffix", "pdf", ".", "generate2", "(", "env", ")", "SCons", ".", "Tool", ".", "tex", ".", "generate_common", "(", "env", ")" ]
Add Builders and construction variables for pdftex to an Environment.
[ "Add", "Builders", "and", "construction", "variables", "for", "pdftex", "to", "an", "Environment", "." ]
python
train
edx/bok-choy
bok_choy/browser.py
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/browser.py#L477-L503
def _proxy_kwargs(browser_name, proxy, browser_kwargs={}): # pylint: disable=dangerous-default-value """ Determines the kwargs needed to set up a proxy based on the browser type. Returns: a dictionary of arguments needed to pass when instantiating the WebDriver instance. """ proxy_dict = { "httpProxy": proxy.proxy, "proxyType": 'manual', } if browser_name == 'firefox' and 'desired_capabilities' not in browser_kwargs: # This one works for firefox locally wd_proxy = webdriver.common.proxy.Proxy(proxy_dict) browser_kwargs['proxy'] = wd_proxy else: # This one works with chrome, both locally and remote # This one works with firefox remote, but not locally if 'desired_capabilities' not in browser_kwargs: browser_kwargs['desired_capabilities'] = {} browser_kwargs['desired_capabilities']['proxy'] = proxy_dict return browser_kwargs
[ "def", "_proxy_kwargs", "(", "browser_name", ",", "proxy", ",", "browser_kwargs", "=", "{", "}", ")", ":", "# pylint: disable=dangerous-default-value", "proxy_dict", "=", "{", "\"httpProxy\"", ":", "proxy", ".", "proxy", ",", "\"proxyType\"", ":", "'manual'", ",", "}", "if", "browser_name", "==", "'firefox'", "and", "'desired_capabilities'", "not", "in", "browser_kwargs", ":", "# This one works for firefox locally", "wd_proxy", "=", "webdriver", ".", "common", ".", "proxy", ".", "Proxy", "(", "proxy_dict", ")", "browser_kwargs", "[", "'proxy'", "]", "=", "wd_proxy", "else", ":", "# This one works with chrome, both locally and remote", "# This one works with firefox remote, but not locally", "if", "'desired_capabilities'", "not", "in", "browser_kwargs", ":", "browser_kwargs", "[", "'desired_capabilities'", "]", "=", "{", "}", "browser_kwargs", "[", "'desired_capabilities'", "]", "[", "'proxy'", "]", "=", "proxy_dict", "return", "browser_kwargs" ]
Determines the kwargs needed to set up a proxy based on the browser type. Returns: a dictionary of arguments needed to pass when instantiating the WebDriver instance.
[ "Determines", "the", "kwargs", "needed", "to", "set", "up", "a", "proxy", "based", "on", "the", "browser", "type", "." ]
python
train
DancingQuanta/pyusbiss
usbiss/usbiss.py
https://github.com/DancingQuanta/pyusbiss/blob/fc64e123f1c97f53ad153c474d230ad38044c3cb/usbiss/usbiss.py#L95-L112
def get_iss_info(self): """ Get information about the USB-ISS Querying will return three bytes; - the module ID (7), - firmware version (currently 2), - the current operating mode. """ self.write_data([self.ISS_CMD, self.ISS_VERSION]) response = self.read_data(3) if len(response) == 3: response = self.decode(response) self.module = response[0] self.firmware = response[1] self._mode = response[2] else: raise USBISSError("Could not get version details")
[ "def", "get_iss_info", "(", "self", ")", ":", "self", ".", "write_data", "(", "[", "self", ".", "ISS_CMD", ",", "self", ".", "ISS_VERSION", "]", ")", "response", "=", "self", ".", "read_data", "(", "3", ")", "if", "len", "(", "response", ")", "==", "3", ":", "response", "=", "self", ".", "decode", "(", "response", ")", "self", ".", "module", "=", "response", "[", "0", "]", "self", ".", "firmware", "=", "response", "[", "1", "]", "self", ".", "_mode", "=", "response", "[", "2", "]", "else", ":", "raise", "USBISSError", "(", "\"Could not get version details\"", ")" ]
Get information about the USB-ISS Querying will return three bytes; - the module ID (7), - firmware version (currently 2), - the current operating mode.
[ "Get", "information", "about", "the", "USB", "-", "ISS" ]
python
train
apache/incubator-superset
superset/connectors/druid/models.py
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/models.py#L1010-L1025
def _dimensions_to_values(dimensions): """ Replace dimensions specs with their `dimension` values, and ignore those without """ values = [] for dimension in dimensions: if isinstance(dimension, dict): if 'extractionFn' in dimension: values.append(dimension) elif 'dimension' in dimension: values.append(dimension['dimension']) else: values.append(dimension) return values
[ "def", "_dimensions_to_values", "(", "dimensions", ")", ":", "values", "=", "[", "]", "for", "dimension", "in", "dimensions", ":", "if", "isinstance", "(", "dimension", ",", "dict", ")", ":", "if", "'extractionFn'", "in", "dimension", ":", "values", ".", "append", "(", "dimension", ")", "elif", "'dimension'", "in", "dimension", ":", "values", ".", "append", "(", "dimension", "[", "'dimension'", "]", ")", "else", ":", "values", ".", "append", "(", "dimension", ")", "return", "values" ]
Replace dimensions specs with their `dimension` values, and ignore those without
[ "Replace", "dimensions", "specs", "with", "their", "dimension", "values", "and", "ignore", "those", "without" ]
python
train
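Behavior of `_dimensions_to_values` on the three shapes it distinguishes, using a made-up Druid dimension spec (the extractionFn payload here is illustrative only); note that a dict carrying neither `extractionFn` nor `dimension` is silently dropped:

    def _dimensions_to_values(dimensions):
        values = []
        for dimension in dimensions:
            if isinstance(dimension, dict):
                if 'extractionFn' in dimension:
                    values.append(dimension)
                elif 'dimension' in dimension:
                    values.append(dimension['dimension'])
            else:
                values.append(dimension)
        return values

    dims = [
        'country',                                    # bare name: kept as-is
        {'dimension': 'city', 'outputName': 'city'},  # spec without extractionFn: collapsed
        {'dimension': 'ts', 'extractionFn': {'type': 'timeFormat'}},  # kept whole
    ]
    assert _dimensions_to_values(dims) == ['country', 'city', dims[2]]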
eternnoir/pyTelegramBotAPI
telebot/apihelper.py
https://github.com/eternnoir/pyTelegramBotAPI/blob/47b53b88123097f1b9562a6cd5d4e080b86185d1/telebot/apihelper.py#L34-L56
def _make_request(token, method_name, method='get', params=None, files=None, base_url=API_URL): """ Makes a request to the Telegram API. :param token: The bot's API token. (Created with @BotFather) :param method_name: Name of the API method to be called. (E.g. 'getUpdates') :param method: HTTP method to be used. Defaults to 'get'. :param params: Optional parameters. Should be a dictionary with key-value pairs. :param files: Optional files. :return: The result parsed to a JSON dictionary. """ request_url = base_url.format(token, method_name) logger.debug("Request: method={0} url={1} params={2} files={3}".format(method, request_url, params, files)) read_timeout = READ_TIMEOUT connect_timeout = CONNECT_TIMEOUT if files and format_header_param: fields.format_header_param = _no_encode(format_header_param) if params: if 'timeout' in params: read_timeout = params['timeout'] + 10 if 'connect-timeout' in params: connect_timeout = params['connect-timeout'] + 10 result = _get_req_session().request(method, request_url, params=params, files=files, timeout=(connect_timeout, read_timeout), proxies=proxy) logger.debug("The server returned: '{0}'".format(result.text.encode('utf8'))) return _check_result(method_name, result)['result']
[ "def", "_make_request", "(", "token", ",", "method_name", ",", "method", "=", "'get'", ",", "params", "=", "None", ",", "files", "=", "None", ",", "base_url", "=", "API_URL", ")", ":", "request_url", "=", "base_url", ".", "format", "(", "token", ",", "method_name", ")", "logger", ".", "debug", "(", "\"Request: method={0} url={1} params={2} files={3}\"", ".", "format", "(", "method", ",", "request_url", ",", "params", ",", "files", ")", ")", "read_timeout", "=", "READ_TIMEOUT", "connect_timeout", "=", "CONNECT_TIMEOUT", "if", "files", "and", "format_header_param", ":", "fields", ".", "format_header_param", "=", "_no_encode", "(", "format_header_param", ")", "if", "params", ":", "if", "'timeout'", "in", "params", ":", "read_timeout", "=", "params", "[", "'timeout'", "]", "+", "10", "if", "'connect-timeout'", "in", "params", ":", "connect_timeout", "=", "params", "[", "'connect-timeout'", "]", "+", "10", "result", "=", "_get_req_session", "(", ")", ".", "request", "(", "method", ",", "request_url", ",", "params", "=", "params", ",", "files", "=", "files", ",", "timeout", "=", "(", "connect_timeout", ",", "read_timeout", ")", ",", "proxies", "=", "proxy", ")", "logger", ".", "debug", "(", "\"The server returned: '{0}'\"", ".", "format", "(", "result", ".", "text", ".", "encode", "(", "'utf8'", ")", ")", ")", "return", "_check_result", "(", "method_name", ",", "result", ")", "[", "'result'", "]" ]
Makes a request to the Telegram API. :param token: The bot's API token. (Created with @BotFather) :param method_name: Name of the API method to be called. (E.g. 'getUpdates') :param method: HTTP method to be used. Defaults to 'get'. :param params: Optional parameters. Should be a dictionary with key-value pairs. :param files: Optional files. :return: The result parsed to a JSON dictionary.
[ "Makes", "a", "request", "to", "the", "Telegram", "API", ".", ":", "param", "token", ":", "The", "bot", "s", "API", "token", ".", "(", "Created", "with" ]
python
train
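The detail worth copying from `_make_request` is the timeout padding: when the caller long-polls with a Telegram-side `timeout`, the HTTP read timeout is stretched to outlive it so requests does not abort the poll early. A reduced sketch with assumed default constants (the real values live in telebot.apihelper):

    CONNECT_TIMEOUT = 3.5   # assumed defaults, for illustration only
    READ_TIMEOUT = 9.99

    def timeouts_for(params):
        # Pad the HTTP timeouts so they outlive the API-level long-poll timeout.
        read_timeout, connect_timeout = READ_TIMEOUT, CONNECT_TIMEOUT
        if params:
            if 'timeout' in params:
                read_timeout = params['timeout'] + 10
            if 'connect-timeout' in params:
                connect_timeout = params['connect-timeout'] + 10
        return (connect_timeout, read_timeout)

    assert timeouts_for({'timeout': 20}) == (3.5, 30)
    # requests accepts this (connect, read) tuple directly:
    #   requests.get(url, params=params, timeout=timeouts_for(params))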
ewels/MultiQC
multiqc/modules/vcftools/tstv_summary.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/vcftools/tstv_summary.py#L14-L49
def parse_tstv_summary(self): """ Create the HTML for the TsTv summary plot. """ self.vcftools_tstv_summary = dict() for f in self.find_log_files('vcftools/tstv_summary', filehandles=True): d = {} for line in f['f'].readlines()[1:]: # don't add the header line (first row) key = line.split()[0] # taking the first column (MODEL) as key val = int(line.split()[1]) # taking the second column (COUNT) as value d[key] = val self.vcftools_tstv_summary[f['s_name']] = d # Filter out ignored sample names self.vcftools_tstv_summary = self.ignore_samples(self.vcftools_tstv_summary) if len(self.vcftools_tstv_summary) == 0: return 0 # Specifying the categories of the bargraph keys = OrderedDict() keys = ['AC', 'AG', 'AT', 'CG', 'CT', 'GT', 'Ts', 'Tv'] pconfig = { 'id': 'vcftools_tstv_summary', 'title': 'VCFTools: TsTv Summary', 'ylab': 'Counts', } self.add_section( name = 'TsTv Summary', anchor = 'vcftools-tstv-summary', description = "Plot of `TSTV-SUMMARY` - count of different types of transition and transversion SNPs.", plot = bargraph.plot(self.vcftools_tstv_summary,keys,pconfig) ) return len(self.vcftools_tstv_summary)
[ "def", "parse_tstv_summary", "(", "self", ")", ":", "self", ".", "vcftools_tstv_summary", "=", "dict", "(", ")", "for", "f", "in", "self", ".", "find_log_files", "(", "'vcftools/tstv_summary'", ",", "filehandles", "=", "True", ")", ":", "d", "=", "{", "}", "for", "line", "in", "f", "[", "'f'", "]", ".", "readlines", "(", ")", "[", "1", ":", "]", ":", "# don't add the header line (first row)", "key", "=", "line", ".", "split", "(", ")", "[", "0", "]", "# taking the first column (MODEL) as key", "val", "=", "int", "(", "line", ".", "split", "(", ")", "[", "1", "]", ")", "# taking the second column (COUNT) as value", "d", "[", "key", "]", "=", "val", "self", ".", "vcftools_tstv_summary", "[", "f", "[", "'s_name'", "]", "]", "=", "d", "# Filter out ignored sample names", "self", ".", "vcftools_tstv_summary", "=", "self", ".", "ignore_samples", "(", "self", ".", "vcftools_tstv_summary", ")", "if", "len", "(", "self", ".", "vcftools_tstv_summary", ")", "==", "0", ":", "return", "0", "# Specifying the categories of the bargraph", "keys", "=", "OrderedDict", "(", ")", "keys", "=", "[", "'AC'", ",", "'AG'", ",", "'AT'", ",", "'CG'", ",", "'CT'", ",", "'GT'", ",", "'Ts'", ",", "'Tv'", "]", "pconfig", "=", "{", "'id'", ":", "'vcftools_tstv_summary'", ",", "'title'", ":", "'VCFTools: TsTv Summary'", ",", "'ylab'", ":", "'Counts'", ",", "}", "self", ".", "add_section", "(", "name", "=", "'TsTv Summary'", ",", "anchor", "=", "'vcftools-tstv-summary'", ",", "description", "=", "\"Plot of `TSTV-SUMMARY` - count of different types of transition and transversion SNPs.\"", ",", "plot", "=", "bargraph", ".", "plot", "(", "self", ".", "vcftools_tstv_summary", ",", "keys", ",", "pconfig", ")", ")", "return", "len", "(", "self", ".", "vcftools_tstv_summary", ")" ]
Create the HTML for the TsTv summary plot.
[ "Create", "the", "HTML", "for", "the", "TsTv", "summary", "plot", "." ]
python
train
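The per-file parsing inside `parse_tstv_summary` is a header-skipping two-column read. Stripped down, with made-up counts:

    lines = ['MODEL COUNT', 'AC 1948', 'AG 5489', 'Ts 11292', 'Tv 4087']
    d = {}
    for line in lines[1:]:          # skip the header row
        key = line.split()[0]       # first column: MODEL
        val = int(line.split()[1])  # second column: COUNT
        d[key] = val
    assert d == {'AC': 1948, 'AG': 5489, 'Ts': 11292, 'Tv': 4087}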
pytroll/pyspectral
rsr_convert_scripts/metimage_rsr.py
https://github.com/pytroll/pyspectral/blob/fd296c0e0bdf5364fa180134a1292665d6bc50a3/rsr_convert_scripts/metimage_rsr.py#L71-L88
def _load(self, scale=1.0): """Load the MetImage RSR data for the band requested""" data = np.genfromtxt(self.requested_band_filename, unpack=True, names=['wavenumber', 'response'], skip_header=4) # Data are wavenumbers in cm-1: wavelength = 1. / data['wavenumber'] * 10000. response = data['response'] # The real MetImage has 24 detectors. However, for now we store the # single rsr as 'detector-1', indicating that there will be multiple # detectors in the future: detectors = {} detectors['det-1'] = {'wavelength': wavelength, 'response': response} self.rsr = detectors
[ "def", "_load", "(", "self", ",", "scale", "=", "1.0", ")", ":", "data", "=", "np", ".", "genfromtxt", "(", "self", ".", "requested_band_filename", ",", "unpack", "=", "True", ",", "names", "=", "[", "'wavenumber'", ",", "'response'", "]", ",", "skip_header", "=", "4", ")", "# Data are wavenumbers in cm-1:", "wavelength", "=", "1.", "/", "data", "[", "'wavenumber'", "]", "*", "10000.", "response", "=", "data", "[", "'response'", "]", "# The real MetImage has 24 detectors. However, for now we store the", "# single rsr as 'detector-1', indicating that there will be multiple", "# detectors in the future:", "detectors", "=", "{", "}", "detectors", "[", "'det-1'", "]", "=", "{", "'wavelength'", ":", "wavelength", ",", "'response'", ":", "response", "}", "self", ".", "rsr", "=", "detectors" ]
Load the MetImage RSR data for the band requested
[ "Load", "the", "MetImage", "RSR", "data", "for", "the", "band", "requested" ]
python
train
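The conversion in `_load` turns wavenumbers in cm^-1 into wavelengths in micrometres: wavelength_um = 10000 / wavenumber_cm. A two-point check:

    import numpy as np

    wavenumber = np.array([2500.0, 1000.0])      # cm^-1
    wavelength = 1.0 / wavenumber * 10000.0      # micrometres, same expression as in _load
    assert np.allclose(wavelength, [4.0, 10.0])  # 2500 cm^-1 -> 4 um, 1000 cm^-1 -> 10 um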
PyGithub/PyGithub
github/AuthenticatedUser.py
https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/AuthenticatedUser.py#L529-L601
def create_repo(self, name, description=github.GithubObject.NotSet, homepage=github.GithubObject.NotSet, private=github.GithubObject.NotSet, has_issues=github.GithubObject.NotSet, has_wiki=github.GithubObject.NotSet, has_downloads=github.GithubObject.NotSet, has_projects=github.GithubObject.NotSet, auto_init=github.GithubObject.NotSet, license_template=github.GithubObject.NotSet, gitignore_template=github.GithubObject.NotSet, allow_squash_merge=github.GithubObject.NotSet, allow_merge_commit=github.GithubObject.NotSet, allow_rebase_merge=github.GithubObject.NotSet): """ :calls: `POST /user/repos <http://developer.github.com/v3/repos>`_ :param name: string :param description: string :param homepage: string :param private: bool :param has_issues: bool :param has_wiki: bool :param has_downloads: bool :param has_projects: bool :param auto_init: bool :param license_template: string :param gitignore_template: string :param allow_squash_merge: bool :param allow_merge_commit: bool :param allow_rebase_merge: bool :rtype: :class:`github.Repository.Repository` """ assert isinstance(name, (str, unicode)), name assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description assert homepage is github.GithubObject.NotSet or isinstance(homepage, (str, unicode)), homepage assert private is github.GithubObject.NotSet or isinstance(private, bool), private assert has_issues is github.GithubObject.NotSet or isinstance(has_issues, bool), has_issues assert has_wiki is github.GithubObject.NotSet or isinstance(has_wiki, bool), has_wiki assert has_downloads is github.GithubObject.NotSet or isinstance(has_downloads, bool), has_downloads assert has_projects is github.GithubObject.NotSet or isinstance(has_projects, bool), has_projects assert auto_init is github.GithubObject.NotSet or isinstance(auto_init, bool), auto_init assert license_template is github.GithubObject.NotSet or isinstance(license_template, (str, unicode)), license_template assert gitignore_template is github.GithubObject.NotSet or isinstance(gitignore_template, (str, unicode)), gitignore_template assert allow_squash_merge is github.GithubObject.NotSet or isinstance(allow_squash_merge, bool), allow_squash_merge assert allow_merge_commit is github.GithubObject.NotSet or isinstance(allow_merge_commit, bool), allow_merge_commit assert allow_rebase_merge is github.GithubObject.NotSet or isinstance(allow_rebase_merge, bool), allow_rebase_merge post_parameters = { "name": name, } if description is not github.GithubObject.NotSet: post_parameters["description"] = description if homepage is not github.GithubObject.NotSet: post_parameters["homepage"] = homepage if private is not github.GithubObject.NotSet: post_parameters["private"] = private if has_issues is not github.GithubObject.NotSet: post_parameters["has_issues"] = has_issues if has_wiki is not github.GithubObject.NotSet: post_parameters["has_wiki"] = has_wiki if has_downloads is not github.GithubObject.NotSet: post_parameters["has_downloads"] = has_downloads if has_projects is not github.GithubObject.NotSet: post_parameters["has_projects"] = has_projects if auto_init is not github.GithubObject.NotSet: post_parameters["auto_init"] = auto_init if license_template is not github.GithubObject.NotSet: post_parameters["license_template"] = license_template if gitignore_template is not github.GithubObject.NotSet: post_parameters["gitignore_template"] = gitignore_template if allow_squash_merge is not github.GithubObject.NotSet: post_parameters["allow_squash_merge"] = allow_squash_merge if allow_merge_commit is not github.GithubObject.NotSet: post_parameters["allow_merge_commit"] = allow_merge_commit if allow_rebase_merge is not github.GithubObject.NotSet: post_parameters["allow_rebase_merge"] = allow_rebase_merge headers, data = self._requester.requestJsonAndCheck( "POST", "/user/repos", input=post_parameters ) return github.Repository.Repository(self._requester, headers, data, completed=True)
[ "def", "create_repo", "(", "self", ",", "name", ",", "description", "=", "github", ".", "GithubObject", ".", "NotSet", ",", "homepage", "=", "github", ".", "GithubObject", ".", "NotSet", ",", "private", "=", "github", ".", "GithubObject", ".", "NotSet", ",", "has_issues", "=", "github", ".", "GithubObject", ".", "NotSet", ",", "has_wiki", "=", "github", ".", "GithubObject", ".", "NotSet", ",", "has_downloads", "=", "github", ".", "GithubObject", ".", "NotSet", ",", "has_projects", "=", "github", ".", "GithubObject", ".", "NotSet", ",", "auto_init", "=", "github", ".", "GithubObject", ".", "NotSet", ",", "license_template", "=", "github", ".", "GithubObject", ".", "NotSet", ",", "gitignore_template", "=", "github", ".", "GithubObject", ".", "NotSet", ",", "allow_squash_merge", "=", "github", ".", "GithubObject", ".", "NotSet", ",", "allow_merge_commit", "=", "github", ".", "GithubObject", ".", "NotSet", ",", "allow_rebase_merge", "=", "github", ".", "GithubObject", ".", "NotSet", ")", ":", "assert", "isinstance", "(", "name", ",", "(", "str", ",", "unicode", ")", ")", ",", "name", "assert", "description", "is", "github", ".", "GithubObject", ".", "NotSet", "or", "isinstance", "(", "description", ",", "(", "str", ",", "unicode", ")", ")", ",", "description", "assert", "homepage", "is", "github", ".", "GithubObject", ".", "NotSet", "or", "isinstance", "(", "homepage", ",", "(", "str", ",", "unicode", ")", ")", ",", "homepage", "assert", "private", "is", "github", ".", "GithubObject", ".", "NotSet", "or", "isinstance", "(", "private", ",", "bool", ")", ",", "private", "assert", "has_issues", "is", "github", ".", "GithubObject", ".", "NotSet", "or", "isinstance", "(", "has_issues", ",", "bool", ")", ",", "has_issues", "assert", "has_wiki", "is", "github", ".", "GithubObject", ".", "NotSet", "or", "isinstance", "(", "has_wiki", ",", "bool", ")", ",", "has_wiki", "assert", "has_downloads", "is", "github", ".", "GithubObject", ".", "NotSet", "or", "isinstance", "(", "has_downloads", ",", "bool", ")", ",", "has_downloads", "assert", "has_projects", "is", "github", ".", "GithubObject", ".", "NotSet", "or", "isinstance", "(", "has_projects", ",", "bool", ")", ",", "has_projects", "assert", "auto_init", "is", "github", ".", "GithubObject", ".", "NotSet", "or", "isinstance", "(", "auto_init", ",", "bool", ")", ",", "auto_init", "assert", "license_template", "is", "github", ".", "GithubObject", ".", "NotSet", "or", "isinstance", "(", "license_template", ",", "(", "str", ",", "unicode", ")", ")", ",", "license_template", "assert", "gitignore_template", "is", "github", ".", "GithubObject", ".", "NotSet", "or", "isinstance", "(", "gitignore_template", ",", "(", "str", ",", "unicode", ")", ")", ",", "gitignore_template", "assert", "allow_squash_merge", "is", "github", ".", "GithubObject", ".", "NotSet", "or", "isinstance", "(", "allow_squash_merge", ",", "bool", ")", ",", "allow_squash_merge", "assert", "allow_merge_commit", "is", "github", ".", "GithubObject", ".", "NotSet", "or", "isinstance", "(", "allow_merge_commit", ",", "bool", ")", ",", "allow_merge_commit", "assert", "allow_rebase_merge", "is", "github", ".", "GithubObject", ".", "NotSet", "or", "isinstance", "(", "allow_rebase_merge", ",", "bool", ")", ",", "allow_rebase_merge", "post_parameters", "=", "{", "\"name\"", ":", "name", ",", "}", "if", "description", "is", "not", "github", ".", "GithubObject", ".", "NotSet", ":", "post_parameters", "[", "\"description\"", "]", "=", "description", "if", "homepage", "is", "not", "github", ".", 
"GithubObject", ".", "NotSet", ":", "post_parameters", "[", "\"homepage\"", "]", "=", "homepage", "if", "private", "is", "not", "github", ".", "GithubObject", ".", "NotSet", ":", "post_parameters", "[", "\"private\"", "]", "=", "private", "if", "has_issues", "is", "not", "github", ".", "GithubObject", ".", "NotSet", ":", "post_parameters", "[", "\"has_issues\"", "]", "=", "has_issues", "if", "has_wiki", "is", "not", "github", ".", "GithubObject", ".", "NotSet", ":", "post_parameters", "[", "\"has_wiki\"", "]", "=", "has_wiki", "if", "has_downloads", "is", "not", "github", ".", "GithubObject", ".", "NotSet", ":", "post_parameters", "[", "\"has_downloads\"", "]", "=", "has_downloads", "if", "has_projects", "is", "not", "github", ".", "GithubObject", ".", "NotSet", ":", "post_parameters", "[", "\"has_projects\"", "]", "=", "has_projects", "if", "auto_init", "is", "not", "github", ".", "GithubObject", ".", "NotSet", ":", "post_parameters", "[", "\"auto_init\"", "]", "=", "auto_init", "if", "license_template", "is", "not", "github", ".", "GithubObject", ".", "NotSet", ":", "post_parameters", "[", "\"license_template\"", "]", "=", "license_template", "if", "gitignore_template", "is", "not", "github", ".", "GithubObject", ".", "NotSet", ":", "post_parameters", "[", "\"gitignore_template\"", "]", "=", "gitignore_template", "if", "allow_squash_merge", "is", "not", "github", ".", "GithubObject", ".", "NotSet", ":", "post_parameters", "[", "\"allow_squash_merge\"", "]", "=", "allow_squash_merge", "if", "allow_merge_commit", "is", "not", "github", ".", "GithubObject", ".", "NotSet", ":", "post_parameters", "[", "\"allow_merge_commit\"", "]", "=", "allow_merge_commit", "if", "allow_rebase_merge", "is", "not", "github", ".", "GithubObject", ".", "NotSet", ":", "post_parameters", "[", "\"allow_rebase_merge\"", "]", "=", "allow_rebase_merge", "headers", ",", "data", "=", "self", ".", "_requester", ".", "requestJsonAndCheck", "(", "\"POST\"", ",", "\"/user/repos\"", ",", "input", "=", "post_parameters", ")", "return", "github", ".", "Repository", ".", "Repository", "(", "self", ".", "_requester", ",", "headers", ",", "data", ",", "completed", "=", "True", ")" ]
:calls: `POST /user/repos <http://developer.github.com/v3/repos>`_ :param name: string :param description: string :param homepage: string :param private: bool :param has_issues: bool :param has_wiki: bool :param has_downloads: bool :param has_projects: bool :param auto_init: bool :param license_template: string :param gitignore_template: string :param allow_squash_merge: bool :param allow_merge_commit: bool :param allow_rebase_merge: bool :rtype: :class:`github.Repository.Repository`
[ ":", "calls", ":", "POST", "/", "user", "/", "repos", "<http", ":", "//", "developer", ".", "github", ".", "com", "/", "v3", "/", "repos", ">", "_", ":", "param", "name", ":", "string", ":", "param", "description", ":", "string", ":", "param", "homepage", ":", "string", ":", "param", "private", ":", "bool", ":", "param", "has_issues", ":", "bool", ":", "param", "has_wiki", ":", "bool", ":", "param", "has_downloads", ":", "bool", ":", "param", "has_projects", ":", "bool", ":", "param", "auto_init", ":", "bool", ":", "param", "license_template", ":", "string", ":", "param", "gitignore_template", ":", "string", ":", "param", "allow_squash_merge", ":", "bool", ":", "param", "allow_merge_commit", ":", "bool", ":", "param", "allow_rebase_merge", ":", "bool", ":", "rtype", ":", ":", "class", ":", "github", ".", "Repository", ".", "Repository" ]
python
train
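Most of `create_repo` is one pattern repeated: a NotSet sentinel distinguishes "the caller did not pass this" from falsy values like False or '', and only supplied parameters reach the POST body. A generic sketch of that pattern (names hypothetical, not the PyGithub API):

    class _NotSet:
        def __repr__(self):
            return 'NotSet'

    NotSet = _NotSet()

    def build_params(**kwargs):
        # Keep only the parameters the caller actually supplied.
        return {k: v for k, v in kwargs.items() if v is not NotSet}

    assert build_params(name='demo', private=NotSet, auto_init=True) == {
        'name': 'demo',
        'auto_init': True,
    }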
flaviogrossi/sockjs-cyclone
sockjs/cyclone/router.py
https://github.com/flaviogrossi/sockjs-cyclone/blob/d3ca053ec1aa1e85f652347bff562c2319be37a2/sockjs/cyclone/router.py#L131-L158
def create_session(self, session_id, register=True, session_factory=None): """ Creates new session object and returns it. @param session_id: Session id. If not provided, will generate a new session id. @param register: Should be the session registered in a storage. Websockets don't need it. @param session_factory: Use the given (class, args, kwargs) tuple to create the session. Class should derive from `BaseSession`. Normally not needed. """ if session_factory is not None: # use custom class to create session sess_factory, sess_args, sess_kwargs = session_factory s = sess_factory(*sess_args, **sess_kwargs) else: # use default session and arguments if not using a custom session # factory s = session.Session(self._connection, self, session_id, self.settings.get('disconnect_delay')) if register: self._sessions.add(s) return s
[ "def", "create_session", "(", "self", ",", "session_id", ",", "register", "=", "True", ",", "session_factory", "=", "None", ")", ":", "if", "session_factory", "is", "not", "None", ":", "# use custom class to create session", "sess_factory", ",", "sess_args", ",", "sess_kwargs", "=", "session_factory", "s", "=", "sess_factory", "(", "*", "sess_args", ",", "*", "*", "sess_kwargs", ")", "else", ":", "# use default session and arguments if not using a custom session", "# factory", "s", "=", "session", ".", "Session", "(", "self", ".", "_connection", ",", "self", ",", "session_id", ",", "self", ".", "settings", ".", "get", "(", "'disconnect_delay'", ")", ")", "if", "register", ":", "self", ".", "_sessions", ".", "add", "(", "s", ")", "return", "s" ]
Creates new session object and returns it. @param session_id: Session id. If not provided, will generate a new session id. @param register: Should be the session registered in a storage. Websockets don't need it. @param session_factory: Use the given (class, args, kwargs) tuple to create the session. Class should derive from `BaseSession`. Normally not needed.
[ "Creates", "new", "session", "object", "and", "returns", "it", "." ]
python
train
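The `session_factory` argument of `create_session` is a (class, args, kwargs) triple that gets unpacked as `cls(*args, **kwargs)`. A hypothetical custom session class, just to show the shape of the triple:

    class LoggingSession:
        # Stand-in for a subclass of sockjs.cyclone's BaseSession.
        def __init__(self, conn, server, session_id, disconnect_delay=5):
            self.session_id = session_id
            self.disconnect_delay = disconnect_delay

    factory = (LoggingSession, ('conn', 'server', 'abc123'), {'disconnect_delay': 10})
    sess_factory, sess_args, sess_kwargs = factory  # same unpacking as in create_session
    s = sess_factory(*sess_args, **sess_kwargs)
    assert s.session_id == 'abc123' and s.disconnect_delay == 10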
materialsproject/pymatgen
pymatgen/analysis/chemenv/coordination_environments/coordination_geometry_finder.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/chemenv/coordination_environments/coordination_geometry_finder.py#L419-L429
def set_structure(self, lattice, species, coords, coords_are_cartesian): """ Sets up the pymatgen structure for which the coordination geometries have to be identified starting from the lattice, the species and the coordinates :param lattice: The lattice of the structure :param species: The species on the sites :param coords: The coordinates of the sites :param coords_are_cartesian: If set to True, the coordinates are given in cartesian coordinates """ self.setup_structure( Structure(lattice, species, coords, coords_are_cartesian))
[ "def", "set_structure", "(", "self", ",", "lattice", ",", "species", ",", "coords", ",", "coords_are_cartesian", ")", ":", "self", ".", "setup_structure", "(", "Structure", "(", "lattice", ",", "species", ",", "coords", ",", "coords_are_cartesian", ")", ")" ]
Sets up the pymatgen structure for which the coordination geometries have to be identified starting from the lattice, the species and the coordinates :param lattice: The lattice of the structure :param species: The species on the sites :param coords: The coordinates of the sites :param coords_are_cartesian: If set to True, the coordinates are given in cartesian coordinates
[ "Sets", "up", "the", "pymatgen", "structure", "for", "which", "the", "coordination", "geometries", "have", "to", "be", "identified", "starting", "from", "the", "lattice", "the", "species", "and", "the", "coordinates", ":", "param", "lattice", ":", "The", "lattice", "of", "the", "structure", ":", "param", "species", ":", "The", "species", "on", "the", "sites", ":", "param", "coords", ":", "The", "coordinates", "of", "the", "sites", ":", "param", "coords_are_cartesian", ":", "If", "set", "to", "True", "the", "coordinates", "are", "given", "in", "cartesian", "coordinates" ]
python
train
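A short sketch of feeding a structure to the finder above, using a rock-salt NaCl cell as a worked example. The import paths are pymatgen's current ones (older releases expose `Lattice` at the package top level), so treat them as assumptions.

from pymatgen.core import Lattice
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import \
    LocalGeometryFinder

lgf = LocalGeometryFinder()
lattice = Lattice.cubic(5.64)                 # NaCl cell edge in Angstrom
species = ['Na', 'Cl']
coords = [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]]   # fractional coordinates
lgf.set_structure(lattice, species, coords, coords_are_cartesian=False)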
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/setuptools/depends.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/setuptools/depends.py#L166-L198
def extract_constant(code, symbol, default=-1):
    """Extract the constant value of 'symbol' from 'code'

    If the name 'symbol' is bound to a constant value by the Python code
    object 'code', return that value.  If 'symbol' is bound to an expression,
    return 'default'.  Otherwise, return 'None'.

    Return value is based on the first assignment to 'symbol'.  'symbol' must
    be a global, or at least a non-"fast" local in the code block.  That is,
    only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
    must be present in 'code.co_names'.
    """

    if symbol not in code.co_names:
        # name's not there, can't possibly be an assignment
        return None

    name_idx = list(code.co_names).index(symbol)

    STORE_NAME = 90
    STORE_GLOBAL = 97
    LOAD_CONST = 100

    const = default

    for op, arg in _iter_code(code):

        if op==LOAD_CONST:
            const = code.co_consts[arg]
        elif arg==name_idx and (op==STORE_NAME or op==STORE_GLOBAL):
            return const
        else:
            const = default
[ "def", "extract_constant", "(", "code", ",", "symbol", ",", "default", "=", "-", "1", ")", ":", "if", "symbol", "not", "in", "code", ".", "co_names", ":", "# name's not there, can't possibly be an assigment", "return", "None", "name_idx", "=", "list", "(", "code", ".", "co_names", ")", ".", "index", "(", "symbol", ")", "STORE_NAME", "=", "90", "STORE_GLOBAL", "=", "97", "LOAD_CONST", "=", "100", "const", "=", "default", "for", "op", ",", "arg", "in", "_iter_code", "(", "code", ")", ":", "if", "op", "==", "LOAD_CONST", ":", "const", "=", "code", ".", "co_consts", "[", "arg", "]", "elif", "arg", "==", "name_idx", "and", "(", "op", "==", "STORE_NAME", "or", "op", "==", "STORE_GLOBAL", ")", ":", "return", "const", "else", ":", "const", "=", "default" ]
Extract the constant value of 'symbol' from 'code' If the name 'symbol' is bound to a constant value by the Python code object 'code', return that value. If 'symbol' is bound to an expression, return 'default'. Otherwise, return 'None'. Return value is based on the first assignment to 'symbol'. 'symbol' must be a global, or at least a non-"fast" local in the code block. That is, only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol' must be present in 'code.co_names'.
[ "Extract", "the", "constant", "value", "of", "symbol", "from", "code" ]
python
test
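A quick demonstration of the semantics documented above: a literal assignment yields its value, an expression yields `default`, and a missing name yields None. The source string here is invented for illustration.

from setuptools.depends import extract_constant

src = "__version__ = '1.2.3'\nDEBUG = some_call()\n"
code = compile(src, '<example>', 'exec')

print(extract_constant(code, '__version__'))        # '1.2.3'  (bound to a constant)
print(extract_constant(code, 'DEBUG', default=-1))  # -1       (bound to an expression)
print(extract_constant(code, 'MISSING'))            # None     (name not in co_names)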
KnowledgeLinks/rdfframework
rdfframework/rdfclass/rdfclass.py
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/rdfclass/rdfclass.py#L660-L689
def _initilize_props(self):
    """ Adds an initialized property to the class dictionary """
    # if self.subject == "pyuri_aHR0cDovL3R1dHQuZWR1Lw==_":
    #     pdb.set_trace()
    try:
        # pdb.set_trace()
        for prop in self.es_props:
            self[prop] = self.properties[prop](self, self.dataset)
            setattr(self, prop, self[prop])
        self[__a__] = self.properties[__a__](self, self.dataset)
        setattr(self, __a__, self[__a__])
        # for prop, prop_class in self.properties.items():
        #     # passing in the current dataset tie
        #     self[prop] = prop_class(self, self.dataset)
        #     setattr(self, prop, self[prop])
        # bases = remove_parents((self.__class__,) +
        #                        self.__class__.__bases__)
        # for base in bases:
        #     if base.__name__ not in IGNORE_CLASSES:
        #         base_name = Uri(base.__name__)
        #         try:
        #             self['rdf_type'].append(base_name)
        #         except KeyError:
        #             self[Uri('rdf_type')] = MODULE.rdfclass.make_property({},
        #                     'rdf_type',
        #                     self.__class__.__name__)(self, self.dataset)
        #             self['rdf_type'].append(base_name)
    except (AttributeError, TypeError):
        pass
[ "def", "_initilize_props", "(", "self", ")", ":", "# if self.subject == \"pyuri_aHR0cDovL3R1dHQuZWR1Lw==_\":", "# pdb.set_trace()", "try", ":", "# pdb.set_trace()", "for", "prop", "in", "self", ".", "es_props", ":", "self", "[", "prop", "]", "=", "self", ".", "properties", "[", "prop", "]", "(", "self", ",", "self", ".", "dataset", ")", "setattr", "(", "self", ",", "prop", ",", "self", "[", "prop", "]", ")", "self", "[", "__a__", "]", "=", "self", ".", "properties", "[", "__a__", "]", "(", "self", ",", "self", ".", "dataset", ")", "setattr", "(", "self", ",", "__a__", ",", "self", "[", "__a__", "]", ")", "# for prop, prop_class in self.properties.items():", "# # passing in the current dataset tie", "# self[prop] = prop_class(self, self.dataset)", "# setattr(self, prop, self[prop])", "# bases = remove_parents((self.__class__,) +", "# self.__class__.__bases__)", "# for base in bases:", "# if base.__name__ not in IGNORE_CLASSES:", "# base_name = Uri(base.__name__)", "# try:", "# self['rdf_type'].append(base_name)", "# except KeyError:", "# self[Uri('rdf_type')] = MODULE.rdfclass.make_property({},", "# 'rdf_type',", "# self.__class__.__name__)(self, self.dataset)", "# self['rdf_type'].append(base_name)", "except", "(", "AttributeError", ",", "TypeError", ")", ":", "pass" ]
Adds an initialized property to the class dictionary
[ "Adds", "an", "intialized", "property", "to", "the", "class", "dictionary" ]
python
train
juju/python-libjuju
juju/model.py
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/model.py#L343-L355
def previous(self): """Return a copy of this object as was at its previous state in history. Returns None if this object is new (and therefore has no history). The returned object is always "disconnected", i.e. does not receive live updates. """ return self.model.state.get_entity( self.entity_type, self.entity_id, self._history_index - 1, connected=False)
[ "def", "previous", "(", "self", ")", ":", "return", "self", ".", "model", ".", "state", ".", "get_entity", "(", "self", ".", "entity_type", ",", "self", ".", "entity_id", ",", "self", ".", "_history_index", "-", "1", ",", "connected", "=", "False", ")" ]
Return a copy of this object as was at its previous state in history. Returns None if this object is new (and therefore has no history). The returned object is always "disconnected", i.e. does not receive live updates.
[ "Return", "a", "copy", "of", "this", "object", "as", "was", "at", "its", "previous", "state", "in", "history", "." ]
python
train
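A hedged sketch of the delta-inspection pattern `previous()` enables. `unit` stands in for any connected entity, for example one taken from `model.units` after `await model.connect()`, and `workload_status` is one attribute you might compare.

old = unit.previous()
if old is not None and old.workload_status != unit.workload_status:
    print('workload status changed: {} -> {}'.format(
        old.workload_status, unit.workload_status))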
Jaymon/prom
prom/decorators.py
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/decorators.py#L13-L81
def reconnecting(count=None, backoff=None): """this is a very specific decorator meant to be used on Interface classes. It will attempt to reconnect if the connection is closed and run the same method again. TODO -- I think this will have issues with transactions using passed in connections, ie, you pass in a transacting connection to the insert() method and that connection gets dropped, this will reconnect but the transaction will be hosed. count -- integer -- how many attempts to run the method, defaults to 3 backoff -- float -- how long to sleep on failure, defaults to 1.0 """ # we get trixxy here so we can manipulate these values in the wrapped function, # this is one of the first times I wish we were on Python 3 # http://stackoverflow.com/a/9264845/5006 reconn_params = { "count": count, "backoff": backoff } def retry_decorator(func): @wraps(func) def wrapper(self, *args, **kwargs): count = reconn_params["count"] backoff = reconn_params["backoff"] if count is None: count = self.connection_config.options.get('reconnect_attempts', 3) if backoff is None: backoff = self.connection_config.options.get('reconnect_backoff', 1.0) count = int(count) backoff = float(backoff) for attempt in range(1, count + 1): try: backoff_seconds = float(attempt - 1) * backoff if backoff_seconds: logger.debug("sleeping {} seconds before attempt {}".format( backoff_seconds, attempt )) time.sleep(backoff_seconds) return func(self, *args, **kwargs) except InterfaceError as e: e_msg = str(e.e) # TODO -- this gets us by SQLite and Postgres, but might not # work in the future, so this needs to be a tad more robust if "closed" in e_msg.lower(): if attempt == count: logger.debug("all {} attempts failed".format(count)) raise else: logger.debug("attempt {}/{} failed, retrying".format( attempt, count )) else: raise return wrapper return retry_decorator
[ "def", "reconnecting", "(", "count", "=", "None", ",", "backoff", "=", "None", ")", ":", "# we get trixxy here so we can manipulate these values in the wrapped function,", "# this is one of the first times I wish we were on Python 3", "# http://stackoverflow.com/a/9264845/5006", "reconn_params", "=", "{", "\"count\"", ":", "count", ",", "\"backoff\"", ":", "backoff", "}", "def", "retry_decorator", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "count", "=", "reconn_params", "[", "\"count\"", "]", "backoff", "=", "reconn_params", "[", "\"backoff\"", "]", "if", "count", "is", "None", ":", "count", "=", "self", ".", "connection_config", ".", "options", ".", "get", "(", "'reconnect_attempts'", ",", "3", ")", "if", "backoff", "is", "None", ":", "backoff", "=", "self", ".", "connection_config", ".", "options", ".", "get", "(", "'reconnect_backoff'", ",", "1.0", ")", "count", "=", "int", "(", "count", ")", "backoff", "=", "float", "(", "backoff", ")", "for", "attempt", "in", "range", "(", "1", ",", "count", "+", "1", ")", ":", "try", ":", "backoff_seconds", "=", "float", "(", "attempt", "-", "1", ")", "*", "backoff", "if", "backoff_seconds", ":", "logger", ".", "debug", "(", "\"sleeping {} seconds before attempt {}\"", ".", "format", "(", "backoff_seconds", ",", "attempt", ")", ")", "time", ".", "sleep", "(", "backoff_seconds", ")", "return", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "InterfaceError", "as", "e", ":", "e_msg", "=", "str", "(", "e", ".", "e", ")", "# TODO -- this gets us by SQLite and Postgres, but might not", "# work in the future, so this needs to be a tad more robust", "if", "\"closed\"", "in", "e_msg", ".", "lower", "(", ")", ":", "if", "attempt", "==", "count", ":", "logger", ".", "debug", "(", "\"all {} attempts failed\"", ".", "format", "(", "count", ")", ")", "raise", "else", ":", "logger", ".", "debug", "(", "\"attempt {}/{} failed, retrying\"", ".", "format", "(", "attempt", ",", "count", ")", ")", "else", ":", "raise", "return", "wrapper", "return", "retry_decorator" ]
this is a very specific decorator meant to be used on Interface classes. It will attempt to reconnect if the connection is closed and run the same method again. TODO -- I think this will have issues with transactions using passed in connections, ie, you pass in a transacting connection to the insert() method and that connection gets dropped, this will reconnect but the transaction will be hosed. count -- integer -- how many attempts to run the method, defaults to 3 backoff -- float -- how long to sleep on failure, defaults to 1.0
[ "this", "is", "a", "very", "specific", "decorator", "meant", "to", "be", "used", "on", "Interface", "classes", ".", "It", "will", "attempt", "to", "reconnect", "if", "the", "connection", "is", "closed", "and", "run", "the", "same", "method", "again", "." ]
python
train
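A minimal sketch of the decorator applied to a method. `ToyInterface` and its helper are invented here, and `count`/`backoff` are passed explicitly so the decorator never has to consult a real `connection_config`.

from prom.decorators import reconnecting

class ToyInterface(object):

    @reconnecting(count=4, backoff=0.5)
    def insert(self, table, fields, **kwargs):
        # an InterfaceError whose wrapped message contains "closed" is
        # retried up to 3 more times, sleeping 0.5s, 1.0s, then 1.5s
        return self._query(table, fields, **kwargs)

    def _query(self, table, fields, **kwargs):
        pass  # real database work would happen here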
danpaquin/coinbasepro-python
cbpro/authenticated_client.py
https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L131-L174
def get_account_holds(self, account_id, **kwargs): """ Get holds on an account. This method returns a generator which may make multiple HTTP requests while iterating through it. Holds are placed on an account for active orders or pending withdraw requests. As an order is filled, the hold amount is updated. If an order is canceled, any remaining hold is removed. For a withdraw, once it is completed, the hold is removed. The `type` field will indicate why the hold exists. The hold type is 'order' for holds related to open orders and 'transfer' for holds related to a withdraw. The `ref` field contains the id of the order or transfer which created the hold. Args: account_id (str): Account id to get holds of. kwargs (dict): Additional HTTP request parameters. Returns: generator(list): Hold information for the account. Example:: [ { "id": "82dcd140-c3c7-4507-8de4-2c529cd1a28f", "account_id": "e0b3f39a-183d-453e-b754-0c13e5bab0b3", "created_at": "2014-11-06T10:34:47.123456Z", "updated_at": "2014-11-06T10:40:47.123456Z", "amount": "4.23", "type": "order", "ref": "0a205de4-dd35-4370-a285-fe8fc375a273", }, { ... } ] """ endpoint = '/accounts/{}/holds'.format(account_id) return self._send_paginated_message(endpoint, params=kwargs)
[ "def", "get_account_holds", "(", "self", ",", "account_id", ",", "*", "*", "kwargs", ")", ":", "endpoint", "=", "'/accounts/{}/holds'", ".", "format", "(", "account_id", ")", "return", "self", ".", "_send_paginated_message", "(", "endpoint", ",", "params", "=", "kwargs", ")" ]
Get holds on an account. This method returns a generator which may make multiple HTTP requests while iterating through it. Holds are placed on an account for active orders or pending withdraw requests. As an order is filled, the hold amount is updated. If an order is canceled, any remaining hold is removed. For a withdraw, once it is completed, the hold is removed. The `type` field will indicate why the hold exists. The hold type is 'order' for holds related to open orders and 'transfer' for holds related to a withdraw. The `ref` field contains the id of the order or transfer which created the hold. Args: account_id (str): Account id to get holds of. kwargs (dict): Additional HTTP request parameters. Returns: generator(list): Hold information for the account. Example:: [ { "id": "82dcd140-c3c7-4507-8de4-2c529cd1a28f", "account_id": "e0b3f39a-183d-453e-b754-0c13e5bab0b3", "created_at": "2014-11-06T10:34:47.123456Z", "updated_at": "2014-11-06T10:40:47.123456Z", "amount": "4.23", "type": "order", "ref": "0a205de4-dd35-4370-a285-fe8fc375a273", }, { ... } ]
[ "Get", "holds", "on", "an", "account", "." ]
python
train
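A usage sketch with placeholder credentials and the account id from the docstring example. Because the method returns a generator, simply iterating it drives the paginated HTTP requests.

import cbpro

auth_client = cbpro.AuthenticatedClient('<key>', '<b64secret>', '<passphrase>')
account_id = 'e0b3f39a-183d-453e-b754-0c13e5bab0b3'   # placeholder
for hold in auth_client.get_account_holds(account_id):
    print(hold['type'], hold['amount'], hold['ref'])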
asweigart/pytweening
pytweening/__init__.py
https://github.com/asweigart/pytweening/blob/20d74368e53dc7d0f77c810b624b2c90994f099d/pytweening/__init__.py#L511-L522
def easeOutBack(n, s=1.70158): """A tween function that overshoots the destination a little and then backs into the destination. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine(). """ _checkRange(n) n = n - 1 return n * n * ((s + 1) * n + s) + 1
[ "def", "easeOutBack", "(", "n", ",", "s", "=", "1.70158", ")", ":", "_checkRange", "(", "n", ")", "n", "=", "n", "-", "1", "return", "n", "*", "n", "*", "(", "(", "s", "+", "1", ")", "*", "n", "+", "s", ")", "+", "1" ]
A tween function that overshoots the destination a little and then backs into the destination. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
[ "A", "tween", "function", "that", "overshoots", "the", "destination", "a", "little", "and", "then", "backs", "into", "the", "destination", "." ]
python
train
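A worked example of the overshoot described above: with the default s=1.70158, the curve passes 1.0 partway through the tween and settles back to exactly 1.0 at the end.

import pytweening

print(pytweening.easeOutBack(0.5))   # ~1.0877, already past the destination
print(pytweening.easeOutBack(0.8))   # ~1.0465, easing back down
print(pytweening.easeOutBack(1.0))   # 1.0 exactly
# feed the eased progress to a line from (0, 0) to (100, 100)
print(pytweening.getPointOnLine(0, 0, 100, 100, pytweening.easeOutBack(0.5)))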
jorgeecardona/dynect
dynect/__init__.py
https://github.com/jorgeecardona/dynect/blob/d2cd85bc510f00108a3a5bfe515f45daae15a482/dynect/__init__.py#L348-L352
def delete(self): " Delete the record." response = self.dyn.delete(self.url) return response.content['job_id']
[ "def", "delete", "(", "self", ")", ":", "response", "=", "self", ".", "dyn", ".", "delete", "(", "self", ".", "url", ")", "return", "response", ".", "content", "[", "'job_id'", "]" ]
Delete the record.
[ "Delete", "the", "record", "." ]
python
train
zhanglab/psamm
psamm/gapfilling.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/gapfilling.py#L98-L156
def add_all_transport_reactions(model, boundaries, allow_duplicates=False): """Add all transport reactions to database and to model. Add transport reactions for all boundaries. Boundaries are defined by pairs (2-tuples) of compartment IDs. Transport reactions are added for all compounds in the model, not just for compounds in the two boundary compartments. Args: model: :class:`psamm.metabolicmodel.MetabolicModel`. boundaries: Set of compartment boundary pairs. Returns: Set of IDs of reactions that were added. """ all_reactions = {} if not allow_duplicates: # TODO: Avoid adding reactions that already exist in the database. # This should be integrated in the database. for rxnid in model.database.reactions: rx = model.database.get_reaction(rxnid) all_reactions[rx] = rxnid boundary_pairs = set() for source, dest in boundaries: if source != dest: boundary_pairs.add(tuple(sorted((source, dest)))) added = set() added_pairs = set() initial_compounds = set(model.compounds) reactions = set(model.database.reactions) for compound in initial_compounds: for c1, c2 in boundary_pairs: compound1 = compound.in_compartment(c1) compound2 = compound.in_compartment(c2) pair = compound1, compound2 if pair in added_pairs: continue rxnid_tp = create_transport_id(reactions, compound1, compound2) reaction_tp = Reaction(Direction.Both, { compound1: -1, compound2: 1 }) if reaction_tp not in all_reactions: model.database.set_reaction(rxnid_tp, reaction_tp) reactions.add(rxnid_tp) else: rxnid_tp = all_reactions[reaction_tp] if not model.has_reaction(rxnid_tp): added.add(rxnid_tp) model.add_reaction(rxnid_tp) added_pairs.add(pair) return added
[ "def", "add_all_transport_reactions", "(", "model", ",", "boundaries", ",", "allow_duplicates", "=", "False", ")", ":", "all_reactions", "=", "{", "}", "if", "not", "allow_duplicates", ":", "# TODO: Avoid adding reactions that already exist in the database.", "# This should be integrated in the database.", "for", "rxnid", "in", "model", ".", "database", ".", "reactions", ":", "rx", "=", "model", ".", "database", ".", "get_reaction", "(", "rxnid", ")", "all_reactions", "[", "rx", "]", "=", "rxnid", "boundary_pairs", "=", "set", "(", ")", "for", "source", ",", "dest", "in", "boundaries", ":", "if", "source", "!=", "dest", ":", "boundary_pairs", ".", "add", "(", "tuple", "(", "sorted", "(", "(", "source", ",", "dest", ")", ")", ")", ")", "added", "=", "set", "(", ")", "added_pairs", "=", "set", "(", ")", "initial_compounds", "=", "set", "(", "model", ".", "compounds", ")", "reactions", "=", "set", "(", "model", ".", "database", ".", "reactions", ")", "for", "compound", "in", "initial_compounds", ":", "for", "c1", ",", "c2", "in", "boundary_pairs", ":", "compound1", "=", "compound", ".", "in_compartment", "(", "c1", ")", "compound2", "=", "compound", ".", "in_compartment", "(", "c2", ")", "pair", "=", "compound1", ",", "compound2", "if", "pair", "in", "added_pairs", ":", "continue", "rxnid_tp", "=", "create_transport_id", "(", "reactions", ",", "compound1", ",", "compound2", ")", "reaction_tp", "=", "Reaction", "(", "Direction", ".", "Both", ",", "{", "compound1", ":", "-", "1", ",", "compound2", ":", "1", "}", ")", "if", "reaction_tp", "not", "in", "all_reactions", ":", "model", ".", "database", ".", "set_reaction", "(", "rxnid_tp", ",", "reaction_tp", ")", "reactions", ".", "add", "(", "rxnid_tp", ")", "else", ":", "rxnid_tp", "=", "all_reactions", "[", "reaction_tp", "]", "if", "not", "model", ".", "has_reaction", "(", "rxnid_tp", ")", ":", "added", ".", "add", "(", "rxnid_tp", ")", "model", ".", "add_reaction", "(", "rxnid_tp", ")", "added_pairs", ".", "add", "(", "pair", ")", "return", "added" ]
Add all transport reactions to database and to model. Add transport reactions for all boundaries. Boundaries are defined by pairs (2-tuples) of compartment IDs. Transport reactions are added for all compounds in the model, not just for compounds in the two boundary compartments. Args: model: :class:`psamm.metabolicmodel.MetabolicModel`. boundaries: Set of compartment boundary pairs. Returns: Set of IDs of reactions that were added.
[ "Add", "all", "transport", "reactions", "to", "database", "and", "to", "model", "." ]
python
train
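A hedged call sketch for the function above; `model` is assumed to be an existing `psamm.metabolicmodel.MetabolicModel` built elsewhere, and the single boundary pair moves compounds between a cytosol compartment 'c' and an extracellular compartment 'e'.

from psamm.gapfilling import add_all_transport_reactions

added = add_all_transport_reactions(model, {('c', 'e')})
print('{} transport reactions added'.format(len(added)))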
gem/oq-engine
openquake/baselib/parallel.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/baselib/parallel.py#L707-L724
def submit(self, *args, func=None, monitor=None): """ Submit the given arguments to the underlying task """ monitor = monitor or self.monitor func = func or self.task_func if not hasattr(self, 'socket'): # first time self.__class__.running_tasks = self.tasks self.socket = Socket(self.receiver, zmq.PULL, 'bind').__enter__() monitor.backurl = 'tcp://%s:%s' % ( config.dbserver.host, self.socket.port) assert not isinstance(args[-1], Monitor) # sanity check dist = 'no' if self.num_tasks == 1 else self.distribute if dist != 'no': args = pickle_sequence(args) self.sent += numpy.array([len(p) for p in args]) res = submit[dist](self, func, args, monitor) self.tasks.append(res)
[ "def", "submit", "(", "self", ",", "*", "args", ",", "func", "=", "None", ",", "monitor", "=", "None", ")", ":", "monitor", "=", "monitor", "or", "self", ".", "monitor", "func", "=", "func", "or", "self", ".", "task_func", "if", "not", "hasattr", "(", "self", ",", "'socket'", ")", ":", "# first time", "self", ".", "__class__", ".", "running_tasks", "=", "self", ".", "tasks", "self", ".", "socket", "=", "Socket", "(", "self", ".", "receiver", ",", "zmq", ".", "PULL", ",", "'bind'", ")", ".", "__enter__", "(", ")", "monitor", ".", "backurl", "=", "'tcp://%s:%s'", "%", "(", "config", ".", "dbserver", ".", "host", ",", "self", ".", "socket", ".", "port", ")", "assert", "not", "isinstance", "(", "args", "[", "-", "1", "]", ",", "Monitor", ")", "# sanity check", "dist", "=", "'no'", "if", "self", ".", "num_tasks", "==", "1", "else", "self", ".", "distribute", "if", "dist", "!=", "'no'", ":", "args", "=", "pickle_sequence", "(", "args", ")", "self", ".", "sent", "+=", "numpy", ".", "array", "(", "[", "len", "(", "p", ")", "for", "p", "in", "args", "]", ")", "res", "=", "submit", "[", "dist", "]", "(", "self", ",", "func", ",", "args", ",", "monitor", ")", "self", ".", "tasks", ".", "append", "(", "res", ")" ]
Submit the given arguments to the underlying task
[ "Submit", "the", "given", "arguments", "to", "the", "underlying", "task" ]
python
train
dereneaton/ipyrad
ipyrad/assemble/write_outfiles.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/write_outfiles.py#L1748-L1796
def write_nex(data, sidx, pnames): """ write the nexus output file from the tmparr[seqarray] and tmparr[maparr] """ ## grab seq data from tmparr start = time.time() tmparrs = os.path.join(data.dirs.outfiles, "tmp-{}.h5".format(data.name)) with h5py.File(tmparrs, 'r') as io5: seqarr = io5["seqarr"] ## trim to size b/c it was made longer than actual end = np.where(np.all(seqarr[:] == "", axis=0))[0] if np.any(end): end = end.min() else: end = seqarr.shape[1] ## write to nexus data.outfiles.nex = os.path.join(data.dirs.outfiles, data.name+".nex") with open(data.outfiles.nex, 'w') as out: ## write nexus seq header out.write(NEXHEADER.format(seqarr.shape[0], end)) ## grab a big block of data chunksize = 100000 # this should be a multiple of 100 for bidx in xrange(0, end, chunksize): bigblock = seqarr[:, bidx:bidx+chunksize] lend = end-bidx #LOGGER.info("BIG: %s %s %s %s", bigblock.shape, bidx, lend, end) ## write interleaved seqs 100 chars with longname+2 before tmpout = [] for block in xrange(0, min(chunksize, lend), 100): stop = min(block+100, end) for idx, name in enumerate(pnames): seqdat = bigblock[idx, block:stop] tmpout.append(" {}{}\n".format(name, "".join(seqdat))) tmpout.append("\n") ## print intermediate result and clear if any(tmpout): out.write("".join(tmpout)) ## closer out.write(NEXCLOSER) LOGGER.debug("finished writing nex in: %s", time.time() - start)
[ "def", "write_nex", "(", "data", ",", "sidx", ",", "pnames", ")", ":", "## grab seq data from tmparr", "start", "=", "time", ".", "time", "(", ")", "tmparrs", "=", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "outfiles", ",", "\"tmp-{}.h5\"", ".", "format", "(", "data", ".", "name", ")", ")", "with", "h5py", ".", "File", "(", "tmparrs", ",", "'r'", ")", "as", "io5", ":", "seqarr", "=", "io5", "[", "\"seqarr\"", "]", "## trim to size b/c it was made longer than actual", "end", "=", "np", ".", "where", "(", "np", ".", "all", "(", "seqarr", "[", ":", "]", "==", "\"\"", ",", "axis", "=", "0", ")", ")", "[", "0", "]", "if", "np", ".", "any", "(", "end", ")", ":", "end", "=", "end", ".", "min", "(", ")", "else", ":", "end", "=", "seqarr", ".", "shape", "[", "1", "]", "## write to nexus", "data", ".", "outfiles", ".", "nex", "=", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "outfiles", ",", "data", ".", "name", "+", "\".nex\"", ")", "with", "open", "(", "data", ".", "outfiles", ".", "nex", ",", "'w'", ")", "as", "out", ":", "## write nexus seq header", "out", ".", "write", "(", "NEXHEADER", ".", "format", "(", "seqarr", ".", "shape", "[", "0", "]", ",", "end", ")", ")", "## grab a big block of data", "chunksize", "=", "100000", "# this should be a multiple of 100", "for", "bidx", "in", "xrange", "(", "0", ",", "end", ",", "chunksize", ")", ":", "bigblock", "=", "seqarr", "[", ":", ",", "bidx", ":", "bidx", "+", "chunksize", "]", "lend", "=", "end", "-", "bidx", "#LOGGER.info(\"BIG: %s %s %s %s\", bigblock.shape, bidx, lend, end)", "## write interleaved seqs 100 chars with longname+2 before", "tmpout", "=", "[", "]", "for", "block", "in", "xrange", "(", "0", ",", "min", "(", "chunksize", ",", "lend", ")", ",", "100", ")", ":", "stop", "=", "min", "(", "block", "+", "100", ",", "end", ")", "for", "idx", ",", "name", "in", "enumerate", "(", "pnames", ")", ":", "seqdat", "=", "bigblock", "[", "idx", ",", "block", ":", "stop", "]", "tmpout", ".", "append", "(", "\" {}{}\\n\"", ".", "format", "(", "name", ",", "\"\"", ".", "join", "(", "seqdat", ")", ")", ")", "tmpout", ".", "append", "(", "\"\\n\"", ")", "## print intermediate result and clear", "if", "any", "(", "tmpout", ")", ":", "out", ".", "write", "(", "\"\"", ".", "join", "(", "tmpout", ")", ")", "## closer", "out", ".", "write", "(", "NEXCLOSER", ")", "LOGGER", ".", "debug", "(", "\"finished writing nex in: %s\"", ",", "time", ".", "time", "(", ")", "-", "start", ")" ]
write the nexus output file from the tmparr[seqarray] and tmparr[maparr]
[ "write", "the", "nexus", "output", "file", "from", "the", "tmparr", "[", "seqarray", "]", "and", "tmparr", "[", "maparr", "]" ]
python
valid
timstaley/voeventdb
voeventdb/server/database/models.py
https://github.com/timstaley/voeventdb/blob/e37b176d65fced4ca4f059109a95d6974bb8a091/voeventdb/server/database/models.py#L28-L56
def _has_bad_coords(root, stream): """ Predicate function encapsulating 'data clean up' filter code. Currently minimal, but these sort of functions tend to grow over time. Problem 1: Some of the GCN packets have an RA /Dec equal to (0,0) in the WhereWhen, and a flag in the What signifying that those are actually dummy co-ords. (This is used for time-stamping an event which is not localised). So, we don't load those positions, to avoid muddying the database corpus. Problem 2: com.dc3/dc3.broker#BrokerTest packets have dummy RA/Dec values, with no units specified. (They're also marked role=test, so it's not such a big deal, but it generates a lot of debug-log churn.) """ if stream == "com.dc3/dc3.broker": return True if not stream.split('/')[0] == 'nasa.gsfc.gcn': return False toplevel_params = vp.get_toplevel_params(root) if "Coords_String" in toplevel_params: if (toplevel_params["Coords_String"]['value'] == "unavailable/inappropriate"): return True return False
[ "def", "_has_bad_coords", "(", "root", ",", "stream", ")", ":", "if", "stream", "==", "\"com.dc3/dc3.broker\"", ":", "return", "True", "if", "not", "stream", ".", "split", "(", "'/'", ")", "[", "0", "]", "==", "'nasa.gsfc.gcn'", ":", "return", "False", "toplevel_params", "=", "vp", ".", "get_toplevel_params", "(", "root", ")", "if", "\"Coords_String\"", "in", "toplevel_params", ":", "if", "(", "toplevel_params", "[", "\"Coords_String\"", "]", "[", "'value'", "]", "==", "\"unavailable/inappropriate\"", ")", ":", "return", "True", "return", "False" ]
Predicate function encapsulating 'data clean up' filter code. Currently minimal, but these sort of functions tend to grow over time. Problem 1: Some of the GCN packets have an RA /Dec equal to (0,0) in the WhereWhen, and a flag in the What signifying that those are actually dummy co-ords. (This is used for time-stamping an event which is not localised). So, we don't load those positions, to avoid muddying the database corpus. Problem 2: com.dc3/dc3.broker#BrokerTest packets have dummy RA/Dec values, with no units specified. (They're also marked role=test, so it's not such a big deal, but it generates a lot of debug-log churn.)
[ "Predicate", "function", "encapsulating", "data", "clean", "up", "filter", "code", "." ]
python
train
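A hedged sketch of applying the predicate during ingest. The packet file name is hypothetical, and the stream string is derived from the IVORN the way the loader's callers would derive it.

import voeventparse as vp

with open('gcn_packet.xml', 'rb') as f:   # hypothetical packet on disk
    root = vp.load(f)

ivorn = root.attrib['ivorn']              # e.g. ivo://nasa.gsfc.gcn/SWIFT#...
stream = ivorn.split('//')[1].split('#')[0]
if _has_bad_coords(root, stream):
    print('skipping dummy coords for', ivorn)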
dropbox/stone
stone/backends/python_type_stubs.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/backends/python_type_stubs.py#L109-L141
def _generate_base_namespace_module(self, namespace): # type: (ApiNamespace) -> None """Creates a module for the namespace. All data types and routes are represented as Python classes.""" self.cur_namespace = namespace self.import_tracker.clear() generate_module_header(self) self.emit_placeholder('imports_needed_for_typing') self.emit_raw(validators_import_with_type_ignore) # Generate import statements for all referenced namespaces. self._generate_imports_for_referenced_namespaces(namespace) self._generate_typevars() for annotation_type in namespace.annotation_types: self._generate_annotation_type_class(namespace, annotation_type) for data_type in namespace.linearize_data_types(): if isinstance(data_type, Struct): self._generate_struct_class(namespace, data_type) elif isinstance(data_type, Union): self._generate_union_class(namespace, data_type) else: raise TypeError('Cannot handle type %r' % type(data_type)) for alias in namespace.linearize_aliases(): self._generate_alias_definition(namespace, alias) self._generate_routes(namespace) self._generate_imports_needed_for_typing()
[ "def", "_generate_base_namespace_module", "(", "self", ",", "namespace", ")", ":", "# type: (ApiNamespace) -> None", "self", ".", "cur_namespace", "=", "namespace", "self", ".", "import_tracker", ".", "clear", "(", ")", "generate_module_header", "(", "self", ")", "self", ".", "emit_placeholder", "(", "'imports_needed_for_typing'", ")", "self", ".", "emit_raw", "(", "validators_import_with_type_ignore", ")", "# Generate import statements for all referenced namespaces.", "self", ".", "_generate_imports_for_referenced_namespaces", "(", "namespace", ")", "self", ".", "_generate_typevars", "(", ")", "for", "annotation_type", "in", "namespace", ".", "annotation_types", ":", "self", ".", "_generate_annotation_type_class", "(", "namespace", ",", "annotation_type", ")", "for", "data_type", "in", "namespace", ".", "linearize_data_types", "(", ")", ":", "if", "isinstance", "(", "data_type", ",", "Struct", ")", ":", "self", ".", "_generate_struct_class", "(", "namespace", ",", "data_type", ")", "elif", "isinstance", "(", "data_type", ",", "Union", ")", ":", "self", ".", "_generate_union_class", "(", "namespace", ",", "data_type", ")", "else", ":", "raise", "TypeError", "(", "'Cannot handle type %r'", "%", "type", "(", "data_type", ")", ")", "for", "alias", "in", "namespace", ".", "linearize_aliases", "(", ")", ":", "self", ".", "_generate_alias_definition", "(", "namespace", ",", "alias", ")", "self", ".", "_generate_routes", "(", "namespace", ")", "self", ".", "_generate_imports_needed_for_typing", "(", ")" ]
Creates a module for the namespace. All data types and routes are represented as Python classes.
[ "Creates", "a", "module", "for", "the", "namespace", ".", "All", "data", "types", "and", "routes", "are", "represented", "as", "Python", "classes", "." ]
python
train
wreckage/django-happenings
happenings/utils/common.py
https://github.com/wreckage/django-happenings/blob/7bca5576efa6cd4c4e87356bf9e5b8cd538ae91d/happenings/utils/common.py#L169-L180
def check_weekday(year, month, day, reverse=False): """ Make sure any event day we send back for weekday repeating events is not a weekend. """ d = date(year, month, day) while d.weekday() in (5, 6): if reverse: d -= timedelta(days=1) else: d += timedelta(days=1) return d.year, d.month, d.day
[ "def", "check_weekday", "(", "year", ",", "month", ",", "day", ",", "reverse", "=", "False", ")", ":", "d", "=", "date", "(", "year", ",", "month", ",", "day", ")", "while", "d", ".", "weekday", "(", ")", "in", "(", "5", ",", "6", ")", ":", "if", "reverse", ":", "d", "-=", "timedelta", "(", "days", "=", "1", ")", "else", ":", "d", "+=", "timedelta", "(", "days", "=", "1", ")", "return", "d", ".", "year", ",", "d", ".", "month", ",", "d", ".", "day" ]
Make sure any event day we send back for weekday repeating events is not a weekend.
[ "Make", "sure", "any", "event", "day", "we", "send", "back", "for", "weekday", "repeating", "events", "is", "not", "a", "weekend", "." ]
python
test
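A quick check of both directions: 2014-03-01 fell on a Saturday, so the event day slides forward to Monday the 3rd, or back to Friday, February 28 with reverse=True.

from happenings.utils.common import check_weekday

print(check_weekday(2014, 3, 1))                 # (2014, 3, 3)
print(check_weekday(2014, 3, 1, reverse=True))   # (2014, 2, 28)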
slackapi/python-slack-events-api
slackeventsapi/__init__.py
https://github.com/slackapi/python-slack-events-api/blob/1254d83181eb939f124a0e4746dafea7e14047c1/slackeventsapi/__init__.py#L13-L23
def start(self, host='127.0.0.1', port=None, debug=False, **kwargs): """ Start the built in webserver, bound to the host and port you'd like. Default host is `127.0.0.1` and port 8080. :param host: The host you want to bind the build in webserver to :param port: The port number you want the webserver to run on :param debug: Set to `True` to enable debug level logging :param kwargs: Additional arguments you'd like to pass to Flask """ self.server.run(host=host, port=port, debug=debug, **kwargs)
[ "def", "start", "(", "self", ",", "host", "=", "'127.0.0.1'", ",", "port", "=", "None", ",", "debug", "=", "False", ",", "*", "*", "kwargs", ")", ":", "self", ".", "server", ".", "run", "(", "host", "=", "host", ",", "port", "=", "port", ",", "debug", "=", "debug", ",", "*", "*", "kwargs", ")" ]
Start the built in webserver, bound to the host and port you'd like. Default host is `127.0.0.1` and port 8080. :param host: The host you want to bind the build in webserver to :param port: The port number you want the webserver to run on :param debug: Set to `True` to enable debug level logging :param kwargs: Additional arguments you'd like to pass to Flask
[ "Start", "the", "built", "in", "webserver", "bound", "to", "the", "host", "and", "port", "you", "d", "like", ".", "Default", "host", "is", "127", ".", "0", ".", "0", ".", "1", "and", "port", "8080", "." ]
python
train
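Typical wiring for the server above; the signing secret, endpoint, and port are placeholders.

from slackeventsapi import SlackEventAdapter

slack_events_adapter = SlackEventAdapter('<SLACK_SIGNING_SECRET>', '/slack/events')

@slack_events_adapter.on('message')
def handle_message(event_data):
    print(event_data['event'].get('text'))

slack_events_adapter.start(host='0.0.0.0', port=3000, debug=True)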
hearsaycorp/normalize
normalize/visitor.py
https://github.com/hearsaycorp/normalize/blob/8b36522ddca6d41b434580bd848f3bdaa7a999c8/normalize/visitor.py#L482-L506
def reflect(cls, X, **kwargs): """Reflect is for visitors where you are exposing some information about the types reachable from a starting type to an external system. For example, a front-end, a REST URL router and documentation framework, an avro schema definition, etc. X can be a type or an instance. This API should be considered **experimental** """ if isinstance(X, type): value = None value_type = X else: value = X value_type = type(X) if not issubclass(value_type, Record): raise TypeError("Cannot reflect on %s" % value_type.__name__) visitor = cls.Visitor( cls.scantypes, cls.propinfo, cls.itemtypes, cls.typeinfo, **kwargs) return cls.map(visitor, value, value_type)
[ "def", "reflect", "(", "cls", ",", "X", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "X", ",", "type", ")", ":", "value", "=", "None", "value_type", "=", "X", "else", ":", "value", "=", "X", "value_type", "=", "type", "(", "X", ")", "if", "not", "issubclass", "(", "value_type", ",", "Record", ")", ":", "raise", "TypeError", "(", "\"Cannot reflect on %s\"", "%", "value_type", ".", "__name__", ")", "visitor", "=", "cls", ".", "Visitor", "(", "cls", ".", "scantypes", ",", "cls", ".", "propinfo", ",", "cls", ".", "itemtypes", ",", "cls", ".", "typeinfo", ",", "*", "*", "kwargs", ")", "return", "cls", ".", "map", "(", "visitor", ",", "value", ",", "value_type", ")" ]
Reflect is for visitors where you are exposing some information about the types reachable from a starting type to an external system. For example, a front-end, a REST URL router and documentation framework, an avro schema definition, etc. X can be a type or an instance. This API should be considered **experimental**
[ "Reflect", "is", "for", "visitors", "where", "you", "are", "exposing", "some", "information", "about", "the", "types", "reachable", "from", "a", "starting", "type", "to", "an", "external", "system", ".", "For", "example", "a", "front", "-", "end", "a", "REST", "URL", "router", "and", "documentation", "framework", "an", "avro", "schema", "definition", "etc", "." ]
python
train
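A hedged sketch of the experimental API above. `Person` is invented, and `VisitorPattern` is used directly on the assumption that its class-level defaults (`Visitor`, `scantypes`, and so on) are sufficient for a plain reflection pass.

from normalize import Record, Property
from normalize.visitor import VisitorPattern

class Person(Record):
    name = Property(isa=str)
    age = Property(isa=int)

schema = VisitorPattern.reflect(Person)   # a type is accepted; an instance would be too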
cons3rt/pycons3rt
pycons3rt/nexus.py
https://github.com/cons3rt/pycons3rt/blob/f004ab3a35c5bff2f698131fef3b2a8ed5a7596d/pycons3rt/nexus.py#L302-L553
def get_artifact_nexus3(suppress_status=False, nexus_base_url=sample_nexus_base_url, repository=None,
                        timeout_sec=600, overwrite=True, username=None, password=None, **kwargs):
    """Retrieves an artifact from the Nexus 3 ReST API

    :param suppress_status: (bool) Set to True to suppress printing download status
    :param nexus_base_url: (str) Base URL of the Nexus Server (domain name portion only, see sample)
    :param repository: (str) Repository to query (e.g. snapshots) if not provided, will attempt to determine
    :param timeout_sec: (int) Number of seconds to wait before timing out the artifact retrieval.
    :param overwrite: (bool) True overwrites the file on the local system if it exists,
        False will log an INFO message and exit if the file already exists
    :param username: (str) username for basic auth
    :param password: (str) password for basic auth
    :param kwargs:
        group_id: (str) The artifact's Group ID in Nexus
        artifact_id: (str) The artifact's Artifact ID in Nexus
        packaging: (str) The artifact's packaging (e.g. war, zip)
        version: (str) Version of the artifact to retrieve (e.g. LATEST, 4.8.4, 4.9.0-SNAPSHOT)
        destination_dir: (str) Full path to the destination directory
        classifier: (str) The artifact's classifier (e.g. bin)
    :return: None
    :raises: TypeError, ValueError, OSError, RuntimeError
    """
    log = logging.getLogger(mod_logger + '.get_artifact_nexus3')
    required_args = ['group_id', 'artifact_id', 'packaging', 'version', 'destination_dir']
    if not isinstance(overwrite, bool):
        msg = 'overwrite arg must be a string, found: {t}'.format(t=overwrite.__class__.__name__)
        log.error(msg)
        raise TypeError(msg)
    if not isinstance(nexus_base_url, basestring):
        msg = 'nexus_url arg must be a string, found: {t}'.format(t=nexus_base_url.__class__.__name__)
        log.error(msg)
        raise TypeError(msg)
    log.debug('Using Nexus Server URL: {u}'.format(u=nexus_base_url))

    # Ensure the required args are supplied, and that they are all strings
    for required_arg in required_args:
        try:
            assert required_arg in kwargs
        except AssertionError:
            _, ex, trace = sys.exc_info()
            msg = 'A required arg was not supplied. Required args are: group_id, artifact_id, classifier, version, ' \
                  'packaging and destination_dir\n{e}'.format(e=str(ex))
            log.error(msg)
            raise ValueError(msg)
        if not isinstance(kwargs[required_arg], basestring):
            msg = 'Arg {a} should be a string'.format(a=required_arg)
            log.error(msg)
            raise TypeError(msg)

    # Set variables to be used in the REST call
    group_id = kwargs['group_id']
    artifact_id = kwargs['artifact_id']
    version = kwargs['version']
    packaging = kwargs['packaging']
    destination_dir = kwargs['destination_dir']

    # Ensure the destination directory exists
    if not os.path.isdir(destination_dir):
        log.debug('Specified destination_dir not found on file system, creating: {d}'.format(d=destination_dir))
        try:
            mkdir_p(destination_dir)
        except CommandError:
            _, ex, trace = sys.exc_info()
            msg = 'Unable to create destination directory: {d}\n{e}'.format(d=destination_dir, e=str(ex))
            raise OSError(msg)

    # Determine the auth based on username and password
    basic_auth = None
    if (username is not None) and (password is not None):
        log.info('Using the provided username/password for basic authentication...')
        basic_auth = HTTPBasicAuth(username, password)

    # Set the classifier if it was provided
    classifier = None
    if 'classifier' in kwargs:
        if isinstance(kwargs['classifier'], basestring):
            classifier = kwargs['classifier']
            log.debug('Using classifier: {c}'.format(c=classifier))
        else:
            log.warn('Arg classifier provided but it was not an instance of basestring')

    # Determine the repository (snapshots or releases)
    if not repository:
        if 'SNAPSHOT' in version:
            repository = 'snapshots'
        else:
            repository = 'releases'
    log.debug('Using repository: {r}'.format(r=repository))

    # Compute the query URL
    group_id_url = group_id.replace('.', '/')

    # Get the Maven metadata
    query_url_version = nexus_base_url + '/repository/{r}/{g}/{a}/{v}'.format(
        r=repository, g=group_id_url, a=artifact_id, v=version
    )
    if 'snapshot' in repository.lower():
        # Query nexus for metadata to determine the proper file name
        query_url_metadata = query_url_version + '/maven-metadata.xml'
        log.info('Attempting to query Nexus for the snapshot metadata using URL: {u}'.format(u=query_url_metadata))
        try:
            nexus_response = query_nexus(query_url=query_url_metadata, timeout_sec=timeout_sec, basic_auth=basic_auth)
        except RuntimeError:
            _, ex, trace = sys.exc_info()
            msg = '{n}: There was a problem querying Nexus URL: {u}\n{e}'.format(
                n=ex.__class__.__name__, u=query_url_metadata, e=str(ex))
            log.error(msg)
            raise RuntimeError, msg, trace
        if nexus_response.status_code != 200:
            raise RuntimeError('Bad response from Nexus metadata URL [{c}]: {u}'.format(
                c=nexus_response.status_code, u=query_url_metadata))

        # Parse the XML output
        root = ET.fromstring(nexus_response.text)
        log.info('Attempting to find the value of the file name...')
        try:
            value = root.find('versioning').find('snapshotVersions').find('snapshotVersion').find('value')
        except AttributeError:
            _, ex, trace = sys.exc_info()
            msg = 'AttributeError: Unable to find versioning/snapshotVersions/snapshotVersion/value\n{e}'.format(
                e=str(ex))
            raise ValueError, msg, trace

        # Ensure a value was found
        if value is None:
            raise ValueError('Unable to determine the value of the snapshot version')

        # Get the text version
        text_version = value.text
        log.info('Found version value: {t}'.format(t=text_version))

        # Determine the artifact file name
        artifact_file_name = '{a}-{t}'.format(
            a=artifact_id,
            t=text_version
        )
    else:
        # Construct the file name for releases (e.g. cons3rt-backend-install-18.14.0-package-otto.zip)
        artifact_file_name = '{a}-{v}'.format(
            a=artifact_id,
            v=version
        )

    # Add classifier if provided and packaging
    if classifier:
        artifact_file_name += '-{c}'.format(c=classifier)
    artifact_file_name += '.{p}'.format(p=packaging)
    log.info('Using artifact file name: {n}'.format(n=artifact_file_name))

    # Determine the full query URL
    query_url = query_url_version + '/{n}'.format(n=artifact_file_name)
    log.info('Using Nexus query URL: {u}'.format(u=query_url))

    # Set up for download attempts
    retry_sec = 5
    max_retries = 6
    try_num = 1
    download_success = False
    dl_err = None
    failed_attempt = False

    # Start the retry loop
    while try_num <= max_retries:

        # Break the loop if the download was successful
        if download_success:
            break

        log.info('Attempting to query Nexus for the Artifact using URL: {u}'.format(u=query_url))
        try:
            nexus_response = query_nexus(query_url=query_url, timeout_sec=timeout_sec, basic_auth=basic_auth)
        except RuntimeError:
            _, ex, trace = sys.exc_info()
            msg = '{n}: There was a problem querying Nexus URL: {u}\n{e}'.format(
                n=ex.__class__.__name__, u=query_url, e=str(ex))
            log.error(msg)
            raise RuntimeError, msg, trace

        # Attempt to get the content-length
        file_size = 0
        try:
            file_size = int(nexus_response.headers['Content-Length'])
        except(KeyError, ValueError):
            log.debug('Could not get Content-Length, suppressing download status...')
            suppress_status = True
        else:
            log.info('Artifact file size: {s}'.format(s=file_size))

        # Determine the full download file path
        file_name = nexus_response.url.split('/')[-1]
        download_file = os.path.join(destination_dir, file_name)

        # Attempt to download the content from the response
        log.info('Attempting to download content of size {s} from Nexus to file: {d}'.format(
            s=file_size, d=download_file))

        # Remove the existing file if it exists, or exit if the file exists, overwrite is set,
        # and there was not a previous failed attempted download
        if os.path.isfile(download_file) and overwrite:
            log.debug('File already exists, removing: {d}'.format(d=download_file))
            os.remove(download_file)
        elif os.path.isfile(download_file) and not overwrite and not failed_attempt:
            log.info('File already downloaded, and overwrite is set to False. The Artifact will '
                     'not be retrieved from Nexus: {f}. To overwrite the existing downloaded file, '
                     'set overwrite=True'.format(f=download_file))
            return

        # Attempt to download content
        log.debug('Attempt # {n} of {m} to download content from the Nexus response'.format(n=try_num, m=max_retries))
        chunk_size = 1024
        file_size_dl = 0
        try:
            with open(download_file, 'wb') as f:
                for chunk in nexus_response.iter_content(chunk_size=chunk_size):
                    if chunk:
                        f.write(chunk)
                        file_size_dl += len(chunk)
                        status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
                        status += chr(8)*(len(status)+1)
                        if not suppress_status:
                            print(status),
        except(requests.exceptions.ConnectionError, requests.exceptions.RequestException, OSError):
            _, ex, trace = sys.exc_info()
            dl_err = '{n}: There was an error reading content from the Nexus response. Downloaded ' \
                     'size: {s}.\n{e}'.format(n=ex.__class__.__name__, s=file_size_dl, t=retry_sec, e=str(ex))
            failed_attempt = True
            log.warn(dl_err)
            if try_num < max_retries:
                log.info('Retrying download in {t} sec...'.format(t=retry_sec))
                time.sleep(retry_sec)
        else:
            log.info('File download of size {s} completed without error: {f}'.format(s=file_size_dl, f=download_file))
            failed_attempt = False
            download_success = True
        try_num += 1

    # Raise an exception if the download did not complete successfully
    if not download_success:
        msg = 'Unable to download file content from Nexus after {n} attempts'.format(n=max_retries)
        if dl_err:
            msg += '\n{m}'.format(m=dl_err)
        log.error(msg)
        raise RuntimeError(msg)
[ "def", "get_artifact_nexus3", "(", "suppress_status", "=", "False", ",", "nexus_base_url", "=", "sample_nexus_base_url", ",", "repository", "=", "None", ",", "timeout_sec", "=", "600", ",", "overwrite", "=", "True", ",", "username", "=", "None", ",", "password", "=", "None", ",", "*", "*", "kwargs", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "mod_logger", "+", "'.get_artifact_nexus3'", ")", "required_args", "=", "[", "'group_id'", ",", "'artifact_id'", ",", "'packaging'", ",", "'version'", ",", "'destination_dir'", "]", "if", "not", "isinstance", "(", "overwrite", ",", "bool", ")", ":", "msg", "=", "'overwrite arg must be a string, found: {t}'", ".", "format", "(", "t", "=", "overwrite", ".", "__class__", ".", "__name__", ")", "log", ".", "error", "(", "msg", ")", "raise", "TypeError", "(", "msg", ")", "if", "not", "isinstance", "(", "nexus_base_url", ",", "basestring", ")", ":", "msg", "=", "'nexus_url arg must be a string, found: {t}'", ".", "format", "(", "t", "=", "nexus_base_url", ".", "__class__", ".", "__name__", ")", "log", ".", "error", "(", "msg", ")", "raise", "TypeError", "(", "msg", ")", "log", ".", "debug", "(", "'Using Nexus Server URL: {u}'", ".", "format", "(", "u", "=", "nexus_base_url", ")", ")", "# Ensure the required args are supplied, and that they are all strings", "for", "required_arg", "in", "required_args", ":", "try", ":", "assert", "required_arg", "in", "kwargs", "except", "AssertionError", ":", "_", ",", "ex", ",", "trace", "=", "sys", ".", "exc_info", "(", ")", "msg", "=", "'A required arg was not supplied. Required args are: group_id, artifact_id, classifier, version, '", "'packaging and destination_dir\\n{e}'", ".", "format", "(", "e", "=", "str", "(", "ex", ")", ")", "log", ".", "error", "(", "msg", ")", "raise", "ValueError", "(", "msg", ")", "if", "not", "isinstance", "(", "kwargs", "[", "required_arg", "]", ",", "basestring", ")", ":", "msg", "=", "'Arg {a} should be a string'", ".", "format", "(", "a", "=", "required_arg", ")", "log", ".", "error", "(", "msg", ")", "raise", "TypeError", "(", "msg", ")", "# Set variables to be used in the REST call", "group_id", "=", "kwargs", "[", "'group_id'", "]", "artifact_id", "=", "kwargs", "[", "'artifact_id'", "]", "version", "=", "kwargs", "[", "'version'", "]", "packaging", "=", "kwargs", "[", "'packaging'", "]", "destination_dir", "=", "kwargs", "[", "'destination_dir'", "]", "# Ensure the destination directory exists", "if", "not", "os", ".", "path", ".", "isdir", "(", "destination_dir", ")", ":", "log", ".", "debug", "(", "'Specified destination_dir not found on file system, creating: {d}'", ".", "format", "(", "d", "=", "destination_dir", ")", ")", "try", ":", "mkdir_p", "(", "destination_dir", ")", "except", "CommandError", ":", "_", ",", "ex", ",", "trace", "=", "sys", ".", "exc_info", "(", ")", "msg", "=", "'Unable to create destination directory: {d}\\n{e}'", ".", "format", "(", "d", "=", "destination_dir", ",", "e", "=", "str", "(", "ex", ")", ")", "raise", "OSError", "(", "msg", ")", "# Determine the auth based on username and password", "basic_auth", "=", "None", "if", "(", "username", "is", "not", "None", ")", "and", "(", "password", "is", "not", "None", ")", ":", "log", ".", "info", "(", "'Using the provided username/password for basic authentication...'", ")", "basic_auth", "=", "HTTPBasicAuth", "(", "username", ",", "password", ")", "# Set the classifier if it was provided", "classifier", "=", "None", "if", "'classifier'", "in", "kwargs", ":", "if", "isinstance", "(", "kwargs", 
"[", "'classifier'", "]", ",", "basestring", ")", ":", "classifier", "=", "kwargs", "[", "'classifier'", "]", "log", ".", "debug", "(", "'Using classifier: {c}'", ".", "format", "(", "c", "=", "classifier", ")", ")", "else", ":", "log", ".", "warn", "(", "'Arg classifier provided but it was not an instance of basestring'", ")", "# Determine the repository (snapshots or releases)", "if", "not", "repository", ":", "if", "'SNAPSHOT'", "in", "version", ":", "repository", "=", "'snapshots'", "else", ":", "repository", "=", "'releases'", "log", ".", "debug", "(", "'Using repository: {r}'", ".", "format", "(", "r", "=", "repository", ")", ")", "# Compute the query URL", "group_id_url", "=", "group_id", ".", "replace", "(", "'.'", ",", "'/'", ")", "# Get the Maven metadata", "query_url_version", "=", "nexus_base_url", "+", "'/repository/{r}/{g}/{a}/{v}'", ".", "format", "(", "r", "=", "repository", ",", "g", "=", "group_id_url", ",", "a", "=", "artifact_id", ",", "v", "=", "version", ")", "if", "'snapshot'", "in", "repository", ".", "lower", "(", ")", ":", "# Query nexus for metadata to determine the proper file name", "query_url_metadata", "=", "query_url_version", "+", "'/maven-metadata.xml'", "log", ".", "info", "(", "'Attempting to query Nexus for the snapshot metadata using URL: {u}'", ".", "format", "(", "u", "=", "query_url_metadata", ")", ")", "try", ":", "nexus_response", "=", "query_nexus", "(", "query_url", "=", "query_url_metadata", ",", "timeout_sec", "=", "timeout_sec", ",", "basic_auth", "=", "basic_auth", ")", "except", "RuntimeError", ":", "_", ",", "ex", ",", "trace", "=", "sys", ".", "exc_info", "(", ")", "msg", "=", "'{n}: There was a problem querying Nexus URL: {u}\\n{e}'", ".", "format", "(", "n", "=", "ex", ".", "__class__", ".", "__name__", ",", "u", "=", "query_url_metadata", ",", "e", "=", "str", "(", "ex", ")", ")", "log", ".", "error", "(", "msg", ")", "raise", "RuntimeError", ",", "msg", ",", "trace", "if", "nexus_response", ".", "status_code", "!=", "200", ":", "raise", "RuntimeError", "(", "'Bad response from Nexus metadata URL [{c}]: {u}'", ".", "format", "(", "c", "=", "nexus_response", ".", "status_code", ",", "u", "=", "query_url_metadata", ")", ")", "# Parse the XML output", "root", "=", "ET", ".", "fromstring", "(", "nexus_response", ".", "text", ")", "log", ".", "info", "(", "'Attempting to find the value of the file name...'", ")", "try", ":", "value", "=", "root", ".", "find", "(", "'versioning'", ")", ".", "find", "(", "'snapshotVersions'", ")", ".", "find", "(", "'snapshotVersion'", ")", ".", "find", "(", "'value'", ")", "except", "AttributeError", ":", "_", ",", "ex", ",", "trace", "=", "sys", ".", "exc_info", "(", ")", "msg", "=", "'AttributeError: Unable to find versioning/snapshotVersions/snapshotVersion/value\\n{e}'", ".", "format", "(", "e", "=", "str", "(", "ex", ")", ")", "raise", "ValueError", ",", "msg", ",", "trace", "# Ensure a value was found", "if", "value", "is", "None", ":", "raise", "ValueError", "(", "'Unable to determine the value of the snapshot version'", ")", "# Get the text version", "text_version", "=", "value", ".", "text", "log", ".", "info", "(", "'Found version value: {t}'", ".", "format", "(", "t", "=", "text_version", ")", ")", "# Determine the artifact file name", "artifact_file_name", "=", "'{a}-{t}'", ".", "format", "(", "a", "=", "artifact_id", ",", "t", "=", "text_version", ")", "else", ":", "# Construct the file name for releases (e.g. 
cons3rt-backend-install-18.14.0-package-otto.zip)", "artifact_file_name", "=", "'{a}-{v}'", ".", "format", "(", "a", "=", "artifact_id", ",", "v", "=", "version", ")", "# Add classifier if provided and packaging", "if", "classifier", ":", "artifact_file_name", "+=", "'-{c}'", ".", "format", "(", "c", "=", "classifier", ")", "artifact_file_name", "+=", "'.{p}'", ".", "format", "(", "p", "=", "packaging", ")", "log", ".", "info", "(", "'Using artifact file name: {n}'", ".", "format", "(", "n", "=", "artifact_file_name", ")", ")", "# Determine the full query URL", "query_url", "=", "query_url_version", "+", "'/{n}'", ".", "format", "(", "n", "=", "artifact_file_name", ")", "log", ".", "info", "(", "'Using Nexus query URL: {u}'", ".", "format", "(", "u", "=", "query_url", ")", ")", "# Set up for download attempts", "retry_sec", "=", "5", "max_retries", "=", "6", "try_num", "=", "1", "download_success", "=", "False", "dl_err", "=", "None", "failed_attempt", "=", "False", "# Start the retry loop", "while", "try_num", "<=", "max_retries", ":", "# Break the loop if the download was successful", "if", "download_success", ":", "break", "log", ".", "info", "(", "'Attempting to query Nexus for the Artifact using URL: {u}'", ".", "format", "(", "u", "=", "query_url", ")", ")", "try", ":", "nexus_response", "=", "query_nexus", "(", "query_url", "=", "query_url", ",", "timeout_sec", "=", "timeout_sec", ",", "basic_auth", "=", "basic_auth", ")", "except", "RuntimeError", ":", "_", ",", "ex", ",", "trace", "=", "sys", ".", "exc_info", "(", ")", "msg", "=", "'{n}: There was a problem querying Nexus URL: {u}\\n{e}'", ".", "format", "(", "n", "=", "ex", ".", "__class__", ".", "__name__", ",", "u", "=", "query_url", ",", "e", "=", "str", "(", "ex", ")", ")", "log", ".", "error", "(", "msg", ")", "raise", "RuntimeError", ",", "msg", ",", "trace", "# Attempt to get the content-length", "file_size", "=", "0", "try", ":", "file_size", "=", "int", "(", "nexus_response", ".", "headers", "[", "'Content-Length'", "]", ")", "except", "(", "KeyError", ",", "ValueError", ")", ":", "log", ".", "debug", "(", "'Could not get Content-Length, suppressing download status...'", ")", "suppress_status", "=", "True", "else", ":", "log", ".", "info", "(", "'Artifact file size: {s}'", ".", "format", "(", "s", "=", "file_size", ")", ")", "# Determine the full download file path", "file_name", "=", "nexus_response", ".", "url", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "download_file", "=", "os", ".", "path", ".", "join", "(", "destination_dir", ",", "file_name", ")", "# Attempt to download the content from the response", "log", ".", "info", "(", "'Attempting to download content of size {s} from Nexus to file: {d}'", ".", "format", "(", "s", "=", "file_size", ",", "d", "=", "download_file", ")", ")", "# Remove the existing file if it exists, or exit if the file exists, overwrite is set,", "# and there was not a previous failed attempted download", "if", "os", ".", "path", ".", "isfile", "(", "download_file", ")", "and", "overwrite", ":", "log", ".", "debug", "(", "'File already exists, removing: {d}'", ".", "format", "(", "d", "=", "download_file", ")", ")", "os", ".", "remove", "(", "download_file", ")", "elif", "os", ".", "path", ".", "isfile", "(", "download_file", ")", "and", "not", "overwrite", "and", "not", "failed_attempt", ":", "log", ".", "info", "(", "'File already downloaded, and overwrite is set to False. The Artifact will '", "'not be retrieved from Nexus: {f}. 
To overwrite the existing downloaded file, '", "'set overwrite=True'", ".", "format", "(", "f", "=", "download_file", ")", ")", "return", "# Attempt to download content", "log", ".", "debug", "(", "'Attempt # {n} of {m} to download content from the Nexus response'", ".", "format", "(", "n", "=", "try_num", ",", "m", "=", "max_retries", ")", ")", "chunk_size", "=", "1024", "file_size_dl", "=", "0", "try", ":", "with", "open", "(", "download_file", ",", "'wb'", ")", "as", "f", ":", "for", "chunk", "in", "nexus_response", ".", "iter_content", "(", "chunk_size", "=", "chunk_size", ")", ":", "if", "chunk", ":", "f", ".", "write", "(", "chunk", ")", "file_size_dl", "+=", "len", "(", "chunk", ")", "status", "=", "r\"%10d [%3.2f%%]\"", "%", "(", "file_size_dl", ",", "file_size_dl", "*", "100.", "/", "file_size", ")", "status", "+=", "chr", "(", "8", ")", "*", "(", "len", "(", "status", ")", "+", "1", ")", "if", "not", "suppress_status", ":", "print", "(", "status", ")", ",", "except", "(", "requests", ".", "exceptions", ".", "ConnectionError", ",", "requests", ".", "exceptions", ".", "RequestException", ",", "OSError", ")", ":", "_", ",", "ex", ",", "trace", "=", "sys", ".", "exc_info", "(", ")", "dl_err", "=", "'{n}: There was an error reading content from the Nexus response. Downloaded '", "'size: {s}.\\n{e}'", ".", "format", "(", "n", "=", "ex", ".", "__class__", ".", "__name__", ",", "s", "=", "file_size_dl", ",", "t", "=", "retry_sec", ",", "e", "=", "str", "(", "ex", ")", ")", "failed_attempt", "=", "True", "log", ".", "warn", "(", "dl_err", ")", "if", "try_num", "<", "max_retries", ":", "log", ".", "info", "(", "'Retrying download in {t} sec...'", ".", "format", "(", "t", "=", "retry_sec", ")", ")", "time", ".", "sleep", "(", "retry_sec", ")", "else", ":", "log", ".", "info", "(", "'File download of size {s} completed without error: {f}'", ".", "format", "(", "s", "=", "file_size_dl", ",", "f", "=", "download_file", ")", ")", "failed_attempt", "=", "False", "download_success", "=", "True", "try_num", "+=", "1", "# Raise an exception if the download did not complete successfully", "if", "not", "download_success", ":", "msg", "=", "'Unable to download file content from Nexus after {n} attempts'", ".", "format", "(", "n", "=", "max_retries", ")", "if", "dl_err", ":", "msg", "+=", "'\\n{m}'", ".", "format", "(", "m", "=", "dl_err", ")", "log", ".", "error", "(", "msg", ")", "raise", "RuntimeError", "(", "msg", ")" ]
Retrieves an artifact from the Nexus 3 ReST API :param suppress_status: (bool) Set to True to suppress printing download status :param nexus_base_url: (str) Base URL of the Nexus Server (domain name portion only, see sample) :param repository: (str) Repository to query (e.g. snapshots) if not provided, will attempt to determine :param timeout_sec: (int) Number of seconds to wait before timing out the artifact retrieval. :param overwrite: (bool) True overwrites the file on the local system if it exists, False will log an INFO message and exit if the file already exists :param username: (str) username for basic auth :param password: (str) password for basic auth :param kwargs: group_id: (str) The artifact's Group ID in Nexus artifact_id: (str) The artifact's Artifact ID in Nexus packaging: (str) The artifact's packaging (e.g. war, zip) version: (str) Version of the artifact to retrieve (e.g. LATEST, 4.8.4, 4.9.0-SNAPSHOT) destination_dir: (str) Full path to the destination directory classifier: (str) The artifact's classifier (e.g. bin) :return: None :raises: TypeError, ValueError, OSError, RuntimeError
[ "Retrieves", "an", "artifact", "from", "the", "Nexus", "3", "ReST", "API" ]
python
train
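The record above streams an artifact in chunks and retries on connection failures. A minimal standalone sketch of that chunked-download-with-retry pattern using only `requests`; the URL, credentials, and destination path in the comment are placeholder assumptions, not values from the record:

```python
import time

import requests


def download_with_retries(url, dest_path, auth=None, timeout_sec=600,
                          max_retries=6, retry_sec=5, chunk_size=1024):
    """Stream `url` to `dest_path`, retrying on request errors."""
    for attempt in range(1, max_retries + 1):
        try:
            resp = requests.get(url, auth=auth, stream=True, timeout=timeout_sec)
            resp.raise_for_status()
            with open(dest_path, 'wb') as f:
                for chunk in resp.iter_content(chunk_size=chunk_size):
                    if chunk:  # skip keep-alive chunks
                        f.write(chunk)
            return dest_path
        except requests.exceptions.RequestException:
            if attempt == max_retries:
                raise
            time.sleep(retry_sec)


# Placeholder values -- substitute a real Nexus URL and credentials:
# download_with_retries('https://nexus.example.com/repository/releases/g/a/1.0/a-1.0.zip',
#                       '/tmp/a-1.0.zip', auth=('user', 'pass'))
```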
justquick/django-activity-stream
actstream/templatetags/activity_tags.py
https://github.com/justquick/django-activity-stream/blob/a1e06f2e6429cc5fc321e7801440dd7c5b9d5a35/actstream/templatetags/activity_tags.py#L60-L79
def handle_token(cls, parser, token): """ Class method to parse and return a Node. """ tag_error = "Accepted formats {%% %(tagname)s %(args)s %%} or " \ "{%% %(tagname)s %(args)s as [var] %%}" bits = token.split_contents() args_count = len(bits) - 1 if args_count >= 2 and bits[-2] == 'as': as_var = bits[-1] args_count -= 2 else: as_var = None if args_count != cls.args_count: arg_list = ' '.join(['[arg]' * cls.args_count]) raise TemplateSyntaxError(tag_error % {'tagname': bits[0], 'args': arg_list}) args = [parser.compile_filter(tkn) for tkn in bits[1:args_count + 1]] return cls(args, varname=as_var)
[ "def", "handle_token", "(", "cls", ",", "parser", ",", "token", ")", ":", "tag_error", "=", "\"Accepted formats {%% %(tagname)s %(args)s %%} or \"", "\"{%% %(tagname)s %(args)s as [var] %%}\"", "bits", "=", "token", ".", "split_contents", "(", ")", "args_count", "=", "len", "(", "bits", ")", "-", "1", "if", "args_count", ">=", "2", "and", "bits", "[", "-", "2", "]", "==", "'as'", ":", "as_var", "=", "bits", "[", "-", "1", "]", "args_count", "-=", "2", "else", ":", "as_var", "=", "None", "if", "args_count", "!=", "cls", ".", "args_count", ":", "arg_list", "=", "' '", ".", "join", "(", "[", "'[arg]'", "*", "cls", ".", "args_count", "]", ")", "raise", "TemplateSyntaxError", "(", "tag_error", "%", "{", "'tagname'", ":", "bits", "[", "0", "]", ",", "'args'", ":", "arg_list", "}", ")", "args", "=", "[", "parser", ".", "compile_filter", "(", "tkn", ")", "for", "tkn", "in", "bits", "[", "1", ":", "args_count", "+", "1", "]", "]", "return", "cls", "(", "args", ",", "varname", "=", "as_var", ")" ]
Class method to parse and return a Node.
[ "Class", "method", "to", "parse", "and", "return", "a", "Node", "." ]
python
train
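A pure-Python sketch of the `{% tag args as var %}` parsing convention that `handle_token` implements; Django's `token.split_contents()` is mimicked here with a pre-split list, so quoted arguments are not handled:

```python
def parse_tag_bits(bits, expected_args):
    """Split off a trailing 'as <var>' clause, then check the arg count."""
    if len(bits) >= 3 and bits[-2] == 'as':
        as_var, args = bits[-1], bits[1:-2]
    else:
        as_var, args = None, bits[1:]
    if len(args) != expected_args:
        raise ValueError('%s takes exactly %d argument(s)' % (bits[0], expected_args))
    return args, as_var


print(parse_tag_bits(['activity_stream', 'actor', 'user'], 2))
# (['actor', 'user'], None)
print(parse_tag_bits(['activity_stream', 'actor', 'user', 'as', 'stream'], 2))
# (['actor', 'user'], 'stream')
```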
google/mobly
mobly/controllers/monsoon.py
https://github.com/google/mobly/blob/38ba2cf7d29a20e6a2fca1718eecb337df38db26/mobly/controllers/monsoon.py#L481-L488
def average_current(self): """Average current in the unit of mA. """ len_data_pt = len(self.data_points) if len_data_pt == 0: return 0 cur = sum(self.data_points) * 1000 / len_data_pt return round(cur, self.sr)
[ "def", "average_current", "(", "self", ")", ":", "len_data_pt", "=", "len", "(", "self", ".", "data_points", ")", "if", "len_data_pt", "==", "0", ":", "return", "0", "cur", "=", "sum", "(", "self", ".", "data_points", ")", "*", "1000", "/", "len_data_pt", "return", "round", "(", "cur", ",", "self", ".", "sr", ")" ]
Average current in the unit of mA.
[ "Average", "current", "in", "the", "unit", "of", "mA", "." ]
python
train
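The computation above reduces to a guarded mean scaled to mA. A standalone sketch; the rounding precision is a parameter here, since the original reads it from the instance attribute `sr`:

```python
def average_current_ma(data_points_amps, precision=2):
    """Mean of samples given in A, returned in mA; 0 for an empty series."""
    if not data_points_amps:
        return 0
    return round(sum(data_points_amps) * 1000 / len(data_points_amps), precision)


print(average_current_ma([0.010, 0.012, 0.011]))  # 11.0
```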
pantsbuild/pants
src/python/pants/base/hash_utils.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/base/hash_utils.py#L19-L28
def hash_all(strs, digest=None): """Returns a hash of the concatenation of all the strings in strs. If a hashlib message digest is not supplied a new sha1 message digest is used. """ digest = digest or hashlib.sha1() for s in strs: s = ensure_binary(s) digest.update(s) return digest.hexdigest() if PY3 else digest.hexdigest().decode('utf-8')
[ "def", "hash_all", "(", "strs", ",", "digest", "=", "None", ")", ":", "digest", "=", "digest", "or", "hashlib", ".", "sha1", "(", ")", "for", "s", "in", "strs", ":", "s", "=", "ensure_binary", "(", "s", ")", "digest", ".", "update", "(", "s", ")", "return", "digest", ".", "hexdigest", "(", ")", "if", "PY3", "else", "digest", ".", "hexdigest", "(", ")", ".", "decode", "(", "'utf-8'", ")" ]
Returns a hash of the concatenation of all the strings in strs. If a hashlib message digest is not supplied a new sha1 message digest is used.
[ "Returns", "a", "hash", "of", "the", "concatenation", "of", "all", "the", "strings", "in", "strs", "." ]
python
train
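A Python 3 sketch of the same idea using `hashlib` directly -- feed several strings into one message digest; sha256 is an arbitrary choice here, whereas the record defaults to sha1:

```python
import hashlib


def hash_all(strs, digest=None):
    """Hex digest of the concatenation of all the strings in strs."""
    digest = digest or hashlib.sha256()
    for s in strs:
        digest.update(s.encode('utf-8') if isinstance(s, str) else s)
    return digest.hexdigest()


# Equivalent to hashing the concatenated bytes in one call:
assert hash_all(['foo', 'bar']) == hashlib.sha256(b'foobar').hexdigest()
```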
intuition-io/intuition
intuition/api/portfolio.py
https://github.com/intuition-io/intuition/blob/cd517e6b3b315a743eb4d0d0dc294e264ab913ce/intuition/api/portfolio.py#L77-L84
def update(self, portfolio, date, perfs=None): ''' Actualizes the portfolio universe with the alog state ''' # Make the manager aware of current simulation self.portfolio = portfolio self.perfs = perfs self.date = date
[ "def", "update", "(", "self", ",", "portfolio", ",", "date", ",", "perfs", "=", "None", ")", ":", "# Make the manager aware of current simulation", "self", ".", "portfolio", "=", "portfolio", "self", ".", "perfs", "=", "perfs", "self", ".", "date", "=", "date" ]
Actualizes the portfolio universe with the algo state
[ "Actualizes", "the", "portfolio", "universe", "with", "the", "algo", "state" ]
python
train
neo4j/neo4j-python-driver
neo4j/types/temporal.py
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/types/temporal.py#L80-L97
def dehydrate_time(value): """ Dehydrator for `time` values. :param value: :type value: Time :return: """ if isinstance(value, Time): nanoseconds = int(value.ticks * 1000000000) elif isinstance(value, time): nanoseconds = (3600000000000 * value.hour + 60000000000 * value.minute + 1000000000 * value.second + 1000 * value.microsecond) else: raise TypeError("Value must be a neotime.Time or a datetime.time") if value.tzinfo: return Structure(b"T", nanoseconds, value.tzinfo.utcoffset(value).seconds) else: return Structure(b"t", nanoseconds)
[ "def", "dehydrate_time", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "Time", ")", ":", "nanoseconds", "=", "int", "(", "value", ".", "ticks", "*", "1000000000", ")", "elif", "isinstance", "(", "value", ",", "time", ")", ":", "nanoseconds", "=", "(", "3600000000000", "*", "value", ".", "hour", "+", "60000000000", "*", "value", ".", "minute", "+", "1000000000", "*", "value", ".", "second", "+", "1000", "*", "value", ".", "microsecond", ")", "else", ":", "raise", "TypeError", "(", "\"Value must be a neotime.Time or a datetime.time\"", ")", "if", "value", ".", "tzinfo", ":", "return", "Structure", "(", "b\"T\"", ",", "nanoseconds", ",", "value", ".", "tzinfo", ".", "utcoffset", "(", "value", ")", ".", "seconds", ")", "else", ":", "return", "Structure", "(", "b\"t\"", ",", "nanoseconds", ")" ]
Dehydrator for `time` values. :param value: :type value: Time :return:
[ "Dehydrator", "for", "time", "values", "." ]
python
train
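The nanoseconds-since-midnight arithmetic in the record can be checked with the standard library alone; this sketch reproduces it for `datetime.time` values, leaving out the `neotime.Time` branch and the `Structure` wrapper:

```python
from datetime import time


def time_to_nanoseconds(value):
    """Nanoseconds since midnight for a naive datetime.time."""
    return (3600000000000 * value.hour
            + 60000000000 * value.minute
            + 1000000000 * value.second
            + 1000 * value.microsecond)


print(time_to_nanoseconds(time(1, 2, 3, 4)))  # 3723000004000
```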
Nachtfeuer/pipeline
spline/tools/loader.py
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/loader.py#L32-L39
def include(self, node): """Include the defined yaml file.""" result = None if isinstance(node, ScalarNode): result = Loader.include_file(self.construct_scalar(node)) else: raise RuntimeError("Not supported !include on type %s" % type(node)) return result
[ "def", "include", "(", "self", ",", "node", ")", ":", "result", "=", "None", "if", "isinstance", "(", "node", ",", "ScalarNode", ")", ":", "result", "=", "Loader", ".", "include_file", "(", "self", ".", "construct_scalar", "(", "node", ")", ")", "else", ":", "raise", "RuntimeError", "(", "\"Not supported !include on type %s\"", "%", "type", "(", "node", ")", ")", "return", "result" ]
Include the defined yaml file.
[ "Include", "the", "defined", "yaml", "file", "." ]
python
train
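PyYAML supports the same pattern through a custom constructor; a minimal sketch that registers an `!include` tag against `SafeLoader`. The included file is parsed with `yaml.safe_load`, which differs from spline's own `Loader.include_file` helper:

```python
import yaml


def include_constructor(loader, node):
    # Only scalar nodes (plain file names) are supported in this sketch.
    filename = loader.construct_scalar(node)
    with open(filename) as handle:
        return yaml.safe_load(handle)


yaml.add_constructor('!include', include_constructor, Loader=yaml.SafeLoader)

# Given other.yaml containing "greeting: hello":
# print(yaml.safe_load('nested: !include other.yaml'))
# -> {'nested': {'greeting': 'hello'}}
```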
pyca/pyopenssl
src/OpenSSL/crypto.py
https://github.com/pyca/pyopenssl/blob/1fbe064c50fd030948141d7d630673761525b0d0/src/OpenSSL/crypto.py#L1071-L1081
def to_cryptography(self): """ Export as a ``cryptography`` certificate. :rtype: ``cryptography.x509.Certificate`` .. versionadded:: 17.1.0 """ from cryptography.hazmat.backends.openssl.x509 import _Certificate backend = _get_backend() return _Certificate(backend, self._x509)
[ "def", "to_cryptography", "(", "self", ")", ":", "from", "cryptography", ".", "hazmat", ".", "backends", ".", "openssl", ".", "x509", "import", "_Certificate", "backend", "=", "_get_backend", "(", ")", "return", "_Certificate", "(", "backend", ",", "self", ".", "_x509", ")" ]
Export as a ``cryptography`` certificate. :rtype: ``cryptography.x509.Certificate`` .. versionadded:: 17.1.0
[ "Export", "as", "a", "cryptography", "certificate", "." ]
python
test
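A round-trip usage sketch: build a throwaway self-signed certificate with pyOpenSSL, then hop to the `cryptography` object via `to_cryptography()`. The calls are standard pyOpenSSL API, but the certificate contents are dummy values:

```python
from OpenSSL import crypto

key = crypto.PKey()
key.generate_key(crypto.TYPE_RSA, 2048)

cert = crypto.X509()
cert.get_subject().CN = 'example.test'  # dummy subject
cert.set_issuer(cert.get_subject())     # self-signed
cert.set_pubkey(key)
cert.set_serial_number(1)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(3600)
cert.sign(key, 'sha256')

crypto_cert = cert.to_cryptography()    # requires pyOpenSSL >= 17.1.0
print(crypto_cert.serial_number)        # 1
```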
cds-astro/mocpy
mocpy/abstract_moc.py
https://github.com/cds-astro/mocpy/blob/09472cabe537f6bfdb049eeea64d3ea57b391c21/mocpy/abstract_moc.py#L410-L487
def _to_str(uniq): """ Serializes a MOC to the STRING format. HEALPix cells are separated by a comma. The HEALPix cell at order 0 and number 10 is encoded by the string: "0/10", the first digit representing the depth and the second the HEALPix cell number for this depth. HEALPix cells next to each other within a specific depth can be expressed as a range and therefore written like that: "12/10-150". This encodes the list of HEALPix cells from 10 to 150 at the depth 12. Parameters ---------- uniq : `~numpy.ndarray` The array of HEALPix cells representing the MOC to serialize. Returns ------- result : str The serialized MOC. """ def write_cells(serial, a, b, sep=''): if a == b: serial += '{0}{1}'.format(a, sep) else: serial += '{0}-{1}{2}'.format(a, b, sep) return serial res = '' if uniq.size == 0: return res depth, ipixels = utils.uniq2orderipix(uniq) min_depth = np.min(depth[0]) max_depth = np.max(depth[-1]) for d in range(min_depth, max_depth+1): pix_index = np.where(depth == d)[0] if pix_index.size > 0: # Serialize the depth followed by a slash res += '{0}/'.format(d) # Retrieve the pixel(s) for this depth ipix_depth = ipixels[pix_index] if ipix_depth.size == 1: # If there is only one pixel we serialize it and # go to the next depth res = write_cells(res, ipix_depth[0], ipix_depth[0]) else: # Sort them in case there are several ipix_depth = np.sort(ipix_depth) beg_range = ipix_depth[0] last_range = beg_range # Loop over the sorted pixels by tracking the lower bound of # the current range and the last pixel. for ipix in ipix_depth[1:]: # If the current pixel does not follow the previous one # then we can end a range and serializes it if ipix > last_range + 1: res = write_cells(res, beg_range, last_range, sep=',') # The current pixel is the beginning of a new range beg_range = ipix last_range = ipix # Write the last range res = write_cells(res, beg_range, last_range) # Add a ' ' separator before writing serializing the pixels of the next depth res += ' ' # Remove the last ' ' character res = res[:-1] return res
[ "def", "_to_str", "(", "uniq", ")", ":", "def", "write_cells", "(", "serial", ",", "a", ",", "b", ",", "sep", "=", "''", ")", ":", "if", "a", "==", "b", ":", "serial", "+=", "'{0}{1}'", ".", "format", "(", "a", ",", "sep", ")", "else", ":", "serial", "+=", "'{0}-{1}{2}'", ".", "format", "(", "a", ",", "b", ",", "sep", ")", "return", "serial", "res", "=", "''", "if", "uniq", ".", "size", "==", "0", ":", "return", "res", "depth", ",", "ipixels", "=", "utils", ".", "uniq2orderipix", "(", "uniq", ")", "min_depth", "=", "np", ".", "min", "(", "depth", "[", "0", "]", ")", "max_depth", "=", "np", ".", "max", "(", "depth", "[", "-", "1", "]", ")", "for", "d", "in", "range", "(", "min_depth", ",", "max_depth", "+", "1", ")", ":", "pix_index", "=", "np", ".", "where", "(", "depth", "==", "d", ")", "[", "0", "]", "if", "pix_index", ".", "size", ">", "0", ":", "# Serialize the depth followed by a slash", "res", "+=", "'{0}/'", ".", "format", "(", "d", ")", "# Retrieve the pixel(s) for this depth", "ipix_depth", "=", "ipixels", "[", "pix_index", "]", "if", "ipix_depth", ".", "size", "==", "1", ":", "# If there is only one pixel we serialize it and", "# go to the next depth", "res", "=", "write_cells", "(", "res", ",", "ipix_depth", "[", "0", "]", ",", "ipix_depth", "[", "0", "]", ")", "else", ":", "# Sort them in case there are several", "ipix_depth", "=", "np", ".", "sort", "(", "ipix_depth", ")", "beg_range", "=", "ipix_depth", "[", "0", "]", "last_range", "=", "beg_range", "# Loop over the sorted pixels by tracking the lower bound of", "# the current range and the last pixel.", "for", "ipix", "in", "ipix_depth", "[", "1", ":", "]", ":", "# If the current pixel does not follow the previous one", "# then we can end a range and serializes it", "if", "ipix", ">", "last_range", "+", "1", ":", "res", "=", "write_cells", "(", "res", ",", "beg_range", ",", "last_range", ",", "sep", "=", "','", ")", "# The current pixel is the beginning of a new range", "beg_range", "=", "ipix", "last_range", "=", "ipix", "# Write the last range", "res", "=", "write_cells", "(", "res", ",", "beg_range", ",", "last_range", ")", "# Add a ' ' separator before writing serializing the pixels of the next depth", "res", "+=", "' '", "# Remove the last ' ' character", "res", "=", "res", "[", ":", "-", "1", "]", "return", "res" ]
Serializes a MOC to the STRING format. HEALPix cells are separated by a comma. The HEALPix cell at order 0 and number 10 is encoded by the string: "0/10", the first digit representing the depth and the second the HEALPix cell number for this depth. HEALPix cells next to each other within a specific depth can be expressed as a range and therefore written like that: "12/10-150". This encodes the list of HEALPix cells from 10 to 150 at the depth 12. Parameters ---------- uniq : `~numpy.ndarray` The array of HEALPix cells representing the MOC to serialize. Returns ------- result : str The serialized MOC.
[ "Serializes", "a", "MOC", "to", "the", "STRING", "format", "." ]
python
train
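The core of the serializer is run-length encoding of sorted integers into "a-b" ranges. A self-contained sketch of just that step, with no HEALPix or numpy dependencies:

```python
def ranges(sorted_ipix):
    """Collapse sorted ints into (start, end) runs, e.g. [1,2,3,7] -> [(1,3),(7,7)]."""
    runs = []
    start = last = sorted_ipix[0]
    for ipix in sorted_ipix[1:]:
        if ipix > last + 1:          # gap: close the current run
            runs.append((start, last))
            start = ipix
        last = ipix
    runs.append((start, last))
    return runs


def serialize(depth, sorted_ipix):
    cells = ','.join('%d' % a if a == b else '%d-%d' % (a, b)
                     for a, b in ranges(sorted_ipix))
    return '%d/%s' % (depth, cells)


print(serialize(12, [10, 11, 12, 150]))  # 12/10-12,150
```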
hayd/pep8radius
pep8radius/vcs.py
https://github.com/hayd/pep8radius/blob/0c1d14835d390f7feeb602f35a768e52ce306a0a/pep8radius/vcs.py#L246-L256
def parse_diff_filenames(diff_files): """Parse the output of filenames_diff_cmd.""" # ? .gitignore # M 0.txt files = [] for line in diff_files.splitlines(): line = line.strip() fn = re.findall('[^ ]+\s+(.*.py)', line) if fn and not line.startswith('?'): files.append(fn[0]) return files
[ "def", "parse_diff_filenames", "(", "diff_files", ")", ":", "# ? .gitignore", "# M 0.txt", "files", "=", "[", "]", "for", "line", "in", "diff_files", ".", "splitlines", "(", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "fn", "=", "re", ".", "findall", "(", "'[^ ]+\\s+(.*.py)'", ",", "line", ")", "if", "fn", "and", "not", "line", ".", "startswith", "(", "'?'", ")", ":", "files", ".", "append", "(", "fn", "[", "0", "]", ")", "return", "files" ]
Parse the output of filenames_diff_cmd.
[ "Parse", "the", "output", "of", "filenames_diff_cmd", "." ]
python
train
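A standalone sketch of the same status-line filtering with `re`; the dot before `py` is escaped here, a small tightening of the record's pattern, and the sample status lines are made up:

```python
import re


def parse_diff_filenames(diff_files):
    """Pull .py paths out of VCS status output, skipping '?' (untracked) lines."""
    files = []
    for line in diff_files.splitlines():
        line = line.strip()
        match = re.findall(r'[^ ]+\s+(.*\.py)', line)
        if match and not line.startswith('?'):
            files.append(match[0])
    return files


sample = "? .gitignore\nM  tool.py\nA  pkg/util.py\n"
print(parse_diff_filenames(sample))  # ['tool.py', 'pkg/util.py']
```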
biocommons/bioutils
src/bioutils/cytobands.py
https://github.com/biocommons/bioutils/blob/88bcbdfa707268fed1110800e91b6d4f8e9475a0/src/bioutils/cytobands.py#L13-L23
def get_cytoband_names():
    """Returns the names of available cytoband data files

    >>> get_cytoband_names()
    ['ucsc-hg38', 'ucsc-hg19']

    """
    return [
        n.replace(".json.gz", "")
        for n in pkg_resources.resource_listdir(__name__, _data_dir)
        if n.endswith(".json.gz")
    ]
[ "def", "get_cytoband_names", "(", ")", ":", "return", "[", "n", ".", "replace", "(", "\".json.gz\"", ",", "\"\"", ")", "for", "n", "in", "pkg_resources", ".", "resource_listdir", "(", "__name__", ",", "_data_dir", ")", "if", "n", ".", "endswith", "(", "\".json.gz\"", ")", "]" ]
Returns the names of available cytoband data files

    >>> get_cytoband_names()
    ['ucsc-hg38', 'ucsc-hg19']
[ "Returns", "the", "names", "of", "available", "cytoband", "data", "files" ]
python
train
Contraz/demosys-py
demosys/context/__init__.py
https://github.com/Contraz/demosys-py/blob/6466128a3029c4d09631420ccce73024025bd5b6/demosys/context/__init__.py#L23-L29
def ctx() -> moderngl.Context: """ModernGL context""" win = window() if not win.ctx: raise RuntimeError("Attempting to get context before creation") return win.ctx
[ "def", "ctx", "(", ")", "->", "moderngl", ".", "Context", ":", "win", "=", "window", "(", ")", "if", "not", "win", ".", "ctx", ":", "raise", "RuntimeError", "(", "\"Attempting to get context before creation\"", ")", "return", "win", ".", "ctx" ]
ModernGL context
[ "ModernGL", "context" ]
python
valid
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/completer.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/completer.py#L521-L602
def file_matches(self, text): """Match filenames, expanding ~USER type strings. Most of the seemingly convoluted logic in this completer is an attempt to handle filenames with spaces in them. And yet it's not quite perfect, because Python's readline doesn't expose all of the GNU readline details needed for this to be done correctly. For a filename with a space in it, the printed completions will be only the parts after what's already been typed (instead of the full completions, as is normally done). I don't think with the current (as of Python 2.3) Python readline it's possible to do better.""" #io.rprint('Completer->file_matches: <%r>' % text) # dbg # chars that require escaping with backslash - i.e. chars # that readline treats incorrectly as delimiters, but we # don't want to treat as delimiters in filename matching # when escaped with backslash if text.startswith('!'): text = text[1:] text_prefix = '!' else: text_prefix = '' text_until_cursor = self.text_until_cursor # track strings with open quotes open_quotes = has_open_quotes(text_until_cursor) if '(' in text_until_cursor or '[' in text_until_cursor: lsplit = text else: try: # arg_split ~ shlex.split, but with unicode bugs fixed by us lsplit = arg_split(text_until_cursor)[-1] except ValueError: # typically an unmatched ", or backslash without escaped char. if open_quotes: lsplit = text_until_cursor.split(open_quotes)[-1] else: return [] except IndexError: # tab pressed on empty line lsplit = "" if not open_quotes and lsplit != protect_filename(lsplit): # if protectables are found, do matching on the whole escaped name has_protectables = True text0,text = text,lsplit else: has_protectables = False text = os.path.expanduser(text) if text == "": return [text_prefix + protect_filename(f) for f in self.glob("*")] # Compute the matches from the filesystem m0 = self.clean_glob(text.replace('\\','')) if has_protectables: # If we had protectables, we need to revert our changes to the # beginning of filename so that we don't double-write the part # of the filename we have so far len_lsplit = len(lsplit) matches = [text_prefix + text0 + protect_filename(f[len_lsplit:]) for f in m0] else: if open_quotes: # if we have a string with an open quote, we don't need to # protect the names at all (and we _shouldn't_, as it # would cause bugs when the filesystem call is made). matches = m0 else: matches = [text_prefix + protect_filename(f) for f in m0] #io.rprint('mm', matches) # dbg # Mark directories in input list by appending '/' to their names. matches = [x+'/' if os.path.isdir(x) else x for x in matches] return matches
[ "def", "file_matches", "(", "self", ",", "text", ")", ":", "#io.rprint('Completer->file_matches: <%r>' % text) # dbg", "# chars that require escaping with backslash - i.e. chars", "# that readline treats incorrectly as delimiters, but we", "# don't want to treat as delimiters in filename matching", "# when escaped with backslash", "if", "text", ".", "startswith", "(", "'!'", ")", ":", "text", "=", "text", "[", "1", ":", "]", "text_prefix", "=", "'!'", "else", ":", "text_prefix", "=", "''", "text_until_cursor", "=", "self", ".", "text_until_cursor", "# track strings with open quotes", "open_quotes", "=", "has_open_quotes", "(", "text_until_cursor", ")", "if", "'('", "in", "text_until_cursor", "or", "'['", "in", "text_until_cursor", ":", "lsplit", "=", "text", "else", ":", "try", ":", "# arg_split ~ shlex.split, but with unicode bugs fixed by us", "lsplit", "=", "arg_split", "(", "text_until_cursor", ")", "[", "-", "1", "]", "except", "ValueError", ":", "# typically an unmatched \", or backslash without escaped char.", "if", "open_quotes", ":", "lsplit", "=", "text_until_cursor", ".", "split", "(", "open_quotes", ")", "[", "-", "1", "]", "else", ":", "return", "[", "]", "except", "IndexError", ":", "# tab pressed on empty line", "lsplit", "=", "\"\"", "if", "not", "open_quotes", "and", "lsplit", "!=", "protect_filename", "(", "lsplit", ")", ":", "# if protectables are found, do matching on the whole escaped name", "has_protectables", "=", "True", "text0", ",", "text", "=", "text", ",", "lsplit", "else", ":", "has_protectables", "=", "False", "text", "=", "os", ".", "path", ".", "expanduser", "(", "text", ")", "if", "text", "==", "\"\"", ":", "return", "[", "text_prefix", "+", "protect_filename", "(", "f", ")", "for", "f", "in", "self", ".", "glob", "(", "\"*\"", ")", "]", "# Compute the matches from the filesystem", "m0", "=", "self", ".", "clean_glob", "(", "text", ".", "replace", "(", "'\\\\'", ",", "''", ")", ")", "if", "has_protectables", ":", "# If we had protectables, we need to revert our changes to the", "# beginning of filename so that we don't double-write the part", "# of the filename we have so far", "len_lsplit", "=", "len", "(", "lsplit", ")", "matches", "=", "[", "text_prefix", "+", "text0", "+", "protect_filename", "(", "f", "[", "len_lsplit", ":", "]", ")", "for", "f", "in", "m0", "]", "else", ":", "if", "open_quotes", ":", "# if we have a string with an open quote, we don't need to", "# protect the names at all (and we _shouldn't_, as it", "# would cause bugs when the filesystem call is made).", "matches", "=", "m0", "else", ":", "matches", "=", "[", "text_prefix", "+", "protect_filename", "(", "f", ")", "for", "f", "in", "m0", "]", "#io.rprint('mm', matches) # dbg", "# Mark directories in input list by appending '/' to their names.", "matches", "=", "[", "x", "+", "'/'", "if", "os", ".", "path", ".", "isdir", "(", "x", ")", "else", "x", "for", "x", "in", "matches", "]", "return", "matches" ]
Match filenames, expanding ~USER type strings. Most of the seemingly convoluted logic in this completer is an attempt to handle filenames with spaces in them. And yet it's not quite perfect, because Python's readline doesn't expose all of the GNU readline details needed for this to be done correctly. For a filename with a space in it, the printed completions will be only the parts after what's already been typed (instead of the full completions, as is normally done). I don't think with the current (as of Python 2.3) Python readline it's possible to do better.
[ "Match", "filenames", "expanding", "~USER", "type", "strings", "." ]
python
test
cirruscluster/cirruscluster
cirruscluster/core.py
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/core.py#L500-L519
def PrivateToPublicOpenSSH(key, host): """ Computes the OpenSSH public key format given a private key. """ # Create public key from private key. ssh_rsa = '00000007' + base64.b16encode('ssh-rsa') # Exponent. exponent = '%x' % (key.e,) if len(exponent) % 2: exponent = '0' + exponent ssh_rsa += '%08x' % (len(exponent) / 2,) ssh_rsa += exponent modulus = '%x' % (key.n,) if len(modulus) % 2: modulus = '0' + modulus if modulus[0] in '89abcdef': modulus = '00' + modulus ssh_rsa += '%08x' % (len(modulus) / 2,) ssh_rsa += modulus hash_string = base64.b64encode(base64.b16decode(ssh_rsa.upper())) public_key = 'ssh-rsa %s %s' % (hash_string, host) return public_key
[ "def", "PrivateToPublicOpenSSH", "(", "key", ",", "host", ")", ":", "# Create public key from private key.", "ssh_rsa", "=", "'00000007'", "+", "base64", ".", "b16encode", "(", "'ssh-rsa'", ")", "# Exponent.", "exponent", "=", "'%x'", "%", "(", "key", ".", "e", ",", ")", "if", "len", "(", "exponent", ")", "%", "2", ":", "exponent", "=", "'0'", "+", "exponent", "ssh_rsa", "+=", "'%08x'", "%", "(", "len", "(", "exponent", ")", "/", "2", ",", ")", "ssh_rsa", "+=", "exponent", "modulus", "=", "'%x'", "%", "(", "key", ".", "n", ",", ")", "if", "len", "(", "modulus", ")", "%", "2", ":", "modulus", "=", "'0'", "+", "modulus", "if", "modulus", "[", "0", "]", "in", "'89abcdef'", ":", "modulus", "=", "'00'", "+", "modulus", "ssh_rsa", "+=", "'%08x'", "%", "(", "len", "(", "modulus", ")", "/", "2", ",", ")", "ssh_rsa", "+=", "modulus", "hash_string", "=", "base64", ".", "b64encode", "(", "base64", ".", "b16decode", "(", "ssh_rsa", ".", "upper", "(", ")", ")", ")", "public_key", "=", "'ssh-rsa %s %s'", "%", "(", "hash_string", ",", "host", ")", "return", "public_key" ]
Computes the OpenSSH public key format given a private key.
[ "Computes", "the", "OpenSSH", "public", "key", "format", "given", "a", "private", "key", "." ]
python
train
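The OpenSSH public-key blob built above is length-prefixed fields ("ssh-rsa", exponent e, modulus n), base64-encoded. A Python 3 sketch using `struct` and `int.to_bytes` instead of the record's hex-string manipulation; the tiny e/n pair below is for illustration only, real keys use 2048+ bit moduli:

```python
import base64
import struct


def mpint(n):
    """SSH mpint: big-endian bytes, with a leading 0x00 if the high bit is set."""
    data = n.to_bytes((n.bit_length() + 7) // 8, 'big')
    if data and data[0] & 0x80:
        data = b'\x00' + data
    return data


def ssh_rsa_public_key(e, n, comment='host'):
    blob = b''
    for field in (b'ssh-rsa', mpint(e), mpint(n)):
        blob += struct.pack('>I', len(field)) + field  # 4-byte length prefix
    return 'ssh-rsa %s %s' % (base64.b64encode(blob).decode('ascii'), comment)


print(ssh_rsa_public_key(65537, 0xC0FFEE))  # toy values, not a usable key
```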
gamechanger/schemer
schemer/validators.py
https://github.com/gamechanger/schemer/blob/1d1dd7da433d3b84ce5a80ded5a84ab4a65825ee/schemer/validators.py#L8-L21
def one_of(*args): """ Validates that a field value matches one of the values given to this validator. """ if len(args) == 1 and isinstance(args[0], list): items = args[0] else: items = list(args) def validate(value): if not value in items: return e("{} is not in the list {}", value, items) return validate
[ "def", "one_of", "(", "*", "args", ")", ":", "if", "len", "(", "args", ")", "==", "1", "and", "isinstance", "(", "args", "[", "0", "]", ",", "list", ")", ":", "items", "=", "args", "[", "0", "]", "else", ":", "items", "=", "list", "(", "args", ")", "def", "validate", "(", "value", ")", ":", "if", "not", "value", "in", "items", ":", "return", "e", "(", "\"{} is not in the list {}\"", ",", "value", ",", "items", ")", "return", "validate" ]
Validates that a field value matches one of the values given to this validator.
[ "Validates", "that", "a", "field", "value", "matches", "one", "of", "the", "values", "given", "to", "this", "validator", "." ]
python
train
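The validator-factory pattern is easy to exercise standalone; a sketch with a plain error string standing in for schemer's `e` helper:

```python
def one_of(*args):
    """Return a validator that accepts only the given values."""
    items = args[0] if len(args) == 1 and isinstance(args[0], list) else list(args)

    def validate(value):
        if value not in items:
            return '{} is not in the list {}'.format(value, items)

    return validate


check = one_of('red', 'green', 'blue')
print(check('green'))   # None -> valid
print(check('purple'))  # "purple is not in the list ['red', 'green', 'blue']"
```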
ibm-watson-data-lab/ibmseti
ibmseti/features.py
https://github.com/ibm-watson-data-lab/ibmseti/blob/3361bc0adb4770dc7a554ed7cda292503892acee/ibmseti/features.py#L29-L60
def difference(arr, n=1, axis=0, **kwargs): ''' Assuming that `arr` is a 2D spectrogram returned by ibmseti.dsp.raw_to_spectrogram(data), this function uses the Numpy.diff function to calculate the nth difference along either time or frequency. If axis = 0 and n=1, then the first difference is taken between subsequent time samples If axis = 1 and n=1, then the first difference is taken between frequency bins. For example: //each column is a frequency bin x = np.array([ [ 1, 3, 6, 10], //each row is a time sample [ 0, 5, 6, 8], [ 2, 6, 9, 12]]) ibmseti.features.first_difference(x, axis=1) >>> array([[2, 3, 4], [5, 1, 2], [4, 3, 3]]) ibmseti.features.first_difference(x, axis=0) >>> array([[-1, 2, 0, -2], [ 2, 1, 3, 4]]) ''' return np.diff(arr, n=n, axis=axis, **kwargs)
[ "def", "difference", "(", "arr", ",", "n", "=", "1", ",", "axis", "=", "0", ",", "*", "*", "kwargs", ")", ":", "return", "np", ".", "diff", "(", "arr", ",", "n", "=", "n", ",", "axis", "=", "axis", ",", "*", "*", "kwargs", ")" ]
Assuming that `arr` is a 2D spectrogram returned by ibmseti.dsp.raw_to_spectrogram(data), this function uses the Numpy.diff function to calculate the nth difference along either time or frequency. If axis = 0 and n=1, then the first difference is taken between subsequent time samples If axis = 1 and n=1, then the first difference is taken between frequency bins. For example: //each column is a frequency bin x = np.array([ [ 1, 3, 6, 10], //each row is a time sample [ 0, 5, 6, 8], [ 2, 6, 9, 12]]) ibmseti.features.first_difference(x, axis=1) >>> array([[2, 3, 4], [5, 1, 2], [4, 3, 3]]) ibmseti.features.first_difference(x, axis=0) >>> array([[-1, 2, 0, -2], [ 2, 1, 3, 4]])
[ "Assuming", "that", "arr", "is", "a", "2D", "spectrogram", "returned", "by", "ibmseti", ".", "dsp", ".", "raw_to_spectrogram", "(", "data", ")", "this", "function", "uses", "the", "Numpy", ".", "diff", "function", "to", "calculate", "the", "nth", "difference", "along", "either", "time", "or", "frequency", "." ]
python
train
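The docstring's example can be reproduced directly with `numpy`, since the record is a thin wrapper over `np.diff`; a quick check of both axes:

```python
import numpy as np

x = np.array([[1, 3, 6, 10],   # each row is a time sample
              [0, 5, 6, 8],    # each column is a frequency bin
              [2, 6, 9, 12]])

print(np.diff(x, n=1, axis=1))  # first difference between frequency bins
# [[2 3 4]
#  [5 1 2]
#  [4 3 3]]
print(np.diff(x, n=1, axis=0))  # first difference between time samples
# [[-1  2  0 -2]
#  [ 2  1  3  4]]
```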
shazow/unstdlib.py
unstdlib/standard/string_.py
https://github.com/shazow/unstdlib.py/blob/e0632fe165cfbfdb5a7e4bc7b412c9d6f2ebad83/unstdlib/standard/string_.py#L420-L425
def slugify(s, delimiter='-'): """ Normalize `s` into ASCII and replace non-word characters with `delimiter`. """ s = unicodedata.normalize('NFKD', to_unicode(s)).encode('ascii', 'ignore').decode('ascii') return RE_SLUG.sub(delimiter, s).strip(delimiter).lower()
[ "def", "slugify", "(", "s", ",", "delimiter", "=", "'-'", ")", ":", "s", "=", "unicodedata", ".", "normalize", "(", "'NFKD'", ",", "to_unicode", "(", "s", ")", ")", ".", "encode", "(", "'ascii'", ",", "'ignore'", ")", ".", "decode", "(", "'ascii'", ")", "return", "RE_SLUG", ".", "sub", "(", "delimiter", ",", "s", ")", ".", "strip", "(", "delimiter", ")", ".", "lower", "(", ")" ]
Normalize `s` into ASCII and replace non-word characters with `delimiter`.
[ "Normalize", "s", "into", "ASCII", "and", "replace", "non", "-", "word", "characters", "with", "delimiter", "." ]
python
train
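A self-contained Python 3 slugify using only the standard library. The regex plays the role of the record's module-level `RE_SLUG`, whose exact pattern isn't shown here, so `[^\w]+` is an assumption:

```python
import re
import unicodedata


def slugify(s, delimiter='-'):
    """Normalize `s` to ASCII and replace non-word runs with `delimiter`."""
    s = unicodedata.normalize('NFKD', s).encode('ascii', 'ignore').decode('ascii')
    return re.sub(r'[^\w]+', delimiter, s).strip(delimiter).lower()


print(slugify('Héllo,  Wörld!'))  # hello-world
```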
AtmaHou/atma
Metrics.py
https://github.com/AtmaHou/atma/blob/41cd8ea9443a9c3b2dd71432f46f44a0f83093c7/Metrics.py#L102-L130
def top_x_bleu(query_dic, mark, x=1): """ Calculate the top x average bleu value predictions ranking by item, x default is set above :param query_dic: dict, key is qid, value is (item, bleu) tuple list, which will be ranked by 'item' as key :param mark:string, which indicates which method is evaluated, also used as output file name here. :param x:int, define top x :return:average bleu score """ all_total = 0.0 with open(top_bleu_path + mark, 'w') as writer: for k in query_dic: candidate_lst = query_dic[k] top_x = sorted(candidate_lst, key=lambda a: a[0], reverse=True)[:x] total = 0 for t in top_x: total += t[1] ave_bleu = total / x writer.write('%s\tAverageBleu:%f\tTop%d:%s\n' % (k, ave_bleu, x, str(top_x))) all_total += ave_bleu if k in contrast_dic: contrast_dic[k].append(str(ave_bleu)) else: contrast_dic[k] = [] contrast_dic[k].append(str(ave_bleu)) result_string = '%s\ttop%d_Bleu:\t%f' % (mark, x, all_total / len(query_dic)) print result_string # eval_result_dict['Bleu'].append(result_string) return ['Bleu', result_string]
[ "def", "top_x_bleu", "(", "query_dic", ",", "mark", ",", "x", "=", "1", ")", ":", "all_total", "=", "0.0", "with", "open", "(", "top_bleu_path", "+", "mark", ",", "'w'", ")", "as", "writer", ":", "for", "k", "in", "query_dic", ":", "candidate_lst", "=", "query_dic", "[", "k", "]", "top_x", "=", "sorted", "(", "candidate_lst", ",", "key", "=", "lambda", "a", ":", "a", "[", "0", "]", ",", "reverse", "=", "True", ")", "[", ":", "x", "]", "total", "=", "0", "for", "t", "in", "top_x", ":", "total", "+=", "t", "[", "1", "]", "ave_bleu", "=", "total", "/", "x", "writer", ".", "write", "(", "'%s\\tAverageBleu:%f\\tTop%d:%s\\n'", "%", "(", "k", ",", "ave_bleu", ",", "x", ",", "str", "(", "top_x", ")", ")", ")", "all_total", "+=", "ave_bleu", "if", "k", "in", "contrast_dic", ":", "contrast_dic", "[", "k", "]", ".", "append", "(", "str", "(", "ave_bleu", ")", ")", "else", ":", "contrast_dic", "[", "k", "]", "=", "[", "]", "contrast_dic", "[", "k", "]", ".", "append", "(", "str", "(", "ave_bleu", ")", ")", "result_string", "=", "'%s\\ttop%d_Bleu:\\t%f'", "%", "(", "mark", ",", "x", ",", "all_total", "/", "len", "(", "query_dic", ")", ")", "print", "result_string", "# eval_result_dict['Bleu'].append(result_string)\r", "return", "[", "'Bleu'", ",", "result_string", "]" ]
Calculate the top x average bleu value predictions ranking by item, x default is set above :param query_dic: dict, key is qid, value is (item, bleu) tuple list, which will be ranked by 'item' as key :param mark:string, which indicates which method is evaluated, also used as output file name here. :param x:int, define top x :return:average bleu score
[ "Calculate", "the", "top", "x", "average", "bleu", "value", "predictions", "ranking", "by", "item", "x", "default", "is", "set", "above", ":", "param", "query_dic", ":", "dict", "key", "is", "qid", "value", "is", "(", "item", "bleu", ")", "tuple", "list", "which", "will", "be", "ranked", "by", "item", "as", "key", ":", "param", "mark", ":", "string", "which", "indicates", "which", "method", "is", "evaluated", "also", "used", "as", "output", "file", "name", "here", ".", ":", "param", "x", ":", "int", "define", "top", "x", ":", "return", ":", "average", "bleu", "score" ]
python
train
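The ranking-and-averaging core, extracted from the record's Python 2 I/O code into a small Python 3 function; the file writing and the `contrast_dic` bookkeeping are dropped:

```python
def top_x_average(candidates, x=1):
    """candidates: (rank_key, bleu) tuples; average bleu of the top-x by key."""
    top = sorted(candidates, key=lambda pair: pair[0], reverse=True)[:x]
    return sum(bleu for _, bleu in top) / x


query = [(0.9, 0.5), (0.7, 0.75), (0.8, 0.25)]
print(top_x_average(query, x=2))  # (0.5 + 0.25) / 2 = 0.375
```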
monarch-initiative/dipper
dipper/models/Genotype.py
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/Genotype.py#L316-L332
def addTaxon(self, taxon_id, genopart_id): """ The supplied geno part will have the specified taxon added with RO:in_taxon relation. Generally the taxon is associated with a genomic_background, but could be added to any genotype part (including a gene, regulatory element, or sequence alteration). :param taxon_id: :param genopart_id: :return: """ self.graph.addTriple( genopart_id, self.globaltt['in taxon'], taxon_id) return
[ "def", "addTaxon", "(", "self", ",", "taxon_id", ",", "genopart_id", ")", ":", "self", ".", "graph", ".", "addTriple", "(", "genopart_id", ",", "self", ".", "globaltt", "[", "'in taxon'", "]", ",", "taxon_id", ")", "return" ]
The supplied geno part will have the specified taxon added with RO:in_taxon relation. Generally the taxon is associated with a genomic_background, but could be added to any genotype part (including a gene, regulatory element, or sequence alteration). :param taxon_id: :param genopart_id: :return:
[ "The", "supplied", "geno", "part", "will", "have", "the", "specified", "taxon", "added", "with", "RO", ":", "in_taxon", "relation", ".", "Generally", "the", "taxon", "is", "associated", "with", "a", "genomic_background", "but", "could", "be", "added", "to", "any", "genotype", "part", "(", "including", "a", "gene", "regulatory", "element", "or", "sequence", "alteration", ")", ".", ":", "param", "taxon_id", ":", ":", "param", "genopart_id", ":" ]
python
train
docker/docker-py
docker/api/daemon.py
https://github.com/docker/docker-py/blob/613d6aad83acc9931ff2ecfd6a6c7bd8061dc125/docker/api/daemon.py#L168-L181
def version(self, api_version=True): """ Returns version information from the server. Similar to the ``docker version`` command. Returns: (dict): The server version information Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ url = self._url("/version", versioned_api=api_version) return self._result(self._get(url), json=True)
[ "def", "version", "(", "self", ",", "api_version", "=", "True", ")", ":", "url", "=", "self", ".", "_url", "(", "\"/version\"", ",", "versioned_api", "=", "api_version", ")", "return", "self", ".", "_result", "(", "self", ".", "_get", "(", "url", ")", ",", "json", "=", "True", ")" ]
Returns version information from the server. Similar to the ``docker version`` command. Returns: (dict): The server version information Raises: :py:class:`docker.errors.APIError` If the server returns an error.
[ "Returns", "version", "information", "from", "the", "server", ".", "Similar", "to", "the", "docker", "version", "command", "." ]
python
train
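Calling it requires a reachable Docker daemon; a minimal usage sketch against the low-level `APIClient`, where the default Unix-socket URL is an assumption about the local setup:

```python
import docker

# Talks to the local daemon; adjust base_url for remote engines.
client = docker.APIClient(base_url='unix://var/run/docker.sock')
info = client.version()
print(info['Version'], info['ApiVersion'])
```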
appointlet/span
span/__init__.py
https://github.com/appointlet/span/blob/6d4f2920e45df827890ebe55b1c41b1f3414c0c9/span/__init__.py#L55-L62
def rtouches(self, span): """ Returns true if the start of this span touches the right (ending) side of the given span. """ if isinstance(span, list): return [sp for sp in span if self._rtouches(sp)] return self._rtouches(span)
[ "def", "rtouches", "(", "self", ",", "span", ")", ":", "if", "isinstance", "(", "span", ",", "list", ")", ":", "return", "[", "sp", "for", "sp", "in", "span", "if", "self", ".", "_rtouches", "(", "sp", ")", "]", "return", "self", ".", "_rtouches", "(", "span", ")" ]
Returns true if the start of this span touches the right (ending) side of the given span.
[ "Returns", "true", "if", "the", "start", "of", "this", "span", "touches", "the", "right", "(", "ending", ")", "side", "of", "the", "given", "span", "." ]
python
train
PSPC-SPAC-buyandsell/von_anchor
von_anchor/anchor/verifier.py
https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/verifier.py#L194-L317
async def build_proof_req_json(self, cd_id2spec: dict) -> str: """ Build and return indy-sdk proof request for input attributes and non-revocation intervals by cred def id. :param cd_id2spec: dict mapping cred def ids to: - (optionally) 'attrs': lists of names of attributes of interest (omit for all, empty list or None for none) - (optionally) '>=': (pred) inclusive int lower-bounds of interest (omit, empty list, or None for none) - (optionally) '>': (pred) exclusive int lower-bounds of interest (omit, empty list, or None for none) - (optionally) '<=': (pred) inclusive int upper-bounds of interest (omit, empty list, or None for none) - (optionally) '<': (pred) exclusive int upper-bounds of interest (omit, empty list, or None for none) - (optionally), 'interval': either - (2-tuple) pair of epoch second counts marking 'from' and 'to' timestamps, or - | single epoch second count to set 'from' and 'to' the same; default | (now, now) for cred defs supporting revocation or None otherwise; e.g., :: { 'Vx4E82R17q...:3:CL:16:tag': { 'attrs': [ # request attrs 'name' and 'favouriteDrink' from this cred def's schema 'name', 'favouriteDrink' ], '>=': { # request predicate score>=80 from this cred def 'score': 80 } '<=': { # request ranking <=10 from this cred def 'ranking': 10 } 'interval': 1528116008 # same instant for all attrs and preds of corresponding schema }, 'R17v42T4pk...:3:CL:19:tag': None, # request all attrs, no preds, default intervals on all attrs 'e3vc5K168n...:3:CL:23:tag': {}, # request all attrs, no preds, default intervals on all attrs 'Z9ccax812j...:3:CL:27:tag': { # request all attrs, no preds, this interval on all attrs 'interval': (1528112408, 1528116008) }, '9cHbp54C8n...:3:CL:37:tag': { # request no attrs and some predicates; specify interval 'attrs': [], # or equivalently, 'attrs': None '>=': { 'employees': '50' # nicety: implementation converts to int for caller }, '>=': { 'revenue': '10000000' # nicety: implementation converts to int for caller 'ebidta': 0 } 'interval': (1528029608, 1528116008) }, '6caBcmLi33...:3:CL:41:tag': { # all attrs, one pred, default intervals to now on attrs & pred '>': { 'regEpoch': 1514782800 } }, ... 
} :return: indy-sdk proof request json """ LOGGER.debug('Verifier.build_proof_req_json >>> cd_id2spec: %s', cd_id2spec) cd_id2schema = {} now = int(time()) rv = { 'nonce': str(int(time())), 'name': 'proof_req', 'version': '0.0', 'requested_attributes': {}, 'requested_predicates': {} } for cd_id in cd_id2spec: if not ok_cred_def_id(cd_id): LOGGER.debug('Verifier.build_proof_req_json <!< Bad cred def id %s', cd_id) raise BadIdentifier('Bad cred def id {}'.format(cd_id)) interval = None cred_def = json.loads(await self.get_cred_def(cd_id)) seq_no = cred_def_id2seq_no(cd_id) cd_id2schema[cd_id] = json.loads(await self.get_schema(seq_no)) if 'revocation' in cred_def['value']: fro_to = cd_id2spec[cd_id].get('interval', (now, now)) if cd_id2spec[cd_id] else (now, now) interval = { 'from': fro_to if isinstance(fro_to, int) else min(fro_to), 'to': fro_to if isinstance(fro_to, int) else max(fro_to) } for attr in (cd_id2spec[cd_id].get('attrs', cd_id2schema[cd_id]['attrNames']) or [] if cd_id2spec[cd_id] else cd_id2schema[cd_id]['attrNames']): attr_uuid = '{}_{}_uuid'.format(seq_no, canon(attr)) rv['requested_attributes'][attr_uuid] = { 'name': attr, 'restrictions': [{ 'cred_def_id': cd_id }] } if interval: rv['requested_attributes'][attr_uuid]['non_revoked'] = interval for pred in Predicate: for attr in (cd_id2spec[cd_id].get(pred.value.math, {}) or {} if cd_id2spec[cd_id] else {}): pred_uuid = '{}_{}_{}_uuid'.format(seq_no, canon(attr), pred.value.fortran) try: rv['requested_predicates'][pred_uuid] = { 'name': attr, 'p_type': pred.value.math, 'p_value': Predicate.to_int(cd_id2spec[cd_id][pred.value.math][attr]), 'restrictions': [{ 'cred_def_id': cd_id }] } except ValueError: LOGGER.info( 'cannot build %s predicate on non-int bound %s for %s', pred.value.fortran, cd_id2spec[cd_id][pred.value.math][attr], attr) continue # int conversion failed - reject candidate if interval: rv['requested_predicates'][pred_uuid]['non_revoked'] = interval LOGGER.debug('Verifier.build_proof_req_json <<< %s', json.dumps(rv)) return json.dumps(rv)
[ "async", "def", "build_proof_req_json", "(", "self", ",", "cd_id2spec", ":", "dict", ")", "->", "str", ":", "LOGGER", ".", "debug", "(", "'Verifier.build_proof_req_json >>> cd_id2spec: %s'", ",", "cd_id2spec", ")", "cd_id2schema", "=", "{", "}", "now", "=", "int", "(", "time", "(", ")", ")", "rv", "=", "{", "'nonce'", ":", "str", "(", "int", "(", "time", "(", ")", ")", ")", ",", "'name'", ":", "'proof_req'", ",", "'version'", ":", "'0.0'", ",", "'requested_attributes'", ":", "{", "}", ",", "'requested_predicates'", ":", "{", "}", "}", "for", "cd_id", "in", "cd_id2spec", ":", "if", "not", "ok_cred_def_id", "(", "cd_id", ")", ":", "LOGGER", ".", "debug", "(", "'Verifier.build_proof_req_json <!< Bad cred def id %s'", ",", "cd_id", ")", "raise", "BadIdentifier", "(", "'Bad cred def id {}'", ".", "format", "(", "cd_id", ")", ")", "interval", "=", "None", "cred_def", "=", "json", ".", "loads", "(", "await", "self", ".", "get_cred_def", "(", "cd_id", ")", ")", "seq_no", "=", "cred_def_id2seq_no", "(", "cd_id", ")", "cd_id2schema", "[", "cd_id", "]", "=", "json", ".", "loads", "(", "await", "self", ".", "get_schema", "(", "seq_no", ")", ")", "if", "'revocation'", "in", "cred_def", "[", "'value'", "]", ":", "fro_to", "=", "cd_id2spec", "[", "cd_id", "]", ".", "get", "(", "'interval'", ",", "(", "now", ",", "now", ")", ")", "if", "cd_id2spec", "[", "cd_id", "]", "else", "(", "now", ",", "now", ")", "interval", "=", "{", "'from'", ":", "fro_to", "if", "isinstance", "(", "fro_to", ",", "int", ")", "else", "min", "(", "fro_to", ")", ",", "'to'", ":", "fro_to", "if", "isinstance", "(", "fro_to", ",", "int", ")", "else", "max", "(", "fro_to", ")", "}", "for", "attr", "in", "(", "cd_id2spec", "[", "cd_id", "]", ".", "get", "(", "'attrs'", ",", "cd_id2schema", "[", "cd_id", "]", "[", "'attrNames'", "]", ")", "or", "[", "]", "if", "cd_id2spec", "[", "cd_id", "]", "else", "cd_id2schema", "[", "cd_id", "]", "[", "'attrNames'", "]", ")", ":", "attr_uuid", "=", "'{}_{}_uuid'", ".", "format", "(", "seq_no", ",", "canon", "(", "attr", ")", ")", "rv", "[", "'requested_attributes'", "]", "[", "attr_uuid", "]", "=", "{", "'name'", ":", "attr", ",", "'restrictions'", ":", "[", "{", "'cred_def_id'", ":", "cd_id", "}", "]", "}", "if", "interval", ":", "rv", "[", "'requested_attributes'", "]", "[", "attr_uuid", "]", "[", "'non_revoked'", "]", "=", "interval", "for", "pred", "in", "Predicate", ":", "for", "attr", "in", "(", "cd_id2spec", "[", "cd_id", "]", ".", "get", "(", "pred", ".", "value", ".", "math", ",", "{", "}", ")", "or", "{", "}", "if", "cd_id2spec", "[", "cd_id", "]", "else", "{", "}", ")", ":", "pred_uuid", "=", "'{}_{}_{}_uuid'", ".", "format", "(", "seq_no", ",", "canon", "(", "attr", ")", ",", "pred", ".", "value", ".", "fortran", ")", "try", ":", "rv", "[", "'requested_predicates'", "]", "[", "pred_uuid", "]", "=", "{", "'name'", ":", "attr", ",", "'p_type'", ":", "pred", ".", "value", ".", "math", ",", "'p_value'", ":", "Predicate", ".", "to_int", "(", "cd_id2spec", "[", "cd_id", "]", "[", "pred", ".", "value", ".", "math", "]", "[", "attr", "]", ")", ",", "'restrictions'", ":", "[", "{", "'cred_def_id'", ":", "cd_id", "}", "]", "}", "except", "ValueError", ":", "LOGGER", ".", "info", "(", "'cannot build %s predicate on non-int bound %s for %s'", ",", "pred", ".", "value", ".", "fortran", ",", "cd_id2spec", "[", "cd_id", "]", "[", "pred", ".", "value", ".", "math", "]", "[", "attr", "]", ",", "attr", ")", "continue", "# int conversion failed - reject candidate", "if", "interval", ":", "rv", "[", 
"'requested_predicates'", "]", "[", "pred_uuid", "]", "[", "'non_revoked'", "]", "=", "interval", "LOGGER", ".", "debug", "(", "'Verifier.build_proof_req_json <<< %s'", ",", "json", ".", "dumps", "(", "rv", ")", ")", "return", "json", ".", "dumps", "(", "rv", ")" ]
Build and return indy-sdk proof request for input attributes and non-revocation intervals by cred def id. :param cd_id2spec: dict mapping cred def ids to: - (optionally) 'attrs': lists of names of attributes of interest (omit for all, empty list or None for none) - (optionally) '>=': (pred) inclusive int lower-bounds of interest (omit, empty list, or None for none) - (optionally) '>': (pred) exclusive int lower-bounds of interest (omit, empty list, or None for none) - (optionally) '<=': (pred) inclusive int upper-bounds of interest (omit, empty list, or None for none) - (optionally) '<': (pred) exclusive int upper-bounds of interest (omit, empty list, or None for none) - (optionally), 'interval': either - (2-tuple) pair of epoch second counts marking 'from' and 'to' timestamps, or - | single epoch second count to set 'from' and 'to' the same; default | (now, now) for cred defs supporting revocation or None otherwise; e.g., :: { 'Vx4E82R17q...:3:CL:16:tag': { 'attrs': [ # request attrs 'name' and 'favouriteDrink' from this cred def's schema 'name', 'favouriteDrink' ], '>=': { # request predicate score>=80 from this cred def 'score': 80 } '<=': { # request ranking <=10 from this cred def 'ranking': 10 } 'interval': 1528116008 # same instant for all attrs and preds of corresponding schema }, 'R17v42T4pk...:3:CL:19:tag': None, # request all attrs, no preds, default intervals on all attrs 'e3vc5K168n...:3:CL:23:tag': {}, # request all attrs, no preds, default intervals on all attrs 'Z9ccax812j...:3:CL:27:tag': { # request all attrs, no preds, this interval on all attrs 'interval': (1528112408, 1528116008) }, '9cHbp54C8n...:3:CL:37:tag': { # request no attrs and some predicates; specify interval 'attrs': [], # or equivalently, 'attrs': None '>=': { 'employees': '50' # nicety: implementation converts to int for caller }, '>=': { 'revenue': '10000000' # nicety: implementation converts to int for caller 'ebidta': 0 } 'interval': (1528029608, 1528116008) }, '6caBcmLi33...:3:CL:41:tag': { # all attrs, one pred, default intervals to now on attrs & pred '>': { 'regEpoch': 1514782800 } }, ... } :return: indy-sdk proof request json
[ "Build", "and", "return", "indy", "-", "sdk", "proof", "request", "for", "input", "attributes", "and", "non", "-", "revocation", "intervals", "by", "cred", "def", "id", "." ]
python
train
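The shape of the proof request the method emits can be sketched without indy-sdk: one `requested_attributes` entry per attribute and one `requested_predicates` entry per bound, keyed by generated uuids. A hand-built example for a single cred def; the id is the truncated placeholder from the docstring and the uuid naming is illustrative:

```python
import json
import time

now = int(time.time())
proof_req = {
    'nonce': str(now),
    'name': 'proof_req',
    'version': '0.0',
    'requested_attributes': {
        '16_name_uuid': {
            'name': 'name',
            'restrictions': [{'cred_def_id': 'Vx4E82R17q...:3:CL:16:tag'}],
            'non_revoked': {'from': now, 'to': now},  # only if cred def revocable
        },
    },
    'requested_predicates': {
        '16_score_GE_uuid': {
            'name': 'score',
            'p_type': '>=',
            'p_value': 80,
            'restrictions': [{'cred_def_id': 'Vx4E82R17q...:3:CL:16:tag'}],
        },
    },
}
print(json.dumps(proof_req, indent=2))
```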
jay-johnson/antinex-client
antinex_client/ai_client.py
https://github.com/jay-johnson/antinex-client/blob/850ba2a2fe21c836e071def618dcecc9caf5d59c/antinex_client/ai_client.py#L121-L186
def login( self): """login""" auth_url = self.api_urls["login"] if self.verbose: log.info(("log in user={} url={} ca_dir={} cert={}") .format( self.user, auth_url, self.ca_dir, self.cert)) use_headers = { "Content-type": "application/json" } login_data = { "username": self.user, "password": self.password } if self.debug: log.info(( "LOGIN with body={} headers={} url={} " "verify={} cert={}").format( login_data, use_headers, auth_url, self.use_verify, self.cert)) response = requests.post( auth_url, verify=self.use_verify, cert=self.cert, data=json.dumps(login_data), headers=use_headers) if self.debug: log.info(("LOGIN response status_code={} text={} reason={}") .format( response.status_code, response.text, response.reason)) user_token = "" if response.status_code == 200: user_token = json.loads(response.text)["token"] if user_token != "": self.token = user_token self.login_status = LOGIN_SUCCESS if self.verbose: log.debug("login success") else: log.error(("failed to login user={} to url={} text={}") .format( self.user, auth_url, response.text)) self.login_status = LOGIN_FAILED # if the user token exists return self.login_status
[ "def", "login", "(", "self", ")", ":", "auth_url", "=", "self", ".", "api_urls", "[", "\"login\"", "]", "if", "self", ".", "verbose", ":", "log", ".", "info", "(", "(", "\"log in user={} url={} ca_dir={} cert={}\"", ")", ".", "format", "(", "self", ".", "user", ",", "auth_url", ",", "self", ".", "ca_dir", ",", "self", ".", "cert", ")", ")", "use_headers", "=", "{", "\"Content-type\"", ":", "\"application/json\"", "}", "login_data", "=", "{", "\"username\"", ":", "self", ".", "user", ",", "\"password\"", ":", "self", ".", "password", "}", "if", "self", ".", "debug", ":", "log", ".", "info", "(", "(", "\"LOGIN with body={} headers={} url={} \"", "\"verify={} cert={}\"", ")", ".", "format", "(", "login_data", ",", "use_headers", ",", "auth_url", ",", "self", ".", "use_verify", ",", "self", ".", "cert", ")", ")", "response", "=", "requests", ".", "post", "(", "auth_url", ",", "verify", "=", "self", ".", "use_verify", ",", "cert", "=", "self", ".", "cert", ",", "data", "=", "json", ".", "dumps", "(", "login_data", ")", ",", "headers", "=", "use_headers", ")", "if", "self", ".", "debug", ":", "log", ".", "info", "(", "(", "\"LOGIN response status_code={} text={} reason={}\"", ")", ".", "format", "(", "response", ".", "status_code", ",", "response", ".", "text", ",", "response", ".", "reason", ")", ")", "user_token", "=", "\"\"", "if", "response", ".", "status_code", "==", "200", ":", "user_token", "=", "json", ".", "loads", "(", "response", ".", "text", ")", "[", "\"token\"", "]", "if", "user_token", "!=", "\"\"", ":", "self", ".", "token", "=", "user_token", "self", ".", "login_status", "=", "LOGIN_SUCCESS", "if", "self", ".", "verbose", ":", "log", ".", "debug", "(", "\"login success\"", ")", "else", ":", "log", ".", "error", "(", "(", "\"failed to login user={} to url={} text={}\"", ")", ".", "format", "(", "self", ".", "user", ",", "auth_url", ",", "response", ".", "text", ")", ")", "self", ".", "login_status", "=", "LOGIN_FAILED", "# if the user token exists", "return", "self", ".", "login_status" ]
login
[ "login" ]
python
train
mrsarm/mongotail
mongotail/conn.py
https://github.com/mrsarm/mongotail/blob/82ba74e32eff92faa320833a8d19c58555f9cd49/mongotail/conn.py#L31-L78
def connect(address, args): """ Connect with `address`, and return a tuple with a :class:`~pymongo.MongoClient`, and a :class:`~pymongo.database.Database` object. :param address: a string representation with the db address :param args: connection arguments: - username: username for authentication (optional) - password: password for authentication. If username is given and password isn't, it's asked from tty. - auth_database: authenticate the username and password against that database (optional). If not specified, the database specified in address will be used. - ssl, ssl_certfile, ssl_keyfile, ssl_cert_reqs, ssl_ca_certs: SSL authentication options :return: a tuple with ``(client, db)`` """ try: host, port, dbname = get_res_address(address) except AddressError as e: error_parsing(str(e).replace("resource", "database")) try: options = {} if args.ssl: options["ssl"] = True options["ssl_certfile"] = args.ssl_cert_file options["ssl_keyfile"] = args.ssl_key_file options["ssl_cert_reqs"] = args.ssl_cert_reqs options["ssl_ca_certs"] = args.ssl_ca_certs client = MongoClient(host=host, port=port, **options) except Exception as e: error("Error trying to connect: %s" % str(e), ECONNREFUSED) username = args.username password = args.password auth_database = args.auth_database if username: if password is None: password = getpass.getpass() if auth_database is None: auth_database = dbname try: auth_db = client[auth_database] auth_db.authenticate(username, password) except Exception as e: error("Error trying to authenticate: %s" % str(e), -3) db = client[dbname] return client, db
[ "def", "connect", "(", "address", ",", "args", ")", ":", "try", ":", "host", ",", "port", ",", "dbname", "=", "get_res_address", "(", "address", ")", "except", "AddressError", "as", "e", ":", "error_parsing", "(", "str", "(", "e", ")", ".", "replace", "(", "\"resource\"", ",", "\"database\"", ")", ")", "try", ":", "options", "=", "{", "}", "if", "args", ".", "ssl", ":", "options", "[", "\"ssl\"", "]", "=", "True", "options", "[", "\"ssl_certfile\"", "]", "=", "args", ".", "ssl_cert_file", "options", "[", "\"ssl_keyfile\"", "]", "=", "args", ".", "ssl_key_file", "options", "[", "\"ssl_cert_reqs\"", "]", "=", "args", ".", "ssl_cert_reqs", "options", "[", "\"ssl_ca_certs\"", "]", "=", "args", ".", "ssl_ca_certs", "client", "=", "MongoClient", "(", "host", "=", "host", ",", "port", "=", "port", ",", "*", "*", "options", ")", "except", "Exception", "as", "e", ":", "error", "(", "\"Error trying to connect: %s\"", "%", "str", "(", "e", ")", ",", "ECONNREFUSED", ")", "username", "=", "args", ".", "username", "password", "=", "args", ".", "password", "auth_database", "=", "args", ".", "auth_database", "if", "username", ":", "if", "password", "is", "None", ":", "password", "=", "getpass", ".", "getpass", "(", ")", "if", "auth_database", "is", "None", ":", "auth_database", "=", "dbname", "try", ":", "auth_db", "=", "client", "[", "auth_database", "]", "auth_db", ".", "authenticate", "(", "username", ",", "password", ")", "except", "Exception", "as", "e", ":", "error", "(", "\"Error trying to authenticate: %s\"", "%", "str", "(", "e", ")", ",", "-", "3", ")", "db", "=", "client", "[", "dbname", "]", "return", "client", ",", "db" ]
Connect with `address`, and return a tuple with a :class:`~pymongo.MongoClient`, and a :class:`~pymongo.database.Database` object. :param address: a string representation with the db address :param args: connection arguments: - username: username for authentication (optional) - password: password for authentication. If username is given and password isn't, it's asked from tty. - auth_database: authenticate the username and password against that database (optional). If not specified, the database specified in address will be used. - ssl, ssl_certfile, ssl_keyfile, ssl_cert_reqs, ssl_ca_certs: SSL authentication options :return: a tuple with ``(client, db)``
[ "Connect", "with", "address", "and", "return", "a", "tuple", "with", "a", ":", "class", ":", "~pymongo", ".", "MongoClient", "and", "a", ":", "class", ":", "~pymongo", ".", "database", ".", "Database", "object", ".", ":", "param", "address", ":", "a", "string", "representation", "with", "the", "db", "address", ":", "param", "args", ":", "connection", "arguments", ":", "-", "username", ":", "username", "for", "authentication", "(", "optional", ")", "-", "password", ":", "password", "for", "authentication", ".", "If", "username", "is", "given", "and", "password", "isn", "t", "it", "s", "asked", "from", "tty", ".", "-", "auth_database", ":", "authenticate", "the", "username", "and", "password", "against", "that", "database", "(", "optional", ")", ".", "If", "not", "specified", "the", "database", "specified", "in", "address", "will", "be", "used", ".", "-", "ssl", "ssl_certfile", "ssl_keyfile", "ssl_cert_reqs", "ssl_ca_certs", ":", "SSL", "authentication", "options", ":", "return", ":", "a", "tuple", "with", "(", "client", "db", ")" ]
python
test
robmarkcole/HASS-data-detective
detective/auth.py
https://github.com/robmarkcole/HASS-data-detective/blob/f67dfde9dd63a3af411944d1857b0835632617c5/detective/auth.py#L35-L42
def user_name(self, user_id): """Return name for user.""" user = self.users.get(user_id) if user is None: return "Unknown user ({})".format(user_id) return user["name"]
[ "def", "user_name", "(", "self", ",", "user_id", ")", ":", "user", "=", "self", ".", "users", ".", "get", "(", "user_id", ")", "if", "user", "is", "None", ":", "return", "\"Unknown user ({})\"", ".", "format", "(", "user_id", ")", "return", "user", "[", "\"name\"", "]" ]
Return name for user.
[ "Return", "name", "for", "user", "." ]
python
train
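The lookup assumes self.users maps user ids to dicts with a "name" key; a self-contained version of the same fallback logic:

users = {"u1": {"name": "Ada"}}

def user_name(user_id):
    user = users.get(user_id)
    if user is None:
        return "Unknown user ({})".format(user_id)
    return user["name"]

print(user_name("u1"))  # Ada
print(user_name("u2"))  # Unknown user (u2)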
tensorflow/probability
tensorflow_probability/python/layers/distribution_layer.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L732-L751
def get_config(self): """Returns the config of this layer. NOTE: At the moment, this configuration can only be serialized if the Layer's `convert_to_tensor_fn` is a serializable Keras object (i.e., implements `get_config`) or one of the standard values: - `Distribution.sample` (or `"sample"`) - `Distribution.mean` (or `"mean"`) - `Distribution.mode` (or `"mode"`) - `Distribution.stddev` (or `"stddev"`) - `Distribution.variance` (or `"variance"`) """ config = { 'event_shape': self._event_shape, 'convert_to_tensor_fn': _serialize(self._convert_to_tensor_fn), 'sample_dtype': self._sample_dtype, 'validate_args': self._validate_args } base_config = super(IndependentBernoulli, self).get_config() return dict(list(base_config.items()) + list(config.items()))
[ "def", "get_config", "(", "self", ")", ":", "config", "=", "{", "'event_shape'", ":", "self", ".", "_event_shape", ",", "'convert_to_tensor_fn'", ":", "_serialize", "(", "self", ".", "_convert_to_tensor_fn", ")", ",", "'sample_dtype'", ":", "self", ".", "_sample_dtype", ",", "'validate_args'", ":", "self", ".", "_validate_args", "}", "base_config", "=", "super", "(", "IndependentBernoulli", ",", "self", ")", ".", "get_config", "(", ")", "return", "dict", "(", "list", "(", "base_config", ".", "items", "(", ")", ")", "+", "list", "(", "config", ".", "items", "(", ")", ")", ")" ]
Returns the config of this layer. NOTE: At the moment, this configuration can only be serialized if the Layer's `convert_to_tensor_fn` is a serializable Keras object (i.e., implements `get_config`) or one of the standard values: - `Distribution.sample` (or `"sample"`) - `Distribution.mean` (or `"mean"`) - `Distribution.mode` (or `"mode"`) - `Distribution.stddev` (or `"stddev"`) - `Distribution.variance` (or `"variance"`)
[ "Returns", "the", "config", "of", "this", "layer", "." ]
python
test
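A sketch of the serialization round-trip this enables, using one of the standard string values ("sample") that the note above lists as serializable; exact constructor defaults may differ by tfp version:

import tensorflow_probability as tfp

layer = tfp.layers.IndependentBernoulli(
    event_shape=5, convert_to_tensor_fn="sample")
config = layer.get_config()
restored = tfp.layers.IndependentBernoulli.from_config(config)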
dopefishh/pympi
pympi/Elan.py
https://github.com/dopefishh/pympi/blob/79c747cde45b5ba203ed93154d8c123ac9c3ef56/pympi/Elan.py#L514-L524
def generate_annotation_id(self):
    """Generate the next annotation id; this function is mainly used
    internally.
    """
    if not self.maxaid:
        valid_anns = [int(''.join(filter(str.isdigit, a)))
                      for a in self.timeslots]
        self.maxaid = max(valid_anns + [1])+1
    else:
        self.maxaid += 1
    return 'a{:d}'.format(self.maxaid)
[ "def", "generate_annotation_id", "(", "self", ")", ":", "if", "not", "self", ".", "maxaid", ":", "valid_anns", "=", "[", "int", "(", "''", ".", "join", "(", "filter", "(", "str", ".", "isdigit", ",", "a", ")", ")", ")", "for", "a", "in", "self", ".", "timeslots", "]", "self", ".", "maxaid", "=", "max", "(", "valid_anns", "+", "[", "1", "]", ")", "+", "1", "else", ":", "self", ".", "maxaid", "+=", "1", "return", "'a{:d}'", ".", "format", "(", "self", ".", "maxaid", ")" ]
Generate the next annotation id; this function is mainly used internally.
[ "Generate", "the", "next", "annotation", "id", "this", "function", "is", "mainly", "used", "internally", "." ]
python
test
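Illustrative use (the docstring flags it as mainly internal): on a fresh Eaf the counter is seeded from digits found in existing timeslot ids, so the first call on an empty file yields 'a2' and later calls increment:

from pympi.Elan import Eaf

eaf = Eaf()
print(eaf.generate_annotation_id())  # 'a2' on an empty file
print(eaf.generate_annotation_id())  # 'a3'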
EntilZha/PyFunctional
functional/transformations.py
https://github.com/EntilZha/PyFunctional/blob/ac04e4a8552b0c464a7f492f7c9862424867b63e/functional/transformations.py#L320-L331
def cartesian_t(iterables, repeat): """ Transformation for Sequence.cartesian :param iterables: elements for cartesian product :param repeat: how many times to repeat iterables :return: transformation """ return Transformation( 'cartesian', lambda sequence: product(sequence, *iterables, repeat=repeat), None )
[ "def", "cartesian_t", "(", "iterables", ",", "repeat", ")", ":", "return", "Transformation", "(", "'cartesian'", ",", "lambda", "sequence", ":", "product", "(", "sequence", ",", "*", "iterables", ",", "repeat", "=", "repeat", ")", ",", "None", ")" ]
Transformation for Sequence.cartesian :param iterables: elements for cartesian product :param repeat: how many times to repeat iterables :return: transformation
[ "Transformation", "for", "Sequence", ".", "cartesian", ":", "param", "iterables", ":", "elements", "for", "cartesian", "product", ":", "param", "repeat", ":", "how", "many", "times", "to", "repeat", "iterables", ":", "return", ":", "transformation" ]
python
train
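At the user level this transformation backs Sequence.cartesian, which behaves like itertools.product over the sequence and its arguments:

from functional import seq

pairs = seq([1, 2]).cartesian([3, 4]).to_list()
# [(1, 3), (1, 4), (2, 3), (2, 4)]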
coldfix/udiskie
udiskie/udisks2.py
https://github.com/coldfix/udiskie/blob/804c9d27df6f7361fec3097c432398f2d702f911/udiskie/udisks2.py#L532-L540
def set_autoclear(self, value, auth_no_user_interaction=None):
    """Set autoclear flag for loop device."""
    return self._M.Loop.SetAutoclear(
        '(ba{sv})',
        value,
        filter_opt({
            'auth.no_user_interaction': ('b', auth_no_user_interaction),
        })
    )
[ "def", "set_autoclear", "(", "self", ",", "value", ",", "auth_no_user_interaction", "=", "None", ")", ":", "return", "self", ".", "_M", ".", "Loop", ".", "SetAutoclear", "(", "'(ba{sv})'", ",", "value", ",", "filter_opt", "(", "{", "'auth.no_user_interaction'", ":", "(", "'b'", ",", "auth_no_user_interaction", ")", ",", "}", ")", ")" ]
Set autoclear flag for loop device.
[ "Set", "autoclear", "flag", "for", "loop", "partition", "." ]
python
train
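A hedged usage sketch; loop_dev stands for a udiskie UDisks2 proxy wrapping a loop device (obtaining one requires a live D-Bus connection, which is out of scope here):

# loop_dev is an assumed udiskie proxy for an existing loop device.
loop_dev.set_autoclear(True, auth_no_user_interaction=True)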
datosgobar/pydatajson
pydatajson/readers.py
https://github.com/datosgobar/pydatajson/blob/3141082ffbaa295e2deaf6ffbbc5a59f5859960e/pydatajson/readers.py#L286-L318
def _get_dataset_index(catalog, dataset_identifier, dataset_title,
                       logger=None):
    """Return the index of a dataset in the catalog based on its
    identifier"""
    logger = logger or pydj_logger
    matching_datasets = []

    for idx, dataset in enumerate(catalog["catalog_dataset"]):
        if dataset["dataset_identifier"] == dataset_identifier:
            if dataset["dataset_title"] == dataset_title:
                matching_datasets.append(idx)
            else:
                logger.warning(
                    ce.DatasetUnexpectedTitle(
                        dataset_identifier,
                        dataset["dataset_title"],
                        dataset_title
                    )
                )

    # There must be exactly one dataset with the given identifier.
    no_dsets_msg = "No hay ningun dataset con el identifier {}".format(
        dataset_identifier)
    many_dsets_msg = "Hay mas de un dataset con el identifier {}: {}".format(
        dataset_identifier, matching_datasets)
    if len(matching_datasets) == 0:
        logger.error(no_dsets_msg)
        return None
    elif len(matching_datasets) > 1:
        logger.error(many_dsets_msg)
        return None
    else:
        return matching_datasets[0]
[ "def", "_get_dataset_index", "(", "catalog", ",", "dataset_identifier", ",", "dataset_title", ",", "logger", "=", "None", ")", ":", "logger", "=", "logger", "or", "pydj_logger", "matching_datasets", "=", "[", "]", "for", "idx", ",", "dataset", "in", "enumerate", "(", "catalog", "[", "\"catalog_dataset\"", "]", ")", ":", "if", "dataset", "[", "\"dataset_identifier\"", "]", "==", "dataset_identifier", ":", "if", "dataset", "[", "\"dataset_title\"", "]", "==", "dataset_title", ":", "matching_datasets", ".", "append", "(", "idx", ")", "else", ":", "logger", ".", "warning", "(", "ce", ".", "DatasetUnexpectedTitle", "(", "dataset_identifier", ",", "dataset", "[", "\"dataset_title\"", "]", ",", "dataset_title", ")", ")", "# Debe haber exactamente un dataset con el identificador provisto.", "no_dsets_msg", "=", "\"No hay ningun dataset con el identifier {}\"", ".", "format", "(", "dataset_identifier", ")", "many_dsets_msg", "=", "\"Hay mas de un dataset con el identifier {}: {}\"", ".", "format", "(", "dataset_identifier", ",", "matching_datasets", ")", "if", "len", "(", "matching_datasets", ")", "==", "0", ":", "logger", ".", "error", "(", "no_dsets_msg", ")", "return", "None", "elif", "len", "(", "matching_datasets", ")", ">", "1", ":", "logger", ".", "error", "(", "many_dsets_msg", ")", "return", "None", "else", ":", "return", "matching_datasets", "[", "0", "]" ]
Return the index of a dataset in the catalog based on its identifier
[ "Devuelve", "el", "índice", "de", "un", "dataset", "en", "el", "catálogo", "en", "función", "de", "su", "identificador" ]
python
train
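A self-contained illustration of the lookup with a minimal catalog dict; both identifier and title match, so the warning branch is never hit:

catalog = {"catalog_dataset": [
    {"dataset_identifier": "d1", "dataset_title": "Exports"},
    {"dataset_identifier": "d2", "dataset_title": "Imports"},
]}
print(_get_dataset_index(catalog, "d2", "Imports"))  # 1
print(_get_dataset_index(catalog, "d3", "Trade"))    # None (logged as error)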
tensorflow/tensor2tensor
tensor2tensor/visualization/visualization.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/visualization/visualization.py#L159-L205
def get_att_mats(translate_model):
  """Gets the tensors representing the attentions from a built model.

  The attentions are stored in a dict on the Transformer object while building
  the graph.

  Args:
    translate_model: Transformer object to fetch the attention weights from.

  Returns:
    Tuple of attention matrices; (
      enc_atts: Encoder self attention weights.
        A list of `num_layers` numpy arrays of size
        (batch_size, num_heads, inp_len, inp_len)
      dec_atts: Decoder self attention weights.
        A list of `num_layers` numpy arrays of size
        (batch_size, num_heads, out_len, out_len)
      encdec_atts: Encoder-Decoder attention weights.
        A list of `num_layers` numpy arrays of size
        (batch_size, num_heads, out_len, inp_len)
    )
  """
  enc_atts = []
  dec_atts = []
  encdec_atts = []

  prefix = "transformer/body/"
  postfix_self_attention = "/multihead_attention/dot_product_attention"
  if translate_model.hparams.self_attention_type == "dot_product_relative":
    postfix_self_attention = ("/multihead_attention/"
                              "dot_product_attention_relative")
  postfix_encdec = "/multihead_attention/dot_product_attention"

  for i in range(translate_model.hparams.num_hidden_layers):
    enc_att = translate_model.attention_weights[
        "%sencoder/layer_%i/self_attention%s"
        % (prefix, i, postfix_self_attention)]
    dec_att = translate_model.attention_weights[
        "%sdecoder/layer_%i/self_attention%s"
        % (prefix, i, postfix_self_attention)]
    encdec_att = translate_model.attention_weights[
        "%sdecoder/layer_%i/encdec_attention%s" % (prefix, i, postfix_encdec)]
    enc_atts.append(enc_att)
    dec_atts.append(dec_att)
    encdec_atts.append(encdec_att)

  return enc_atts, dec_atts, encdec_atts
[ "def", "get_att_mats", "(", "translate_model", ")", ":", "enc_atts", "=", "[", "]", "dec_atts", "=", "[", "]", "encdec_atts", "=", "[", "]", "prefix", "=", "\"transformer/body/\"", "postfix_self_attention", "=", "\"/multihead_attention/dot_product_attention\"", "if", "translate_model", ".", "hparams", ".", "self_attention_type", "==", "\"dot_product_relative\"", ":", "postfix_self_attention", "=", "(", "\"/multihead_attention/\"", "\"dot_product_attention_relative\"", ")", "postfix_encdec", "=", "\"/multihead_attention/dot_product_attention\"", "for", "i", "in", "range", "(", "translate_model", ".", "hparams", ".", "num_hidden_layers", ")", ":", "enc_att", "=", "translate_model", ".", "attention_weights", "[", "\"%sencoder/layer_%i/self_attention%s\"", "%", "(", "prefix", ",", "i", ",", "postfix_self_attention", ")", "]", "dec_att", "=", "translate_model", ".", "attention_weights", "[", "\"%sdecoder/layer_%i/self_attention%s\"", "%", "(", "prefix", ",", "i", ",", "postfix_self_attention", ")", "]", "encdec_att", "=", "translate_model", ".", "attention_weights", "[", "\"%sdecoder/layer_%i/encdec_attention%s\"", "%", "(", "prefix", ",", "i", ",", "postfix_encdec", ")", "]", "enc_atts", ".", "append", "(", "enc_att", ")", "dec_atts", ".", "append", "(", "dec_att", ")", "encdec_atts", ".", "append", "(", "encdec_att", ")", "return", "enc_atts", ",", "dec_atts", ",", "encdec_atts" ]
Gets the tensors representing the attentions from a built model.

The attentions are stored in a dict on the Transformer object while building
the graph.

Args:
  translate_model: Transformer object to fetch the attention weights from.

Returns:
  Tuple of attention matrices; (
    enc_atts: Encoder self attention weights.
      A list of `num_layers` numpy arrays of size
      (batch_size, num_heads, inp_len, inp_len)
    dec_atts: Decoder self attention weights.
      A list of `num_layers` numpy arrays of size
      (batch_size, num_heads, out_len, out_len)
    encdec_atts: Encoder-Decoder attention weights.
      A list of `num_layers` numpy arrays of size
      (batch_size, num_heads, out_len, inp_len)
  )
[ "Get", "s", "the", "tensors", "representing", "the", "attentions", "from", "a", "build", "model", "." ]
python
train
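Because the weights are plain arrays keyed by scope name, the extraction can be exercised without building a real Transformer; everything below is a stub shaped to match the expected keys, not the actual model API:

import numpy as np
from types import SimpleNamespace

hparams = SimpleNamespace(num_hidden_layers=2,
                          self_attention_type="dot_product")
att = "/multihead_attention/dot_product_attention"
weights = {}
for i in range(hparams.num_hidden_layers):
    # Shapes: (batch_size, num_heads, query_len, key_len).
    weights["transformer/body/encoder/layer_%i/self_attention%s" % (i, att)] = np.zeros((1, 8, 10, 10))
    weights["transformer/body/decoder/layer_%i/self_attention%s" % (i, att)] = np.zeros((1, 8, 7, 7))
    weights["transformer/body/decoder/layer_%i/encdec_attention%s" % (i, att)] = np.zeros((1, 8, 7, 10))

mock = SimpleNamespace(hparams=hparams, attention_weights=weights)
enc, dec, encdec = get_att_mats(mock)
print(len(enc), enc[0].shape)  # 2 (1, 8, 10, 10)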
linkedin/Zopkio
zopkio/adhoc_deployer.py
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/adhoc_deployer.py#L264-L306
def stop(self, unique_id, configs=None):
    """Stop the service. If the deployer has not started a service with `unique_id` the deployer will raise an Exception
    There are two configs that will be considered:
    'terminate_only': if this config is passed in then this method is the same as terminate(unique_id)
    (this is also the behavior if stop_command is None and not overridden)
    'stop_command': overrides the default stop_command

    :param unique_id:
    :param configs:
    :return:
    """
    # the following is necessary to set the configs for this function as the combination of the
    # default configurations and the parameter with the parameter superseding the defaults but
    # not modifying the defaults
    if configs is None:
        configs = {}
    tmp = self.default_configs.copy()
    tmp.update(configs)
    configs = tmp

    logger.debug("stopping " + unique_id)
    if unique_id in self.processes:
        hostname = self.processes[unique_id].hostname
    else:
        logger.error("Can't stop {0}: process not known".format(unique_id))
        raise DeploymentError("Can't stop {0}: process not known".format(unique_id))

    if configs.get('terminate_only', False):
        self.terminate(unique_id, configs)
    else:
        stop_command = configs.get('stop_command') or self.default_configs.get('stop_command')
        env = configs.get("env", {})
        if stop_command is not None:
            install_path = self.processes[unique_id].install_path
            with get_ssh_client(hostname, username=runtime.get_username(),
                                password=runtime.get_password()) as ssh:
                log_output(exec_with_env(ssh, "cd {0}; {1}".format(install_path, stop_command),
                                         msg="Failed to stop {0}".format(unique_id), env=env))
        else:
            self.terminate(unique_id, configs)
    if 'delay' in configs:
        time.sleep(configs['delay'])
[ "def", "stop", "(", "self", ",", "unique_id", ",", "configs", "=", "None", ")", ":", "# the following is necessay to set the configs for this function as the combination of the", "# default configurations and the parameter with the parameter superceding the defaults but", "# not modifying the defaults", "if", "configs", "is", "None", ":", "configs", "=", "{", "}", "tmp", "=", "self", ".", "default_configs", ".", "copy", "(", ")", "tmp", ".", "update", "(", "configs", ")", "configs", "=", "tmp", "logger", ".", "debug", "(", "\"stopping \"", "+", "unique_id", ")", "if", "unique_id", "in", "self", ".", "processes", ":", "hostname", "=", "self", ".", "processes", "[", "unique_id", "]", ".", "hostname", "else", ":", "logger", ".", "error", "(", "\"Can't stop {0}: process not known\"", ".", "format", "(", "unique_id", ")", ")", "raise", "DeploymentError", "(", "\"Can't stop {0}: process not known\"", ".", "format", "(", "unique_id", ")", ")", "if", "configs", ".", "get", "(", "'terminate_only'", ",", "False", ")", ":", "self", ".", "terminate", "(", "unique_id", ",", "configs", ")", "else", ":", "stop_command", "=", "configs", ".", "get", "(", "'stop_command'", ")", "or", "self", ".", "default_configs", ".", "get", "(", "'stop_command'", ")", "env", "=", "configs", ".", "get", "(", "\"env\"", ",", "{", "}", ")", "if", "stop_command", "is", "not", "None", ":", "install_path", "=", "self", ".", "processes", "[", "unique_id", "]", ".", "install_path", "with", "get_ssh_client", "(", "hostname", ",", "username", "=", "runtime", ".", "get_username", "(", ")", ",", "password", "=", "runtime", ".", "get_password", "(", ")", ")", "as", "ssh", ":", "log_output", "(", "exec_with_env", "(", "ssh", ",", "\"cd {0}; {1}\"", ".", "format", "(", "install_path", ",", "stop_command", ")", ",", "msg", "=", "\"Failed to stop {0}\"", ".", "format", "(", "unique_id", ")", ",", "env", "=", "env", ")", ")", "else", ":", "self", ".", "terminate", "(", "unique_id", ",", "configs", ")", "if", "'delay'", "in", "configs", ":", "time", ".", "sleep", "(", "configs", "[", "'delay'", "]", ")" ]
Stop the service. If the deployer has not started a service with `unique_id` the deployer will raise an Exception
There are two configs that will be considered:
'terminate_only': if this config is passed in then this method is the same as terminate(unique_id)
(this is also the behavior if stop_command is None and not overridden)
'stop_command': overrides the default stop_command

:param unique_id:
:param configs:
:return:
[ "Stop", "the", "service", ".", "If", "the", "deployer", "has", "not", "started", "a", "service", "with", "unique_id", "the", "deployer", "will", "raise", "an", "Exception", "There", "are", "two", "configs", "that", "will", "be", "considered", ":", "terminate_only", ":", "if", "this", "config", "is", "passed", "in", "then", "this", "method", "is", "the", "same", "as", "terminate", "(", "unique_id", ")", "(", "this", "is", "also", "the", "behavior", "if", "stop_command", "is", "None", "and", "not", "overridden", ")", "stop_command", ":", "overrides", "the", "default", "stop_command" ]
python
train
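A sketch of the two paths the docstring describes; deployer and the unique id are assumed to come from an earlier install/start call on the adhoc deployer:

# Run the remote stop_command, then wait 5 seconds.
deployer.stop("service-1",
              configs={"stop_command": "./bin/stop.sh", "delay": 5})

# Skip any stop_command and just terminate the process.
deployer.stop("service-1", configs={"terminate_only": True})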
stitchfix/pyxley
examples/metricsgraphics/project/buildui.py
https://github.com/stitchfix/pyxley/blob/2dab00022d977d986169cd8a629b3a2f91be893f/examples/metricsgraphics/project/buildui.py#L23-L35
def create_histogram(df):
    """ create a mg histogram

        Args:
            df (pandas.DataFrame): data to plot
    """
    fig = Figure("/mg/histogram/", "mg_histogram")
    fig.layout.set_size(width=450, height=200)
    fig.layout.set_margin(left=40, right=40)
    fig.graphics.animate_on_load()
    # Make a histogram with 20 bins
    return Histogram(df, fig, "value", 20, init_params={"Data": "Steps"})
[ "def", "create_histogram", "(", "df", ")", ":", "fig", "=", "Figure", "(", "\"/mg/histogram/\"", ",", "\"mg_histogram\"", ")", "fig", ".", "layout", ".", "set_size", "(", "width", "=", "450", ",", "height", "=", "200", ")", "fig", ".", "layout", ".", "set_margin", "(", "left", "=", "40", ",", "right", "=", "40", ")", "fig", ".", "graphics", ".", "animate_on_load", "(", ")", "# Make a histogram with 20 bins", "return", "Histogram", "(", "df", ",", "fig", ",", "\"value\"", ",", "20", ",", "init_params", "=", "{", "\"Data\"", ":", "\"Steps\"", "}", ")" ]
create a mg histogram

Args:
    df (pandas.DataFrame): data to plot
[ "create", "a", "mg", "line", "plot" ]
python
train
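Since the column name "value" is hard-coded, the input frame only needs that column; registering the returned Histogram with a Flask app is handled elsewhere in the example project:

import numpy as np
import pandas as pd

df = pd.DataFrame({"value": np.random.randn(500)})
hist = create_histogram(df)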
ziwenxie/netease-dl
netease/logger.py
https://github.com/ziwenxie/netease-dl/blob/84b226fc07b10f7f66580f0fc69f10356f66b5c3/netease/logger.py#L24-L38
def get_logger(name): """Return a logger with a file handler.""" logger = logging.getLogger(name) logger.setLevel(logging.INFO) # File output handler file_handler = logging.FileHandler(log_path) file_handler.setLevel(logging.INFO) formatter = logging.Formatter( '%(asctime)s %(name)12s %(levelname)8s %(lineno)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p') file_handler.setFormatter(formatter) logger.addHandler(file_handler) return logger
[ "def", "get_logger", "(", "name", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "name", ")", "logger", ".", "setLevel", "(", "logging", ".", "INFO", ")", "# File output handler", "file_handler", "=", "logging", ".", "FileHandler", "(", "log_path", ")", "file_handler", ".", "setLevel", "(", "logging", ".", "INFO", ")", "formatter", "=", "logging", ".", "Formatter", "(", "'%(asctime)s %(name)12s %(levelname)8s %(lineno)s %(message)s'", ",", "datefmt", "=", "'%m/%d/%Y %I:%M:%S %p'", ")", "file_handler", ".", "setFormatter", "(", "formatter", ")", "logger", ".", "addHandler", "(", "file_handler", ")", "return", "logger" ]
Return a logger with a file handler.
[ "Return", "a", "logger", "with", "a", "file", "handler", "." ]
python
train
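Usage follows the standard logging pattern; note the function assumes a module-level log_path that points at a writable file:

logger = get_logger("netease.download")
logger.info("downloading song id=%s", 12345)
logger.error("download failed, retrying")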