Dataset schema (column: type, observed range):

repo: string, 7-54 chars
path: string, 4-192 chars
url: string, 87-284 chars
code: string, 78-104k chars
code_tokens: list
docstring: string, 1-46.9k chars
docstring_tokens: list
language: string, 1 distinct value
partition: string, 3 distinct values
kobejohn/PQHelper
pqhelper/base.py
https://github.com/kobejohn/PQHelper/blob/d2b78a22dcb631794295e6a159b06f39c3f10db6/pqhelper/base.py#L440-L492
def _simulated_chain_result(self, potential_chain, already_used_bonus):
    """Simulate any chain reactions.

    Arguments:
    potential_chain: a state to be tested for chain reactions
    already_used_bonus: boolean indicating whether a bonus turn was
        already applied during this action

    Return: final result state or None (if state is filtered out in capture)

    Note that if there is no chain reaction, the final result is the same
    as the original state received.
    """
    while potential_chain:
        # hook for capture game optimizations. no effect in base
        # warning: only do this ONCE for any given state or it will
        # always filter the second time
        if self._disallow_state(potential_chain):
            potential_chain.graft_child(Filtered())
            return None  # no more simulation for this filtered state
        result_board, destroyed_groups = \
            potential_chain.board.execute_once(random_fill=self.random_fill)
        # yield the state if nothing happened during execution (chain done)
        if not destroyed_groups:
            # yield this state as the final result of the chain
            return potential_chain
        # attach the transition
        chain = ChainReaction()
        potential_chain.graft_child(chain)
        # attach the result state
        if already_used_bonus:
            # disallow bonus action if already applied
            bonus_action = 0
        else:
            # allow bonus action once and then flag as used
            bonus_action = any(len(group) >= 4 for group in destroyed_groups)
            already_used_bonus = True
        cls = potential_chain.__class__
        chain_result = cls(board=result_board,
                           turn=potential_chain.turn,
                           actions_remaining=
                           potential_chain.actions_remaining + bonus_action,
                           player=potential_chain.player.copy(),
                           opponent=potential_chain.opponent.copy())
        # update the player and opponent
        base_attack = \
            chain_result.active.apply_tile_groups(destroyed_groups)
        chain_result.passive.apply_attack(base_attack)
        chain.graft_child(chain_result)
        # prepare to try for another chain reaction
        potential_chain = chain_result
[ "def", "_simulated_chain_result", "(", "self", ",", "potential_chain", ",", "already_used_bonus", ")", ":", "while", "potential_chain", ":", "# hook for capture game optimizations. no effect in base", "# warning: only do this ONCE for any given state or it will", "# always filter the second time", "if", "self", ".", "_disallow_state", "(", "potential_chain", ")", ":", "potential_chain", ".", "graft_child", "(", "Filtered", "(", ")", ")", "return", "None", "# no more simulation for this filtered state", "result_board", ",", "destroyed_groups", "=", "potential_chain", ".", "board", ".", "execute_once", "(", "random_fill", "=", "self", ".", "random_fill", ")", "# yield the state if nothing happened during execution (chain done)", "if", "not", "destroyed_groups", ":", "# yield this state as the final result of the chain", "return", "potential_chain", "# attach the transition", "chain", "=", "ChainReaction", "(", ")", "potential_chain", ".", "graft_child", "(", "chain", ")", "# attach the result state", "if", "already_used_bonus", ":", "# disallow bonus action if already applied", "bonus_action", "=", "0", "else", ":", "# allow bonus action once and then flag as used", "bonus_action", "=", "any", "(", "len", "(", "group", ")", ">=", "4", "for", "group", "in", "destroyed_groups", ")", "already_used_bonus", "=", "True", "cls", "=", "potential_chain", ".", "__class__", "chain_result", "=", "cls", "(", "board", "=", "result_board", ",", "turn", "=", "potential_chain", ".", "turn", ",", "actions_remaining", "=", "potential_chain", ".", "actions_remaining", "+", "bonus_action", ",", "player", "=", "potential_chain", ".", "player", ".", "copy", "(", ")", ",", "opponent", "=", "potential_chain", ".", "opponent", ".", "copy", "(", ")", ")", "# update the player and opponent", "base_attack", "=", "chain_result", ".", "active", ".", "apply_tile_groups", "(", "destroyed_groups", ")", "chain_result", ".", "passive", ".", "apply_attack", "(", "base_attack", ")", "chain", ".", "graft_child", "(", "chain_result", ")", "# prepare to try for another chain reaction", "potential_chain", "=", "chain_result" ]
Simulate any chain reactions.

Arguments:
potential_chain: a state to be tested for chain reactions
already_used_bonus: boolean indicating whether a bonus turn was
    already applied during this action

Return: final result state or None (if state is filtered out in capture)

Note that if there is no chain reaction, the final result is the same
as the original state received.
[ "Simulate", "any", "chain", "reactions", "." ]
python
train
abseil/abseil-py
absl/flags/_argument_parser.py
https://github.com/abseil/abseil-py/blob/9d73fdaa23a6b6726aa5731390f388c0c6250ee5/absl/flags/_argument_parser.py#L474-L481
def parse(self, argument):
    """See base class."""
    if isinstance(argument, list):
        return argument
    elif not argument:
        return []
    else:
        return [s.strip() for s in argument.split(self._token)]
[ "def", "parse", "(", "self", ",", "argument", ")", ":", "if", "isinstance", "(", "argument", ",", "list", ")", ":", "return", "argument", "elif", "not", "argument", ":", "return", "[", "]", "else", ":", "return", "[", "s", ".", "strip", "(", ")", "for", "s", "in", "argument", ".", "split", "(", "self", ".", "_token", ")", "]" ]
See base class.
[ "See", "base", "class", "." ]
python
train
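A minimal usage sketch of the list parser above as it surfaces through absl's public DEFINE_list API (assumes absl-py is installed; the flag name and values are hypothetical):

from absl import flags

flags.DEFINE_list('users', [], 'Comma-separated list of user names.')

# Each comma-separated element is stripped of surrounding whitespace.
flags.FLAGS(['prog', '--users=alice, bob , carol'])
assert flags.FLAGS.users == ['alice', 'bob', 'carol']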
adrn/gala
gala/dynamics/orbit.py
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/orbit.py#L422-L484
def pericenter(self, return_times=False, func=np.mean,
               interp_kwargs=None, minimize_kwargs=None,
               approximate=False):
    """
    Estimate the pericenter(s) of the orbit by identifying local minima in
    the spherical radius and interpolating between timesteps near the
    minima.

    By default, this returns the mean of all local minima (pericenters). To
    get, e.g., the minimum pericenter, pass in ``func=np.min``. To get all
    pericenters, pass in ``func=None``.

    Parameters
    ----------
    func : func (optional)
        A function to evaluate on all of the identified pericenter times.
    return_times : bool (optional)
        Also return the pericenter times.
    interp_kwargs : dict (optional)
        Keyword arguments to be passed to
        :class:`scipy.interpolate.InterpolatedUnivariateSpline`.
    minimize_kwargs : dict (optional)
        Keyword arguments to be passed to :class:`scipy.optimize.minimize`.
    approximate : bool (optional)
        Compute an approximate pericenter by skipping interpolation.

    Returns
    -------
    peri : float, :class:`~numpy.ndarray`
        Either a single number or an array of pericenters.
    times : :class:`~numpy.ndarray` (optional, see ``return_times``)
        If ``return_times=True``, also returns an array of the pericenter
        times.
    """
    if return_times and func is not None:
        raise ValueError("Cannot return times if reducing pericenters "
                         "using an input function. Pass `func=None` if "
                         "you want to return all individual pericenters "
                         "and times.")

    if func is None:
        reduce = False
        func = lambda x: x
    else:
        reduce = True

    # time must increase
    if self.t[-1] < self.t[0]:
        self = self[::-1]

    vals = []
    times = []
    for orbit in self.orbit_gen():
        v, t = orbit._max_helper(-orbit.physicsspherical.r,  # pericenter
                                 interp_kwargs=interp_kwargs,
                                 minimize_kwargs=minimize_kwargs,
                                 approximate=approximate)
        vals.append(func(-v))  # negative for pericenter
        times.append(t)

    return self._max_return_helper(vals, times, return_times, reduce)
[ "def", "pericenter", "(", "self", ",", "return_times", "=", "False", ",", "func", "=", "np", ".", "mean", ",", "interp_kwargs", "=", "None", ",", "minimize_kwargs", "=", "None", ",", "approximate", "=", "False", ")", ":", "if", "return_times", "and", "func", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Cannot return times if reducing pericenters \"", "\"using an input function. Pass `func=None` if \"", "\"you want to return all individual pericenters \"", "\"and times.\"", ")", "if", "func", "is", "None", ":", "reduce", "=", "False", "func", "=", "lambda", "x", ":", "x", "else", ":", "reduce", "=", "True", "# time must increase", "if", "self", ".", "t", "[", "-", "1", "]", "<", "self", ".", "t", "[", "0", "]", ":", "self", "=", "self", "[", ":", ":", "-", "1", "]", "vals", "=", "[", "]", "times", "=", "[", "]", "for", "orbit", "in", "self", ".", "orbit_gen", "(", ")", ":", "v", ",", "t", "=", "orbit", ".", "_max_helper", "(", "-", "orbit", ".", "physicsspherical", ".", "r", ",", "# pericenter", "interp_kwargs", "=", "interp_kwargs", ",", "minimize_kwargs", "=", "minimize_kwargs", ",", "approximate", "=", "approximate", ")", "vals", ".", "append", "(", "func", "(", "-", "v", ")", ")", "# negative for pericenter", "times", ".", "append", "(", "t", ")", "return", "self", ".", "_max_return_helper", "(", "vals", ",", "times", ",", "return_times", ",", "reduce", ")" ]
Estimate the pericenter(s) of the orbit by identifying local minima in
the spherical radius and interpolating between timesteps near the minima.

By default, this returns the mean of all local minima (pericenters). To
get, e.g., the minimum pericenter, pass in ``func=np.min``. To get all
pericenters, pass in ``func=None``.

Parameters
----------
func : func (optional)
    A function to evaluate on all of the identified pericenter times.
return_times : bool (optional)
    Also return the pericenter times.
interp_kwargs : dict (optional)
    Keyword arguments to be passed to
    :class:`scipy.interpolate.InterpolatedUnivariateSpline`.
minimize_kwargs : dict (optional)
    Keyword arguments to be passed to :class:`scipy.optimize.minimize`.
approximate : bool (optional)
    Compute an approximate pericenter by skipping interpolation.

Returns
-------
peri : float, :class:`~numpy.ndarray`
    Either a single number or an array of pericenters.
times : :class:`~numpy.ndarray` (optional, see ``return_times``)
    If ``return_times=True``, also returns an array of the pericenter
    times.
[ "Estimate", "the", "pericenter", "(", "s", ")", "of", "the", "orbit", "by", "identifying", "local", "minima", "in", "the", "spherical", "radius", "and", "interpolating", "between", "timesteps", "near", "the", "minima", "." ]
python
train
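A hedged usage sketch of the method above (assumes gala and astropy are installed and a gala ~1.x API; the potential, initial conditions, and integration settings are illustrative only):

import astropy.units as u
import gala.dynamics as gd
import gala.potential as gp

pot = gp.MilkyWayPotential()
w0 = gd.PhaseSpacePosition(pos=[8., 0, 0] * u.kpc,
                           vel=[0, 175., 0] * u.km / u.s)
orbit = gp.Hamiltonian(pot).integrate_orbit(w0, dt=1. * u.Myr, n_steps=4000)

peri_mean = orbit.pericenter()                                  # mean of all local minima
peris, t_peri = orbit.pericenter(func=None, return_times=True)  # all minima + times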
oauthlib/oauthlib
oauthlib/oauth1/rfc5849/__init__.py
https://github.com/oauthlib/oauthlib/blob/30321dd3c0ca784d3508a1970cf90d9f76835c79/oauthlib/oauth1/rfc5849/__init__.py#L153-L186
def get_oauth_params(self, request):
    """Get the basic OAuth parameters to be used in generating a signature.
    """
    nonce = (generate_nonce()
             if self.nonce is None
             else self.nonce)
    timestamp = (generate_timestamp()
                 if self.timestamp is None
                 else self.timestamp)
    params = [
        ('oauth_nonce', nonce),
        ('oauth_timestamp', timestamp),
        ('oauth_version', '1.0'),
        ('oauth_signature_method', self.signature_method),
        ('oauth_consumer_key', self.client_key),
    ]
    if self.resource_owner_key:
        params.append(('oauth_token', self.resource_owner_key))
    if self.callback_uri:
        params.append(('oauth_callback', self.callback_uri))
    if self.verifier:
        params.append(('oauth_verifier', self.verifier))

    # providing body hash for requests other than x-www-form-urlencoded
    # as described in https://tools.ietf.org/html/draft-eaton-oauth-bodyhash-00#section-4.1.1
    # 4.1.1. When to include the body hash
    #    * [...] MUST NOT include an oauth_body_hash parameter on requests with form-encoded request bodies
    #    * [...] SHOULD include the oauth_body_hash parameter on all other requests.
    # Note that SHA-1 is vulnerable. The spec acknowledges that in
    # https://tools.ietf.org/html/draft-eaton-oauth-bodyhash-00#section-6.2
    # At this time, no further effort has been made to replace SHA-1 for the
    # OAuth Request Body Hash extension.
    content_type = request.headers.get('Content-Type', None)
    content_type_eligible = content_type and content_type.find(
        'application/x-www-form-urlencoded') < 0
    if request.body is not None and content_type_eligible:
        params.append(('oauth_body_hash',
                       base64.b64encode(
                           hashlib.sha1(request.body.encode('utf-8'))
                           .digest()).decode('utf-8')))

    return params
[ "def", "get_oauth_params", "(", "self", ",", "request", ")", ":", "nonce", "=", "(", "generate_nonce", "(", ")", "if", "self", ".", "nonce", "is", "None", "else", "self", ".", "nonce", ")", "timestamp", "=", "(", "generate_timestamp", "(", ")", "if", "self", ".", "timestamp", "is", "None", "else", "self", ".", "timestamp", ")", "params", "=", "[", "(", "'oauth_nonce'", ",", "nonce", ")", ",", "(", "'oauth_timestamp'", ",", "timestamp", ")", ",", "(", "'oauth_version'", ",", "'1.0'", ")", ",", "(", "'oauth_signature_method'", ",", "self", ".", "signature_method", ")", ",", "(", "'oauth_consumer_key'", ",", "self", ".", "client_key", ")", ",", "]", "if", "self", ".", "resource_owner_key", ":", "params", ".", "append", "(", "(", "'oauth_token'", ",", "self", ".", "resource_owner_key", ")", ")", "if", "self", ".", "callback_uri", ":", "params", ".", "append", "(", "(", "'oauth_callback'", ",", "self", ".", "callback_uri", ")", ")", "if", "self", ".", "verifier", ":", "params", ".", "append", "(", "(", "'oauth_verifier'", ",", "self", ".", "verifier", ")", ")", "# providing body hash for requests other than x-www-form-urlencoded", "# as described in https://tools.ietf.org/html/draft-eaton-oauth-bodyhash-00#section-4.1.1", "# 4.1.1. When to include the body hash", "# * [...] MUST NOT include an oauth_body_hash parameter on requests with form-encoded request bodies", "# * [...] SHOULD include the oauth_body_hash parameter on all other requests.", "# Note that SHA-1 is vulnerable. The spec acknowledges that in https://tools.ietf.org/html/draft-eaton-oauth-bodyhash-00#section-6.2", "# At this time, no further effort has been made to replace SHA-1 for the OAuth Request Body Hash extension.", "content_type", "=", "request", ".", "headers", ".", "get", "(", "'Content-Type'", ",", "None", ")", "content_type_eligible", "=", "content_type", "and", "content_type", ".", "find", "(", "'application/x-www-form-urlencoded'", ")", "<", "0", "if", "request", ".", "body", "is", "not", "None", "and", "content_type_eligible", ":", "params", ".", "append", "(", "(", "'oauth_body_hash'", ",", "base64", ".", "b64encode", "(", "hashlib", ".", "sha1", "(", "request", ".", "body", ".", "encode", "(", "'utf-8'", ")", ")", ".", "digest", "(", ")", ")", ".", "decode", "(", "'utf-8'", ")", ")", ")", "return", "params" ]
Get the basic OAuth parameters to be used in generating a signature.
[ "Get", "the", "basic", "OAuth", "parameters", "to", "be", "used", "in", "generating", "a", "signature", "." ]
python
train
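A standalone illustration of the oauth_body_hash construction used above, using only the standard library (the request body here is a hypothetical example):

import base64
import hashlib

body = '{"hello": "world"}'
# SHA-1 digest of the UTF-8 body, base64-encoded, per draft-eaton-oauth-bodyhash-00
body_hash = base64.b64encode(
    hashlib.sha1(body.encode('utf-8')).digest()).decode('utf-8')
print(('oauth_body_hash', body_hash))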
nicolargo/glances
glances/config.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/config.py#L33-L51
def user_config_dir():
    r"""Return the per-user config dir (full path).

    - Linux, *BSD, SunOS: ~/.config/glances
    - macOS: ~/Library/Application Support/glances
    - Windows: %APPDATA%\glances
    """
    if WINDOWS:
        path = os.environ.get('APPDATA')
    elif MACOS:
        path = os.path.expanduser('~/Library/Application Support')
    else:
        path = os.environ.get('XDG_CONFIG_HOME') or \
            os.path.expanduser('~/.config')
    if path is None:
        path = ''
    else:
        path = os.path.join(path, 'glances')
    return path
[ "def", "user_config_dir", "(", ")", ":", "if", "WINDOWS", ":", "path", "=", "os", ".", "environ", ".", "get", "(", "'APPDATA'", ")", "elif", "MACOS", ":", "path", "=", "os", ".", "path", ".", "expanduser", "(", "'~/Library/Application Support'", ")", "else", ":", "path", "=", "os", ".", "environ", ".", "get", "(", "'XDG_CONFIG_HOME'", ")", "or", "os", ".", "path", ".", "expanduser", "(", "'~/.config'", ")", "if", "path", "is", "None", ":", "path", "=", "''", "else", ":", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'glances'", ")", "return", "path" ]
r"""Return the per-user config dir (full path). - Linux, *BSD, SunOS: ~/.config/glances - macOS: ~/Library/Application Support/glances - Windows: %APPDATA%\glances
[ "r", "Return", "the", "per", "-", "user", "config", "dir", "(", "full", "path", ")", "." ]
python
train
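A standalone sketch of the same XDG fallback logic on Linux/*BSD (a hypothetical demo using only the standard library, not part of glances itself):

import os

# Prefer $XDG_CONFIG_HOME, fall back to ~/.config when it is unset or empty
path = os.environ.get('XDG_CONFIG_HOME') or os.path.expanduser('~/.config')
print(os.path.join(path, 'glances'))  # e.g. /home/user/.config/glances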
frascoweb/frasco
frasco/actions/common.py
https://github.com/frascoweb/frasco/blob/ea519d69dd5ca6deaf3650175692ee4a1a02518f/frasco/actions/common.py#L150-L157
def redirect(view=None, url=None, **kwargs):
    """Redirects to the specified view or url
    """
    if view:
        if url:
            kwargs["url"] = url
        url = flask.url_for(view, **kwargs)
    current_context.exit(flask.redirect(url))
[ "def", "redirect", "(", "view", "=", "None", ",", "url", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "view", ":", "if", "url", ":", "kwargs", "[", "\"url\"", "]", "=", "url", "url", "=", "flask", ".", "url_for", "(", "view", ",", "*", "*", "kwargs", ")", "current_context", ".", "exit", "(", "flask", ".", "redirect", "(", "url", ")", ")" ]
Redirects to the specified view or url
[ "Redirects", "to", "the", "specified", "view", "or", "url" ]
python
train
python-visualization/branca
branca/utilities.py
https://github.com/python-visualization/branca/blob/4e89e88a5a7ff3586f0852249c2c125f72316da8/branca/utilities.py#L218-L256
def image_to_url(image, colormap=None, origin='upper'):
    """Infers the type of an image argument and transforms it into a URL.

    Parameters
    ----------
    image: string, file or array-like object
        * If string, it will be written directly in the output file.
        * If file, its content will be embedded in the output file.
        * If array-like, it will be converted to PNG base64 string and
          embedded in the output.
    origin : ['upper' | 'lower'], optional, default 'upper'
        Place the [0, 0] index of the array in the upper left or lower
        left corner of the axes.
    colormap : callable, used only for `mono` image.
        Function of the form [x -> (r,g,b)] or [x -> (r,g,b,a)] for
        transforming a mono image into RGB. It must output iterables of
        length 3 or 4, with values between 0. and 1.
        Hint: you can use colormaps from `matplotlib.cm`.
    """
    if hasattr(image, 'read'):
        # We got an image file.
        if hasattr(image, 'name'):
            # We try to get the image format from the file name.
            fileformat = image.name.lower().split('.')[-1]
        else:
            fileformat = 'png'
        url = 'data:image/{};base64,{}'.format(
            fileformat, base64.b64encode(image.read()).decode('utf-8'))
    elif (not (isinstance(image, text_type) or
               isinstance(image, binary_type))) and hasattr(image, '__iter__'):
        # We got an array-like object.
        png = write_png(image, origin=origin, colormap=colormap)
        url = 'data:image/png;base64,' + base64.b64encode(png).decode('utf-8')
    else:
        # We got a URL.
        url = json.loads(json.dumps(image))

    return url.replace('\n', ' ')
[ "def", "image_to_url", "(", "image", ",", "colormap", "=", "None", ",", "origin", "=", "'upper'", ")", ":", "if", "hasattr", "(", "image", ",", "'read'", ")", ":", "# We got an image file.", "if", "hasattr", "(", "image", ",", "'name'", ")", ":", "# We try to get the image format from the file name.", "fileformat", "=", "image", ".", "name", ".", "lower", "(", ")", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "else", ":", "fileformat", "=", "'png'", "url", "=", "'data:image/{};base64,{}'", ".", "format", "(", "fileformat", ",", "base64", ".", "b64encode", "(", "image", ".", "read", "(", ")", ")", ".", "decode", "(", "'utf-8'", ")", ")", "elif", "(", "not", "(", "isinstance", "(", "image", ",", "text_type", ")", "or", "isinstance", "(", "image", ",", "binary_type", ")", ")", ")", "and", "hasattr", "(", "image", ",", "'__iter__'", ")", ":", "# We got an array-like object.", "png", "=", "write_png", "(", "image", ",", "origin", "=", "origin", ",", "colormap", "=", "colormap", ")", "url", "=", "'data:image/png;base64,'", "+", "base64", ".", "b64encode", "(", "png", ")", ".", "decode", "(", "'utf-8'", ")", "else", ":", "# We got an URL.", "url", "=", "json", ".", "loads", "(", "json", ".", "dumps", "(", "image", ")", ")", "return", "url", ".", "replace", "(", "'\\n'", ",", "' '", ")" ]
Infers the type of an image argument and transforms it into a URL.

Parameters
----------
image: string, file or array-like object
    * If string, it will be written directly in the output file.
    * If file, its content will be embedded in the output file.
    * If array-like, it will be converted to PNG base64 string and
      embedded in the output.
origin : ['upper' | 'lower'], optional, default 'upper'
    Place the [0, 0] index of the array in the upper left or lower left
    corner of the axes.
colormap : callable, used only for `mono` image.
    Function of the form [x -> (r,g,b)] or [x -> (r,g,b,a)] for
    transforming a mono image into RGB. It must output iterables of
    length 3 or 4, with values between 0. and 1.
    Hint: you can use colormaps from `matplotlib.cm`.
[ "Infers", "the", "type", "of", "an", "image", "argument", "and", "transforms", "it", "into", "a", "URL", "." ]
python
train
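A standalone sketch of the file branch above: embedding an image file's bytes as a base64 data URL with only the standard library ('logo.png' is a hypothetical input file):

import base64

with open('logo.png', 'rb') as f:
    # Same data-URL shape the function builds for file inputs
    url = 'data:image/png;base64,' + base64.b64encode(f.read()).decode('utf-8')
print(url[:40], '...')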
Telefonica/toolium
toolium/pageelements/page_elements.py
https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/pageelements/page_elements.py#L68-L78
def reset_object(self, driver_wrapper=None):
    """Reset each page element object

    :param driver_wrapper: driver wrapper instance
    """
    if driver_wrapper:
        self.driver_wrapper = driver_wrapper
    for element in self._page_elements:
        element.reset_object(driver_wrapper)
    self._web_elements = []
    self._page_elements = []
[ "def", "reset_object", "(", "self", ",", "driver_wrapper", "=", "None", ")", ":", "if", "driver_wrapper", ":", "self", ".", "driver_wrapper", "=", "driver_wrapper", "for", "element", "in", "self", ".", "_page_elements", ":", "element", ".", "reset_object", "(", "driver_wrapper", ")", "self", ".", "_web_elements", "=", "[", "]", "self", ".", "_page_elements", "=", "[", "]" ]
Reset each page element object

:param driver_wrapper: driver wrapper instance
[ "Reset", "each", "page", "element", "object" ]
python
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/job.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/job.py#L2065-L2084
def destination(self):
    """google.cloud.bigquery.table.TableReference: table where results are
    written or :data:`None` if not set.

    The ``destination`` setter accepts:

    - a :class:`~google.cloud.bigquery.table.Table`, or
    - a :class:`~google.cloud.bigquery.table.TableReference`, or
    - a :class:`str` of the fully-qualified table ID in standard SQL
      format. The value must include a project ID, dataset ID, and table
      ID, each separated by ``.``. For example:
      ``your-project.your_dataset.your_table``.

    See
    https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.destinationTable
    """
    prop = self._get_sub_prop("destinationTable")
    if prop is not None:
        prop = TableReference.from_api_repr(prop)
    return prop
[ "def", "destination", "(", "self", ")", ":", "prop", "=", "self", ".", "_get_sub_prop", "(", "\"destinationTable\"", ")", "if", "prop", "is", "not", "None", ":", "prop", "=", "TableReference", ".", "from_api_repr", "(", "prop", ")", "return", "prop" ]
google.cloud.bigquery.table.TableReference: table where results are
written or :data:`None` if not set.

The ``destination`` setter accepts:

- a :class:`~google.cloud.bigquery.table.Table`, or
- a :class:`~google.cloud.bigquery.table.TableReference`, or
- a :class:`str` of the fully-qualified table ID in standard SQL format.
  The value must include a project ID, dataset ID, and table ID, each
  separated by ``.``. For example:
  ``your-project.your_dataset.your_table``.

See
https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.destinationTable
[ "google", ".", "cloud", ".", "bigquery", ".", "table", ".", "TableReference", ":", "table", "where", "results", "are", "written", "or", ":", "data", ":", "None", "if", "not", "set", "." ]
python
train
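A hedged usage sketch for the property above (assumes google-cloud-bigquery is installed and credentials are configured; recent client versions accept a fully-qualified table ID string for the setter, and the project/dataset/table names are hypothetical):

from google.cloud import bigquery

client = bigquery.Client()
job_config = bigquery.QueryJobConfig()
job_config.destination = 'your-project.your_dataset.your_table'
job = client.query('SELECT 1 AS x', job_config=job_config)
print(job.destination.table_id)  # -> 'your_table'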
jtwhite79/pyemu
pyemu/pst/pst_handler.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/pst/pst_handler.py#L2585-L2602
def greater_than_obs_constraints(self):
    """get the names of the observations that are listed as greater than
    inequality constraints. Zero-weighted obs are skipped

    Returns
    -------
    pandas.Series : obsnme of observations that are non-zero weighted
                    greater than constraints
    """
    obs = self.observation_data
    gt_obs = obs.loc[obs.apply(lambda x: self._is_greater_const(x.obgnme)
                               and x.weight != 0.0, axis=1), "obsnme"]
    return gt_obs
[ "def", "greater_than_obs_constraints", "(", "self", ")", ":", "obs", "=", "self", ".", "observation_data", "gt_obs", "=", "obs", ".", "loc", "[", "obs", ".", "apply", "(", "lambda", "x", ":", "self", ".", "_is_greater_const", "(", "x", ".", "obgnme", ")", "and", "x", ".", "weight", "!=", "0.0", ",", "axis", "=", "1", ")", ",", "\"obsnme\"", "]", "return", "gt_obs" ]
get the names of the observations that are listed as greater than
inequality constraints. Zero-weighted obs are skipped

Returns
-------
pandas.Series : obsnme of observations that are non-zero weighted
greater than constraints
[ "get", "the", "names", "of", "the", "observations", "that", "are", "listed", "as", "greater", "than", "inequality", "constraints", ".", "Zero", "-", "weighted", "obs", "are", "skipped" ]
python
train
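A standalone sketch of the same select-by-predicate pattern with plain pandas (hypothetical data; the startswith check stands in for the private _is_greater_const helper):

import pandas as pd

obs = pd.DataFrame({'obsnme': ['o1', 'o2', 'o3'],
                    'obgnme': ['greater_h', 'less_h', 'greater_h'],
                    'weight': [1.0, 1.0, 0.0]})
# keep non-zero-weighted rows whose group marks a greater-than constraint
gt = obs.loc[obs.obgnme.str.startswith('greater') & (obs.weight != 0.0),
             'obsnme']
print(gt.tolist())  # ['o1']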
zhmcclient/python-zhmcclient
zhmcclient/_nic.py
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_nic.py#L291-L324
def update_properties(self, properties):
    """
    Update writeable properties of this NIC.

    Authorization requirements:

    * Object-access permission to the Partition containing this NIC.
    * Object-access permission to the backing Adapter for this NIC.
    * Task permission to the "Partition Details" task.

    Parameters:

      properties (dict): New values for the properties to be updated.
        Properties not to be updated are omitted.
        Allowable properties are the properties with qualifier (w) in
        section 'Data model - NIC Element Object' in the :term:`HMC API`
        book.

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    self.manager.session.post(self.uri, body=properties)
    is_rename = self.manager._name_prop in properties
    if is_rename:
        # Delete the old name from the cache
        self.manager._name_uri_cache.delete(self.name)
    self.properties.update(copy.deepcopy(properties))
    if is_rename:
        # Add the new name to the cache
        self.manager._name_uri_cache.update(self.name, self.uri)
[ "def", "update_properties", "(", "self", ",", "properties", ")", ":", "self", ".", "manager", ".", "session", ".", "post", "(", "self", ".", "uri", ",", "body", "=", "properties", ")", "is_rename", "=", "self", ".", "manager", ".", "_name_prop", "in", "properties", "if", "is_rename", ":", "# Delete the old name from the cache", "self", ".", "manager", ".", "_name_uri_cache", ".", "delete", "(", "self", ".", "name", ")", "self", ".", "properties", ".", "update", "(", "copy", ".", "deepcopy", "(", "properties", ")", ")", "if", "is_rename", ":", "# Add the new name to the cache", "self", ".", "manager", ".", "_name_uri_cache", ".", "update", "(", "self", ".", "name", ",", "self", ".", "uri", ")" ]
Update writeable properties of this NIC.

Authorization requirements:

* Object-access permission to the Partition containing this NIC.
* Object-access permission to the backing Adapter for this NIC.
* Task permission to the "Partition Details" task.

Parameters:

  properties (dict): New values for the properties to be updated.
    Properties not to be updated are omitted.
    Allowable properties are the properties with qualifier (w) in
    section 'Data model - NIC Element Object' in the :term:`HMC API` book.

Raises:

  :exc:`~zhmcclient.HTTPError`
  :exc:`~zhmcclient.ParseError`
  :exc:`~zhmcclient.AuthError`
  :exc:`~zhmcclient.ConnectionError`
[ "Update", "writeable", "properties", "of", "this", "NIC", "." ]
python
train
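A hedged usage sketch (assumes a reachable HMC and zhmcclient installed; host, credentials, and all resource names are hypothetical, and 'description' is a writeable NIC property per the HMC data model):

import zhmcclient

session = zhmcclient.Session('hmc.example.com', 'user', 'password')
client = zhmcclient.Client(session)
cpc = client.cpcs.find(name='CPC1')
partition = cpc.partitions.find(name='PART1')
nic = partition.nics.find(name='nic1')
# Updates the HMC first, then the local property cache; passing the name
# property would also refresh the name-to-URI cache as shown above.
nic.update_properties({'description': 'uplink to production VLAN'})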
pymc-devs/pymc
pymc/diagnostics.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/diagnostics.py#L497-L552
def effective_n(x):
    r"""
    Returns estimate of the effective sample size of a set of traces.

    Parameters
    ----------
    x : array-like
        An array containing the 2 or more traces of a stochastic parameter.
        That is, an array of dimension m x n x k, where m is the number of
        traces, n the number of samples, and k the dimension of the
        stochastic.

    Returns
    -------
    n_eff : float
        Return the effective sample size, :math:`\hat{n}_{eff}`

    Notes
    -----
    The diagnostic is computed by:

    .. math:: \hat{n}_{eff} = \frac{mn}{1 + 2 \sum_{t=1}^T \hat{\rho}_t}

    where :math:`\hat{\rho}_t` is the estimated autocorrelation at lag t,
    and T is the first odd positive integer for which the sum
    :math:`\hat{\rho}_{T+1} + \hat{\rho}_{T+2}` is negative.

    References
    ----------
    Gelman et al. (2014)"""
    if np.shape(x) < (2,):
        raise ValueError(
            'Calculation of effective sample size requires multiple chains '
            'of the same length.')
    try:
        m, n = np.shape(x)
    except ValueError:
        return [effective_n(np.transpose(y)) for y in np.transpose(x)]

    s2 = gelman_rubin(x, return_var=True)

    negative_autocorr = False
    t = 1

    variogram = lambda t: (sum(sum((x[j][i] - x[j][i - t]) ** 2
                                   for i in range(t, n))
                               for j in range(m)) / (m * (n - t)))
    rho = np.ones(n)
    # Iterate until the sum of consecutive estimates of autocorrelation is
    # negative
    while not negative_autocorr and (t < n):
        rho[t] = 1. - variogram(t) / (2. * s2)
        if not t % 2:
            negative_autocorr = sum(rho[t - 1:t + 1]) < 0
        t += 1

    return int(m * n / (1 + 2 * rho[1:t].sum()))
[ "def", "effective_n", "(", "x", ")", ":", "if", "np", ".", "shape", "(", "x", ")", "<", "(", "2", ",", ")", ":", "raise", "ValueError", "(", "'Calculation of effective sample size requires multiple chains of the same length.'", ")", "try", ":", "m", ",", "n", "=", "np", ".", "shape", "(", "x", ")", "except", "ValueError", ":", "return", "[", "effective_n", "(", "np", ".", "transpose", "(", "y", ")", ")", "for", "y", "in", "np", ".", "transpose", "(", "x", ")", "]", "s2", "=", "gelman_rubin", "(", "x", ",", "return_var", "=", "True", ")", "negative_autocorr", "=", "False", "t", "=", "1", "variogram", "=", "lambda", "t", ":", "(", "sum", "(", "sum", "(", "(", "x", "[", "j", "]", "[", "i", "]", "-", "x", "[", "j", "]", "[", "i", "-", "t", "]", ")", "**", "2", "for", "i", "in", "range", "(", "t", ",", "n", ")", ")", "for", "j", "in", "range", "(", "m", ")", ")", "/", "(", "m", "*", "(", "n", "-", "t", ")", ")", ")", "rho", "=", "np", ".", "ones", "(", "n", ")", "# Iterate until the sum of consecutive estimates of autocorrelation is negative", "while", "not", "negative_autocorr", "and", "(", "t", "<", "n", ")", ":", "rho", "[", "t", "]", "=", "1.", "-", "variogram", "(", "t", ")", "/", "(", "2.", "*", "s2", ")", "if", "not", "t", "%", "2", ":", "negative_autocorr", "=", "sum", "(", "rho", "[", "t", "-", "1", ":", "t", "+", "1", "]", ")", "<", "0", "t", "+=", "1", "return", "int", "(", "m", "*", "n", "/", "(", "1", "+", "2", "*", "rho", "[", "1", ":", "t", "]", ".", "sum", "(", ")", ")", ")" ]
Returns estimate of the effective sample size of a set of traces.

Parameters
----------
x : array-like
    An array containing the 2 or more traces of a stochastic parameter.
    That is, an array of dimension m x n x k, where m is the number of
    traces, n the number of samples, and k the dimension of the
    stochastic.

Returns
-------
n_eff : float
    Return the effective sample size, :math:`\hat{n}_{eff}`

Notes
-----
The diagnostic is computed by:

.. math:: \hat{n}_{eff} = \frac{mn}{1 + 2 \sum_{t=1}^T \hat{\rho}_t}

where :math:`\hat{\rho}_t` is the estimated autocorrelation at lag t,
and T is the first odd positive integer for which the sum
:math:`\hat{\rho}_{T+1} + \hat{\rho}_{T+2}` is negative.

References
----------
Gelman et al. (2014)
[ "Returns", "estimate", "of", "the", "effective", "sample", "size", "of", "a", "set", "of", "traces", "." ]
python
train
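A hedged usage sketch for the diagnostic above (assumes a pymc 2.x install where this module lives; the chains are synthetic independent draws, so the estimate should land near the total sample count):

import numpy as np
from pymc.diagnostics import effective_n

x = np.random.normal(size=(2, 1000))  # m=2 chains, n=1000 samples each
print(effective_n(x))  # close to 2000 for independent draws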
opendatateam/udata
udata/core/dataset/models.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/models.py#L149-L185
def guess_one(cls, text):
    '''
    Try to guess a license from a string.

    Try an exact match on identifier, then on slugified title, and fall
    back on edit distance ranking (after slugification).
    '''
    if not text:
        return
    qs = cls.objects
    text = text.strip().lower()  # Stored identifiers are lower case
    slug = cls.slug.slugify(text)  # Use slug as it normalizes the string
    license = qs(
        db.Q(id=text) | db.Q(slug=slug) | db.Q(url=text) |
        db.Q(alternate_urls=text)
    ).first()
    if license is None:
        # Try a single match with a low Damerau-Levenshtein distance
        computed = ((l, rdlevenshtein(l.slug, slug)) for l in cls.objects)
        candidates = [l for l, d in computed if d <= MAX_DISTANCE]
        # If there is more than one match, we cannot determine
        # which one is closer to safely choose between candidates
        if len(candidates) == 1:
            license = candidates[0]
    if license is None:
        # Try a single match with a low Damerau-Levenshtein distance
        computed = (
            (l, rdlevenshtein(cls.slug.slugify(t), slug))
            for l in cls.objects
            for t in l.alternate_titles
        )
        candidates = [l for l, d in computed if d <= MAX_DISTANCE]
        # If there is more than one match, we cannot determine
        # which one is closer to safely choose between candidates
        if len(candidates) == 1:
            license = candidates[0]
    return license
[ "def", "guess_one", "(", "cls", ",", "text", ")", ":", "if", "not", "text", ":", "return", "qs", "=", "cls", ".", "objects", "text", "=", "text", ".", "strip", "(", ")", ".", "lower", "(", ")", "# Stored identifiers are lower case", "slug", "=", "cls", ".", "slug", ".", "slugify", "(", "text", ")", "# Use slug as it normalize string", "license", "=", "qs", "(", "db", ".", "Q", "(", "id", "=", "text", ")", "|", "db", ".", "Q", "(", "slug", "=", "slug", ")", "|", "db", ".", "Q", "(", "url", "=", "text", ")", "|", "db", ".", "Q", "(", "alternate_urls", "=", "text", ")", ")", ".", "first", "(", ")", "if", "license", "is", "None", ":", "# Try to single match with a low Damerau-Levenshtein distance", "computed", "=", "(", "(", "l", ",", "rdlevenshtein", "(", "l", ".", "slug", ",", "slug", ")", ")", "for", "l", "in", "cls", ".", "objects", ")", "candidates", "=", "[", "l", "for", "l", ",", "d", "in", "computed", "if", "d", "<=", "MAX_DISTANCE", "]", "# If there is more that one match, we cannot determinate", "# which one is closer to safely choose between candidates", "if", "len", "(", "candidates", ")", "==", "1", ":", "license", "=", "candidates", "[", "0", "]", "if", "license", "is", "None", ":", "# Try to single match with a low Damerau-Levenshtein distance", "computed", "=", "(", "(", "l", ",", "rdlevenshtein", "(", "cls", ".", "slug", ".", "slugify", "(", "t", ")", ",", "slug", ")", ")", "for", "l", "in", "cls", ".", "objects", "for", "t", "in", "l", ".", "alternate_titles", ")", "candidates", "=", "[", "l", "for", "l", ",", "d", "in", "computed", "if", "d", "<=", "MAX_DISTANCE", "]", "# If there is more that one match, we cannot determinate", "# which one is closer to safely choose between candidates", "if", "len", "(", "candidates", ")", "==", "1", ":", "license", "=", "candidates", "[", "0", "]", "return", "license" ]
Try to guess a license from a string.

Try an exact match on identifier, then on slugified title, and fall back
on edit distance ranking (after slugification).
[ "Try", "to", "guess", "license", "from", "a", "string", "." ]
python
train
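A standalone sketch of the fuzzy fallback, with difflib's similarity ratio standing in for the Damerau-Levenshtein ranking used above (hypothetical slugs; stdlib only):

import difflib

slugs = ['cc-by', 'cc-by-sa', 'odc-odbl']

def guess(slug):
    candidates = difflib.get_close_matches(slug, slugs, n=2, cutoff=0.8)
    # Only accept a single unambiguous match, mirroring the logic above
    return candidates[0] if len(candidates) == 1 else None

print(guess('cc-bv'))  # 'cc-by'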
NuGrid/NuGridPy
nugridpy/mesa.py
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L1446-L1536
def kippenhahn_CO(self, num_frame, xax, t0_model=0,
                  title='Kippenhahn diagram', tp_agb=0., ylim_CO=[0, 0]):
    """
    Kippenhahn plot as a function of time or model with CO ratio

    Parameters
    ----------
    num_frame : integer
        Number of frame to plot this plot into.
    xax : string
        Either model or time to indicate what is to be used on the x-axis.
    t0_model : integer, optional
        Model for the zero point in time, for AGB plots this would be
        usually the model of the 1st TP, which can be found with the
        Kippenhahn plot. The default is 0.
    title : string, optional
        Figure title. The default is "Kippenhahn diagram".
    tp_agb : float, optional
        If >= 0 then ylim=[h1_min*(1.-tp_agb/100) : h1_max*(1.+tp_agb/100)]
        with h1_min, h1_max the min and max H-free core mass coordinate.
        The default is 0.
    ylim_CO : list
        If ylim_CO is [0,0], then it is automatically set.
        The default is [0,0].
    """
    pyl.figure(num_frame)

    if xax == 'time':
        xaxisarray = self.get('star_age')
    elif xax == 'model':
        xaxisarray = self.get('model_number')
    else:
        print('kippenhahn_error: invalid string for x-axis selection.' +
              ' needs to be "time" or "model"')

    t0_mod = xaxisarray[t0_model]

    plot_bounds = True
    try:
        h1_boundary_mass = self.get('h1_boundary_mass')
        he4_boundary_mass = self.get('he4_boundary_mass')
    except:
        try:
            h1_boundary_mass = self.get('he_core_mass')
            he4_boundary_mass = self.get('c_core_mass')
        except:
            plot_bounds = False

    star_mass = self.get('star_mass')
    mx1_bot = self.get('mx1_bot') * star_mass
    mx1_top = self.get('mx1_top') * star_mass
    mx2_bot = self.get('mx2_bot') * star_mass
    mx2_top = self.get('mx2_top') * star_mass
    surface_c12 = self.get('surface_c12')
    surface_o16 = self.get('surface_o16')

    COratio = old_div((surface_c12 * 4.), (surface_o16 * 3.))

    pyl.plot(xaxisarray[t0_model:] - t0_mod, COratio[t0_model:],
             '-k', label='CO ratio')
    pyl.ylabel('C/O ratio')
    pyl.legend(loc=4)
    # value comparison, not identity ('is not 0' is unreliable for ints)
    if ylim_CO[0] != 0 and ylim_CO[1] != 0:
        pyl.ylim(ylim_CO)
    if xax == 'time':
        pyl.xlabel('t / yrs')
    elif xax == 'model':
        pyl.xlabel('model number')

    pyl.twinx()
    if plot_bounds:
        pyl.plot(xaxisarray[t0_model:] - t0_mod,
                 h1_boundary_mass[t0_model:], label='h1_boundary_mass')
        pyl.plot(xaxisarray[t0_model:] - t0_mod,
                 he4_boundary_mass[t0_model:], label='he4_boundary_mass')
    pyl.plot(xaxisarray[t0_model:] - t0_mod, mx1_bot[t0_model:],
             ',r', label='conv bound')
    pyl.plot(xaxisarray[t0_model:] - t0_mod, mx1_top[t0_model:], ',r')
    pyl.plot(xaxisarray[t0_model:] - t0_mod, mx2_bot[t0_model:], ',r')
    pyl.plot(xaxisarray[t0_model:] - t0_mod, mx2_top[t0_model:], ',r')
    pyl.plot(xaxisarray[t0_model:] - t0_mod, star_mass[t0_model:],
             label='star_mass')
    pyl.ylabel('mass coordinate')
    pyl.legend(loc=2)

    if tp_agb > 0.:
        h1_min = min(h1_boundary_mass[t0_model:])
        h1_max = max(h1_boundary_mass[t0_model:])
        h1_min = h1_min * (1. - old_div(tp_agb, 100.))
        h1_max = h1_max * (1. + old_div(tp_agb, 100.))
        print('setting ylim to zoom in on H-burning:', h1_min, h1_max)
        pyl.ylim(h1_min, h1_max)
[ "def", "kippenhahn_CO", "(", "self", ",", "num_frame", ",", "xax", ",", "t0_model", "=", "0", ",", "title", "=", "'Kippenhahn diagram'", ",", "tp_agb", "=", "0.", ",", "ylim_CO", "=", "[", "0", ",", "0", "]", ")", ":", "pyl", ".", "figure", "(", "num_frame", ")", "if", "xax", "==", "'time'", ":", "xaxisarray", "=", "self", ".", "get", "(", "'star_age'", ")", "elif", "xax", "==", "'model'", ":", "xaxisarray", "=", "self", ".", "get", "(", "'model_number'", ")", "else", ":", "print", "(", "'kippenhahn_error: invalid string for x-axis selction.'", "+", "' needs to be \"time\" or \"model\"'", ")", "t0_mod", "=", "xaxisarray", "[", "t0_model", "]", "plot_bounds", "=", "True", "try", ":", "h1_boundary_mass", "=", "self", ".", "get", "(", "'h1_boundary_mass'", ")", "he4_boundary_mass", "=", "self", ".", "get", "(", "'he4_boundary_mass'", ")", "except", ":", "try", ":", "h1_boundary_mass", "=", "self", ".", "get", "(", "'he_core_mass'", ")", "he4_boundary_mass", "=", "self", ".", "get", "(", "'c_core_mass'", ")", "except", ":", "plot_bounds", "=", "False", "star_mass", "=", "self", ".", "get", "(", "'star_mass'", ")", "mx1_bot", "=", "self", ".", "get", "(", "'mx1_bot'", ")", "*", "star_mass", "mx1_top", "=", "self", ".", "get", "(", "'mx1_top'", ")", "*", "star_mass", "mx2_bot", "=", "self", ".", "get", "(", "'mx2_bot'", ")", "*", "star_mass", "mx2_top", "=", "self", ".", "get", "(", "'mx2_top'", ")", "*", "star_mass", "surface_c12", "=", "self", ".", "get", "(", "'surface_c12'", ")", "surface_o16", "=", "self", ".", "get", "(", "'surface_o16'", ")", "COratio", "=", "old_div", "(", "(", "surface_c12", "*", "4.", ")", ",", "(", "surface_o16", "*", "3.", ")", ")", "pyl", ".", "plot", "(", "xaxisarray", "[", "t0_model", ":", "]", "-", "t0_mod", ",", "COratio", "[", "t0_model", ":", "]", ",", "'-k'", ",", "label", "=", "'CO ratio'", ")", "pyl", ".", "ylabel", "(", "'C/O ratio'", ")", "pyl", ".", "legend", "(", "loc", "=", "4", ")", "if", "ylim_CO", "[", "0", "]", "is", "not", "0", "and", "ylim_CO", "[", "1", "]", "is", "not", "0", ":", "pyl", ".", "ylim", "(", "ylim_CO", ")", "if", "xax", "==", "'time'", ":", "pyl", ".", "xlabel", "(", "'t / yrs'", ")", "elif", "xax", "==", "'model'", ":", "pyl", ".", "xlabel", "(", "'model number'", ")", "pyl", ".", "twinx", "(", ")", "if", "plot_bounds", ":", "pyl", ".", "plot", "(", "xaxisarray", "[", "t0_model", ":", "]", "-", "t0_mod", ",", "h1_boundary_mass", "[", "t0_model", ":", "]", ",", "label", "=", "'h1_boundary_mass'", ")", "pyl", ".", "plot", "(", "xaxisarray", "[", "t0_model", ":", "]", "-", "t0_mod", ",", "he4_boundary_mass", "[", "t0_model", ":", "]", ",", "label", "=", "'he4_boundary_mass'", ")", "pyl", ".", "plot", "(", "xaxisarray", "[", "t0_model", ":", "]", "-", "t0_mod", ",", "mx1_bot", "[", "t0_model", ":", "]", ",", "',r'", ",", "label", "=", "'conv bound'", ")", "pyl", ".", "plot", "(", "xaxisarray", "[", "t0_model", ":", "]", "-", "t0_mod", ",", "mx1_top", "[", "t0_model", ":", "]", ",", "',r'", ")", "pyl", ".", "plot", "(", "xaxisarray", "[", "t0_model", ":", "]", "-", "t0_mod", ",", "mx2_bot", "[", "t0_model", ":", "]", ",", "',r'", ")", "pyl", ".", "plot", "(", "xaxisarray", "[", "t0_model", ":", "]", "-", "t0_mod", ",", "mx2_top", "[", "t0_model", ":", "]", ",", "',r'", ")", "pyl", ".", "plot", "(", "xaxisarray", "[", "t0_model", ":", "]", "-", "t0_mod", ",", "star_mass", "[", "t0_model", ":", "]", ",", "label", "=", "'star_mass'", ")", "pyl", ".", "ylabel", "(", "'mass coordinate'", ")", "pyl", ".", "legend", "(", "loc", "=", "2", 
")", "if", "tp_agb", ">", "0.", ":", "h1_min", "=", "min", "(", "h1_boundary_mass", "[", "t0_model", ":", "]", ")", "h1_max", "=", "max", "(", "h1_boundary_mass", "[", "t0_model", ":", "]", ")", "h1_min", "=", "h1_min", "*", "(", "1.", "-", "old_div", "(", "tp_agb", ",", "100.", ")", ")", "h1_max", "=", "h1_max", "*", "(", "1.", "+", "old_div", "(", "tp_agb", ",", "100.", ")", ")", "print", "(", "'setting ylim to zoom in on H-burning:'", ",", "h1_min", ",", "h1_max", ")", "pyl", ".", "ylim", "(", "h1_min", ",", "h1_max", ")" ]
Kippenhahn plot as a function of time or model with CO ratio

Parameters
----------
num_frame : integer
    Number of frame to plot this plot into.
xax : string
    Either model or time to indicate what is to be used on the x-axis.
t0_model : integer, optional
    Model for the zero point in time, for AGB plots this would be usually
    the model of the 1st TP, which can be found with the Kippenhahn plot.
    The default is 0.
title : string, optional
    Figure title. The default is "Kippenhahn diagram".
tp_agb : float, optional
    If >= 0 then ylim=[h1_min*(1.-tp_agb/100) : h1_max*(1.+tp_agb/100)]
    with h1_min, h1_max the min and max H-free core mass coordinate.
    The default is 0.
ylim_CO : list
    If ylim_CO is [0,0], then it is automatically set.
    The default is [0,0].
[ "Kippenhahn", "plot", "as", "a", "function", "of", "time", "or", "model", "with", "CO", "ratio" ]
python
train
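A worked example of the C/O number-ratio formula used above: the number ratio (X_C12/12)/(X_O16/16) simplifies to (X_C12*4)/(X_O16*3), which is exactly the COratio expression in the code (surface mass fractions here are hypothetical):

surface_c12, surface_o16 = 0.003, 0.008
CO = (surface_c12 * 4.) / (surface_o16 * 3.)  # = (0.003/12) / (0.008/16)
print(CO)  # 0.5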
pandas-dev/pandas
pandas/core/arrays/categorical.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L2031-L2037
def _maybe_coerce_indexer(self, indexer):
    """ return an indexer coerced to the codes dtype """
    if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
        indexer = indexer.astype(self._codes.dtype)
    return indexer
[ "def", "_maybe_coerce_indexer", "(", "self", ",", "indexer", ")", ":", "if", "isinstance", "(", "indexer", ",", "np", ".", "ndarray", ")", "and", "indexer", ".", "dtype", ".", "kind", "==", "'i'", ":", "indexer", "=", "indexer", ".", "astype", "(", "self", ".", "_codes", ".", "dtype", ")", "return", "indexer" ]
return an indexer coerced to the codes dtype
[ "return", "an", "indexer", "coerced", "to", "the", "codes", "dtype" ]
python
train
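A standalone sketch of why the coercion above matters: categorical codes are stored with the narrowest integer dtype that fits (here int8), while a fresh indexer defaults to the platform integer:

import numpy as np
import pandas as pd

cat = pd.Categorical(['a', 'b', 'a'])
print(cat.codes.dtype)                  # int8
indexer = np.array([0, 2])              # int64 on most platforms
print(indexer.astype(cat.codes.dtype))  # coerced to match the codes dtype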
senaite/senaite.core
bika/lims/browser/analyses/view.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/analyses/view.py#L259-L293
def is_analysis_edition_allowed(self, analysis_brain):
    """Returns whether the analysis passed in can be edited by the current
    user

    :param analysis_brain: Brain that represents an analysis
    :return: True if the user can edit the analysis, otherwise False
    """
    if not self.context_active:
        # The current context must be active. We cannot edit analyses from
        # inside a deactivated Analysis Request, for instance
        return False

    analysis_obj = api.get_object(analysis_brain)
    if analysis_obj.getPointOfCapture() == 'field':
        # This analysis must be captured on field, during sampling.
        if not self.has_permission(EditFieldResults, analysis_obj):
            # Current user cannot edit field analyses.
            return False

    elif not self.has_permission(EditResults, analysis_obj):
        # The Point of Capture is 'lab' and the current user cannot edit
        # lab analyses.
        return False

    # Check if the user is allowed to enter a value to the Result field
    if not self.has_permission(FieldEditAnalysisResult, analysis_obj):
        return False

    # Is the instrument out of date?
    # The user can assign a result to the analysis if it does not have any
    # instrument assigned or the instrument assigned is valid.
    if not self.is_analysis_instrument_valid(analysis_brain):
        # return if it is allowed to enter a manual result
        return analysis_obj.getManualEntryOfResults()

    return True
[ "def", "is_analysis_edition_allowed", "(", "self", ",", "analysis_brain", ")", ":", "if", "not", "self", ".", "context_active", ":", "# The current context must be active. We cannot edit analyses from", "# inside a deactivated Analysis Request, for instance", "return", "False", "analysis_obj", "=", "api", ".", "get_object", "(", "analysis_brain", ")", "if", "analysis_obj", ".", "getPointOfCapture", "(", ")", "==", "'field'", ":", "# This analysis must be captured on field, during sampling.", "if", "not", "self", ".", "has_permission", "(", "EditFieldResults", ",", "analysis_obj", ")", ":", "# Current user cannot edit field analyses.", "return", "False", "elif", "not", "self", ".", "has_permission", "(", "EditResults", ",", "analysis_obj", ")", ":", "# The Point of Capture is 'lab' and the current user cannot edit", "# lab analyses.", "return", "False", "# Check if the user is allowed to enter a value to to Result field", "if", "not", "self", ".", "has_permission", "(", "FieldEditAnalysisResult", ",", "analysis_obj", ")", ":", "return", "False", "# Is the instrument out of date?", "# The user can assign a result to the analysis if it does not have any", "# instrument assigned or the instrument assigned is valid.", "if", "not", "self", ".", "is_analysis_instrument_valid", "(", "analysis_brain", ")", ":", "# return if it is allowed to enter a manual result", "return", "analysis_obj", ".", "getManualEntryOfResults", "(", ")", "return", "True" ]
Returns whether the analysis passed in can be edited by the current user

:param analysis_brain: Brain that represents an analysis
:return: True if the user can edit the analysis, otherwise False
[ "Returns", "if", "the", "analysis", "passed", "in", "can", "be", "edited", "by", "the", "current", "user" ]
python
train
dddomodossola/remi
examples/examples_from_contributors/Display_TreeTable.py
https://github.com/dddomodossola/remi/blob/85206f62220662bb7ecd471042268def71ccad28/examples/examples_from_contributors/Display_TreeTable.py#L51-L120
def Display_TreeTable(self, table):
    '''Display a table in which the values in the first column form one or
    more trees. The table has rows with fields that are strings of
    identifiers/names.
    First convert each row into a row_widget and item_widgets that are
    displayed in a TableTree.
    Each input row shall start with a parent field (field[0]) that
    determines the tree hierarchy but that is not displayed on that row.
    The parent widget becomes an attribute of the first child widget.
    Field[1] is the row color, field[2:] contains the row values.
    Top child(s) shall have a parent field value that is blank ('').
    The input table rows shall be in the correct sequence.
    '''
    parent_names = []
    hierarchy = {}
    indent_level = 0
    widget_dict = {}  # key, value = name, widget
    for row in table:
        parent_name = row[0]
        row_color = row[1]
        child_name = row[2]
        row_widget = gui.TableRow(style={'background-color': row_color})

        # Determine whether the hierarchy of sub_sub concepts shall be
        # open or not
        openness = 'true'
        row_widget.attributes['treeopen'] = openness
        # widget_dict[child_name] = row_widget
        for index, field in enumerate(row[2:]):
            # Determine field color
            field_color = '#ffff99'
            row_item = gui.TableItem(text=field,
                                     style={'text-align': 'left',
                                            'background-color': field_color})
            row_widget.append(row_item, field)
            if index == 0:
                row_item.parent = parent_name
                child_id = row_item

        # The root of each tree has a parent that is blank ('').
        # Each row with children has as attribute openness, which by
        # default is 'true'.
        # The fields can be given other attributes, such as color.
        # Verify whether the parent_name (child.parent)
        # is present or is in the list of parent_names.
        print('parent-child:', parent_name, child_name)
        if parent_name == '':
            hierarchy[child_name] = 0
            parent_names.append(child_name)
            target_level = 0
        elif parent_name in parent_names:
            hierarchy[child_name] = hierarchy[parent_name] + 1
            target_level = hierarchy[child_name]
        else:
            # Parent not in parent_names
            print('Error: Parent name "{}" does not appear in network'
                  .format(parent_name))
            return
        print('indent, target-pre:', indent_level, target_level,
              parent_name, child_name)

        # Indentation
        if target_level > indent_level:
            self.tree_table.begin_fold()
            indent_level += 1
        if target_level < indent_level:
            while target_level < indent_level:
                indent_level += -1
                self.tree_table.end_fold()
        print('indent, target-post:', indent_level, target_level,
              parent_name, child_name)
        if child_name not in parent_names:
            parent_names.append(child_name)
        self.tree_table.append(row_widget, child_name)
[ "def", "Display_TreeTable", "(", "self", ",", "table", ")", ":", "parent_names", "=", "[", "]", "hierarchy", "=", "{", "}", "indent_level", "=", "0", "widget_dict", "=", "{", "}", "# key, value = name, widget", "for", "row", "in", "table", ":", "parent_name", "=", "row", "[", "0", "]", "row_color", "=", "row", "[", "1", "]", "child_name", "=", "row", "[", "2", "]", "row_widget", "=", "gui", ".", "TableRow", "(", "style", "=", "{", "'background-color'", ":", "row_color", "}", ")", "# Determine whether hierarchy of sub_sub concepts shall be open or not", "openness", "=", "'true'", "row_widget", ".", "attributes", "[", "'treeopen'", "]", "=", "openness", "# widget_dict[child_name] = row_widget", "for", "index", ",", "field", "in", "enumerate", "(", "row", "[", "2", ":", "]", ")", ":", "# Determine field color", "field_color", "=", "'#ffff99'", "row_item", "=", "gui", ".", "TableItem", "(", "text", "=", "field", ",", "style", "=", "{", "'text-align'", ":", "'left'", ",", "'background-color'", ":", "field_color", "}", ")", "row_widget", ".", "append", "(", "row_item", ",", "field", ")", "if", "index", "==", "0", ":", "row_item", ".", "parent", "=", "parent_name", "child_id", "=", "row_item", "# The root of each tree has a parent that is blank ('').", "# Each row with childs has as attribute openness, which by default is 'true'.", "# The fields can be given other attributes, such as color.", "# Verify whether the parent_name (child.parent)", "# is present or is in the list of parent_names.", "print", "(", "'parent-child:'", ",", "parent_name", ",", "child_name", ")", "if", "parent_name", "==", "''", ":", "hierarchy", "[", "child_name", "]", "=", "0", "parent_names", ".", "append", "(", "child_name", ")", "target_level", "=", "0", "elif", "parent_name", "in", "parent_names", ":", "hierarchy", "[", "child_name", "]", "=", "hierarchy", "[", "parent_name", "]", "+", "1", "target_level", "=", "hierarchy", "[", "child_name", "]", "else", ":", "# Parent not in parent_names", "print", "(", "'Error: Parent name \"{}\" does not appear in network'", ".", "format", "(", "parent_name", ")", ")", "return", "print", "(", "'indent, target-pre:'", ",", "indent_level", ",", "target_level", ",", "parent_name", ",", "child_name", ")", "# Indentation", "if", "target_level", ">", "indent_level", ":", "self", ".", "tree_table", ".", "begin_fold", "(", ")", "indent_level", "+=", "1", "if", "target_level", "<", "indent_level", ":", "while", "target_level", "<", "indent_level", ":", "indent_level", "+=", "-", "1", "self", ".", "tree_table", ".", "end_fold", "(", ")", "print", "(", "'indent, target-post:'", ",", "indent_level", ",", "target_level", ",", "parent_name", ",", "child_name", ")", "if", "child_name", "not", "in", "parent_names", ":", "parent_names", ".", "append", "(", "child_name", ")", "self", ".", "tree_table", ".", "append", "(", "row_widget", ",", "child_name", ")" ]
Display a table in which the values in the first column form one or more
trees. The table has rows with fields that are strings of
identifiers/names.
First convert each row into a row_widget and item_widgets that are
displayed in a TableTree.
Each input row shall start with a parent field (field[0]) that determines
the tree hierarchy but that is not displayed on that row.
The parent widget becomes an attribute of the first child widget.
Field[1] is the row color, field[2:] contains the row values.
Top child(s) shall have a parent field value that is blank ('').
The input table rows shall be in the correct sequence.
[ "Display", "a", "table", "in", "which", "the", "values", "in", "first", "column", "form", "one", "or", "more", "trees", ".", "The", "table", "has", "row", "with", "fields", "that", "are", "strings", "of", "identifiers", "/", "names", ".", "First", "convert", "each", "row", "into", "a", "row_widget", "and", "item_widgets", "that", "are", "displayed", "in", "a", "TableTree", ".", "Each", "input", "row", "shall", "start", "with", "a", "parent", "field", "(", "field", "[", "0", "]", ")", "that", "determines", "the", "tree", "hierarchy", "but", "that", "is", "not", "displayed", "on", "that", "row", ".", "The", "parent", "widget", "becomes", "an", "attribute", "of", "the", "first", "child", "widget", ".", "Field", "[", "1", "]", "is", "the", "row", "color", "field", "[", "2", ":", "]", "contains", "the", "row", "values", ".", "Top", "child", "(", "s", ")", "shall", "have", "a", "parent", "field", "value", "that", "is", "blank", "(", ")", ".", "The", "input", "table", "rows", "shall", "be", "in", "the", "correct", "sequence", "." ]
python
train
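A hypothetical input for Display_TreeTable, following the row layout the docstring describes (field[0] parent name, field[1] row color, field[2] node name, remaining fields are row values; rows already in tree order):

table = [
    ['',        '#ffffff', 'root',    'top-level concept'],
    ['root',    '#eeeeee', 'child-1', 'first child'],
    ['child-1', '#dddddd', 'leaf-1',  'nested under child-1'],
    ['root',    '#eeeeee', 'child-2', 'second child'],
]
# called from within the remi App, e.g. self.Display_TreeTable(table)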
nickmckay/LiPD-utilities
Python/lipd/dataframes.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/dataframes.py#L36-L55
def create_dataframe(ensemble):
    """
    Create a data frame from given nested lists of ensemble data

    :param list ensemble: Ensemble data
    :return obj: Dataframe
    """
    logger_dataframes.info("enter ens_to_df")

    # "Flatten" the nested lists. Bring all nested lists up to top-level.
    # Output looks like [ [1,2], [1,2], ... ]
    ll = unwrap_arrays(ensemble)

    # Check that list lengths are all equal
    valid = match_arr_lengths(ll)

    if valid:
        # Lists are equal lengths, create the dataframe
        df = pd.DataFrame(ll)
    else:
        # Lists are unequal. Print an error and return the "empty" placeholder.
        df = "empty"
        print("Error: Numpy Array lengths do not match. Cannot create data frame")

    logger_dataframes.info("exit ens_to_df")
    return df
[ "def", "create_dataframe", "(", "ensemble", ")", ":", "logger_dataframes", ".", "info", "(", "\"enter ens_to_df\"", ")", "# \"Flatten\" the nested lists. Bring all nested lists up to top-level. Output looks like [ [1,2], [1,2], ... ]", "ll", "=", "unwrap_arrays", "(", "ensemble", ")", "# Check that list lengths are all equal", "valid", "=", "match_arr_lengths", "(", "ll", ")", "if", "valid", ":", "# Lists are equal lengths, create the dataframe", "df", "=", "pd", ".", "DataFrame", "(", "ll", ")", "else", ":", "# Lists are unequal. Print error and return nothing.", "df", "=", "\"empty\"", "print", "(", "\"Error: Numpy Array lengths do not match. Cannot create data frame\"", ")", "logger_dataframes", ".", "info", "(", "\"exit ens_to_df\"", ")", "return", "df" ]
Create a data frame from given nested lists of ensemble data

:param list ensemble: Ensemble data
:return obj: Dataframe
[ "Create", "a", "data", "frame", "from", "given", "nested", "lists", "of", "ensemble", "data", ":", "param", "list", "ensemble", ":", "Ensemble", "data", ":", "return", "obj", ":", "Dataframe" ]
python
train
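A standalone sketch of the equal-length guard behind create_dataframe (pandas only; the set-of-lengths check stands in for the match_arr_lengths helper):

import pandas as pd

ll = [[1, 2, 3], [4, 5, 6]]
if len({len(row) for row in ll}) == 1:  # all rows the same length
    df = pd.DataFrame(ll)
else:
    df = "empty"  # same placeholder convention as the function above
print(df)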
zomux/deepy
examples/attention_models/first_glimpse_model.py
https://github.com/zomux/deepy/blob/090fbad22a08a809b12951cd0d4984f5bd432698/examples/attention_models/first_glimpse_model.py#L38-L54
def _first_glimpse_sensor(self, x_t):
    """
    Compute first glimpse position using down-sampled image.
    """
    downsampled_img = theano.tensor.signal.downsample.max_pool_2d(x_t, (4, 4))
    downsampled_img = downsampled_img.flatten()
    first_l = T.dot(downsampled_img, self.W_f)
    if self.disable_reinforce:
        wf_grad = self.W_f
        if self.random_glimpse:
            first_l = self.srng.uniform((2,), low=-1.7, high=1.7)
    else:
        sampled_l_t = self._sample_gaussian(first_l, self.cov)
        sampled_pdf = self._multi_gaussian_pdf(disconnected_grad(sampled_l_t),
                                               first_l)
        wf_grad = T.grad(T.log(sampled_pdf), self.W_f)
        first_l = sampled_l_t
    return first_l, wf_grad
[ "def", "_first_glimpse_sensor", "(", "self", ",", "x_t", ")", ":", "downsampled_img", "=", "theano", ".", "tensor", ".", "signal", ".", "downsample", ".", "max_pool_2d", "(", "x_t", ",", "(", "4", ",", "4", ")", ")", "downsampled_img", "=", "downsampled_img", ".", "flatten", "(", ")", "first_l", "=", "T", ".", "dot", "(", "downsampled_img", ",", "self", ".", "W_f", ")", "if", "self", ".", "disable_reinforce", ":", "wf_grad", "=", "self", ".", "W_f", "if", "self", ".", "random_glimpse", ":", "first_l", "=", "self", ".", "srng", ".", "uniform", "(", "(", "2", ",", ")", ",", "low", "=", "-", "1.7", ",", "high", "=", "1.7", ")", "else", ":", "sampled_l_t", "=", "self", ".", "_sample_gaussian", "(", "first_l", ",", "self", ".", "cov", ")", "sampled_pdf", "=", "self", ".", "_multi_gaussian_pdf", "(", "disconnected_grad", "(", "sampled_l_t", ")", ",", "first_l", ")", "wf_grad", "=", "T", ".", "grad", "(", "T", ".", "log", "(", "sampled_pdf", ")", ",", "self", ".", "W_f", ")", "first_l", "=", "sampled_l_t", "return", "first_l", ",", "wf_grad" ]
Compute first glimpse position using down-sampled image.
[ "Compute", "first", "glimpse", "position", "using", "down", "-", "sampled", "image", "." ]
python
test
MrYsLab/PyMata
examples/i2c/pymata_i2c_write/bicolor_display_controller.py
https://github.com/MrYsLab/PyMata/blob/7e0ec34670b5a0d3d6b74bcbe4f3808c845cc429/examples/i2c/pymata_i2c_write/bicolor_display_controller.py#L96-L104
def set_blink_rate(self, b): """ Set the user's desired blink rate (0 - 3) @param b: blink rate """ if b > 3: b = 0 # turn off if not sure self.firmata.i2c_write(self.board_address, (self.HT16K33_BLINK_CMD | self.HT16K33_BLINK_DISPLAYON | (b << 1)))
[ "def", "set_blink_rate", "(", "self", ",", "b", ")", ":", "if", "b", ">", "3", ":", "b", "=", "0", "# turn off if not sure", "self", ".", "firmata", ".", "i2c_write", "(", "self", ".", "board_address", ",", "(", "self", ".", "HT16K33_BLINK_CMD", "|", "self", ".", "HT16K33_BLINK_DISPLAYON", "|", "(", "b", "<<", "1", ")", ")", ")" ]
Set the user's desired blink rate (0 - 3) @param b: blink rate
[ "Set", "the", "user", "s", "desired", "blink", "rate", "(", "0", "-", "3", ")" ]
python
valid
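The write in set_blink_rate is plain bit packing; here is a standalone sketch of the command byte, with the HT16K33 constants written out as assumed values (0x80 command base, 0x01 display-on flag, blink rate occupying bits 1-2):

HT16K33_BLINK_CMD = 0x80        # assumed command base
HT16K33_BLINK_DISPLAYON = 0x01  # assumed display-on flag

def blink_command(b):
    if b > 3:
        b = 0  # out-of-range rates fall back to "no blink", as in the record
    return HT16K33_BLINK_CMD | HT16K33_BLINK_DISPLAYON | (b << 1)

for rate in (0, 1, 2, 3, 4):
    print(rate, hex(blink_command(rate)))
# 0 0x81 / 1 0x83 / 2 0x85 / 3 0x87 / 4 0x81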
apache/airflow
airflow/contrib/hooks/gcp_mlengine_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_mlengine_hook.py#L60-L121
def create_job(self, project_id, job, use_existing_job_fn=None):
    """
    Launches an MLEngine job and waits for it to reach a terminal state.

    :param project_id: The Google Cloud project id within which MLEngine
        job will be launched.
    :type project_id: str

    :param job: MLEngine Job object that should be provided to the MLEngine
        API, such as: ::

            {
              'jobId': 'my_job_id',
              'trainingInput': {
                'scaleTier': 'STANDARD_1',
                ...
              }
            }
    :type job: dict

    :param use_existing_job_fn: In case an MLEngine job with the same
        job_id already exists, this method (if provided) will decide whether
        we should use this existing job, continue waiting for it to finish
        and returning the job object. It should accept an MLEngine job
        object, and return a boolean value indicating whether it is OK to
        reuse the existing job. If 'use_existing_job_fn' is not provided,
        we by default reuse the existing MLEngine job.
    :type use_existing_job_fn: function

    :return: The MLEngine job object if the job successfully reaches a
        terminal state (which might be FAILED or CANCELLED state).
    :rtype: dict
    """
    request = self._mlengine.projects().jobs().create(
        parent='projects/{}'.format(project_id),
        body=job)
    job_id = job['jobId']

    try:
        request.execute()
    except HttpError as e:
        # 409 means there is an existing job with the same job ID.
        if e.resp.status == 409:
            if use_existing_job_fn is not None:
                existing_job = self._get_job(project_id, job_id)
                if not use_existing_job_fn(existing_job):
                    self.log.error(
                        'Job with job_id %s already exists, but it does '
                        'not match our expectation: %s',
                        job_id, existing_job
                    )
                    raise
            self.log.info(
                'Job with job_id %s already exists. Waiting for it to finish',
                job_id
            )
        else:
            self.log.error('Failed to create MLEngine job: {}'.format(e))
            raise

    return self._wait_for_job_done(project_id, job_id)
[ "def", "create_job", "(", "self", ",", "project_id", ",", "job", ",", "use_existing_job_fn", "=", "None", ")", ":", "request", "=", "self", ".", "_mlengine", ".", "projects", "(", ")", ".", "jobs", "(", ")", ".", "create", "(", "parent", "=", "'projects/{}'", ".", "format", "(", "project_id", ")", ",", "body", "=", "job", ")", "job_id", "=", "job", "[", "'jobId'", "]", "try", ":", "request", ".", "execute", "(", ")", "except", "HttpError", "as", "e", ":", "# 409 means there is an existing job with the same job ID.", "if", "e", ".", "resp", ".", "status", "==", "409", ":", "if", "use_existing_job_fn", "is", "not", "None", ":", "existing_job", "=", "self", ".", "_get_job", "(", "project_id", ",", "job_id", ")", "if", "not", "use_existing_job_fn", "(", "existing_job", ")", ":", "self", ".", "log", ".", "error", "(", "'Job with job_id %s already exist, but it does '", "'not match our expectation: %s'", ",", "job_id", ",", "existing_job", ")", "raise", "self", ".", "log", ".", "info", "(", "'Job with job_id %s already exist. Will waiting for it to finish'", ",", "job_id", ")", "else", ":", "self", ".", "log", ".", "error", "(", "'Failed to create MLEngine job: {}'", ".", "format", "(", "e", ")", ")", "raise", "return", "self", ".", "_wait_for_job_done", "(", "project_id", ",", "job_id", ")" ]
Launches an MLEngine job and waits for it to reach a terminal state.

:param project_id: The Google Cloud project id within which MLEngine
    job will be launched.
:type project_id: str

:param job: MLEngine Job object that should be provided to the MLEngine
    API, such as: ::

        {
          'jobId': 'my_job_id',
          'trainingInput': {
            'scaleTier': 'STANDARD_1',
            ...
          }
        }
:type job: dict

:param use_existing_job_fn: In case an MLEngine job with the same
    job_id already exists, this method (if provided) will decide whether
    we should use this existing job, continue waiting for it to finish
    and returning the job object. It should accept an MLEngine job
    object, and return a boolean value indicating whether it is OK to
    reuse the existing job. If 'use_existing_job_fn' is not provided,
    we by default reuse the existing MLEngine job.
:type use_existing_job_fn: function

:return: The MLEngine job object if the job successfully reaches a
    terminal state (which might be FAILED or CANCELLED state).
:rtype: dict
[ "Launches", "a", "MLEngine", "job", "and", "wait", "for", "it", "to", "reach", "a", "terminal", "state", "." ]
python
test
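A hedged usage sketch for create_job: the project id and job body are illustrative, the hook below is a stub standing in for a real MLEngineHook so the sketch runs without GCP credentials, and the callback declares an existing job reusable only when its trainingInput matches:

job = {
    'jobId': 'my_job_id',
    'trainingInput': {'scaleTier': 'STANDARD_1'},
}

def reuse_if_same_input(existing_job):
    # Reuse the existing job only if it was submitted with the same input.
    return existing_job.get('trainingInput') == job['trainingInput']

class FakeHook:
    # Stub with the same call shape as the record's method.
    def create_job(self, project_id, job, use_existing_job_fn=None):
        return {'jobId': job['jobId'], 'state': 'SUCCEEDED'}

hook = FakeHook()
finished = hook.create_job('my-gcp-project', job,
                           use_existing_job_fn=reuse_if_same_input)
print(finished['state'])  # a terminal state: SUCCEEDED, FAILED or CANCELLED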
waqasbhatti/astrobase
astrobase/varclass/starfeatures.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/varclass/starfeatures.py#L102-L174
def coord_features(objectinfo):
    '''Calculates object coordinate features, including:

    - galactic coordinates
    - total proper motion from pmra, pmdecl
    - reduced J proper motion from propermotion and Jmag

    Parameters
    ----------

    objectinfo : dict
        This is an objectinfo dict from a light curve file read into an
        `lcdict`. The format and the minimum keys required are::

            {'ra': the right ascension of the object in decimal degrees,
             'decl': the declination of the object in decimal degrees,
             'pmra': the proper motion in right ascension in mas/yr,
             'pmdecl': the proper motion in declination in mas/yr,
             'jmag': the 2MASS J mag of this object}

    Returns
    -------

    dict
        A dict containing the galactic coordinates, the total proper motion,
        and the reduced J proper motion

    '''

    retdict = {'propermotion': np.nan,
               'gl':np.nan,
               'gb':np.nan,
               'rpmj':np.nan}

    if ('ra' in objectinfo and
        objectinfo['ra'] is not None and np.isfinite(objectinfo['ra']) and
        'decl' in objectinfo and
        objectinfo['decl'] is not None and np.isfinite(objectinfo['decl'])):

        retdict['gl'], retdict['gb'] = coordutils.equatorial_to_galactic(
            objectinfo['ra'],
            objectinfo['decl']
        )

    if ('pmra' in objectinfo and
        objectinfo['pmra'] is not None and np.isfinite(objectinfo['pmra']) and
        'pmdecl' in objectinfo and
        objectinfo['pmdecl'] is not None and
        np.isfinite(objectinfo['pmdecl']) and
        'decl' in objectinfo and
        objectinfo['decl'] is not None and np.isfinite(objectinfo['decl'])):

        retdict['propermotion'] = coordutils.total_proper_motion(
            objectinfo['pmra'],
            objectinfo['pmdecl'],
            objectinfo['decl']
        )

    if ('jmag' in objectinfo and
        objectinfo['jmag'] is not None and np.isfinite(objectinfo['jmag']) and
        np.isfinite(retdict['propermotion'])):

        retdict['rpmj'] = coordutils.reduced_proper_motion(
            objectinfo['jmag'],
            retdict['propermotion']
        )

    return retdict
[ "def", "coord_features", "(", "objectinfo", ")", ":", "retdict", "=", "{", "'propermotion'", ":", "np", ".", "nan", ",", "'gl'", ":", "np", ".", "nan", ",", "'gb'", ":", "np", ".", "nan", ",", "'rpmj'", ":", "np", ".", "nan", "}", "if", "(", "'ra'", "in", "objectinfo", "and", "objectinfo", "[", "'ra'", "]", "is", "not", "None", "and", "np", ".", "isfinite", "(", "objectinfo", "[", "'ra'", "]", ")", "and", "'decl'", "in", "objectinfo", "and", "objectinfo", "[", "'decl'", "]", "is", "not", "None", "and", "np", ".", "isfinite", "(", "objectinfo", "[", "'decl'", "]", ")", ")", ":", "retdict", "[", "'gl'", "]", ",", "retdict", "[", "'gb'", "]", "=", "coordutils", ".", "equatorial_to_galactic", "(", "objectinfo", "[", "'ra'", "]", ",", "objectinfo", "[", "'decl'", "]", ")", "if", "(", "'pmra'", "in", "objectinfo", "and", "objectinfo", "[", "'pmra'", "]", "is", "not", "None", "and", "np", ".", "isfinite", "(", "objectinfo", "[", "'pmra'", "]", ")", "and", "'pmdecl'", "in", "objectinfo", "and", "objectinfo", "[", "'pmdecl'", "]", "is", "not", "None", "and", "np", ".", "isfinite", "(", "objectinfo", "[", "'pmdecl'", "]", ")", "and", "'decl'", "in", "objectinfo", "and", "objectinfo", "[", "'decl'", "]", "is", "not", "None", "and", "np", ".", "isfinite", "(", "objectinfo", "[", "'decl'", "]", ")", ")", ":", "retdict", "[", "'propermotion'", "]", "=", "coordutils", ".", "total_proper_motion", "(", "objectinfo", "[", "'pmra'", "]", ",", "objectinfo", "[", "'pmdecl'", "]", ",", "objectinfo", "[", "'decl'", "]", ")", "if", "(", "'jmag'", "in", "objectinfo", "and", "objectinfo", "[", "'jmag'", "]", "is", "not", "None", "and", "np", ".", "isfinite", "(", "objectinfo", "[", "'jmag'", "]", ")", "and", "np", ".", "isfinite", "(", "retdict", "[", "'propermotion'", "]", ")", ")", ":", "retdict", "[", "'rpmj'", "]", "=", "coordutils", ".", "reduced_proper_motion", "(", "objectinfo", "[", "'jmag'", "]", ",", "retdict", "[", "'propermotion'", "]", ")", "return", "retdict" ]
Calculates object coordinate features, including:

- galactic coordinates
- total proper motion from pmra, pmdecl
- reduced J proper motion from propermotion and Jmag

Parameters
----------

objectinfo : dict
    This is an objectinfo dict from a light curve file read into an
    `lcdict`. The format and the minimum keys required are::

        {'ra': the right ascension of the object in decimal degrees,
         'decl': the declination of the object in decimal degrees,
         'pmra': the proper motion in right ascension in mas/yr,
         'pmdecl': the proper motion in declination in mas/yr,
         'jmag': the 2MASS J mag of this object}

Returns
-------

dict
    A dict containing the galactic coordinates, the total proper motion,
    and the reduced J proper motion
[ "Calculates", "object", "coordinates", "features", "including", ":" ]
python
valid
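The triple guard repeated throughout coord_features (key present, not None, finite value) condenses into one helper; a small sketch with an illustrative objectinfo dict, numpy being the only dependency:

import numpy as np

def finite(d, key):
    # True only when key exists, is not None, and holds a finite number.
    value = d.get(key)
    return value is not None and np.isfinite(value)

objectinfo = {'ra': 88.79, 'decl': 7.41, 'pmra': 27.54,
              'pmdecl': 11.33, 'jmag': -2.99}
if finite(objectinfo, 'ra') and finite(objectinfo, 'decl'):
    print('can compute galactic coordinates')
if all(finite(objectinfo, k) for k in ('pmra', 'pmdecl', 'decl')):
    print('can compute total proper motion')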
craigahobbs/chisel
src/chisel/app.py
https://github.com/craigahobbs/chisel/blob/d306a9eae2ff757647c6ca1c933bc944efa5c326/src/chisel/app.py#L178-L185
def add_header(self, key, value): """ Add a response header """ assert isinstance(key, str), 'header key must be of type str' assert isinstance(value, str), 'header value must be of type str' self.headers[key] = value
[ "def", "add_header", "(", "self", ",", "key", ",", "value", ")", ":", "assert", "isinstance", "(", "key", ",", "str", ")", ",", "'header key must be of type str'", "assert", "isinstance", "(", "value", ",", "str", ")", ",", "'header value must be of type str'", "self", ".", "headers", "[", "key", "]", "=", "value" ]
Add a response header
[ "Add", "a", "response", "header" ]
python
train
rwl/pylon
pylon/io/excel.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/io/excel.py#L76-L84
def write_generator_data(self, file): """ Write generator data to file. """ generator_sheet = self.book.add_sheet("Generators") for j, generator in enumerate(self.case.generators): i = generator.bus._i for k, attr in enumerate(GENERATOR_ATTRS): generator_sheet.write(j, 0, i)
[ "def", "write_generator_data", "(", "self", ",", "file", ")", ":", "generator_sheet", "=", "self", ".", "book", ".", "add_sheet", "(", "\"Generators\"", ")", "for", "j", ",", "generator", "in", "enumerate", "(", "self", ".", "case", ".", "generators", ")", ":", "i", "=", "generator", ".", "bus", ".", "_i", "for", "k", ",", "attr", "in", "enumerate", "(", "GENERATOR_ATTRS", ")", ":", "generator_sheet", ".", "write", "(", "j", ",", "0", ",", "i", ")" ]
Write generator data to file.
[ "Write", "generator", "data", "to", "file", "." ]
python
train
annoviko/pyclustering
pyclustering/nnet/legion.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/nnet/legion.py#L193-L206
def allocate_sync_ensembles(self, tolerance = 0.1):
    """!
    @brief Allocate clusters in line with ensembles of synchronous oscillators where each synchronous ensemble corresponds to only one cluster.

    @param[in] tolerance (double): Maximum error for allocation of synchronous ensemble oscillators.

    @return (list) Groups of indexes of synchronous oscillators, for example, [ [index_osc1, index_osc3], [index_osc2], [index_osc4, index_osc5] ].

    """

    if (self.__ccore_legion_dynamic_pointer is not None):
        self.__output = wrapper.legion_dynamic_get_output(self.__ccore_legion_dynamic_pointer);

    return allocate_sync_ensembles(self.__output, tolerance);
[ "def", "allocate_sync_ensembles", "(", "self", ",", "tolerance", "=", "0.1", ")", ":", "if", "(", "self", ".", "__ccore_legion_dynamic_pointer", "is", "not", "None", ")", ":", "self", ".", "__output", "=", "wrapper", ".", "legion_dynamic_get_output", "(", "self", ".", "__ccore_legion_dynamic_pointer", ")", "return", "allocate_sync_ensembles", "(", "self", ".", "__output", ",", "tolerance", ")" ]
!
@brief Allocate clusters in line with ensembles of synchronous oscillators where each synchronous ensemble corresponds to only one cluster.

@param[in] tolerance (double): Maximum error for allocation of synchronous ensemble oscillators.

@return (list) Groups of indexes of synchronous oscillators, for example, [ [index_osc1, index_osc3], [index_osc2], [index_osc4, index_osc5] ].
[ "!" ]
python
valid
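The grouping behind allocate_sync_ensembles can be sketched without the C-core wrapper: oscillators whose final outputs lie within the tolerance of one another land in the same ensemble. A simplified, assumed version working greedily on the last output values:

def group_by_tolerance(final_outputs, tolerance=0.1):
    # An oscillator joins the first ensemble whose representative output
    # is within tolerance, otherwise it starts a new ensemble.
    ensembles, representatives = [], []
    for index, value in enumerate(final_outputs):
        for ensemble, representative in zip(ensembles, representatives):
            if abs(value - representative) <= tolerance:
                ensemble.append(index)
                break
        else:
            ensembles.append([index])
            representatives.append(value)
    return ensembles

print(group_by_tolerance([0.95, 0.10, 0.98, 0.12, 0.55]))
# [[0, 2], [1, 3], [4]]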
jmgilman/Neolib
neolib/pyamf/amf3.py
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/amf3.py#L1271-L1298
def writeList(self, n, is_proxy=False):
    """
    Writes a C{tuple}, C{set} or C{list} to the stream.

    @type n: One of C{__builtin__.tuple}, C{__builtin__.set}
        or C{__builtin__.list}
    @param n: The C{list} data to be encoded to the AMF3 data stream.
    """
    if self.use_proxies and not is_proxy:
        self.writeProxy(n)
        return

    self.stream.write(TYPE_ARRAY)

    ref = self.context.getObjectReference(n)

    if ref != -1:
        self._writeInteger(ref << 1)

        return

    self.context.addObject(n)

    self._writeInteger((len(n) << 1) | REFERENCE_BIT)
    self.stream.write('\x01')

    for x in n:
        self.writeElement(x)
[ "def", "writeList", "(", "self", ",", "n", ",", "is_proxy", "=", "False", ")", ":", "if", "self", ".", "use_proxies", "and", "not", "is_proxy", ":", "self", ".", "writeProxy", "(", "n", ")", "return", "self", ".", "stream", ".", "write", "(", "TYPE_ARRAY", ")", "ref", "=", "self", ".", "context", ".", "getObjectReference", "(", "n", ")", "if", "ref", "!=", "-", "1", ":", "self", ".", "_writeInteger", "(", "ref", "<<", "1", ")", "return", "self", ".", "context", ".", "addObject", "(", "n", ")", "self", ".", "_writeInteger", "(", "(", "len", "(", "n", ")", "<<", "1", ")", "|", "REFERENCE_BIT", ")", "self", ".", "stream", ".", "write", "(", "'\\x01'", ")", "[", "self", ".", "writeElement", "(", "x", ")", "for", "x", "in", "n", "]" ]
Writes a C{tuple}, C{set} or C{list} to the stream. @type n: One of C{__builtin__.tuple}, C{__builtin__.set} or C{__builtin__.list} @param n: The C{list} data to be encoded to the AMF3 data stream.
[ "Writes", "a", "C", "{", "tuple", "}", "C", "{", "set", "}", "or", "C", "{", "list", "}", "to", "the", "stream", "." ]
python
train
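writeList packs either a back-reference or an inline length into a single AMF3 integer: references are written as ref << 1 with the low bit clear, inline arrays as (len << 1) | REFERENCE_BIT with it set. A small sketch of both encodings and the matching decode, using plain ints rather than the variable-length U29 wire format:

REFERENCE_BIT = 0x01

def encode_header(value, inline):
    # The low bit distinguishes an inline length from a back-reference.
    return (value << 1) | REFERENCE_BIT if inline else value << 1

def decode_header(header):
    if header & REFERENCE_BIT:
        return ('inline', header >> 1)     # length of the inline array
    return ('reference', header >> 1)      # index into the object table

print(decode_header(encode_header(5, inline=True)))    # ('inline', 5)
print(decode_header(encode_header(3, inline=False)))   # ('reference', 3)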
AndrewIngram/django-extra-views
extra_views/formsets.py
https://github.com/AndrewIngram/django-extra-views/blob/188e1bf1f15a44d9a599028d020083af9fb43ea7/extra_views/formsets.py#L85-L102
def get_factory_kwargs(self): """ Returns the keyword arguments for calling the formset factory """ # Perform deprecation check for attr in ['extra', 'max_num', 'can_order', 'can_delete', 'ct_field', 'formfield_callback', 'fk_name', 'widgets', 'ct_fk_field']: if hasattr(self, attr): klass = type(self).__name__ raise DeprecationWarning( 'Setting `{0}.{1}` at the class level is now deprecated. ' 'Set `{0}.factory_kwargs` instead.'.format(klass, attr) ) kwargs = self.factory_kwargs.copy() if self.get_formset_class(): kwargs['formset'] = self.get_formset_class() return kwargs
[ "def", "get_factory_kwargs", "(", "self", ")", ":", "# Perform deprecation check", "for", "attr", "in", "[", "'extra'", ",", "'max_num'", ",", "'can_order'", ",", "'can_delete'", ",", "'ct_field'", ",", "'formfield_callback'", ",", "'fk_name'", ",", "'widgets'", ",", "'ct_fk_field'", "]", ":", "if", "hasattr", "(", "self", ",", "attr", ")", ":", "klass", "=", "type", "(", "self", ")", ".", "__name__", "raise", "DeprecationWarning", "(", "'Setting `{0}.{1}` at the class level is now deprecated. '", "'Set `{0}.factory_kwargs` instead.'", ".", "format", "(", "klass", ",", "attr", ")", ")", "kwargs", "=", "self", ".", "factory_kwargs", ".", "copy", "(", ")", "if", "self", ".", "get_formset_class", "(", ")", ":", "kwargs", "[", "'formset'", "]", "=", "self", ".", "get_formset_class", "(", ")", "return", "kwargs" ]
Returns the keyword arguments for calling the formset factory
[ "Returns", "the", "keyword", "arguments", "for", "calling", "the", "formset", "factory" ]
python
valid
pantsbuild/pants
src/python/pants/build_graph/build_configuration.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/build_graph/build_configuration.py#L116-L136
def register_optionables(self, optionables): """Registers the given subsystem types. :param optionables: The Optionable types to register. :type optionables: :class:`collections.Iterable` containing :class:`pants.option.optionable.Optionable` subclasses. """ if not isinstance(optionables, Iterable): raise TypeError('The optionables must be an iterable, given {}'.format(optionables)) optionables = tuple(optionables) if not optionables: return invalid_optionables = [s for s in optionables if not isinstance(s, type) or not issubclass(s, Optionable)] if invalid_optionables: raise TypeError('The following items from the given optionables are not Optionable ' 'subclasses:\n\t{}'.format('\n\t'.join(str(i) for i in invalid_optionables))) self._optionables.update(optionables)
[ "def", "register_optionables", "(", "self", ",", "optionables", ")", ":", "if", "not", "isinstance", "(", "optionables", ",", "Iterable", ")", ":", "raise", "TypeError", "(", "'The optionables must be an iterable, given {}'", ".", "format", "(", "optionables", ")", ")", "optionables", "=", "tuple", "(", "optionables", ")", "if", "not", "optionables", ":", "return", "invalid_optionables", "=", "[", "s", "for", "s", "in", "optionables", "if", "not", "isinstance", "(", "s", ",", "type", ")", "or", "not", "issubclass", "(", "s", ",", "Optionable", ")", "]", "if", "invalid_optionables", ":", "raise", "TypeError", "(", "'The following items from the given optionables are not Optionable '", "'subclasses:\\n\\t{}'", ".", "format", "(", "'\\n\\t'", ".", "join", "(", "str", "(", "i", ")", "for", "i", "in", "invalid_optionables", ")", ")", ")", "self", ".", "_optionables", ".", "update", "(", "optionables", ")" ]
Registers the given subsystem types. :param optionables: The Optionable types to register. :type optionables: :class:`collections.Iterable` containing :class:`pants.option.optionable.Optionable` subclasses.
[ "Registers", "the", "given", "subsystem", "types", "." ]
python
train
keenlabs/KeenClient-Python
keen/scoped_keys.py
https://github.com/keenlabs/KeenClient-Python/blob/266387c3376d1e000d117e17c45045ae3439d43f/keen/scoped_keys.py#L23-L32
def pad_aes256(s): """ Pads an input string to a given block size. :param s: string :returns: The padded string. """ if len(s) % AES.block_size == 0: return s return Padding.appendPadding(s, blocksize=AES.block_size)
[ "def", "pad_aes256", "(", "s", ")", ":", "if", "len", "(", "s", ")", "%", "AES", ".", "block_size", "==", "0", ":", "return", "s", "return", "Padding", ".", "appendPadding", "(", "s", ",", "blocksize", "=", "AES", ".", "block_size", ")" ]
Pads an input string to a given block size. :param s: string :returns: The padded string.
[ "Pads", "an", "input", "string", "to", "a", "given", "block", "size", ".", ":", "param", "s", ":", "string", ":", "returns", ":", "The", "padded", "string", "." ]
python
train
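pad_aes256 returns the input untouched when its length is already a multiple of the block size; a quick check of the resulting lengths, with PKCS#7-style padding written inline instead of the Padding helper from the record:

BLOCK_SIZE = 16  # AES.block_size

def pad(s):
    if len(s) % BLOCK_SIZE == 0:
        return s  # same early return as the record above
    fill = BLOCK_SIZE - len(s) % BLOCK_SIZE
    return s + chr(fill) * fill  # PKCS#7-style fill bytes

for text in ('abc', 'x' * 16):
    print(len(text), '->', len(pad(text)))
# 3 -> 16, 16 -> 16

Note the design consequence of the early return: an unpadder cannot tell a padded message from one that happened to end on a block boundary, which is why strict PKCS#7 always appends a full block in that case.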
hyperledger/sawtooth-core
validator/sawtooth_validator/state/batch_tracker.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/state/batch_tracker.py#L112-L128
def get_status(self, batch_id): """Returns the status enum for a batch. Args: batch_id (str): The id of the batch to get the status for Returns: int: The status enum """ with self._lock: if self._batch_committed(batch_id): return ClientBatchStatus.COMMITTED if batch_id in self._invalid: return ClientBatchStatus.INVALID if batch_id in self._pending: return ClientBatchStatus.PENDING return ClientBatchStatus.UNKNOWN
[ "def", "get_status", "(", "self", ",", "batch_id", ")", ":", "with", "self", ".", "_lock", ":", "if", "self", ".", "_batch_committed", "(", "batch_id", ")", ":", "return", "ClientBatchStatus", ".", "COMMITTED", "if", "batch_id", "in", "self", ".", "_invalid", ":", "return", "ClientBatchStatus", ".", "INVALID", "if", "batch_id", "in", "self", ".", "_pending", ":", "return", "ClientBatchStatus", ".", "PENDING", "return", "ClientBatchStatus", ".", "UNKNOWN" ]
Returns the status enum for a batch. Args: batch_id (str): The id of the batch to get the status for Returns: int: The status enum
[ "Returns", "the", "status", "enum", "for", "a", "batch", "." ]
python
train
wummel/linkchecker
third_party/dnspython/dns/message.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/message.py#L271-L290
def is_response(self, other): """Is other a response to self? @rtype: bool""" if other.flags & dns.flags.QR == 0 or \ self.id != other.id or \ dns.opcode.from_flags(self.flags) != \ dns.opcode.from_flags(other.flags): return False if dns.rcode.from_flags(other.flags, other.ednsflags) != \ dns.rcode.NOERROR: return True if dns.opcode.is_update(self.flags): return True for n in self.question: if n not in other.question: return False for n in other.question: if n not in self.question: return False return True
[ "def", "is_response", "(", "self", ",", "other", ")", ":", "if", "other", ".", "flags", "&", "dns", ".", "flags", ".", "QR", "==", "0", "or", "self", ".", "id", "!=", "other", ".", "id", "or", "dns", ".", "opcode", ".", "from_flags", "(", "self", ".", "flags", ")", "!=", "dns", ".", "opcode", ".", "from_flags", "(", "other", ".", "flags", ")", ":", "return", "False", "if", "dns", ".", "rcode", ".", "from_flags", "(", "other", ".", "flags", ",", "other", ".", "ednsflags", ")", "!=", "dns", ".", "rcode", ".", "NOERROR", ":", "return", "True", "if", "dns", ".", "opcode", ".", "is_update", "(", "self", ".", "flags", ")", ":", "return", "True", "for", "n", "in", "self", ".", "question", ":", "if", "n", "not", "in", "other", ".", "question", ":", "return", "False", "for", "n", "in", "other", ".", "question", ":", "if", "n", "not", "in", "self", ".", "question", ":", "return", "False", "return", "True" ]
Is other a response to self? @rtype: bool
[ "Is", "other", "a", "response", "to", "self?" ]
python
train
kensho-technologies/graphql-compiler
graphql_compiler/compiler/expressions.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/expressions.py#L769-L779
def validate(self): """Validate that the BinaryComposition is correctly representable.""" _validate_operator_name(self.operator, BinaryComposition.SUPPORTED_OPERATORS) if not isinstance(self.left, Expression): raise TypeError(u'Expected Expression left, got: {} {} {}'.format( type(self.left).__name__, self.left, self)) if not isinstance(self.right, Expression): raise TypeError(u'Expected Expression right, got: {} {}'.format( type(self.right).__name__, self.right))
[ "def", "validate", "(", "self", ")", ":", "_validate_operator_name", "(", "self", ".", "operator", ",", "BinaryComposition", ".", "SUPPORTED_OPERATORS", ")", "if", "not", "isinstance", "(", "self", ".", "left", ",", "Expression", ")", ":", "raise", "TypeError", "(", "u'Expected Expression left, got: {} {} {}'", ".", "format", "(", "type", "(", "self", ".", "left", ")", ".", "__name__", ",", "self", ".", "left", ",", "self", ")", ")", "if", "not", "isinstance", "(", "self", ".", "right", ",", "Expression", ")", ":", "raise", "TypeError", "(", "u'Expected Expression right, got: {} {}'", ".", "format", "(", "type", "(", "self", ".", "right", ")", ".", "__name__", ",", "self", ".", "right", ")", ")" ]
Validate that the BinaryComposition is correctly representable.
[ "Validate", "that", "the", "BinaryComposition", "is", "correctly", "representable", "." ]
python
train
jobovy/galpy
galpy/potential/PowerSphericalPotentialwCutoff.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/PowerSphericalPotentialwCutoff.py#L132-L150
def _R2deriv(self,R,z,phi=0.,t=0.):
    """
    NAME:
       _R2deriv
    PURPOSE:
       evaluate the second radial derivative for this potential
    INPUT:
       R - Galactocentric cylindrical radius
       z - vertical height
       phi - azimuth
       t - time
    OUTPUT:
       the second radial derivative
    HISTORY:
       2013-06-28 - Written - Bovy (IAS)
    """
    r= nu.sqrt(R*R+z*z)
    return 4.*nu.pi*r**(-2.-self.alpha)*nu.exp(-(r/self.rc)**2.)*R**2.\
        +self._mass(r)/r**5.*(z**2.-2.*R**2.)
[ "def", "_R2deriv", "(", "self", ",", "R", ",", "z", ",", "phi", "=", "0.", ",", "t", "=", "0.", ")", ":", "r", "=", "nu", ".", "sqrt", "(", "R", "*", "R", "+", "z", "*", "z", ")", "return", "4.", "*", "nu", ".", "pi", "*", "r", "**", "(", "-", "2.", "-", "self", ".", "alpha", ")", "*", "nu", ".", "exp", "(", "-", "(", "r", "/", "self", ".", "rc", ")", "**", "2.", ")", "*", "R", "**", "2.", "+", "self", ".", "_mass", "(", "r", ")", "/", "r", "**", "5.", "*", "(", "z", "**", "2.", "-", "2.", "*", "R", "**", "2.", ")" ]
NAME:
   _R2deriv
PURPOSE:
   evaluate the second radial derivative for this potential
INPUT:
   R - Galactocentric cylindrical radius
   z - vertical height
   phi - azimuth
   t - time
OUTPUT:
   the second radial derivative
HISTORY:
   2013-06-28 - Written - Bovy (IAS)
[ "NAME", ":", "_Rderiv", "PURPOSE", ":", "evaluate", "the", "second", "radial", "derivative", "for", "this", "potential", "INPUT", ":", "R", "-", "Galactocentric", "cylindrical", "radius", "z", "-", "vertical", "height", "phi", "-", "azimuth", "t", "-", "time", "OUTPUT", ":", "the", "second", "radial", "derivative", "HISTORY", ":", "2013", "-", "06", "-", "28", "-", "Written", "-", "Bovy", "(", "IAS", ")" ]
python
train
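The return expression in _R2deriv reads more clearly as math. With r = sqrt(R^2 + z^2), alpha the power-law index, r_c the cutoff radius, and M(r) the record's enclosed-mass helper, the code computes (a direct transcription of the code, not a fresh derivation):

\frac{\partial^2 \Phi}{\partial R^2}
  = 4\pi\, r^{-2-\alpha}\, e^{-(r/r_c)^2}\, R^2
  + \frac{M(r)}{r^5}\,\bigl(z^2 - 2R^2\bigr)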
greenape/mktheapidocs
mktheapidocs/mkapi.py
https://github.com/greenape/mktheapidocs/blob/a45e8b43ddd80ed360fe1e98d4f73dc11c4e7bf7/mktheapidocs/mkapi.py#L615-L632
def _split_props(thing, doc):
    """
    Separate properties from other kinds of member.
    """
    props = inspect.getmembers(thing, lambda o: isinstance(o, property))
    ps = []
    docs = [
        (*_get_names(names, types), names, types, desc)
        for names, types, desc in doc
    ]
    for prop_name, prop in props:
        in_doc = [d for d in docs if prop_name in d[0]]
        for d in in_doc:
            docs.remove(d)
        ps.append(prop_name)
    if len(docs) > 0:
        _, _, names, types, descs = zip(*docs)
        return ps, zip(names, types, descs)
    return ps, []
[ "def", "_split_props", "(", "thing", ",", "doc", ")", ":", "props", "=", "inspect", ".", "getmembers", "(", "thing", ",", "lambda", "o", ":", "isinstance", "(", "o", ",", "property", ")", ")", "ps", "=", "[", "]", "docs", "=", "[", "(", "*", "_get_names", "(", "names", ",", "types", ")", ",", "names", ",", "types", ",", "desc", ")", "for", "names", ",", "types", ",", "desc", "in", "doc", "]", "for", "prop_name", ",", "prop", "in", "props", ":", "in_doc", "=", "[", "d", "for", "d", "in", "enumerate", "(", "docs", ")", "if", "prop_name", "in", "d", "[", "0", "]", "]", "for", "d", "in", "in_doc", ":", "docs", ".", "remove", "(", "d", ")", "ps", ".", "append", "(", "prop_name", ")", "if", "len", "(", "docs", ")", ">", "0", ":", "_", ",", "_", ",", "names", ",", "types", ",", "descs", "=", "zip", "(", "*", "docs", ")", "return", "ps", ",", "zip", "(", "names", ",", "types", ",", "descs", ")", "return", "ps", ",", "[", "]" ]
Separate properties from other kinds of member.
[ "Separate", "properties", "from", "other", "kinds", "of", "member", "." ]
python
train
proycon/pynlpl
pynlpl/formats/folia.py
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L1366-L1385
def setdocument(self, doc):
    """Associate a document with this element.

    Arguments:
        doc (:class:`Document`): A document

    Each element must be associated with a FoLiA document.
    """
    assert isinstance(doc, Document)

    if not self.doc:
        self.doc = doc
        if self.id:
            if self.id in doc:
                raise DuplicateIDError(self.id)
            else:
                self.doc.index[self.id] = self

    for e in self: #recursive for all children
        if isinstance(e,AbstractElement): e.setdocument(doc)
[ "def", "setdocument", "(", "self", ",", "doc", ")", ":", "assert", "isinstance", "(", "doc", ",", "Document", ")", "if", "not", "self", ".", "doc", ":", "self", ".", "doc", "=", "doc", "if", "self", ".", "id", ":", "if", "self", ".", "id", "in", "doc", ":", "raise", "DuplicateIDError", "(", "self", ".", "id", ")", "else", ":", "self", ".", "doc", ".", "index", "[", "id", "]", "=", "self", "for", "e", "in", "self", ":", "#recursive for all children", "if", "isinstance", "(", "e", ",", "AbstractElement", ")", ":", "e", ".", "setdocument", "(", "doc", ")" ]
Associate a document with this element. Arguments: doc (:class:`Document`): A document Each element must be associated with a FoLiA document.
[ "Associate", "a", "document", "with", "this", "element", "." ]
python
train
jonathf/chaospy
chaospy/poly/collection/core.py
https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/poly/collection/core.py#L259-L307
def swapdim(P, dim1=1, dim2=0): """ Swap the dim between two variables. Args: P (Poly): Input polynomial. dim1 (int): First dim dim2 (int): Second dim. Returns: (Poly): Polynomial with swapped dimensions. Examples: >>> x,y = variable(2) >>> P = x**4-y >>> print(P) q0^4-q1 >>> print(swapdim(P)) q1^4-q0 """ if not isinstance(P, Poly): return numpy.swapaxes(P, dim1, dim2) dim = P.dim shape = P.shape dtype = P.dtype if dim1==dim2: return P m = max(dim1, dim2) if P.dim <= m: P = chaospy.poly.dimension.setdim(P, m+1) dim = m+1 A = {} for key in P.keys: val = P.A[key] key = list(key) key[dim1], key[dim2] = key[dim2], key[dim1] A[tuple(key)] = val return Poly(A, dim, shape, dtype)
[ "def", "swapdim", "(", "P", ",", "dim1", "=", "1", ",", "dim2", "=", "0", ")", ":", "if", "not", "isinstance", "(", "P", ",", "Poly", ")", ":", "return", "numpy", ".", "swapaxes", "(", "P", ",", "dim1", ",", "dim2", ")", "dim", "=", "P", ".", "dim", "shape", "=", "P", ".", "shape", "dtype", "=", "P", ".", "dtype", "if", "dim1", "==", "dim2", ":", "return", "P", "m", "=", "max", "(", "dim1", ",", "dim2", ")", "if", "P", ".", "dim", "<=", "m", ":", "P", "=", "chaospy", ".", "poly", ".", "dimension", ".", "setdim", "(", "P", ",", "m", "+", "1", ")", "dim", "=", "m", "+", "1", "A", "=", "{", "}", "for", "key", "in", "P", ".", "keys", ":", "val", "=", "P", ".", "A", "[", "key", "]", "key", "=", "list", "(", "key", ")", "key", "[", "dim1", "]", ",", "key", "[", "dim2", "]", "=", "key", "[", "dim2", "]", ",", "key", "[", "dim1", "]", "A", "[", "tuple", "(", "key", ")", "]", "=", "val", "return", "Poly", "(", "A", ",", "dim", ",", "shape", ",", "dtype", ")" ]
Swap the dim between two variables. Args: P (Poly): Input polynomial. dim1 (int): First dim dim2 (int): Second dim. Returns: (Poly): Polynomial with swapped dimensions. Examples: >>> x,y = variable(2) >>> P = x**4-y >>> print(P) q0^4-q1 >>> print(swapdim(P)) q1^4-q0
[ "Swap", "the", "dim", "between", "two", "variables", "." ]
python
train
amorison/loam
loam/cli.py
https://github.com/amorison/loam/blob/a566c943a75e068a4510099331a1ddfe5bbbdd94/loam/cli.py#L248-L292
def zsh_complete(self, path, cmd, *cmds, sourceable=False): """Write zsh compdef script. Args: path (path-like): desired path of the compdef script. cmd (str): command name that should be completed. cmds (str): extra command names that should be completed. sourceable (bool): if True, the generated file will contain an explicit call to ``compdef``, which means it can be sourced to activate CLI completion. """ grouping = internal.zsh_version() >= (5, 4) path = pathlib.Path(path) firstline = ['#compdef', cmd] firstline.extend(cmds) subcmds = list(self.subcmds.keys()) with path.open('w') as zcf: print(*firstline, end='\n\n', file=zcf) # main function print('function _{} {{'.format(cmd), file=zcf) print('local line', file=zcf) print('_arguments -C', end=BLK, file=zcf) if subcmds: # list of subcommands and their description substrs = ["{}\\:'{}'".format(sub, self.subcmds[sub].help) for sub in subcmds] print('"1:Commands:(({}))"'.format(' '.join(substrs)), end=BLK, file=zcf) self._zsh_comp_command(zcf, None, grouping) if subcmds: print("'*::arg:->args'", file=zcf) print('case $line[1] in', file=zcf) for sub in subcmds: print('{sub}) _{cmd}_{sub} ;;'.format(sub=sub, cmd=cmd), file=zcf) print('esac', file=zcf) print('}', file=zcf) # all subcommand completion handlers for sub in subcmds: print('\nfunction _{}_{} {{'.format(cmd, sub), file=zcf) print('_arguments', end=BLK, file=zcf) self._zsh_comp_command(zcf, sub, grouping) print('}', file=zcf) if sourceable: print('\ncompdef _{0} {0}'.format(cmd), *cmds, file=zcf)
[ "def", "zsh_complete", "(", "self", ",", "path", ",", "cmd", ",", "*", "cmds", ",", "sourceable", "=", "False", ")", ":", "grouping", "=", "internal", ".", "zsh_version", "(", ")", ">=", "(", "5", ",", "4", ")", "path", "=", "pathlib", ".", "Path", "(", "path", ")", "firstline", "=", "[", "'#compdef'", ",", "cmd", "]", "firstline", ".", "extend", "(", "cmds", ")", "subcmds", "=", "list", "(", "self", ".", "subcmds", ".", "keys", "(", ")", ")", "with", "path", ".", "open", "(", "'w'", ")", "as", "zcf", ":", "print", "(", "*", "firstline", ",", "end", "=", "'\\n\\n'", ",", "file", "=", "zcf", ")", "# main function", "print", "(", "'function _{} {{'", ".", "format", "(", "cmd", ")", ",", "file", "=", "zcf", ")", "print", "(", "'local line'", ",", "file", "=", "zcf", ")", "print", "(", "'_arguments -C'", ",", "end", "=", "BLK", ",", "file", "=", "zcf", ")", "if", "subcmds", ":", "# list of subcommands and their description", "substrs", "=", "[", "\"{}\\\\:'{}'\"", ".", "format", "(", "sub", ",", "self", ".", "subcmds", "[", "sub", "]", ".", "help", ")", "for", "sub", "in", "subcmds", "]", "print", "(", "'\"1:Commands:(({}))\"'", ".", "format", "(", "' '", ".", "join", "(", "substrs", ")", ")", ",", "end", "=", "BLK", ",", "file", "=", "zcf", ")", "self", ".", "_zsh_comp_command", "(", "zcf", ",", "None", ",", "grouping", ")", "if", "subcmds", ":", "print", "(", "\"'*::arg:->args'\"", ",", "file", "=", "zcf", ")", "print", "(", "'case $line[1] in'", ",", "file", "=", "zcf", ")", "for", "sub", "in", "subcmds", ":", "print", "(", "'{sub}) _{cmd}_{sub} ;;'", ".", "format", "(", "sub", "=", "sub", ",", "cmd", "=", "cmd", ")", ",", "file", "=", "zcf", ")", "print", "(", "'esac'", ",", "file", "=", "zcf", ")", "print", "(", "'}'", ",", "file", "=", "zcf", ")", "# all subcommand completion handlers", "for", "sub", "in", "subcmds", ":", "print", "(", "'\\nfunction _{}_{} {{'", ".", "format", "(", "cmd", ",", "sub", ")", ",", "file", "=", "zcf", ")", "print", "(", "'_arguments'", ",", "end", "=", "BLK", ",", "file", "=", "zcf", ")", "self", ".", "_zsh_comp_command", "(", "zcf", ",", "sub", ",", "grouping", ")", "print", "(", "'}'", ",", "file", "=", "zcf", ")", "if", "sourceable", ":", "print", "(", "'\\ncompdef _{0} {0}'", ".", "format", "(", "cmd", ")", ",", "*", "cmds", ",", "file", "=", "zcf", ")" ]
Write zsh compdef script. Args: path (path-like): desired path of the compdef script. cmd (str): command name that should be completed. cmds (str): extra command names that should be completed. sourceable (bool): if True, the generated file will contain an explicit call to ``compdef``, which means it can be sourced to activate CLI completion.
[ "Write", "zsh", "compdef", "script", "." ]
python
test
memphis-iis/GLUDB
gludb/backends/dynamodb.py
https://github.com/memphis-iis/GLUDB/blob/25692528ff6fe8184a3570f61f31f1a90088a388/gludb/backends/dynamodb.py#L116-L133
def ensure_table(self, cls): """Required functionality.""" exists = True conn = get_conn() try: descrip = conn.describe_table(cls.get_table_name()) assert descrip is not None except ResourceNotFoundException: # Expected - this is what we get if there is no table exists = False except JSONResponseError: # Also assuming no table exists = False if not exists: table = self.table_schema_call(Table.create, cls) assert table is not None
[ "def", "ensure_table", "(", "self", ",", "cls", ")", ":", "exists", "=", "True", "conn", "=", "get_conn", "(", ")", "try", ":", "descrip", "=", "conn", ".", "describe_table", "(", "cls", ".", "get_table_name", "(", ")", ")", "assert", "descrip", "is", "not", "None", "except", "ResourceNotFoundException", ":", "# Expected - this is what we get if there is no table", "exists", "=", "False", "except", "JSONResponseError", ":", "# Also assuming no table", "exists", "=", "False", "if", "not", "exists", ":", "table", "=", "self", ".", "table_schema_call", "(", "Table", ".", "create", ",", "cls", ")", "assert", "table", "is", "not", "None" ]
Required functionality.
[ "Required", "functionality", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/frontend/qt/console/rich_ipython_widget.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/console/rich_ipython_widget.py#L113-L134
def _handle_pyout(self, msg): """ Overridden to handle rich data types, like SVG. """ if not self._hidden and self._is_from_this_session(msg): content = msg['content'] prompt_number = content.get('execution_count', 0) data = content['data'] if data.has_key('image/svg+xml'): self._pre_image_append(msg, prompt_number) self._append_svg(data['image/svg+xml'], True) self._append_html(self.output_sep2, True) elif data.has_key('image/png'): self._pre_image_append(msg, prompt_number) self._append_png(decodestring(data['image/png'].encode('ascii')), True) self._append_html(self.output_sep2, True) elif data.has_key('image/jpeg') and self._jpg_supported: self._pre_image_append(msg, prompt_number) self._append_jpg(decodestring(data['image/jpeg'].encode('ascii')), True) self._append_html(self.output_sep2, True) else: # Default back to the plain text representation. return super(RichIPythonWidget, self)._handle_pyout(msg)
[ "def", "_handle_pyout", "(", "self", ",", "msg", ")", ":", "if", "not", "self", ".", "_hidden", "and", "self", ".", "_is_from_this_session", "(", "msg", ")", ":", "content", "=", "msg", "[", "'content'", "]", "prompt_number", "=", "content", ".", "get", "(", "'execution_count'", ",", "0", ")", "data", "=", "content", "[", "'data'", "]", "if", "data", ".", "has_key", "(", "'image/svg+xml'", ")", ":", "self", ".", "_pre_image_append", "(", "msg", ",", "prompt_number", ")", "self", ".", "_append_svg", "(", "data", "[", "'image/svg+xml'", "]", ",", "True", ")", "self", ".", "_append_html", "(", "self", ".", "output_sep2", ",", "True", ")", "elif", "data", ".", "has_key", "(", "'image/png'", ")", ":", "self", ".", "_pre_image_append", "(", "msg", ",", "prompt_number", ")", "self", ".", "_append_png", "(", "decodestring", "(", "data", "[", "'image/png'", "]", ".", "encode", "(", "'ascii'", ")", ")", ",", "True", ")", "self", ".", "_append_html", "(", "self", ".", "output_sep2", ",", "True", ")", "elif", "data", ".", "has_key", "(", "'image/jpeg'", ")", "and", "self", ".", "_jpg_supported", ":", "self", ".", "_pre_image_append", "(", "msg", ",", "prompt_number", ")", "self", ".", "_append_jpg", "(", "decodestring", "(", "data", "[", "'image/jpeg'", "]", ".", "encode", "(", "'ascii'", ")", ")", ",", "True", ")", "self", ".", "_append_html", "(", "self", ".", "output_sep2", ",", "True", ")", "else", ":", "# Default back to the plain text representation.", "return", "super", "(", "RichIPythonWidget", ",", "self", ")", ".", "_handle_pyout", "(", "msg", ")" ]
Overridden to handle rich data types, like SVG.
[ "Overridden", "to", "handle", "rich", "data", "types", "like", "SVG", "." ]
python
test
tchellomello/python-arlo
pyarlo/camera.py
https://github.com/tchellomello/python-arlo/blob/db70aeb81705309c56ad32bbab1094f6cd146524/pyarlo/camera.py#L244-L258
def triggers(self): """Get a camera's triggers.""" capabilities = self.capabilities if not capabilities: return None for capability in capabilities: if not isinstance(capability, dict): continue triggers = capability.get("Triggers") if triggers: return triggers return None
[ "def", "triggers", "(", "self", ")", ":", "capabilities", "=", "self", ".", "capabilities", "if", "not", "capabilities", ":", "return", "None", "for", "capability", "in", "capabilities", ":", "if", "not", "isinstance", "(", "capability", ",", "dict", ")", ":", "continue", "triggers", "=", "capability", ".", "get", "(", "\"Triggers\"", ")", "if", "triggers", ":", "return", "triggers", "return", "None" ]
Get a camera's triggers.
[ "Get", "a", "camera", "s", "triggers", "." ]
python
train
cosven/feeluown-core
fuocore/netease/models.py
https://github.com/cosven/feeluown-core/blob/62dc64638f62971b16be0a75c0b8c7ae2999869e/fuocore/netease/models.py#L67-L88
def url(self):
    """
    We will always check if this song file exists in local library,
    if true, we return the url of the local file.

    .. note::

        As netease song url will be expired after a period of time,
        we can not use static url here. Currently, we assume that the
        expiration time is 20 minutes, after the url expires, it will
        be automatically refreshed.
    """
    local_path = self._find_in_local()
    if local_path:
        return local_path

    if not self._url:
        self._refresh_url()
    elif time.time() > self._expired_at:
        logger.info('song({}) url is expired, refresh...'.format(self))
        self._refresh_url()

    return self._url
[ "def", "url", "(", "self", ")", ":", "local_path", "=", "self", ".", "_find_in_local", "(", ")", "if", "local_path", ":", "return", "local_path", "if", "not", "self", ".", "_url", ":", "self", ".", "_refresh_url", "(", ")", "elif", "time", ".", "time", "(", ")", ">", "self", ".", "_expired_at", ":", "logger", ".", "info", "(", "'song({}) url is expired, refresh...'", ".", "format", "(", "self", ")", ")", "self", ".", "_refresh_url", "(", ")", "return", "self", ".", "_url" ]
We will always check if this song file exists in local library,
if true, we return the url of the local file.

.. note::

    As netease song url will be expired after a period of time,
    we can not use static url here. Currently, we assume that the
    expiration time is 20 minutes, after the url expires, it will
    be automatically refreshed.
[ "We", "will", "always", "check", "if", "this", "song", "file", "exists", "in", "local", "library", "if", "true", "we", "return", "the", "url", "of", "the", "local", "file", "." ]
python
train
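The url property above is a cache-with-expiry pattern: serve the local file if one exists, otherwise hand out a cached remote URL and refresh it once its lifetime lapses. A stripped-down sketch with an assumed 20-minute TTL and a stubbed refresh:

import time

class ExpiringUrl:
    TTL = 20 * 60  # assumed 20-minute expiry from the record's note

    def __init__(self):
        self._url = None
        self._expired_at = 0

    def _refresh_url(self):
        # Stand-in for the real API call that fetches a fresh URL.
        self._url = 'https://example.com/song?ts=%d' % int(time.time())
        self._expired_at = time.time() + self.TTL

    @property
    def url(self):
        if not self._url or time.time() > self._expired_at:
            self._refresh_url()
        return self._url

song = ExpiringUrl()
print(song.url == song.url)  # True: the cached URL is reused until the TTL lapses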
g2p/bedup
bedup/platform/chattr.py
https://github.com/g2p/bedup/blob/9694f6f718844c33017052eb271f68b6c0d0b7d3/bedup/platform/chattr.py#L74-L82
def getflags(fd): """ Gets per-file filesystem flags. """ flags_ptr = ffi.new('uint64_t*') flags_buf = ffi.buffer(flags_ptr) fcntl.ioctl(fd, lib.FS_IOC_GETFLAGS, flags_buf) return flags_ptr[0]
[ "def", "getflags", "(", "fd", ")", ":", "flags_ptr", "=", "ffi", ".", "new", "(", "'uint64_t*'", ")", "flags_buf", "=", "ffi", ".", "buffer", "(", "flags_ptr", ")", "fcntl", ".", "ioctl", "(", "fd", ",", "lib", ".", "FS_IOC_GETFLAGS", ",", "flags_buf", ")", "return", "flags_ptr", "[", "0", "]" ]
Gets per-file filesystem flags.
[ "Gets", "per", "-", "file", "filesystem", "flags", "." ]
python
train
datacamp/protowhat
protowhat/checks/check_funcs.py
https://github.com/datacamp/protowhat/blob/a392b4e51e07a2e50e7b7f6ad918b3f5cbb63edc/protowhat/checks/check_funcs.py#L37-L101
def check_node( state, name, index=0, missing_msg="Check the {ast_path}. Could not find the {index}{node_name}.", priority=None, ): """Select a node from abstract syntax tree (AST), using its name and index position. Args: state: State instance describing student and solution code. Can be omitted if used with Ex(). name : the name of the abstract syntax tree node to find. index: the position of that node (see below for details). missing_msg: feedback message if node is not in student AST. priority: the priority level of the node being searched for. This determines whether to descend into other AST nodes during the search. Higher priority nodes descend into lower priority. Currently, the only important part of priority is that setting a very high priority (e.g. 99) will search every node. :Example: If both the student and solution code are.. :: SELECT a FROM b; SELECT x FROM y; then we can focus on the first select with:: # approach 1: with manually created State instance state = State(*args, **kwargs) new_state = check_node(state, 'SelectStmt', 0) # approach 2: with Ex and chaining new_state = Ex().check_node('SelectStmt', 0) """ df = partial(state.ast_dispatcher, name, priority=priority) sol_stmt_list = df(state.solution_ast) try: sol_stmt = sol_stmt_list[index] except IndexError: raise IndexError("Can't get %s statement at index %s" % (name, index)) stu_stmt_list = df(state.student_ast) try: stu_stmt = stu_stmt_list[index] except IndexError: # use speaker on ast dialect module to get message, or fall back to generic ast_path = state.get_ast_path() or "highlighted code" _msg = state.ast_dispatcher.describe( sol_stmt, missing_msg, index=index, ast_path=ast_path ) if _msg is None: _msg = MSG_CHECK_FALLBACK state.report(Feedback(_msg)) action = { "type": "check_node", "kwargs": {"name": name, "index": index}, "node": stu_stmt, } return state.to_child( student_ast=stu_stmt, solution_ast=sol_stmt, history=state.history + (action,) )
[ "def", "check_node", "(", "state", ",", "name", ",", "index", "=", "0", ",", "missing_msg", "=", "\"Check the {ast_path}. Could not find the {index}{node_name}.\"", ",", "priority", "=", "None", ",", ")", ":", "df", "=", "partial", "(", "state", ".", "ast_dispatcher", ",", "name", ",", "priority", "=", "priority", ")", "sol_stmt_list", "=", "df", "(", "state", ".", "solution_ast", ")", "try", ":", "sol_stmt", "=", "sol_stmt_list", "[", "index", "]", "except", "IndexError", ":", "raise", "IndexError", "(", "\"Can't get %s statement at index %s\"", "%", "(", "name", ",", "index", ")", ")", "stu_stmt_list", "=", "df", "(", "state", ".", "student_ast", ")", "try", ":", "stu_stmt", "=", "stu_stmt_list", "[", "index", "]", "except", "IndexError", ":", "# use speaker on ast dialect module to get message, or fall back to generic", "ast_path", "=", "state", ".", "get_ast_path", "(", ")", "or", "\"highlighted code\"", "_msg", "=", "state", ".", "ast_dispatcher", ".", "describe", "(", "sol_stmt", ",", "missing_msg", ",", "index", "=", "index", ",", "ast_path", "=", "ast_path", ")", "if", "_msg", "is", "None", ":", "_msg", "=", "MSG_CHECK_FALLBACK", "state", ".", "report", "(", "Feedback", "(", "_msg", ")", ")", "action", "=", "{", "\"type\"", ":", "\"check_node\"", ",", "\"kwargs\"", ":", "{", "\"name\"", ":", "name", ",", "\"index\"", ":", "index", "}", ",", "\"node\"", ":", "stu_stmt", ",", "}", "return", "state", ".", "to_child", "(", "student_ast", "=", "stu_stmt", ",", "solution_ast", "=", "sol_stmt", ",", "history", "=", "state", ".", "history", "+", "(", "action", ",", ")", ")" ]
Select a node from abstract syntax tree (AST), using its name and index position. Args: state: State instance describing student and solution code. Can be omitted if used with Ex(). name : the name of the abstract syntax tree node to find. index: the position of that node (see below for details). missing_msg: feedback message if node is not in student AST. priority: the priority level of the node being searched for. This determines whether to descend into other AST nodes during the search. Higher priority nodes descend into lower priority. Currently, the only important part of priority is that setting a very high priority (e.g. 99) will search every node. :Example: If both the student and solution code are.. :: SELECT a FROM b; SELECT x FROM y; then we can focus on the first select with:: # approach 1: with manually created State instance state = State(*args, **kwargs) new_state = check_node(state, 'SelectStmt', 0) # approach 2: with Ex and chaining new_state = Ex().check_node('SelectStmt', 0)
[ "Select", "a", "node", "from", "abstract", "syntax", "tree", "(", "AST", ")", "using", "its", "name", "and", "index", "position", "." ]
python
train
DistrictDataLabs/yellowbrick
yellowbrick/features/pcoords.py
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/features/pcoords.py#L488-L520
def finalize(self, **kwargs): """ Finalize executes any subclass-specific axes finalization steps. The user calls poof and poof calls finalize. Parameters ---------- kwargs: generic keyword arguments. """ # Set the title self.set_title( 'Parallel Coordinates for {} Features'.format(len(self.features_)) ) # Add the vertical lines # TODO: Make an independent function for override! if self.show_vlines: for idx in self._increments: self.ax.axvline(idx, **self.vlines_kwds) # Set the limits self.ax.set_xticks(self._increments) self.ax.set_xticklabels(self.features_) self.ax.set_xlim(self._increments[0], self._increments[-1]) # Add the legend sorting classes by name labels = sorted(list(self._colors.keys())) colors = [self._colors[lbl] for lbl in labels] manual_legend(self, labels, colors, loc='best', frameon=True) # Add the grid view self.ax.grid()
[ "def", "finalize", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# Set the title", "self", ".", "set_title", "(", "'Parallel Coordinates for {} Features'", ".", "format", "(", "len", "(", "self", ".", "features_", ")", ")", ")", "# Add the vertical lines", "# TODO: Make an independent function for override!", "if", "self", ".", "show_vlines", ":", "for", "idx", "in", "self", ".", "_increments", ":", "self", ".", "ax", ".", "axvline", "(", "idx", ",", "*", "*", "self", ".", "vlines_kwds", ")", "# Set the limits", "self", ".", "ax", ".", "set_xticks", "(", "self", ".", "_increments", ")", "self", ".", "ax", ".", "set_xticklabels", "(", "self", ".", "features_", ")", "self", ".", "ax", ".", "set_xlim", "(", "self", ".", "_increments", "[", "0", "]", ",", "self", ".", "_increments", "[", "-", "1", "]", ")", "# Add the legend sorting classes by name", "labels", "=", "sorted", "(", "list", "(", "self", ".", "_colors", ".", "keys", "(", ")", ")", ")", "colors", "=", "[", "self", ".", "_colors", "[", "lbl", "]", "for", "lbl", "in", "labels", "]", "manual_legend", "(", "self", ",", "labels", ",", "colors", ",", "loc", "=", "'best'", ",", "frameon", "=", "True", ")", "# Add the grid view", "self", ".", "ax", ".", "grid", "(", ")" ]
Finalize executes any subclass-specific axes finalization steps. The user calls poof and poof calls finalize. Parameters ---------- kwargs: generic keyword arguments.
[ "Finalize", "executes", "any", "subclass", "-", "specific", "axes", "finalization", "steps", ".", "The", "user", "calls", "poof", "and", "poof", "calls", "finalize", "." ]
python
train
AtomHash/evernode
evernode/classes/email.py
https://github.com/AtomHash/evernode/blob/b2fb91555fb937a3f3eba41db56dee26f9b034be/evernode/classes/email.py#L95-L107
def send(self):
    """
    Construct and execute the sendemail.py script.
    Finds the Python binary path, then uses it
    to execute the email script.
    """
    self.__create()
    email_script = \
        os.path.join(Path(__file__).parents[1], 'scripts', 'sendemail.py')
    if os.path.exists(email_script):
        subprocess.Popen(
            [get_python_path(), email_script, self.__data],
            stdin=None, stdout=None, stderr=None, close_fds=True)
[ "def", "send", "(", "self", ")", ":", "self", ".", "__create", "(", ")", "email_script", "=", "os", ".", "path", ".", "join", "(", "Path", "(", "__file__", ")", ".", "parents", "[", "1", "]", ",", "'scripts'", ",", "'sendemail.py'", ")", "if", "os", ".", "path", ".", "exists", "(", "email_script", ")", ":", "subprocess", ".", "Popen", "(", "[", "get_python_path", "(", ")", ",", "email_script", ",", "self", ".", "__data", "]", ",", "stdin", "=", "None", ",", "stdout", "=", "None", ",", "stderr", "=", "None", ",", "close_fds", "=", "True", ")" ]
Construct and execute the sendemail.py script.
Finds the Python binary path, then uses it
to execute the email script.
[ "Construct", "and", "execute", "sendemail", ".", "py", "script", "Finds", "python", "binary", "by", "os", ".", "py", "then", "uses", "the", "/", "usr", "/", "bin", "/", "python", "to", "execute", "email", "script" ]
python
train
summa-tx/riemann
riemann/tx/tx.py
https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/tx.py#L588-L602
def _sighash_anyone_can_pay(self, index, copy_tx, sighash_type): ''' int, byte-like, Tx, int -> bytes Applies SIGHASH_ANYONECANPAY procedure. Should be called by another SIGHASH procedure. Not on its own. https://en.bitcoin.it/wiki/OP_CHECKSIG#Procedure_for_Hashtype_SIGHASH_ANYONECANPAY ''' # The txCopy input vector is resized to a length of one. copy_tx_ins = [copy_tx.tx_ins[index]] copy_tx = copy_tx.copy(tx_ins=copy_tx_ins) return self._sighash_final_hashing( copy_tx, sighash_type | shared.SIGHASH_ANYONECANPAY)
[ "def", "_sighash_anyone_can_pay", "(", "self", ",", "index", ",", "copy_tx", ",", "sighash_type", ")", ":", "# The txCopy input vector is resized to a length of one.", "copy_tx_ins", "=", "[", "copy_tx", ".", "tx_ins", "[", "index", "]", "]", "copy_tx", "=", "copy_tx", ".", "copy", "(", "tx_ins", "=", "copy_tx_ins", ")", "return", "self", ".", "_sighash_final_hashing", "(", "copy_tx", ",", "sighash_type", "|", "shared", ".", "SIGHASH_ANYONECANPAY", ")" ]
int, byte-like, Tx, int -> bytes Applies SIGHASH_ANYONECANPAY procedure. Should be called by another SIGHASH procedure. Not on its own. https://en.bitcoin.it/wiki/OP_CHECKSIG#Procedure_for_Hashtype_SIGHASH_ANYONECANPAY
[ "int", "byte", "-", "like", "Tx", "int", "-", ">", "bytes", "Applies", "SIGHASH_ANYONECANPAY", "procedure", ".", "Should", "be", "called", "by", "another", "SIGHASH", "procedure", ".", "Not", "on", "its", "own", ".", "https", ":", "//", "en", ".", "bitcoin", ".", "it", "/", "wiki", "/", "OP_CHECKSIG#Procedure_for_Hashtype_SIGHASH_ANYONECANPAY" ]
python
train
ansible/tower-cli
tower_cli/resources/job_template.py
https://github.com/ansible/tower-cli/blob/a2b151fed93c47725018d3034848cb3a1814bed7/tower_cli/resources/job_template.py#L174-L193
def associate_notification_template(self, job_template, notification_template, status):
    """Associate a notification template with this job template.

    =====API DOCS=====
    Associate a notification template with this job template.

    :param job_template: The job template to associate to.
    :type job_template: str
    :param notification_template: The notification template to be associated.
    :type notification_template: str
    :param status: type of notification this notification template should be associated with.
    :type status: str
    :returns: Dictionary of only one key "changed", which indicates whether the association succeeded.
    :rtype: dict

    =====API DOCS=====
    """
    return self._assoc('notification_templates_%s' % status,
                       job_template, notification_template)
[ "def", "associate_notification_template", "(", "self", ",", "job_template", ",", "notification_template", ",", "status", ")", ":", "return", "self", ".", "_assoc", "(", "'notification_templates_%s'", "%", "status", ",", "job_template", ",", "notification_template", ")" ]
Associate a notification template with this job template. =====API DOCS===== Associate a notification template with this job template. :param job_template: The job template to associate to. :type job_template: str :param notification_template: The notification template to be associated. :type notification_template: str :param status: type of notification this notification template should be associated to. :type status: str :returns: Dictionary of only one key "changed", which indicates whether the association succeeded. :rtype: dict =====API DOCS=====
[ "Associate", "a", "notification", "template", "from", "this", "job", "template", "." ]
python
valid
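Since the method is a one-line wrapper, a usage sketch mostly shows how status selects the endpoint; the resource instance and template names below are hypothetical:

# The status argument picks the association endpoint by name:
for status in ('started', 'success', 'error'):
    print('notification_templates_%s' % status)

# Hypothetical call on a JobTemplate resource instance 'res':
# res.associate_notification_template('Deploy app', 'Notify Slack', status='error')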
log2timeline/plaso
plaso/cli/tools.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/tools.py#L332-L360
def ParseNumericOption(self, options, name, base=10, default_value=None): """Parses a numeric option. If the option is not set the default value is returned. Args: options (argparse.Namespace): command line arguments. name (str): name of the numeric option. base (Optional[int]): base of the numeric value. default_value (Optional[object]): default value. Returns: int: numeric value. Raises: BadConfigOption: if the options are invalid. """ numeric_value = getattr(options, name, None) if not numeric_value: return default_value try: return int(numeric_value, base) except (TypeError, ValueError): name = name.replace('_', ' ') raise errors.BadConfigOption( 'Unsupported numeric value {0:s}: {1!s}.'.format( name, numeric_value))
[ "def", "ParseNumericOption", "(", "self", ",", "options", ",", "name", ",", "base", "=", "10", ",", "default_value", "=", "None", ")", ":", "numeric_value", "=", "getattr", "(", "options", ",", "name", ",", "None", ")", "if", "not", "numeric_value", ":", "return", "default_value", "try", ":", "return", "int", "(", "numeric_value", ",", "base", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "name", "=", "name", ".", "replace", "(", "'_'", ",", "' '", ")", "raise", "errors", ".", "BadConfigOption", "(", "'Unsupported numeric value {0:s}: {1!s}.'", ".", "format", "(", "name", ",", "numeric_value", ")", ")" ]
Parses a numeric option. If the option is not set the default value is returned. Args: options (argparse.Namespace): command line arguments. name (str): name of the numeric option. base (Optional[int]): base of the numeric value. default_value (Optional[object]): default value. Returns: int: numeric value. Raises: BadConfigOption: if the options are invalid.
[ "Parses", "a", "numeric", "option", "." ]
python
train
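A condensed, framework-free sketch of the same rule: int(value, base) accepts non-decimal strings, and TypeError/ValueError are exactly the failure modes trapped above (ValueError stands in for plaso's BadConfigOption here):

def parse_numeric(value, base=10, default_value=None):
    if not value:
        return default_value
    try:
        return int(value, base)
    except (TypeError, ValueError):
        raise ValueError('Unsupported numeric value {0!s}.'.format(value))

assert parse_numeric(None, default_value=10) == 10   # unset option -> default
assert parse_numeric('ff', base=16) == 255           # hexadecimal string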
cdriehuys/django-rest-email-auth
rest_email_auth/generics.py
https://github.com/cdriehuys/django-rest-email-auth/blob/7e752c4d77ae02d2d046f214f56e743aa12ab23f/rest_email_auth/generics.py#L14-L35
def post(self, request): """ Save the provided data using the class' serializer. Args: request: The request being made. Returns: An ``APIResponse`` instance. If the request was successful the response will have a 200 status code and contain the serializer's data. Otherwise a 400 status code and the request's errors will be returned. """ serializer = self.get_serializer(data=request.data) if serializer.is_valid(): serializer.save() return Response(serializer.data) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
[ "def", "post", "(", "self", ",", "request", ")", ":", "serializer", "=", "self", ".", "get_serializer", "(", "data", "=", "request", ".", "data", ")", "if", "serializer", ".", "is_valid", "(", ")", ":", "serializer", ".", "save", "(", ")", "return", "Response", "(", "serializer", ".", "data", ")", "return", "Response", "(", "serializer", ".", "errors", ",", "status", "=", "status", ".", "HTTP_400_BAD_REQUEST", ")" ]
Save the provided data using the class' serializer. Args: request: The request being made. Returns: An ``APIResponse`` instance. If the request was successful the response will have a 200 status code and contain the serializer's data. Otherwise a 400 status code and the request's errors will be returned.
[ "Save", "the", "provided", "data", "using", "the", "class", "serializer", "." ]
python
valid
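The control flow is framework-agnostic, so it can be sketched without Django REST framework installed; StubSerializer is a stand-in for any DRF serializer:

def handle_post(serializer):
    # Mirrors the view above: save and echo data on success, else errors + 400.
    if serializer.is_valid():
        serializer.save()
        return 200, serializer.data
    return 400, serializer.errors

class StubSerializer:
    data, errors = {'email': 'user@example.com'}, {}
    def is_valid(self): return True
    def save(self): pass

assert handle_post(StubSerializer()) == (200, {'email': 'user@example.com'})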
trailofbits/manticore
manticore/platforms/linux.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/platforms/linux.py#L1619-L1636
def sys_chroot(self, path): """ An implementation of chroot that does perform some basic error checking, but does not actually chroot. :param path: Path to chroot """ if path not in self.current.memory: return -errno.EFAULT path_s = self.current.read_string(path) if not os.path.exists(path_s): return -errno.ENOENT if not os.path.isdir(path_s): return -errno.ENOTDIR return -errno.EPERM
[ "def", "sys_chroot", "(", "self", ",", "path", ")", ":", "if", "path", "not", "in", "self", ".", "current", ".", "memory", ":", "return", "-", "errno", ".", "EFAULT", "path_s", "=", "self", ".", "current", ".", "read_string", "(", "path", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "path_s", ")", ":", "return", "-", "errno", ".", "ENOENT", "if", "not", "os", ".", "path", ".", "isdir", "(", "path_s", ")", ":", "return", "-", "errno", ".", "ENOTDIR", "return", "-", "errno", ".", "EPERM" ]
An implementation of chroot that does perform some basic error checking, but does not actually chroot. :param path: Path to chroot
[ "An", "implementation", "of", "chroot", "that", "does", "perform", "some", "basic", "error", "checking", "but", "does", "not", "actually", "chroot", "." ]
python
valid
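The check order can be reproduced outside the emulator. This sketch keeps the kernel convention of negative errno return values but uses the host filesystem in place of Manticore's memory and string-reading layer:

import errno
import os

def chroot_checks(path_s):
    if not os.path.exists(path_s):
        return -errno.ENOENT   # no such file or directory
    if not os.path.isdir(path_s):
        return -errno.ENOTDIR  # exists, but is not a directory
    return -errno.EPERM        # valid target, but the chroot is never performed

assert chroot_checks('/definitely/missing/path') == -errno.ENOENT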
jmbhughes/suvi-trainer
suvitrainer/fileio.py
https://github.com/jmbhughes/suvi-trainer/blob/3d89894a4a037286221974c7eb5634d229b4f5d4/suvitrainer/fileio.py#L72-L113
def fetch(self, multithread=True, median_kernel=5, solar_diam=740): """ For all products in products, will call the correct fetch routine and download an image :param multithread: if true will fetch the files simultaneously :type multithread: bool :param median_kernel: the size of the kernel to smooth by :type median_kernel: int >= 0 :return: a dictionary of all fetched products :rtype: dict from product string to (header, data) tuple """ # helper function to pull data def func_map(product): """ determines which function to call for a specific product and gets :param product: which product to fetch :type product: str :return: product tuple :rtype: (header, data) """ if "halpha" in product: result = self.fetch_halpha(median_kernel=median_kernel) elif "aia" in product: result = self.fetch_aia(product, median_kernel=median_kernel) elif "l1b" in product: result = self.fetch_suvi_l1b(product, median_kernel=median_kernel) elif "l2-ci" in product: result = self.fetch_suvi_composite(product, median_kernel=median_kernel) elif "limb" in product: result = self.fetch_limb(solar_diam) else: raise ValueError("{} is not a valid product.".format(product)) return result if multithread: pool = ThreadPool() results = pool.map(func_map, self.products) else: results = [func_map(product) for product in self.products] results = {product: (head, data) for product, head, data in results} return results
[ "def", "fetch", "(", "self", ",", "multithread", "=", "True", ",", "median_kernel", "=", "5", ",", "solar_diam", "=", "740", ")", ":", "# helper function to pull data", "def", "func_map", "(", "product", ")", ":", "\"\"\"\n determines which function to call for a specific product and gets\n :param product: which product to fetch\n :type product: str\n :return: product tuple\n :rtype: (header, data)\n \"\"\"", "if", "\"halpha\"", "in", "product", ":", "result", "=", "self", ".", "fetch_halpha", "(", "median_kernel", "=", "median_kernel", ")", "elif", "\"aia\"", "in", "product", ":", "result", "=", "self", ".", "fetch_aia", "(", "product", ",", "median_kernel", "=", "median_kernel", ")", "elif", "\"l1b\"", "in", "product", ":", "result", "=", "self", ".", "fetch_suvi_l1b", "(", "product", ",", "median_kernel", "=", "median_kernel", ")", "elif", "\"l2-ci\"", "in", "product", ":", "result", "=", "self", ".", "fetch_suvi_composite", "(", "product", ",", "median_kernel", "=", "median_kernel", ")", "elif", "\"limb\"", "in", "product", ":", "result", "=", "self", ".", "fetch_limb", "(", "solar_diam", ")", "else", ":", "raise", "ValueError", "(", "\"{} is not a valid product.\"", ".", "format", "(", "product", ")", ")", "return", "result", "if", "multithread", ":", "pool", "=", "ThreadPool", "(", ")", "results", "=", "pool", ".", "map", "(", "func_map", ",", "self", ".", "products", ")", "else", ":", "results", "=", "[", "func_map", "(", "product", ")", "for", "product", "in", "self", ".", "products", "]", "results", "=", "{", "product", ":", "(", "head", ",", "data", ")", "for", "product", ",", "head", ",", "data", "in", "results", "}", "return", "results" ]
For all products in products, will call the correct fetch routine and download an image :param multithread: if true will fetch the files simultaneously :type multithread: bool :param median_kernel: the size of the kernel to smooth by :type median_kernel: int >= 0 :return: a dictionary of all fetched products :rtype: dict from product string to (header, data) tuple
[ "For", "all", "products", "in", "products", "will", "call", "the", "correct", "fetch", "routine", "and", "download", "an", "image", ":", "param", "multithread", ":", "if", "true", "will", "fetch", "the", "files", "simultaneously", ":", "type", "multithread", ":", "bool", ":", "param", "median_kernel", ":", "the", "size", "of", "the", "kernel", "to", "smooth", "by", ":", "type", "median_kernel", ":", "int", ">", "=", "0", ":", "return", ":", "a", "dictionary", "of", "all", "fetched", "products", ":", "rtype", ":", "dict", "from", "product", "string", "to", "(", "header", "data", ")", "tuple" ]
python
train
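The dispatch-plus-pool pattern stands on its own; below is a reduced sketch with dummy fetchers standing in for the real AIA/SUVI routines:

from multiprocessing.pool import ThreadPool

def func_map(product):
    # Name-based dispatch, as in fetch() above (headers/data are stand-ins).
    if 'aia' in product:
        return product, {'instrument': 'AIA'}, [1, 2, 3]
    elif 'halpha' in product:
        return product, {'instrument': 'H-alpha'}, [4, 5, 6]
    raise ValueError('{} is not a valid product.'.format(product))

products = ['aia-171', 'halpha']
results = ThreadPool().map(func_map, products)
results = {product: (head, data) for product, head, data in results}
assert set(results) == {'aia-171', 'halpha'}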
mongodb/mongo-python-driver
pymongo/command_cursor.py
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/command_cursor.py#L122-L167
def __send_message(self, operation): """Send a getmore message and handle the response. """ def kill(): self.__killed = True self.__end_session(True) client = self.__collection.database.client try: response = client._run_operation_with_response( operation, self._unpack_response, address=self.__address) except OperationFailure: kill() raise except NotMasterError: # Don't send kill cursors to another server after a "not master" # error. It's completely pointless. kill() raise except ConnectionFailure: # Don't try to send kill cursors on another socket # or to another server. It can cause a _pinValue # assertion on some server releases if we get here # due to a socket timeout. kill() raise except Exception: # Close the cursor self.__die() raise from_command = response.from_command reply = response.data docs = response.docs if from_command: cursor = docs[0]['cursor'] documents = cursor['nextBatch'] self.__id = cursor['id'] else: documents = docs self.__id = reply.cursor_id if self.__id == 0: kill() self.__data = deque(documents)
[ "def", "__send_message", "(", "self", ",", "operation", ")", ":", "def", "kill", "(", ")", ":", "self", ".", "__killed", "=", "True", "self", ".", "__end_session", "(", "True", ")", "client", "=", "self", ".", "__collection", ".", "database", ".", "client", "try", ":", "response", "=", "client", ".", "_run_operation_with_response", "(", "operation", ",", "self", ".", "_unpack_response", ",", "address", "=", "self", ".", "__address", ")", "except", "OperationFailure", ":", "kill", "(", ")", "raise", "except", "NotMasterError", ":", "# Don't send kill cursors to another server after a \"not master\"", "# error. It's completely pointless.", "kill", "(", ")", "raise", "except", "ConnectionFailure", ":", "# Don't try to send kill cursors on another socket", "# or to another server. It can cause a _pinValue", "# assertion on some server releases if we get here", "# due to a socket timeout.", "kill", "(", ")", "raise", "except", "Exception", ":", "# Close the cursor", "self", ".", "__die", "(", ")", "raise", "from_command", "=", "response", ".", "from_command", "reply", "=", "response", ".", "data", "docs", "=", "response", ".", "docs", "if", "from_command", ":", "cursor", "=", "docs", "[", "0", "]", "[", "'cursor'", "]", "documents", "=", "cursor", "[", "'nextBatch'", "]", "self", ".", "__id", "=", "cursor", "[", "'id'", "]", "else", ":", "documents", "=", "docs", "self", ".", "__id", "=", "reply", ".", "cursor_id", "if", "self", ".", "__id", "==", "0", ":", "kill", "(", ")", "self", ".", "__data", "=", "deque", "(", "documents", ")" ]
Send a getmore message and handle the response.
[ "Send", "a", "getmore", "message", "and", "handle", "the", "response", "." ]
python
train
diffeo/rejester
rejester/_task_master.py
https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/_task_master.py#L1596-L1599
def nice(self, work_spec_name, nice): '''Change the priority of an existing work spec.''' with self.registry.lock(identifier=self.worker_id) as session: session.update(NICE_LEVELS, {work_spec_name: nice})
[ "def", "nice", "(", "self", ",", "work_spec_name", ",", "nice", ")", ":", "with", "self", ".", "registry", ".", "lock", "(", "identifier", "=", "self", ".", "worker_id", ")", "as", "session", ":", "session", ".", "update", "(", "NICE_LEVELS", ",", "dict", "(", "work_spec_name", "=", "nice", ")", ")" ]
Change the priority of an existing work spec.
[ "Change", "the", "priority", "of", "an", "existing", "work", "spec", "." ]
python
train
TrafficSenseMSD/SumoTools
traci/_vehicle.py
https://github.com/TrafficSenseMSD/SumoTools/blob/8607b4f885f1d1798e43240be643efe6dccccdaa/traci/_vehicle.py#L1161-L1167
def setTau(self, vehID, tau): """setTau(string, double) -> None Sets the driver's tau-parameter (reaction time or anticipation time depending on the car-following model) in s for this vehicle. """ self._connection._sendDoubleCmd( tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_TAU, vehID, tau)
[ "def", "setTau", "(", "self", ",", "vehID", ",", "tau", ")", ":", "self", ".", "_connection", ".", "_sendDoubleCmd", "(", "tc", ".", "CMD_SET_VEHICLE_VARIABLE", ",", "tc", ".", "VAR_TAU", ",", "vehID", ",", "tau", ")" ]
setTau(string, double) -> None Sets the driver's tau-parameter (reaction time or anticipation time depending on the car-following model) in s for this vehicle.
[ "setTau", "(", "string", "double", ")", "-", ">", "None" ]
python
train
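Usage requires a live SUMO simulation, so the sketch is left commented; the vehicle id and config path are placeholders:

# import traci
# traci.start(['sumo', '-c', 'scenario.sumocfg'])  # hypothetical config file
# traci.vehicle.setTau('veh0', 1.2)                # 1.2 s reaction/anticipation time
# traci.simulationStep()
# traci.close()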
JarryShaw/PyPCAPKit
src/interface/__init__.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/interface/__init__.py#L134-L151
def analyse(file, length=None): """Analyse application layer packets. Keyword arguments: * file -- bytes or file-like object, packet to be analysed * length -- int, length of the analysing packet Returns: * Analysis -- an Analysis object from `pcapkit.analyser` """ if isinstance(file, bytes): file = io.BytesIO(file) io_check(file) int_check(length or sys.maxsize) return analyse2(file, length)
[ "def", "analyse", "(", "file", ",", "length", "=", "None", ")", ":", "if", "isinstance", "(", "file", ",", "bytes", ")", ":", "file", "=", "io", ".", "BytesIO", "(", "file", ")", "io_check", "(", "file", ")", "int_check", "(", "length", "or", "sys", ".", "maxsize", ")", "return", "analyse2", "(", "file", ",", "length", ")" ]
Analyse application layer packets. Keyword arguments: * file -- bytes or file-like object, packet to be analysed * length -- int, length of the analysing packet Returns: * Analysis -- an Analysis object from `pcapkit.analyser`
[ "Analyse", "application", "layer", "packets", "." ]
python
train
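A usage sketch of the bytes/file-object duality described in the docstring (commented, since it needs a real payload; the import path is inferred from the module location above):

# from pcapkit.interface import analyse
# report = analyse(b'\x16\x03\x01...')  # bytes are wrapped in io.BytesIO first
# with open('payload.bin', 'rb') as fh:
#     report = analyse(fh, length=64)   # file-like objects pass straight through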
fbcotter/py3nvml
py3nvml/py3nvml.py
https://github.com/fbcotter/py3nvml/blob/47f0f2c0eee56dec4e4beebec26b734e01d357b7/py3nvml/py3nvml.py#L3033-L3063
def nvmlDeviceGetPowerManagementLimit(handle): r""" /** * Retrieves the power management limit associated with this device. * * For Fermi &tm; or newer fully supported devices. * * The power limit defines the upper boundary for the card's power draw. If * the card's total power draw reaches this limit the power management algorithm kicks in. * * This reading is only available if power management mode is supported. * See \ref nvmlDeviceGetPowerManagementMode. * * @param device The identifier of the target device * @param limit Reference in which to return the power management limit in milliwatts * * @return * - \ref NVML_SUCCESS if \a limit has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a limit is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementLimit """ c_limit = c_uint() fn = _nvmlGetFunctionPointer("nvmlDeviceGetPowerManagementLimit") ret = fn(handle, byref(c_limit)) _nvmlCheckReturn(ret) return bytes_to_str(c_limit.value)
[ "def", "nvmlDeviceGetPowerManagementLimit", "(", "handle", ")", ":", "c_limit", "=", "c_uint", "(", ")", "fn", "=", "_nvmlGetFunctionPointer", "(", "\"nvmlDeviceGetPowerManagementLimit\"", ")", "ret", "=", "fn", "(", "handle", ",", "byref", "(", "c_limit", ")", ")", "_nvmlCheckReturn", "(", "ret", ")", "return", "bytes_to_str", "(", "c_limit", ".", "value", ")" ]
r""" /** * Retrieves the power management limit associated with this device. * * For Fermi &tm; or newer fully supported devices. * * The power limit defines the upper boundary for the card's power draw. If * the card's total power draw reaches this limit the power management algorithm kicks in. * * This reading is only available if power management mode is supported. * See \ref nvmlDeviceGetPowerManagementMode. * * @param device The identifier of the target device * @param limit Reference in which to return the power management limit in milliwatts * * @return * - \ref NVML_SUCCESS if \a limit has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a limit is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementLimit
[ "r", "/", "**", "*", "Retrieves", "the", "power", "management", "limit", "associated", "with", "this", "device", ".", "*", "*", "For", "Fermi", "&tm", ";", "or", "newer", "fully", "supported", "devices", ".", "*", "*", "The", "power", "limit", "defines", "the", "upper", "boundary", "for", "the", "card", "s", "power", "draw", ".", "If", "*", "the", "card", "s", "total", "power", "draw", "reaches", "this", "limit", "the", "power", "management", "algorithm", "kicks", "in", ".", "*", "*", "This", "reading", "is", "only", "available", "if", "power", "management", "mode", "is", "supported", ".", "*", "See", "\\", "ref", "nvmlDeviceGetPowerManagementMode", ".", "*", "*" ]
python
train
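A usage sketch (commented out, since it needs an NVIDIA GPU with NVML available); the init/handle/shutdown calls around it are the standard py3nvml sequence:

# from py3nvml import py3nvml
# py3nvml.nvmlInit()
# handle = py3nvml.nvmlDeviceGetHandleByIndex(0)
# print(py3nvml.nvmlDeviceGetPowerManagementLimit(handle), 'mW')
# py3nvml.nvmlShutdown()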
MisanthropicBit/colorise
colorise/BaseColorManager.py
https://github.com/MisanthropicBit/colorise/blob/e630df74b8b27680a43c370ddbe98766be50158c/colorise/BaseColorManager.py#L34-L36
def set_color(self, fg=None, bg=None, intensify=False, target=sys.stdout): """Set foreground- and background colors and intensity.""" raise NotImplementedError
[ "def", "set_color", "(", "self", ",", "fg", "=", "None", ",", "bg", "=", "None", ",", "intensify", "=", "False", ",", "target", "=", "sys", ".", "stdout", ")", ":", "raise", "NotImplementedError" ]
Set foreground- and background colors and intensity.
[ "Set", "foreground", "-", "and", "background", "colors", "and", "intensity", "." ]
python
train
Alignak-monitoring/alignak
alignak/objects/servicegroup.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/servicegroup.py#L260-L285
def explode(self): """ Get services and put them in members container :return: None """ # We do not want the same service group to be exploded again and again # so we tag it for tmp_sg in list(self.items.values()): tmp_sg.already_exploded = False for servicegroup in list(self.items.values()): if servicegroup.already_exploded: continue # get_services_by_explosion is a recursive # function, so we must tag each servicegroup so we do not loop for tmp_sg in list(self.items.values()): tmp_sg.rec_tag = False servicegroup.get_services_by_explosion(self) # We clean the tags for tmp_sg in list(self.items.values()): if hasattr(tmp_sg, 'rec_tag'): del tmp_sg.rec_tag del tmp_sg.already_exploded
[ "def", "explode", "(", "self", ")", ":", "# We do not want a same service group to be exploded again and again", "# so we tag it", "for", "tmp_sg", "in", "list", "(", "self", ".", "items", ".", "values", "(", ")", ")", ":", "tmp_sg", ".", "already_exploded", "=", "False", "for", "servicegroup", "in", "list", "(", "self", ".", "items", ".", "values", "(", ")", ")", ":", "if", "servicegroup", ".", "already_exploded", ":", "continue", "# get_services_by_explosion is a recursive", "# function, so we must tag hg so we do not loop", "for", "tmp_sg", "in", "list", "(", "self", ".", "items", ".", "values", "(", ")", ")", ":", "tmp_sg", ".", "rec_tag", "=", "False", "servicegroup", ".", "get_services_by_explosion", "(", "self", ")", "# We clean the tags", "for", "tmp_sg", "in", "list", "(", "self", ".", "items", ".", "values", "(", ")", ")", ":", "if", "hasattr", "(", "tmp_sg", ",", "'rec_tag'", ")", ":", "del", "tmp_sg", ".", "rec_tag", "del", "tmp_sg", ".", "already_exploded" ]
Get services and put them in members container :return: None
[ "Get", "services", "and", "put", "them", "in", "members", "container" ]
python
train
limix/glimix-core
glimix_core/random/_canonical.py
https://github.com/limix/glimix-core/blob/cddd0994591d100499cc41c1f480ddd575e7a980/glimix_core/random/_canonical.py#L110-L144
def poisson_sample( offset, G, heritability=0.5, causal_variants=None, causal_variance=0, random_state=None, ): """Poisson likelihood sampling. Parameters ---------- random_state : random_state Set the initial random state. Example ------- .. doctest:: >>> from glimix_core.random import poisson_sample >>> from numpy.random import RandomState >>> offset = -0.5 >>> G = [[0.5, -1], [2, 1]] >>> poisson_sample(offset, G, random_state=RandomState(0)) array([0, 6]) """ mean, cov = _mean_cov( offset, G, heritability, causal_variants, causal_variance, random_state ) link = LogLink() lik = PoissonProdLik(link) sampler = GGPSampler(lik, mean, cov) return sampler.sample(random_state)
[ "def", "poisson_sample", "(", "offset", ",", "G", ",", "heritability", "=", "0.5", ",", "causal_variants", "=", "None", ",", "causal_variance", "=", "0", ",", "random_state", "=", "None", ",", ")", ":", "mean", ",", "cov", "=", "_mean_cov", "(", "offset", ",", "G", ",", "heritability", ",", "causal_variants", ",", "causal_variance", ",", "random_state", ")", "link", "=", "LogLink", "(", ")", "lik", "=", "PoissonProdLik", "(", "link", ")", "sampler", "=", "GGPSampler", "(", "lik", ",", "mean", ",", "cov", ")", "return", "sampler", ".", "sample", "(", "random_state", ")" ]
Poisson likelihood sampling. Parameters ---------- random_state : random_state Set the initial random state. Example ------- .. doctest:: >>> from glimix_core.random import poisson_sample >>> from numpy.random import RandomState >>> offset = -0.5 >>> G = [[0.5, -1], [2, 1]] >>> poisson_sample(offset, G, random_state=RandomState(0)) array([0, 6])
[ "Poisson", "likelihood", "sampling", "." ]
python
valid
noirbizarre/flask-fs
flask_fs/storage.py
https://github.com/noirbizarre/flask-fs/blob/092e9327384b8411c9bb38ca257ecb558584d201/flask_fs/storage.py#L220-L229
def read(self, filename): ''' Read a file content. :param string filename: The storage root-relative filename :raises FileNotFound: If the file does not exist ''' if not self.backend.exists(filename): raise FileNotFound(filename) return self.backend.read(filename)
[ "def", "read", "(", "self", ",", "filename", ")", ":", "if", "not", "self", ".", "backend", ".", "exists", "(", "filename", ")", ":", "raise", "FileNotFound", "(", "filename", ")", "return", "self", ".", "backend", ".", "read", "(", "filename", ")" ]
Read a file content. :param string filename: The storage root-relative filename :raises FileNotFound: If the file does not exist
[ "Read", "a", "file", "content", "." ]
python
train
IrvKalb/pygwidgets
pygwidgets/pygwidgets.py
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L770-L837
def handleEvent(self, eventObj): """This method should be called every time through the main loop. It handles showing the up, over, and down states of the button. Parameters: | eventObj - the event object obtained by calling pygame.event.get() Returns: | False most of the time | True when the user has toggled the checkbox. """ if eventObj.type not in (MOUSEMOTION, MOUSEBUTTONUP, MOUSEBUTTONDOWN) or not self.visible: # The checkBox only cares about mouse-related events (or no events, if it is invisible) return False if not self.isEnabled: return False clicked = False if (not self.mouseOverButton) and self.rect.collidepoint(eventObj.pos): # if mouse has entered the checkBox: self.mouseOverButton = True elif self.mouseOverButton and (not self.rect.collidepoint(eventObj.pos)): # if mouse has exited the checkBox: self.mouseOverButton = False if self.rect.collidepoint(eventObj.pos): if eventObj.type == MOUSEBUTTONDOWN: self.buttonDown = True self.lastMouseDownOverButton = True else: if eventObj.type in (MOUSEBUTTONUP, MOUSEBUTTONDOWN): # if an up/down happens off the checkBox, then the next up won't cause mouseClick() self.lastMouseDownOverButton = False if eventObj.type == MOUSEBUTTONDOWN: self.mouseIsDown = True # mouse up is handled whether or not it was over the checkBox doMouseClick = False if eventObj.type == MOUSEBUTTONUP: self.mouseIsDown = False if self.lastMouseDownOverButton: doMouseClick = True self.lastMouseDownOverButton = False if self.buttonDown: self.buttonDown = False if doMouseClick: self.buttonDown = False clicked = True if self.playSoundOnClick: self.soundOnClick.play() # switch state: self.value = not self.value return clicked
[ "def", "handleEvent", "(", "self", ",", "eventObj", ")", ":", "if", "eventObj", ".", "type", "not", "in", "(", "MOUSEMOTION", ",", "MOUSEBUTTONUP", ",", "MOUSEBUTTONDOWN", ")", "or", "not", "self", ".", "visible", ":", "# The checkBox only cares bout mouse-related events (or no events, if it is invisible)\r", "return", "False", "if", "not", "self", ".", "isEnabled", ":", "return", "False", "clicked", "=", "False", "if", "(", "not", "self", ".", "mouseOverButton", ")", "and", "self", ".", "rect", ".", "collidepoint", "(", "eventObj", ".", "pos", ")", ":", "# if mouse has entered the checkBox:\r", "self", ".", "mouseOverButton", "=", "True", "elif", "self", ".", "mouseOverButton", "and", "(", "not", "self", ".", "rect", ".", "collidepoint", "(", "eventObj", ".", "pos", ")", ")", ":", "# if mouse has exited the checkBox:\r", "self", ".", "mouseOverButton", "=", "False", "if", "self", ".", "rect", ".", "collidepoint", "(", "eventObj", ".", "pos", ")", ":", "if", "eventObj", ".", "type", "==", "MOUSEBUTTONDOWN", ":", "self", ".", "buttonDown", "=", "True", "self", ".", "lastMouseDownOverButton", "=", "True", "else", ":", "if", "eventObj", ".", "type", "in", "(", "MOUSEBUTTONUP", ",", "MOUSEBUTTONDOWN", ")", ":", "# if an up/down happens off the checkBox, then the next up won't cause mouseClick()\r", "self", ".", "lastMouseDownOverButton", "=", "False", "if", "eventObj", ".", "type", "==", "MOUSEBUTTONDOWN", ":", "self", ".", "mouseIsDown", "=", "True", "# mouse up is handled whether or not it was over the checkBox\r", "doMouseClick", "=", "False", "if", "eventObj", ".", "type", "==", "MOUSEBUTTONUP", ":", "self", ".", "mouseIsDown", "=", "False", "if", "self", ".", "lastMouseDownOverButton", ":", "doMouseClick", "=", "True", "self", ".", "lastMouseDownOverButton", "=", "False", "if", "self", ".", "buttonDown", ":", "self", ".", "buttonDown", "=", "False", "if", "doMouseClick", ":", "self", ".", "buttonDown", "=", "False", "clicked", "=", "True", "if", "self", ".", "playSoundOnClick", ":", "self", ".", "soundOnClick", ".", "play", "(", ")", "# switch state:\r", "self", ".", "value", "=", "not", "self", ".", "value", "return", "clicked" ]
This method should be called every time through the main loop. It handles showing the up, over, and down states of the button. Parameters: | eventObj - the event object obtained by calling pygame.event.get() Returns: | False most of the time | True when the user has toggled the checkbox.
[ "This", "method", "should", "be", "called", "every", "time", "through", "the", "main", "loop", ".", "It", "handles", "showing", "the", "up", "over", "and", "down", "states", "of", "the", "button", ".", "Parameters", ":", "|", "eventObj", "-", "the", "event", "object", "obtained", "by", "calling", "pygame", ".", "event", ".", "get", "()", "Returns", ":", "|", "False", "most", "of", "the", "time", "|", "True", "when", "the", "has", "toggled", "the", "checkbox", "." ]
python
train
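The docstring's main-loop contract looks like this in practice. Commented sketch: 'box' is a hypothetical pygwidgets checkbox, and getValue(), window, and BLACK are assumptions from a typical pygame setup:

# while running:
#     for event in pygame.event.get():
#         if box.handleEvent(event):  # True only when a toggle completed
#             print('toggled; value is now', box.getValue())
#     window.fill(BLACK)
#     box.draw()
#     pygame.display.update()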
drewsonne/pyum
pyum/rpm.py
https://github.com/drewsonne/pyum/blob/5d2955f86575c9430ab7104211b3d67bd4c0febe/pyum/rpm.py#L126-L133
def dependencies(self): """ Read the contents of the rpm itself :return: list of dependencies (currently always returns an empty list) """ cpio = self.rpm.gzip_file.read() content = cpio.read() return []
[ "def", "dependencies", "(", "self", ")", ":", "cpio", "=", "self", ".", "rpm", ".", "gzip_file", ".", "read", "(", ")", "content", "=", "cpio", ".", "read", "(", ")", "return", "[", "]" ]
Read the contents of the rpm itself :return: list of dependencies (currently always returns an empty list)
[ "Read", "the", "contents", "of", "the", "rpm", "itself", ":", "return", ":" ]
python
test
niemasd/TreeSwift
treeswift/Tree.py
https://github.com/niemasd/TreeSwift/blob/7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917/treeswift/Tree.py#L757-L783
def num_lineages_at(self, distance): '''Returns the number of lineages of this ``Tree`` that exist ``distance`` away from the root Args: ``distance`` (``float``): The distance away from the root Returns: ``int``: The number of lineages that exist ``distance`` away from the root ''' if not isinstance(distance, float) and not isinstance(distance, int): raise TypeError("distance must be an int or a float") if distance < 0: raise RuntimeError("distance cannot be negative") d = dict(); q = deque(); q.append(self.root); count = 0 while len(q) != 0: node = q.popleft() if node.is_root(): d[node] = 0 else: d[node] = d[node.parent] if node.edge_length is not None: d[node] += node.edge_length if d[node] < distance: q.extend(node.children) elif node.parent is None or d[node.parent] < distance: count += 1 return count
[ "def", "num_lineages_at", "(", "self", ",", "distance", ")", ":", "if", "not", "isinstance", "(", "distance", ",", "float", ")", "and", "not", "isinstance", "(", "distance", ",", "int", ")", ":", "raise", "TypeError", "(", "\"distance must be an int or a float\"", ")", "if", "distance", "<", "0", ":", "raise", "RuntimeError", "(", "\"distance cannot be negative\"", ")", "d", "=", "dict", "(", ")", "q", "=", "deque", "(", ")", "q", ".", "append", "(", "self", ".", "root", ")", "count", "=", "0", "while", "len", "(", "q", ")", "!=", "0", ":", "node", "=", "q", ".", "popleft", "(", ")", "if", "node", ".", "is_root", "(", ")", ":", "d", "[", "node", "]", "=", "0", "else", ":", "d", "[", "node", "]", "=", "d", "[", "node", ".", "parent", "]", "if", "node", ".", "edge_length", "is", "not", "None", ":", "d", "[", "node", "]", "+=", "node", ".", "edge_length", "if", "d", "[", "node", "]", "<", "distance", ":", "q", ".", "extend", "(", "node", ".", "children", ")", "elif", "node", ".", "parent", "is", "None", "or", "d", "[", "node", ".", "parent", "]", "<", "distance", ":", "count", "+=", "1", "return", "count" ]
Returns the number of lineages of this ``Tree`` that exist ``distance`` away from the root Args: ``distance`` (``float``): The distance away from the root Returns: ``int``: The number of lineages that exist ``distance`` away from the root
[ "Returns", "the", "number", "of", "lineages", "of", "this", "Tree", "that", "exist", "distance", "away", "from", "the", "root" ]
python
train
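A short usage sketch, assuming TreeSwift's read_tree_newick loader; the count is the number of branches crossing the given distance from the root:

# from treeswift import read_tree_newick
# tree = read_tree_newick('((A:1,B:2):1,C:3);')
# print(tree.num_lineages_at(1.5))  # branches crossing 1.5 units from the root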
Capitains/Nautilus
capitains_nautilus/apis/dts.py
https://github.com/Capitains/Nautilus/blob/6be453fe0cc0e2c1b89ff06e5af1409165fc1411/capitains_nautilus/apis/dts.py#L41-L53
def r_dts_collection(self, objectId=None): """ DTS Collection Metadata reply for given objectId :param objectId: Collection Identifier :return: JSON Format of DTS Collection """ try: j = self.resolver.getMetadata(objectId=objectId).export(Mimetypes.JSON.DTS.Std) j = jsonify(j) j.status_code = 200 except NautilusError as E: return self.dts_error(error_name=E.__class__.__name__, message=E.__doc__) return j
[ "def", "r_dts_collection", "(", "self", ",", "objectId", "=", "None", ")", ":", "try", ":", "j", "=", "self", ".", "resolver", ".", "getMetadata", "(", "objectId", "=", "objectId", ")", ".", "export", "(", "Mimetypes", ".", "JSON", ".", "DTS", ".", "Std", ")", "j", "=", "jsonify", "(", "j", ")", "j", ".", "status_code", "=", "200", "except", "NautilusError", "as", "E", ":", "return", "self", ".", "dts_error", "(", "error_name", "=", "E", ".", "__class__", ".", "__name__", ",", "message", "=", "E", ".", "__doc__", ")", "return", "j" ]
DTS Collection Metadata reply for given objectId :param objectId: Collection Identifier :return: JSON Format of DTS Collection
[ "DTS", "Collection", "Metadata", "reply", "for", "given", "objectId" ]
python
train
roclark/sportsreference
sportsreference/ncaab/schedule.py
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/ncaab/schedule.py#L195-L208
def datetime(self): """ Returns a datetime object to indicate the month, day, year, and time the requested game took place. """ date_string = '%s %s' % (self._date, self._time.upper()) date_string = re.sub(r'/.*', '', date_string) date_string = re.sub(r' ET', '', date_string) date_string += 'M' date_string = re.sub(r'PMM', 'PM', date_string, flags=re.IGNORECASE) date_string = re.sub(r'AMM', 'AM', date_string, flags=re.IGNORECASE) date_string = re.sub(r' PM', 'PM', date_string, flags=re.IGNORECASE) date_string = re.sub(r' AM', 'AM', date_string, flags=re.IGNORECASE) return datetime.strptime(date_string, '%a, %b %d, %Y %I:%M%p')
[ "def", "datetime", "(", "self", ")", ":", "date_string", "=", "'%s %s'", "%", "(", "self", ".", "_date", ",", "self", ".", "_time", ".", "upper", "(", ")", ")", "date_string", "=", "re", ".", "sub", "(", "r'/.*'", ",", "''", ",", "date_string", ")", "date_string", "=", "re", ".", "sub", "(", "r' ET'", ",", "''", ",", "date_string", ")", "date_string", "+=", "'M'", "date_string", "=", "re", ".", "sub", "(", "r'PMM'", ",", "'PM'", ",", "date_string", ",", "flags", "=", "re", ".", "IGNORECASE", ")", "date_string", "=", "re", ".", "sub", "(", "r'AMM'", ",", "'AM'", ",", "date_string", ",", "flags", "=", "re", ".", "IGNORECASE", ")", "date_string", "=", "re", ".", "sub", "(", "r' PM'", ",", "'PM'", ",", "date_string", ",", "flags", "=", "re", ".", "IGNORECASE", ")", "date_string", "=", "re", ".", "sub", "(", "r' AM'", ",", "'AM'", ",", "date_string", ",", "flags", "=", "re", ".", "IGNORECASE", ")", "return", "datetime", ".", "strptime", "(", "date_string", ",", "'%a, %b %d, %Y %I:%M%p'", ")" ]
Returns a datetime object to indicate the month, day, year, and time the requested game took place.
[ "Returns", "a", "datetime", "object", "to", "indicate", "the", "month", "day", "year", "and", "time", "the", "requested", "game", "took", "place", "." ]
python
train
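The regex chain is easiest to follow on a concrete string. This standalone sketch replays the same normalization steps on a sample value (assumed representative of the scraped format):

import re
from datetime import datetime

date_string = '%s %s' % ('Sat, Nov 9, 2019', '2:00 pm ET'.upper())
date_string = re.sub(r'/.*', '', date_string)   # drop any trailing '/...' qualifier
date_string = re.sub(r' ET', '', date_string)   # drop the timezone suffix
date_string += 'M'                              # guard against a bare trailing 'P'/'A'
date_string = re.sub(r'PMM', 'PM', date_string, flags=re.IGNORECASE)
date_string = re.sub(r'AMM', 'AM', date_string, flags=re.IGNORECASE)
date_string = re.sub(r' PM', 'PM', date_string, flags=re.IGNORECASE)
date_string = re.sub(r' AM', 'AM', date_string, flags=re.IGNORECASE)
print(datetime.strptime(date_string, '%a, %b %d, %Y %I:%M%p'))  # 2019-11-09 14:00:00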
seomoz/qless-py
qless/queue.py
https://github.com/seomoz/qless-py/blob/3eda4ffcd4c0016c9a7e44f780d6155e1a354dda/qless/queue.py#L131-L139
def pop(self, count=None): '''Passing in the queue from which to pull items, the current time, when the locks for these returned items should expire, and the number of items to be popped off.''' results = [Job(self.client, **job) for job in json.loads( self.client('pop', self.name, self.worker_name, count or 1))] if count is None: return (len(results) and results[0]) or None return results
[ "def", "pop", "(", "self", ",", "count", "=", "None", ")", ":", "results", "=", "[", "Job", "(", "self", ".", "client", ",", "*", "*", "job", ")", "for", "job", "in", "json", ".", "loads", "(", "self", ".", "client", "(", "'pop'", ",", "self", ".", "name", ",", "self", ".", "worker_name", ",", "count", "or", "1", ")", ")", "]", "if", "count", "is", "None", ":", "return", "(", "len", "(", "results", ")", "and", "results", "[", "0", "]", ")", "or", "None", "return", "results" ]
Passing in the queue from which to pull items, the current time, when the locks for these returned items should expire, and the number of items to be popped off.
[ "Passing", "in", "the", "queue", "from", "which", "to", "pull", "items", "the", "current", "time", "when", "the", "locks", "for", "these", "returned", "items", "should", "expire", "and", "the", "number", "of", "items", "to", "be", "popped", "off", "." ]
python
train
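The count semantics deserve a usage note: with no count the caller gets a single Job (or None); with an explicit count, always a list. Commented sketch assuming a local redis and qless-py's Client (queue name hypothetical):

# import qless
# client = qless.Client('redis://localhost:6379')
# queue = client.queues['example']
# job = queue.pop()    # a single Job instance, or None if the queue is empty
# jobs = queue.pop(5)  # a list of up to 5 Job instances (possibly empty)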
crs4/hl7apy
hl7apy/parser.py
https://github.com/crs4/hl7apy/blob/91be488e9274f6ec975519a1d9c17045bc91bf74/hl7apy/parser.py#L38-L100
def parse_message(message, validation_level=None, find_groups=True, message_profile=None, report_file=None, force_validation=False): """ Parse the given ER7-encoded message and return an instance of :class:`Message <hl7apy.core.Message>`. :type message: ``str`` :param message: the ER7-encoded message to be parsed :type validation_level: ``int`` :param validation_level: the validation level. Possible values are those defined in :class:`VALIDATION_LEVEL <hl7apy.consts.VALIDATION_LEVEL>` class or ``None`` to use the default validation level (see :func:`set_default_validation_level <hl7apy.set_default_validation_level>`) :type find_groups: ``bool`` :param find_groups: if ``True``, automatically assign the segments found to the appropriate :class:`Groups <hl7apy.core.Group>` instances. If ``False``, the segments found are assigned as children of the :class:`Message <hl7apy.core.Message>` instance :type force_validation: ``bool`` :type force_validation: if ``True``, automatically forces the message validation after the end of the parsing :return: an instance of :class:`Message <hl7apy.core.Message>` >>> message = "MSH|^~\&|GHH_ADT||||20080115153000||OML^O33^OML_O33|0123456789|P|2.5||||AL\\rPID|1||" \ "566-554-3423^^^GHH^MR||EVERYMAN^ADAM^A|||M|||2222 HOME STREET^^ANN ARBOR^MI^^USA||555-555-2004|||M\\r" >>> m = parse_message(message) >>> print(m) <Message OML_O33> >>> print(m.msh.sending_application.to_er7()) GHH_ADT >>> print(m.children) [<Segment MSH>, <Group OML_O33_PATIENT>] """ message = message.lstrip() encoding_chars, message_structure, version = get_message_info(message) validation_level = _get_validation_level(validation_level) try: reference = message_profile[message_structure] if message_profile else None except KeyError: raise MessageProfileNotFound() try: m = Message(name=message_structure, reference=reference, version=version, validation_level=validation_level, encoding_chars=encoding_chars) except InvalidName: m = Message(version=version, validation_level=validation_level, encoding_chars=encoding_chars) try: children = parse_segments(message, m.version, encoding_chars, validation_level, m.reference, find_groups) except AttributeError: # m.reference can raise i children = parse_segments(message, m.version, encoding_chars, validation_level, find_groups=False) m.children = children if force_validation: if message_profile is None: Validator.validate(m, report_file=report_file) else: Validator.validate(m, message_profile[message_structure], report_file=report_file) return m
[ "def", "parse_message", "(", "message", ",", "validation_level", "=", "None", ",", "find_groups", "=", "True", ",", "message_profile", "=", "None", ",", "report_file", "=", "None", ",", "force_validation", "=", "False", ")", ":", "message", "=", "message", ".", "lstrip", "(", ")", "encoding_chars", ",", "message_structure", ",", "version", "=", "get_message_info", "(", "message", ")", "validation_level", "=", "_get_validation_level", "(", "validation_level", ")", "try", ":", "reference", "=", "message_profile", "[", "message_structure", "]", "if", "message_profile", "else", "None", "except", "KeyError", ":", "raise", "MessageProfileNotFound", "(", ")", "try", ":", "m", "=", "Message", "(", "name", "=", "message_structure", ",", "reference", "=", "reference", ",", "version", "=", "version", ",", "validation_level", "=", "validation_level", ",", "encoding_chars", "=", "encoding_chars", ")", "except", "InvalidName", ":", "m", "=", "Message", "(", "version", "=", "version", ",", "validation_level", "=", "validation_level", ",", "encoding_chars", "=", "encoding_chars", ")", "try", ":", "children", "=", "parse_segments", "(", "message", ",", "m", ".", "version", ",", "encoding_chars", ",", "validation_level", ",", "m", ".", "reference", ",", "find_groups", ")", "except", "AttributeError", ":", "# m.reference can raise i", "children", "=", "parse_segments", "(", "message", ",", "m", ".", "version", ",", "encoding_chars", ",", "validation_level", ",", "find_groups", "=", "False", ")", "m", ".", "children", "=", "children", "if", "force_validation", ":", "if", "message_profile", "is", "None", ":", "Validator", ".", "validate", "(", "m", ",", "report_file", "=", "report_file", ")", "else", ":", "Validator", ".", "validate", "(", "m", ",", "message_profile", "[", "message_structure", "]", ",", "report_file", "=", "report_file", ")", "return", "m" ]
Parse the given ER7-encoded message and return an instance of :class:`Message <hl7apy.core.Message>`. :type message: ``str`` :param message: the ER7-encoded message to be parsed :type validation_level: ``int`` :param validation_level: the validation level. Possible values are those defined in :class:`VALIDATION_LEVEL <hl7apy.consts.VALIDATION_LEVEL>` class or ``None`` to use the default validation level (see :func:`set_default_validation_level <hl7apy.set_default_validation_level>`) :type find_groups: ``bool`` :param find_groups: if ``True``, automatically assign the segments found to the appropriate :class:`Groups <hl7apy.core.Group>` instances. If ``False``, the segments found are assigned as children of the :class:`Message <hl7apy.core.Message>` instance :type force_validation: ``bool`` :type force_validation: if ``True``, automatically forces the message validation after the end of the parsing :return: an instance of :class:`Message <hl7apy.core.Message>` >>> message = "MSH|^~\&|GHH_ADT||||20080115153000||OML^O33^OML_O33|0123456789|P|2.5||||AL\\rPID|1||" \ "566-554-3423^^^GHH^MR||EVERYMAN^ADAM^A|||M|||2222 HOME STREET^^ANN ARBOR^MI^^USA||555-555-2004|||M\\r" >>> m = parse_message(message) >>> print(m) <Message OML_O33> >>> print(m.msh.sending_application.to_er7()) GHH_ADT >>> print(m.children) [<Segment MSH>, <Group OML_O33_PATIENT>]
[ "Parse", "the", "given", "ER7", "-", "encoded", "message", "and", "return", "an", "instance", "of", ":", "class", ":", "Message", "<hl7apy", ".", "core", ".", "Message", ">", "." ]
python
train
urinieto/msaf
msaf/base.py
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/base.py#L351-L355
def _compute_framesync_times(self): """Computes the framesync times based on the framesync features.""" self._framesync_times = librosa.core.frames_to_time( np.arange(self._framesync_features.shape[0]), self.sr, self.hop_length)
[ "def", "_compute_framesync_times", "(", "self", ")", ":", "self", ".", "_framesync_times", "=", "librosa", ".", "core", ".", "frames_to_time", "(", "np", ".", "arange", "(", "self", ".", "_framesync_features", ".", "shape", "[", "0", "]", ")", ",", "self", ".", "sr", ",", "self", ".", "hop_length", ")" ]
Computes the framesync times based on the framesync features.
[ "Computes", "the", "framesync", "times", "based", "on", "the", "framesync", "features", "." ]
python
test
briandilley/ebs-deploy
ebs_deploy/commands/swap_urls_command.py
https://github.com/briandilley/ebs-deploy/blob/4178c9c1282a9025fb987dab3470bea28c202e10/ebs_deploy/commands/swap_urls_command.py#L13-L26
def execute(helper, config, args): """ Swaps old and new URLs. If old_environment was active, new_environment will become the active environment """ old_env_name = args.old_environment new_env_name = args.new_environment # swap C-Names out("Assuming that {} is the currently active environment...".format(old_env_name)) out("Swapping environment cnames: {} will become active, {} will become inactive.".format(new_env_name, old_env_name)) helper.swap_environment_cnames(old_env_name, new_env_name) helper.wait_for_environments([old_env_name, new_env_name], status='Ready', include_deleted=False)
[ "def", "execute", "(", "helper", ",", "config", ",", "args", ")", ":", "old_env_name", "=", "args", ".", "old_environment", "new_env_name", "=", "args", ".", "new_environment", "# swap C-Names", "out", "(", "\"Assuming that {} is the currently active environment...\"", ".", "format", "(", "old_env_name", ")", ")", "out", "(", "\"Swapping environment cnames: {} will become active, {} will become inactive.\"", ".", "format", "(", "new_env_name", ",", "old_env_name", ")", ")", "helper", ".", "swap_environment_cnames", "(", "old_env_name", ",", "new_env_name", ")", "helper", ".", "wait_for_environments", "(", "[", "old_env_name", ",", "new_env_name", "]", ",", "status", "=", "'Ready'", ",", "include_deleted", "=", "False", ")" ]
Swaps old and new URLs. If old_environment was active, new_environment will become the active environment
[ "Swaps", "old", "and", "new", "URLs", ".", "If", "old_environment", "was", "active", "new_environment", "will", "become", "the", "active", "environment" ]
python
valid
hollenstein/maspy
maspy/featuregrouping.py
https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/featuregrouping.py#L534-L632
def lfqFeatureGrouping(fiContainer, timeLimit=40, massLimit=10*1e-6, eucLimit=None, timeKey='rt', massKey='mz', massScalingFactor=None, categoryKey='specfile', charges=None, matchArraySelector=None, specfiles=None): """ #TODO: docstring :param fiContainer: #TODO: docstring :param timeLimit: #TODO: docstring :param massLimit: #TODO: docstring :param eucLimit: #TODO: docstring :param timeKey: #TODO: docstring :param massKey: #TODO: docstring :param massScalingFactor: #TODO: docstring :param categoryKey: #TODO: docstring :param charges: #TODO: docstring :param matchArraySelector: #TODO: docstring :param specfiles: limit grouping to these specfiles :returns: #TODO docstring, :class:`FgiContainer` """ # --- perform the whole feature grouping process --- # targetChargeStates = range(1, 6) if charges is None else charges if matchArraySelector is None: matchArraySelector = lambda arr: numpy.any(arr['isAnnotated']) if massScalingFactor is None: massScalingFactor = timeLimit / massLimit if eucLimit is None: eucLimit = timeLimit if specfiles is None: specfiles = sorted(viewkeys(fiContainer.info)) #'massToleranceMode': 'relative' #'timeToleranceMode': 'absolute' fgiContainer = FgiContainer(specfiles) logMassLimit = log2RelativeMassLimit(massLimit) logMassKey = 'logMass' logToleranceFactor = massLimit / log2RelativeMassLimit(massLimit) logMassScalingFactor = massScalingFactor * logToleranceFactor """ Note: because "a" is similar to "b" a = (1- 400 / 400.001) * massScalingFactor b = (numpy.log2(400.001) - numpy.log2(400)) * logMassScalingFactor """ fiArrayKeys = [massKey, timeKey, 'isAnnotated', 'isMatched'] for _charge in targetChargeStates: # - Prepare feature arrays - # fiSelector = lambda fi: fi.charge == _charge and fi.isValid fiArrays = fiContainer.getArrays(fiArrayKeys, specfiles, sort=massKey, selector=fiSelector) fiArrays['logMass'] = numpy.log2(fiArrays[massKey]) if listvalues(fiArrays)[0].size == 0: continue # - group features which are in close mass and time proximity - # continuousGroups = massTimeContinuityGroups(fiArrays, logMassKey, timeKey, logMassLimit, timeLimit ) # - perform proximity grouping - # matchArrayKeys = list(viewkeys(fiArrays)) for groupId in range(len(continuousGroups)): #Grab the arrays of the current feature continuity group groupPositions = continuousGroups[groupId] matchArr = getContGroupArrays(fiArrays, groupPositions, matchArrayKeys ) if not matchArraySelector(matchArr): continue #Calculate a sorted list of all euclidean feature distances matchArr['mNorm'] = matchArr[logMassKey] * logMassScalingFactor distInfo = calcDistMatchArr(matchArr, timeKey, 'mNorm') #Group fi according to their proximity linkageGroups = proximityGrouping(matchArr, distInfo, eucLimit, categoryKey ) #Generate feature groups from the linked features fgiIds = generateFeatureGroups(fgiContainer, linkageGroups, matchArr, timeKey, massKey, logMassKey, logMassScalingFactor ) #Set charge manually for fgiId in fgiIds: fgiContainer.container[fgiId].charge = _charge #Mark overlapping groups as not valid (fgi.isValid = False) fgiDoOverlap = findFgiOverlaps(fgiContainer, fgiIds) #Add feature intensities to the feature groups fgiContainer.updateIntensities(fiContainer) return fgiContainer
[ "def", "lfqFeatureGrouping", "(", "fiContainer", ",", "timeLimit", "=", "40", ",", "massLimit", "=", "10", "*", "1e-6", ",", "eucLimit", "=", "None", ",", "timeKey", "=", "'rt'", ",", "massKey", "=", "'mz'", ",", "massScalingFactor", "=", "None", ",", "categoryKey", "=", "'specfile'", ",", "charges", "=", "None", ",", "matchArraySelector", "=", "None", ",", "specfiles", "=", "None", ")", ":", "# --- perform the whole feature grouping process --- #", "targetChargeStates", "=", "range", "(", "1", ",", "6", ")", "if", "charges", "is", "None", "else", "charges", "if", "matchArraySelector", "is", "None", ":", "matchArraySelector", "=", "lambda", "arr", ":", "numpy", ".", "any", "(", "arr", "[", "'isAnnotated'", "]", ")", "if", "massScalingFactor", "is", "None", ":", "massScalingFactor", "=", "timeLimit", "/", "massLimit", "if", "eucLimit", "is", "None", ":", "eucLimit", "=", "timeLimit", "if", "specfiles", "is", "None", ":", "specfiles", "=", "sorted", "(", "viewkeys", "(", "fiContainer", ".", "info", ")", ")", "#'massToleranceMode': 'relative'", "#'timeToleranceMode': 'absolute'", "fgiContainer", "=", "FgiContainer", "(", "specfiles", ")", "logMassLimit", "=", "log2RelativeMassLimit", "(", "massLimit", ")", "logMassKey", "=", "'logMass'", "logToleranceFactor", "=", "massLimit", "/", "log2RelativeMassLimit", "(", "massLimit", ")", "logMassScalingFactor", "=", "massScalingFactor", "*", "logToleranceFactor", "\"\"\" Note: because \"a\" is similar to \"b\"\n a = (1- 400 / 400.001) * massScalingFactor\n b = (numpy.log2(400.001) - numpy.log2(400)) * logMassScalingFactor\n \"\"\"", "fiArrayKeys", "=", "[", "massKey", ",", "timeKey", ",", "'isAnnotated'", ",", "'isMatched'", "]", "for", "_charge", "in", "targetChargeStates", ":", "# - Prepare feature arrays - #", "fiSelector", "=", "lambda", "fi", ":", "fi", ".", "charge", "==", "_charge", "and", "fi", ".", "isValid", "fiArrays", "=", "fiContainer", ".", "getArrays", "(", "fiArrayKeys", ",", "specfiles", ",", "sort", "=", "massKey", ",", "selector", "=", "fiSelector", ")", "fiArrays", "[", "'logMass'", "]", "=", "numpy", ".", "log2", "(", "fiArrays", "[", "massKey", "]", ")", "if", "listvalues", "(", "fiArrays", ")", "[", "0", "]", ".", "size", "==", "0", ":", "continue", "# - group features which are in close mass and time proximity - #", "continuousGroups", "=", "massTimeContinuityGroups", "(", "fiArrays", ",", "logMassKey", ",", "timeKey", ",", "logMassLimit", ",", "timeLimit", ")", "# - perform proximity grouping - #", "matchArrayKeys", "=", "list", "(", "viewkeys", "(", "fiArrays", ")", ")", "for", "groupId", "in", "range", "(", "len", "(", "continuousGroups", ")", ")", ":", "#Grab the arrays of the current feature continuity group", "groupPositions", "=", "continuousGroups", "[", "groupId", "]", "matchArr", "=", "getContGroupArrays", "(", "fiArrays", ",", "groupPositions", ",", "matchArrayKeys", ")", "if", "not", "matchArraySelector", "(", "matchArr", ")", ":", "continue", "#Calculate a sorted list of all euclidean feature distances", "matchArr", "[", "'mNorm'", "]", "=", "matchArr", "[", "logMassKey", "]", "*", "logMassScalingFactor", "distInfo", "=", "calcDistMatchArr", "(", "matchArr", ",", "timeKey", ",", "'mNorm'", ")", "#Group fi according to their proximity", "linkageGroups", "=", "proximityGrouping", "(", "matchArr", ",", "distInfo", ",", "eucLimit", ",", "categoryKey", ")", "#Generate feature groups from the linked features", "fgiIds", "=", "generateFeatureGroups", "(", "fgiContainer", ",", "linkageGroups", ",", "matchArr", ",", 
"timeKey", ",", "massKey", ",", "logMassKey", ",", "logMassScalingFactor", ")", "#Set charge manually", "for", "fgiId", "in", "fgiIds", ":", "fgiContainer", ".", "container", "[", "fgiId", "]", ".", "charge", "=", "_charge", "#Mark overlapping groups as not valid (fgi.isValid = False)", "fgiDoOverlap", "=", "findFgiOverlaps", "(", "fgiContainer", ",", "fgiIds", ")", "#Add feature intensities to the feature groups", "fgiContainer", ".", "updateIntensities", "(", "fiContainer", ")", "return", "fgiContainer" ]
#TODO: docstring :param fiContainer: #TODO: docstring :param timeLimit: #TODO: docstring :param massLimit: #TODO: docstring :param eucLimit: #TODO: docstring :param timeKey: #TODO: docstring :param massKey: #TODO: docstring :param massScalingFactor: #TODO: docstring :param categoryKey: #TODO: docstring :param charges: #TODO: docstring :param matchArraySelector: #TODO: docstring :param specfiles: limit grouping to these specfiles :returns: #TODO docstring, :class:`FgiContainer`
[ "#TODO", ":", "docstring" ]
python
train
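The note embedded in the function body ('a' is similar to 'b') can be checked numerically. The sketch below uses the default limits and assumes log2RelativeMassLimit(x) = log2(1 + x), the form consistent with that note:

import math

timeLimit, massLimit = 40, 10 * 1e-6
massScalingFactor = timeLimit / massLimit
log2_mass_limit = math.log2(1 + massLimit)  # assumed log2RelativeMassLimit(massLimit)
logToleranceFactor = massLimit / log2_mass_limit
logMassScalingFactor = massScalingFactor * logToleranceFactor

a = (1 - 400 / 400.001) * massScalingFactor
b = (math.log2(400.001) - math.log2(400)) * logMassScalingFactor
print(a, b)  # both close to 10: the linear and log2 forms agree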
Falkonry/falkonry-python-client
falkonryclient/service/http.py
https://github.com/Falkonry/falkonry-python-client/blob/0aeb2b00293ee94944f1634e9667401b03da29c1/falkonryclient/service/http.py#L157-L195
def fpost(self, url, form_data): """ To make a form-data POST request to Falkonry API server :param url: string :param form_data: form-data """ response = None if 'files' in form_data: response = requests.post( self.host + url, data=form_data['data'] if 'data' in form_data else {}, files=form_data['files'] if 'files' in form_data else {}, headers={ 'Authorization': 'Bearer ' + self.token, 'x-falkonry-source':self.sourceHeader }, verify=False ) else: response = requests.post( self.host + url, data=json.dumps(form_data['data'] if 'data' in form_data else {}), headers={ 'Content-Type': 'application/json', 'Authorization': 'Bearer ' + self.token, 'x-falkonry-source':self.sourceHeader }, verify=False ) if response.status_code == 201 or response.status_code == 202: try: return json.loads(response._content.decode('utf-8')) except Exception as e: return json.loads(response.content) elif response.status_code == 401: raise Exception(json.dumps({'message':'Unauthorized Access'})) else: raise Exception(response.content)
[ "def", "fpost", "(", "self", ",", "url", ",", "form_data", ")", ":", "response", "=", "None", "if", "'files'", "in", "form_data", ":", "response", "=", "requests", ".", "post", "(", "self", ".", "host", "+", "url", ",", "data", "=", "form_data", "[", "'data'", "]", "if", "'data'", "in", "form_data", "else", "{", "}", ",", "files", "=", "form_data", "[", "'files'", "]", "if", "'files'", "in", "form_data", "else", "{", "}", ",", "headers", "=", "{", "'Authorization'", ":", "'Bearer '", "+", "self", ".", "token", ",", "'x-falkonry-source'", ":", "self", ".", "sourceHeader", "}", ",", "verify", "=", "False", ")", "else", ":", "response", "=", "requests", ".", "post", "(", "self", ".", "host", "+", "url", ",", "data", "=", "json", ".", "dumps", "(", "form_data", "[", "'data'", "]", "if", "'data'", "in", "form_data", "else", "{", "}", ")", ",", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", ",", "'Authorization'", ":", "'Bearer '", "+", "self", ".", "token", ",", "'x-falkonry-source'", ":", "self", ".", "sourceHeader", "}", ",", "verify", "=", "False", ")", "if", "response", ".", "status_code", "==", "201", "or", "response", ".", "status_code", "==", "202", ":", "try", ":", "return", "json", ".", "loads", "(", "response", ".", "_content", ".", "decode", "(", "'utf-8'", ")", ")", "except", "Exception", "as", "e", ":", "return", "json", ".", "loads", "(", "response", ".", "content", ")", "elif", "response", ".", "status_code", "==", "401", ":", "raise", "Exception", "(", "json", ".", "dumps", "(", "{", "'message'", ":", "'Unauthorized Access'", "}", ")", ")", "else", ":", "raise", "Exception", "(", "response", ".", "content", ")" ]
To make a form-data POST request to Falkonry API server :param url: string :param form_data: form-data
[ "To", "make", "a", "form", "-", "data", "POST", "request", "to", "Falkonry", "API", "server", ":", "param", "url", ":", "string", ":", "param", "form_data", ":", "form", "-", "data" ]
python
train
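A minimal standalone sketch of the dispatch in the fpost record above: multipart when the caller supplies files, JSON otherwise. The helper name, token, and source-header value are hypothetical; only the header/body shaping from the record is mirrored, not the actual network call.

```python
import json

def build_post_kwargs(form_data, token, source_header="example-source"):
    # Mirrors fpost: Bearer auth plus the x-falkonry-source header on
    # every request; Content-Type is only forced for the JSON branch.
    headers = {
        "Authorization": "Bearer " + token,
        "x-falkonry-source": source_header,
    }
    if "files" in form_data:
        # multipart/form-data: requests would set the boundary itself
        return {
            "data": form_data.get("data", {}),
            "files": form_data.get("files", {}),
            "headers": headers,
        }
    headers["Content-Type"] = "application/json"
    return {"data": json.dumps(form_data.get("data", {})), "headers": headers}

print(build_post_kwargs({"data": {"name": "signal"}}, token="abc123"))
print(build_post_kwargs({"files": {"data": b"time,value\n"}}, token="abc123"))
```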
sassoo/goldman
goldman/serializers/jsonapi_error.py
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/serializers/jsonapi_error.py#L87-L103
def get_status(self): """ Return a HTTPStatus compliant status attribute Per the JSON API spec errors could have different status codes & a generic one should be chosen in these conditions for the actual HTTP response code. """ codes = [error['status'] for error in self.errors] same = all(code == codes[0] for code in codes) if not same and codes[0].startswith('4'): return falcon.HTTP_400 elif not same and codes[0].startswith('5'): return falcon.HTTP_500 else: return codes[0]
[ "def", "get_status", "(", "self", ")", ":", "codes", "=", "[", "error", "[", "'status'", "]", "for", "error", "in", "self", ".", "errors", "]", "same", "=", "all", "(", "code", "==", "codes", "[", "0", "]", "for", "code", "in", "codes", ")", "if", "not", "same", "and", "codes", "[", "0", "]", ".", "startswith", "(", "'4'", ")", ":", "return", "falcon", ".", "HTTP_400", "elif", "not", "same", "and", "codes", "[", "0", "]", ".", "startswith", "(", "'5'", ")", ":", "return", "falcon", ".", "HTTP_500", "else", ":", "return", "codes", "[", "0", "]" ]
Return a HTTPStatus compliant status attribute Per the JSON API spec errors could have different status codes & a generic one should be chosen in these conditions for the actual HTTP response code.
[ "Return", "a", "HTTPStatus", "compliant", "status", "attribute" ]
python
train
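The fallback rule above is easy to check in isolation. A sketch with plain status strings standing in for falcon's HTTP_400/HTTP_500 constants:

```python
def pick_status(codes):
    # Standalone version of the rule above: if every error carries the
    # same status, use it; otherwise collapse mixed 4xx errors to 400
    # and mixed 5xx errors to 500 (judged by the first code's class).
    same = all(code == codes[0] for code in codes)
    if not same and codes[0].startswith("4"):
        return "400 Bad Request"
    elif not same and codes[0].startswith("5"):
        return "500 Internal Server Error"
    return codes[0]

assert pick_status(["404", "404"]) == "404"
assert pick_status(["404", "409"]) == "400 Bad Request"
assert pick_status(["503", "500"]) == "500 Internal Server Error"
```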
saulpw/visidata
visidata/vdtui.py
https://github.com/saulpw/visidata/blob/32771e0cea6c24fc7902683d14558391395c591f/visidata/vdtui.py#L175-L181
def _get(self, k, obj=None): 'Return Option object for k in context of obj. Cache result until any set().' opt = self._cache.get((k, obj), None) if opt is None: opt = self._opts._get(k, obj) self._cache[(k, obj or vd.sheet)] = opt return opt
[ "def", "_get", "(", "self", ",", "k", ",", "obj", "=", "None", ")", ":", "opt", "=", "self", ".", "_cache", ".", "get", "(", "(", "k", ",", "obj", ")", ",", "None", ")", "if", "opt", "is", "None", ":", "opt", "=", "self", ".", "_opts", ".", "_get", "(", "k", ",", "obj", ")", "self", ".", "_cache", "[", "(", "k", ",", "obj", "or", "vd", ".", "sheet", ")", "]", "=", "opt", "return", "opt" ]
Return Option object for k in context of obj. Cache result until any set().
[ "Return", "Option", "object", "for", "k", "in", "context", "of", "obj", ".", "Cache", "result", "until", "any", "set", "()", "." ]
python
train
openeemeter/eemeter
eemeter/caltrack/usage_per_day.py
https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/caltrack/usage_per_day.py#L332-L345
def json(self): """ Return a JSON-serializable representation of this result. The output of this function can be converted to a serialized string with :any:`json.dumps`. """ return { "model_type": self.model_type, "formula": self.formula, "status": self.status, "model_params": self.model_params, "r_squared_adj": _noneify(self.r_squared_adj), "warnings": [w.json() for w in self.warnings], }
[ "def", "json", "(", "self", ")", ":", "return", "{", "\"model_type\"", ":", "self", ".", "model_type", ",", "\"formula\"", ":", "self", ".", "formula", ",", "\"status\"", ":", "self", ".", "status", ",", "\"model_params\"", ":", "self", ".", "model_params", ",", "\"r_squared_adj\"", ":", "_noneify", "(", "self", ".", "r_squared_adj", ")", ",", "\"warnings\"", ":", "[", "w", ".", "json", "(", ")", "for", "w", "in", "self", ".", "warnings", "]", ",", "}" ]
Return a JSON-serializable representation of this result. The output of this function can be converted to a serialized string with :any:`json.dumps`.
[ "Return", "a", "JSON", "-", "serializable", "representation", "of", "this", "result", "." ]
python
train
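Since the method only reads plain attributes, its output can be illustrated with a stand-in object; all names and values below are made up, and the real result also routes r_squared_adj through _noneify to keep NaN out of the JSON.

```python
import json

class FakeWarning:
    def json(self):
        return {"qualified_name": "example.warning"}

class FakeResult:
    # Hypothetical attributes shaped like a CalTRACK model result.
    model_type = "cdd_hdd"
    formula = "meter_value ~ cdd_65 + hdd_50"
    status = "QUALIFIED"
    model_params = {"intercept": 1.5}
    r_squared_adj = 0.92
    warnings = [FakeWarning()]

    def json(self):
        return {
            "model_type": self.model_type,
            "formula": self.formula,
            "status": self.status,
            "model_params": self.model_params,
            "r_squared_adj": self.r_squared_adj,  # _noneify omitted here
            "warnings": [w.json() for w in self.warnings],
        }

print(json.dumps(FakeResult().json(), indent=2))
```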
rduplain/jeni-python
jeni.py
https://github.com/rduplain/jeni-python/blob/feca12ce5e4f0438ae5d7bec59d61826063594f1/jeni.py#L750-L760
def lookup(cls, basenote): """Look up note in registered annotations, walking class tree.""" # Walk method resolution order, which includes current class. for c in cls.mro(): if 'provider_registry' not in vars(c): # class is a mixin, super to base class, or never registered. continue if basenote in c.provider_registry: # note is in the registry. return c.provider_registry[basenote] raise LookupError(repr(basenote))
[ "def", "lookup", "(", "cls", ",", "basenote", ")", ":", "# Walk method resolution order, which includes current class.", "for", "c", "in", "cls", ".", "mro", "(", ")", ":", "if", "'provider_registry'", "not", "in", "vars", "(", "c", ")", ":", "# class is a mixin, super to base class, or never registered.", "continue", "if", "basenote", "in", "c", ".", "provider_registry", ":", "# note is in the registry.", "return", "c", ".", "provider_registry", "[", "basenote", "]", "raise", "LookupError", "(", "repr", "(", "basenote", ")", ")" ]
Look up note in registered annotations, walking class tree.
[ "Look", "up", "note", "in", "registered", "annotations", "walking", "class", "tree", "." ]
python
train
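The vars() check is what makes mixins transparent: only classes that define provider_registry in their own __dict__ participate in the walk. A self-contained sketch of the same lookup:

```python
class Base:
    provider_registry = {"config": "ConfigProvider"}

class Mixin:
    pass  # no registry of its own; the walk skips it via vars()

class Child(Mixin, Base):
    provider_registry = {"db": "DbProvider"}

def lookup(cls, basenote):
    # Same walk as the record above: the first class in the MRO whose
    # own __dict__ has a provider_registry containing the note wins.
    for c in cls.mro():
        if "provider_registry" not in vars(c):
            continue
        if basenote in c.provider_registry:
            return c.provider_registry[basenote]
    raise LookupError(repr(basenote))

assert lookup(Child, "db") == "DbProvider"            # found on Child
assert lookup(Child, "config") == "ConfigProvider"    # inherited via MRO
```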
gem/oq-engine
openquake/commonlib/calc.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/calc.py#L268-L291
def to_array(self, ebruptures): """ Convert a list of ebruptures into an array of dtype RuptureRata.dt """ data = [] for ebr in ebruptures: rup = ebr.rupture self.cmaker.add_rup_params(rup) ruptparams = tuple(getattr(rup, param) for param in self.params) point = rup.surface.get_middle_point() multi_lons, multi_lats = rup.surface.get_surface_boundaries() bounds = ','.join('((%s))' % ','.join( '%.5f %.5f' % (lon, lat) for lon, lat in zip(lons, lats)) for lons, lats in zip(multi_lons, multi_lats)) try: rate = ebr.rupture.occurrence_rate except AttributeError: # for nonparametric sources rate = numpy.nan data.append( (ebr.serial, ebr.srcidx, ebr.n_occ, rate, rup.mag, point.x, point.y, point.z, rup.surface.get_strike(), rup.surface.get_dip(), rup.rake, 'MULTIPOLYGON(%s)' % decode(bounds)) + ruptparams) return numpy.array(data, self.dt)
[ "def", "to_array", "(", "self", ",", "ebruptures", ")", ":", "data", "=", "[", "]", "for", "ebr", "in", "ebruptures", ":", "rup", "=", "ebr", ".", "rupture", "self", ".", "cmaker", ".", "add_rup_params", "(", "rup", ")", "ruptparams", "=", "tuple", "(", "getattr", "(", "rup", ",", "param", ")", "for", "param", "in", "self", ".", "params", ")", "point", "=", "rup", ".", "surface", ".", "get_middle_point", "(", ")", "multi_lons", ",", "multi_lats", "=", "rup", ".", "surface", ".", "get_surface_boundaries", "(", ")", "bounds", "=", "','", ".", "join", "(", "'((%s))'", "%", "','", ".", "join", "(", "'%.5f %.5f'", "%", "(", "lon", ",", "lat", ")", "for", "lon", ",", "lat", "in", "zip", "(", "lons", ",", "lats", ")", ")", "for", "lons", ",", "lats", "in", "zip", "(", "multi_lons", ",", "multi_lats", ")", ")", "try", ":", "rate", "=", "ebr", ".", "rupture", ".", "occurrence_rate", "except", "AttributeError", ":", "# for nonparametric sources", "rate", "=", "numpy", ".", "nan", "data", ".", "append", "(", "(", "ebr", ".", "serial", ",", "ebr", ".", "srcidx", ",", "ebr", ".", "n_occ", ",", "rate", ",", "rup", ".", "mag", ",", "point", ".", "x", ",", "point", ".", "y", ",", "point", ".", "z", ",", "rup", ".", "surface", ".", "get_strike", "(", ")", ",", "rup", ".", "surface", ".", "get_dip", "(", ")", ",", "rup", ".", "rake", ",", "'MULTIPOLYGON(%s)'", "%", "decode", "(", "bounds", ")", ")", "+", "ruptparams", ")", "return", "numpy", ".", "array", "(", "data", ",", "self", ".", "dt", ")" ]
Convert a list of ebruptures into an array of dtype RuptureData.dt
[ "Convert", "a", "list", "of", "ebruptures", "into", "an", "array", "of", "dtype", "RuptureRata", ".", "dt" ]
python
train
HazyResearch/fonduer
src/fonduer/parser/spacy_parser.py
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/parser/spacy_parser.py#L212-L270
def split_sentences(self, text): """ Split input text into sentences that match CoreNLP's default format, but are not yet processed. :param text: The text of the parent paragraph of the sentences :return: """ if self.model.has_pipe("sentence_boundary_detector"): self.model.remove_pipe(name="sentence_boundary_detector") if not self.model.has_pipe("sentencizer"): sentencizer = self.model.create_pipe("sentencizer") # add sentencizer self.model.add_pipe(sentencizer) try: doc = self.model(text, disable=["parser", "tagger", "ner"]) except ValueError: # temporary increase character limit of spacy # 'Probably save' according to spacy, as no parser or NER is used previous_max_length = self.model.max_length self.model.max_length = 100_000_000 self.logger.warning( f"Temporarily increased spacy maximum " f"character limit to {self.model.max_length} to split sentences." ) doc = self.model(text, disable=["parser", "tagger", "ner"]) self.model.max_length = previous_max_length self.logger.warning( f"Spacy maximum " f"character limit set back to {self.model.max_length}." ) doc.is_parsed = True position = 0 for sent in doc.sents: parts = defaultdict(list) text = sent.text for i, token in enumerate(sent): parts["words"].append(str(token)) parts["lemmas"].append(token.lemma_) parts["pos_tags"].append(token.pos_) parts["ner_tags"].append("") # placeholder for later NLP parsing parts["char_offsets"].append(token.idx) parts["abs_char_offsets"].append(token.idx) parts["dep_parents"].append(0) # placeholder for later NLP parsing parts["dep_labels"].append("") # placeholder for later NLP parsing # make char_offsets relative to start of sentence parts["char_offsets"] = [ p - parts["char_offsets"][0] for p in parts["char_offsets"] ] parts["position"] = position parts["text"] = text position += 1 yield parts
[ "def", "split_sentences", "(", "self", ",", "text", ")", ":", "if", "self", ".", "model", ".", "has_pipe", "(", "\"sentence_boundary_detector\"", ")", ":", "self", ".", "model", ".", "remove_pipe", "(", "name", "=", "\"sentence_boundary_detector\"", ")", "if", "not", "self", ".", "model", ".", "has_pipe", "(", "\"sentencizer\"", ")", ":", "sentencizer", "=", "self", ".", "model", ".", "create_pipe", "(", "\"sentencizer\"", ")", "# add sentencizer", "self", ".", "model", ".", "add_pipe", "(", "sentencizer", ")", "try", ":", "doc", "=", "self", ".", "model", "(", "text", ",", "disable", "=", "[", "\"parser\"", ",", "\"tagger\"", ",", "\"ner\"", "]", ")", "except", "ValueError", ":", "# temporary increase character limit of spacy", "# 'Probably save' according to spacy, as no parser or NER is used", "previous_max_length", "=", "self", ".", "model", ".", "max_length", "self", ".", "model", ".", "max_length", "=", "100_000_000", "self", ".", "logger", ".", "warning", "(", "f\"Temporarily increased spacy maximum \"", "f\"character limit to {self.model.max_length} to split sentences.\"", ")", "doc", "=", "self", ".", "model", "(", "text", ",", "disable", "=", "[", "\"parser\"", ",", "\"tagger\"", ",", "\"ner\"", "]", ")", "self", ".", "model", ".", "max_length", "=", "previous_max_length", "self", ".", "logger", ".", "warning", "(", "f\"Spacy maximum \"", "f\"character limit set back to {self.model.max_length}.\"", ")", "doc", ".", "is_parsed", "=", "True", "position", "=", "0", "for", "sent", "in", "doc", ".", "sents", ":", "parts", "=", "defaultdict", "(", "list", ")", "text", "=", "sent", ".", "text", "for", "i", ",", "token", "in", "enumerate", "(", "sent", ")", ":", "parts", "[", "\"words\"", "]", ".", "append", "(", "str", "(", "token", ")", ")", "parts", "[", "\"lemmas\"", "]", ".", "append", "(", "token", ".", "lemma_", ")", "parts", "[", "\"pos_tags\"", "]", ".", "append", "(", "token", ".", "pos_", ")", "parts", "[", "\"ner_tags\"", "]", ".", "append", "(", "\"\"", ")", "# placeholder for later NLP parsing", "parts", "[", "\"char_offsets\"", "]", ".", "append", "(", "token", ".", "idx", ")", "parts", "[", "\"abs_char_offsets\"", "]", ".", "append", "(", "token", ".", "idx", ")", "parts", "[", "\"dep_parents\"", "]", ".", "append", "(", "0", ")", "# placeholder for later NLP parsing", "parts", "[", "\"dep_labels\"", "]", ".", "append", "(", "\"\"", ")", "# placeholder for later NLP parsing", "# make char_offsets relative to start of sentence", "parts", "[", "\"char_offsets\"", "]", "=", "[", "p", "-", "parts", "[", "\"char_offsets\"", "]", "[", "0", "]", "for", "p", "in", "parts", "[", "\"char_offsets\"", "]", "]", "parts", "[", "\"position\"", "]", "=", "position", "parts", "[", "\"text\"", "]", "=", "text", "position", "+=", "1", "yield", "parts" ]
Split input text into sentences that match CoreNLP's default format, but are not yet processed. :param text: The text of the parent paragraph of the sentences :return:
[ "Split", "input", "text", "into", "sentences", "that", "match", "CoreNLP", "s", "default", "format", "but", "are", "not", "yet", "processed", "." ]
python
train
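One detail worth isolating is the offset bookkeeping: absolute character offsets are preserved in abs_char_offsets, while char_offsets are rebased so the first token of every sentence starts at 0. A sketch with made-up token offsets:

```python
from collections import defaultdict

# Tokens of a second sentence that begins at absolute offset 16 in
# "First sentence. Second sentence."
parts = defaultdict(list)
for word, abs_off in [("Second", 16), ("sentence", 23), (".", 31)]:
    parts["words"].append(word)
    parts["char_offsets"].append(abs_off)
    parts["abs_char_offsets"].append(abs_off)

# Same rebasing step as in the record above.
parts["char_offsets"] = [p - parts["char_offsets"][0]
                         for p in parts["char_offsets"]]
print(parts["char_offsets"])       # [0, 7, 15]
print(parts["abs_char_offsets"])   # [16, 23, 31]
```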
pkgw/pwkit
pwkit/environments/casa/util.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/environments/casa/util.py#L108-L163
def datadir(*subdirs): """Get a path within the CASA data directory. subdirs Extra elements to append to the returned path. This function locates the directory where CASA resource data files (tables of time offsets, calibrator models, etc.) are stored. If called with no arguments, it simply returns that path. If arguments are provided, they are appended to the returned path using :func:`os.path.join`, making it easy to construct the names of specific data files. For instance:: from pwkit.environments.casa import util cal_image_path = util.datadir('nrao', 'VLA', 'CalModels', '3C286_C.im') tb = util.tools.image() tb.open(cal_image_path) """ import os.path data = None if 'CASAPATH' in os.environ: data = os.path.join(os.environ['CASAPATH'].split()[0], 'data') if data is None: # The Conda CASA directory layout: try: import casadef except ImportError: pass else: data = os.path.join(os.path.dirname(casadef.task_directory), 'data') if not os.path.isdir(data): # Sigh, hack for CASA 4.7 + Conda; should be straightened out: dn = os.path.dirname data = os.path.join(dn(dn(dn(casadef.task_directory))), 'lib', 'casa', 'data') if not os.path.isdir(data): data = None if data is None: import casac prevp = None p = os.path.dirname(casac.__file__) while len(p) and p != prevp: data = os.path.join(p, 'data') if os.path.isdir(data): break prevp = p p = os.path.dirname(p) if not os.path.isdir(data): raise RuntimeError('cannot identify CASA data directory') return os.path.join(data, *subdirs)
[ "def", "datadir", "(", "*", "subdirs", ")", ":", "import", "os", ".", "path", "data", "=", "None", "if", "'CASAPATH'", "in", "os", ".", "environ", ":", "data", "=", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'CASAPATH'", "]", ".", "split", "(", ")", "[", "0", "]", ",", "'data'", ")", "if", "data", "is", "None", ":", "# The Conda CASA directory layout:", "try", ":", "import", "casadef", "except", "ImportError", ":", "pass", "else", ":", "data", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "casadef", ".", "task_directory", ")", ",", "'data'", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "data", ")", ":", "# Sigh, hack for CASA 4.7 + Conda; should be straightened out:", "dn", "=", "os", ".", "path", ".", "dirname", "data", "=", "os", ".", "path", ".", "join", "(", "dn", "(", "dn", "(", "dn", "(", "casadef", ".", "task_directory", ")", ")", ")", ",", "'lib'", ",", "'casa'", ",", "'data'", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "data", ")", ":", "data", "=", "None", "if", "data", "is", "None", ":", "import", "casac", "prevp", "=", "None", "p", "=", "os", ".", "path", ".", "dirname", "(", "casac", ".", "__file__", ")", "while", "len", "(", "p", ")", "and", "p", "!=", "prevp", ":", "data", "=", "os", ".", "path", ".", "join", "(", "p", ",", "'data'", ")", "if", "os", ".", "path", ".", "isdir", "(", "data", ")", ":", "break", "prevp", "=", "p", "p", "=", "os", ".", "path", ".", "dirname", "(", "p", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "data", ")", ":", "raise", "RuntimeError", "(", "'cannot identify CASA data directory'", ")", "return", "os", ".", "path", ".", "join", "(", "data", ",", "*", "subdirs", ")" ]
Get a path within the CASA data directory. subdirs Extra elements to append to the returned path. This function locates the directory where CASA resource data files (tables of time offsets, calibrator models, etc.) are stored. If called with no arguments, it simply returns that path. If arguments are provided, they are appended to the returned path using :func:`os.path.join`, making it easy to construct the names of specific data files. For instance:: from pwkit.environments.casa import util cal_image_path = util.datadir('nrao', 'VLA', 'CalModels', '3C286_C.im') tb = util.tools.image() tb.open(cal_image_path)
[ "Get", "a", "path", "within", "the", "CASA", "data", "directory", "." ]
python
train
facetoe/zenpy
zenpy/lib/api.py
https://github.com/facetoe/zenpy/blob/34c54c7e408b9ed01604ddf8b3422204c8bf31ea/zenpy/lib/api.py#L1142-L1148
def metrics_incremental(self, start_time): """ Retrieve TicketMetric incremental :param start_time: time to retrieve events from. """ return self._query_zendesk(self.endpoint.metrics.incremental, 'ticket_metric_events', start_time=start_time)
[ "def", "metrics_incremental", "(", "self", ",", "start_time", ")", ":", "return", "self", ".", "_query_zendesk", "(", "self", ".", "endpoint", ".", "metrics", ".", "incremental", ",", "'ticket_metric_events'", ",", "start_time", "=", "start_time", ")" ]
Retrieve TicketMetric incremental :param start_time: time to retrieve events from.
[ "Retrieve", "TicketMetric", "incremental" ]
python
train
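A hypothetical call, assuming a configured Zenpy client; the live call needs real credentials and network access, so it is commented out. Zendesk incremental endpoints take a start time, and a UNIX epoch works:

```python
import datetime

start_time = int(datetime.datetime(2023, 1, 1).timestamp())

# Hypothetical client setup and call:
# from zenpy import Zenpy
# client = Zenpy(subdomain="example", email="me@example.com", token="...")
# for event in client.tickets.metrics_incremental(start_time=start_time):
#     print(event.id, event.time)
```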
asmodehn/filefinder2
filefinder2/_filefinder2.py
https://github.com/asmodehn/filefinder2/blob/3f0b211ce11a34562e2a2160e039ae5290b68d6b/filefinder2/_filefinder2.py#L127-L139
def find_module(cls, fullname, path=None): """find the module on sys.path or 'path' based on sys.path_hooks and sys.path_importer_cache. This method is for python2 only """ spec = cls.find_spec(fullname, path) if spec is None: return None elif spec.loader is None and spec.submodule_search_locations: # Here we need to create a namespace loader to handle namespaces since python2 doesn't... return NamespaceLoader2(spec.name, spec.submodule_search_locations) else: return spec.loader
[ "def", "find_module", "(", "cls", ",", "fullname", ",", "path", "=", "None", ")", ":", "spec", "=", "cls", ".", "find_spec", "(", "fullname", ",", "path", ")", "if", "spec", "is", "None", ":", "return", "None", "elif", "spec", ".", "loader", "is", "None", "and", "spec", ".", "submodule_search_locations", ":", "# Here we need to create a namespace loader to handle namespaces since python2 doesn't...", "return", "NamespaceLoader2", "(", "spec", ".", "name", ",", "spec", ".", "submodule_search_locations", ")", "else", ":", "return", "spec", ".", "loader" ]
find the module on sys.path or 'path' based on sys.path_hooks and sys.path_importer_cache. This method is for python2 only
[ "find", "the", "module", "on", "sys", ".", "path", "or", "path", "based", "on", "sys", ".", "path_hooks", "and", "sys", ".", "path_importer_cache", ".", "This", "method", "is", "for", "python2", "only" ]
python
train
mdgoldberg/sportsref
sportsref/nba/players.py
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nba/players.py#L142-L157
def _get_stats_table(self, table_id, kind='R', summary=False): """Gets a stats table from the player page; helper function that does the work for per-game, per-100-poss, etc. stats. :table_id: the ID of the HTML table. :kind: specifies regular season, playoffs, or both. One of 'R', 'P', 'B'. Defaults to 'R'. :returns: A DataFrame of stats. """ doc = self.get_main_doc() table_id = 'table#{}{}'.format( 'playoffs_' if kind == 'P' else '', table_id) table = doc(table_id) df = sportsref.utils.parse_table(table, flatten=(not summary), footer=summary) return df
[ "def", "_get_stats_table", "(", "self", ",", "table_id", ",", "kind", "=", "'R'", ",", "summary", "=", "False", ")", ":", "doc", "=", "self", ".", "get_main_doc", "(", ")", "table_id", "=", "'table#{}{}'", ".", "format", "(", "'playoffs_'", "if", "kind", "==", "'P'", "else", "''", ",", "table_id", ")", "table", "=", "doc", "(", "table_id", ")", "df", "=", "sportsref", ".", "utils", ".", "parse_table", "(", "table", ",", "flatten", "=", "(", "not", "summary", ")", ",", "footer", "=", "summary", ")", "return", "df" ]
Gets a stats table from the player page; helper function that does the work for per-game, per-100-poss, etc. stats. :table_id: the ID of the HTML table. :kind: specifies regular season, playoffs, or both. One of 'R', 'P', 'B'. Defaults to 'R'. :returns: A DataFrame of stats.
[ "Gets", "a", "stats", "table", "from", "the", "player", "page", ";", "helper", "function", "that", "does", "the", "work", "for", "per", "-", "game", "per", "-", "100", "-", "poss", "etc", ".", "stats", "." ]
python
test
MuhammedHasan/sklearn_utils
sklearn_utils/noise/noise_preprocessing.py
https://github.com/MuhammedHasan/sklearn_utils/blob/337c3b7a27f4921d12da496f66a2b83ef582b413/sklearn_utils/noise/noise_preprocessing.py#L24-L31
def transform(self, X): ''' :X: numpy ndarray ''' noise = self._noise_func(*self._args, size=X.shape) results = X + noise self.relative_noise_size_ = self.relative_noise_size(X, results) return results
[ "def", "transform", "(", "self", ",", "X", ")", ":", "noise", "=", "self", ".", "_noise_func", "(", "*", "self", ".", "_args", ",", "size", "=", "X", ".", "shape", ")", "results", "=", "X", "+", "noise", "self", ".", "relative_noise_size_", "=", "self", ".", "relative_noise_size", "(", "X", ",", "results", ")", "return", "results" ]
:X: numpy ndarray
[ ":", "X", ":", "numpy", "ndarray" ]
python
test
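A standalone numpy sketch of the same step; how relative_noise_size_ is defined lives elsewhere in the class, so the norm ratio used below is an assumption:

```python
import numpy as np

rng = np.random.default_rng(0)
X = np.ones((4, 3))

# Draw noise with the same shape as X and add it, as in transform().
noise = rng.normal(0.0, 0.1, size=X.shape)
results = X + noise

# Assumed definition: ratio of the noise norm to the signal norm.
relative_noise_size = np.linalg.norm(results - X) / np.linalg.norm(X)
print(results.shape, round(float(relative_noise_size), 3))
```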
click-contrib/sphinx-click
sphinx_click/ext.py
https://github.com/click-contrib/sphinx-click/blob/ec76d15697ec80e51486a6e3daa0aec60b04870f/sphinx_click/ext.py#L175-L189
def _format_subcommand(command): """Format a sub-command of a `click.Command` or `click.Group`.""" yield '.. object:: {}'.format(command.name) # click 7.0 stopped setting short_help by default if CLICK_VERSION < (7, 0): short_help = command.short_help else: short_help = command.get_short_help_str() if short_help: yield '' for line in statemachine.string2lines( short_help, tab_width=4, convert_whitespace=True): yield _indent(line)
[ "def", "_format_subcommand", "(", "command", ")", ":", "yield", "'.. object:: {}'", ".", "format", "(", "command", ".", "name", ")", "# click 7.0 stopped setting short_help by default", "if", "CLICK_VERSION", "<", "(", "7", ",", "0", ")", ":", "short_help", "=", "command", ".", "short_help", "else", ":", "short_help", "=", "command", ".", "get_short_help_str", "(", ")", "if", "short_help", ":", "yield", "''", "for", "line", "in", "statemachine", ".", "string2lines", "(", "short_help", ",", "tab_width", "=", "4", ",", "convert_whitespace", "=", "True", ")", ":", "yield", "_indent", "(", "line", ")" ]
Format a sub-command of a `click.Command` or `click.Group`.
[ "Format", "a", "sub", "-", "command", "of", "a", "click", ".", "Command", "or", "click", ".", "Group", "." ]
python
train
SeabornGames/RequestClient
seaborn/request_client/connection_basic.py
https://github.com/SeabornGames/RequestClient/blob/21aeb951ddfdb6ee453ad0edc896ff224e06425d/seaborn/request_client/connection_basic.py#L480-L486
def sub_base_uri(self): """ This will return the sub_base_uri parsed from the base_uri :return: str of the sub_base_uri """ return self._base_uri and \ self._base_uri.split('://')[-1].split('.')[0] \ or self._base_uri
[ "def", "sub_base_uri", "(", "self", ")", ":", "return", "self", ".", "_base_uri", "and", "self", ".", "_base_uri", ".", "split", "(", "'://'", ")", "[", "-", "1", "]", ".", "split", "(", "'.'", ")", "[", "0", "]", "or", "self", ".", "_base_uri" ]
This will return the sub_base_uri parsed from the base_uri :return: str of the sub_base_uri
[ "This", "will", "return", "the", "sub_base_uri", "parsed", "from", "the", "base_uri", ":", "return", ":", "str", "of", "the", "sub_base_uri" ]
python
train
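The and/or chain above is terse; rewritten as a plain function it is easy to exercise:

```python
def sub_base_uri(base_uri):
    # Same chain as the property above: drop any scheme, then take the
    # host's first dot-separated label; falsy values pass through.
    return base_uri and base_uri.split('://')[-1].split('.')[0] or base_uri

assert sub_base_uri('https://api.example.com/v1') == 'api'
assert sub_base_uri('example.com') == 'example'
assert sub_base_uri('') == ''
```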
tensorflow/tensor2tensor
tensor2tensor/rl/evaluator.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/evaluator.py#L300-L321
def make_agent_from_hparams( agent_type, base_env, stacked_env, loop_hparams, policy_hparams, planner_hparams, model_dir, policy_dir, sampling_temp, video_writers=() ): """Creates an Agent from hparams.""" def sim_env_kwargs_fn(): return rl.make_simulated_env_kwargs( base_env, loop_hparams, batch_size=planner_hparams.batch_size, model_dir=model_dir ) planner_kwargs = planner_hparams.values() planner_kwargs.pop("batch_size") planner_kwargs.pop("rollout_agent_type") planner_kwargs.pop("env_type") return make_agent( agent_type, stacked_env, policy_hparams, policy_dir, sampling_temp, sim_env_kwargs_fn, loop_hparams.frame_stack_size, planner_hparams.rollout_agent_type, inner_batch_size=planner_hparams.batch_size, env_type=planner_hparams.env_type, video_writers=video_writers, **planner_kwargs )
[ "def", "make_agent_from_hparams", "(", "agent_type", ",", "base_env", ",", "stacked_env", ",", "loop_hparams", ",", "policy_hparams", ",", "planner_hparams", ",", "model_dir", ",", "policy_dir", ",", "sampling_temp", ",", "video_writers", "=", "(", ")", ")", ":", "def", "sim_env_kwargs_fn", "(", ")", ":", "return", "rl", ".", "make_simulated_env_kwargs", "(", "base_env", ",", "loop_hparams", ",", "batch_size", "=", "planner_hparams", ".", "batch_size", ",", "model_dir", "=", "model_dir", ")", "planner_kwargs", "=", "planner_hparams", ".", "values", "(", ")", "planner_kwargs", ".", "pop", "(", "\"batch_size\"", ")", "planner_kwargs", ".", "pop", "(", "\"rollout_agent_type\"", ")", "planner_kwargs", ".", "pop", "(", "\"env_type\"", ")", "return", "make_agent", "(", "agent_type", ",", "stacked_env", ",", "policy_hparams", ",", "policy_dir", ",", "sampling_temp", ",", "sim_env_kwargs_fn", ",", "loop_hparams", ".", "frame_stack_size", ",", "planner_hparams", ".", "rollout_agent_type", ",", "inner_batch_size", "=", "planner_hparams", ".", "batch_size", ",", "env_type", "=", "planner_hparams", ".", "env_type", ",", "video_writers", "=", "video_writers", ",", "*", "*", "planner_kwargs", ")" ]
Creates an Agent from hparams.
[ "Creates", "an", "Agent", "from", "hparams", "." ]
python
train
secdev/scapy
scapy/contrib/automotive/someip.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/contrib/automotive/someip.py#L179-L188
def _is_tp(pkt): """Returns true if pkt is using SOMEIP-TP, else returns false.""" tp = [SOMEIP.TYPE_TP_REQUEST, SOMEIP.TYPE_TP_REQUEST_NO_RET, SOMEIP.TYPE_TP_NOTIFICATION, SOMEIP.TYPE_TP_RESPONSE, SOMEIP.TYPE_TP_ERROR] if isinstance(pkt, Packet): return pkt.msg_type in tp else: return pkt[15] in tp
[ "def", "_is_tp", "(", "pkt", ")", ":", "tp", "=", "[", "SOMEIP", ".", "TYPE_TP_REQUEST", ",", "SOMEIP", ".", "TYPE_TP_REQUEST_NO_RET", ",", "SOMEIP", ".", "TYPE_TP_NOTIFICATION", ",", "SOMEIP", ".", "TYPE_TP_RESPONSE", ",", "SOMEIP", ".", "TYPE_TP_ERROR", "]", "if", "isinstance", "(", "pkt", ",", "Packet", ")", ":", "return", "pkt", ".", "msg_type", "in", "tp", "else", ":", "return", "pkt", "[", "15", "]", "in", "tp" ]
Returns true if pkt is using SOMEIP-TP, else returns false.
[ "Returns", "true", "if", "pkt", "is", "using", "SOMEIP", "-", "TP", "else", "returns", "false", "." ]
python
train
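A standalone sketch of the membership test. The constant values below follow the SOME/IP spec convention of setting the TP flag 0x20 on the base message-type codes; that mapping is an assumption here, since the record only references the names on SOMEIP.

```python
TP_TYPES = [0x20, 0x21, 0x22, 0xa0, 0xa1]

def is_tp(msg_type):
    # Mirrors the Packet branch of _is_tp (pkt.msg_type in tp).
    return msg_type in TP_TYPES

assert is_tp(0x20)       # TP request
assert not is_tp(0x00)   # plain request
```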
sibirrer/lenstronomy
lenstronomy/LensModel/Optimizer/optimizer.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/Optimizer/optimizer.py#L145-L195
def optimize(self, n_particles=50, n_iterations=250, restart=1): """ the best result of all optimizations will be returned. total number of lens models sovled: n_particles*n_iterations :param n_particles: number of particle swarm particles :param n_iterations: number of particle swarm iternations :param restart: number of times to execute the optimization; :return: lens model keywords, [optimized source position], best fit image positions """ if restart < 0: raise ValueError("parameter 'restart' must be integer of value > 0") # particle swarm optimization penalties, parameters, src_pen_best = [],[], [] for run in range(0, restart): penalty, params = self._single_optimization(n_particles, n_iterations) penalties.append(penalty) parameters.append(params) src_pen_best.append(self._optimizer.src_pen_best) # select the best optimization best_index = np.argmin(penalties) # combine the optimized parameters with the parameters kept fixed during the optimization to obtain full kwargs_lens kwargs_varied = self._params.argstovary_todictionary(parameters[best_index]) kwargs_lens_final = kwargs_varied + self._params.argsfixed_todictionary() # solve for the optimized image positions srcx, srcy = self._optimizer.lensing._ray_shooting_fast(kwargs_varied) source_x, source_y = np.mean(srcx), np.mean(srcy) # if we have a good enough solution, no point in recomputing the image positions since this can be quite slow # and will give the same answer if src_pen_best[best_index] < self._tol_src_penalty: x_image, y_image = self.x_pos, self.y_pos else: # Here, the solver has the instance of "lensing_class" or "LensModel" for multiplane/singleplane respectively. print('Warning: possibly a bad fit.') x_image, y_image = self.solver.findBrightImage(source_x, source_y, kwargs_lens_final, arrival_time_sort=False) #x_image, y_image = self.solver.image_position_from_source(source_x, source_y, kwargs_lens_final, arrival_time_sort = False) if self._verbose: print('optimization done.') print('Recovered source position: ', (srcx, srcy)) return kwargs_lens_final, [source_x, source_y], [x_image, y_image]
[ "def", "optimize", "(", "self", ",", "n_particles", "=", "50", ",", "n_iterations", "=", "250", ",", "restart", "=", "1", ")", ":", "if", "restart", "<", "0", ":", "raise", "ValueError", "(", "\"parameter 'restart' must be integer of value > 0\"", ")", "# particle swarm optimization", "penalties", ",", "parameters", ",", "src_pen_best", "=", "[", "]", ",", "[", "]", ",", "[", "]", "for", "run", "in", "range", "(", "0", ",", "restart", ")", ":", "penalty", ",", "params", "=", "self", ".", "_single_optimization", "(", "n_particles", ",", "n_iterations", ")", "penalties", ".", "append", "(", "penalty", ")", "parameters", ".", "append", "(", "params", ")", "src_pen_best", ".", "append", "(", "self", ".", "_optimizer", ".", "src_pen_best", ")", "# select the best optimization", "best_index", "=", "np", ".", "argmin", "(", "penalties", ")", "# combine the optimized parameters with the parameters kept fixed during the optimization to obtain full kwargs_lens", "kwargs_varied", "=", "self", ".", "_params", ".", "argstovary_todictionary", "(", "parameters", "[", "best_index", "]", ")", "kwargs_lens_final", "=", "kwargs_varied", "+", "self", ".", "_params", ".", "argsfixed_todictionary", "(", ")", "# solve for the optimized image positions", "srcx", ",", "srcy", "=", "self", ".", "_optimizer", ".", "lensing", ".", "_ray_shooting_fast", "(", "kwargs_varied", ")", "source_x", ",", "source_y", "=", "np", ".", "mean", "(", "srcx", ")", ",", "np", ".", "mean", "(", "srcy", ")", "# if we have a good enough solution, no point in recomputing the image positions since this can be quite slow", "# and will give the same answer", "if", "src_pen_best", "[", "best_index", "]", "<", "self", ".", "_tol_src_penalty", ":", "x_image", ",", "y_image", "=", "self", ".", "x_pos", ",", "self", ".", "y_pos", "else", ":", "# Here, the solver has the instance of \"lensing_class\" or \"LensModel\" for multiplane/singleplane respectively.", "print", "(", "'Warning: possibly a bad fit.'", ")", "x_image", ",", "y_image", "=", "self", ".", "solver", ".", "findBrightImage", "(", "source_x", ",", "source_y", ",", "kwargs_lens_final", ",", "arrival_time_sort", "=", "False", ")", "#x_image, y_image = self.solver.image_position_from_source(source_x, source_y, kwargs_lens_final, arrival_time_sort = False)", "if", "self", ".", "_verbose", ":", "print", "(", "'optimization done.'", ")", "print", "(", "'Recovered source position: '", ",", "(", "srcx", ",", "srcy", ")", ")", "return", "kwargs_lens_final", ",", "[", "source_x", ",", "source_y", "]", ",", "[", "x_image", ",", "y_image", "]" ]
the best result of all optimizations will be returned. total number of lens models solved: n_particles*n_iterations :param n_particles: number of particle swarm particles :param n_iterations: number of particle swarm iterations :param restart: number of times to execute the optimization; :return: lens model keywords, [optimized source position], best fit image positions
[ "the", "best", "result", "of", "all", "optimizations", "will", "be", "returned", ".", "total", "number", "of", "lens", "models", "sovled", ":", "n_particles", "*", "n_iterations" ]
python
train
PmagPy/PmagPy
programs/magic_gui2.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/magic_gui2.py#L210-L236
def on_change_dir_button(self, event): """ create change directory frame """ currentDirectory = self.WD #os.getcwd() change_dir_dialog = wx.DirDialog(self.panel, "Choose your working directory to create or edit a MagIC contribution:", defaultPath=currentDirectory, style=wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON | wx.DD_CHANGE_DIR) result = change_dir_dialog.ShowModal() if result == wx.ID_CANCEL: return if result == wx.ID_OK: self.WD = change_dir_dialog.GetPath() self.dir_path.SetValue(self.WD) change_dir_dialog.Destroy() wait = wx.BusyInfo('Initializing data object in new directory, please wait...') wx.SafeYield() print('-I- Initializing magic data object') # make new builder object, but reuse old data_model self.er_magic = builder.ErMagicBuilder(self.WD, self.er_magic.data_model) print('-I- Read in any available data from working directory') self.er_magic.get_all_magic_info() print('-I- Initializing headers') self.er_magic.init_default_headers() self.er_magic.init_actual_headers() del wait
[ "def", "on_change_dir_button", "(", "self", ",", "event", ")", ":", "currentDirectory", "=", "self", ".", "WD", "#os.getcwd()", "change_dir_dialog", "=", "wx", ".", "DirDialog", "(", "self", ".", "panel", ",", "\"Choose your working directory to create or edit a MagIC contribution:\"", ",", "defaultPath", "=", "currentDirectory", ",", "style", "=", "wx", ".", "DD_DEFAULT_STYLE", "|", "wx", ".", "DD_NEW_DIR_BUTTON", "|", "wx", ".", "DD_CHANGE_DIR", ")", "result", "=", "change_dir_dialog", ".", "ShowModal", "(", ")", "if", "result", "==", "wx", ".", "ID_CANCEL", ":", "return", "if", "result", "==", "wx", ".", "ID_OK", ":", "self", ".", "WD", "=", "change_dir_dialog", ".", "GetPath", "(", ")", "self", ".", "dir_path", ".", "SetValue", "(", "self", ".", "WD", ")", "change_dir_dialog", ".", "Destroy", "(", ")", "wait", "=", "wx", ".", "BusyInfo", "(", "'Initializing data object in new directory, please wait...'", ")", "wx", ".", "SafeYield", "(", ")", "print", "(", "'-I- Initializing magic data object'", ")", "# make new builder object, but reuse old data_model", "self", ".", "er_magic", "=", "builder", ".", "ErMagicBuilder", "(", "self", ".", "WD", ",", "self", ".", "er_magic", ".", "data_model", ")", "print", "(", "'-I- Read in any available data from working directory'", ")", "self", ".", "er_magic", ".", "get_all_magic_info", "(", ")", "print", "(", "'-I- Initializing headers'", ")", "self", ".", "er_magic", ".", "init_default_headers", "(", ")", "self", ".", "er_magic", ".", "init_actual_headers", "(", ")", "del", "wait" ]
create change directory frame
[ "create", "change", "directory", "frame" ]
python
train
Kortemme-Lab/klab
klab/general/date_ext.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/general/date_ext.py#L14-L22
def date_to_long_form_string(dt, locale_ = 'en_US.utf8'): '''dt should be a datetime.date object.''' if locale_: old_locale = locale.getlocale() locale.setlocale(locale.LC_ALL, locale_) v = dt.strftime("%A %B %d %Y") if locale_: locale.setlocale(locale.LC_ALL, old_locale) return v
[ "def", "date_to_long_form_string", "(", "dt", ",", "locale_", "=", "'en_US.utf8'", ")", ":", "if", "locale_", ":", "old_locale", "=", "locale", ".", "getlocale", "(", ")", "locale", ".", "setlocale", "(", "locale", ".", "LC_ALL", ",", "locale_", ")", "v", "=", "dt", ".", "strftime", "(", "\"%A %B %d %Y\"", ")", "if", "locale_", ":", "locale", ".", "setlocale", "(", "locale", ".", "LC_ALL", ",", "old_locale", ")", "return", "v" ]
dt should be a datetime.date object.
[ "dt", "should", "be", "a", "datetime", ".", "date", "object", "." ]
python
train
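A runnable variant of the same save/switch/restore pattern; passing None for locale_ skips the switch, so the snippet also works where en_US.utf8 is not installed:

```python
import datetime
import locale

def date_to_long_form_string(dt, locale_=None):
    # Save the current locale, switch, format, then restore -- the same
    # dance as the record above, with a None default for portability.
    if locale_:
        old_locale = locale.getlocale()
        locale.setlocale(locale.LC_ALL, locale_)
    v = dt.strftime("%A %B %d %Y")
    if locale_:
        locale.setlocale(locale.LC_ALL, old_locale)
    return v

print(date_to_long_form_string(datetime.date(2024, 3, 1)))  # Friday March 01 2024
```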
chemlab/chemlab
chemlab/qc/utils.py
https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/qc/utils.py#L142-L149
def symorth(S): "Symmetric orthogonalization" E,U = np.linalg.eigh(S) n = len(E) Shalf = np.identity(n,'d') for i in range(n): Shalf[i,i] /= np.sqrt(E[i]) return simx(Shalf,U,True)
[ "def", "symorth", "(", "S", ")", ":", "E", ",", "U", "=", "np", ".", "linalg", ".", "eigh", "(", "S", ")", "n", "=", "len", "(", "E", ")", "Shalf", "=", "np", ".", "identity", "(", "n", ",", "'d'", ")", "for", "i", "in", "range", "(", "n", ")", ":", "Shalf", "[", "i", ",", "i", "]", "/=", "np", ".", "sqrt", "(", "E", "[", "i", "]", ")", "return", "simx", "(", "Shalf", ",", "U", ",", "True", ")" ]
Symmetric orthogonalization
[ "Symmetric", "orthogonalization" ]
python
train
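In matrix form this is Löwdin symmetric orthogonalization: X = S^(-1/2) = U diag(E^(-1/2)) U^T, so that X^T S X = I; simx(Shalf, U, True) in the record is assumed to compute the congruence U·Shalf·U^T. A quick numpy check of the identity:

```python
import numpy as np

rng = np.random.default_rng(1)
A = rng.normal(size=(4, 4))
S = A @ A.T + 4 * np.eye(4)   # a symmetric positive-definite "overlap"

# Eigendecompose S and build S^(-1/2) directly.
E, U = np.linalg.eigh(S)
X = U @ np.diag(E ** -0.5) @ U.T

# The orthogonalizer turns S into the identity metric.
assert np.allclose(X.T @ S @ X, np.eye(4))
```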
PyCQA/pylint
pylint/lint.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/lint.py#L1360-L1392
def preprocess_options(args, search_for): """look for some options (keys of <search_for>) which have to be processed before others values of <search_for> are callback functions to call when the option is found """ i = 0 while i < len(args): arg = args[i] if arg.startswith("--"): try: option, val = arg[2:].split("=", 1) except ValueError: option, val = arg[2:], None try: cb, takearg = search_for[option] except KeyError: i += 1 else: del args[i] if takearg and val is None: if i >= len(args) or args[i].startswith("-"): msg = "Option %s expects a value" % option raise ArgumentPreprocessingError(msg) val = args[i] del args[i] elif not takearg and val is not None: msg = "Option %s doesn't expects a value" % option raise ArgumentPreprocessingError(msg) cb(option, val) else: i += 1
[ "def", "preprocess_options", "(", "args", ",", "search_for", ")", ":", "i", "=", "0", "while", "i", "<", "len", "(", "args", ")", ":", "arg", "=", "args", "[", "i", "]", "if", "arg", ".", "startswith", "(", "\"--\"", ")", ":", "try", ":", "option", ",", "val", "=", "arg", "[", "2", ":", "]", ".", "split", "(", "\"=\"", ",", "1", ")", "except", "ValueError", ":", "option", ",", "val", "=", "arg", "[", "2", ":", "]", ",", "None", "try", ":", "cb", ",", "takearg", "=", "search_for", "[", "option", "]", "except", "KeyError", ":", "i", "+=", "1", "else", ":", "del", "args", "[", "i", "]", "if", "takearg", "and", "val", "is", "None", ":", "if", "i", ">=", "len", "(", "args", ")", "or", "args", "[", "i", "]", ".", "startswith", "(", "\"-\"", ")", ":", "msg", "=", "\"Option %s expects a value\"", "%", "option", "raise", "ArgumentPreprocessingError", "(", "msg", ")", "val", "=", "args", "[", "i", "]", "del", "args", "[", "i", "]", "elif", "not", "takearg", "and", "val", "is", "not", "None", ":", "msg", "=", "\"Option %s doesn't expects a value\"", "%", "option", "raise", "ArgumentPreprocessingError", "(", "msg", ")", "cb", "(", "option", ",", "val", ")", "else", ":", "i", "+=", "1" ]
look for some options (keys of <search_for>) which have to be processed before others. Values of <search_for> are callback functions to call when the option is found
[ "look", "for", "some", "options", "(", "keys", "of", "<search_for", ">", ")", "which", "have", "to", "be", "processed", "before", "others" ]
python
test
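A usage sketch, assuming preprocess_options from the record above is in scope. Each entry of search_for maps an option name to a (callback, takes_argument) pair, and handled options are stripped from args in place:

```python
captured = {}

def on_rcfile(option, value):
    captured[option] = value

def on_verbose(option, value):
    captured[option] = True

# preprocess_options mutates args, removing whatever it handled.
args = ["--rcfile=pylintrc", "--verbose", "lint_me.py"]
preprocess_options(args, {"rcfile": (on_rcfile, True),
                          "verbose": (on_verbose, False)})

assert captured == {"rcfile": "pylintrc", "verbose": True}
assert args == ["lint_me.py"]   # only unhandled arguments remain
```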
isogeo/isogeo-api-py-minsdk
isogeo_pysdk/checker.py
https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/checker.py#L308-L349
def check_edit_tab(self, tab: str, md_type: str): """Check if asked tab is part of Isogeo web form and reliable with metadata type. :param str tab: tab to check. Must be one one of EDIT_TABS attribute :param str md_type: metadata type. Must be one one of FILTER_TYPES """ # check parameters types if not isinstance(tab, str): raise TypeError("'tab' expected a str value.") else: pass if not isinstance(md_type, str): raise TypeError("'md_type' expected a str value.") else: pass # check parameters values if tab not in EDIT_TABS: raise ValueError( "'{}' isn't a valid edition tab. " "Available values: {}".format(tab, " | ".join(EDIT_TABS)) ) else: pass if md_type not in FILTER_TYPES: if md_type in FILTER_TYPES.values(): md_type = self._convert_md_type(md_type) else: raise ValueError( "'{}' isn't a valid metadata type. " "Available values: {}".format(md_type, " | ".join(FILTER_TYPES)) ) else: pass # check adequation tab/md_type if md_type not in EDIT_TABS.get(tab): raise ValueError( "'{}' isn't a valid tab for a '{}'' metadata." " Only for these types: {}.".format(tab, md_type, EDIT_TABS.get(tab)) ) else: return True
[ "def", "check_edit_tab", "(", "self", ",", "tab", ":", "str", ",", "md_type", ":", "str", ")", ":", "# check parameters types", "if", "not", "isinstance", "(", "tab", ",", "str", ")", ":", "raise", "TypeError", "(", "\"'tab' expected a str value.\"", ")", "else", ":", "pass", "if", "not", "isinstance", "(", "md_type", ",", "str", ")", ":", "raise", "TypeError", "(", "\"'md_type' expected a str value.\"", ")", "else", ":", "pass", "# check parameters values", "if", "tab", "not", "in", "EDIT_TABS", ":", "raise", "ValueError", "(", "\"'{}' isn't a valid edition tab. \"", "\"Available values: {}\"", ".", "format", "(", "tab", ",", "\" | \"", ".", "join", "(", "EDIT_TABS", ")", ")", ")", "else", ":", "pass", "if", "md_type", "not", "in", "FILTER_TYPES", ":", "if", "md_type", "in", "FILTER_TYPES", ".", "values", "(", ")", ":", "md_type", "=", "self", ".", "_convert_md_type", "(", "md_type", ")", "else", ":", "raise", "ValueError", "(", "\"'{}' isn't a valid metadata type. \"", "\"Available values: {}\"", ".", "format", "(", "md_type", ",", "\" | \"", ".", "join", "(", "FILTER_TYPES", ")", ")", ")", "else", ":", "pass", "# check adequation tab/md_type", "if", "md_type", "not", "in", "EDIT_TABS", ".", "get", "(", "tab", ")", ":", "raise", "ValueError", "(", "\"'{}' isn't a valid tab for a '{}'' metadata.\"", "\" Only for these types: {}.\"", ".", "format", "(", "tab", ",", "md_type", ",", "EDIT_TABS", ".", "get", "(", "tab", ")", ")", ")", "else", ":", "return", "True" ]
Check if asked tab is part of Isogeo web form and reliable with metadata type. :param str tab: tab to check. Must be one of EDIT_TABS attribute :param str md_type: metadata type. Must be one of FILTER_TYPES
[ "Check", "if", "asked", "tab", "is", "part", "of", "Isogeo", "web", "form", "and", "reliable", "with", "metadata", "type", "." ]
python
train
saltstack/salt
salt/modules/svn.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/svn.py#L76-L130
def info(cwd, targets=None, user=None, username=None, password=None, fmt='str'): ''' Display the Subversion information from the checkout. cwd The path to the Subversion repository targets : None files, directories, and URLs to pass to the command as arguments svn uses '.' by default user : None Run svn as a user other than what the minion runs as username : None Connect to the Subversion server as another user password : None Connect to the Subversion server with this password .. versionadded:: 0.17.0 fmt : str How to fmt the output from info. (str, xml, list, dict) CLI Example: .. code-block:: bash salt '*' svn.info /path/to/svn/repo ''' opts = list() if fmt == 'xml': opts.append('--xml') if targets: opts += salt.utils.args.shlex_split(targets) infos = _run_svn('info', cwd, user, username, password, opts) if fmt in ('str', 'xml'): return infos info_list = [] for infosplit in infos.split('\n\n'): info_list.append(_INI_RE.findall(infosplit)) if fmt == 'list': return info_list if fmt == 'dict': return [dict(tmp) for tmp in info_list]
[ "def", "info", "(", "cwd", ",", "targets", "=", "None", ",", "user", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ",", "fmt", "=", "'str'", ")", ":", "opts", "=", "list", "(", ")", "if", "fmt", "==", "'xml'", ":", "opts", ".", "append", "(", "'--xml'", ")", "if", "targets", ":", "opts", "+=", "salt", ".", "utils", ".", "args", ".", "shlex_split", "(", "targets", ")", "infos", "=", "_run_svn", "(", "'info'", ",", "cwd", ",", "user", ",", "username", ",", "password", ",", "opts", ")", "if", "fmt", "in", "(", "'str'", ",", "'xml'", ")", ":", "return", "infos", "info_list", "=", "[", "]", "for", "infosplit", "in", "infos", ".", "split", "(", "'\\n\\n'", ")", ":", "info_list", ".", "append", "(", "_INI_RE", ".", "findall", "(", "infosplit", ")", ")", "if", "fmt", "==", "'list'", ":", "return", "info_list", "if", "fmt", "==", "'dict'", ":", "return", "[", "dict", "(", "tmp", ")", "for", "tmp", "in", "info_list", "]" ]
Display the Subversion information from the checkout. cwd The path to the Subversion repository targets : None files, directories, and URLs to pass to the command as arguments svn uses '.' by default user : None Run svn as a user other than what the minion runs as username : None Connect to the Subversion server as another user password : None Connect to the Subversion server with this password .. versionadded:: 0.17.0 fmt : str How to fmt the output from info. (str, xml, list, dict) CLI Example: .. code-block:: bash salt '*' svn.info /path/to/svn/repo
[ "Display", "the", "Subversion", "information", "from", "the", "checkout", "." ]
python
train
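The fmt='list' and fmt='dict' paths boil down to splitting `svn info` output on blank lines and scraping "Key: value" pairs per block. A standalone sketch; _INI_RE's exact pattern is salt-internal, so the one below is an assumption:

```python
import re

INI_RE = re.compile(r"^([^:]+):\s+(\S.*)$", re.M)

raw = ("Path: .\n"
       "URL: http://svn.example.com/repo/trunk\n"
       "Revision: 42\n"
       "\n"
       "Path: docs\n"
       "URL: http://svn.example.com/repo/trunk/docs\n"
       "Revision: 42")

# One list of (key, value) pairs per blank-line-separated block.
info_list = [INI_RE.findall(block) for block in raw.split("\n\n")]
print([dict(block) for block in info_list])   # the fmt='dict' shape
```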
goodmami/penman
penman.py
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L691-L704
def edges(self, source=None, relation=None, target=None): """ Return edges filtered by their *source*, *relation*, or *target*. Edges don't include terminal triples (node types or attributes). """ edgematch = lambda e: ( (source is None or source == e.source) and (relation is None or relation == e.relation) and (target is None or target == e.target) ) variables = self.variables() edges = [t for t in self._triples if t.target in variables] return list(filter(edgematch, edges))
[ "def", "edges", "(", "self", ",", "source", "=", "None", ",", "relation", "=", "None", ",", "target", "=", "None", ")", ":", "edgematch", "=", "lambda", "e", ":", "(", "(", "source", "is", "None", "or", "source", "==", "e", ".", "source", ")", "and", "(", "relation", "is", "None", "or", "relation", "==", "e", ".", "relation", ")", "and", "(", "target", "is", "None", "or", "target", "==", "e", ".", "target", ")", ")", "variables", "=", "self", ".", "variables", "(", ")", "edges", "=", "[", "t", "for", "t", "in", "self", ".", "_triples", "if", "t", ".", "target", "in", "variables", "]", "return", "list", "(", "filter", "(", "edgematch", ",", "edges", ")", ")" ]
Return edges filtered by their *source*, *relation*, or *target*. Edges don't include terminal triples (node types or attributes).
[ "Return", "edges", "filtered", "by", "their", "*", "source", "*", "*", "relation", "*", "or", "*", "target", "*", "." ]
python
train
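A hypothetical usage sketch; the AMR string and variable names are made up, and module-level decode is assumed available in this penman version:

```python
import penman

g = penman.decode('(b / bark-01 :ARG0 (d / dog))')
print(g.edges(source='b'))   # triples out of b whose target is a variable
print(g.edges())             # every edge in the graph; instance/attribute
                             # triples are excluded by the variables check
```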
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L3299-L3318
def _load(self, keyframe=True): """Read all remaining pages from file.""" if self._cached: return pages = self.pages if not pages: return if not self._indexed: self._seek(-1) if not self._cache: return fh = self.parent.filehandle if keyframe is not None: keyframe = self._keyframe for i, page in enumerate(pages): if isinstance(page, inttypes): fh.seek(page) page = self._tiffpage(self.parent, index=i, keyframe=keyframe) pages[i] = page self._cached = True
[ "def", "_load", "(", "self", ",", "keyframe", "=", "True", ")", ":", "if", "self", ".", "_cached", ":", "return", "pages", "=", "self", ".", "pages", "if", "not", "pages", ":", "return", "if", "not", "self", ".", "_indexed", ":", "self", ".", "_seek", "(", "-", "1", ")", "if", "not", "self", ".", "_cache", ":", "return", "fh", "=", "self", ".", "parent", ".", "filehandle", "if", "keyframe", "is", "not", "None", ":", "keyframe", "=", "self", ".", "_keyframe", "for", "i", ",", "page", "in", "enumerate", "(", "pages", ")", ":", "if", "isinstance", "(", "page", ",", "inttypes", ")", ":", "fh", ".", "seek", "(", "page", ")", "page", "=", "self", ".", "_tiffpage", "(", "self", ".", "parent", ",", "index", "=", "i", ",", "keyframe", "=", "keyframe", ")", "pages", "[", "i", "]", "=", "page", "self", ".", "_cached", "=", "True" ]
Read all remaining pages from file.
[ "Read", "all", "remaining", "pages", "from", "file", "." ]
python
train
Cologler/fsoopify-python
fsoopify/nodes.py
https://github.com/Cologler/fsoopify-python/blob/83d45f16ae9abdea4fcc829373c32df501487dda/fsoopify/nodes.py#L305-L309
def has_directory(self, name: str): ''' check whether this directory contains the directory. ''' return os.path.isdir(self._path / name)
[ "def", "has_directory", "(", "self", ",", "name", ":", "str", ")", ":", "return", "os", ".", "path", ".", "isdir", "(", "self", ".", "_path", "/", "name", ")" ]
check whether this directory contains the directory.
[ "check", "whether", "this", "directory", "contains", "the", "directory", "." ]
python
train
saltstack/salt
salt/master.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L385-L436
def fill_buckets(self): ''' Get the configured backends and the intervals for any backend which supports them, and set up the update "buckets". There will be one bucket for each thing being updated at a given interval. ''' update_intervals = self.fileserver.update_intervals() self.buckets = {} for backend in self.fileserver.backends(): fstr = '{0}.update'.format(backend) try: update_func = self.fileserver.servers[fstr] except KeyError: log.debug( 'No update function for the %s filserver backend', backend ) continue if backend in update_intervals: # Variable intervals are supported for this backend for id_, interval in six.iteritems(update_intervals[backend]): if not interval: # Don't allow an interval of 0 interval = DEFAULT_INTERVAL log.debug( 'An update_interval of 0 is not supported, ' 'falling back to %s', interval ) i_ptr = self.buckets.setdefault(interval, OrderedDict()) # Backend doesn't technically need to be present in the # key, all we *really* need is the function reference, but # having it there makes it easier to provide meaningful # debug logging in the update threads. i_ptr.setdefault((backend, update_func), []).append(id_) else: # Variable intervals are not supported for this backend, so # fall back to the global interval for that fileserver. Since # this backend doesn't support variable updates, we have # nothing to pass to the backend's update func, so we'll just # set the value to None. try: interval_key = '{0}_update_interval'.format(backend) interval = self.opts[interval_key] except KeyError: interval = DEFAULT_INTERVAL log.warning( '%s key missing from configuration. Falling back to ' 'default interval of %d seconds', interval_key, interval ) self.buckets.setdefault( interval, OrderedDict())[(backend, update_func)] = None
[ "def", "fill_buckets", "(", "self", ")", ":", "update_intervals", "=", "self", ".", "fileserver", ".", "update_intervals", "(", ")", "self", ".", "buckets", "=", "{", "}", "for", "backend", "in", "self", ".", "fileserver", ".", "backends", "(", ")", ":", "fstr", "=", "'{0}.update'", ".", "format", "(", "backend", ")", "try", ":", "update_func", "=", "self", ".", "fileserver", ".", "servers", "[", "fstr", "]", "except", "KeyError", ":", "log", ".", "debug", "(", "'No update function for the %s filserver backend'", ",", "backend", ")", "continue", "if", "backend", "in", "update_intervals", ":", "# Variable intervals are supported for this backend", "for", "id_", ",", "interval", "in", "six", ".", "iteritems", "(", "update_intervals", "[", "backend", "]", ")", ":", "if", "not", "interval", ":", "# Don't allow an interval of 0", "interval", "=", "DEFAULT_INTERVAL", "log", ".", "debug", "(", "'An update_interval of 0 is not supported, '", "'falling back to %s'", ",", "interval", ")", "i_ptr", "=", "self", ".", "buckets", ".", "setdefault", "(", "interval", ",", "OrderedDict", "(", ")", ")", "# Backend doesn't technically need to be present in the", "# key, all we *really* need is the function reference, but", "# having it there makes it easier to provide meaningful", "# debug logging in the update threads.", "i_ptr", ".", "setdefault", "(", "(", "backend", ",", "update_func", ")", ",", "[", "]", ")", ".", "append", "(", "id_", ")", "else", ":", "# Variable intervals are not supported for this backend, so", "# fall back to the global interval for that fileserver. Since", "# this backend doesn't support variable updates, we have", "# nothing to pass to the backend's update func, so we'll just", "# set the value to None.", "try", ":", "interval_key", "=", "'{0}_update_interval'", ".", "format", "(", "backend", ")", "interval", "=", "self", ".", "opts", "[", "interval_key", "]", "except", "KeyError", ":", "interval", "=", "DEFAULT_INTERVAL", "log", ".", "warning", "(", "'%s key missing from configuration. Falling back to '", "'default interval of %d seconds'", ",", "interval_key", ",", "interval", ")", "self", ".", "buckets", ".", "setdefault", "(", "interval", ",", "OrderedDict", "(", ")", ")", "[", "(", "backend", ",", "update_func", ")", "]", "=", "None" ]
Get the configured backends and the intervals for any backend which supports them, and set up the update "buckets". There will be one bucket for each thing being updated at a given interval.
[ "Get", "the", "configured", "backends", "and", "the", "intervals", "for", "any", "backend", "which", "supports", "them", "and", "set", "up", "the", "update", "buckets", ".", "There", "will", "be", "one", "bucket", "for", "each", "thing", "being", "updated", "at", "a", "given", "interval", "." ]
python
train
calmjs/calmjs.parse
src/calmjs/parse/utils.py
https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/utils.py#L60-L70
def normrelpath(base, target): """ This function takes the base and target arguments as paths, and returns an equivalent relative path from base to the target, if both provided paths are absolute. """ if not all(map(isabs, [base, target])): return target return relpath(normpath(target), dirname(normpath(base)))
[ "def", "normrelpath", "(", "base", ",", "target", ")", ":", "if", "not", "all", "(", "map", "(", "isabs", ",", "[", "base", ",", "target", "]", ")", ")", ":", "return", "target", "return", "relpath", "(", "normpath", "(", "target", ")", ",", "dirname", "(", "normpath", "(", "base", ")", ")", ")" ]
This function takes the base and target arguments as paths, and returns an equivalent relative path from base to the target, if both provided paths are absolute.
[ "This", "function", "takes", "the", "base", "and", "target", "arguments", "as", "paths", "and", "returns", "an", "equivalent", "relative", "path", "from", "base", "to", "the", "target", "if", "both", "provided", "paths", "are", "absolute", "." ]
python
train
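The function is pure, so a couple of examples pin down the contract (expected outputs shown for a POSIX path layout):

```python
from os.path import dirname, isabs, normpath, relpath

def normrelpath(base, target):
    # Same behavior as the record above: unless both paths are absolute
    # the target passes through untouched; otherwise the result is the
    # target made relative to base's directory.
    if not all(map(isabs, [base, target])):
        return target
    return relpath(normpath(target), dirname(normpath(base)))

print(normrelpath('/srv/app/conf/main.js', '/srv/app/lib/util.js'))  # ../lib/util.js
print(normrelpath('relative/main.js', '/srv/app/lib/util.js'))       # unchanged
```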
zhmcclient/python-zhmcclient
zhmcclient/_utils.py
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_utils.py#L97-L104
def repr_timestamp(timestamp): """Return a debug representation of an HMC timestamp number.""" if timestamp is None: return 'None' dt = datetime_from_timestamp(timestamp) ret = "%d (%s)" % (timestamp, dt.strftime('%Y-%m-%d %H:%M:%S.%f %Z')) return ret
[ "def", "repr_timestamp", "(", "timestamp", ")", ":", "if", "timestamp", "is", "None", ":", "return", "'None'", "dt", "=", "datetime_from_timestamp", "(", "timestamp", ")", "ret", "=", "\"%d (%s)\"", "%", "(", "timestamp", ",", "dt", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S.%f %Z'", ")", ")", "return", "ret" ]
Return a debug representation of an HMC timestamp number.
[ "Return", "a", "debug", "representation", "of", "an", "HMC", "timestamp", "number", "." ]
python
train
zhmcclient/python-zhmcclient
zhmcclient/_session.py
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_session.py#L1228-L1296
def wait_for_completion(self, operation_timeout=None): """ Wait for completion of the job, then delete the job on the HMC and return the result of the original asynchronous HMC operation, if it completed successfully. If the job completed in error, an :exc:`~zhmcclient.HTTPError` exception is raised. Parameters: operation_timeout (:term:`number`): Timeout in seconds, when waiting for completion of the job. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires, a :exc:`~zhmcclient.OperationTimeout` is raised. This method gives completion of the job priority over strictly achieving the timeout. This may cause a slightly longer duration of the method than prescribed by the timeout. Returns: :term:`json object` or `None`: The result of the original asynchronous operation that was performed by the job, from the ``job-results`` field of the response body of the "Query Job Status" HMC operation. That result is a :term:`json object` as described for the asynchronous operation, or `None` if the operation has no result. Raises: :exc:`~zhmcclient.HTTPError`: The job completed in error, or the job status cannot be retrieved, or the job cannot be deleted. :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.ClientAuthError` :exc:`~zhmcclient.ServerAuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.OperationTimeout`: The timeout expired while waiting for job completion. """ if operation_timeout is None: operation_timeout = \ self.session.retry_timeout_config.operation_timeout if operation_timeout > 0: start_time = time.time() while True: job_status, op_result_obj = self.check_for_completion() # We give completion of status priority over strictly achieving # the timeout, so we check status first. This may cause a longer # duration of the method than prescribed by the timeout. if job_status == 'complete': return op_result_obj if operation_timeout > 0: current_time = time.time() if current_time > start_time + operation_timeout: raise OperationTimeout( "Waiting for completion of job {} timed out " "(operation timeout: {} s)". format(self.uri, operation_timeout), operation_timeout) time.sleep(1)
[ "def", "wait_for_completion", "(", "self", ",", "operation_timeout", "=", "None", ")", ":", "if", "operation_timeout", "is", "None", ":", "operation_timeout", "=", "self", ".", "session", ".", "retry_timeout_config", ".", "operation_timeout", "if", "operation_timeout", ">", "0", ":", "start_time", "=", "time", ".", "time", "(", ")", "while", "True", ":", "job_status", ",", "op_result_obj", "=", "self", ".", "check_for_completion", "(", ")", "# We give completion of status priority over strictly achieving", "# the timeout, so we check status first. This may cause a longer", "# duration of the method than prescribed by the timeout.", "if", "job_status", "==", "'complete'", ":", "return", "op_result_obj", "if", "operation_timeout", ">", "0", ":", "current_time", "=", "time", ".", "time", "(", ")", "if", "current_time", ">", "start_time", "+", "operation_timeout", ":", "raise", "OperationTimeout", "(", "\"Waiting for completion of job {} timed out \"", "\"(operation timeout: {} s)\"", ".", "format", "(", "self", ".", "uri", ",", "operation_timeout", ")", ",", "operation_timeout", ")", "time", ".", "sleep", "(", "1", ")" ]
Wait for completion of the job, then delete the job on the HMC and return the result of the original asynchronous HMC operation, if it completed successfully. If the job completed in error, an :exc:`~zhmcclient.HTTPError` exception is raised. Parameters: operation_timeout (:term:`number`): Timeout in seconds, when waiting for completion of the job. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires, a :exc:`~zhmcclient.OperationTimeout` is raised. This method gives completion of the job priority over strictly achieving the timeout. This may cause a slightly longer duration of the method than prescribed by the timeout. Returns: :term:`json object` or `None`: The result of the original asynchronous operation that was performed by the job, from the ``job-results`` field of the response body of the "Query Job Status" HMC operation. That result is a :term:`json object` as described for the asynchronous operation, or `None` if the operation has no result. Raises: :exc:`~zhmcclient.HTTPError`: The job completed in error, or the job status cannot be retrieved, or the job cannot be deleted. :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.ClientAuthError` :exc:`~zhmcclient.ServerAuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.OperationTimeout`: The timeout expired while waiting for job completion.
[ "Wait", "for", "completion", "of", "the", "job", "then", "delete", "the", "job", "on", "the", "HMC", "and", "return", "the", "result", "of", "the", "original", "asynchronous", "HMC", "operation", "if", "it", "completed", "successfully", "." ]
python
train
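Stripped of the zhmcclient plumbing, the loop in this record reduces to a check-then-timeout polling pattern. In the sketch below, check_job is a hypothetical callable standing in for Job.check_for_completion(), and the job-deletion step the original docstring mentions is omitted:

import time

class OperationTimeout(Exception):
    pass

def wait_for_completion(check_job, uri, operation_timeout=10):
    # check_job is a hypothetical stand-in for Job.check_for_completion(),
    # returning a (status, result) tuple on each poll.
    if operation_timeout > 0:
        start_time = time.time()
    while True:
        job_status, op_result_obj = check_job()
        # Completion is checked before the deadline, so a job that finishes
        # just past the timeout still returns its result.
        if job_status == 'complete':
            return op_result_obj
        if operation_timeout > 0 and time.time() > start_time + operation_timeout:
            raise OperationTimeout(
                'Waiting for completion of job {} timed out '
                '(operation timeout: {} s)'.format(uri, operation_timeout))
        time.sleep(1)

# Simulate a job that completes on the third poll (about two seconds in):
polls = iter([('running', None), ('running', None), ('complete', {'job-results': 42})])
print(wait_for_completion(lambda: next(polls), '/api/jobs/1'))  # {'job-results': 42}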