Dataset columns (name, feature type, observed range):

    repo               stringlengths    7 – 55
    path               stringlengths    4 – 223
    url                stringlengths    87 – 315
    code               stringlengths    75 – 104k
    code_tokens        list
    docstring          stringlengths    1 – 46.9k
    docstring_tokens   list
    language           stringclasses    1 value
    partition          stringclasses    3 values
    avg_line_len       float64          7.91 – 980
instaloader/instaloader
instaloader/instaloader.py
https://github.com/instaloader/instaloader/blob/87d877e650cd8020b04b8b51be120599a441fd5b/instaloader/instaloader.py#L490-L513
def get_stories(self, userids: Optional[List[int]] = None) -> Iterator[Story]:
    """Get available stories from followees or all stories of users whose ID are given.
    Does not mark stories as seen.
    To use this, one needs to be logged in

    :param userids: List of user IDs to be processed in terms of downloading their stories, or None.
    """
    if not userids:
        data = self.context.graphql_query("d15efd8c0c5b23f0ef71f18bf363c704",
                                          {"only_stories": True})["data"]["user"]
        if data is None:
            raise BadResponseException('Bad stories reel JSON.')
        userids = list(edge["node"]["id"] for edge in data["feed_reels_tray"]["edge_reels_tray_to_reel"]["edges"])

    def _userid_chunks():
        userids_per_query = 100
        for i in range(0, len(userids), userids_per_query):
            yield userids[i:i + userids_per_query]

    for userid_chunk in _userid_chunks():
        stories = self.context.graphql_query("bf41e22b1c4ba4c9f31b844ebb7d9056",
                                             {"reel_ids": userid_chunk, "precomposed_overlay": False})["data"]
        yield from (Story(self.context, media) for media in stories['reels_media'])
[ "def", "get_stories", "(", "self", ",", "userids", ":", "Optional", "[", "List", "[", "int", "]", "]", "=", "None", ")", "->", "Iterator", "[", "Story", "]", ":", "if", "not", "userids", ":", "data", "=", "self", ".", "context", ".", "graphql_query", "(", "\"d15efd8c0c5b23f0ef71f18bf363c704\"", ",", "{", "\"only_stories\"", ":", "True", "}", ")", "[", "\"data\"", "]", "[", "\"user\"", "]", "if", "data", "is", "None", ":", "raise", "BadResponseException", "(", "'Bad stories reel JSON.'", ")", "userids", "=", "list", "(", "edge", "[", "\"node\"", "]", "[", "\"id\"", "]", "for", "edge", "in", "data", "[", "\"feed_reels_tray\"", "]", "[", "\"edge_reels_tray_to_reel\"", "]", "[", "\"edges\"", "]", ")", "def", "_userid_chunks", "(", ")", ":", "userids_per_query", "=", "100", "for", "i", "in", "range", "(", "0", ",", "len", "(", "userids", ")", ",", "userids_per_query", ")", ":", "yield", "userids", "[", "i", ":", "i", "+", "userids_per_query", "]", "for", "userid_chunk", "in", "_userid_chunks", "(", ")", ":", "stories", "=", "self", ".", "context", ".", "graphql_query", "(", "\"bf41e22b1c4ba4c9f31b844ebb7d9056\"", ",", "{", "\"reel_ids\"", ":", "userid_chunk", ",", "\"precomposed_overlay\"", ":", "False", "}", ")", "[", "\"data\"", "]", "yield", "from", "(", "Story", "(", "self", ".", "context", ",", "media", ")", "for", "media", "in", "stories", "[", "'reels_media'", "]", ")" ]
Get available stories from followees or all stories of users whose ID are given. Does not mark stories as seen. To use this, one needs to be logged in :param userids: List of user IDs to be processed in terms of downloading their stories, or None.
[ "Get", "available", "stories", "from", "followees", "or", "all", "stories", "of", "users", "whose", "ID", "are", "given", ".", "Does", "not", "mark", "stories", "as", "seen", ".", "To", "use", "this", "one", "needs", "to", "be", "logged", "in" ]
python
train
53.416667
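A hedged usage sketch for the `get_stories` record above: it assumes a previously saved Instaloader login session, and the username is a placeholder.

```python
import instaloader

L = instaloader.Instaloader()
L.load_session_from_file("your_username")  # placeholder: a saved session is required, since get_stories needs a login

# userids=None iterates the story reels of all followees; each Story yields StoryItems.
for story in L.get_stories():
    for item in story.get_items():
        L.download_storyitem(item, target=":stories")
```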
Yelp/detect-secrets
detect_secrets/pre_commit_hook.py
https://github.com/Yelp/detect-secrets/blob/473923ea71f1ac2b5ea1eacc49b98f97967e3d05/detect_secrets/pre_commit_hook.py#L127-L155
def raise_exception_if_baseline_file_is_unstaged(filename):
    """We want to make sure that if there are changes to the baseline
    file, they will be included in the commit. This way, we can keep
    our baselines up-to-date.

    :raises: ValueError
    """
    try:
        files_changed_but_not_staged = subprocess.check_output(
            [
                'git',
                'diff',
                '--name-only',
            ],
        ).split()
    except subprocess.CalledProcessError:
        # Since we don't pipe stderr, we get free logging through git.
        raise ValueError

    if filename.encode() in files_changed_but_not_staged:
        log.error((
            'Your baseline file ({}) is unstaged.\n'
            '`git add {}` to fix this.'
        ).format(
            filename,
            filename,
        ))

        raise ValueError
[ "def", "raise_exception_if_baseline_file_is_unstaged", "(", "filename", ")", ":", "try", ":", "files_changed_but_not_staged", "=", "subprocess", ".", "check_output", "(", "[", "'git'", ",", "'diff'", ",", "'--name-only'", ",", "]", ",", ")", ".", "split", "(", ")", "except", "subprocess", ".", "CalledProcessError", ":", "# Since we don't pipe stderr, we get free logging through git.", "raise", "ValueError", "if", "filename", ".", "encode", "(", ")", "in", "files_changed_but_not_staged", ":", "log", ".", "error", "(", "(", "'Your baseline file ({}) is unstaged.\\n'", "'`git add {}` to fix this.'", ")", ".", "format", "(", "filename", ",", "filename", ",", ")", ")", "raise", "ValueError" ]
We want to make sure that if there are changes to the baseline file, they will be included in the commit. This way, we can keep our baselines up-to-date. :raises: ValueError
[ "We", "want", "to", "make", "sure", "that", "if", "there", "are", "changes", "to", "the", "baseline", "file", "they", "will", "be", "included", "in", "the", "commit", ".", "This", "way", "we", "can", "keep", "our", "baselines", "up", "-", "to", "-", "date", "." ]
python
train
28.965517
simonw/datasette
datasette/views/table.py
https://github.com/simonw/datasette/blob/11b352b4d52fd02a422776edebb14f12e4994d3b/datasette/views/table.py#L56-L173
async def display_columns_and_rows(
    self,
    database,
    table,
    description,
    rows,
    link_column=False,
    truncate_cells=0,
):
    "Returns columns, rows for specified table - including fancy foreign key treatment"
    table_metadata = self.ds.table_metadata(database, table)
    sortable_columns = await self.sortable_columns_for_table(database, table, True)
    columns = [
        {"name": r[0], "sortable": r[0] in sortable_columns} for r in description
    ]
    pks = await self.ds.execute_against_connection_in_thread(
        database, lambda conn: detect_primary_keys(conn, table)
    )
    column_to_foreign_key_table = {
        fk["column"]: fk["other_table"]
        for fk in await self.ds.foreign_keys_for_table(database, table)
    }
    cell_rows = []
    for row in rows:
        cells = []
        # Unless we are a view, the first column is a link - either to the rowid
        # or to the simple or compound primary key
        if link_column:
            cells.append(
                {
                    "column": pks[0] if len(pks) == 1 else "Link",
                    "value": jinja2.Markup(
                        '<a href="/{database}/{table}/{flat_pks_quoted}">{flat_pks}</a>'.format(
                            database=database,
                            table=urllib.parse.quote_plus(table),
                            flat_pks=str(
                                jinja2.escape(
                                    path_from_row_pks(row, pks, not pks, False)
                                )
                            ),
                            flat_pks_quoted=path_from_row_pks(row, pks, not pks),
                        )
                    ),
                }
            )

        for value, column_dict in zip(row, columns):
            column = column_dict["name"]
            if link_column and len(pks) == 1 and column == pks[0]:
                # If there's a simple primary key, don't repeat the value as it's
                # already shown in the link column.
                continue

            # First let the plugins have a go
            # pylint: disable=no-member
            plugin_display_value = pm.hook.render_cell(
                value=value,
                column=column,
                table=table,
                database=database,
                datasette=self.ds,
            )
            if plugin_display_value is not None:
                display_value = plugin_display_value
            elif isinstance(value, dict):
                # It's an expanded foreign key - display link to other row
                label = value["label"]
                value = value["value"]
                # The table we link to depends on the column
                other_table = column_to_foreign_key_table[column]
                link_template = (
                    LINK_WITH_LABEL if (label != value) else LINK_WITH_VALUE
                )
                display_value = jinja2.Markup(link_template.format(
                    database=database,
                    table=urllib.parse.quote_plus(other_table),
                    link_id=urllib.parse.quote_plus(str(value)),
                    id=str(jinja2.escape(value)),
                    label=str(jinja2.escape(label)),
                ))
            elif value in ("", None):
                display_value = jinja2.Markup("&nbsp;")
            elif is_url(str(value).strip()):
                display_value = jinja2.Markup(
                    '<a href="{url}">{url}</a>'.format(
                        url=jinja2.escape(value.strip())
                    )
                )
            elif column in table_metadata.get("units", {}) and value != "":
                # Interpret units using pint
                value = value * ureg(table_metadata["units"][column])
                # Pint uses floating point which sometimes introduces errors in the compact
                # representation, which we have to round off to avoid ugliness. In the vast
                # majority of cases this rounding will be inconsequential. I hope.
                value = round(value.to_compact(), 6)
                display_value = jinja2.Markup(
                    "{:~P}".format(value).replace(" ", "&nbsp;")
                )
            else:
                display_value = str(value)
                if truncate_cells and len(display_value) > truncate_cells:
                    display_value = display_value[:truncate_cells] + u"\u2026"

            cells.append({"column": column, "value": display_value})
        cell_rows.append(cells)

    if link_column:
        # Add the link column header.
        # If it's a simple primary key, we have to remove and re-add that column name at
        # the beginning of the header row.
        if len(pks) == 1:
            columns = [col for col in columns if col["name"] != pks[0]]
        columns = [
            {"name": pks[0] if len(pks) == 1 else "Link", "sortable": len(pks) == 1}
        ] + columns
    return columns, cell_rows
[ "async", "def", "display_columns_and_rows", "(", "self", ",", "database", ",", "table", ",", "description", ",", "rows", ",", "link_column", "=", "False", ",", "truncate_cells", "=", "0", ",", ")", ":", "table_metadata", "=", "self", ".", "ds", ".", "table_metadata", "(", "database", ",", "table", ")", "sortable_columns", "=", "await", "self", ".", "sortable_columns_for_table", "(", "database", ",", "table", ",", "True", ")", "columns", "=", "[", "{", "\"name\"", ":", "r", "[", "0", "]", ",", "\"sortable\"", ":", "r", "[", "0", "]", "in", "sortable_columns", "}", "for", "r", "in", "description", "]", "pks", "=", "await", "self", ".", "ds", ".", "execute_against_connection_in_thread", "(", "database", ",", "lambda", "conn", ":", "detect_primary_keys", "(", "conn", ",", "table", ")", ")", "column_to_foreign_key_table", "=", "{", "fk", "[", "\"column\"", "]", ":", "fk", "[", "\"other_table\"", "]", "for", "fk", "in", "await", "self", ".", "ds", ".", "foreign_keys_for_table", "(", "database", ",", "table", ")", "}", "cell_rows", "=", "[", "]", "for", "row", "in", "rows", ":", "cells", "=", "[", "]", "# Unless we are a view, the first column is a link - either to the rowid", "# or to the simple or compound primary key", "if", "link_column", ":", "cells", ".", "append", "(", "{", "\"column\"", ":", "pks", "[", "0", "]", "if", "len", "(", "pks", ")", "==", "1", "else", "\"Link\"", ",", "\"value\"", ":", "jinja2", ".", "Markup", "(", "'<a href=\"/{database}/{table}/{flat_pks_quoted}\">{flat_pks}</a>'", ".", "format", "(", "database", "=", "database", ",", "table", "=", "urllib", ".", "parse", ".", "quote_plus", "(", "table", ")", ",", "flat_pks", "=", "str", "(", "jinja2", ".", "escape", "(", "path_from_row_pks", "(", "row", ",", "pks", ",", "not", "pks", ",", "False", ")", ")", ")", ",", "flat_pks_quoted", "=", "path_from_row_pks", "(", "row", ",", "pks", ",", "not", "pks", ")", ",", ")", ")", ",", "}", ")", "for", "value", ",", "column_dict", "in", "zip", "(", "row", ",", "columns", ")", ":", "column", "=", "column_dict", "[", "\"name\"", "]", "if", "link_column", "and", "len", "(", "pks", ")", "==", "1", "and", "column", "==", "pks", "[", "0", "]", ":", "# If there's a simple primary key, don't repeat the value as it's", "# already shown in the link column.", "continue", "# First let the plugins have a go", "# pylint: disable=no-member", "plugin_display_value", "=", "pm", ".", "hook", ".", "render_cell", "(", "value", "=", "value", ",", "column", "=", "column", ",", "table", "=", "table", ",", "database", "=", "database", ",", "datasette", "=", "self", ".", "ds", ",", ")", "if", "plugin_display_value", "is", "not", "None", ":", "display_value", "=", "plugin_display_value", "elif", "isinstance", "(", "value", ",", "dict", ")", ":", "# It's an expanded foreign key - display link to other row", "label", "=", "value", "[", "\"label\"", "]", "value", "=", "value", "[", "\"value\"", "]", "# The table we link to depends on the column", "other_table", "=", "column_to_foreign_key_table", "[", "column", "]", "link_template", "=", "(", "LINK_WITH_LABEL", "if", "(", "label", "!=", "value", ")", "else", "LINK_WITH_VALUE", ")", "display_value", "=", "jinja2", ".", "Markup", "(", "link_template", ".", "format", "(", "database", "=", "database", ",", "table", "=", "urllib", ".", "parse", ".", "quote_plus", "(", "other_table", ")", ",", "link_id", "=", "urllib", ".", "parse", ".", "quote_plus", "(", "str", "(", "value", ")", ")", ",", "id", "=", "str", "(", "jinja2", ".", "escape", "(", "value", ")", ")", 
",", "label", "=", "str", "(", "jinja2", ".", "escape", "(", "label", ")", ")", ",", ")", ")", "elif", "value", "in", "(", "\"\"", ",", "None", ")", ":", "display_value", "=", "jinja2", ".", "Markup", "(", "\"&nbsp;\"", ")", "elif", "is_url", "(", "str", "(", "value", ")", ".", "strip", "(", ")", ")", ":", "display_value", "=", "jinja2", ".", "Markup", "(", "'<a href=\"{url}\">{url}</a>'", ".", "format", "(", "url", "=", "jinja2", ".", "escape", "(", "value", ".", "strip", "(", ")", ")", ")", ")", "elif", "column", "in", "table_metadata", ".", "get", "(", "\"units\"", ",", "{", "}", ")", "and", "value", "!=", "\"\"", ":", "# Interpret units using pint", "value", "=", "value", "*", "ureg", "(", "table_metadata", "[", "\"units\"", "]", "[", "column", "]", ")", "# Pint uses floating point which sometimes introduces errors in the compact", "# representation, which we have to round off to avoid ugliness. In the vast", "# majority of cases this rounding will be inconsequential. I hope.", "value", "=", "round", "(", "value", ".", "to_compact", "(", ")", ",", "6", ")", "display_value", "=", "jinja2", ".", "Markup", "(", "\"{:~P}\"", ".", "format", "(", "value", ")", ".", "replace", "(", "\" \"", ",", "\"&nbsp;\"", ")", ")", "else", ":", "display_value", "=", "str", "(", "value", ")", "if", "truncate_cells", "and", "len", "(", "display_value", ")", ">", "truncate_cells", ":", "display_value", "=", "display_value", "[", ":", "truncate_cells", "]", "+", "u\"\\u2026\"", "cells", ".", "append", "(", "{", "\"column\"", ":", "column", ",", "\"value\"", ":", "display_value", "}", ")", "cell_rows", ".", "append", "(", "cells", ")", "if", "link_column", ":", "# Add the link column header.", "# If it's a simple primary key, we have to remove and re-add that column name at", "# the beginning of the header row.", "if", "len", "(", "pks", ")", "==", "1", ":", "columns", "=", "[", "col", "for", "col", "in", "columns", "if", "col", "[", "\"name\"", "]", "!=", "pks", "[", "0", "]", "]", "columns", "=", "[", "{", "\"name\"", ":", "pks", "[", "0", "]", "if", "len", "(", "pks", ")", "==", "1", "else", "\"Link\"", ",", "\"sortable\"", ":", "len", "(", "pks", ")", "==", "1", "}", "]", "+", "columns", "return", "columns", ",", "cell_rows" ]
Returns columns, rows for specified table - including fancy foreign key treatment
[ "Returns", "columns", "rows", "for", "specified", "table", "-", "including", "fancy", "foreign", "key", "treatment" ]
python
train
45.449153
aleju/imgaug
imgaug/augmentables/heatmaps.py
https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/heatmaps.py#L409-L441
def from_uint8(arr_uint8, shape, min_value=0.0, max_value=1.0):
    """
    Create a heatmaps object from an heatmap array containing values ranging from 0 to 255.

    Parameters
    ----------
    arr_uint8 : (H,W) ndarray or (H,W,C) ndarray
        Heatmap(s) array, where ``H`` is height, ``W`` is width and ``C`` is the number of
        heatmap channels. Expected dtype is uint8.

    shape : tuple of int
        Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
        heatmap(s) array, unless it is identical to the image shape (note the likely
        difference between the arrays in the number of channels).
        If there is not a corresponding image, use the shape of the heatmaps array.

    min_value : float, optional
        Minimum value for the heatmaps that the 0-to-255 array represents. This will usually
        be 0.0. It is used when calling :func:`imgaug.HeatmapsOnImage.get_arr`, which converts
        the underlying ``(0, 255)`` array to value range ``(min_value, max_value)``.

    max_value : float, optional
        Maximum value for the heatmaps that 0-to-255 array represents.
        See parameter `min_value` for details.

    Returns
    -------
    imgaug.HeatmapsOnImage
        Heatmaps object.

    """
    arr_0to1 = arr_uint8.astype(np.float32) / 255.0
    return HeatmapsOnImage.from_0to1(arr_0to1, shape, min_value=min_value, max_value=max_value)
[ "def", "from_uint8", "(", "arr_uint8", ",", "shape", ",", "min_value", "=", "0.0", ",", "max_value", "=", "1.0", ")", ":", "arr_0to1", "=", "arr_uint8", ".", "astype", "(", "np", ".", "float32", ")", "/", "255.0", "return", "HeatmapsOnImage", ".", "from_0to1", "(", "arr_0to1", ",", "shape", ",", "min_value", "=", "min_value", ",", "max_value", "=", "max_value", ")" ]
Create a heatmaps object from an heatmap array containing values ranging from 0 to 255. Parameters ---------- arr_uint8 : (H,W) ndarray or (H,W,C) ndarray Heatmap(s) array, where ``H`` is height, ``W`` is width and ``C`` is the number of heatmap channels. Expected dtype is uint8. shape : tuple of int Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the heatmap(s) array, unless it is identical to the image shape (note the likely difference between the arrays in the number of channels). If there is not a corresponding image, use the shape of the heatmaps array. min_value : float, optional Minimum value for the heatmaps that the 0-to-255 array represents. This will usually be 0.0. It is used when calling :func:`imgaug.HeatmapsOnImage.get_arr`, which converts the underlying ``(0, 255)`` array to value range ``(min_value, max_value)``. max_value : float, optional Maximum value for the heatmaps that 0-to-255 array represents. See parameter `min_value` for details. Returns ------- imgaug.HeatmapsOnImage Heatmaps object.
[ "Create", "a", "heatmaps", "object", "from", "an", "heatmap", "array", "containing", "values", "ranging", "from", "0", "to", "255", "." ]
python
valid
45.333333
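A minimal sketch of the `HeatmapsOnImage.from_uint8` record above; the array is synthetic and only illustrates the expected dtype and the separate image shape.

```python
import numpy as np
from imgaug.augmentables.heatmaps import HeatmapsOnImage

# One-channel uint8 heatmap (64x64) placed on a larger (128, 128, 3) image.
arr_uint8 = np.random.randint(0, 256, size=(64, 64, 1), dtype=np.uint8)
heatmaps = HeatmapsOnImage.from_uint8(arr_uint8, shape=(128, 128, 3))

# get_arr() converts the internal 0..1 representation back to (min_value, max_value).
arr_float = heatmaps.get_arr()
```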
GemHQ/round-py
round/wrappers.py
https://github.com/GemHQ/round-py/blob/d0838f849cd260b1eb5df67ed3c6f2fe56c91c21/round/wrappers.py#L37-L57
def with_mfa(self, mfa_token):
    """Set the MFA token for the next request.
    `mfa_token`s are only good for one request. Use this method to chain into
    the protected action you want to perform.

    Note: Only useful for Application authentication.

    Usage:
      account.with_mfa(application.totp.now()).pay(...)

    Args:
      mfa_token (str/function, optional): TOTP token for the Application OR
        a callable/function which will generate such a token when called.

    Returns:
      self
    """
    if hasattr(mfa_token, '__call__'):  # callable() is unsupported by 3.1 and 3.2
        self.context.mfa_token = mfa_token.__call__()
    else:
        self.context.mfa_token = mfa_token
    return self
[ "def", "with_mfa", "(", "self", ",", "mfa_token", ")", ":", "if", "hasattr", "(", "mfa_token", ",", "'__call__'", ")", ":", "# callable() is unsupported by 3.1 and 3.2", "self", ".", "context", ".", "mfa_token", "=", "mfa_token", ".", "__call__", "(", ")", "else", ":", "self", ".", "context", ".", "mfa_token", "=", "mfa_token", "return", "self" ]
Set the MFA token for the next request. `mfa_token`s are only good for one request. Use this method to chain into the protected action you want to perform. Note: Only useful for Application authentication. Usage: account.with_mfa(application.totp.now()).pay(...) Args: mfa_token (str/function, optional): TOTP token for the Application OR a callable/function which will generate such a token when called. Returns: self
[ "Set", "the", "MFA", "token", "for", "the", "next", "request", ".", "mfa_token", "s", "are", "only", "good", "for", "one", "request", ".", "Use", "this", "method", "to", "chain", "into", "the", "protected", "action", "you", "want", "to", "perform", "." ]
python
train
36.666667
chrisspen/dtree
dtree.py
https://github.com/chrisspen/dtree/blob/9e9c9992b22ad9a7e296af7e6837666b05db43ef/dtree.py#L956-L964
def get_best_splitting_attr(self):
    """
    Returns the name of the attribute with the highest gain.
    """
    best = (-1e999999, None)
    for attr in self.attributes:
        best = max(best, (self.get_gain(attr), attr))
    best_gain, best_attr = best
    return best_attr
[ "def", "get_best_splitting_attr", "(", "self", ")", ":", "best", "=", "(", "-", "1e999999", ",", "None", ")", "for", "attr", "in", "self", ".", "attributes", ":", "best", "=", "max", "(", "best", ",", "(", "self", ".", "get_gain", "(", "attr", ")", ",", "attr", ")", ")", "best_gain", ",", "best_attr", "=", "best", "return", "best_attr" ]
Returns the name of the attribute with the highest gain.
[ "Returns", "the", "name", "of", "the", "attribute", "with", "the", "highest", "gain", "." ]
python
train
33.777778
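The `get_best_splitting_attr` record above leans on Python's tuple ordering: `max()` over `(gain, attr)` pairs selects the attribute with the highest gain. A standalone sketch of the same idiom, with made-up gains:

```python
# Hypothetical information gains computed elsewhere; only the selection idiom is shown.
gains = {"outlook": 0.25, "humidity": 0.15, "wind": 0.05}

best = (-float("inf"), None)
for attr, gain in gains.items():
    best = max(best, (gain, attr))  # tuples compare element-wise: gain first, then attribute name

best_gain, best_attr = best
print(best_attr)  # -> 'outlook'
```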
sebdah/dynamic-dynamodb
dynamic_dynamodb/calculators.py
https://github.com/sebdah/dynamic-dynamodb/blob/bfd0ca806b1c3301e724696de90ef0f973410493/dynamic_dynamodb/calculators.py#L118-L151
def decrease_writes_in_units(
        current_provisioning, units, min_provisioned_writes, log_tag):
    """ Decrease the current_provisioning with units units

    :type current_provisioning: int
    :param current_provisioning: The current provisioning
    :type units: int
    :param units: How many units should we decrease with
    :returns: int -- New provisioning value
    :type min_provisioned_writes: int
    :param min_provisioned_writes: Configured min provisioned writes
    :type log_tag: str
    :param log_tag: Prefix for the log
    """
    updated_provisioning = int(current_provisioning) - int(units)
    min_provisioned_writes = __get_min_writes(
        current_provisioning,
        min_provisioned_writes,
        log_tag)

    if updated_provisioning < min_provisioned_writes:
        logger.info(
            '{0} - Reached provisioned writes min limit: {1:d}'.format(
                log_tag,
                int(min_provisioned_writes)))

        return min_provisioned_writes

    logger.debug(
        '{0} - Write provisioning will be decreased to {1:d} units'.format(
            log_tag,
            int(updated_provisioning)))

    return updated_provisioning
[ "def", "decrease_writes_in_units", "(", "current_provisioning", ",", "units", ",", "min_provisioned_writes", ",", "log_tag", ")", ":", "updated_provisioning", "=", "int", "(", "current_provisioning", ")", "-", "int", "(", "units", ")", "min_provisioned_writes", "=", "__get_min_writes", "(", "current_provisioning", ",", "min_provisioned_writes", ",", "log_tag", ")", "if", "updated_provisioning", "<", "min_provisioned_writes", ":", "logger", ".", "info", "(", "'{0} - Reached provisioned writes min limit: {1:d}'", ".", "format", "(", "log_tag", ",", "int", "(", "min_provisioned_writes", ")", ")", ")", "return", "min_provisioned_writes", "logger", ".", "debug", "(", "'{0} - Write provisioning will be decreased to {1:d} units'", ".", "format", "(", "log_tag", ",", "int", "(", "updated_provisioning", ")", ")", ")", "return", "updated_provisioning" ]
Decrease the current_provisioning with units units :type current_provisioning: int :param current_provisioning: The current provisioning :type units: int :param units: How many units should we decrease with :returns: int -- New provisioning value :type min_provisioned_writes: int :param min_provisioned_writes: Configured min provisioned writes :type log_tag: str :param log_tag: Prefix for the log
[ "Decrease", "the", "current_provisioning", "with", "units", "units" ]
python
train
34.088235
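A simplified, standalone re-implementation of the clamping behaviour in the `decrease_writes_in_units` record above (the real function also resolves the minimum via the module-private `__get_min_writes` and logs the outcome); the numbers are illustrative.

```python
def decrease_with_floor(current_provisioning, units, min_provisioned_writes):
    """Subtract `units` from the current provisioning, but never go below the configured minimum."""
    updated = int(current_provisioning) - int(units)
    return max(updated, int(min_provisioned_writes))

print(decrease_with_floor(100, 30, 50))  # -> 70
print(decrease_with_floor(100, 80, 50))  # -> 50, the min limit is returned instead
```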
Gandi/gandi.cli
gandi/cli/core/base.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/core/base.py#L121-L123
def safe_call(cls, method, *args):
    """ Call a remote api method but don't raise if an error occurred."""
    return cls.call(method, *args, safe=True)
[ "def", "safe_call", "(", "cls", ",", "method", ",", "*", "args", ")", ":", "return", "cls", ".", "call", "(", "method", ",", "*", "args", ",", "safe", "=", "True", ")" ]
Call a remote api method but don't raise if an error occurred.
[ "Call", "a", "remote", "api", "method", "but", "don", "t", "raise", "if", "an", "error", "occurred", "." ]
python
train
53.333333
eyeseast/python-tablefu
table_fu/formatting.py
https://github.com/eyeseast/python-tablefu/blob/d8761c1f87e3f89d9b89b0b6b9283fc4738b6676/table_fu/formatting.py#L22-L38
def ap_state(value, failure_string=None):
    """
    Converts a state's name, postal abbreviation or FIPS to A.P. style.

    Example usage:

        >> ap_state("California")
        'Calif.'

    """
    try:
        return statestyle.get(value).ap
    except:
        if failure_string:
            return failure_string
        else:
            return value
[ "def", "ap_state", "(", "value", ",", "failure_string", "=", "None", ")", ":", "try", ":", "return", "statestyle", ".", "get", "(", "value", ")", ".", "ap", "except", ":", "if", "failure_string", ":", "return", "failure_string", "else", ":", "return", "value" ]
Converts a state's name, postal abbreviation or FIPS to A.P. style. Example usage: >> ap_state("California") 'Calif.'
[ "Converts", "a", "state", "s", "name", "postal", "abbreviation", "or", "FIPS", "to", "A", ".", "P", ".", "style", ".", "Example", "usage", ":", ">>", "ap_state", "(", "California", ")", "Calif", "." ]
python
train
21.117647
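The docstring's example from the `ap_state` record above, spelled out as runnable calls; this assumes the `statestyle` dependency the module relies on is installed.

```python
from table_fu.formatting import ap_state

print(ap_state("California"))                     # -> 'Calif.'
print(ap_state("CA"))                             # postal abbreviations resolve the same way
print(ap_state("Narnia", failure_string="N/A"))   # unknown values fall back to the failure string
```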
juju/theblues
theblues/charmstore.py
https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/charmstore.py#L482-L495
def _get_path(entity_id):
    '''Get the entity_id as a string if it is a Reference.

    @param entity_id The ID either a reference or a string of the entity
        to get.
    @return entity_id as a string
    '''
    try:
        path = entity_id.path()
    except AttributeError:
        path = entity_id
    if path.startswith('cs:'):
        path = path[3:]
    return path
[ "def", "_get_path", "(", "entity_id", ")", ":", "try", ":", "path", "=", "entity_id", ".", "path", "(", ")", "except", "AttributeError", ":", "path", "=", "entity_id", "if", "path", ".", "startswith", "(", "'cs:'", ")", ":", "path", "=", "path", "[", "3", ":", "]", "return", "path" ]
Get the entity_id as a string if it is a Reference. @param entity_id The ID either a reference or a string of the entity to get. @return entity_id as a string
[ "Get", "the", "entity_id", "as", "a", "string", "if", "it", "is", "a", "Reference", "." ]
python
train
26.357143
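`_get_path` in the record above is module-private, so the sketch below simply illustrates its behaviour on plain strings; the charm IDs are made up.

```python
from theblues.charmstore import _get_path

print(_get_path("cs:trusty/mysql-38"))  # -> 'trusty/mysql-38' (the 'cs:' prefix is stripped)
print(_get_path("wordpress"))           # plain strings without the prefix pass through unchanged
```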
quantopian/zipline
zipline/assets/asset_db_migrations.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_db_migrations.py#L133-L158
def downgrades(src):
    """Decorator for marking that a method is a downgrade to a version to the
    previous version.

    Parameters
    ----------
    src : int
        The version this downgrades from.

    Returns
    -------
    decorator : callable[(callable) -> callable]
        The decorator to apply.
    """
    def _(f):
        destination = src - 1

        @do(operator.setitem(_downgrade_methods, destination))
        @wraps(f)
        def wrapper(op, conn, version_info_table):
            conn.execute(version_info_table.delete())  # clear the version
            f(op)
            write_version_info(conn, version_info_table, destination)

        return wrapper
    return _
[ "def", "downgrades", "(", "src", ")", ":", "def", "_", "(", "f", ")", ":", "destination", "=", "src", "-", "1", "@", "do", "(", "operator", ".", "setitem", "(", "_downgrade_methods", ",", "destination", ")", ")", "@", "wraps", "(", "f", ")", "def", "wrapper", "(", "op", ",", "conn", ",", "version_info_table", ")", ":", "conn", ".", "execute", "(", "version_info_table", ".", "delete", "(", ")", ")", "# clear the version", "f", "(", "op", ")", "write_version_info", "(", "conn", ",", "version_info_table", ",", "destination", ")", "return", "wrapper", "return", "_" ]
Decorator for marking that a method is a downgrade to a version to the previous version. Parameters ---------- src : int The version this downgrades from. Returns ------- decorator : callable[(callable) -> callable] The decorator to apply.
[ "Decorator", "for", "marking", "that", "a", "method", "is", "a", "downgrade", "to", "a", "version", "to", "the", "previous", "version", "." ]
python
train
25.884615
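How the `downgrades` decorator in the record above is applied, inferred from its own wrapper: the decorated function is called as `f(op)` with the migration operations object. The body of this downgrade step is hypothetical.

```python
@downgrades(2)
def _downgrade_from_v2(op):
    # Hypothetical schema change executed while moving from version 2 back to version 1;
    # the wrapper clears and rewrites the version_info table around this call.
    op.drop_column("equities", "some_new_column")
```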
glitchassassin/lackey
lackey/RegionMatching.py
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1434-L1459
def observe(self, seconds=None):
    """ Begins the observer loop (synchronously).

    Loops for ``seconds`` or until this region's stopObserver() method is called.
    If ``seconds`` is None, the observer loop cycles until stopped. If this
    method is called while the observer loop is already running, it returns False.

    Returns True if the observer could be started, False otherwise.
    """
    # Check if observer is already running
    if self._observer.isRunning:
        return False  # Could not start

    # Set timeout
    if seconds is not None:
        timeout = time.time() + seconds
    else:
        timeout = None

    # Start observe loop
    while (not self._observer.isStopped) and (seconds is None or time.time() < timeout):
        # Check registered events
        self._observer.check_events()
        # Sleep for scan rate
        time.sleep(1/self.getObserveScanRate())

    return True
[ "def", "observe", "(", "self", ",", "seconds", "=", "None", ")", ":", "# Check if observer is already running", "if", "self", ".", "_observer", ".", "isRunning", ":", "return", "False", "# Could not start", "# Set timeout", "if", "seconds", "is", "not", "None", ":", "timeout", "=", "time", ".", "time", "(", ")", "+", "seconds", "else", ":", "timeout", "=", "None", "# Start observe loop", "while", "(", "not", "self", ".", "_observer", ".", "isStopped", ")", "and", "(", "seconds", "is", "None", "or", "time", ".", "time", "(", ")", "<", "timeout", ")", ":", "# Check registered events", "self", ".", "_observer", ".", "check_events", "(", ")", "# Sleep for scan rate", "time", ".", "sleep", "(", "1", "/", "self", ".", "getObserveScanRate", "(", ")", ")", "return", "True" ]
Begins the observer loop (synchronously). Loops for ``seconds`` or until this region's stopObserver() method is called. If ``seconds`` is None, the observer loop cycles until stopped. If this method is called while the observer loop is already running, it returns False. Returns True if the observer could be started, False otherwise.
[ "Begins", "the", "observer", "loop", "(", "synchronously", ")", "." ]
python
train
37.538462
PyCQA/astroid
astroid/brain/brain_namedtuple_enum.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/brain/brain_namedtuple_enum.py#L164-L219
def infer_named_tuple(node, context=None):
    """Specific inference function for namedtuple Call node"""
    tuple_base_name = nodes.Name(name="tuple", parent=node.root())
    class_node, name, attributes = infer_func_form(
        node, tuple_base_name, context=context
    )
    call_site = arguments.CallSite.from_call(node)
    func = next(extract_node("import collections; collections.namedtuple").infer())
    try:
        rename = next(call_site.infer_argument(func, "rename", context)).bool_value()
    except InferenceError:
        rename = False

    if rename:
        attributes = _get_renamed_namedtuple_attributes(attributes)

    replace_args = ", ".join("{arg}=None".format(arg=arg) for arg in attributes)
    field_def = (
        "    {name} = property(lambda self: self[{index:d}], "
        "doc='Alias for field number {index:d}')"
    )
    field_defs = "\n".join(
        field_def.format(name=name, index=index)
        for index, name in enumerate(attributes)
    )
    fake = AstroidBuilder(MANAGER).string_build(
        """
class %(name)s(tuple):
    __slots__ = ()
    _fields = %(fields)r
    def _asdict(self):
        return self.__dict__
    @classmethod
    def _make(cls, iterable, new=tuple.__new__, len=len):
        return new(cls, iterable)
    def _replace(self, %(replace_args)s):
        return self
    def __getnewargs__(self):
        return tuple(self)
%(field_defs)s
    """
        % {
            "name": name,
            "fields": attributes,
            "field_defs": field_defs,
            "replace_args": replace_args,
        }
    )
    class_node.locals["_asdict"] = fake.body[0].locals["_asdict"]
    class_node.locals["_make"] = fake.body[0].locals["_make"]
    class_node.locals["_replace"] = fake.body[0].locals["_replace"]
    class_node.locals["_fields"] = fake.body[0].locals["_fields"]
    for attr in attributes:
        class_node.locals[attr] = fake.body[0].locals[attr]
    # we use UseInferenceDefault, we can't be a generator so return an iterator
    return iter([class_node])
[ "def", "infer_named_tuple", "(", "node", ",", "context", "=", "None", ")", ":", "tuple_base_name", "=", "nodes", ".", "Name", "(", "name", "=", "\"tuple\"", ",", "parent", "=", "node", ".", "root", "(", ")", ")", "class_node", ",", "name", ",", "attributes", "=", "infer_func_form", "(", "node", ",", "tuple_base_name", ",", "context", "=", "context", ")", "call_site", "=", "arguments", ".", "CallSite", ".", "from_call", "(", "node", ")", "func", "=", "next", "(", "extract_node", "(", "\"import collections; collections.namedtuple\"", ")", ".", "infer", "(", ")", ")", "try", ":", "rename", "=", "next", "(", "call_site", ".", "infer_argument", "(", "func", ",", "\"rename\"", ",", "context", ")", ")", ".", "bool_value", "(", ")", "except", "InferenceError", ":", "rename", "=", "False", "if", "rename", ":", "attributes", "=", "_get_renamed_namedtuple_attributes", "(", "attributes", ")", "replace_args", "=", "\", \"", ".", "join", "(", "\"{arg}=None\"", ".", "format", "(", "arg", "=", "arg", ")", "for", "arg", "in", "attributes", ")", "field_def", "=", "(", "\" {name} = property(lambda self: self[{index:d}], \"", "\"doc='Alias for field number {index:d}')\"", ")", "field_defs", "=", "\"\\n\"", ".", "join", "(", "field_def", ".", "format", "(", "name", "=", "name", ",", "index", "=", "index", ")", "for", "index", ",", "name", "in", "enumerate", "(", "attributes", ")", ")", "fake", "=", "AstroidBuilder", "(", "MANAGER", ")", ".", "string_build", "(", "\"\"\"\nclass %(name)s(tuple):\n __slots__ = ()\n _fields = %(fields)r\n def _asdict(self):\n return self.__dict__\n @classmethod\n def _make(cls, iterable, new=tuple.__new__, len=len):\n return new(cls, iterable)\n def _replace(self, %(replace_args)s):\n return self\n def __getnewargs__(self):\n return tuple(self)\n%(field_defs)s\n \"\"\"", "%", "{", "\"name\"", ":", "name", ",", "\"fields\"", ":", "attributes", ",", "\"field_defs\"", ":", "field_defs", ",", "\"replace_args\"", ":", "replace_args", ",", "}", ")", "class_node", ".", "locals", "[", "\"_asdict\"", "]", "=", "fake", ".", "body", "[", "0", "]", ".", "locals", "[", "\"_asdict\"", "]", "class_node", ".", "locals", "[", "\"_make\"", "]", "=", "fake", ".", "body", "[", "0", "]", ".", "locals", "[", "\"_make\"", "]", "class_node", ".", "locals", "[", "\"_replace\"", "]", "=", "fake", ".", "body", "[", "0", "]", ".", "locals", "[", "\"_replace\"", "]", "class_node", ".", "locals", "[", "\"_fields\"", "]", "=", "fake", ".", "body", "[", "0", "]", ".", "locals", "[", "\"_fields\"", "]", "for", "attr", "in", "attributes", ":", "class_node", ".", "locals", "[", "attr", "]", "=", "fake", ".", "body", "[", "0", "]", ".", "locals", "[", "attr", "]", "# we use UseInferenceDefault, we can't be a generator so return an iterator", "return", "iter", "(", "[", "class_node", "]", ")" ]
Specific inference function for namedtuple Call node
[ "Specific", "inference", "function", "for", "namedtuple", "Call", "node" ]
python
train
35.642857
cbclab/MOT
mot/lib/cl_environments.py
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/cl_environments.py#L210-L244
def all_devices(cl_device_type=None, platform=None):
    """Get multiple device environments, optionally only of the indicated type.

    This will only fetch devices that support double point precision.

    Args:
        cl_device_type (cl.device_type.* or string): The type of the device we want,
            can be a opencl device type or a string matching 'GPU' or 'CPU'.
        platform (opencl platform): The opencl platform to select the devices from

    Returns:
        list of CLEnvironment: List with the CL device environments.
    """
    if isinstance(cl_device_type, str):
        cl_device_type = device_type_from_string(cl_device_type)

    runtime_list = []

    if platform is None:
        platforms = cl.get_platforms()
    else:
        platforms = [platform]

    for platform in platforms:
        if cl_device_type:
            devices = platform.get_devices(device_type=cl_device_type)
        else:
            devices = platform.get_devices()

        for device in devices:
            if device_supports_double(device):
                env = CLEnvironment(platform, device)
                runtime_list.append(env)

    return runtime_list
[ "def", "all_devices", "(", "cl_device_type", "=", "None", ",", "platform", "=", "None", ")", ":", "if", "isinstance", "(", "cl_device_type", ",", "str", ")", ":", "cl_device_type", "=", "device_type_from_string", "(", "cl_device_type", ")", "runtime_list", "=", "[", "]", "if", "platform", "is", "None", ":", "platforms", "=", "cl", ".", "get_platforms", "(", ")", "else", ":", "platforms", "=", "[", "platform", "]", "for", "platform", "in", "platforms", ":", "if", "cl_device_type", ":", "devices", "=", "platform", ".", "get_devices", "(", "device_type", "=", "cl_device_type", ")", "else", ":", "devices", "=", "platform", ".", "get_devices", "(", ")", "for", "device", "in", "devices", ":", "if", "device_supports_double", "(", "device", ")", ":", "env", "=", "CLEnvironment", "(", "platform", ",", "device", ")", "runtime_list", ".", "append", "(", "env", ")", "return", "runtime_list" ]
Get multiple device environments, optionally only of the indicated type. This will only fetch devices that support double point precision. Args: cl_device_type (cl.device_type.* or string): The type of the device we want, can be a opencl device type or a string matching 'GPU' or 'CPU'. platform (opencl platform): The opencl platform to select the devices from Returns: list of CLEnvironment: List with the CL device environments.
[ "Get", "multiple", "device", "environments", "optionally", "only", "of", "the", "indicated", "type", "." ]
python
train
35.485714
eaton-lab/toytree
toytree/Toytree.py
https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/Toytree.py#L545-L560
def resolve_polytomy(
    self,
    dist=1.0,
    support=100,
    recursive=True):
    """
    Returns a copy of the tree with all polytomies randomly resolved.
    Does not transform tree in-place.
    """
    nself = self.copy()
    nself.treenode.resolve_polytomy(
        default_dist=dist,
        default_support=support,
        recursive=recursive)
    nself._coords.update()
    return nself
[ "def", "resolve_polytomy", "(", "self", ",", "dist", "=", "1.0", ",", "support", "=", "100", ",", "recursive", "=", "True", ")", ":", "nself", "=", "self", ".", "copy", "(", ")", "nself", ".", "treenode", ".", "resolve_polytomy", "(", "default_dist", "=", "dist", ",", "default_support", "=", "support", ",", "recursive", "=", "recursive", ")", "nself", ".", "_coords", ".", "update", "(", ")", "return", "nself" ]
Returns a copy of the tree with all polytomies randomly resolved. Does not transform tree in-place.
[ "Returns", "a", "copy", "of", "the", "tree", "with", "all", "polytomies", "randomly", "resolved", ".", "Does", "not", "transform", "tree", "in", "-", "place", "." ]
python
train
27.875
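A short usage sketch for the `resolve_polytomy` record above; the newick string is made up and `toytree.tree()` is assumed as the usual parsing entry point.

```python
import toytree

# A tree containing one polytomy: three children hang off a single internal node.
tre = toytree.tree("((a,b,c),d);")

# Returns a modified copy; the original tree object is left untouched.
resolved = tre.resolve_polytomy(dist=0.5, support=100)
```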
vanheeringen-lab/gimmemotifs
gimmemotifs/rocmetrics.py
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/rocmetrics.py#L204-L230
def enr_at_fpr(fg_vals, bg_vals, fpr=0.01):
    """
    Computes the enrichment at a specific FPR (default 1%).

    Parameters
    ----------
    fg_vals : array_like
        The list of values for the positive set.

    bg_vals : array_like
        The list of values for the negative set.

    fpr : float, optional
        The FPR (between 0.0 and 1.0).

    Returns
    -------
    enrichment : float
        The enrichment at the specified FPR.
    """
    pos = np.array(fg_vals)
    neg = np.array(bg_vals)
    s = scoreatpercentile(neg, 100 - fpr * 100)
    neg_matches = float(len(neg[neg >= s]))
    if neg_matches == 0:
        return float("inf")
    return len(pos[pos >= s]) / neg_matches * len(neg) / float(len(pos))
[ "def", "enr_at_fpr", "(", "fg_vals", ",", "bg_vals", ",", "fpr", "=", "0.01", ")", ":", "pos", "=", "np", ".", "array", "(", "fg_vals", ")", "neg", "=", "np", ".", "array", "(", "bg_vals", ")", "s", "=", "scoreatpercentile", "(", "neg", ",", "100", "-", "fpr", "*", "100", ")", "neg_matches", "=", "float", "(", "len", "(", "neg", "[", "neg", ">=", "s", "]", ")", ")", "if", "neg_matches", "==", "0", ":", "return", "float", "(", "\"inf\"", ")", "return", "len", "(", "pos", "[", "pos", ">=", "s", "]", ")", "/", "neg_matches", "*", "len", "(", "neg", ")", "/", "float", "(", "len", "(", "pos", ")", ")" ]
Computes the enrichment at a specific FPR (default 1%). Parameters ---------- fg_vals : array_like The list of values for the positive set. bg_vals : array_like The list of values for the negative set. fpr : float, optional The FPR (between 0.0 and 1.0). Returns ------- enrichment : float The enrichment at the specified FPR.
[ "Computes", "the", "enrichment", "at", "a", "specific", "FPR", "(", "default", "1%", ")", "." ]
python
train
26.444444
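A runnable sketch of the `enr_at_fpr` record above on synthetic score distributions, importing it from the module path shown in the record.

```python
import numpy as np
from gimmemotifs.rocmetrics import enr_at_fpr

rng = np.random.default_rng(0)
fg_vals = rng.normal(loc=2.0, size=500)    # positive-set scores, shifted upwards
bg_vals = rng.normal(loc=0.0, size=5000)   # negative-set scores

# Enrichment of positives over negatives above the score threshold taken at 1% FPR.
print(enr_at_fpr(fg_vals, bg_vals, fpr=0.01))
```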
i3visio/osrframework
osrframework/searchfy.py
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/searchfy.py#L98-L187
def main(params=None):
    """
    Main function to launch usufy.

    The function is created in this way so as to let other applications make
    use of the full configuration capabilities of the application. The
    parameters received are used as parsed by this modules `getParser()`.

    Args:
    -----
        params: A list with the parameters as grabbed by the terminal. It is
            None when this is called by an entry_point. If it is called by osrf
            the data is already parsed.

    Returns:
    --------
        A list of i3visio entities.
    """
    if params == None:
        parser = getParser()
        args = parser.parse_args(params)
    else:
        args = params

    results = []

    print(general.title(banner.text))

    sayingHello = """
     Searchfy | Copyright (C) Yaiza Rubio & Félix Brezo (i3visio) 2014-2018

This program comes with ABSOLUTELY NO WARRANTY. This is free software, and you
are welcome to redistribute it under certain conditions. For additional info,
visit <{}>.
""".format(general.LICENSE_URL)
    print(general.info(sayingHello))

    if args.license:
        general.showLicense()
    else:
        # Showing the execution time...
        startTime = dt.datetime.now()
        print(str(startTime) + "\tStarting search in different platform(s)... Relax!\n")
        print(general.emphasis("\tPress <Ctrl + C> to stop...\n"))

        # Performing the search
        try:
            results = performSearch(platformNames=args.platforms, queries=args.queries, process=args.process, excludePlatformNames=args.exclude)
        except KeyboardInterrupt:
            print(general.error("\n[!] Process manually stopped by the user. Workers terminated without providing any result.\n"))
            results = []

        # Generating summary files for each ...
        if args.extension:
            # Verifying if the outputPath exists
            if not os.path.exists(args.output_folder):
                os.makedirs(args.output_folder)

            # Grabbing the results
            fileHeader = os.path.join(args.output_folder, args.file_header)

            # Iterating through the given extensions to print its values
            for ext in args.extension:
                # Generating output files
                general.exportUsufy(results, ext, fileHeader)

        # Printing the results if requested
        now = dt.datetime.now()
        print("\n{}\tResults obtained:\n".format(str(now)))
        print(general.success(general.usufyToTextExport(results)))

        if args.web_browser:
            general.openResultsInBrowser(results)

        now = dt.datetime.now()
        print("\n{date}\tYou can find all the information collected in the following files:".format(date=str(now)))
        for ext in args.extension:
            # Showing the output files
            print("\t" + general.emphasis(fileHeader + "." + ext))

        # Showing the execution time...
        endTime = dt.datetime.now()
        print("\n{date}\tFinishing execution...\n".format(date=str(endTime)))
        print("Total time used:\t" + general.emphasis(str(endTime-startTime)))
        print("Average seconds/query:\t" + general.emphasis(str((endTime-startTime).total_seconds()/len(args.platforms))) + " seconds\n")

        # Urging users to place an issue on Github...
        print(banner.footer)

    if params:
        return results
[ "def", "main", "(", "params", "=", "None", ")", ":", "if", "params", "==", "None", ":", "parser", "=", "getParser", "(", ")", "args", "=", "parser", ".", "parse_args", "(", "params", ")", "else", ":", "args", "=", "params", "results", "=", "[", "]", "print", "(", "general", ".", "title", "(", "banner", ".", "text", ")", ")", "sayingHello", "=", "\"\"\"\n Searchfy | Copyright (C) Yaiza Rubio & Félix Brezo (i3visio) 2014-2018\n\nThis program comes with ABSOLUTELY NO WARRANTY. This is free software, and you\nare welcome to redistribute it under certain conditions. For additional info,\nvisit <{}>.\n\"\"\"", ".", "format", "(", "general", ".", "LICENSE_URL", ")", "print", "(", "general", ".", "info", "(", "sayingHello", ")", ")", "if", "args", ".", "license", ":", "general", ".", "showLicense", "(", ")", "else", ":", "# Showing the execution time...", "startTime", "=", "dt", ".", "datetime", ".", "now", "(", ")", "print", "(", "str", "(", "startTime", ")", "+", "\"\\tStarting search in different platform(s)... Relax!\\n\"", ")", "print", "(", "general", ".", "emphasis", "(", "\"\\tPress <Ctrl + C> to stop...\\n\"", ")", ")", "# Performing the search", "try", ":", "results", "=", "performSearch", "(", "platformNames", "=", "args", ".", "platforms", ",", "queries", "=", "args", ".", "queries", ",", "process", "=", "args", ".", "process", ",", "excludePlatformNames", "=", "args", ".", "exclude", ")", "except", "KeyboardInterrupt", ":", "print", "(", "general", ".", "error", "(", "\"\\n[!] Process manually stopped by the user. Workers terminated without providing any result.\\n\"", ")", ")", "results", "=", "[", "]", "# Generating summary files for each ...", "if", "args", ".", "extension", ":", "# Verifying if the outputPath exists", "if", "not", "os", ".", "path", ".", "exists", "(", "args", ".", "output_folder", ")", ":", "os", ".", "makedirs", "(", "args", ".", "output_folder", ")", "# Grabbing the results", "fileHeader", "=", "os", ".", "path", ".", "join", "(", "args", ".", "output_folder", ",", "args", ".", "file_header", ")", "# Iterating through the given extensions to print its values", "for", "ext", "in", "args", ".", "extension", ":", "# Generating output files", "general", ".", "exportUsufy", "(", "results", ",", "ext", ",", "fileHeader", ")", "# Printing the results if requested", "now", "=", "dt", ".", "datetime", ".", "now", "(", ")", "print", "(", "\"\\n{}\\tResults obtained:\\n\"", ".", "format", "(", "str", "(", "now", ")", ")", ")", "print", "(", "general", ".", "success", "(", "general", ".", "usufyToTextExport", "(", "results", ")", ")", ")", "if", "args", ".", "web_browser", ":", "general", ".", "openResultsInBrowser", "(", "results", ")", "now", "=", "dt", ".", "datetime", ".", "now", "(", ")", "print", "(", "\"\\n{date}\\tYou can find all the information collected in the following files:\"", ".", "format", "(", "date", "=", "str", "(", "now", ")", ")", ")", "for", "ext", "in", "args", ".", "extension", ":", "# Showing the output files", "print", "(", "\"\\t\"", "+", "general", ".", "emphasis", "(", "fileHeader", "+", "\".\"", "+", "ext", ")", ")", "# Showing the execution time...", "endTime", "=", "dt", ".", "datetime", ".", "now", "(", ")", "print", "(", "\"\\n{date}\\tFinishing execution...\\n\"", ".", "format", "(", "date", "=", "str", "(", "endTime", ")", ")", ")", "print", "(", "\"Total time used:\\t\"", "+", "general", ".", "emphasis", "(", "str", "(", "endTime", "-", "startTime", ")", ")", ")", "print", "(", "\"Average seconds/query:\\t\"", "+", "general", ".", 
"emphasis", "(", "str", "(", "(", "endTime", "-", "startTime", ")", ".", "total_seconds", "(", ")", "/", "len", "(", "args", ".", "platforms", ")", ")", ")", "+", "\" seconds\\n\"", ")", "# Urging users to place an issue on Github...", "print", "(", "banner", ".", "footer", ")", "if", "params", ":", "return", "results" ]
Main function to launch usufy. The function is created in this way so as to let other applications make use of the full configuration capabilities of the application. The parameters received are used as parsed by this modules `getParser()`. Args: ----- params: A list with the parameters as grabbed by the terminal. It is None when this is called by an entry_point. If it is called by osrf the data is already parsed. Returns: -------- A list of i3visio entities.
[ "Main", "function", "to", "launch", "usufy", "." ]
python
train
36.844444
knipknap/SpiffWorkflow
SpiffWorkflow/task.py
https://github.com/knipknap/SpiffWorkflow/blob/f0af7f59a332e0619e4f3c00a7d4a3d230760e00/SpiffWorkflow/task.py#L456-L470
def _find_ancestor(self, task_spec):
    """
    Returns the ancestor that has the given task spec assigned.
    If no such ancestor was found, the root task is returned.

    :type  task_spec: TaskSpec
    :param task_spec: The wanted task spec.
    :rtype:  Task
    :returns: The ancestor.
    """
    if self.parent is None:
        return self
    if self.parent.task_spec == task_spec:
        return self.parent
    return self.parent._find_ancestor(task_spec)
[ "def", "_find_ancestor", "(", "self", ",", "task_spec", ")", ":", "if", "self", ".", "parent", "is", "None", ":", "return", "self", "if", "self", ".", "parent", ".", "task_spec", "==", "task_spec", ":", "return", "self", ".", "parent", "return", "self", ".", "parent", ".", "_find_ancestor", "(", "task_spec", ")" ]
Returns the ancestor that has the given task spec assigned. If no such ancestor was found, the root task is returned. :type task_spec: TaskSpec :param task_spec: The wanted task spec. :rtype: Task :returns: The ancestor.
[ "Returns", "the", "ancestor", "that", "has", "the", "given", "task", "spec", "assigned", ".", "If", "no", "such", "ancestor", "was", "found", "the", "root", "task", "is", "returned", "." ]
python
valid
33.666667
pyannote/pyannote-metrics
pyannote/metrics/binary_classification.py
https://github.com/pyannote/pyannote-metrics/blob/b433fec3bd37ca36fe026a428cd72483d646871a/pyannote/metrics/binary_classification.py#L38-L78
def det_curve(y_true, scores, distances=False):
    """DET curve

    Parameters
    ----------
    y_true : (n_samples, ) array-like
        Boolean reference.
    scores : (n_samples, ) array-like
        Predicted score.
    distances : boolean, optional
        When True, indicate that `scores` are actually `distances`

    Returns
    -------
    fpr : numpy array
        False alarm rate
    fnr : numpy array
        False rejection rate
    thresholds : numpy array
        Corresponding thresholds
    eer : float
        Equal error rate
    """
    if distances:
        scores = -scores

    # compute false positive and false negative rates
    # (a.k.a. false alarm and false rejection rates)
    fpr, tpr, thresholds = sklearn.metrics.roc_curve(
        y_true, scores, pos_label=True)
    fnr = 1 - tpr
    if distances:
        thresholds = -thresholds

    # estimate equal error rate
    eer_index = np.where(fpr > fnr)[0][0]
    eer = .25 * (fpr[eer_index-1] + fpr[eer_index] +
                 fnr[eer_index-1] + fnr[eer_index])

    return fpr, fnr, thresholds, eer
[ "def", "det_curve", "(", "y_true", ",", "scores", ",", "distances", "=", "False", ")", ":", "if", "distances", ":", "scores", "=", "-", "scores", "# compute false positive and false negative rates", "# (a.k.a. false alarm and false rejection rates)", "fpr", ",", "tpr", ",", "thresholds", "=", "sklearn", ".", "metrics", ".", "roc_curve", "(", "y_true", ",", "scores", ",", "pos_label", "=", "True", ")", "fnr", "=", "1", "-", "tpr", "if", "distances", ":", "thresholds", "=", "-", "thresholds", "# estimate equal error rate", "eer_index", "=", "np", ".", "where", "(", "fpr", ">", "fnr", ")", "[", "0", "]", "[", "0", "]", "eer", "=", ".25", "*", "(", "fpr", "[", "eer_index", "-", "1", "]", "+", "fpr", "[", "eer_index", "]", "+", "fnr", "[", "eer_index", "-", "1", "]", "+", "fnr", "[", "eer_index", "]", ")", "return", "fpr", ",", "fnr", ",", "thresholds", ",", "eer" ]
DET curve Parameters ---------- y_true : (n_samples, ) array-like Boolean reference. scores : (n_samples, ) array-like Predicted score. distances : boolean, optional When True, indicate that `scores` are actually `distances` Returns ------- fpr : numpy array False alarm rate fnr : numpy array False rejection rate thresholds : numpy array Corresponding thresholds eer : float Equal error rate
[ "DET", "curve" ]
python
train
25.634146
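A minimal sketch of the `det_curve` record above on toy data; pass `distances=True` when the inputs are distances rather than scores so the sign flip described in the code is applied.

```python
import numpy as np
from pyannote.metrics.binary_classification import det_curve

y_true = np.array([True, True, True, False, True, False, False, False])
scores = np.array([0.9, 0.8, 0.7, 0.6, 0.4, 0.35, 0.2, 0.1])

fpr, fnr, thresholds, eer = det_curve(y_true, scores)
print(eer)  # equal error rate, estimated around the fpr/fnr crossing
```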
JelleAalbers/multihist
multihist.py
https://github.com/JelleAalbers/multihist/blob/072288277f807e7e388fdf424c3921c80576f3ab/multihist.py#L427-L440
def slice(self, start, stop=None, axis=0):
    """Restrict histogram to bins whose data values (not bin numbers) along axis
    are between start and stop (both inclusive). Returns d dimensional histogram."""
    if stop is None:
        # Make a 1=bin slice
        stop = start
    axis = self.get_axis_number(axis)
    start_bin = max(0, self.get_axis_bin_index(start, axis))
    stop_bin = min(len(self.bin_centers(axis)) - 1,   # TODO: test off by one!
                   self.get_axis_bin_index(stop, axis))
    new_bin_edges = self.bin_edges.copy()
    new_bin_edges[axis] = new_bin_edges[axis][start_bin:stop_bin + 2]   # TODO: Test off by one here!
    return Histdd.from_histogram(np.take(self.histogram, np.arange(start_bin, stop_bin + 1), axis=axis),
                                 bin_edges=new_bin_edges, axis_names=self.axis_names)
[ "def", "slice", "(", "self", ",", "start", ",", "stop", "=", "None", ",", "axis", "=", "0", ")", ":", "if", "stop", "is", "None", ":", "# Make a 1=bin slice", "stop", "=", "start", "axis", "=", "self", ".", "get_axis_number", "(", "axis", ")", "start_bin", "=", "max", "(", "0", ",", "self", ".", "get_axis_bin_index", "(", "start", ",", "axis", ")", ")", "stop_bin", "=", "min", "(", "len", "(", "self", ".", "bin_centers", "(", "axis", ")", ")", "-", "1", ",", "# TODO: test off by one!", "self", ".", "get_axis_bin_index", "(", "stop", ",", "axis", ")", ")", "new_bin_edges", "=", "self", ".", "bin_edges", ".", "copy", "(", ")", "new_bin_edges", "[", "axis", "]", "=", "new_bin_edges", "[", "axis", "]", "[", "start_bin", ":", "stop_bin", "+", "2", "]", "# TODO: Test off by one here!", "return", "Histdd", ".", "from_histogram", "(", "np", ".", "take", "(", "self", ".", "histogram", ",", "np", ".", "arange", "(", "start_bin", ",", "stop_bin", "+", "1", ")", ",", "axis", "=", "axis", ")", ",", "bin_edges", "=", "new_bin_edges", ",", "axis_names", "=", "self", ".", "axis_names", ")" ]
Restrict histogram to bins whose data values (not bin numbers) along axis are between start and stop (both inclusive). Returns d dimensional histogram.
[ "Restrict", "histogram", "to", "bins", "whose", "data", "values", "(", "not", "bin", "numbers", ")", "along", "axis", "are", "between", "start", "and", "stop", "(", "both", "inclusive", ")", ".", "Returns", "d", "dimensional", "histogram", "." ]
python
train
63.285714
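A sketch for the `slice` record above, built through `Histdd.from_histogram`, the same constructor the method itself returns through; the data, bins, and axis names are synthetic, and passing `np.histogramdd` edges straight in is an assumption.

```python
import numpy as np
from multihist import Histdd

# Synthetic 2-d data histogrammed with numpy, then wrapped in a Histdd.
xs, ys = np.random.uniform(0, 1, size=(2, 10000))
counts, edges = np.histogramdd(np.stack([xs, ys], axis=1), bins=(10, 10), range=[(0, 1), (0, 1)])
hist = Histdd.from_histogram(counts, bin_edges=edges, axis_names=["x", "y"])

# Keep only bins whose x-values fall between 0.2 and 0.8 (inclusive).
sliced = hist.slice(0.2, 0.8, axis=0)
```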
michaelaye/pyciss
pyciss/opusapi.py
https://github.com/michaelaye/pyciss/blob/019256424466060babead7edab86736c881b0831/pyciss/opusapi.py#L251-L281
def get_between_times(self, t1, t2, target=None):
    """
    Query for OPUS data between times t1 and t2.

    Parameters
    ----------
    t1, t2 : datetime.datetime, strings
        Start and end time for the query. If type is datetime, will be
        converted to isoformat string. If type is string already, it needs
        to be in an accepted international format for time strings.
    target : str
        Potential target for the observation query. Most likely will
        reduce the amount of data matching the query a lot.

    Returns
    -------
    None, but set's state of the object to have new query results stored
    in self.obsids.
    """
    try:
        # checking if times have isoformat() method (datetimes have)
        t1 = t1.isoformat()
        t2 = t2.isoformat()
    except AttributeError:
        # if not, should already be a string, so do nothing.
        pass
    myquery = self._get_time_query(t1, t2)
    if target is not None:
        myquery["target"] = target
    self.create_files_request(myquery, fmt="json")
    self.unpack_json_response()
[ "def", "get_between_times", "(", "self", ",", "t1", ",", "t2", ",", "target", "=", "None", ")", ":", "try", ":", "# checking if times have isoformat() method (datetimes have)", "t1", "=", "t1", ".", "isoformat", "(", ")", "t2", "=", "t2", ".", "isoformat", "(", ")", "except", "AttributeError", ":", "# if not, should already be a string, so do nothing.", "pass", "myquery", "=", "self", ".", "_get_time_query", "(", "t1", ",", "t2", ")", "if", "target", "is", "not", "None", ":", "myquery", "[", "\"target\"", "]", "=", "target", "self", ".", "create_files_request", "(", "myquery", ",", "fmt", "=", "\"json\"", ")", "self", ".", "unpack_json_response", "(", ")" ]
Query for OPUS data between times t1 and t2. Parameters ---------- t1, t2 : datetime.datetime, strings Start and end time for the query. If type is datetime, will be converted to isoformat string. If type is string already, it needs to be in an accepted international format for time strings. target : str Potential target for the observation query. Most likely will reduce the amount of data matching the query a lot. Returns ------- None, but set's state of the object to have new query results stored in self.obsids.
[ "Query", "for", "OPUS", "data", "between", "times", "t1", "and", "t2", "." ]
python
train
37.677419
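A hedged usage sketch for the `get_between_times` record above; the class name `OPUS` for the wrapper in `pyciss/opusapi.py` is an assumption, and the times and target are illustrative.

```python
import datetime as dt
from pyciss.opusapi import OPUS  # assumed class name for the wrapper shown above

opus = OPUS()
t1 = dt.datetime(2005, 10, 1)
t2 = dt.datetime(2005, 10, 3)

# Accepts datetimes (converted to isoformat internally) or ISO-formatted strings.
opus.get_between_times(t1, t2, target="SATURN")
print(opus.obsids)  # per the docstring, results are stored on the object
```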
theonion/django-bulbs
bulbs/contributions/filters.py
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/contributions/filters.py#L16-L26
def filter_queryset(self, request, queryset, view):
    """Apply the relevant behaviors to the view queryset."""
    start_value = self.get_start(request)
    if start_value:
        queryset = self.apply_published_filter(queryset, "after", start_value)

    end_value = self.get_end(request)
    if end_value:
        # Forces the end_value to be the last second of the date provided in the query.
        # Necessary currently as our Published filter for es only applies to gte & lte.
        queryset = self.apply_published_filter(queryset, "before", end_value)
    return queryset
[ "def", "filter_queryset", "(", "self", ",", "request", ",", "queryset", ",", "view", ")", ":", "start_value", "=", "self", ".", "get_start", "(", "request", ")", "if", "start_value", ":", "queryset", "=", "self", ".", "apply_published_filter", "(", "queryset", ",", "\"after\"", ",", "start_value", ")", "end_value", "=", "self", ".", "get_end", "(", "request", ")", "if", "end_value", ":", "# Forces the end_value to be the last second of the date provided in the query.", "# Necessary currently as our Published filter for es only applies to gte & lte.", "queryset", "=", "self", ".", "apply_published_filter", "(", "queryset", ",", "\"before\"", ",", "end_value", ")", "return", "queryset" ]
Apply the relevant behaviors to the view queryset.
[ "Apply", "the", "relevant", "behaviors", "to", "the", "view", "queryset", "." ]
python
train
55.727273
gem/oq-engine
openquake/commonlib/shapefileparser.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/shapefileparser.py#L910-L935
def read(self, nrml_file, validate=False, simple_fault_spacing=1.0, complex_mesh_spacing=5.0, mfd_spacing=0.1): """ Build the source model from nrml format """ self.source_file = nrml_file if validate: converter = SourceConverter(1.0, simple_fault_spacing, complex_mesh_spacing, mfd_spacing, 10.0) converter.fname = nrml_file root = nrml.read(nrml_file) if root['xmlns'] == 'http://openquake.org/xmlns/nrml/0.4': sg_nodes = [root.sourceModel.nodes] else: # NRML 0.5 sg_nodes = root.sourceModel.nodes sources = [] for sg_node in sg_nodes: for no, src_node in enumerate(sg_node, 1): if validate: print("Validating Source %s" % src_node.attrib["id"]) converter.convert_node(src_node) sources.append(src_node) return SourceModel(sources)
[ "def", "read", "(", "self", ",", "nrml_file", ",", "validate", "=", "False", ",", "simple_fault_spacing", "=", "1.0", ",", "complex_mesh_spacing", "=", "5.0", ",", "mfd_spacing", "=", "0.1", ")", ":", "self", ".", "source_file", "=", "nrml_file", "if", "validate", ":", "converter", "=", "SourceConverter", "(", "1.0", ",", "simple_fault_spacing", ",", "complex_mesh_spacing", ",", "mfd_spacing", ",", "10.0", ")", "converter", ".", "fname", "=", "nrml_file", "root", "=", "nrml", ".", "read", "(", "nrml_file", ")", "if", "root", "[", "'xmlns'", "]", "==", "'http://openquake.org/xmlns/nrml/0.4'", ":", "sg_nodes", "=", "[", "root", ".", "sourceModel", ".", "nodes", "]", "else", ":", "# NRML 0.5", "sg_nodes", "=", "root", ".", "sourceModel", ".", "nodes", "sources", "=", "[", "]", "for", "sg_node", "in", "sg_nodes", ":", "for", "no", ",", "src_node", "in", "enumerate", "(", "sg_node", ",", "1", ")", ":", "if", "validate", ":", "print", "(", "\"Validating Source %s\"", "%", "src_node", ".", "attrib", "[", "\"id\"", "]", ")", "converter", ".", "convert_node", "(", "src_node", ")", "sources", ".", "append", "(", "src_node", ")", "return", "SourceModel", "(", "sources", ")" ]
Build the source model from nrml format
[ "Build", "the", "source", "model", "from", "nrml", "format" ]
python
train
41.346154
merll/docker-fabric
dockerfabric/utils/files.py
https://github.com/merll/docker-fabric/blob/785d84e40e17265b667d8b11a6e30d8e6b2bf8d4/dockerfabric/utils/files.py#L98-L108
def local_temp_dir(): """ Creates a local temporary directory. The directory is removed when no longer needed. Failure to do so will be ignored. :return: Path to the temporary directory. :rtype: unicode """ path = tempfile.mkdtemp() yield path shutil.rmtree(path, ignore_errors=True)
[ "def", "local_temp_dir", "(", ")", ":", "path", "=", "tempfile", ".", "mkdtemp", "(", ")", "yield", "path", "shutil", ".", "rmtree", "(", "path", ",", "ignore_errors", "=", "True", ")" ]
Creates a local temporary directory. The directory is removed when no longer needed. Failure to do so will be ignored. :return: Path to the temporary directory. :rtype: unicode
[ "Creates", "a", "local", "temporary", "directory", ".", "The", "directory", "is", "removed", "when", "no", "longer", "needed", ".", "Failure", "to", "do", "so", "will", "be", "ignored", "." ]
python
train
28.181818
gmr/queries
queries/tornado_session.py
https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/tornado_session.py#L452-L458
def _incr_exceptions(self, conn): """Increment the number of exceptions for the current connection. :param psycopg2.extensions.connection conn: the psycopg2 connection """ self._pool_manager.get_connection(self.pid, conn).exceptions += 1
[ "def", "_incr_exceptions", "(", "self", ",", "conn", ")", ":", "self", ".", "_pool_manager", ".", "get_connection", "(", "self", ".", "pid", ",", "conn", ")", ".", "exceptions", "+=", "1" ]
Increment the number of exceptions for the current connection. :param psycopg2.extensions.connection conn: the psycopg2 connection
[ "Increment", "the", "number", "of", "exceptions", "for", "the", "current", "connection", "." ]
python
train
37.857143
seatgeek/fuzzywuzzy
fuzzywuzzy/fuzz.py
https://github.com/seatgeek/fuzzywuzzy/blob/778162c5a73256745eb6ae22f925bc2dbcf7c894/fuzzywuzzy/fuzz.py#L181-L207
def QRatio(s1, s2, force_ascii=True, full_process=True): """ Quick ratio comparison between two strings. Runs full_process from utils on both strings Short circuits if either of the strings is empty after processing. :param s1: :param s2: :param force_ascii: Allow only ASCII characters (Default: True) :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True) :return: similarity ratio """ if full_process: p1 = utils.full_process(s1, force_ascii=force_ascii) p2 = utils.full_process(s2, force_ascii=force_ascii) else: p1 = s1 p2 = s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 return ratio(p1, p2)
[ "def", "QRatio", "(", "s1", ",", "s2", ",", "force_ascii", "=", "True", ",", "full_process", "=", "True", ")", ":", "if", "full_process", ":", "p1", "=", "utils", ".", "full_process", "(", "s1", ",", "force_ascii", "=", "force_ascii", ")", "p2", "=", "utils", ".", "full_process", "(", "s2", ",", "force_ascii", "=", "force_ascii", ")", "else", ":", "p1", "=", "s1", "p2", "=", "s2", "if", "not", "utils", ".", "validate_string", "(", "p1", ")", ":", "return", "0", "if", "not", "utils", ".", "validate_string", "(", "p2", ")", ":", "return", "0", "return", "ratio", "(", "p1", ",", "p2", ")" ]
Quick ratio comparison between two strings. Runs full_process from utils on both strings Short circuits if either of the strings is empty after processing. :param s1: :param s2: :param force_ascii: Allow only ASCII characters (Default: True) :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True) :return: similarity ratio
[ "Quick", "ratio", "comparison", "between", "two", "strings", "." ]
python
train
28.740741
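A brief usage sketch for the QRatio helper in the record above; it assumes the fuzzywuzzy package is installed and that QRatio is called through the fuzz module this file defines.

    from fuzzywuzzy import fuzz

    # Both strings are normalized by full_process (lower-cased, non-alphanumeric stripped) first.
    print(fuzz.QRatio("New York Mets", "new york mets"))   # similarity ratio on a 0-100 scale
    # An input that is empty after processing short-circuits to 0:
    print(fuzz.QRatio("", "new york mets"))                # 0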
Loudr/pale
pale/endpoint.py
https://github.com/Loudr/pale/blob/dc002ee6032c856551143af222ff8f71ed9853fe/pale/endpoint.py#L351-L367
def _parse_handler_result(self, result): """Parses the item(s) returned by your handler implementation. Handlers may return a single item (payload), or a tuple that gets passed to the Response class __init__ method of your HTTP layer. _parse_handler_result separates the payload from the rest the tuple, as well as providing the tuple so that it can be re-composed after the payload has been run through the `_returns` Resource's renderer. """ if isinstance(result, (list, tuple)): payload = result[0] list_result = list(result) else: payload = result list_result = [""] return payload, list_result
[ "def", "_parse_handler_result", "(", "self", ",", "result", ")", ":", "if", "isinstance", "(", "result", ",", "(", "list", ",", "tuple", ")", ")", ":", "payload", "=", "result", "[", "0", "]", "list_result", "=", "list", "(", "result", ")", "else", ":", "payload", "=", "result", "list_result", "=", "[", "\"\"", "]", "return", "payload", ",", "list_result" ]
Parses the item(s) returned by your handler implementation. Handlers may return a single item (payload), or a tuple that gets passed to the Response class __init__ method of your HTTP layer. _parse_handler_result separates the payload from the rest of the tuple, as well as providing the tuple so that it can be re-composed after the payload has been run through the `_returns` Resource's renderer.
[ "Parses", "the", "item", "(", "s", ")", "returned", "by", "your", "handler", "implementation", "." ]
python
train
41.882353
mmp2/megaman
megaman/plotter/covar_plotter3.py
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/plotter/covar_plotter3.py#L38-L42
def merge_keywords(x,y): """Given two dicts, merge them into a new dict as a shallow copy.""" z = x.copy() z.update(y) return z
[ "def", "merge_keywords", "(", "x", ",", "y", ")", ":", "z", "=", "x", ".", "copy", "(", ")", "z", ".", "update", "(", "y", ")", "return", "z" ]
Given two dicts, merge them into a new dict as a shallow copy.
[ "Given", "two", "dicts", "merge", "them", "into", "a", "new", "dict", "as", "a", "shallow", "copy", "." ]
python
train
27.8
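A trivial usage sketch of merge_keywords from the record above; the dictionaries are illustrative.

    defaults = {"color": "k", "alpha": 0.5}
    overrides = {"alpha": 1.0}
    merged = merge_keywords(defaults, overrides)
    # keys from the second dict win on collision; neither input dict is mutated
    assert merged == {"color": "k", "alpha": 1.0}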
jaywink/federation
federation/utils/django.py
https://github.com/jaywink/federation/blob/59d31bb37e662891dbea72c1dee05dc53146c78b/federation/utils/django.py#L30-L39
def get_function_from_config(item): """ Import the function to get profile by handle. """ config = get_configuration() func_path = config.get(item) module_path, func_name = func_path.rsplit(".", 1) module = importlib.import_module(module_path) func = getattr(module, func_name) return func
[ "def", "get_function_from_config", "(", "item", ")", ":", "config", "=", "get_configuration", "(", ")", "func_path", "=", "config", ".", "get", "(", "item", ")", "module_path", ",", "func_name", "=", "func_path", ".", "rsplit", "(", "\".\"", ",", "1", ")", "module", "=", "importlib", ".", "import_module", "(", "module_path", ")", "func", "=", "getattr", "(", "module", ",", "func_name", ")", "return", "func" ]
Import the function to get profile by handle.
[ "Import", "the", "function", "to", "get", "profile", "by", "handle", "." ]
python
train
31.6
mathandy/svgpathtools
svgpathtools/bezier.py
https://github.com/mathandy/svgpathtools/blob/fd7348a1dfd88b65ea61da02325c6605aedf8c4f/svgpathtools/bezier.py#L122-L144
def split_bezier(bpoints, t): """Uses deCasteljau's recursion to split the Bezier curve at t into two Bezier curves of the same order.""" def split_bezier_recursion(bpoints_left_, bpoints_right_, bpoints_, t_): if len(bpoints_) == 1: bpoints_left_.append(bpoints_[0]) bpoints_right_.append(bpoints_[0]) else: new_points = [None]*(len(bpoints_) - 1) bpoints_left_.append(bpoints_[0]) bpoints_right_.append(bpoints_[-1]) for i in range(len(bpoints_) - 1): new_points[i] = (1 - t_)*bpoints_[i] + t_*bpoints_[i + 1] bpoints_left_, bpoints_right_ = split_bezier_recursion( bpoints_left_, bpoints_right_, new_points, t_) return bpoints_left_, bpoints_right_ bpoints_left = [] bpoints_right = [] bpoints_left, bpoints_right = \ split_bezier_recursion(bpoints_left, bpoints_right, bpoints, t) bpoints_right.reverse() return bpoints_left, bpoints_right
[ "def", "split_bezier", "(", "bpoints", ",", "t", ")", ":", "def", "split_bezier_recursion", "(", "bpoints_left_", ",", "bpoints_right_", ",", "bpoints_", ",", "t_", ")", ":", "if", "len", "(", "bpoints_", ")", "==", "1", ":", "bpoints_left_", ".", "append", "(", "bpoints_", "[", "0", "]", ")", "bpoints_right_", ".", "append", "(", "bpoints_", "[", "0", "]", ")", "else", ":", "new_points", "=", "[", "None", "]", "*", "(", "len", "(", "bpoints_", ")", "-", "1", ")", "bpoints_left_", ".", "append", "(", "bpoints_", "[", "0", "]", ")", "bpoints_right_", ".", "append", "(", "bpoints_", "[", "-", "1", "]", ")", "for", "i", "in", "range", "(", "len", "(", "bpoints_", ")", "-", "1", ")", ":", "new_points", "[", "i", "]", "=", "(", "1", "-", "t_", ")", "*", "bpoints_", "[", "i", "]", "+", "t_", "*", "bpoints_", "[", "i", "+", "1", "]", "bpoints_left_", ",", "bpoints_right_", "=", "split_bezier_recursion", "(", "bpoints_left_", ",", "bpoints_right_", ",", "new_points", ",", "t_", ")", "return", "bpoints_left_", ",", "bpoints_right_", "bpoints_left", "=", "[", "]", "bpoints_right", "=", "[", "]", "bpoints_left", ",", "bpoints_right", "=", "split_bezier_recursion", "(", "bpoints_left", ",", "bpoints_right", ",", "bpoints", ",", "t", ")", "bpoints_right", ".", "reverse", "(", ")", "return", "bpoints_left", ",", "bpoints_right" ]
Uses deCasteljau's recursion to split the Bezier curve at t into two Bezier curves of the same order.
[ "Uses", "deCasteljau", "s", "recursion", "to", "split", "the", "Bezier", "curve", "at", "t", "into", "two", "Bezier", "curves", "of", "the", "same", "order", "." ]
python
train
43.565217
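A short usage sketch for split_bezier above; svgpathtools represents points as complex numbers, the import path is taken from the record, and the quadratic control points here are made up for illustration.

    from svgpathtools.bezier import split_bezier

    bpoints = [0 + 0j, 1 + 2j, 2 + 0j]        # hypothetical quadratic Bezier control points
    left, right = split_bezier(bpoints, 0.5)  # de Casteljau split at t = 0.5
    # `left` and `right` are control-point lists of the same order as the input;
    # left[-1] == right[0] is the point on the curve at t = 0.5 (here 1 + 1j).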
google/budou
budou/parser.py
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/parser.py#L129-L149
def get_parser(segmenter, **options): """Gets a parser. Args: segmenter (str): Segmenter to use. options (:obj:`dict`, optional): Optional settings. Returns: Parser (:obj:`budou.parser.Parser`) Raises: ValueError: If unsupported segmenter is specified. """ if segmenter == 'nlapi': return NLAPIParser(**options) elif segmenter == 'mecab': return MecabParser() elif segmenter == 'tinysegmenter': return TinysegmenterParser() else: raise ValueError('Segmenter {} is not supported.'.format(segmenter))
[ "def", "get_parser", "(", "segmenter", ",", "*", "*", "options", ")", ":", "if", "segmenter", "==", "'nlapi'", ":", "return", "NLAPIParser", "(", "*", "*", "options", ")", "elif", "segmenter", "==", "'mecab'", ":", "return", "MecabParser", "(", ")", "elif", "segmenter", "==", "'tinysegmenter'", ":", "return", "TinysegmenterParser", "(", ")", "else", ":", "raise", "ValueError", "(", "'Segmenter {} is not supported.'", ".", "format", "(", "segmenter", ")", ")" ]
Gets a parser. Args: segmenter (str): Segmenter to use. options (:obj:`dict`, optional): Optional settings. Returns: Parser (:obj:`budou.parser.Parser`) Raises: ValueError: If unsupported segmenter is specified.
[ "Gets", "a", "parser", "." ]
python
train
25.285714
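A minimal usage sketch for get_parser above; only segmenter names handled in the code are used, and the import path is taken from the record.

    from budou.parser import get_parser

    parser = get_parser("tinysegmenter")   # pure-Python segmenter, no external API credentials needed
    # get_parser("unknown") raises ValueError("Segmenter unknown is not supported.")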
JarryShaw/PyPCAPKit
src/protocols/internet/ipv6_opts.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/internet/ipv6_opts.py#L212-L235
def _read_opt_type(self, kind): """Read option type field. Positional arguments: * kind -- int, option kind value Returns: * dict -- extracted IPv6_Opts option Structure of option type field [RFC 791]: Octets Bits Name Descriptions 0 0 ipv6_opts.opt.type.value Option Number 0 0 ipv6_opts.opt.type.action Action (00-11) 0 2 ipv6_opts.opt.type.change Change Flag (0/1) """ bin_ = bin(kind)[2:].zfill(8) type_ = dict( value=kind, action=_IPv6_Opts_ACT.get(bin_[:2]), change=True if int(bin_[2], base=2) else False, ) return type_
[ "def", "_read_opt_type", "(", "self", ",", "kind", ")", ":", "bin_", "=", "bin", "(", "kind", ")", "[", "2", ":", "]", ".", "zfill", "(", "8", ")", "type_", "=", "dict", "(", "value", "=", "kind", ",", "action", "=", "_IPv6_Opts_ACT", ".", "get", "(", "bin_", "[", ":", "2", "]", ")", ",", "change", "=", "True", "if", "int", "(", "bin_", "[", "2", "]", ",", "base", "=", "2", ")", "else", "False", ",", ")", "return", "type_" ]
Read option type field. Positional arguments: * kind -- int, option kind value Returns: * dict -- extracted IPv6_Opts option Structure of option type field [RFC 791]: Octets Bits Name Descriptions 0 0 ipv6_opts.opt.type.value Option Number 0 0 ipv6_opts.opt.type.action Action (00-11) 0 2 ipv6_opts.opt.type.change Change Flag (0/1)
[ "Read", "option", "type", "field", "." ]
python
train
32.458333
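The option-type decoding above boils down to a few bit operations; this standalone sketch mirrors that logic on a hypothetical kind value rather than calling the private method.

    kind = 0b11100010                      # hypothetical option-type octet
    bits = bin(kind)[2:].zfill(8)          # '11100010'
    action = bits[:2]                      # '11' -> two highest-order bits select the action
    change = bool(int(bits[2], base=2))    # third bit is the change flag -> True here
    value = kind                           # the full octet is kept as the option number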
graphql-python/graphql-core-next
graphql/utilities/find_breaking_changes.py
https://github.com/graphql-python/graphql-core-next/blob/073dce3f002f897d40f9348ffd8f107815160540/graphql/utilities/find_breaking_changes.py#L520-L548
def find_types_removed_from_unions( old_schema: GraphQLSchema, new_schema: GraphQLSchema ) -> List[BreakingChange]: """Find types removed from unions. Given two schemas, returns a list containing descriptions of any breaking changes in the new_schema related to removing types from a union type. """ old_type_map = old_schema.type_map new_type_map = new_schema.type_map types_removed_from_union = [] for old_type_name, old_type in old_type_map.items(): new_type = new_type_map.get(old_type_name) if not (is_union_type(old_type) and is_union_type(new_type)): continue old_type = cast(GraphQLUnionType, old_type) new_type = cast(GraphQLUnionType, new_type) type_names_in_new_union = {type_.name for type_ in new_type.types} for type_ in old_type.types: type_name = type_.name if type_name not in type_names_in_new_union: types_removed_from_union.append( BreakingChange( BreakingChangeType.TYPE_REMOVED_FROM_UNION, f"{type_name} was removed from union type {old_type_name}.", ) ) return types_removed_from_union
[ "def", "find_types_removed_from_unions", "(", "old_schema", ":", "GraphQLSchema", ",", "new_schema", ":", "GraphQLSchema", ")", "->", "List", "[", "BreakingChange", "]", ":", "old_type_map", "=", "old_schema", ".", "type_map", "new_type_map", "=", "new_schema", ".", "type_map", "types_removed_from_union", "=", "[", "]", "for", "old_type_name", ",", "old_type", "in", "old_type_map", ".", "items", "(", ")", ":", "new_type", "=", "new_type_map", ".", "get", "(", "old_type_name", ")", "if", "not", "(", "is_union_type", "(", "old_type", ")", "and", "is_union_type", "(", "new_type", ")", ")", ":", "continue", "old_type", "=", "cast", "(", "GraphQLUnionType", ",", "old_type", ")", "new_type", "=", "cast", "(", "GraphQLUnionType", ",", "new_type", ")", "type_names_in_new_union", "=", "{", "type_", ".", "name", "for", "type_", "in", "new_type", ".", "types", "}", "for", "type_", "in", "old_type", ".", "types", ":", "type_name", "=", "type_", ".", "name", "if", "type_name", "not", "in", "type_names_in_new_union", ":", "types_removed_from_union", ".", "append", "(", "BreakingChange", "(", "BreakingChangeType", ".", "TYPE_REMOVED_FROM_UNION", ",", "f\"{type_name} was removed from union type {old_type_name}.\"", ",", ")", ")", "return", "types_removed_from_union" ]
Find types removed from unions. Given two schemas, returns a list containing descriptions of any breaking changes in the new_schema related to removing types from a union type.
[ "Find", "types", "removed", "from", "unions", "." ]
python
train
42.275862
evandempsey/porter2-stemmer
porter2stemmer/porter2stemmer.py
https://github.com/evandempsey/porter2-stemmer/blob/949824b7767c25efb014ef738e682442fa70c10b/porter2stemmer/porter2stemmer.py#L124-L136
def strip_possessives(self, word): """ Get rid of apostrophes indicating possession. """ if word.endswith("'s'"): return word[:-3] elif word.endswith("'s"): return word[:-2] elif word.endswith("'"): return word[:-1] else: return word
[ "def", "strip_possessives", "(", "self", ",", "word", ")", ":", "if", "word", ".", "endswith", "(", "\"'s'\"", ")", ":", "return", "word", "[", ":", "-", "3", "]", "elif", "word", ".", "endswith", "(", "\"'s\"", ")", ":", "return", "word", "[", ":", "-", "2", "]", "elif", "word", ".", "endswith", "(", "\"'\"", ")", ":", "return", "word", "[", ":", "-", "1", "]", "else", ":", "return", "word" ]
Get rid of apostrophes indicating possession.
[ "Get", "rid", "of", "apostrophes", "indicating", "possession", "." ]
python
train
25.076923
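A quick usage sketch of the possessive-stripping step above; the class name Porter2Stemmer is an assumption based on the package name.

    from porter2stemmer import Porter2Stemmer

    stemmer = Porter2Stemmer()
    stemmer.strip_possessives("dog's")    # -> "dog"
    stemmer.strip_possessives("dogs'")    # -> "dogs"
    stemmer.strip_possessives("cat")      # -> "cat" (unchanged)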
pycontribs/pyrax
pyrax/cloudblockstorage.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/cloudblockstorage.py#L240-L245
def list_snapshots(self): """ Returns a list of all snapshots of this volume. """ return [snap for snap in self.manager.list_snapshots() if snap.volume_id == self.id]
[ "def", "list_snapshots", "(", "self", ")", ":", "return", "[", "snap", "for", "snap", "in", "self", ".", "manager", ".", "list_snapshots", "(", ")", "if", "snap", ".", "volume_id", "==", "self", ".", "id", "]" ]
Returns a list of all snapshots of this volume.
[ "Returns", "a", "list", "of", "all", "snapshots", "of", "this", "volume", "." ]
python
train
34.833333
kislyuk/ensure
ensure/main.py
https://github.com/kislyuk/ensure/blob/0a562a4b469ffbaf71c75dc4d394e94334c831f0/ensure/main.py#L481-L487
def matches(self, pattern, flags=0): """ Ensures :attr:`subject` matches regular expression *pattern*. """ if not re.match(pattern, self._subject, flags): raise self._error_factory(_format("Expected {} to match {}", self._subject, pattern)) return ChainInspector(self._subject)
[ "def", "matches", "(", "self", ",", "pattern", ",", "flags", "=", "0", ")", ":", "if", "not", "re", ".", "match", "(", "pattern", ",", "self", ".", "_subject", ",", "flags", ")", ":", "raise", "self", ".", "_error_factory", "(", "_format", "(", "\"Expected {} to match {}\"", ",", "self", ".", "_subject", ",", "pattern", ")", ")", "return", "ChainInspector", "(", "self", ".", "_subject", ")" ]
Ensures :attr:`subject` matches regular expression *pattern*.
[ "Ensures", ":", "attr", ":", "subject", "matches", "regular", "expression", "*", "pattern", "*", "." ]
python
train
46.142857
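A small usage sketch of the matches inspector above, via the ensure package's fluent entry point (assumed from the library's documented style).

    from ensure import ensure

    ensure("2019-01-01").matches(r"\d{4}-\d{2}-\d{2}")   # passes and returns a ChainInspector
    # ensure("not a date").matches(r"\d{4}-\d{2}-\d{2}") would raise the configured error instead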
iotile/coretools
iotileemulate/iotile/emulate/virtual/emulated_tile.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotileemulate/iotile/emulate/virtual/emulated_tile.py#L396-L406
def describe_config_variable(self, config_id): """Describe the config variable by its id.""" config = self._config_variables.get(config_id) if config is None: return [Error.INVALID_ARRAY_KEY, 0, 0, 0, 0] packed_size = config.total_size packed_size |= int(config.variable) << 15 return [0, 0, 0, config_id, packed_size]
[ "def", "describe_config_variable", "(", "self", ",", "config_id", ")", ":", "config", "=", "self", ".", "_config_variables", ".", "get", "(", "config_id", ")", "if", "config", "is", "None", ":", "return", "[", "Error", ".", "INVALID_ARRAY_KEY", ",", "0", ",", "0", ",", "0", ",", "0", "]", "packed_size", "=", "config", ".", "total_size", "packed_size", "|=", "int", "(", "config", ".", "variable", ")", "<<", "15", "return", "[", "0", ",", "0", ",", "0", ",", "config_id", ",", "packed_size", "]" ]
Describe the config variable by its id.
[ "Describe", "the", "config", "variable", "by", "its", "id", "." ]
python
train
33.727273
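The packed_size word built above folds the variable-length flag into bit 15; a hypothetical round-trip makes the layout explicit.

    total_size, variable = 24, True             # hypothetical config variable metadata
    packed_size = total_size | (int(variable) << 15)
    assert packed_size & 0x7FFF == 24           # low 15 bits: total size in bytes
    assert bool(packed_size >> 15) is True      # high bit: variable-length flag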
MSchnei/pyprf_feature
pyprf_feature/analysis/old/pRF_mdlCrt.py
https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/old/pRF_mdlCrt.py#L29-L66
def loadPng(varNumVol, tplPngSize, strPathPng): """Load PNG files. Parameters ---------- varNumVol : float Number of volumes, i.e. number of time points in all runs. tplPngSize : tuple Shape of the stimulus image (i.e. png). strPathPng: str Path to the folder cointaining the png files. Returns ------- aryPngData : 2d numpy array, shape [png_x, png_y, n_vols] Stack of stimulus data. """ print('------Load PNGs') # Create list of png files to load: lstPngPaths = [None] * varNumVol for idx01 in range(0, varNumVol): lstPngPaths[idx01] = (strPathPng + str(idx01) + '.png') # Load png files. The png data will be saved in a numpy array of the # following order: aryPngData[x-pixel, y-pixel, PngNumber]. The # sp.misc.imread function actually contains three values per pixel (RGB), # but since the stimuli are black-and-white, any one of these is sufficient # and we discard the others. aryPngData = np.zeros((tplPngSize[0], tplPngSize[1], varNumVol)) for idx01 in range(0, varNumVol): aryPngData[:, :, idx01] = np.array(Image.open(lstPngPaths[idx01])) # Convert RGB values (0 to 255) to integer ones and zeros: aryPngData = (aryPngData > 0).astype(int) return aryPngData
[ "def", "loadPng", "(", "varNumVol", ",", "tplPngSize", ",", "strPathPng", ")", ":", "print", "(", "'------Load PNGs'", ")", "# Create list of png files to load:", "lstPngPaths", "=", "[", "None", "]", "*", "varNumVol", "for", "idx01", "in", "range", "(", "0", ",", "varNumVol", ")", ":", "lstPngPaths", "[", "idx01", "]", "=", "(", "strPathPng", "+", "str", "(", "idx01", ")", "+", "'.png'", ")", "# Load png files. The png data will be saved in a numpy array of the", "# following order: aryPngData[x-pixel, y-pixel, PngNumber]. The", "# sp.misc.imread function actually contains three values per pixel (RGB),", "# but since the stimuli are black-and-white, any one of these is sufficient", "# and we discard the others.", "aryPngData", "=", "np", ".", "zeros", "(", "(", "tplPngSize", "[", "0", "]", ",", "tplPngSize", "[", "1", "]", ",", "varNumVol", ")", ")", "for", "idx01", "in", "range", "(", "0", ",", "varNumVol", ")", ":", "aryPngData", "[", ":", ",", ":", ",", "idx01", "]", "=", "np", ".", "array", "(", "Image", ".", "open", "(", "lstPngPaths", "[", "idx01", "]", ")", ")", "# Convert RGB values (0 to 255) to integer ones and zeros:", "aryPngData", "=", "(", "aryPngData", ">", "0", ")", ".", "astype", "(", "int", ")", "return", "aryPngData" ]
Load PNG files. Parameters ---------- varNumVol : float Number of volumes, i.e. number of time points in all runs. tplPngSize : tuple Shape of the stimulus image (i.e. png). strPathPng: str Path to the folder containing the png files. Returns ------- aryPngData : 2d numpy array, shape [png_x, png_y, n_vols] Stack of stimulus data.
[ "Load", "PNG", "files", "." ]
python
train
35.131579
productml/blurr
blurr/runner/spark_runner.py
https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/runner/spark_runner.py#L160-L177
def print_output(self, per_identity_data: 'RDD') -> None: """ Basic helper function to write data to stdout. If window BTS was provided then the window BTS output is written, otherwise, the streaming BTS output is written to stdout. WARNING - For large datasets this will be extremely slow. :param per_identity_data: Output of the `execute()` call. """ if not self._window_bts: data = per_identity_data.flatMap( lambda x: [json.dumps(data, cls=BlurrJSONEncoder) for data in x[1][0].items()]) else: # Convert to a DataFrame first so that the data can be saved as a CSV data = per_identity_data.map( lambda x: json.dumps((x[0], x[1][1]), cls=BlurrJSONEncoder)) for row in data.collect(): print(row)
[ "def", "print_output", "(", "self", ",", "per_identity_data", ":", "'RDD'", ")", "->", "None", ":", "if", "not", "self", ".", "_window_bts", ":", "data", "=", "per_identity_data", ".", "flatMap", "(", "lambda", "x", ":", "[", "json", ".", "dumps", "(", "data", ",", "cls", "=", "BlurrJSONEncoder", ")", "for", "data", "in", "x", "[", "1", "]", "[", "0", "]", ".", "items", "(", ")", "]", ")", "else", ":", "# Convert to a DataFrame first so that the data can be saved as a CSV", "data", "=", "per_identity_data", ".", "map", "(", "lambda", "x", ":", "json", ".", "dumps", "(", "(", "x", "[", "0", "]", ",", "x", "[", "1", "]", "[", "1", "]", ")", ",", "cls", "=", "BlurrJSONEncoder", ")", ")", "for", "row", "in", "data", ".", "collect", "(", ")", ":", "print", "(", "row", ")" ]
Basic helper function to write data to stdout. If window BTS was provided then the window BTS output is written, otherwise, the streaming BTS output is written to stdout. WARNING - For large datasets this will be extremely slow. :param per_identity_data: Output of the `execute()` call.
[ "Basic", "helper", "function", "to", "write", "data", "to", "stdout", ".", "If", "window", "BTS", "was", "provided", "then", "the", "window", "BTS", "output", "is", "written", "otherwise", "the", "streaming", "BTS", "output", "is", "written", "to", "stdout", "." ]
python
train
46.277778
benoitkugler/abstractDataLibrary
pyDLib/Core/formats.py
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/formats.py#L301-L303
def _type_string(label, case=None): """Shortcut for string like fields""" return label, abstractSearch.in_string, lambda s: abstractRender.default(s, case=case), ""
[ "def", "_type_string", "(", "label", ",", "case", "=", "None", ")", ":", "return", "label", ",", "abstractSearch", ".", "in_string", ",", "lambda", "s", ":", "abstractRender", ".", "default", "(", "s", ",", "case", "=", "case", ")", ",", "\"\"" ]
Shortcut for string like fields
[ "Shortcut", "for", "string", "like", "fields" ]
python
train
56.666667
armet/python-armet
armet/serializers/base.py
https://github.com/armet/python-armet/blob/d61eca9082256cb1e7f7f3c7f2fbc4b697157de7/armet/serializers/base.py#L28-L45
def serialize(self, data=None): """ Transforms the object into an acceptable format for transmission. @throws ValueError To indicate this serializer does not support the encoding of the specified object. """ if data is not None and self.response is not None: # Set the content type. self.response['Content-Type'] = self.media_types[0] # Write the encoded and prepared data to the response. self.response.write(data) # Return the serialized data. # This has normally been transformed by a base class. return data
[ "def", "serialize", "(", "self", ",", "data", "=", "None", ")", ":", "if", "data", "is", "not", "None", "and", "self", ".", "response", "is", "not", "None", ":", "# Set the content type.", "self", ".", "response", "[", "'Content-Type'", "]", "=", "self", ".", "media_types", "[", "0", "]", "# Write the encoded and prepared data to the response.", "self", ".", "response", ".", "write", "(", "data", ")", "# Return the serialized data.", "# This has normally been transformed by a base class.", "return", "data" ]
Transforms the object into an acceptable format for transmission. @throws ValueError To indicate this serializer does not support the encoding of the specified object.
[ "Transforms", "the", "object", "into", "an", "acceptable", "format", "for", "transmission", "." ]
python
valid
35.166667
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/visual.py
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/visual.py#L58-L75
def is_vert_aligned(c): """Return true if all the components of c are vertically aligned. Vertical alignment means that the bounding boxes of each Mention of c shares a similar x-axis value in the visual rendering of the document. :param c: The candidate to evaluate :rtype: boolean """ return all( [ _to_span(c[i]).sentence.is_visual() and bbox_vert_aligned( bbox_from_span(_to_span(c[i])), bbox_from_span(_to_span(c[0])) ) for i in range(len(c)) ] )
[ "def", "is_vert_aligned", "(", "c", ")", ":", "return", "all", "(", "[", "_to_span", "(", "c", "[", "i", "]", ")", ".", "sentence", ".", "is_visual", "(", ")", "and", "bbox_vert_aligned", "(", "bbox_from_span", "(", "_to_span", "(", "c", "[", "i", "]", ")", ")", ",", "bbox_from_span", "(", "_to_span", "(", "c", "[", "0", "]", ")", ")", ")", "for", "i", "in", "range", "(", "len", "(", "c", ")", ")", "]", ")" ]
Return true if all the components of c are vertically aligned. Vertical alignment means that the bounding boxes of each Mention of c shares a similar x-axis value in the visual rendering of the document. :param c: The candidate to evaluate :rtype: boolean
[ "Return", "true", "if", "all", "the", "components", "of", "c", "are", "vertically", "aligned", "." ]
python
train
30.444444
ray-project/ray
python/ray/function_manager.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/function_manager.py#L106-L140
def from_function(cls, function): """Create a FunctionDescriptor from a function instance. This function is used to create the function descriptor from a python function. If a function is a class function, it should not be used by this function. Args: cls: Current class which is required argument for classmethod. function: the python function used to create the function descriptor. Returns: The FunctionDescriptor instance created according to the function. """ module_name = function.__module__ function_name = function.__name__ class_name = "" function_source_hasher = hashlib.sha1() try: # If we are running a script or are in IPython, include the source # code in the hash. source = inspect.getsource(function) if sys.version_info[0] >= 3: source = source.encode() function_source_hasher.update(source) function_source_hash = function_source_hasher.digest() except (IOError, OSError, TypeError): # Source code may not be available: # e.g. Cython or Python interpreter. function_source_hash = b"" return cls(module_name, function_name, class_name, function_source_hash)
[ "def", "from_function", "(", "cls", ",", "function", ")", ":", "module_name", "=", "function", ".", "__module__", "function_name", "=", "function", ".", "__name__", "class_name", "=", "\"\"", "function_source_hasher", "=", "hashlib", ".", "sha1", "(", ")", "try", ":", "# If we are running a script or are in IPython, include the source", "# code in the hash.", "source", "=", "inspect", ".", "getsource", "(", "function", ")", "if", "sys", ".", "version_info", "[", "0", "]", ">=", "3", ":", "source", "=", "source", ".", "encode", "(", ")", "function_source_hasher", ".", "update", "(", "source", ")", "function_source_hash", "=", "function_source_hasher", ".", "digest", "(", ")", "except", "(", "IOError", ",", "OSError", ",", "TypeError", ")", ":", "# Source code may not be available:", "# e.g. Cython or Python interpreter.", "function_source_hash", "=", "b\"\"", "return", "cls", "(", "module_name", ",", "function_name", ",", "class_name", ",", "function_source_hash", ")" ]
Create a FunctionDescriptor from a function instance. This function is used to create the function descriptor from a python function. If a function is a class function, it should not be used by this function. Args: cls: Current class which is required argument for classmethod. function: the python function used to create the function descriptor. Returns: The FunctionDescriptor instance created according to the function.
[ "Create", "a", "FunctionDescriptor", "from", "a", "function", "instance", "." ]
python
train
38.657143
h2oai/h2o-3
h2o-bindings/bin/bindings.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-bindings/bin/bindings.py#L420-L444
def write_to_file(filename, content): """ Writes content to the given file. The file's directory will be created if needed. :param filename: name of the output file, relative to the "destination folder" provided by the user :param content: iterable (line-by-line) that should be written to the file. Either a list or a generator. Each line will be appended with a "\n". Lines containing None will be skipped. """ if not config["destdir"]: print("{destdir} config variable not present. Did you forget to run init()?") sys.exit(8) abs_filename = os.path.abspath(config["destdir"] + "/" + filename) abs_filepath = os.path.dirname(abs_filename) if not os.path.exists(abs_filepath): try: os.makedirs(abs_filepath) except OSError as e: print("Cannot create directory " + abs_filepath) print("Error %d: %s" % (e.errno, e.strerror)) sys.exit(6) with codecs.open(abs_filename, "w", "utf-8") as out: if isinstance(content, str): content = [content] for line in content: if line is not None: out.write(line) out.write("\n")
[ "def", "write_to_file", "(", "filename", ",", "content", ")", ":", "if", "not", "config", "[", "\"destdir\"", "]", ":", "print", "(", "\"{destdir} config variable not present. Did you forget to run init()?\"", ")", "sys", ".", "exit", "(", "8", ")", "abs_filename", "=", "os", ".", "path", ".", "abspath", "(", "config", "[", "\"destdir\"", "]", "+", "\"/\"", "+", "filename", ")", "abs_filepath", "=", "os", ".", "path", ".", "dirname", "(", "abs_filename", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "abs_filepath", ")", ":", "try", ":", "os", ".", "makedirs", "(", "abs_filepath", ")", "except", "OSError", "as", "e", ":", "print", "(", "\"Cannot create directory \"", "+", "abs_filepath", ")", "print", "(", "\"Error %d: %s\"", "%", "(", "e", ".", "errno", ",", "e", ".", "strerror", ")", ")", "sys", ".", "exit", "(", "6", ")", "with", "codecs", ".", "open", "(", "abs_filename", ",", "\"w\"", ",", "\"utf-8\"", ")", "as", "out", ":", "if", "isinstance", "(", "content", ",", "str", ")", ":", "content", "=", "[", "content", "]", "for", "line", "in", "content", ":", "if", "line", "is", "not", "None", ":", "out", ".", "write", "(", "line", ")", "out", ".", "write", "(", "\"\\n\"", ")" ]
Writes content to the given file. The file's directory will be created if needed. :param filename: name of the output file, relative to the "destination folder" provided by the user :param content: iterable (line-by-line) that should be written to the file. Either a list or a generator. Each line will be appended with a "\n". Lines containing None will be skipped.
[ "Writes", "content", "to", "the", "given", "file", ".", "The", "file", "s", "directory", "will", "be", "created", "if", "needed", ".", ":", "param", "filename", ":", "name", "of", "the", "output", "file", "relative", "to", "the", "destination", "folder", "provided", "by", "the", "user", ":", "param", "content", ":", "iterable", "(", "line", "-", "by", "-", "line", ")", "that", "should", "be", "written", "to", "the", "file", ".", "Either", "a", "list", "or", "a", "generator", ".", "Each", "line", "will", "be", "appended", "with", "a", "\\", "n", ".", "Lines", "containing", "None", "will", "be", "skipped", "." ]
python
test
47.76
wummel/dosage
dosagelib/plugins/s.py
https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/dosagelib/plugins/s.py#L332-L336
def namer(cls, imageUrl, pageUrl): """Use strip index number for image name.""" index = int(compile(r'id=(\d+)').search(pageUrl).group(1)) ext = imageUrl.rsplit('.', 1)[1] return "SnowFlakes-%d.%s" % (index, ext)
[ "def", "namer", "(", "cls", ",", "imageUrl", ",", "pageUrl", ")", ":", "index", "=", "int", "(", "compile", "(", "r'id=(\\d+)'", ")", ".", "search", "(", "pageUrl", ")", ".", "group", "(", "1", ")", ")", "ext", "=", "imageUrl", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "1", "]", "return", "\"SnowFlakes-%d.%s\"", "%", "(", "index", ",", "ext", ")" ]
Use strip index number for image name.
[ "Use", "strip", "index", "number", "for", "image", "name", "." ]
python
train
48
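The naming rule above only needs the strip index from the page URL and the image extension; this standalone sketch uses made-up URLs and plain stdlib calls rather than dosage's plugin machinery.

    from re import compile

    pageUrl = "http://www.example.com/comics/index.php?id=123"   # hypothetical page URL
    imageUrl = "http://www.example.com/strips/latest.png"        # hypothetical image URL
    index = int(compile(r"id=(\d+)").search(pageUrl).group(1))
    ext = imageUrl.rsplit(".", 1)[1]
    print("SnowFlakes-%d.%s" % (index, ext))                     # SnowFlakes-123.png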
CxAalto/gtfspy
gtfspy/spreading/spreading_stop.py
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/spreading/spreading_stop.py#L23-L51
def visit(self, event): """ Visit the stop if it has not been visited already by an event with earlier arr_time_ut (or with other trip that does not require a transfer) Parameters ---------- event : Event an instance of the Event (namedtuple) Returns ------- visited : bool if visit is stored, returns True, otherwise False """ to_visit = False if event.arr_time_ut <= self.min_transfer_time+self.get_min_visit_time(): to_visit = True else: for ve in self.visit_events: if (event.trip_I == ve.trip_I) and event.arr_time_ut < ve.arr_time_ut: to_visit = True if to_visit: self.visit_events.append(event) min_time = self.get_min_visit_time() # remove any visits that are 'too old' self.visit_events = [v for v in self.visit_events if v.arr_time_ut <= min_time+self.min_transfer_time] return to_visit
[ "def", "visit", "(", "self", ",", "event", ")", ":", "to_visit", "=", "False", "if", "event", ".", "arr_time_ut", "<=", "self", ".", "min_transfer_time", "+", "self", ".", "get_min_visit_time", "(", ")", ":", "to_visit", "=", "True", "else", ":", "for", "ve", "in", "self", ".", "visit_events", ":", "if", "(", "event", ".", "trip_I", "==", "ve", ".", "trip_I", ")", "and", "event", ".", "arr_time_ut", "<", "ve", ".", "arr_time_ut", ":", "to_visit", "=", "True", "if", "to_visit", ":", "self", ".", "visit_events", ".", "append", "(", "event", ")", "min_time", "=", "self", ".", "get_min_visit_time", "(", ")", "# remove any visits that are 'too old'", "self", ".", "visit_events", "=", "[", "v", "for", "v", "in", "self", ".", "visit_events", "if", "v", ".", "arr_time_ut", "<=", "min_time", "+", "self", ".", "min_transfer_time", "]", "return", "to_visit" ]
Visit the stop if it has not been visited already by an event with earlier arr_time_ut (or with other trip that does not require a transfer) Parameters ---------- event : Event an instance of the Event (namedtuple) Returns ------- visited : bool if visit is stored, returns True, otherwise False
[ "Visit", "the", "stop", "if", "it", "has", "not", "been", "visited", "already", "by", "an", "event", "with", "earlier", "arr_time_ut", "(", "or", "with", "other", "trip", "that", "does", "not", "require", "a", "transfer", ")" ]
python
valid
35.275862
treycucco/bidon
bidon/db/access/data_access.py
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/db/access/data_access.py#L446-L458
def autocommit(data_access): """Make statements autocommit. :param data_access: a DataAccess instance """ if not data_access.autocommit: data_access.commit() old_autocommit = data_access.autocommit data_access.autocommit = True try: yield data_access finally: data_access.autocommit = old_autocommit
[ "def", "autocommit", "(", "data_access", ")", ":", "if", "not", "data_access", ".", "autocommit", ":", "data_access", ".", "commit", "(", ")", "old_autocommit", "=", "data_access", ".", "autocommit", "data_access", ".", "autocommit", "=", "True", "try", ":", "yield", "data_access", "finally", ":", "data_access", ".", "autocommit", "=", "old_autocommit" ]
Make statements autocommit. :param data_access: a DataAccess instance
[ "Make", "statements", "autocommit", "." ]
python
train
24.307692
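A small behavioral sketch of the autocommit helper above; it assumes the yield-based generator is exposed as a context manager (e.g. via contextlib.contextmanager) in bidon, and it uses a minimal stand-in object instead of a real database connection.

    from bidon.db.access.data_access import autocommit   # import path taken from the record

    class FakeAccess:              # stand-in exposing only what autocommit() touches
        autocommit = False
        def commit(self):
            pass

    dao = FakeAccess()
    with autocommit(dao):
        assert dao.autocommit is True    # statements issued here would auto-commit
    assert dao.autocommit is False       # previous setting restored on exit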
CiscoTestAutomation/yang
ncdiff/src/yang/ncdiff/calculator.py
https://github.com/CiscoTestAutomation/yang/blob/c70ec5ac5a91f276c4060009203770ece92e76b4/ncdiff/src/yang/ncdiff/calculator.py#L295-L359
def _node_le(self, node_self, node_other): '''_node_le Low-level api: Return True if all descendants of one node exist in the other node. Otherwise False. This is a recursive method. Parameters ---------- node_self : `Element` A node to be compared. node_other : `Element` Another node to be compared. Returns ------- bool True if all descendants of node_self exist in node_other, otherwise False. ''' for x in ['tag', 'text', 'tail']: if node_self.__getattribute__(x) != node_other.__getattribute__(x): return False for a in node_self.attrib: if a not in node_other.attrib or \ node_self.attrib[a] != node_other.attrib[a]: return False for child in node_self.getchildren(): peers = self._get_peers(child, node_other) if len(peers) < 1: return False elif len(peers) > 1: raise ConfigError('not unique peer of node {}' \ .format(self.device.get_xpath(child))) else: schma_node = self.device.get_schema_node(child) if schma_node.get('ordered-by') == 'user' and \ schma_node.get('type') == 'leaf-list' or \ schma_node.get('ordered-by') == 'user' and \ schma_node.get('type') == 'list': elder_siblings = list(child.itersiblings(tag=child.tag, preceding=True)) if elder_siblings: immediate_elder_sibling = elder_siblings[0] peers_of_immediate_elder_sibling = \ self._get_peers(immediate_elder_sibling, node_other) if len(peers_of_immediate_elder_sibling) < 1: return False elif len(peers_of_immediate_elder_sibling) > 1: p = self.device.get_xpath(immediate_elder_sibling) raise ConfigError('not unique peer of node {}' \ .format(p)) elder_siblings_of_peer = \ list(peers[0].itersiblings(tag=child.tag, preceding=True)) if peers_of_immediate_elder_sibling[0] not in \ elder_siblings_of_peer: return False if not self._node_le(child, peers[0]): return False return True
[ "def", "_node_le", "(", "self", ",", "node_self", ",", "node_other", ")", ":", "for", "x", "in", "[", "'tag'", ",", "'text'", ",", "'tail'", "]", ":", "if", "node_self", ".", "__getattribute__", "(", "x", ")", "!=", "node_other", ".", "__getattribute__", "(", "x", ")", ":", "return", "False", "for", "a", "in", "node_self", ".", "attrib", ":", "if", "a", "not", "in", "node_other", ".", "attrib", "or", "node_self", ".", "attrib", "[", "a", "]", "!=", "node_other", ".", "attrib", "[", "a", "]", ":", "return", "False", "for", "child", "in", "node_self", ".", "getchildren", "(", ")", ":", "peers", "=", "self", ".", "_get_peers", "(", "child", ",", "node_other", ")", "if", "len", "(", "peers", ")", "<", "1", ":", "return", "False", "elif", "len", "(", "peers", ")", ">", "1", ":", "raise", "ConfigError", "(", "'not unique peer of node {}'", ".", "format", "(", "self", ".", "device", ".", "get_xpath", "(", "child", ")", ")", ")", "else", ":", "schma_node", "=", "self", ".", "device", ".", "get_schema_node", "(", "child", ")", "if", "schma_node", ".", "get", "(", "'ordered-by'", ")", "==", "'user'", "and", "schma_node", ".", "get", "(", "'type'", ")", "==", "'leaf-list'", "or", "schma_node", ".", "get", "(", "'ordered-by'", ")", "==", "'user'", "and", "schma_node", ".", "get", "(", "'type'", ")", "==", "'list'", ":", "elder_siblings", "=", "list", "(", "child", ".", "itersiblings", "(", "tag", "=", "child", ".", "tag", ",", "preceding", "=", "True", ")", ")", "if", "elder_siblings", ":", "immediate_elder_sibling", "=", "elder_siblings", "[", "0", "]", "peers_of_immediate_elder_sibling", "=", "self", ".", "_get_peers", "(", "immediate_elder_sibling", ",", "node_other", ")", "if", "len", "(", "peers_of_immediate_elder_sibling", ")", "<", "1", ":", "return", "False", "elif", "len", "(", "peers_of_immediate_elder_sibling", ")", ">", "1", ":", "p", "=", "self", ".", "device", ".", "get_xpath", "(", "immediate_elder_sibling", ")", "raise", "ConfigError", "(", "'not unique peer of node {}'", ".", "format", "(", "p", ")", ")", "elder_siblings_of_peer", "=", "list", "(", "peers", "[", "0", "]", ".", "itersiblings", "(", "tag", "=", "child", ".", "tag", ",", "preceding", "=", "True", ")", ")", "if", "peers_of_immediate_elder_sibling", "[", "0", "]", "not", "in", "elder_siblings_of_peer", ":", "return", "False", "if", "not", "self", ".", "_node_le", "(", "child", ",", "peers", "[", "0", "]", ")", ":", "return", "False", "return", "True" ]
_node_le Low-level api: Return True if all descendants of one node exist in the other node. Otherwise False. This is a recursive method. Parameters ---------- node_self : `Element` A node to be compared. node_other : `Element` Another node to be compared. Returns ------- bool True if all descendants of node_self exist in node_other, otherwise False.
[ "_node_le" ]
python
train
42.584615
softlayer/softlayer-python
SoftLayer/CLI/loadbal/service_add.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/loadbal/service_add.py#L31-L53
def cli(env, identifier, enabled, port, weight, healthcheck_type, ip_address): """Adds a new load balancer service.""" mgr = SoftLayer.LoadBalancerManager(env.client) loadbal_id, group_id = loadbal.parse_id(identifier) # check if the IP is valid ip_address_id = None if ip_address: ip_service = env.client['Network_Subnet_IpAddress'] ip_record = ip_service.getByIpAddress(ip_address) if len(ip_record) > 0: ip_address_id = ip_record['id'] mgr.add_service(loadbal_id, group_id, ip_address_id=ip_address_id, enabled=enabled, port=port, weight=weight, hc_type=healthcheck_type) env.fout('Load balancer service is being added!')
[ "def", "cli", "(", "env", ",", "identifier", ",", "enabled", ",", "port", ",", "weight", ",", "healthcheck_type", ",", "ip_address", ")", ":", "mgr", "=", "SoftLayer", ".", "LoadBalancerManager", "(", "env", ".", "client", ")", "loadbal_id", ",", "group_id", "=", "loadbal", ".", "parse_id", "(", "identifier", ")", "# check if the IP is valid", "ip_address_id", "=", "None", "if", "ip_address", ":", "ip_service", "=", "env", ".", "client", "[", "'Network_Subnet_IpAddress'", "]", "ip_record", "=", "ip_service", ".", "getByIpAddress", "(", "ip_address", ")", "if", "len", "(", "ip_record", ")", ">", "0", ":", "ip_address_id", "=", "ip_record", "[", "'id'", "]", "mgr", ".", "add_service", "(", "loadbal_id", ",", "group_id", ",", "ip_address_id", "=", "ip_address_id", ",", "enabled", "=", "enabled", ",", "port", "=", "port", ",", "weight", "=", "weight", ",", "hc_type", "=", "healthcheck_type", ")", "env", ".", "fout", "(", "'Load balancer service is being added!'", ")" ]
Adds a new load balancer service.
[ "Adds", "a", "new", "load", "balancer", "service", "." ]
python
train
34.521739
waqasbhatti/astrobase
astrobase/lcproc/checkplotgen.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcproc/checkplotgen.py#L102-L394
def update_checkplotdict_nbrlcs( checkplotdict, timecol, magcol, errcol, lcformat='hat-sql', lcformatdir=None, verbose=True, ): '''For all neighbors in a checkplotdict, make LCs and phased LCs. Parameters ---------- checkplotdict : dict This is the checkplot to process. The light curves for the neighbors to the object here will be extracted from the stored file paths, and this function will make plots of these time-series. If the object has 'best' periods and epochs generated by period-finder functions in this checkplotdict, phased light curve plots of each neighbor will be made using these to check the effects of blending. timecol,magcol,errcol : str The timecol, magcol, and errcol keys used to generate this object's checkplot. This is used to extract the correct times-series from the neighbors' light curves. lcformat : str This is the `formatkey` associated with your light curve format, which you previously passed in to the `lcproc.register_lcformat` function. This will be used to look up how to find and read the light curves specified in `basedir` or `use_list_of_filenames`. lcformatdir : str or None If this is provided, gives the path to a directory when you've stored your lcformat description JSONs, other than the usual directories lcproc knows to search for them in. Use this along with `lcformat` to specify an LC format JSON file that's not currently registered with lcproc. Returns ------- dict The input checkplotdict is returned with the neighor light curve plots added in. ''' try: formatinfo = get_lcformat(lcformat, use_lcformat_dir=lcformatdir) if formatinfo: (dfileglob, readerfunc, dtimecols, dmagcols, derrcols, magsarefluxes, normfunc) = formatinfo else: LOGERROR("can't figure out the light curve format") return checkplotdict except Exception as e: LOGEXCEPTION("can't figure out the light curve format") return checkplotdict if not ('neighbors' in checkplotdict and checkplotdict['neighbors'] and len(checkplotdict['neighbors']) > 0): LOGERROR('no neighbors for %s, not updating...' % (checkplotdict['objectid'])) return checkplotdict # get our object's magkeys to compare to the neighbor objmagkeys = {} # handle diff generations of checkplots if 'available_bands' in checkplotdict['objectinfo']: mclist = checkplotdict['objectinfo']['available_bands'] else: mclist = ('bmag','vmag','rmag','imag','jmag','hmag','kmag', 'sdssu','sdssg','sdssr','sdssi','sdssz') for mc in mclist: if (mc in checkplotdict['objectinfo'] and checkplotdict['objectinfo'][mc] is not None and np.isfinite(checkplotdict['objectinfo'][mc])): objmagkeys[mc] = checkplotdict['objectinfo'][mc] # if there are actually neighbors, go through them in order for nbr in checkplotdict['neighbors']: objectid, lcfpath = (nbr['objectid'], nbr['lcfpath']) # get the light curve if not os.path.exists(lcfpath): LOGERROR('objectid: %s, neighbor: %s, ' 'lightcurve: %s not found, skipping...' % (checkplotdict['objectid'], objectid, lcfpath)) continue lcdict = readerfunc(lcfpath) # this should handle lists/tuples being returned by readerfunc # we assume that the first element is the actual lcdict # FIXME: figure out how to not need this assumption if ( (isinstance(lcdict, (list, tuple))) and (isinstance(lcdict[0], dict)) ): lcdict = lcdict[0] # 0. 
get this neighbor's magcols and get the magdiff and colordiff # between it and the object nbrmagkeys = {} for mc in objmagkeys: if (('objectinfo' in lcdict) and (isinstance(lcdict['objectinfo'], dict)) and (mc in lcdict['objectinfo']) and (lcdict['objectinfo'][mc] is not None) and (np.isfinite(lcdict['objectinfo'][mc]))): nbrmagkeys[mc] = lcdict['objectinfo'][mc] # now calculate the magdiffs magdiffs = {} for omc in objmagkeys: if omc in nbrmagkeys: magdiffs[omc] = objmagkeys[omc] - nbrmagkeys[omc] # calculate colors and colordiffs colordiffs = {} # generate the list of colors to get # NOTE: here, we don't really bother with new/old gen checkplots # maybe change this later to handle arbitrary colors for ctrio in (['bmag','vmag','bvcolor'], ['vmag','kmag','vkcolor'], ['jmag','kmag','jkcolor'], ['sdssi','jmag','ijcolor'], ['sdssg','kmag','gkcolor'], ['sdssg','sdssr','grcolor']): m1, m2, color = ctrio if (m1 in objmagkeys and m2 in objmagkeys and m1 in nbrmagkeys and m2 in nbrmagkeys): objcolor = objmagkeys[m1] - objmagkeys[m2] nbrcolor = nbrmagkeys[m1] - nbrmagkeys[m2] colordiffs[color] = objcolor - nbrcolor # finally, add all the color and magdiff info to the nbr dict nbr.update({'magdiffs':magdiffs, 'colordiffs':colordiffs}) # # process magcols # # normalize using the special function if specified if normfunc is not None: lcdict = normfunc(lcdict) try: # get the times, mags, and errs # dereference the columns and get them from the lcdict if '.' in timecol: timecolget = timecol.split('.') else: timecolget = [timecol] times = _dict_get(lcdict, timecolget) if '.' in magcol: magcolget = magcol.split('.') else: magcolget = [magcol] mags = _dict_get(lcdict, magcolget) if '.' in errcol: errcolget = errcol.split('.') else: errcolget = [errcol] errs = _dict_get(lcdict, errcolget) except KeyError: LOGERROR('LC for neighbor: %s (target object: %s) does not ' 'have one or more of the required columns: %s, ' 'skipping...' % (objectid, checkplotdict['objectid'], ', '.join([timecol, magcol, errcol]))) continue # filter the input times, mags, errs; do sigclipping and normalization stimes, smags, serrs = sigclip_magseries(times, mags, errs, magsarefluxes=magsarefluxes, sigclip=4.0) # normalize here if not using special normalization if normfunc is None: ntimes, nmags = normalize_magseries( stimes, smags, magsarefluxes=magsarefluxes ) xtimes, xmags, xerrs = ntimes, nmags, serrs else: xtimes, xmags, xerrs = stimes, smags, serrs # check if this neighbor has enough finite points in its LC # fail early if not enough light curve points if ((xtimes is None) or (xmags is None) or (xerrs is None) or (xtimes.size < 49) or (xmags.size < 49) or (xerrs.size < 49)): LOGERROR("one or more of times, mags, errs appear to be None " "after sig-clipping. are the measurements all nan? 
" "can't make neighbor light curve plots " "for target: %s, neighbor: %s, neighbor LC: %s" % (checkplotdict['objectid'], nbr['objectid'], nbr['lcfpath'])) continue # # now we can start doing stuff if everything checks out # # make an unphased mag-series plot nbrdict = _pkl_magseries_plot(xtimes, xmags, xerrs, magsarefluxes=magsarefluxes) # update the nbr nbr.update(nbrdict) # for each lspmethod in the checkplot, make a corresponding plot for # this neighbor # figure out the period finder methods present if 'pfmethods' in checkplotdict: pfmethods = checkplotdict['pfmethods'] else: pfmethods = [] for cpkey in checkplotdict: for pfkey in PFMETHODS: if pfkey in cpkey: pfmethods.append(pfkey) for lspt in pfmethods: # initialize this lspmethod entry nbr[lspt] = {} # we only care about the best period and its options operiod, oepoch = (checkplotdict[lspt][0]['period'], checkplotdict[lspt][0]['epoch']) (ophasewrap, ophasesort, ophasebin, ominbinelems, oplotxlim) = ( checkplotdict[lspt][0]['phasewrap'], checkplotdict[lspt][0]['phasesort'], checkplotdict[lspt][0]['phasebin'], checkplotdict[lspt][0]['minbinelems'], checkplotdict[lspt][0]['plotxlim'], ) # make the phasedlc plot for this period nbr = _pkl_phased_magseries_plot( nbr, lspt.split('-')[1], # this splits '<pfindex>-<pfmethod>' 0, xtimes, xmags, xerrs, operiod, oepoch, phasewrap=ophasewrap, phasesort=ophasesort, phasebin=ophasebin, minbinelems=ominbinelems, plotxlim=oplotxlim, magsarefluxes=magsarefluxes, verbose=verbose, override_pfmethod=lspt ) # at this point, this neighbor's dict should be up to date with all # info, magseries plot, and all phased LC plots # return the updated checkplotdict return checkplotdict
[ "def", "update_checkplotdict_nbrlcs", "(", "checkplotdict", ",", "timecol", ",", "magcol", ",", "errcol", ",", "lcformat", "=", "'hat-sql'", ",", "lcformatdir", "=", "None", ",", "verbose", "=", "True", ",", ")", ":", "try", ":", "formatinfo", "=", "get_lcformat", "(", "lcformat", ",", "use_lcformat_dir", "=", "lcformatdir", ")", "if", "formatinfo", ":", "(", "dfileglob", ",", "readerfunc", ",", "dtimecols", ",", "dmagcols", ",", "derrcols", ",", "magsarefluxes", ",", "normfunc", ")", "=", "formatinfo", "else", ":", "LOGERROR", "(", "\"can't figure out the light curve format\"", ")", "return", "checkplotdict", "except", "Exception", "as", "e", ":", "LOGEXCEPTION", "(", "\"can't figure out the light curve format\"", ")", "return", "checkplotdict", "if", "not", "(", "'neighbors'", "in", "checkplotdict", "and", "checkplotdict", "[", "'neighbors'", "]", "and", "len", "(", "checkplotdict", "[", "'neighbors'", "]", ")", ">", "0", ")", ":", "LOGERROR", "(", "'no neighbors for %s, not updating...'", "%", "(", "checkplotdict", "[", "'objectid'", "]", ")", ")", "return", "checkplotdict", "# get our object's magkeys to compare to the neighbor", "objmagkeys", "=", "{", "}", "# handle diff generations of checkplots", "if", "'available_bands'", "in", "checkplotdict", "[", "'objectinfo'", "]", ":", "mclist", "=", "checkplotdict", "[", "'objectinfo'", "]", "[", "'available_bands'", "]", "else", ":", "mclist", "=", "(", "'bmag'", ",", "'vmag'", ",", "'rmag'", ",", "'imag'", ",", "'jmag'", ",", "'hmag'", ",", "'kmag'", ",", "'sdssu'", ",", "'sdssg'", ",", "'sdssr'", ",", "'sdssi'", ",", "'sdssz'", ")", "for", "mc", "in", "mclist", ":", "if", "(", "mc", "in", "checkplotdict", "[", "'objectinfo'", "]", "and", "checkplotdict", "[", "'objectinfo'", "]", "[", "mc", "]", "is", "not", "None", "and", "np", ".", "isfinite", "(", "checkplotdict", "[", "'objectinfo'", "]", "[", "mc", "]", ")", ")", ":", "objmagkeys", "[", "mc", "]", "=", "checkplotdict", "[", "'objectinfo'", "]", "[", "mc", "]", "# if there are actually neighbors, go through them in order", "for", "nbr", "in", "checkplotdict", "[", "'neighbors'", "]", ":", "objectid", ",", "lcfpath", "=", "(", "nbr", "[", "'objectid'", "]", ",", "nbr", "[", "'lcfpath'", "]", ")", "# get the light curve", "if", "not", "os", ".", "path", ".", "exists", "(", "lcfpath", ")", ":", "LOGERROR", "(", "'objectid: %s, neighbor: %s, '", "'lightcurve: %s not found, skipping...'", "%", "(", "checkplotdict", "[", "'objectid'", "]", ",", "objectid", ",", "lcfpath", ")", ")", "continue", "lcdict", "=", "readerfunc", "(", "lcfpath", ")", "# this should handle lists/tuples being returned by readerfunc", "# we assume that the first element is the actual lcdict", "# FIXME: figure out how to not need this assumption", "if", "(", "(", "isinstance", "(", "lcdict", ",", "(", "list", ",", "tuple", ")", ")", ")", "and", "(", "isinstance", "(", "lcdict", "[", "0", "]", ",", "dict", ")", ")", ")", ":", "lcdict", "=", "lcdict", "[", "0", "]", "# 0. 
get this neighbor's magcols and get the magdiff and colordiff", "# between it and the object", "nbrmagkeys", "=", "{", "}", "for", "mc", "in", "objmagkeys", ":", "if", "(", "(", "'objectinfo'", "in", "lcdict", ")", "and", "(", "isinstance", "(", "lcdict", "[", "'objectinfo'", "]", ",", "dict", ")", ")", "and", "(", "mc", "in", "lcdict", "[", "'objectinfo'", "]", ")", "and", "(", "lcdict", "[", "'objectinfo'", "]", "[", "mc", "]", "is", "not", "None", ")", "and", "(", "np", ".", "isfinite", "(", "lcdict", "[", "'objectinfo'", "]", "[", "mc", "]", ")", ")", ")", ":", "nbrmagkeys", "[", "mc", "]", "=", "lcdict", "[", "'objectinfo'", "]", "[", "mc", "]", "# now calculate the magdiffs", "magdiffs", "=", "{", "}", "for", "omc", "in", "objmagkeys", ":", "if", "omc", "in", "nbrmagkeys", ":", "magdiffs", "[", "omc", "]", "=", "objmagkeys", "[", "omc", "]", "-", "nbrmagkeys", "[", "omc", "]", "# calculate colors and colordiffs", "colordiffs", "=", "{", "}", "# generate the list of colors to get", "# NOTE: here, we don't really bother with new/old gen checkplots", "# maybe change this later to handle arbitrary colors", "for", "ctrio", "in", "(", "[", "'bmag'", ",", "'vmag'", ",", "'bvcolor'", "]", ",", "[", "'vmag'", ",", "'kmag'", ",", "'vkcolor'", "]", ",", "[", "'jmag'", ",", "'kmag'", ",", "'jkcolor'", "]", ",", "[", "'sdssi'", ",", "'jmag'", ",", "'ijcolor'", "]", ",", "[", "'sdssg'", ",", "'kmag'", ",", "'gkcolor'", "]", ",", "[", "'sdssg'", ",", "'sdssr'", ",", "'grcolor'", "]", ")", ":", "m1", ",", "m2", ",", "color", "=", "ctrio", "if", "(", "m1", "in", "objmagkeys", "and", "m2", "in", "objmagkeys", "and", "m1", "in", "nbrmagkeys", "and", "m2", "in", "nbrmagkeys", ")", ":", "objcolor", "=", "objmagkeys", "[", "m1", "]", "-", "objmagkeys", "[", "m2", "]", "nbrcolor", "=", "nbrmagkeys", "[", "m1", "]", "-", "nbrmagkeys", "[", "m2", "]", "colordiffs", "[", "color", "]", "=", "objcolor", "-", "nbrcolor", "# finally, add all the color and magdiff info to the nbr dict", "nbr", ".", "update", "(", "{", "'magdiffs'", ":", "magdiffs", ",", "'colordiffs'", ":", "colordiffs", "}", ")", "#", "# process magcols", "#", "# normalize using the special function if specified", "if", "normfunc", "is", "not", "None", ":", "lcdict", "=", "normfunc", "(", "lcdict", ")", "try", ":", "# get the times, mags, and errs", "# dereference the columns and get them from the lcdict", "if", "'.'", "in", "timecol", ":", "timecolget", "=", "timecol", ".", "split", "(", "'.'", ")", "else", ":", "timecolget", "=", "[", "timecol", "]", "times", "=", "_dict_get", "(", "lcdict", ",", "timecolget", ")", "if", "'.'", "in", "magcol", ":", "magcolget", "=", "magcol", ".", "split", "(", "'.'", ")", "else", ":", "magcolget", "=", "[", "magcol", "]", "mags", "=", "_dict_get", "(", "lcdict", ",", "magcolget", ")", "if", "'.'", "in", "errcol", ":", "errcolget", "=", "errcol", ".", "split", "(", "'.'", ")", "else", ":", "errcolget", "=", "[", "errcol", "]", "errs", "=", "_dict_get", "(", "lcdict", ",", "errcolget", ")", "except", "KeyError", ":", "LOGERROR", "(", "'LC for neighbor: %s (target object: %s) does not '", "'have one or more of the required columns: %s, '", "'skipping...'", "%", "(", "objectid", ",", "checkplotdict", "[", "'objectid'", "]", ",", "', '", ".", "join", "(", "[", "timecol", ",", "magcol", ",", "errcol", "]", ")", ")", ")", "continue", "# filter the input times, mags, errs; do sigclipping and normalization", "stimes", ",", "smags", ",", "serrs", "=", "sigclip_magseries", "(", "times", ",", "mags", ",", "errs", ",", 
"magsarefluxes", "=", "magsarefluxes", ",", "sigclip", "=", "4.0", ")", "# normalize here if not using special normalization", "if", "normfunc", "is", "None", ":", "ntimes", ",", "nmags", "=", "normalize_magseries", "(", "stimes", ",", "smags", ",", "magsarefluxes", "=", "magsarefluxes", ")", "xtimes", ",", "xmags", ",", "xerrs", "=", "ntimes", ",", "nmags", ",", "serrs", "else", ":", "xtimes", ",", "xmags", ",", "xerrs", "=", "stimes", ",", "smags", ",", "serrs", "# check if this neighbor has enough finite points in its LC", "# fail early if not enough light curve points", "if", "(", "(", "xtimes", "is", "None", ")", "or", "(", "xmags", "is", "None", ")", "or", "(", "xerrs", "is", "None", ")", "or", "(", "xtimes", ".", "size", "<", "49", ")", "or", "(", "xmags", ".", "size", "<", "49", ")", "or", "(", "xerrs", ".", "size", "<", "49", ")", ")", ":", "LOGERROR", "(", "\"one or more of times, mags, errs appear to be None \"", "\"after sig-clipping. are the measurements all nan? \"", "\"can't make neighbor light curve plots \"", "\"for target: %s, neighbor: %s, neighbor LC: %s\"", "%", "(", "checkplotdict", "[", "'objectid'", "]", ",", "nbr", "[", "'objectid'", "]", ",", "nbr", "[", "'lcfpath'", "]", ")", ")", "continue", "#", "# now we can start doing stuff if everything checks out", "#", "# make an unphased mag-series plot", "nbrdict", "=", "_pkl_magseries_plot", "(", "xtimes", ",", "xmags", ",", "xerrs", ",", "magsarefluxes", "=", "magsarefluxes", ")", "# update the nbr", "nbr", ".", "update", "(", "nbrdict", ")", "# for each lspmethod in the checkplot, make a corresponding plot for", "# this neighbor", "# figure out the period finder methods present", "if", "'pfmethods'", "in", "checkplotdict", ":", "pfmethods", "=", "checkplotdict", "[", "'pfmethods'", "]", "else", ":", "pfmethods", "=", "[", "]", "for", "cpkey", "in", "checkplotdict", ":", "for", "pfkey", "in", "PFMETHODS", ":", "if", "pfkey", "in", "cpkey", ":", "pfmethods", ".", "append", "(", "pfkey", ")", "for", "lspt", "in", "pfmethods", ":", "# initialize this lspmethod entry", "nbr", "[", "lspt", "]", "=", "{", "}", "# we only care about the best period and its options", "operiod", ",", "oepoch", "=", "(", "checkplotdict", "[", "lspt", "]", "[", "0", "]", "[", "'period'", "]", ",", "checkplotdict", "[", "lspt", "]", "[", "0", "]", "[", "'epoch'", "]", ")", "(", "ophasewrap", ",", "ophasesort", ",", "ophasebin", ",", "ominbinelems", ",", "oplotxlim", ")", "=", "(", "checkplotdict", "[", "lspt", "]", "[", "0", "]", "[", "'phasewrap'", "]", ",", "checkplotdict", "[", "lspt", "]", "[", "0", "]", "[", "'phasesort'", "]", ",", "checkplotdict", "[", "lspt", "]", "[", "0", "]", "[", "'phasebin'", "]", ",", "checkplotdict", "[", "lspt", "]", "[", "0", "]", "[", "'minbinelems'", "]", ",", "checkplotdict", "[", "lspt", "]", "[", "0", "]", "[", "'plotxlim'", "]", ",", ")", "# make the phasedlc plot for this period", "nbr", "=", "_pkl_phased_magseries_plot", "(", "nbr", ",", "lspt", ".", "split", "(", "'-'", ")", "[", "1", "]", ",", "# this splits '<pfindex>-<pfmethod>'", "0", ",", "xtimes", ",", "xmags", ",", "xerrs", ",", "operiod", ",", "oepoch", ",", "phasewrap", "=", "ophasewrap", ",", "phasesort", "=", "ophasesort", ",", "phasebin", "=", "ophasebin", ",", "minbinelems", "=", "ominbinelems", ",", "plotxlim", "=", "oplotxlim", ",", "magsarefluxes", "=", "magsarefluxes", ",", "verbose", "=", "verbose", ",", "override_pfmethod", "=", "lspt", ")", "# at this point, this neighbor's dict should be up to date with all", "# info, magseries plot, 
and all phased LC plots", "# return the updated checkplotdict", "return", "checkplotdict" ]
For all neighbors in a checkplotdict, make LCs and phased LCs. Parameters ---------- checkplotdict : dict This is the checkplot to process. The light curves for the neighbors to the object here will be extracted from the stored file paths, and this function will make plots of these time-series. If the object has 'best' periods and epochs generated by period-finder functions in this checkplotdict, phased light curve plots of each neighbor will be made using these to check the effects of blending. timecol,magcol,errcol : str The timecol, magcol, and errcol keys used to generate this object's checkplot. This is used to extract the correct time-series from the neighbors' light curves. lcformat : str This is the `formatkey` associated with your light curve format, which you previously passed in to the `lcproc.register_lcformat` function. This will be used to look up how to find and read the light curves specified in `basedir` or `use_list_of_filenames`. lcformatdir : str or None If this is provided, gives the path to a directory where you've stored your lcformat description JSONs, other than the usual directories lcproc knows to search for them in. Use this along with `lcformat` to specify an LC format JSON file that's not currently registered with lcproc. Returns ------- dict The input checkplotdict is returned with the neighbor light curve plots added in.
[ "For", "all", "neighbors", "in", "a", "checkplotdict", "make", "LCs", "and", "phased", "LCs", "." ]
python
valid
35.559727
juiceinc/recipe
recipe/shelf.py
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/shelf.py#L168-L279
def parse_field(fld, selectable, aggregated=True, default_aggregation='sum'): """ Parse a field object from yaml into a sqlalchemy expression """ # An aggregation is a callable that takes a single field expression # None will perform no aggregation aggregation_lookup = { 'sum': func.sum, 'min': func.min, 'max': func.max, 'avg': func.avg, 'count': func.count, 'count_distinct': lambda fld: func.count(distinct(fld)), 'month': lambda fld: func.date_trunc('month', fld), 'week': lambda fld: func.date_trunc('week', fld), 'year': lambda fld: func.date_trunc('year', fld), 'quarter': lambda fld: func.date_trunc('quarter', fld), 'age': lambda fld: func.date_part('year', func.age(fld)), None: lambda fld: fld, } # Ensure that the dictionary contains: # { # 'value': str, # 'aggregation': str|None, # 'condition': dict|None # } if isinstance(fld, basestring): fld = { 'value': fld, } if not isinstance(fld, dict): raise BadIngredient('fields must be a string or a dict') if 'value' not in fld: raise BadIngredient('fields must contain a value') if not isinstance(fld['value'], basestring): raise BadIngredient('field value must be a string') # Ensure a condition if 'condition' in fld: if not isinstance(fld['condition'], dict) and \ not fld['condition'] is None: raise BadIngredient('condition must be null or an object') else: fld['condition'] = None # Ensure an aggregation initial_aggregation = default_aggregation if aggregated else None if 'aggregation' in fld: if not isinstance(fld['aggregation'], basestring) and \ not fld['aggregation'] is None: raise BadIngredient('aggregation must be null or an string') if fld['aggregation'] is None: fld['aggregation'] = initial_aggregation else: fld['aggregation'] = initial_aggregation value = fld.get('value', None) if value is None: raise BadIngredient('field value is not defined') field_parts = [] for word in tokenize(value): if word in ('MINUS', 'PLUS', 'DIVIDE', 'MULTIPLY'): field_parts.append(word) else: field_parts.append(find_column(selectable, word)) if len(field_parts) is None: raise BadIngredient('field is not defined.') # Fields should have an odd number of parts if len(field_parts) % 2 != 1: raise BadIngredient('field does not have the right number of parts') field = field_parts[0] if len(field_parts) > 1: # if we need to add and subtract from the field # join the field parts into pairs, for instance if field parts is # [MyTable.first, 'MINUS', MyTable.second, 'PLUS', MyTable.third] # we will get two pairs here # [('MINUS', MyTable.second), ('PLUS', MyTable.third)] for operator, other_field in zip(field_parts[1::2], field_parts[2::2]): if operator == 'PLUS': field = field.__add__(other_field) elif operator == 'MINUS': field = field.__sub__(other_field) elif operator == 'DIVIDE': field = field.__div__(other_field) elif operator == 'MULTIPLY': field = field.__mul__(other_field) else: raise BadIngredient('Unknown operator {}'.format(operator)) # Handle the aggregator aggr = fld.get('aggregation', 'sum') if aggr is not None: aggr = aggr.strip() if aggr not in aggregation_lookup: raise BadIngredient('unknown aggregation {}'.format(aggr)) aggregator = aggregation_lookup[aggr] condition = parse_condition( fld.get('condition', None), selectable, aggregated=False, default_aggregation=default_aggregation ) if condition is not None: field = case([(condition, field)]) return aggregator(field)
[ "def", "parse_field", "(", "fld", ",", "selectable", ",", "aggregated", "=", "True", ",", "default_aggregation", "=", "'sum'", ")", ":", "# An aggregation is a callable that takes a single field expression", "# None will perform no aggregation", "aggregation_lookup", "=", "{", "'sum'", ":", "func", ".", "sum", ",", "'min'", ":", "func", ".", "min", ",", "'max'", ":", "func", ".", "max", ",", "'avg'", ":", "func", ".", "avg", ",", "'count'", ":", "func", ".", "count", ",", "'count_distinct'", ":", "lambda", "fld", ":", "func", ".", "count", "(", "distinct", "(", "fld", ")", ")", ",", "'month'", ":", "lambda", "fld", ":", "func", ".", "date_trunc", "(", "'month'", ",", "fld", ")", ",", "'week'", ":", "lambda", "fld", ":", "func", ".", "date_trunc", "(", "'week'", ",", "fld", ")", ",", "'year'", ":", "lambda", "fld", ":", "func", ".", "date_trunc", "(", "'year'", ",", "fld", ")", ",", "'quarter'", ":", "lambda", "fld", ":", "func", ".", "date_trunc", "(", "'quarter'", ",", "fld", ")", ",", "'age'", ":", "lambda", "fld", ":", "func", ".", "date_part", "(", "'year'", ",", "func", ".", "age", "(", "fld", ")", ")", ",", "None", ":", "lambda", "fld", ":", "fld", ",", "}", "# Ensure that the dictionary contains:", "# {", "# 'value': str,", "# 'aggregation': str|None,", "# 'condition': dict|None", "# }", "if", "isinstance", "(", "fld", ",", "basestring", ")", ":", "fld", "=", "{", "'value'", ":", "fld", ",", "}", "if", "not", "isinstance", "(", "fld", ",", "dict", ")", ":", "raise", "BadIngredient", "(", "'fields must be a string or a dict'", ")", "if", "'value'", "not", "in", "fld", ":", "raise", "BadIngredient", "(", "'fields must contain a value'", ")", "if", "not", "isinstance", "(", "fld", "[", "'value'", "]", ",", "basestring", ")", ":", "raise", "BadIngredient", "(", "'field value must be a string'", ")", "# Ensure a condition", "if", "'condition'", "in", "fld", ":", "if", "not", "isinstance", "(", "fld", "[", "'condition'", "]", ",", "dict", ")", "and", "not", "fld", "[", "'condition'", "]", "is", "None", ":", "raise", "BadIngredient", "(", "'condition must be null or an object'", ")", "else", ":", "fld", "[", "'condition'", "]", "=", "None", "# Ensure an aggregation", "initial_aggregation", "=", "default_aggregation", "if", "aggregated", "else", "None", "if", "'aggregation'", "in", "fld", ":", "if", "not", "isinstance", "(", "fld", "[", "'aggregation'", "]", ",", "basestring", ")", "and", "not", "fld", "[", "'aggregation'", "]", "is", "None", ":", "raise", "BadIngredient", "(", "'aggregation must be null or an string'", ")", "if", "fld", "[", "'aggregation'", "]", "is", "None", ":", "fld", "[", "'aggregation'", "]", "=", "initial_aggregation", "else", ":", "fld", "[", "'aggregation'", "]", "=", "initial_aggregation", "value", "=", "fld", ".", "get", "(", "'value'", ",", "None", ")", "if", "value", "is", "None", ":", "raise", "BadIngredient", "(", "'field value is not defined'", ")", "field_parts", "=", "[", "]", "for", "word", "in", "tokenize", "(", "value", ")", ":", "if", "word", "in", "(", "'MINUS'", ",", "'PLUS'", ",", "'DIVIDE'", ",", "'MULTIPLY'", ")", ":", "field_parts", ".", "append", "(", "word", ")", "else", ":", "field_parts", ".", "append", "(", "find_column", "(", "selectable", ",", "word", ")", ")", "if", "len", "(", "field_parts", ")", "is", "None", ":", "raise", "BadIngredient", "(", "'field is not defined.'", ")", "# Fields should have an odd number of parts", "if", "len", "(", "field_parts", ")", "%", "2", "!=", "1", ":", "raise", "BadIngredient", "(", "'field does not 
have the right number of parts'", ")", "field", "=", "field_parts", "[", "0", "]", "if", "len", "(", "field_parts", ")", ">", "1", ":", "# if we need to add and subtract from the field", "# join the field parts into pairs, for instance if field parts is", "# [MyTable.first, 'MINUS', MyTable.second, 'PLUS', MyTable.third]", "# we will get two pairs here", "# [('MINUS', MyTable.second), ('PLUS', MyTable.third)]", "for", "operator", ",", "other_field", "in", "zip", "(", "field_parts", "[", "1", ":", ":", "2", "]", ",", "field_parts", "[", "2", ":", ":", "2", "]", ")", ":", "if", "operator", "==", "'PLUS'", ":", "field", "=", "field", ".", "__add__", "(", "other_field", ")", "elif", "operator", "==", "'MINUS'", ":", "field", "=", "field", ".", "__sub__", "(", "other_field", ")", "elif", "operator", "==", "'DIVIDE'", ":", "field", "=", "field", ".", "__div__", "(", "other_field", ")", "elif", "operator", "==", "'MULTIPLY'", ":", "field", "=", "field", ".", "__mul__", "(", "other_field", ")", "else", ":", "raise", "BadIngredient", "(", "'Unknown operator {}'", ".", "format", "(", "operator", ")", ")", "# Handle the aggregator", "aggr", "=", "fld", ".", "get", "(", "'aggregation'", ",", "'sum'", ")", "if", "aggr", "is", "not", "None", ":", "aggr", "=", "aggr", ".", "strip", "(", ")", "if", "aggr", "not", "in", "aggregation_lookup", ":", "raise", "BadIngredient", "(", "'unknown aggregation {}'", ".", "format", "(", "aggr", ")", ")", "aggregator", "=", "aggregation_lookup", "[", "aggr", "]", "condition", "=", "parse_condition", "(", "fld", ".", "get", "(", "'condition'", ",", "None", ")", ",", "selectable", ",", "aggregated", "=", "False", ",", "default_aggregation", "=", "default_aggregation", ")", "if", "condition", "is", "not", "None", ":", "field", "=", "case", "(", "[", "(", "condition", ",", "field", ")", "]", ")", "return", "aggregator", "(", "field", ")" ]
Parse a field object from yaml into a sqlalchemy expression
[ "Parse", "a", "field", "object", "from", "yaml", "into", "a", "sqlalchemy", "expression" ]
python
train
35.785714
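For reference, the field specs that parse_field above accepts come in a few shapes; a small sketch follows, where the column names ('revenue', 'refunds') and the selectable are invented for illustration.

    # Hypothetical field specs in the shapes parse_field accepts.
    field_a = 'revenue'                                    # bare string; default aggregation applies
    field_b = {'value': 'revenue', 'aggregation': 'avg'}   # explicit aggregation
    field_c = {
        'value': 'revenue MINUS refunds',                  # arithmetic between two columns
        'aggregation': 'sum',
        'condition': None,                                 # or a condition dict, applied via a CASE wrapper
    }
    # parse_field(field_b, selectable) would resolve 'revenue' against `selectable`
    # and return func.avg(<column>).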
venmo/slouch
example.py
https://github.com/venmo/slouch/blob/000b03bc220a0d7aa5b06f59caf423e2b63a81d7/example.py#L58-L86
def stop(opts, bot, event): """Usage: stop [--name=<name>] [--notify=<slack_username>] Stop a timer. _name_ works the same as for `start`. If given _slack_username_, reply with an at-mention to the given user. """ name = opts['--name'] slack_username = opts['--notify'] now = datetime.datetime.now() delta = now - bot.timers.pop(name) response = bot.stop_fmt.format(delta) if slack_username: mention = '' # The slack api (provided by https://github.com/os/slacker) is available on all bots. users = bot.slack.users.list().body['members'] for user in users: if user['name'] == slack_username: mention = "<@%s>" % user['id'] break response = "%s: %s" % (mention, response) return response
[ "def", "stop", "(", "opts", ",", "bot", ",", "event", ")", ":", "name", "=", "opts", "[", "'--name'", "]", "slack_username", "=", "opts", "[", "'--notify'", "]", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "delta", "=", "now", "-", "bot", ".", "timers", ".", "pop", "(", "name", ")", "response", "=", "bot", ".", "stop_fmt", ".", "format", "(", "delta", ")", "if", "slack_username", ":", "mention", "=", "''", "# The slack api (provided by https://github.com/os/slacker) is available on all bots.", "users", "=", "bot", ".", "slack", ".", "users", ".", "list", "(", ")", ".", "body", "[", "'members'", "]", "for", "user", "in", "users", ":", "if", "user", "[", "'name'", "]", "==", "slack_username", ":", "mention", "=", "\"<@%s>\"", "%", "user", "[", "'id'", "]", "break", "response", "=", "\"%s: %s\"", "%", "(", "mention", ",", "response", ")", "return", "response" ]
Usage: stop [--name=<name>] [--notify=<slack_username>] Stop a timer. _name_ works the same as for `start`. If given _slack_username_, reply with an at-mention to the given user.
[ "Usage", ":", "stop", "[", "--", "name", "=", "<name", ">", "]", "[", "--", "notify", "=", "<slack_username", ">", "]" ]
python
train
27.517241
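As a usage sketch (timer name and username invented), the docopt-style opts dict this bot command receives for a message like "stop --name=deploy --notify=alice" would look roughly like this:

    # Hypothetical opts for the chat message: stop --name=deploy --notify=alice
    opts = {'--name': 'deploy', '--notify': 'alice'}
    # stop(opts, bot, event) then pops bot.timers['deploy'], formats the elapsed timedelta
    # with bot.stop_fmt, and prefixes the reply with "<@USERID>: " if 'alice' is found
    # among bot.slack.users.list().body['members'].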
arviz-devs/arviz
arviz/stats/stats.py
https://github.com/arviz-devs/arviz/blob/d04d8da07f029fd2931f48d2f7f324cf393e5277/arviz/stats/stats.py#L564-L611
def _gpdfit(x): """Estimate the parameters for the Generalized Pareto Distribution (GPD). Empirical Bayes estimate for the parameters of the generalized Pareto distribution given the data. Parameters ---------- x : array sorted 1D data array Returns ------- k : float estimated shape parameter sigma : float estimated scale parameter """ prior_bs = 3 prior_k = 10 len_x = len(x) m_est = 30 + int(len_x ** 0.5) b_ary = 1 - np.sqrt(m_est / (np.arange(1, m_est + 1, dtype=float) - 0.5)) b_ary /= prior_bs * x[int(len_x / 4 + 0.5) - 1] b_ary += 1 / x[-1] k_ary = np.log1p(-b_ary[:, None] * x).mean(axis=1) # pylint: disable=no-member len_scale = len_x * (np.log(-(b_ary / k_ary)) - k_ary - 1) weights = 1 / np.exp(len_scale - len_scale[:, None]).sum(axis=1) # remove negligible weights real_idxs = weights >= 10 * np.finfo(float).eps if not np.all(real_idxs): weights = weights[real_idxs] b_ary = b_ary[real_idxs] # normalise weights weights /= weights.sum() # posterior mean for b b_post = np.sum(b_ary * weights) # estimate for k k_post = np.log1p(-b_post * x).mean() # pylint: disable=invalid-unary-operand-type,no-member # add prior for k_post k_post = (len_x * k_post + prior_k * 0.5) / (len_x + prior_k) sigma = -k_post / b_post return k_post, sigma
[ "def", "_gpdfit", "(", "x", ")", ":", "prior_bs", "=", "3", "prior_k", "=", "10", "len_x", "=", "len", "(", "x", ")", "m_est", "=", "30", "+", "int", "(", "len_x", "**", "0.5", ")", "b_ary", "=", "1", "-", "np", ".", "sqrt", "(", "m_est", "/", "(", "np", ".", "arange", "(", "1", ",", "m_est", "+", "1", ",", "dtype", "=", "float", ")", "-", "0.5", ")", ")", "b_ary", "/=", "prior_bs", "*", "x", "[", "int", "(", "len_x", "/", "4", "+", "0.5", ")", "-", "1", "]", "b_ary", "+=", "1", "/", "x", "[", "-", "1", "]", "k_ary", "=", "np", ".", "log1p", "(", "-", "b_ary", "[", ":", ",", "None", "]", "*", "x", ")", ".", "mean", "(", "axis", "=", "1", ")", "# pylint: disable=no-member", "len_scale", "=", "len_x", "*", "(", "np", ".", "log", "(", "-", "(", "b_ary", "/", "k_ary", ")", ")", "-", "k_ary", "-", "1", ")", "weights", "=", "1", "/", "np", ".", "exp", "(", "len_scale", "-", "len_scale", "[", ":", ",", "None", "]", ")", ".", "sum", "(", "axis", "=", "1", ")", "# remove negligible weights", "real_idxs", "=", "weights", ">=", "10", "*", "np", ".", "finfo", "(", "float", ")", ".", "eps", "if", "not", "np", ".", "all", "(", "real_idxs", ")", ":", "weights", "=", "weights", "[", "real_idxs", "]", "b_ary", "=", "b_ary", "[", "real_idxs", "]", "# normalise weights", "weights", "/=", "weights", ".", "sum", "(", ")", "# posterior mean for b", "b_post", "=", "np", ".", "sum", "(", "b_ary", "*", "weights", ")", "# estimate for k", "k_post", "=", "np", ".", "log1p", "(", "-", "b_post", "*", "x", ")", ".", "mean", "(", ")", "# pylint: disable=invalid-unary-operand-type,no-member", "# add prior for k_post", "k_post", "=", "(", "len_x", "*", "k_post", "+", "prior_k", "*", "0.5", ")", "/", "(", "len_x", "+", "prior_k", ")", "sigma", "=", "-", "k_post", "/", "b_post", "return", "k_post", ",", "sigma" ]
Estimate the parameters for the Generalized Pareto Distribution (GPD). Empirical Bayes estimate for the parameters of the generalized Pareto distribution given the data. Parameters ---------- x : array sorted 1D data array Returns ------- k : float estimated shape parameter sigma : float estimated scale parameter
[ "Estimate", "the", "parameters", "for", "the", "Generalized", "Pareto", "Distribution", "(", "GPD", ")", "." ]
python
train
28.958333
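_gpdfit expects an already sorted 1D array; a minimal sketch of preparing such input is below. The import is commented out because the helper is private to the module named in the record, and the data are synthetic.

    import numpy as np

    # from arviz.stats.stats import _gpdfit   # private helper; path per the record above

    rng = np.random.default_rng(0)
    x = np.sort(rng.pareto(3.0, size=500))    # sorted 1D heavy-tailed sample
    # k, sigma = _gpdfit(x)                   # empirical-Bayes GPD shape and scale estimates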
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_trilloam.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_trilloam.py#L51-L62
def l2traceroute_input_rbridge_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") l2traceroute = ET.Element("l2traceroute") config = l2traceroute input = ET.SubElement(l2traceroute, "input") rbridge_id = ET.SubElement(input, "rbridge-id") rbridge_id.text = kwargs.pop('rbridge_id') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "l2traceroute_input_rbridge_id", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "l2traceroute", "=", "ET", ".", "Element", "(", "\"l2traceroute\"", ")", "config", "=", "l2traceroute", "input", "=", "ET", ".", "SubElement", "(", "l2traceroute", ",", "\"input\"", ")", "rbridge_id", "=", "ET", ".", "SubElement", "(", "input", ",", "\"rbridge-id\"", ")", "rbridge_id", ".", "text", "=", "kwargs", ".", "pop", "(", "'rbridge_id'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
37.583333
src-d/lookout-sdk
python/lookout/sdk/grpc/connection.py
https://github.com/src-d/lookout-sdk/blob/2ca64a77b022864fed3bb31d12997712c7e98e6e/python/lookout/sdk/grpc/connection.py#L90-L104
def to_grpc_address(target: str) -> str: """Converts a standard gRPC target to one that is supported by grpcio :param target: the server address. :returns: the converted address. """ u = urlparse(target) if u.scheme == "dns": raise ValueError("dns:// not supported") if u.scheme == "unix": return "unix:"+u.path return u.netloc
[ "def", "to_grpc_address", "(", "target", ":", "str", ")", "->", "str", ":", "u", "=", "urlparse", "(", "target", ")", "if", "u", ".", "scheme", "==", "\"dns\"", ":", "raise", "ValueError", "(", "\"dns:// not supported\"", ")", "if", "u", ".", "scheme", "==", "\"unix\"", ":", "return", "\"unix:\"", "+", "u", ".", "path", "return", "u", ".", "netloc" ]
Converts a standard gRPC target to one that is supported by grpcio :param target: the server address. :returns: the converted address.
[ "Converts", "a", "standard", "gRPC", "target", "to", "one", "that", "is", "supported", "by", "grpcio" ]
python
train
24.333333
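The conversion above is driven entirely by urlparse; the three documented cases (dns rejected, unix rewritten, anything else reduced to its netloc) can be reproduced standalone like this, with example addresses only.

    from urllib.parse import urlparse

    # unix:///var/run/svc.sock -> scheme 'unix', path '/var/run/svc.sock' -> "unix:/var/run/svc.sock"
    u = urlparse("unix:///var/run/svc.sock")
    print("unix:" + u.path)

    # ipv4://127.0.0.1:9432 -> netloc '127.0.0.1:9432'
    print(urlparse("ipv4://127.0.0.1:9432").netloc)

    # a dns:// target would make to_grpc_address raise ValueError("dns:// not supported")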
polyaxon/polyaxon-cli
polyaxon_cli/cli/project.py
https://github.com/polyaxon/polyaxon-cli/blob/a7f5eed74d4d909cad79059f3c21c58606881449/polyaxon_cli/cli/project.py#L726-L735
def download(ctx): """Download code of the current project.""" user, project_name = get_project_or_local(ctx.obj.get('project')) try: PolyaxonClient().project.download_repo(user, project_name) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not download code for project `{}`.'.format(project_name)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success('Files downloaded.')
[ "def", "download", "(", "ctx", ")", ":", "user", ",", "project_name", "=", "get_project_or_local", "(", "ctx", ".", "obj", ".", "get", "(", "'project'", ")", ")", "try", ":", "PolyaxonClient", "(", ")", ".", "project", ".", "download_repo", "(", "user", ",", "project_name", ")", "except", "(", "PolyaxonHTTPError", ",", "PolyaxonShouldExitError", ",", "PolyaxonClientException", ")", "as", "e", ":", "Printer", ".", "print_error", "(", "'Could not download code for project `{}`.'", ".", "format", "(", "project_name", ")", ")", "Printer", ".", "print_error", "(", "'Error message `{}`.'", ".", "format", "(", "e", ")", ")", "sys", ".", "exit", "(", "1", ")", "Printer", ".", "print_success", "(", "'Files downloaded.'", ")" ]
Download code of the current project.
[ "Download", "code", "of", "the", "current", "project", "." ]
python
valid
51.2
inorton/junit2html
junit2htmlreport/parser.py
https://github.com/inorton/junit2html/blob/73ff9d84c41b60148e86ce597ef605a0f1976d4b/junit2htmlreport/parser.py#L232-L237
def passed(self): """ Return all the passing testcases :return: """ return [test for test in self.all() if not test.failed() and not test.skipped()]
[ "def", "passed", "(", "self", ")", ":", "return", "[", "test", "for", "test", "in", "self", ".", "all", "(", ")", "if", "not", "test", ".", "failed", "(", ")", "and", "not", "test", ".", "skipped", "(", ")", "]" ]
Return all the passing testcases :return:
[ "Return", "all", "the", "passing", "testcases", ":", "return", ":" ]
python
train
30.5
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L3954-L3958
def user_identity_show(self, user_id, id, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/user_identities#show-identity" api_path = "/api/v2/users/{user_id}/identities/{id}.json" api_path = api_path.format(user_id=user_id, id=id) return self.call(api_path, **kwargs)
[ "def", "user_identity_show", "(", "self", ",", "user_id", ",", "id", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/users/{user_id}/identities/{id}.json\"", "api_path", "=", "api_path", ".", "format", "(", "user_id", "=", "user_id", ",", "id", "=", "id", ")", "return", "self", ".", "call", "(", "api_path", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/core/user_identities#show-identity
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "core", "/", "user_identities#show", "-", "identity" ]
python
train
61.4
python-xlib/python-xlib
Xlib/display.py
https://github.com/python-xlib/python-xlib/blob/8901e831737e79fe5645f48089d70e1d1046d2f2/Xlib/display.py#L788-L793
def bell(self, percent = 0, onerror = None): """Ring the bell at the volume percent which is relative the base volume. See XBell(3X11).""" request.Bell(display = self.display, onerror = onerror, percent = percent)
[ "def", "bell", "(", "self", ",", "percent", "=", "0", ",", "onerror", "=", "None", ")", ":", "request", ".", "Bell", "(", "display", "=", "self", ".", "display", ",", "onerror", "=", "onerror", ",", "percent", "=", "percent", ")" ]
Ring the bell at the volume percent which is relative to the base volume. See XBell(3X11).
[ "Ring", "the", "bell", "at", "the", "volume", "percent", "which", "is", "relative", "the", "base", "volume", ".", "See", "XBell", "(", "3X11", ")", "." ]
python
train
45.666667
edx/completion
completion/api/v1/views.py
https://github.com/edx/completion/blob/5c23806f6db69ce6be3fd068fc5b5fdf4d66bd60/completion/api/v1/views.py#L55-L107
def _validate_and_parse(self, batch_object): """ Performs validation on the batch object to make sure it is in the proper format. Parameters: * batch_object: The data provided to a POST. The expected format is the following: { "username": "username", "course_key": "course-key", "blocks": { "block_key1": 0.0, "block_key2": 1.0, "block_key3": 1.0, } } Return Value: * tuple: (User, CourseKey, List of tuples (UsageKey, completion_float) Raises: django.core.exceptions.ValidationError: If any aspect of validation fails a ValidationError is raised. ObjectDoesNotExist: If a database object cannot be found an ObjectDoesNotExist is raised. """ if not waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING): raise ValidationError( _("BlockCompletion.objects.submit_batch_completion should not be called when the feature is disabled.") ) for key in self.REQUIRED_KEYS: if key not in batch_object: raise ValidationError(_("Key '{key}' not found.").format(key=key)) username = batch_object['username'] user = User.objects.get(username=username) course_key_obj = self._validate_and_parse_course_key(batch_object['course_key']) if not CourseEnrollment.is_enrolled(user, course_key_obj): raise ValidationError(_('User is not enrolled in course.')) blocks = batch_object['blocks'] block_objs = [] for block_key in blocks: block_key_obj = self._validate_and_parse_block_key(block_key, course_key_obj) completion = float(blocks[block_key]) block_objs.append((block_key_obj, completion)) return user, course_key_obj, block_objs
[ "def", "_validate_and_parse", "(", "self", ",", "batch_object", ")", ":", "if", "not", "waffle", ".", "waffle", "(", ")", ".", "is_enabled", "(", "waffle", ".", "ENABLE_COMPLETION_TRACKING", ")", ":", "raise", "ValidationError", "(", "_", "(", "\"BlockCompletion.objects.submit_batch_completion should not be called when the feature is disabled.\"", ")", ")", "for", "key", "in", "self", ".", "REQUIRED_KEYS", ":", "if", "key", "not", "in", "batch_object", ":", "raise", "ValidationError", "(", "_", "(", "\"Key '{key}' not found.\"", ")", ".", "format", "(", "key", "=", "key", ")", ")", "username", "=", "batch_object", "[", "'username'", "]", "user", "=", "User", ".", "objects", ".", "get", "(", "username", "=", "username", ")", "course_key_obj", "=", "self", ".", "_validate_and_parse_course_key", "(", "batch_object", "[", "'course_key'", "]", ")", "if", "not", "CourseEnrollment", ".", "is_enrolled", "(", "user", ",", "course_key_obj", ")", ":", "raise", "ValidationError", "(", "_", "(", "'User is not enrolled in course.'", ")", ")", "blocks", "=", "batch_object", "[", "'blocks'", "]", "block_objs", "=", "[", "]", "for", "block_key", "in", "blocks", ":", "block_key_obj", "=", "self", ".", "_validate_and_parse_block_key", "(", "block_key", ",", "course_key_obj", ")", "completion", "=", "float", "(", "blocks", "[", "block_key", "]", ")", "block_objs", ".", "append", "(", "(", "block_key_obj", ",", "completion", ")", ")", "return", "user", ",", "course_key_obj", ",", "block_objs" ]
Performs validation on the batch object to make sure it is in the proper format. Parameters: * batch_object: The data provided to a POST. The expected format is the following: { "username": "username", "course_key": "course-key", "blocks": { "block_key1": 0.0, "block_key2": 1.0, "block_key3": 1.0, } } Return Value: * tuple: (User, CourseKey, List of tuples (UsageKey, completion_float) Raises: django.core.exceptions.ValidationError: If any aspect of validation fails a ValidationError is raised. ObjectDoesNotExist: If a database object cannot be found an ObjectDoesNotExist is raised.
[ "Performs", "validation", "on", "the", "batch", "object", "to", "make", "sure", "it", "is", "in", "the", "proper", "format", "." ]
python
train
36.849057
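The docstring above spells out the expected POST payload; an example in that shape, with made-up (but correctly formatted) Open edX course and block keys:

    # Hypothetical batch-completion payload matching the documented shape.
    batch_payload = {
        "username": "staff",
        "course_key": "course-v1:edX+DemoX+Demo_Course",
        "blocks": {
            "block-v1:edX+DemoX+Demo_Course+type@html+block@intro": 1.0,
            "block-v1:edX+DemoX+Demo_Course+type@problem+block@quiz1": 0.5,
        },
    }
    # _validate_and_parse(batch_payload) returns (User, CourseKey, [(UsageKey, completion), ...])
    # or raises ValidationError / ObjectDoesNotExist as described above.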
xav/Grapefruit
grapefruit.py
https://github.com/xav/Grapefruit/blob/b3d88375be727a3a1ec5839fbc462e0e8e0836e4/grapefruit.py#L992-L1020
def rgb_to_websafe(r, g=None, b=None, alt=False): """Convert the color from RGB to 'web safe' RGB Parameters: :r: The Red component value [0...1] :g: The Green component value [0...1] :b: The Blue component value [0...1] :alt: If True, use the alternative color instead of the nearest one. Can be used for dithering. Returns: The color as an (r, g, b) tuple in the range: the range: r[0...1], g[0...1], b[0...1] >>> '(%g, %g, %g)' % rgb_to_websafe(1, 0.55, 0.0) '(1, 0.6, 0)' """ if type(r) in [list,tuple]: r, g, b = r websafeComponent = _websafe_component return tuple((websafeComponent(v, alt) for v in (r, g, b)))
[ "def", "rgb_to_websafe", "(", "r", ",", "g", "=", "None", ",", "b", "=", "None", ",", "alt", "=", "False", ")", ":", "if", "type", "(", "r", ")", "in", "[", "list", ",", "tuple", "]", ":", "r", ",", "g", ",", "b", "=", "r", "websafeComponent", "=", "_websafe_component", "return", "tuple", "(", "(", "websafeComponent", "(", "v", ",", "alt", ")", "for", "v", "in", "(", "r", ",", "g", ",", "b", ")", ")", ")" ]
Convert the color from RGB to 'web safe' RGB Parameters: :r: The Red component value [0...1] :g: The Green component value [0...1] :b: The Blue component value [0...1] :alt: If True, use the alternative color instead of the nearest one. Can be used for dithering. Returns: The color as an (r, g, b) tuple in the range: the range: r[0...1], g[0...1], b[0...1] >>> '(%g, %g, %g)' % rgb_to_websafe(1, 0.55, 0.0) '(1, 0.6, 0)'
[ "Convert", "the", "color", "from", "RGB", "to", "web", "safe", "RGB" ]
python
train
23.482759
MisterWil/abodepy
abodepy/event_controller.py
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/event_controller.py#L49-L74
def add_device_callback(self, devices, callback): """Register a device callback.""" if not devices: return False if not isinstance(devices, (tuple, list)): devices = [devices] for device in devices: # Device may be a device_id device_id = device # If they gave us an actual device, get that devices ID if isinstance(device, AbodeDevice): device_id = device.device_id # Validate the device is valid if not self._abode.get_device(device_id): raise AbodeException((ERROR.EVENT_DEVICE_INVALID)) _LOGGER.debug( "Subscribing to updated for device_id: %s", device_id) self._device_callbacks[device_id].append((callback)) return True
[ "def", "add_device_callback", "(", "self", ",", "devices", ",", "callback", ")", ":", "if", "not", "devices", ":", "return", "False", "if", "not", "isinstance", "(", "devices", ",", "(", "tuple", ",", "list", ")", ")", ":", "devices", "=", "[", "devices", "]", "for", "device", "in", "devices", ":", "# Device may be a device_id", "device_id", "=", "device", "# If they gave us an actual device, get that devices ID", "if", "isinstance", "(", "device", ",", "AbodeDevice", ")", ":", "device_id", "=", "device", ".", "device_id", "# Validate the device is valid", "if", "not", "self", ".", "_abode", ".", "get_device", "(", "device_id", ")", ":", "raise", "AbodeException", "(", "(", "ERROR", ".", "EVENT_DEVICE_INVALID", ")", ")", "_LOGGER", ".", "debug", "(", "\"Subscribing to updated for device_id: %s\"", ",", "device_id", ")", "self", ".", "_device_callbacks", "[", "device_id", "]", ".", "append", "(", "(", "callback", ")", ")", "return", "True" ]
Register a device callback.
[ "Register", "a", "device", "callback", "." ]
python
train
31.346154
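A usage sketch for the callback registration above; the controller, device id, and device objects are invented, and the registration calls are commented out because they need a live Abode connection.

    def on_device_update(device):
        # invoked whenever a subscribed device reports a change
        print("device changed:", device)

    # event_controller.add_device_callback("ZB:00000001", on_device_update)          # by device_id
    # event_controller.add_device_callback([door_sensor, camera], on_device_update)  # AbodeDevice objects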
calmjs/calmjs
src/calmjs/toolchain.py
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/toolchain.py#L1553-L1595
def calf(self, spec): """ Typical safe usage is this, which sets everything that could be problematic up. Requires the filename which everything will be produced to. """ if not isinstance(spec, Spec): raise TypeError('spec must be of type Spec') if not spec.get(BUILD_DIR): tempdir = realpath(mkdtemp()) spec.advise(CLEANUP, shutil.rmtree, tempdir) build_dir = join(tempdir, 'build') mkdir(build_dir) spec[BUILD_DIR] = build_dir else: build_dir = self.realpath(spec, BUILD_DIR) if not isdir(build_dir): logger.error("build_dir '%s' is not a directory", build_dir) raise_os_error(errno.ENOTDIR, build_dir) self.realpath(spec, EXPORT_TARGET) # Finally, handle setup which may set up the deferred advices, # as all the toolchain (and its runtime and/or its parent # runtime and related toolchains) spec advises should have been # done. spec.handle(SETUP) try: process = ('prepare', 'compile', 'assemble', 'link', 'finalize') for p in process: spec.handle('before_' + p) getattr(self, p)(spec) spec.handle('after_' + p) spec.handle(SUCCESS) except ToolchainCancel: # quietly handle the issue and move on out of here. pass finally: spec.handle(CLEANUP)
[ "def", "calf", "(", "self", ",", "spec", ")", ":", "if", "not", "isinstance", "(", "spec", ",", "Spec", ")", ":", "raise", "TypeError", "(", "'spec must be of type Spec'", ")", "if", "not", "spec", ".", "get", "(", "BUILD_DIR", ")", ":", "tempdir", "=", "realpath", "(", "mkdtemp", "(", ")", ")", "spec", ".", "advise", "(", "CLEANUP", ",", "shutil", ".", "rmtree", ",", "tempdir", ")", "build_dir", "=", "join", "(", "tempdir", ",", "'build'", ")", "mkdir", "(", "build_dir", ")", "spec", "[", "BUILD_DIR", "]", "=", "build_dir", "else", ":", "build_dir", "=", "self", ".", "realpath", "(", "spec", ",", "BUILD_DIR", ")", "if", "not", "isdir", "(", "build_dir", ")", ":", "logger", ".", "error", "(", "\"build_dir '%s' is not a directory\"", ",", "build_dir", ")", "raise_os_error", "(", "errno", ".", "ENOTDIR", ",", "build_dir", ")", "self", ".", "realpath", "(", "spec", ",", "EXPORT_TARGET", ")", "# Finally, handle setup which may set up the deferred advices,", "# as all the toolchain (and its runtime and/or its parent", "# runtime and related toolchains) spec advises should have been", "# done.", "spec", ".", "handle", "(", "SETUP", ")", "try", ":", "process", "=", "(", "'prepare'", ",", "'compile'", ",", "'assemble'", ",", "'link'", ",", "'finalize'", ")", "for", "p", "in", "process", ":", "spec", ".", "handle", "(", "'before_'", "+", "p", ")", "getattr", "(", "self", ",", "p", ")", "(", "spec", ")", "spec", ".", "handle", "(", "'after_'", "+", "p", ")", "spec", ".", "handle", "(", "SUCCESS", ")", "except", "ToolchainCancel", ":", "# quietly handle the issue and move on out of here.", "pass", "finally", ":", "spec", ".", "handle", "(", "CLEANUP", ")" ]
Typical safe usage is this, which sets everything that could be problematic up. Requires the filename which everything will be produced to.
[ "Typical", "safe", "usage", "is", "this", "which", "sets", "everything", "that", "could", "be", "problematic", "up", "." ]
python
train
34.767442
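A minimal sketch of driving calf(), assuming Spec comes from the same calmjs.toolchain module as the record and that MyToolchain is some concrete Toolchain subclass (hypothetical); the calls are commented out since they need a real toolchain.

    # from calmjs.toolchain import Spec
    # spec = Spec(export_target='/tmp/bundle.js')   # no build_dir given: calf() makes a temp one
    # MyToolchain().calf(spec)                      # prepare -> compile -> assemble -> link -> finalize,
    #                                               # with SETUP/SUCCESS/CLEANUP advices handled around them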
pirate/mesh-networking
examples/large_network.py
https://github.com/pirate/mesh-networking/blob/e8da35d2ecded6930cf2180605bf28479ee555c7/examples/large_network.py#L14-L23
def hops(node1, node2): """returns # of hops it takes to get from node1 to node2, 1 means they're on the same link""" if node1 == node2: return 0 elif set(node1.interfaces) & set(node2.interfaces): # they share a common interface return 1 else: # Not implemented yet, graphsearch to find min hops between two nodes return 0
[ "def", "hops", "(", "node1", ",", "node2", ")", ":", "if", "node1", "==", "node2", ":", "return", "0", "elif", "set", "(", "node1", ".", "interfaces", ")", "&", "set", "(", "node2", ".", "interfaces", ")", ":", "# they share a common interface", "return", "1", "else", ":", "# Not implemented yet, graphsearch to find min hops between two nodes", "return", "0" ]
returns # of hops it takes to get from node1 to node2, 1 means they're on the same link
[ "returns", "#", "of", "hops", "it", "takes", "to", "get", "from", "node1", "to", "node2", "1", "means", "they", "re", "on", "the", "same", "link" ]
python
train
37
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_misc.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_misc.py#L83-L90
def qnh_estimate(self): '''estimate QNH pressure from GPS altitude and scaled pressure''' alt_gps = self.master.field('GPS_RAW_INT', 'alt', 0) * 0.001 pressure2 = self.master.field('SCALED_PRESSURE', 'press_abs', 0) ground_temp = self.get_mav_param('GND_TEMP', 21) temp = ground_temp + 273.15 pressure1 = pressure2 / math.exp(math.log(1.0 - (alt_gps / (153.8462 * temp))) / 0.190259) return pressure1
[ "def", "qnh_estimate", "(", "self", ")", ":", "alt_gps", "=", "self", ".", "master", ".", "field", "(", "'GPS_RAW_INT'", ",", "'alt'", ",", "0", ")", "*", "0.001", "pressure2", "=", "self", ".", "master", ".", "field", "(", "'SCALED_PRESSURE'", ",", "'press_abs'", ",", "0", ")", "ground_temp", "=", "self", ".", "get_mav_param", "(", "'GND_TEMP'", ",", "21", ")", "temp", "=", "ground_temp", "+", "273.15", "pressure1", "=", "pressure2", "/", "math", ".", "exp", "(", "math", ".", "log", "(", "1.0", "-", "(", "alt_gps", "/", "(", "153.8462", "*", "temp", ")", ")", ")", "/", "0.190259", ")", "return", "pressure1" ]
estimate QNH pressure from GPS altitude and scaled pressure
[ "estimate", "QNH", "pressure", "from", "GPS", "altitude", "and", "scaled", "pressure" ]
python
train
56.125
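The QNH estimate above is just the inverse barometric formula; the same arithmetic can be checked standalone with illustrative numbers (not real telemetry):

    import math

    alt_gps = 120.0        # metres, i.e. GPS_RAW_INT.alt * 0.001
    press_abs = 1000.2     # hPa, SCALED_PRESSURE.press_abs
    temp = 21 + 273.15     # GND_TEMP default of 21 C, in kelvin

    qnh = press_abs / math.exp(math.log(1.0 - alt_gps / (153.8462 * temp)) / 0.190259)
    print(round(qnh, 1))   # roughly 1014 hPa for these inputs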
hthiery/python-fritzhome
pyfritzhome/cli.py
https://github.com/hthiery/python-fritzhome/blob/c74bd178d08a305028f316f7da35202da3526f61/pyfritzhome/cli.py#L74-L77
def device_statistics(fritz, args): """Command that prints the device statistics.""" stats = fritz.get_device_statistics(args.ain) print(stats)
[ "def", "device_statistics", "(", "fritz", ",", "args", ")", ":", "stats", "=", "fritz", ".", "get_device_statistics", "(", "args", ".", "ain", ")", "print", "(", "stats", ")" ]
Command that prints the device statistics.
[ "Command", "that", "prints", "the", "device", "statistics", "." ]
python
train
38
hydraplatform/hydra-base
hydra_base/lib/users.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/users.py#L425-L433
def get_role_by_code(role_code,**kwargs): """ Get a role by its code """ try: role = db.DBSession.query(Role).filter(Role.code==role_code).one() return role except NoResultFound: raise ResourceNotFoundError("Role not found (role_code={})".format(role_code))
[ "def", "get_role_by_code", "(", "role_code", ",", "*", "*", "kwargs", ")", ":", "try", ":", "role", "=", "db", ".", "DBSession", ".", "query", "(", "Role", ")", ".", "filter", "(", "Role", ".", "code", "==", "role_code", ")", ".", "one", "(", ")", "return", "role", "except", "NoResultFound", ":", "raise", "ResourceNotFoundError", "(", "\"Role not found (role_code={})\"", ".", "format", "(", "role_code", ")", ")" ]
Get a role by its code
[ "Get", "a", "role", "by", "its", "code" ]
python
train
33
seleniumbase/SeleniumBase
seleniumbase/fixtures/page_actions.py
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/fixtures/page_actions.py#L71-L86
def is_text_visible(driver, text, selector, by=By.CSS_SELECTOR): """ Returns whether the specified text is visible in the specified selector. @Params driver - the webdriver object (required) text - the text string to search for selector - the locator that is used (required) by - the method to search for the locator (Default: By.CSS_SELECTOR) @Returns Boolean (is text visible) """ try: element = driver.find_element(by=by, value=selector) return element.is_displayed() and text in element.text except Exception: return False
[ "def", "is_text_visible", "(", "driver", ",", "text", ",", "selector", ",", "by", "=", "By", ".", "CSS_SELECTOR", ")", ":", "try", ":", "element", "=", "driver", ".", "find_element", "(", "by", "=", "by", ",", "value", "=", "selector", ")", "return", "element", ".", "is_displayed", "(", ")", "and", "text", "in", "element", ".", "text", "except", "Exception", ":", "return", "False" ]
Returns whether the specified text is visible in the specified selector. @Params driver - the webdriver object (required) text - the text string to search for selector - the locator that is used (required) by - the method to search for the locator (Default: By.CSS_SELECTOR) @Returns Boolean (is text visible)
[ "Returns", "whether", "the", "specified", "text", "is", "visible", "in", "the", "specified", "selector", "." ]
python
train
36.4375
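A hypothetical usage of the helper above; it needs a live WebDriver, so the calls are commented out, and the page and selectors are examples only.

    # from selenium import webdriver
    # from selenium.webdriver.common.by import By
    #
    # driver = webdriver.Chrome()
    # driver.get("https://example.com")
    # is_text_visible(driver, "Example Domain", "h1")                  # CSS selector (default)
    # is_text_visible(driver, "More information", "//a", by=By.XPATH)  # explicit locator strategy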
abilian/abilian-core
abilian/services/auth/models.py
https://github.com/abilian/abilian-core/blob/0a71275bf108c3d51e13ca9e093c0249235351e3/abilian/services/auth/models.py#L18-L54
def get_active_for(self, user, user_agent=_MARK, ip_address=_MARK): """Return last known session for given user. :param user: user session :type user: `abilian.core.models.subjects.User` :param user_agent: *exact* user agent string to lookup, or `None` to have user_agent extracted from request object. If not provided at all, no filtering on user_agent. :type user_agent: string or None, or absent :param ip_address: client IP, or `None` to have ip_address extracted from request object (requires header 'X-Forwarded-For'). If not provided at all, no filtering on ip_address. :type ip_address: string or None, or absent :rtype: `LoginSession` or `None` if no session is found. """ conditions = [LoginSession.user == user] if user_agent is not _MARK: if user_agent is None: user_agent = request.environ.get("HTTP_USER_AGENT", "") conditions.append(LoginSession.user_agent == user_agent) if ip_address is not _MARK: if ip_address is None: ip_addresses = request.headers.getlist("X-Forwarded-For") ip_address = ip_addresses[0] if ip_addresses else request.remote_addr conditions.append(LoginSession.ip_address == ip_address) session = ( LoginSession.query.filter(*conditions) .order_by(LoginSession.id.desc()) .first() ) return session
[ "def", "get_active_for", "(", "self", ",", "user", ",", "user_agent", "=", "_MARK", ",", "ip_address", "=", "_MARK", ")", ":", "conditions", "=", "[", "LoginSession", ".", "user", "==", "user", "]", "if", "user_agent", "is", "not", "_MARK", ":", "if", "user_agent", "is", "None", ":", "user_agent", "=", "request", ".", "environ", ".", "get", "(", "\"HTTP_USER_AGENT\"", ",", "\"\"", ")", "conditions", ".", "append", "(", "LoginSession", ".", "user_agent", "==", "user_agent", ")", "if", "ip_address", "is", "not", "_MARK", ":", "if", "ip_address", "is", "None", ":", "ip_addresses", "=", "request", ".", "headers", ".", "getlist", "(", "\"X-Forwarded-For\"", ")", "ip_address", "=", "ip_addresses", "[", "0", "]", "if", "ip_addresses", "else", "request", ".", "remote_addr", "conditions", ".", "append", "(", "LoginSession", ".", "ip_address", "==", "ip_address", ")", "session", "=", "(", "LoginSession", ".", "query", ".", "filter", "(", "*", "conditions", ")", ".", "order_by", "(", "LoginSession", ".", "id", ".", "desc", "(", ")", ")", ".", "first", "(", ")", ")", "return", "session" ]
Return last known session for given user. :param user: user session :type user: `abilian.core.models.subjects.User` :param user_agent: *exact* user agent string to lookup, or `None` to have user_agent extracted from request object. If not provided at all, no filtering on user_agent. :type user_agent: string or None, or absent :param ip_address: client IP, or `None` to have ip_address extracted from request object (requires header 'X-Forwarded-For'). If not provided at all, no filtering on ip_address. :type ip_address: string or None, or absent :rtype: `LoginSession` or `None` if no session is found.
[ "Return", "last", "known", "session", "for", "given", "user", "." ]
python
train
40.216216
googleapis/oauth2client
oauth2client/transport.py
https://github.com/googleapis/oauth2client/blob/50d20532a748f18e53f7d24ccbe6647132c979a9/oauth2client/transport.py#L110-L134
def clean_headers(headers): """Forces header keys and values to be strings, i.e not unicode. The httplib module just concats the header keys and values in a way that may make the message header a unicode string, which, if it then tries to contatenate to a binary request body may result in a unicode decode error. Args: headers: dict, A dictionary of headers. Returns: The same dictionary but with all the keys converted to strings. """ clean = {} try: for k, v in six.iteritems(headers): if not isinstance(k, six.binary_type): k = str(k) if not isinstance(v, six.binary_type): v = str(v) clean[_helpers._to_bytes(k)] = _helpers._to_bytes(v) except UnicodeEncodeError: from oauth2client.client import NonAsciiHeaderError raise NonAsciiHeaderError(k, ': ', v) return clean
[ "def", "clean_headers", "(", "headers", ")", ":", "clean", "=", "{", "}", "try", ":", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "headers", ")", ":", "if", "not", "isinstance", "(", "k", ",", "six", ".", "binary_type", ")", ":", "k", "=", "str", "(", "k", ")", "if", "not", "isinstance", "(", "v", ",", "six", ".", "binary_type", ")", ":", "v", "=", "str", "(", "v", ")", "clean", "[", "_helpers", ".", "_to_bytes", "(", "k", ")", "]", "=", "_helpers", ".", "_to_bytes", "(", "v", ")", "except", "UnicodeEncodeError", ":", "from", "oauth2client", ".", "client", "import", "NonAsciiHeaderError", "raise", "NonAsciiHeaderError", "(", "k", ",", "': '", ",", "v", ")", "return", "clean" ]
Forces header keys and values to be strings, i.e. not unicode. The httplib module just concats the header keys and values in a way that may make the message header a unicode string, which, if it then tries to concatenate to a binary request body may result in a unicode decode error. Args: headers: dict, A dictionary of headers. Returns: The same dictionary but with all the keys converted to strings.
[ "Forces", "header", "keys", "and", "values", "to", "be", "strings", "i", ".", "e", "not", "unicode", "." ]
python
valid
36.08
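A standalone stand-in for the transformation clean_headers performs (stringify, then byte-encode both keys and values); this is only a toy equivalent on a small dict, not the library call itself.

    # Simplified Python 3 equivalent of the transformation above, for illustration only.
    headers = {u"Content-Type": u"application/json", "X-Retry-Count": 3}
    clean = {str(k).encode("utf-8"): str(v).encode("utf-8") for k, v in headers.items()}
    print(clean)   # {b'Content-Type': b'application/json', b'X-Retry-Count': b'3'}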
roamanalytics/mittens
mittens/np_mittens.py
https://github.com/roamanalytics/mittens/blob/dbf0c3f8d18651475cf7e21ab1ceb824c5f89150/mittens/np_mittens.py#L176-L200
def get_step(self, grad): """Computes the 'step' to take for the next gradient descent update. Returns the step rather than performing the update so that parameters can be updated in place rather than overwritten. Examples -------- >>> gradient = # ... >>> optimizer = AdaGradOptimizer(0.01) >>> params -= optimizer.get_step(gradient) Parameters ---------- grad Returns ------- np.array Size matches `grad`. """ if self._momentum is None: self._momentum = self.initial_accumulator_value * np.ones_like(grad) self._momentum += grad ** 2 return self.learning_rate * grad / np.sqrt(self._momentum)
[ "def", "get_step", "(", "self", ",", "grad", ")", ":", "if", "self", ".", "_momentum", "is", "None", ":", "self", ".", "_momentum", "=", "self", ".", "initial_accumulator_value", "*", "np", ".", "ones_like", "(", "grad", ")", "self", ".", "_momentum", "+=", "grad", "**", "2", "return", "self", ".", "learning_rate", "*", "grad", "/", "np", ".", "sqrt", "(", "self", ".", "_momentum", ")" ]
Computes the 'step' to take for the next gradient descent update. Returns the step rather than performing the update so that parameters can be updated in place rather than overwritten. Examples -------- >>> gradient = # ... >>> optimizer = AdaGradOptimizer(0.01) >>> params -= optimizer.get_step(gradient) Parameters ---------- grad Returns ------- np.array Size matches `grad`.
[ "Computes", "the", "step", "to", "take", "for", "the", "next", "gradient", "descent", "update", "." ]
python
train
29.68
lesscpy/lesscpy
lesscpy/plib/mixin.py
https://github.com/lesscpy/lesscpy/blob/51e392fb4a3cd4ccfb6175e0e42ce7d2f6b78126/lesscpy/plib/mixin.py#L49-L79
def parse_args(self, args, scope): """Parse arguments to mixin. Add them to scope as variables. Sets upp special variable @arguments as well. args: args (list): arguments scope (Scope): current scope raises: SyntaxError """ arguments = list(zip(args, [' '] * len(args))) if args and args[0] else None zl = itertools.zip_longest if sys.version_info[ 0] == 3 else itertools.izip_longest if self.args: parsed = [ v if hasattr(v, 'parse') else v for v in copy.copy(self.args) ] args = args if isinstance(args, list) else [args] vars = [ self._parse_arg(var, arg, scope) for arg, var in zl([a for a in args], parsed) ] for var in vars: if var: var.parse(scope) if not arguments: arguments = [v.value for v in vars if v] if not arguments: arguments = '' Variable(['@arguments', None, arguments]).parse(scope)
[ "def", "parse_args", "(", "self", ",", "args", ",", "scope", ")", ":", "arguments", "=", "list", "(", "zip", "(", "args", ",", "[", "' '", "]", "*", "len", "(", "args", ")", ")", ")", "if", "args", "and", "args", "[", "0", "]", "else", "None", "zl", "=", "itertools", ".", "zip_longest", "if", "sys", ".", "version_info", "[", "0", "]", "==", "3", "else", "itertools", ".", "izip_longest", "if", "self", ".", "args", ":", "parsed", "=", "[", "v", "if", "hasattr", "(", "v", ",", "'parse'", ")", "else", "v", "for", "v", "in", "copy", ".", "copy", "(", "self", ".", "args", ")", "]", "args", "=", "args", "if", "isinstance", "(", "args", ",", "list", ")", "else", "[", "args", "]", "vars", "=", "[", "self", ".", "_parse_arg", "(", "var", ",", "arg", ",", "scope", ")", "for", "arg", ",", "var", "in", "zl", "(", "[", "a", "for", "a", "in", "args", "]", ",", "parsed", ")", "]", "for", "var", "in", "vars", ":", "if", "var", ":", "var", ".", "parse", "(", "scope", ")", "if", "not", "arguments", ":", "arguments", "=", "[", "v", ".", "value", "for", "v", "in", "vars", "if", "v", "]", "if", "not", "arguments", ":", "arguments", "=", "''", "Variable", "(", "[", "'@arguments'", ",", "None", ",", "arguments", "]", ")", ".", "parse", "(", "scope", ")" ]
Parse arguments to mixin. Add them to scope as variables. Sets up special variable @arguments as well. args: args (list): arguments scope (Scope): current scope raises: SyntaxError
[ "Parse", "arguments", "to", "mixin", ".", "Add", "them", "to", "scope", "as", "variables", ".", "Sets", "upp", "special", "variable" ]
python
valid
36.548387
elastic/elasticsearch-py
elasticsearch/client/cluster.py
https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/client/cluster.py#L141-L154
def put_settings(self, body=None, params=None): """ Update cluster wide specific settings. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-update-settings.html>`_ :arg body: The settings to be updated. Can be either `transient` or `persistent` (survives cluster restart). :arg flat_settings: Return settings in flat format (default: false) :arg master_timeout: Explicit operation timeout for connection to master node :arg timeout: Explicit operation timeout """ return self.transport.perform_request('PUT', '/_cluster/settings', params=params, body=body)
[ "def", "put_settings", "(", "self", ",", "body", "=", "None", ",", "params", "=", "None", ")", ":", "return", "self", ".", "transport", ".", "perform_request", "(", "'PUT'", ",", "'/_cluster/settings'", ",", "params", "=", "params", ",", "body", "=", "body", ")" ]
Update cluster wide specific settings. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-update-settings.html>`_ :arg body: The settings to be updated. Can be either `transient` or `persistent` (survives cluster restart). :arg flat_settings: Return settings in flat format (default: false) :arg master_timeout: Explicit operation timeout for connection to master node :arg timeout: Explicit operation timeout
[ "Update", "cluster", "wide", "specific", "settings", ".", "<http", ":", "//", "www", ".", "elastic", ".", "co", "/", "guide", "/", "en", "/", "elasticsearch", "/", "reference", "/", "current", "/", "cluster", "-", "update", "-", "settings", ".", "html", ">", "_" ]
python
train
48.285714
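Using the cluster settings update above comes down to one call on the client's cluster namespace. A hedged sketch, assuming a reachable test cluster on localhost:9200; the host argument format varies between elasticsearch-py versions:

from elasticsearch import Elasticsearch

es = Elasticsearch(["http://localhost:9200"])  # assumed local test cluster

# Transient settings are discarded on a full cluster restart.
body = {"transient": {"cluster.routing.allocation.enable": "all"}}
response = es.cluster.put_settings(body=body)
print(response)  # e.g. {'acknowledged': True, 'transient': {...}, 'persistent': {}}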
mitsei/dlkit
dlkit/json_/repository/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/objects.py#L813-L824
def get_distribute_verbatim_metadata(self): """Gets the metadata for the distribute verbatim rights flag. return: (osid.Metadata) - metadata for the distribution rights fields *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceForm.get_group_metadata_template metadata = dict(self._mdata['distribute_verbatim']) metadata.update({'existing_boolean_values': self._my_map['distributeVerbatim']}) return Metadata(**metadata)
[ "def", "get_distribute_verbatim_metadata", "(", "self", ")", ":", "# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template", "metadata", "=", "dict", "(", "self", ".", "_mdata", "[", "'distribute_verbatim'", "]", ")", "metadata", ".", "update", "(", "{", "'existing_boolean_values'", ":", "self", ".", "_my_map", "[", "'distributeVerbatim'", "]", "}", ")", "return", "Metadata", "(", "*", "*", "metadata", ")" ]
Gets the metadata for the distribute verbatim rights flag. return: (osid.Metadata) - metadata for the distribution rights fields *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "metadata", "for", "the", "distribute", "verbatim", "rights", "flag", "." ]
python
train
46.5
lreis2415/PyGeoC
pygeoc/TauDEM.py
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/TauDEM.py#L507-L518
def moveoutletstostrm(np, flowdir, streamRaster, outlet, modifiedOutlet, workingdir=None, mpiexedir=None, exedir=None, log_file=None, runtime_file=None, hostfile=None): """Run move the given outlets to stream""" fname = TauDEM.func_name('moveoutletstostrm') return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir), {'-p': flowdir, '-src': streamRaster, '-o': outlet}, workingdir, None, {'-om': modifiedOutlet}, {'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np}, {'logfile': log_file, 'runtimefile': runtime_file})
[ "def", "moveoutletstostrm", "(", "np", ",", "flowdir", ",", "streamRaster", ",", "outlet", ",", "modifiedOutlet", ",", "workingdir", "=", "None", ",", "mpiexedir", "=", "None", ",", "exedir", "=", "None", ",", "log_file", "=", "None", ",", "runtime_file", "=", "None", ",", "hostfile", "=", "None", ")", ":", "fname", "=", "TauDEM", ".", "func_name", "(", "'moveoutletstostrm'", ")", "return", "TauDEM", ".", "run", "(", "FileClass", ".", "get_executable_fullpath", "(", "fname", ",", "exedir", ")", ",", "{", "'-p'", ":", "flowdir", ",", "'-src'", ":", "streamRaster", ",", "'-o'", ":", "outlet", "}", ",", "workingdir", ",", "None", ",", "{", "'-om'", ":", "modifiedOutlet", "}", ",", "{", "'mpipath'", ":", "mpiexedir", ",", "'hostfile'", ":", "hostfile", ",", "'n'", ":", "np", "}", ",", "{", "'logfile'", ":", "log_file", ",", "'runtimefile'", ":", "runtime_file", "}", ")" ]
Run move the given outlets to stream
[ "Run", "move", "the", "given", "outlets", "to", "stream" ]
python
train
62.416667
atztogo/phonopy
phonopy/structure/symmetry.py
https://github.com/atztogo/phonopy/blob/869cc2ba9e7d495d5f4cf6942415ab3fc9e2a10f/phonopy/structure/symmetry.py#L300-L313
def find_primitive(cell, symprec=1e-5): """ A primitive cell is searched in the input cell. When a primitive cell is found, an object of Atoms class of the primitive cell is returned. When not, None is returned. """ lattice, positions, numbers = spg.find_primitive(cell.totuple(), symprec) if lattice is None: return None else: return Atoms(numbers=numbers, scaled_positions=positions, cell=lattice, pbc=True)
[ "def", "find_primitive", "(", "cell", ",", "symprec", "=", "1e-5", ")", ":", "lattice", ",", "positions", ",", "numbers", "=", "spg", ".", "find_primitive", "(", "cell", ".", "totuple", "(", ")", ",", "symprec", ")", "if", "lattice", "is", "None", ":", "return", "None", "else", ":", "return", "Atoms", "(", "numbers", "=", "numbers", ",", "scaled_positions", "=", "positions", ",", "cell", "=", "lattice", ",", "pbc", "=", "True", ")" ]
A primitive cell is searched in the input cell. When a primitive cell is found, an object of Atoms class of the primitive cell is returned. When not, None is returned.
[ "A", "primitive", "cell", "is", "searched", "in", "the", "input", "cell", ".", "When", "a", "primitive", "cell", "is", "found", "an", "object", "of", "Atoms", "class", "of", "the", "primitive", "cell", "is", "returned", ".", "When", "not", "None", "is", "returned", "." ]
python
train
36.214286
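The wrapper above delegates the search to spglib. A minimal sketch of calling spglib directly on a conventional FCC aluminium cell, assuming the spglib package is installed (phonopy imports it as spg); the lattice constant is illustrative:

import numpy as np
import spglib

# Conventional cubic cell of FCC aluminium: 4 atoms in the cell.
lattice = 4.05 * np.eye(3)
positions = [[0.0, 0.0, 0.0],
             [0.0, 0.5, 0.5],
             [0.5, 0.0, 0.5],
             [0.5, 0.5, 0.0]]
numbers = [13, 13, 13, 13]

result = spglib.find_primitive((lattice, positions, numbers), symprec=1e-5)
if result is None:
    print("no primitive cell found")
else:
    prim_lattice, prim_positions, prim_numbers = result
    print(len(prim_numbers))  # 1 atom in the primitive FCC cell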
IBMStreams/pypi.streamsx
streamsx/spl/op.py
https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/spl/op.py#L371-L380
def output(self, value): """SPL output port assignment expression. Arguments: value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`. Returns: Expression: Output assignment expression that is valid as a the context of this operator. """ return super(Map, self).output(self.stream, value)
[ "def", "output", "(", "self", ",", "value", ")", ":", "return", "super", "(", "Map", ",", "self", ")", ".", "output", "(", "self", ".", "stream", ",", "value", ")" ]
SPL output port assignment expression. Arguments: value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`. Returns: Expression: Output assignment expression that is valid in the context of this operator.
[ "SPL", "output", "port", "assignment", "expression", "." ]
python
train
41
frasertweedale/ledgertools
ltlib/config.py
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/config.py#L84-L96
def get(self, name, acc=None, default=None): """Return the named config for the given account. If an account is given, first checks the account space for the name. If no account given, or if the name not found in the account space, look for the name in the global config space. If still not found, return the default, if given, otherwise ``None``. """ if acc in self.data['accounts'] and name in self.data['accounts'][acc]: return self.data['accounts'][acc][name] if name in self.data: return self.data[name] return default
[ "def", "get", "(", "self", ",", "name", ",", "acc", "=", "None", ",", "default", "=", "None", ")", ":", "if", "acc", "in", "self", ".", "data", "[", "'accounts'", "]", "and", "name", "in", "self", ".", "data", "[", "'accounts'", "]", "[", "acc", "]", ":", "return", "self", ".", "data", "[", "'accounts'", "]", "[", "acc", "]", "[", "name", "]", "if", "name", "in", "self", ".", "data", ":", "return", "self", ".", "data", "[", "name", "]", "return", "default" ]
Return the named config for the given account. If an account is given, first checks the account space for the name. If no account given, or if the name not found in the account space, look for the name in the global config space. If still not found, return the default, if given, otherwise ``None``.
[ "Return", "the", "named", "config", "for", "the", "given", "account", "." ]
python
train
46.846154
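The account-first, global-second, default-last lookup above is a small layered-configuration pattern. A standalone sketch with a hypothetical config dict:

def layered_get(data, name, acc=None, default=None):
    """Account-specific value first, then the global value, then the default."""
    accounts = data.get('accounts', {})
    if acc in accounts and name in accounts[acc]:
        return accounts[acc][name]
    if name in data:
        return data[name]
    return default

config = {'currency': 'AUD', 'accounts': {'savings': {'currency': 'NZD'}}}
print(layered_get(config, 'currency', acc='savings'))   # NZD (account override)
print(layered_get(config, 'currency', acc='cheque'))    # AUD (global fallback)
print(layered_get(config, 'rounding', default='bank'))  # bank (default)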
bahattincinic/apistar_shell
apistar_shell/commands.py
https://github.com/bahattincinic/apistar_shell/blob/8b291fc514d668d6f8ff159da488adae242a338a/apistar_shell/commands.py#L9-L17
def shell_sqlalchemy(session: SqlalchemySession, backend: ShellBackend): """ This command includes SQLAlchemy DB Session """ namespace = { 'session': session } namespace.update(backend.get_namespace()) embed(user_ns=namespace, header=backend.header)
[ "def", "shell_sqlalchemy", "(", "session", ":", "SqlalchemySession", ",", "backend", ":", "ShellBackend", ")", ":", "namespace", "=", "{", "'session'", ":", "session", "}", "namespace", ".", "update", "(", "backend", ".", "get_namespace", "(", ")", ")", "embed", "(", "user_ns", "=", "namespace", ",", "header", "=", "backend", ".", "header", ")" ]
This command includes SQLAlchemy DB Session
[ "This", "command", "includes", "SQLAlchemy", "DB", "Session" ]
python
train
30.777778
Azure/azure-event-hubs-python
azure/eventprocessorhost/partition_manager.py
https://github.com/Azure/azure-event-hubs-python/blob/737c5f966557ada2cf10fa0d8f3c19671ae96348/azure/eventprocessorhost/partition_manager.py#L313-L318
def count_leases_by_owner(self, leases): # pylint: disable=no-self-use """ Returns a dictionary of leases by current owner. """ owners = [l.owner for l in leases] return dict(Counter(owners))
[ "def", "count_leases_by_owner", "(", "self", ",", "leases", ")", ":", "# pylint: disable=no-self-use", "owners", "=", "[", "l", ".", "owner", "for", "l", "in", "leases", "]", "return", "dict", "(", "Counter", "(", "owners", ")", ")" ]
Returns a dictionary of leases by current owner.
[ "Returns", "a", "dictionary", "of", "leases", "by", "current", "owner", "." ]
python
train
37.833333
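The grouping relies entirely on collections.Counter. A tiny demonstration with hypothetical lease objects that only need an owner attribute:

from collections import Counter
from types import SimpleNamespace

# Hypothetical leases; only the `owner` attribute matters here.
leases = [SimpleNamespace(owner='host-a'),
          SimpleNamespace(owner='host-b'),
          SimpleNamespace(owner='host-a')]

counts = dict(Counter(l.owner for l in leases))
print(counts)  # {'host-a': 2, 'host-b': 1}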
saltstack/salt
salt/modules/gcp_addon.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gcp_addon.py#L49-L54
def _get_network(project_id, network_name, service): ''' Fetch network selfLink from network name. ''' return service.networks().get(project=project_id, network=network_name).execute()
[ "def", "_get_network", "(", "project_id", ",", "network_name", ",", "service", ")", ":", "return", "service", ".", "networks", "(", ")", ".", "get", "(", "project", "=", "project_id", ",", "network", "=", "network_name", ")", ".", "execute", "(", ")" ]
Fetch network selfLink from network name.
[ "Fetch", "network", "selfLink", "from", "network", "name", "." ]
python
train
38.166667
rootpy/rootpy
rootpy/tree/treemodel.py
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/tree/treemodel.py#L104-L120
def get_attrs(cls): """ Get all class attributes ordered by definition """ ignore = dir(type('dummy', (object,), {})) + ['__metaclass__'] attrs = [ item for item in inspect.getmembers(cls) if item[0] not in ignore and not isinstance( item[1], ( types.FunctionType, types.MethodType, classmethod, staticmethod, property))] # sort by idx and use attribute name to break ties attrs.sort(key=lambda attr: (getattr(attr[1], 'idx', -1), attr[0])) return attrs
[ "def", "get_attrs", "(", "cls", ")", ":", "ignore", "=", "dir", "(", "type", "(", "'dummy'", ",", "(", "object", ",", ")", ",", "{", "}", ")", ")", "+", "[", "'__metaclass__'", "]", "attrs", "=", "[", "item", "for", "item", "in", "inspect", ".", "getmembers", "(", "cls", ")", "if", "item", "[", "0", "]", "not", "in", "ignore", "and", "not", "isinstance", "(", "item", "[", "1", "]", ",", "(", "types", ".", "FunctionType", ",", "types", ".", "MethodType", ",", "classmethod", ",", "staticmethod", ",", "property", ")", ")", "]", "# sort by idx and use attribute name to break ties", "attrs", ".", "sort", "(", "key", "=", "lambda", "attr", ":", "(", "getattr", "(", "attr", "[", "1", "]", ",", "'idx'", ",", "-", "1", ")", ",", "attr", "[", "0", "]", ")", ")", "return", "attrs" ]
Get all class attributes ordered by definition
[ "Get", "all", "class", "attributes", "ordered", "by", "definition" ]
python
train
37.705882
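Ordering class attributes by declaration rather than alphabetically depends on each attribute carrying an idx and on the (idx, name) sort key used above. A self-contained sketch with a hypothetical descriptor class, not rootpy's own column types:

import inspect

class Column:
    """Hypothetical descriptor that remembers its declaration order."""
    _counter = 0
    def __init__(self):
        self.idx = Column._counter
        Column._counter += 1

class Model:
    energy = Column()
    pt = Column()
    eta = Column()

# Collect Column attributes and sort by declaration index (idx),
# using the attribute name to break ties.
attrs = [item for item in inspect.getmembers(Model)
         if isinstance(item[1], Column)]
attrs.sort(key=lambda attr: (getattr(attr[1], 'idx', -1), attr[0]))
print([name for name, _ in attrs])  # ['energy', 'pt', 'eta']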
wonambi-python/wonambi
wonambi/widgets/notes.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/notes.py#L649-L691
def update_dataset_marker(self): """Update markers which are in the dataset. It always updates the list of events. Depending on the settings, it might add the markers to overview and traces. """ start_time = self.parent.overview.start_time markers = [] if self.parent.info.markers is not None: markers = self.parent.info.markers self.idx_marker.clearContents() self.idx_marker.setRowCount(len(markers)) for i, mrk in enumerate(markers): abs_time = (start_time + timedelta(seconds=mrk['start'])).strftime('%H:%M:%S') dur = timedelta(seconds=mrk['end'] - mrk['start']) duration = '{0:02d}.{1:03d}'.format(dur.seconds, round(dur.microseconds / 1000)) item_time = QTableWidgetItem(abs_time) item_duration = QTableWidgetItem(duration) item_name = QTableWidgetItem(mrk['name']) color = self.parent.value('marker_color') item_time.setForeground(QColor(color)) item_duration.setForeground(QColor(color)) item_name.setForeground(QColor(color)) self.idx_marker.setItem(i, 0, item_time) self.idx_marker.setItem(i, 1, item_duration) self.idx_marker.setItem(i, 2, item_name) # store information about the time as list (easy to access) marker_start = [mrk['start'] for mrk in markers] marker_end = [mrk['end'] for mrk in markers] self.idx_marker.setProperty('start', marker_start) self.idx_marker.setProperty('end', marker_end) if self.parent.traces.data is not None: self.parent.traces.display() self.parent.overview.display_markers()
[ "def", "update_dataset_marker", "(", "self", ")", ":", "start_time", "=", "self", ".", "parent", ".", "overview", ".", "start_time", "markers", "=", "[", "]", "if", "self", ".", "parent", ".", "info", ".", "markers", "is", "not", "None", ":", "markers", "=", "self", ".", "parent", ".", "info", ".", "markers", "self", ".", "idx_marker", ".", "clearContents", "(", ")", "self", ".", "idx_marker", ".", "setRowCount", "(", "len", "(", "markers", ")", ")", "for", "i", ",", "mrk", "in", "enumerate", "(", "markers", ")", ":", "abs_time", "=", "(", "start_time", "+", "timedelta", "(", "seconds", "=", "mrk", "[", "'start'", "]", ")", ")", ".", "strftime", "(", "'%H:%M:%S'", ")", "dur", "=", "timedelta", "(", "seconds", "=", "mrk", "[", "'end'", "]", "-", "mrk", "[", "'start'", "]", ")", "duration", "=", "'{0:02d}.{1:03d}'", ".", "format", "(", "dur", ".", "seconds", ",", "round", "(", "dur", ".", "microseconds", "/", "1000", ")", ")", "item_time", "=", "QTableWidgetItem", "(", "abs_time", ")", "item_duration", "=", "QTableWidgetItem", "(", "duration", ")", "item_name", "=", "QTableWidgetItem", "(", "mrk", "[", "'name'", "]", ")", "color", "=", "self", ".", "parent", ".", "value", "(", "'marker_color'", ")", "item_time", ".", "setForeground", "(", "QColor", "(", "color", ")", ")", "item_duration", ".", "setForeground", "(", "QColor", "(", "color", ")", ")", "item_name", ".", "setForeground", "(", "QColor", "(", "color", ")", ")", "self", ".", "idx_marker", ".", "setItem", "(", "i", ",", "0", ",", "item_time", ")", "self", ".", "idx_marker", ".", "setItem", "(", "i", ",", "1", ",", "item_duration", ")", "self", ".", "idx_marker", ".", "setItem", "(", "i", ",", "2", ",", "item_name", ")", "# store information about the time as list (easy to access)", "marker_start", "=", "[", "mrk", "[", "'start'", "]", "for", "mrk", "in", "markers", "]", "marker_end", "=", "[", "mrk", "[", "'end'", "]", "for", "mrk", "in", "markers", "]", "self", ".", "idx_marker", ".", "setProperty", "(", "'start'", ",", "marker_start", ")", "self", ".", "idx_marker", ".", "setProperty", "(", "'end'", ",", "marker_end", ")", "if", "self", ".", "parent", ".", "traces", ".", "data", "is", "not", "None", ":", "self", ".", "parent", ".", "traces", ".", "display", "(", ")", "self", ".", "parent", ".", "overview", ".", "display_markers", "(", ")" ]
Update markers which are in the dataset. It always updates the list of events. Depending on the settings, it might add the markers to overview and traces.
[ "Update", "markers", "which", "are", "in", "the", "dataset", ".", "It", "always", "updates", "the", "list", "of", "events", ".", "Depending", "on", "the", "settings", "it", "might", "add", "the", "markers", "to", "overview", "and", "traces", "." ]
python
train
41.27907
materialsproject/pymatgen
pymatgen/io/gaussian.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/gaussian.py#L266-L328
def from_string(contents): """ Creates GaussianInput from a string. Args: contents: String representing an Gaussian input file. Returns: GaussianInput object """ lines = [l.strip() for l in contents.split("\n")] link0_patt = re.compile(r"^(%.+)\s*=\s*(.+)") link0_dict = {} for i, l in enumerate(lines): if link0_patt.match(l): m = link0_patt.match(l) link0_dict[m.group(1).strip("=")] = m.group(2) route_patt = re.compile(r"^#[sSpPnN]*.*") route = "" route_index = None for i, l in enumerate(lines): if route_patt.match(l): route += " " + l route_index = i # This condition allows for route cards spanning multiple lines elif (l == "" or l.isspace()) and route_index: break functional, basis_set, route_paras, dieze_tag = read_route_line(route) ind = 2 title = [] while lines[route_index + ind].strip(): title.append(lines[route_index + ind].strip()) ind += 1 title = ' '.join(title) ind += 1 toks = re.split(r"[,\s]+", lines[route_index + ind]) charge = int(toks[0]) spin_mult = int(toks[1]) coord_lines = [] spaces = 0 input_paras = {} ind += 1 for i in range(route_index + ind, len(lines)): if lines[i].strip() == "": spaces += 1 if spaces >= 2: d = lines[i].split("=") if len(d) == 2: input_paras[d[0]] = d[1] else: coord_lines.append(lines[i].strip()) mol = GaussianInput._parse_coords(coord_lines) mol.set_charge_and_spin(charge, spin_mult) return GaussianInput(mol, charge=charge, spin_multiplicity=spin_mult, title=title, functional=functional, basis_set=basis_set, route_parameters=route_paras, input_parameters=input_paras, link0_parameters=link0_dict, dieze_tag=dieze_tag)
[ "def", "from_string", "(", "contents", ")", ":", "lines", "=", "[", "l", ".", "strip", "(", ")", "for", "l", "in", "contents", ".", "split", "(", "\"\\n\"", ")", "]", "link0_patt", "=", "re", ".", "compile", "(", "r\"^(%.+)\\s*=\\s*(.+)\"", ")", "link0_dict", "=", "{", "}", "for", "i", ",", "l", "in", "enumerate", "(", "lines", ")", ":", "if", "link0_patt", ".", "match", "(", "l", ")", ":", "m", "=", "link0_patt", ".", "match", "(", "l", ")", "link0_dict", "[", "m", ".", "group", "(", "1", ")", ".", "strip", "(", "\"=\"", ")", "]", "=", "m", ".", "group", "(", "2", ")", "route_patt", "=", "re", ".", "compile", "(", "r\"^#[sSpPnN]*.*\"", ")", "route", "=", "\"\"", "route_index", "=", "None", "for", "i", ",", "l", "in", "enumerate", "(", "lines", ")", ":", "if", "route_patt", ".", "match", "(", "l", ")", ":", "route", "+=", "\" \"", "+", "l", "route_index", "=", "i", "# This condition allows for route cards spanning multiple lines", "elif", "(", "l", "==", "\"\"", "or", "l", ".", "isspace", "(", ")", ")", "and", "route_index", ":", "break", "functional", ",", "basis_set", ",", "route_paras", ",", "dieze_tag", "=", "read_route_line", "(", "route", ")", "ind", "=", "2", "title", "=", "[", "]", "while", "lines", "[", "route_index", "+", "ind", "]", ".", "strip", "(", ")", ":", "title", ".", "append", "(", "lines", "[", "route_index", "+", "ind", "]", ".", "strip", "(", ")", ")", "ind", "+=", "1", "title", "=", "' '", ".", "join", "(", "title", ")", "ind", "+=", "1", "toks", "=", "re", ".", "split", "(", "r\"[,\\s]+\"", ",", "lines", "[", "route_index", "+", "ind", "]", ")", "charge", "=", "int", "(", "toks", "[", "0", "]", ")", "spin_mult", "=", "int", "(", "toks", "[", "1", "]", ")", "coord_lines", "=", "[", "]", "spaces", "=", "0", "input_paras", "=", "{", "}", "ind", "+=", "1", "for", "i", "in", "range", "(", "route_index", "+", "ind", ",", "len", "(", "lines", ")", ")", ":", "if", "lines", "[", "i", "]", ".", "strip", "(", ")", "==", "\"\"", ":", "spaces", "+=", "1", "if", "spaces", ">=", "2", ":", "d", "=", "lines", "[", "i", "]", ".", "split", "(", "\"=\"", ")", "if", "len", "(", "d", ")", "==", "2", ":", "input_paras", "[", "d", "[", "0", "]", "]", "=", "d", "[", "1", "]", "else", ":", "coord_lines", ".", "append", "(", "lines", "[", "i", "]", ".", "strip", "(", ")", ")", "mol", "=", "GaussianInput", ".", "_parse_coords", "(", "coord_lines", ")", "mol", ".", "set_charge_and_spin", "(", "charge", ",", "spin_mult", ")", "return", "GaussianInput", "(", "mol", ",", "charge", "=", "charge", ",", "spin_multiplicity", "=", "spin_mult", ",", "title", "=", "title", ",", "functional", "=", "functional", ",", "basis_set", "=", "basis_set", ",", "route_parameters", "=", "route_paras", ",", "input_parameters", "=", "input_paras", ",", "link0_parameters", "=", "link0_dict", ",", "dieze_tag", "=", "dieze_tag", ")" ]
Creates GaussianInput from a string. Args: contents: String representing a Gaussian input file. Returns: GaussianInput object
[ "Creates", "GaussianInput", "from", "a", "string", "." ]
python
train
35.507937
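The route card is located with a regular expression and accumulated until a blank line follows it. A stdlib-only sketch of that scan over a hypothetical, simplified input; it is not a substitute for the full parser above:

import re

route_patt = re.compile(r"^#[sSpPnN]*.*")

lines = [
    "%mem=1GB",
    "#P B3LYP/6-31G(d) opt",
    "# freq",                 # a second route line is appended too
    "",                       # blank line ends the route card
    "water single point",
]

route = ""
route_index = None
for i, line in enumerate(lines):
    if route_patt.match(line):
        route += " " + line
        route_index = i
    elif (line == "" or line.isspace()) and route_index is not None:
        break

print(route.strip())  # '#P B3LYP/6-31G(d) opt # freq'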
alephdata/memorious
memorious/operations/initializers.py
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/operations/initializers.py#L29-L67
def sequence(context, data): """Generate a sequence of numbers. It is the memorious equivalent of the xrange function, accepting the ``start``, ``stop`` and ``step`` parameters. This can run in two ways: * As a single function generating all numbers in the given range. * Recursively, generating numbers one by one with an optional ``delay``. The latter mode is useful in order to generate very large sequences without completely clogging up the user queue. If an optional ``tag`` is given, each number will be emitted only once across multiple runs of the crawler. """ number = data.get('number', context.params.get('start', 1)) stop = context.params.get('stop') step = context.params.get('step', 1) delay = context.params.get('delay') prefix = context.params.get('tag') while True: tag = None if prefix is None else '%s:%s' % (prefix, number) if tag is None or not context.check_tag(tag): context.emit(data={'number': number}) if tag is not None: context.set_tag(tag, True) number = number + step if step > 0 and number >= stop: break if step < 0 and number <= stop: break if delay is not None: context.recurse(data={'number': number}, delay=delay) break
[ "def", "sequence", "(", "context", ",", "data", ")", ":", "number", "=", "data", ".", "get", "(", "'number'", ",", "context", ".", "params", ".", "get", "(", "'start'", ",", "1", ")", ")", "stop", "=", "context", ".", "params", ".", "get", "(", "'stop'", ")", "step", "=", "context", ".", "params", ".", "get", "(", "'step'", ",", "1", ")", "delay", "=", "context", ".", "params", ".", "get", "(", "'delay'", ")", "prefix", "=", "context", ".", "params", ".", "get", "(", "'tag'", ")", "while", "True", ":", "tag", "=", "None", "if", "prefix", "is", "None", "else", "'%s:%s'", "%", "(", "prefix", ",", "number", ")", "if", "tag", "is", "None", "or", "not", "context", ".", "check_tag", "(", "tag", ")", ":", "context", ".", "emit", "(", "data", "=", "{", "'number'", ":", "number", "}", ")", "if", "tag", "is", "not", "None", ":", "context", ".", "set_tag", "(", "tag", ",", "True", ")", "number", "=", "number", "+", "step", "if", "step", ">", "0", "and", "number", ">=", "stop", ":", "break", "if", "step", "<", "0", "and", "number", "<=", "stop", ":", "break", "if", "delay", "is", "not", "None", ":", "context", ".", "recurse", "(", "data", "=", "{", "'number'", ":", "number", "}", ",", "delay", "=", "delay", ")", "break" ]
Generate a sequence of numbers. It is the memorious equivalent of the xrange function, accepting the ``start``, ``stop`` and ``step`` parameters. This can run in two ways: * As a single function generating all numbers in the given range. * Recursively, generating numbers one by one with an optional ``delay``. The latter mode is useful in order to generate very large sequences without completely clogging up the user queue. If an optional ``tag`` is given, each number will be emitted only once across multiple runs of the crawler.
[ "Generate", "a", "sequence", "of", "numbers", "." ]
python
train
33.897436
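Stripped of the crawler context, the single-pass mode of sequence is a plain generator that walks from start towards stop and skips numbers whose tag has already been seen. A sketch with hypothetical names, keeping the same stop semantics as the operation above:

def emit_sequence(start=1, stop=10, step=1, tag_prefix=None, seen_tags=None):
    """Yield numbers from start towards stop, skipping already-tagged ones."""
    seen_tags = seen_tags if seen_tags is not None else set()
    number = start
    while True:
        tag = None if tag_prefix is None else '%s:%s' % (tag_prefix, number)
        if tag is None or tag not in seen_tags:
            yield number
            if tag is not None:
                seen_tags.add(tag)
        number += step
        if step > 0 and number >= stop:
            break
        if step < 0 and number <= stop:
            break

print(list(emit_sequence(start=1, stop=5)))  # [1, 2, 3, 4]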
andreikop/qutepart
qutepart/completer.py
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/completer.py#L375-L387
def _updateWordSet(self): """Make a set of words, which shall be completed, from text """ self._wordSet = set(self._keywords) | set(self._customCompletions) start = time.time() for line in self._qpart.lines: for match in _wordRegExp.findall(line): self._wordSet.add(match) if time.time() - start > self._WORD_SET_UPDATE_MAX_TIME_SEC: """It is better to have incomplete word set, than to freeze the GUI""" break
[ "def", "_updateWordSet", "(", "self", ")", ":", "self", ".", "_wordSet", "=", "set", "(", "self", ".", "_keywords", ")", "|", "set", "(", "self", ".", "_customCompletions", ")", "start", "=", "time", ".", "time", "(", ")", "for", "line", "in", "self", ".", "_qpart", ".", "lines", ":", "for", "match", "in", "_wordRegExp", ".", "findall", "(", "line", ")", ":", "self", ".", "_wordSet", ".", "add", "(", "match", ")", "if", "time", ".", "time", "(", ")", "-", "start", ">", "self", ".", "_WORD_SET_UPDATE_MAX_TIME_SEC", ":", "\"\"\"It is better to have incomplete word set, than to freeze the GUI\"\"\"", "break" ]
Make a set of words, which shall be completed, from text
[ "Make", "a", "set", "of", "words", "which", "shall", "be", "completed", "from", "text" ]
python
train
39.384615
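The same build-a-word-set-under-a-time-budget idea can be shown without Qt. The regex and the time limit below are assumptions, not qutepart's actual values:

import re
import time

_word_re = re.compile(r'\w{3,}')   # assumed: words of 3+ characters
_MAX_TIME_SEC = 0.4                # assumed budget

def build_word_set(lines, keywords=()):
    """Collect candidate completions, stopping early if it takes too long."""
    words = set(keywords)
    start = time.time()
    for line in lines:
        words.update(_word_re.findall(line))
        if time.time() - start > _MAX_TIME_SEC:
            break  # an incomplete word set is better than a frozen UI
    return words

text = ["def frobnicate(widget):", "    return widget.frobnicate_all()"]
print(sorted(build_word_set(text, keywords=['def', 'return'])))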
TissueMAPS/TmClient
src/python/tmclient/api.py
https://github.com/TissueMAPS/TmClient/blob/6fb40622af19142cb5169a64b8c2965993a25ab1/src/python/tmclient/api.py#L645-L674
def rename_acquisition(self, plate_name, name, new_name): '''Renames an acquisition. Parameters ---------- plate_name: str name of the parent plate name: str name of the acquisition that should be renamed new_name: str name that should be given to the acquisition See also -------- :func:`tmserver.api.acquisition.update_acquisition` :class:`tmlib.models.acquisition.Acquisition` ''' logger.info( 'rename acquisistion "%s" of experiment "%s", plate "%s"', name, self.experiment_name, plate_name ) content = {'name': new_name} acquisition_id = self._get_acquisition_id(plate_name, name) url = self._build_api_url( '/experiments/{experiment_id}/acquisitions/{acquisition_id}'.format( experiment_id=self._experiment_id, acquisition_id=acquisition_id ) ) res = self._session.put(url, json=content) res.raise_for_status()
[ "def", "rename_acquisition", "(", "self", ",", "plate_name", ",", "name", ",", "new_name", ")", ":", "logger", ".", "info", "(", "'rename acquisistion \"%s\" of experiment \"%s\", plate \"%s\"'", ",", "name", ",", "self", ".", "experiment_name", ",", "plate_name", ")", "content", "=", "{", "'name'", ":", "new_name", "}", "acquisition_id", "=", "self", ".", "_get_acquisition_id", "(", "plate_name", ",", "name", ")", "url", "=", "self", ".", "_build_api_url", "(", "'/experiments/{experiment_id}/acquisitions/{acquisition_id}'", ".", "format", "(", "experiment_id", "=", "self", ".", "_experiment_id", ",", "acquisition_id", "=", "acquisition_id", ")", ")", "res", "=", "self", ".", "_session", ".", "put", "(", "url", ",", "json", "=", "content", ")", "res", ".", "raise_for_status", "(", ")" ]
Renames an acquisition. Parameters ---------- plate_name: str name of the parent plate name: str name of the acquisition that should be renamed new_name: str name that should be given to the acquisition See also -------- :func:`tmserver.api.acquisition.update_acquisition` :class:`tmlib.models.acquisition.Acquisition`
[ "Renames", "an", "acquisition", "." ]
python
train
34.7
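On the wire this rename is a plain HTTP PUT with a JSON body. A hedged requests sketch with a hypothetical server URL, acquisition id and token, not the real TissueMAPS endpoint values:

import requests

session = requests.Session()
session.headers.update({'Authorization': 'Bearer <token>'})  # hypothetical auth

# Hypothetical experiment/acquisition ids in the URL.
url = 'https://tissuemaps.example.org/api/experiments/1/acquisitions/5'
res = session.put(url, json={'name': 'plate1_run2'})
res.raise_for_status()  # raises requests.HTTPError on a 4xx/5xx response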
google/tangent
tangent/cfg.py
https://github.com/google/tangent/blob/6533e83af09de7345d1b438512679992f080dcc9/tangent/cfg.py#L67-L77
def backlink(node): """Given a CFG with outgoing links, create incoming links.""" seen = set() to_see = [node] while to_see: node = to_see.pop() seen.add(node) for succ in node.next: succ.prev.add(node) if succ not in seen: to_see.append(succ)
[ "def", "backlink", "(", "node", ")", ":", "seen", "=", "set", "(", ")", "to_see", "=", "[", "node", "]", "while", "to_see", ":", "node", "=", "to_see", ".", "pop", "(", ")", "seen", ".", "add", "(", "node", ")", "for", "succ", "in", "node", ".", "next", ":", "succ", ".", "prev", ".", "add", "(", "node", ")", "if", "succ", "not", "in", "seen", ":", "to_see", ".", "append", "(", "succ", ")" ]
Given a CFG with outgoing links, create incoming links.
[ "Given", "a", "CFG", "with", "outgoing", "links", "create", "incoming", "links", "." ]
python
train
26.636364
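The routine above is already self-contained; here is the same worklist-plus-seen-set traversal run against a toy three-node graph (a hypothetical Node class) to show the prev links it produces:

class Node:
    def __init__(self, name):
        self.name = name
        self.next = set()
        self.prev = set()

def backlink(node):
    """Given a CFG with outgoing links, create incoming links."""
    seen = set()
    to_see = [node]
    while to_see:
        node = to_see.pop()
        seen.add(node)
        for succ in node.next:
            succ.prev.add(node)
            if succ not in seen:
                to_see.append(succ)

a, b, c = Node('a'), Node('b'), Node('c')
a.next = {b, c}
b.next = {c}
backlink(a)
print(sorted(n.name for n in c.prev))  # ['a', 'b']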
Synerty/peek-plugin-base
peek_plugin_base/storage/StorageUtil.py
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/storage/StorageUtil.py#L25-L43
def makeOrmValuesSubqueryCondition(ormSession, column, values: List[Union[int, str]]): """ Make Orm Values Subquery :param ormSession: The orm session instance :param column: The column from the Declarative table, eg TableItem.colName :param values: A list of string or int values """ if isPostGreSQLDialect(ormSession.bind): return column.in_(values) if not isMssqlDialect(ormSession.bind): raise NotImplementedError() sql = _createMssqlSqlText(values) sub_qry = ormSession.query(column) # Any column, it just assigns a name sub_qry = sub_qry.from_statement(sql) return column.in_(sub_qry)
[ "def", "makeOrmValuesSubqueryCondition", "(", "ormSession", ",", "column", ",", "values", ":", "List", "[", "Union", "[", "int", ",", "str", "]", "]", ")", ":", "if", "isPostGreSQLDialect", "(", "ormSession", ".", "bind", ")", ":", "return", "column", ".", "in_", "(", "values", ")", "if", "not", "isMssqlDialect", "(", "ormSession", ".", "bind", ")", ":", "raise", "NotImplementedError", "(", ")", "sql", "=", "_createMssqlSqlText", "(", "values", ")", "sub_qry", "=", "ormSession", ".", "query", "(", "column", ")", "# Any column, it just assigns a name", "sub_qry", "=", "sub_qry", ".", "from_statement", "(", "sql", ")", "return", "column", ".", "in_", "(", "sub_qry", ")" ]
Make Orm Values Subquery :param ormSession: The orm session instance :param column: The column from the Declarative table, eg TableItem.colName :param values: A list of string or int values
[ "Make", "Orm", "Values", "Subquery" ]
python
train
33.578947
joferkington/mpldatacursor
mpldatacursor/datacursor.py
https://github.com/joferkington/mpldatacursor/blob/7dabc589ed02c35ac5d89de5931f91e0323aa795/mpldatacursor/datacursor.py#L467-L475
def hide(self): """Hides all annotation artists associated with the DataCursor. Returns self to allow "chaining". (e.g. ``datacursor.hide().disable()``)""" self._hidden = True for artist in self.annotations.values(): artist.set_visible(False) for fig in self.figures: fig.canvas.draw() return self
[ "def", "hide", "(", "self", ")", ":", "self", ".", "_hidden", "=", "True", "for", "artist", "in", "self", ".", "annotations", ".", "values", "(", ")", ":", "artist", ".", "set_visible", "(", "False", ")", "for", "fig", "in", "self", ".", "figures", ":", "fig", ".", "canvas", ".", "draw", "(", ")", "return", "self" ]
Hides all annotation artists associated with the DataCursor. Returns self to allow "chaining". (e.g. ``datacursor.hide().disable()``)
[ "Hides", "all", "annotation", "artists", "associated", "with", "the", "DataCursor", ".", "Returns", "self", "to", "allow", "chaining", ".", "(", "e", ".", "g", ".", "datacursor", ".", "hide", "()", ".", "disable", "()", ")" ]
python
train
40.111111
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/visuals/graphs/util.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/visuals/graphs/util.py#L54-L95
def _straight_line_vertices(adjacency_mat, node_coords, directed=False): """ Generate the vertices for straight lines between nodes. If it is a directed graph, it also generates the vertices which can be passed to an :class:`ArrowVisual`. Parameters ---------- adjacency_mat : array The adjacency matrix of the graph node_coords : array The current coordinates of all nodes in the graph directed : bool Wether the graph is directed. If this is true it will also generate the vertices for arrows which can be passed to :class:`ArrowVisual`. Returns ------- vertices : tuple Returns a tuple containing containing (`line_vertices`, `arrow_vertices`) """ if not issparse(adjacency_mat): adjacency_mat = np.asarray(adjacency_mat, float) if (adjacency_mat.ndim != 2 or adjacency_mat.shape[0] != adjacency_mat.shape[1]): raise ValueError("Adjacency matrix should be square.") arrow_vertices = np.array([]) edges = _get_edges(adjacency_mat) line_vertices = node_coords[edges.ravel()] if directed: arrows = np.array(list(_get_directed_edges(adjacency_mat))) arrow_vertices = node_coords[arrows.ravel()] arrow_vertices = arrow_vertices.reshape((len(arrow_vertices)/2, 4)) return line_vertices, arrow_vertices
[ "def", "_straight_line_vertices", "(", "adjacency_mat", ",", "node_coords", ",", "directed", "=", "False", ")", ":", "if", "not", "issparse", "(", "adjacency_mat", ")", ":", "adjacency_mat", "=", "np", ".", "asarray", "(", "adjacency_mat", ",", "float", ")", "if", "(", "adjacency_mat", ".", "ndim", "!=", "2", "or", "adjacency_mat", ".", "shape", "[", "0", "]", "!=", "adjacency_mat", ".", "shape", "[", "1", "]", ")", ":", "raise", "ValueError", "(", "\"Adjacency matrix should be square.\"", ")", "arrow_vertices", "=", "np", ".", "array", "(", "[", "]", ")", "edges", "=", "_get_edges", "(", "adjacency_mat", ")", "line_vertices", "=", "node_coords", "[", "edges", ".", "ravel", "(", ")", "]", "if", "directed", ":", "arrows", "=", "np", ".", "array", "(", "list", "(", "_get_directed_edges", "(", "adjacency_mat", ")", ")", ")", "arrow_vertices", "=", "node_coords", "[", "arrows", ".", "ravel", "(", ")", "]", "arrow_vertices", "=", "arrow_vertices", ".", "reshape", "(", "(", "len", "(", "arrow_vertices", ")", "/", "2", ",", "4", ")", ")", "return", "line_vertices", ",", "arrow_vertices" ]
Generate the vertices for straight lines between nodes. If it is a directed graph, it also generates the vertices which can be passed to an :class:`ArrowVisual`. Parameters ---------- adjacency_mat : array The adjacency matrix of the graph node_coords : array The current coordinates of all nodes in the graph directed : bool Whether the graph is directed. If this is true it will also generate the vertices for arrows which can be passed to :class:`ArrowVisual`. Returns ------- vertices : tuple Returns a tuple containing (`line_vertices`, `arrow_vertices`)
[ "Generate", "the", "vertices", "for", "straight", "lines", "between", "nodes", "." ]
python
train
32.047619
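The central trick above is NumPy fancy indexing: ravelling the (n_edges, 2) index array and using it to index the coordinate array interleaves start and end points, which is exactly the vertex layout a line visual expects. A toy example on a unit square:

import numpy as np

# Four nodes on a unit square and three undirected edges between them.
node_coords = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
edges = np.array([[0, 1], [1, 2], [2, 3]])

# Indexing with the ravelled edge list interleaves endpoints:
# v0, v1, v1, v2, v2, v3, ...
line_vertices = node_coords[edges.ravel()]
print(line_vertices.shape)  # (6, 2) -- two vertices per edge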
shaiguitar/snowclient.py
snowclient/api.py
https://github.com/shaiguitar/snowclient.py/blob/6bb513576d3b37612a7a4da225140d134f3e1c82/snowclient/api.py#L50-L56
def list(self,table, **kparams): """ get a collection of records by table name. returns a dict (the json map) for python 3.4 """ result = self.table_api_get(table, **kparams) return self.to_records(result, table)
[ "def", "list", "(", "self", ",", "table", ",", "*", "*", "kparams", ")", ":", "result", "=", "self", ".", "table_api_get", "(", "table", ",", "*", "*", "kparams", ")", "return", "self", ".", "to_records", "(", "result", ",", "table", ")" ]
get a collection of records by table name. returns a dict (the json map) for python 3.4
[ "get", "a", "collection", "of", "records", "by", "table", "name", ".", "returns", "a", "dict", "(", "the", "json", "map", ")", "for", "python", "3", ".", "4" ]
python
train
36.285714
ejeschke/ginga
ginga/rv/plugins/Thumbs.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/Thumbs.py#L546-L566
def have_thumbnail(self, fitsimage, image): """Returns True if we already have a thumbnail version of this image cached, False otherwise. """ chname = self.fv.get_channel_name(fitsimage) # Look up our version of the thumb idx = image.get('idx', None) path = image.get('path', None) if path is not None: path = os.path.abspath(path) name = iohelper.name_image_from_path(path, idx=idx) else: name = 'NoName' # get image name name = image.get('name', name) thumbkey = self.get_thumb_key(chname, name, path) with self.thmblock: return thumbkey in self.thumb_dict
[ "def", "have_thumbnail", "(", "self", ",", "fitsimage", ",", "image", ")", ":", "chname", "=", "self", ".", "fv", ".", "get_channel_name", "(", "fitsimage", ")", "# Look up our version of the thumb", "idx", "=", "image", ".", "get", "(", "'idx'", ",", "None", ")", "path", "=", "image", ".", "get", "(", "'path'", ",", "None", ")", "if", "path", "is", "not", "None", ":", "path", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", "name", "=", "iohelper", ".", "name_image_from_path", "(", "path", ",", "idx", "=", "idx", ")", "else", ":", "name", "=", "'NoName'", "# get image name", "name", "=", "image", ".", "get", "(", "'name'", ",", "name", ")", "thumbkey", "=", "self", ".", "get_thumb_key", "(", "chname", ",", "name", ",", "path", ")", "with", "self", ".", "thmblock", ":", "return", "thumbkey", "in", "self", ".", "thumb_dict" ]
Returns True if we already have a thumbnail version of this image cached, False otherwise.
[ "Returns", "True", "if", "we", "already", "have", "a", "thumbnail", "version", "of", "this", "image", "cached", "False", "otherwise", "." ]
python
train
33
HiPERCAM/hcam_widgets
hcam_widgets/hcam.py
https://github.com/HiPERCAM/hcam_widgets/blob/7219f0d96dd3a8ebe3139c7f542a72c02d02fce8/hcam_widgets/hcam.py#L286-L357
def setupNodding(self): """ Setup Nodding for GTC """ g = get_root(self).globals if not self.nod(): # re-enable clear mode box if not drift if not self.isDrift(): self.clear.enable() # clear existing nod pattern self.nodPattern = {} self.check() return # Do nothing if we're not at the GTC if g.cpars['telins_name'] != 'GTC': messagebox.showerror('Error', 'Cannot dither WHT') self.nod.set(False) self.nodPattern = {} return # check for drift mode and bomb out if self.isDrift(): messagebox.showerror('Error', 'Cannot dither telescope in drift mode') self.nod.set(False) self.nodPattern = {} return # check for clear not enabled and warn if not self.clear(): if not messagebox.askokcancel('Warning', 'Dithering telescope will enable clear mode. Continue?'): self.nod.set(False) self.nodPattern = {} return # Ask for nod pattern try: home = expanduser('~') fname = filedialog.askopenfilename( title='Open offsets text file', defaultextension='.txt', filetypes=[('text files', '.txt')], initialdir=home) if not fname: g.clog.warn('Aborted load from disk') raise ValueError ra, dec = np.loadtxt(fname).T if len(ra) != len(dec): g.clog.warn('Mismatched lengths of RA and Dec offsets') raise ValueError data = dict( ra=ra.tolist(), dec=dec.tolist() ) except: g.clog.warn('Setting dither pattern failed. Disabling dithering') self.nod.set(False) self.nodPattern = {} return # store nodding on ipars object self.nodPattern = data # enable clear mode self.clear.set(True) # update self.check()
[ "def", "setupNodding", "(", "self", ")", ":", "g", "=", "get_root", "(", "self", ")", ".", "globals", "if", "not", "self", ".", "nod", "(", ")", ":", "# re-enable clear mode box if not drift", "if", "not", "self", ".", "isDrift", "(", ")", ":", "self", ".", "clear", ".", "enable", "(", ")", "# clear existing nod pattern", "self", ".", "nodPattern", "=", "{", "}", "self", ".", "check", "(", ")", "return", "# Do nothing if we're not at the GTC", "if", "g", ".", "cpars", "[", "'telins_name'", "]", "!=", "'GTC'", ":", "messagebox", ".", "showerror", "(", "'Error'", ",", "'Cannot dither WHT'", ")", "self", ".", "nod", ".", "set", "(", "False", ")", "self", ".", "nodPattern", "=", "{", "}", "return", "# check for drift mode and bomb out", "if", "self", ".", "isDrift", "(", ")", ":", "messagebox", ".", "showerror", "(", "'Error'", ",", "'Cannot dither telescope in drift mode'", ")", "self", ".", "nod", ".", "set", "(", "False", ")", "self", ".", "nodPattern", "=", "{", "}", "return", "# check for clear not enabled and warn", "if", "not", "self", ".", "clear", "(", ")", ":", "if", "not", "messagebox", ".", "askokcancel", "(", "'Warning'", ",", "'Dithering telescope will enable clear mode. Continue?'", ")", ":", "self", ".", "nod", ".", "set", "(", "False", ")", "self", ".", "nodPattern", "=", "{", "}", "return", "# Ask for nod pattern", "try", ":", "home", "=", "expanduser", "(", "'~'", ")", "fname", "=", "filedialog", ".", "askopenfilename", "(", "title", "=", "'Open offsets text file'", ",", "defaultextension", "=", "'.txt'", ",", "filetypes", "=", "[", "(", "'text files'", ",", "'.txt'", ")", "]", ",", "initialdir", "=", "home", ")", "if", "not", "fname", ":", "g", ".", "clog", ".", "warn", "(", "'Aborted load from disk'", ")", "raise", "ValueError", "ra", ",", "dec", "=", "np", ".", "loadtxt", "(", "fname", ")", ".", "T", "if", "len", "(", "ra", ")", "!=", "len", "(", "dec", ")", ":", "g", ".", "clog", ".", "warn", "(", "'Mismatched lengths of RA and Dec offsets'", ")", "raise", "ValueError", "data", "=", "dict", "(", "ra", "=", "ra", ".", "tolist", "(", ")", ",", "dec", "=", "dec", ".", "tolist", "(", ")", ")", "except", ":", "g", ".", "clog", ".", "warn", "(", "'Setting dither pattern failed. Disabling dithering'", ")", "self", ".", "nod", ".", "set", "(", "False", ")", "self", ".", "nodPattern", "=", "{", "}", "return", "# store nodding on ipars object", "self", ".", "nodPattern", "=", "data", "# enable clear mode", "self", ".", "clear", ".", "set", "(", "True", ")", "# update", "self", ".", "check", "(", ")" ]
Setup Nodding for GTC
[ "Setup", "Nodding", "for", "GTC" ]
python
train
30.027778
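The offsets file is read with np.loadtxt and transposed into RA and Dec columns. A sketch of that step which swaps the file dialog for an in-memory buffer; the offset values are illustrative:

import io
import numpy as np

# Two whitespace-separated columns: RA offset, Dec offset (arcseconds).
offsets_txt = io.StringIO("0.0 0.0\n5.0 0.0\n0.0 5.0\n")

ra, dec = np.loadtxt(offsets_txt).T
if len(ra) != len(dec):
    raise ValueError("mismatched lengths of RA and Dec offsets")

pattern = {'ra': ra.tolist(), 'dec': dec.tolist()}
print(pattern)  # {'ra': [0.0, 5.0, 0.0], 'dec': [0.0, 0.0, 5.0]}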
iamteem/redisco
redisco/containers.py
https://github.com/iamteem/redisco/blob/a7ba19ff3c38061d6d8bc0c10fa754baadcfeb91/redisco/containers.py#L173-L181
def copy(self, key): """Copy the set to another key and return the new Set. WARNING: If the key exists, it overwrites it. """ copy = Set(key=key, db=self.db) copy.clear() copy |= self return copy
[ "def", "copy", "(", "self", ",", "key", ")", ":", "copy", "=", "Set", "(", "key", "=", "key", ",", "db", "=", "self", ".", "db", ")", "copy", ".", "clear", "(", ")", "copy", "|=", "self", "return", "copy" ]
Copy the set to another key and return the new Set. WARNING: If the key exists, it overwrites it.
[ "Copy", "the", "set", "to", "another", "key", "and", "return", "the", "new", "Set", "." ]
python
train
27.111111
ml4ai/delphi
delphi/AnalysisGraph.py
https://github.com/ml4ai/delphi/blob/6d03d8aafeab99610387c51b89c99738ff2abbe3/delphi/AnalysisGraph.py#L721-L764
def parameterize( self, country: Optional[str] = "South Sudan", state: Optional[str] = None, year: Optional[int] = None, month: Optional[int] = None, unit: Optional[str] = None, fallback_aggaxes: List[str] = ["year", "month"], aggfunc: Callable = np.mean, ): """ Parameterize the analysis graph. Args: country year month fallback_aggaxes: An iterable of strings denoting the axes upon which to perform fallback aggregation if the desired constraints cannot be met. aggfunc: The function that will be called to perform the aggregation if there are multiple matches. """ valid_axes = ("country", "state", "year", "month") if any(map(lambda axis: axis not in valid_axes, fallback_aggaxes)): raise ValueError( "All elements of the fallback_aggaxes set must be one of the " f"following: {valid_axes}" ) for n in self.nodes(data=True): for indicator in n[1]["indicators"].values(): indicator.mean, indicator.unit = get_indicator_value( indicator, country, state, year, month, unit, fallback_aggaxes, aggfunc, ) indicator.stdev = 0.1 * abs(indicator.mean)
[ "def", "parameterize", "(", "self", ",", "country", ":", "Optional", "[", "str", "]", "=", "\"South Sudan\"", ",", "state", ":", "Optional", "[", "str", "]", "=", "None", ",", "year", ":", "Optional", "[", "int", "]", "=", "None", ",", "month", ":", "Optional", "[", "int", "]", "=", "None", ",", "unit", ":", "Optional", "[", "str", "]", "=", "None", ",", "fallback_aggaxes", ":", "List", "[", "str", "]", "=", "[", "\"year\"", ",", "\"month\"", "]", ",", "aggfunc", ":", "Callable", "=", "np", ".", "mean", ",", ")", ":", "valid_axes", "=", "(", "\"country\"", ",", "\"state\"", ",", "\"year\"", ",", "\"month\"", ")", "if", "any", "(", "map", "(", "lambda", "axis", ":", "axis", "not", "in", "valid_axes", ",", "fallback_aggaxes", ")", ")", ":", "raise", "ValueError", "(", "\"All elements of the fallback_aggaxes set must be one of the \"", "f\"following: {valid_axes}\"", ")", "for", "n", "in", "self", ".", "nodes", "(", "data", "=", "True", ")", ":", "for", "indicator", "in", "n", "[", "1", "]", "[", "\"indicators\"", "]", ".", "values", "(", ")", ":", "indicator", ".", "mean", ",", "indicator", ".", "unit", "=", "get_indicator_value", "(", "indicator", ",", "country", ",", "state", ",", "year", ",", "month", ",", "unit", ",", "fallback_aggaxes", ",", "aggfunc", ",", ")", "indicator", ".", "stdev", "=", "0.1", "*", "abs", "(", "indicator", ".", "mean", ")" ]
Parameterize the analysis graph. Args: country year month fallback_aggaxes: An iterable of strings denoting the axes upon which to perform fallback aggregation if the desired constraints cannot be met. aggfunc: The function that will be called to perform the aggregation if there are multiple matches.
[ "Parameterize", "the", "analysis", "graph", "." ]
python
train
34.295455
cga-harvard/Hypermap-Registry
pavement.py
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/pavement.py#L50-L64
def kill_process(procname, scriptname): """kill WSGI processes that may be running in development""" # from http://stackoverflow.com/a/2940878 import signal import subprocess p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE) out, err = p.communicate() for line in out.decode().splitlines(): if procname in line and scriptname in line: pid = int(line.split()[1]) info('Stopping %s %s %d' % (procname, scriptname, pid)) os.kill(pid, signal.SIGKILL)
[ "def", "kill_process", "(", "procname", ",", "scriptname", ")", ":", "# from http://stackoverflow.com/a/2940878", "import", "signal", "import", "subprocess", "p", "=", "subprocess", ".", "Popen", "(", "[", "'ps'", ",", "'aux'", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "out", ",", "err", "=", "p", ".", "communicate", "(", ")", "for", "line", "in", "out", ".", "decode", "(", ")", ".", "splitlines", "(", ")", ":", "if", "procname", "in", "line", "and", "scriptname", "in", "line", ":", "pid", "=", "int", "(", "line", ".", "split", "(", ")", "[", "1", "]", ")", "info", "(", "'Stopping %s %s %d'", "%", "(", "procname", ",", "scriptname", ",", "pid", ")", ")", "os", ".", "kill", "(", "pid", ",", "signal", ".", "SIGKILL", ")" ]
kill WSGI processes that may be running in development
[ "kill", "WSGI", "processes", "that", "may", "be", "running", "in", "development" ]
python
train
34.466667
nicolargo/glances
glances/stats_client_snmp.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/stats_client_snmp.py#L57-L82
def check_snmp(self): """Chek if SNMP is available on the server.""" # Import the SNMP client class from glances.snmp import GlancesSNMPClient # Create an instance of the SNMP client clientsnmp = GlancesSNMPClient(host=self.args.client, port=self.args.snmp_port, version=self.args.snmp_version, community=self.args.snmp_community, user=self.args.snmp_user, auth=self.args.snmp_auth) # If we cannot grab the hostname, then exit... ret = clientsnmp.get_by_oid("1.3.6.1.2.1.1.5.0") != {} if ret: # Get the OS name (need to grab the good OID...) oid_os_name = clientsnmp.get_by_oid("1.3.6.1.2.1.1.1.0") try: self.system_name = self.get_system_name(oid_os_name['1.3.6.1.2.1.1.1.0']) logger.info("SNMP system name detected: {}".format(self.system_name)) except KeyError: self.system_name = None logger.warning("Cannot detect SNMP system name") return ret
[ "def", "check_snmp", "(", "self", ")", ":", "# Import the SNMP client class", "from", "glances", ".", "snmp", "import", "GlancesSNMPClient", "# Create an instance of the SNMP client", "clientsnmp", "=", "GlancesSNMPClient", "(", "host", "=", "self", ".", "args", ".", "client", ",", "port", "=", "self", ".", "args", ".", "snmp_port", ",", "version", "=", "self", ".", "args", ".", "snmp_version", ",", "community", "=", "self", ".", "args", ".", "snmp_community", ",", "user", "=", "self", ".", "args", ".", "snmp_user", ",", "auth", "=", "self", ".", "args", ".", "snmp_auth", ")", "# If we cannot grab the hostname, then exit...", "ret", "=", "clientsnmp", ".", "get_by_oid", "(", "\"1.3.6.1.2.1.1.5.0\"", ")", "!=", "{", "}", "if", "ret", ":", "# Get the OS name (need to grab the good OID...)", "oid_os_name", "=", "clientsnmp", ".", "get_by_oid", "(", "\"1.3.6.1.2.1.1.1.0\"", ")", "try", ":", "self", ".", "system_name", "=", "self", ".", "get_system_name", "(", "oid_os_name", "[", "'1.3.6.1.2.1.1.1.0'", "]", ")", "logger", ".", "info", "(", "\"SNMP system name detected: {}\"", ".", "format", "(", "self", ".", "system_name", ")", ")", "except", "KeyError", ":", "self", ".", "system_name", "=", "None", "logger", ".", "warning", "(", "\"Cannot detect SNMP system name\"", ")", "return", "ret" ]
Check if SNMP is available on the server.
[ "Chek", "if", "SNMP", "is", "available", "on", "the", "server", "." ]
python
train
46.346154
horazont/aioxmpp
aioxmpp/rsm/xso.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/rsm/xso.py#L239-L254
def next_page(self, max_=None): """ Return a query set which requests the page after this response. :param max_: Maximum number of items to return. :type max_: :class:`int` or :data:`None` :rtype: :class:`ResultSetMetadata` :return: A new request set up to request the next page. Must be called on a result set which has :attr:`last` set. """ result = type(self)() result.after = After(self.last.value) result.max_ = max_ return result
[ "def", "next_page", "(", "self", ",", "max_", "=", "None", ")", ":", "result", "=", "type", "(", "self", ")", "(", ")", "result", ".", "after", "=", "After", "(", "self", ".", "last", ".", "value", ")", "result", ".", "max_", "=", "max_", "return", "result" ]
Return a query set which requests the page after this response. :param max_: Maximum number of items to return. :type max_: :class:`int` or :data:`None` :rtype: :class:`ResultSetMetadata` :return: A new request set up to request the next page. Must be called on a result set which has :attr:`last` set.
[ "Return", "a", "query", "set", "which", "requests", "the", "page", "after", "this", "response", "." ]
python
train
32.4375
gitpython-developers/GitPython
git/config.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/config.py#L376-L437
def read(self): """Reads the data stored in the files we have been initialized with. It will ignore files that cannot be read, possibly leaving an empty configuration :return: Nothing :raise IOError: if a file cannot be handled""" if self._is_initialized: return self._is_initialized = True if not isinstance(self._file_or_files, (tuple, list)): files_to_read = [self._file_or_files] else: files_to_read = list(self._file_or_files) # end assure we have a copy of the paths to handle seen = set(files_to_read) num_read_include_files = 0 while files_to_read: file_path = files_to_read.pop(0) fp = file_path file_ok = False if hasattr(fp, "seek"): self._read(fp, fp.name) else: # assume a path if it is not a file-object try: with open(file_path, 'rb') as fp: file_ok = True self._read(fp, fp.name) except IOError: continue # Read includes and append those that we didn't handle yet # We expect all paths to be normalized and absolute (and will assure that is the case) if self._has_includes(): for _, include_path in self.items('include'): if include_path.startswith('~'): include_path = osp.expanduser(include_path) if not osp.isabs(include_path): if not file_ok: continue # end ignore relative paths if we don't know the configuration file path assert osp.isabs(file_path), "Need absolute paths to be sure our cycle checks will work" include_path = osp.join(osp.dirname(file_path), include_path) # end make include path absolute include_path = osp.normpath(include_path) if include_path in seen or not os.access(include_path, os.R_OK): continue seen.add(include_path) # insert included file to the top to be considered first files_to_read.insert(0, include_path) num_read_include_files += 1 # each include path in configuration file # end handle includes # END for each file object to read # If there was no file included, we can safely write back (potentially) the configuration file # without altering it's meaning if num_read_include_files == 0: self._merge_includes = False
[ "def", "read", "(", "self", ")", ":", "if", "self", ".", "_is_initialized", ":", "return", "self", ".", "_is_initialized", "=", "True", "if", "not", "isinstance", "(", "self", ".", "_file_or_files", ",", "(", "tuple", ",", "list", ")", ")", ":", "files_to_read", "=", "[", "self", ".", "_file_or_files", "]", "else", ":", "files_to_read", "=", "list", "(", "self", ".", "_file_or_files", ")", "# end assure we have a copy of the paths to handle", "seen", "=", "set", "(", "files_to_read", ")", "num_read_include_files", "=", "0", "while", "files_to_read", ":", "file_path", "=", "files_to_read", ".", "pop", "(", "0", ")", "fp", "=", "file_path", "file_ok", "=", "False", "if", "hasattr", "(", "fp", ",", "\"seek\"", ")", ":", "self", ".", "_read", "(", "fp", ",", "fp", ".", "name", ")", "else", ":", "# assume a path if it is not a file-object", "try", ":", "with", "open", "(", "file_path", ",", "'rb'", ")", "as", "fp", ":", "file_ok", "=", "True", "self", ".", "_read", "(", "fp", ",", "fp", ".", "name", ")", "except", "IOError", ":", "continue", "# Read includes and append those that we didn't handle yet", "# We expect all paths to be normalized and absolute (and will assure that is the case)", "if", "self", ".", "_has_includes", "(", ")", ":", "for", "_", ",", "include_path", "in", "self", ".", "items", "(", "'include'", ")", ":", "if", "include_path", ".", "startswith", "(", "'~'", ")", ":", "include_path", "=", "osp", ".", "expanduser", "(", "include_path", ")", "if", "not", "osp", ".", "isabs", "(", "include_path", ")", ":", "if", "not", "file_ok", ":", "continue", "# end ignore relative paths if we don't know the configuration file path", "assert", "osp", ".", "isabs", "(", "file_path", ")", ",", "\"Need absolute paths to be sure our cycle checks will work\"", "include_path", "=", "osp", ".", "join", "(", "osp", ".", "dirname", "(", "file_path", ")", ",", "include_path", ")", "# end make include path absolute", "include_path", "=", "osp", ".", "normpath", "(", "include_path", ")", "if", "include_path", "in", "seen", "or", "not", "os", ".", "access", "(", "include_path", ",", "os", ".", "R_OK", ")", ":", "continue", "seen", ".", "add", "(", "include_path", ")", "# insert included file to the top to be considered first", "files_to_read", ".", "insert", "(", "0", ",", "include_path", ")", "num_read_include_files", "+=", "1", "# each include path in configuration file", "# end handle includes", "# END for each file object to read", "# If there was no file included, we can safely write back (potentially) the configuration file", "# without altering it's meaning", "if", "num_read_include_files", "==", "0", ":", "self", ".", "_merge_includes", "=", "False" ]
Reads the data stored in the files we have been initialized with. It will ignore files that cannot be read, possibly leaving an empty configuration :return: Nothing :raise IOError: if a file cannot be handled
[ "Reads", "the", "data", "stored", "in", "the", "files", "we", "have", "been", "initialized", "with", ".", "It", "will", "ignore", "files", "that", "cannot", "be", "read", "possibly", "leaving", "an", "empty", "configuration" ]
python
train
44.225806
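The include handling above is a worklist with a seen set that both avoids re-reading files and breaks include cycles. A stdlib sketch of that control flow over a hypothetical in-memory mapping of config paths to the includes they declare; real GitPython reads the files and parses [include] sections instead:

import posixpath

# Hypothetical config files mapped to the includes they declare.
includes = {
    '/repo/.git/config': ['~/.gitconfig', 'relative.inc'],
    '/home/user/.gitconfig': ['/etc/gitconfig'],
    '/repo/.git/relative.inc': ['/repo/.git/config'],  # a cycle
    '/etc/gitconfig': [],
}

def expand(path, base):
    path = path.replace('~', '/home/user')      # stand-in for expanduser
    if not posixpath.isabs(path):
        path = posixpath.join(posixpath.dirname(base), path)
    return posixpath.normpath(path)

files_to_read = ['/repo/.git/config']
seen = set(files_to_read)
read_order = []
while files_to_read:
    current = files_to_read.pop(0)
    read_order.append(current)
    for inc in includes.get(current, []):
        inc = expand(inc, current)
        if inc in seen:
            continue  # already queued or read: this breaks the cycle
        seen.add(inc)
        # included files go to the front so they are considered first
        files_to_read.insert(0, inc)

print(read_order)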