column              type            value range / classes
repo                stringlengths   7 to 54
path                stringlengths   4 to 192
url                 stringlengths   87 to 284
code                stringlengths   78 to 104k
code_tokens         list
docstring           stringlengths   1 to 46.9k
docstring_tokens    list
language            stringclasses   1 value
partition           stringclasses   3 values
oceanprotocol/squid-py
squid_py/ddo/service.py
https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/ddo/service.py#L134-L151
def as_dictionary(self):
    """Return the service as a python dictionary."""
    values = {
        'type': self._type,
        self.SERVICE_ENDPOINT: self._service_endpoint,
    }
    if self._consume_endpoint is not None:
        values[self.CONSUME_ENDPOINT] = self._consume_endpoint
    if self._values:
        # add extra service values to the dictionary
        for name, value in self._values.items():
            if isinstance(value, object) and hasattr(value, 'as_dictionary'):
                value = value.as_dictionary()
            elif isinstance(value, list):
                value = [v.as_dictionary() if hasattr(v, 'as_dictionary') else v for v in value]
            values[name] = value
    return values
[ "def", "as_dictionary", "(", "self", ")", ":", "values", "=", "{", "'type'", ":", "self", ".", "_type", ",", "self", ".", "SERVICE_ENDPOINT", ":", "self", ".", "_service_endpoint", ",", "}", "if", "self", ".", "_consume_endpoint", "is", "not", "None", ":", "values", "[", "self", ".", "CONSUME_ENDPOINT", "]", "=", "self", ".", "_consume_endpoint", "if", "self", ".", "_values", ":", "# add extra service values to the dictionary", "for", "name", ",", "value", "in", "self", ".", "_values", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "object", ")", "and", "hasattr", "(", "value", ",", "'as_dictionary'", ")", ":", "value", "=", "value", ".", "as_dictionary", "(", ")", "elif", "isinstance", "(", "value", ",", "list", ")", ":", "value", "=", "[", "v", ".", "as_dictionary", "(", ")", "if", "hasattr", "(", "v", ",", "'as_dictionary'", ")", "else", "v", "for", "v", "in", "value", "]", "values", "[", "name", "]", "=", "value", "return", "values" ]
Return the service as a python dictionary.
[ "Return", "the", "service", "as", "a", "python", "dictionary", "." ]
python
train
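A minimal, standalone sketch of the recursive serialization pattern used above: any nested value exposing as_dictionary() is expanded, lists are expanded element by element, plain values pass through. The ExtraValue class and attribute names are hypothetical stand-ins, not part of squid-py:

# Hypothetical sketch of the recursive serialization behaviour of
# Service.as_dictionary(): nested objects and lists of objects are expanded.
class ExtraValue:
    def __init__(self, data):
        self.data = data

    def as_dictionary(self):
        return {'data': self.data}

def serialize(values):
    out = {}
    for name, value in values.items():
        if hasattr(value, 'as_dictionary'):
            value = value.as_dictionary()
        elif isinstance(value, list):
            value = [v.as_dictionary() if hasattr(v, 'as_dictionary') else v
                     for v in value]
        out[name] = value
    return out

print(serialize({'plain': 1,
                 'nested': ExtraValue('x'),
                 'many': [ExtraValue('a'), 2]}))
# {'plain': 1, 'nested': {'data': 'x'}, 'many': [{'data': 'a'}, 2]}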
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/plotting_helpers.py
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/plotting_helpers.py#L42-L46
def empty_bar_plot(ax):
    ''' Delete all axis ticks and labels '''
    plt.sca(ax)
    plt.setp(plt.gca(), xticks=[], xticklabels=[])
    return ax
[ "def", "empty_bar_plot", "(", "ax", ")", ":", "plt", ".", "sca", "(", "ax", ")", "plt", ".", "setp", "(", "plt", ".", "gca", "(", ")", ",", "xticks", "=", "[", "]", ",", "xticklabels", "=", "[", "]", ")", "return", "ax" ]
Delete all axis ticks and labels
[ "Delete", "all", "axis", "ticks", "and", "labels" ]
python
train
globocom/GloboNetworkAPI-client-python
networkapiclient/Ambiente.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/Ambiente.py#L888-L911
def get_all_rules(self, id_env): """Save an environment rule :param id_env: Environment id :return: Estrutura: :: { 'rules': [{'id': < id >, 'environment': < Environment Object >, 'content': < content >, 'name': < name >, 'custom': < custom > },... ]} :raise AmbienteNaoExisteError: Ambiente não cadastrado. :raise UserNotAuthorizedError: Permissão negada. :raise DataBaseError: Falha na networkapi ao acessar o banco de dados. :raise XMLError: Falha na networkapi ao ler o XML de requisição ou gerar o XML de resposta. """ url = 'rule/all/' + str(id_env) code, xml = self.submit(None, 'GET', url) return self.response(code, xml, ['rules'])
[ "def", "get_all_rules", "(", "self", ",", "id_env", ")", ":", "url", "=", "'rule/all/'", "+", "str", "(", "id_env", ")", "code", ",", "xml", "=", "self", ".", "submit", "(", "None", ",", "'GET'", ",", "url", ")", "return", "self", ".", "response", "(", "code", ",", "xml", ",", "[", "'rules'", "]", ")" ]
Save an environment rule :param id_env: Environment id :return: Estrutura: :: { 'rules': [{'id': < id >, 'environment': < Environment Object >, 'content': < content >, 'name': < name >, 'custom': < custom > },... ]} :raise AmbienteNaoExisteError: Ambiente não cadastrado. :raise UserNotAuthorizedError: Permissão negada. :raise DataBaseError: Falha na networkapi ao acessar o banco de dados. :raise XMLError: Falha na networkapi ao ler o XML de requisição ou gerar o XML de resposta.
[ "Save", "an", "environment", "rule" ]
python
train
wakatime/wakatime
wakatime/stats.py
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/stats.py#L349-L372
def customize_lexer_priority(file_name, accuracy, lexer):
    """Customize lexer priority"""
    priority = lexer.priority
    lexer_name = lexer.name.lower().replace('sharp', '#')
    if lexer_name in LANGUAGES:
        priority = LANGUAGES[lexer_name]
    elif lexer_name == 'matlab':
        available_extensions = extensions_in_same_folder(file_name)
        if '.mat' in available_extensions:
            accuracy += 0.01
        if '.h' not in available_extensions:
            accuracy += 0.01
    elif lexer_name == 'objective-c':
        available_extensions = extensions_in_same_folder(file_name)
        if '.mat' in available_extensions:
            accuracy -= 0.01
        else:
            accuracy += 0.01
        if '.h' in available_extensions:
            accuracy += 0.01
    return (accuracy, priority, lexer)
[ "def", "customize_lexer_priority", "(", "file_name", ",", "accuracy", ",", "lexer", ")", ":", "priority", "=", "lexer", ".", "priority", "lexer_name", "=", "lexer", ".", "name", ".", "lower", "(", ")", ".", "replace", "(", "'sharp'", ",", "'#'", ")", "if", "lexer_name", "in", "LANGUAGES", ":", "priority", "=", "LANGUAGES", "[", "lexer_name", "]", "elif", "lexer_name", "==", "'matlab'", ":", "available_extensions", "=", "extensions_in_same_folder", "(", "file_name", ")", "if", "'.mat'", "in", "available_extensions", ":", "accuracy", "+=", "0.01", "if", "'.h'", "not", "in", "available_extensions", ":", "accuracy", "+=", "0.01", "elif", "lexer_name", "==", "'objective-c'", ":", "available_extensions", "=", "extensions_in_same_folder", "(", "file_name", ")", "if", "'.mat'", "in", "available_extensions", ":", "accuracy", "-=", "0.01", "else", ":", "accuracy", "+=", "0.01", "if", "'.h'", "in", "available_extensions", ":", "accuracy", "+=", "0.01", "return", "(", "accuracy", ",", "priority", ",", "lexer", ")" ]
Customize lexer priority
[ "Customize", "lexer", "priority" ]
python
train
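A self-contained sketch of the accuracy-nudging heuristic above (disambiguating MATLAB from Objective-C by the extensions of sibling files). The helper name and the extension sets are illustrative, not wakatime's API:

# Standalone illustration: a file surrounded by .mat files (and no .h files)
# is nudged toward MATLAB, otherwise toward Objective-C, mirroring the
# +/- 0.01 adjustments in the function above.
def adjust_accuracy(lexer_name, accuracy, available_extensions):
    if lexer_name == 'matlab':
        if '.mat' in available_extensions:
            accuracy += 0.01
        if '.h' not in available_extensions:
            accuracy += 0.01
    elif lexer_name == 'objective-c':
        if '.mat' in available_extensions:
            accuracy -= 0.01
        else:
            accuracy += 0.01
        if '.h' in available_extensions:
            accuracy += 0.01
    return accuracy

print(adjust_accuracy('matlab', 0.5, {'.m', '.mat'}))      # 0.52
print(adjust_accuracy('objective-c', 0.5, {'.m', '.h'}))   # 0.52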
grycap/RADL
radl/radl.py
https://github.com/grycap/RADL/blob/03ccabb0313a48a5aa0e20c1f7983fddcb95e9cb/radl/radl.py#L312-L316
def hasFeature(self, prop, check_softs=False):
    """Return if there is a property with that name."""
    return prop in self.props or (check_softs and any(
        [fs.hasFeature(prop) for fs in self.props.get(SoftFeatures.SOFT, [])]))
[ "def", "hasFeature", "(", "self", ",", "prop", ",", "check_softs", "=", "False", ")", ":", "return", "prop", "in", "self", ".", "props", "or", "(", "check_softs", "and", "any", "(", "[", "fs", ".", "hasFeature", "(", "prop", ")", "for", "fs", "in", "self", ".", "props", ".", "get", "(", "SoftFeatures", ".", "SOFT", ",", "[", "]", ")", "]", ")", ")" ]
Return if there is a property with that name.
[ "Return", "if", "there", "is", "a", "property", "with", "that", "name", "." ]
python
train
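A rough sketch of the props layout this check assumes: a dict keyed by property name, with an optional soft-features entry holding nested feature sets that are searched recursively. The MiniFeatures class and the 'soft' key are hypothetical simplifications, not RADL's actual classes:

# Hypothetical reduction of the props layout assumed by hasFeature().
SOFT = 'soft'

class MiniFeatures:
    def __init__(self, props, softs=()):
        self.props = dict(props)
        if softs:
            self.props[SOFT] = list(softs)

    def hasFeature(self, prop, check_softs=False):
        return prop in self.props or (check_softs and any(
            fs.hasFeature(prop) for fs in self.props.get(SOFT, [])))

inner = MiniFeatures({'memory.size': 512})
outer = MiniFeatures({'cpu.count': 2}, softs=[inner])
print(outer.hasFeature('memory.size'))                    # False
print(outer.hasFeature('memory.size', check_softs=True))  # True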
PMEAL/porespy
porespy/visualization/__plots__.py
https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/visualization/__plots__.py#L6-L40
def show_mesh(mesh): r""" Visualizes the mesh of a region as obtained by ``get_mesh`` function in the ``metrics`` submodule. Parameters ---------- mesh : tuple A mesh returned by ``skimage.measure.marching_cubes`` Returns ------- fig : Matplotlib figure A handle to a matplotlib 3D axis """ lim_max = sp.amax(mesh.verts, axis=0) lim_min = sp.amin(mesh.verts, axis=0) # Display resulting triangular mesh using Matplotlib. fig = plt.figure() ax = fig.add_subplot(111, projection='3d') # Fancy indexing: `verts[faces]` to generate a collection of triangles mesh = Poly3DCollection(mesh.verts[mesh.faces]) mesh.set_edgecolor('k') ax.add_collection3d(mesh) ax.set_xlabel("x-axis") ax.set_ylabel("y-axis") ax.set_zlabel("z-axis") ax.set_xlim(lim_min[0], lim_max[0]) ax.set_ylim(lim_min[1], lim_max[1]) ax.set_zlim(lim_min[2], lim_max[2]) return fig
[ "def", "show_mesh", "(", "mesh", ")", ":", "lim_max", "=", "sp", ".", "amax", "(", "mesh", ".", "verts", ",", "axis", "=", "0", ")", "lim_min", "=", "sp", ".", "amin", "(", "mesh", ".", "verts", ",", "axis", "=", "0", ")", "# Display resulting triangular mesh using Matplotlib.", "fig", "=", "plt", ".", "figure", "(", ")", "ax", "=", "fig", ".", "add_subplot", "(", "111", ",", "projection", "=", "'3d'", ")", "# Fancy indexing: `verts[faces]` to generate a collection of triangles", "mesh", "=", "Poly3DCollection", "(", "mesh", ".", "verts", "[", "mesh", ".", "faces", "]", ")", "mesh", ".", "set_edgecolor", "(", "'k'", ")", "ax", ".", "add_collection3d", "(", "mesh", ")", "ax", ".", "set_xlabel", "(", "\"x-axis\"", ")", "ax", ".", "set_ylabel", "(", "\"y-axis\"", ")", "ax", ".", "set_zlabel", "(", "\"z-axis\"", ")", "ax", ".", "set_xlim", "(", "lim_min", "[", "0", "]", ",", "lim_max", "[", "0", "]", ")", "ax", ".", "set_ylim", "(", "lim_min", "[", "1", "]", ",", "lim_max", "[", "1", "]", ")", "ax", ".", "set_zlim", "(", "lim_min", "[", "2", "]", ",", "lim_max", "[", "2", "]", ")", "return", "fig" ]
r""" Visualizes the mesh of a region as obtained by ``get_mesh`` function in the ``metrics`` submodule. Parameters ---------- mesh : tuple A mesh returned by ``skimage.measure.marching_cubes`` Returns ------- fig : Matplotlib figure A handle to a matplotlib 3D axis
[ "r", "Visualizes", "the", "mesh", "of", "a", "region", "as", "obtained", "by", "get_mesh", "function", "in", "the", "metrics", "submodule", "." ]
python
train
ssato/python-anyconfig
src/anyconfig/backend/ini.py
https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/backend/ini.py#L152-L165
def _dumps_itr(cnf, dkey=DEFAULTSECT):
    """
    :param cnf: Configuration data to dump
    """
    for sect, params in iteritems(cnf):
        yield "[%s]" % sect
        for key, val in iteritems(params):
            if sect != dkey and dkey in cnf and cnf[dkey].get(key) == val:
                continue  # It should be in [DEFAULT] section.
            yield "%s = %s" % (key, _to_s(val))
        yield ''
[ "def", "_dumps_itr", "(", "cnf", ",", "dkey", "=", "DEFAULTSECT", ")", ":", "for", "sect", ",", "params", "in", "iteritems", "(", "cnf", ")", ":", "yield", "\"[%s]\"", "%", "sect", "for", "key", ",", "val", "in", "iteritems", "(", "params", ")", ":", "if", "sect", "!=", "dkey", "and", "dkey", "in", "cnf", "and", "cnf", "[", "dkey", "]", ".", "get", "(", "key", ")", "==", "val", ":", "continue", "# It should be in [DEFAULT] section.", "yield", "\"%s = %s\"", "%", "(", "key", ",", "_to_s", "(", "val", ")", ")", "yield", "''" ]
:param cnf: Configuration data to dump
[ ":", "param", "cnf", ":", "Configuration", "data", "to", "dump" ]
python
train
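A standalone sketch of the [DEFAULT]-section de-duplication this generator performs: keys whose value matches the default section are skipped when other sections are dumped. The configuration dict below is made up for illustration:

# Standalone sketch of the DEFAULT-section de-duplication in _dumps_itr().
from collections import OrderedDict

def dumps_ini(cnf, dkey='DEFAULT'):
    lines = []
    for sect, params in cnf.items():
        lines.append("[%s]" % sect)
        for key, val in params.items():
            if sect != dkey and dkey in cnf and cnf[dkey].get(key) == val:
                continue  # inherited from [DEFAULT]
            lines.append("%s = %s" % (key, val))
        lines.append('')
    return "\n".join(lines)

cnf = OrderedDict([('DEFAULT', {'retries': '3'}),
                   ('server', {'retries': '3', 'host': 'example.org'})])
print(dumps_ini(cnf))
# [DEFAULT]
# retries = 3
#
# [server]
# host = example.org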
saltstack/salt
salt/modules/virt.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/virt.py#L685-L706
def _gen_net_xml(name, bridge, forward, vport, tag=None):
    '''
    Generate the XML string to define a libvirt network
    '''
    context = {
        'name': name,
        'bridge': bridge,
        'forward': forward,
        'vport': vport,
        'tag': tag,
    }
    fn_ = 'libvirt_network.jinja'
    try:
        template = JINJA.get_template(fn_)
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template %s', fn_)
        return ''
    return template.render(**context)
[ "def", "_gen_net_xml", "(", "name", ",", "bridge", ",", "forward", ",", "vport", ",", "tag", "=", "None", ")", ":", "context", "=", "{", "'name'", ":", "name", ",", "'bridge'", ":", "bridge", ",", "'forward'", ":", "forward", ",", "'vport'", ":", "vport", ",", "'tag'", ":", "tag", ",", "}", "fn_", "=", "'libvirt_network.jinja'", "try", ":", "template", "=", "JINJA", ".", "get_template", "(", "fn_", ")", "except", "jinja2", ".", "exceptions", ".", "TemplateNotFound", ":", "log", ".", "error", "(", "'Could not load template %s'", ",", "fn_", ")", "return", "''", "return", "template", ".", "render", "(", "*", "*", "context", ")" ]
Generate the XML string to define a libvirt network
[ "Generate", "the", "XML", "string", "to", "define", "a", "libvirt", "network" ]
python
train
shymonk/django-datatable
table/views.py
https://github.com/shymonk/django-datatable/blob/f20a6ed2ce31aa7488ff85b4b0e80fe1ad94ec44/table/views.py#L119-L149
def get_context_data(self, **kwargs): """ Get context data for datatable server-side response. See http://www.datatables.net/usage/server-side """ sEcho = self.query_data["sEcho"] context = super(BaseListView, self).get_context_data(**kwargs) queryset = context["object_list"] if queryset is not None: total_length = self.get_queryset_length(queryset) queryset = self.filter_queryset(queryset) display_length = self.get_queryset_length(queryset) queryset = self.sort_queryset(queryset) queryset = self.paging_queryset(queryset) values_list = self.convert_queryset_to_values_list(queryset) context = { "sEcho": sEcho, "iTotalRecords": total_length, "iTotalDisplayRecords": display_length, "aaData": values_list, } else: context = { "sEcho": sEcho, "iTotalRecords": 0, "iTotalDisplayRecords": 0, "aaData": [], } return context
[ "def", "get_context_data", "(", "self", ",", "*", "*", "kwargs", ")", ":", "sEcho", "=", "self", ".", "query_data", "[", "\"sEcho\"", "]", "context", "=", "super", "(", "BaseListView", ",", "self", ")", ".", "get_context_data", "(", "*", "*", "kwargs", ")", "queryset", "=", "context", "[", "\"object_list\"", "]", "if", "queryset", "is", "not", "None", ":", "total_length", "=", "self", ".", "get_queryset_length", "(", "queryset", ")", "queryset", "=", "self", ".", "filter_queryset", "(", "queryset", ")", "display_length", "=", "self", ".", "get_queryset_length", "(", "queryset", ")", "queryset", "=", "self", ".", "sort_queryset", "(", "queryset", ")", "queryset", "=", "self", ".", "paging_queryset", "(", "queryset", ")", "values_list", "=", "self", ".", "convert_queryset_to_values_list", "(", "queryset", ")", "context", "=", "{", "\"sEcho\"", ":", "sEcho", ",", "\"iTotalRecords\"", ":", "total_length", ",", "\"iTotalDisplayRecords\"", ":", "display_length", ",", "\"aaData\"", ":", "values_list", ",", "}", "else", ":", "context", "=", "{", "\"sEcho\"", ":", "sEcho", ",", "\"iTotalRecords\"", ":", "0", ",", "\"iTotalDisplayRecords\"", ":", "0", ",", "\"aaData\"", ":", "[", "]", ",", "}", "return", "context" ]
Get context data for datatable server-side response. See http://www.datatables.net/usage/server-side
[ "Get", "context", "data", "for", "datatable", "server", "-", "side", "response", ".", "See", "http", ":", "//", "www", ".", "datatables", ".", "net", "/", "usage", "/", "server", "-", "side" ]
python
valid
gwastro/pycbc-glue
pycbc_glue/ligolw/utils/segments.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/ligolw/utils/segments.py#L504-L517
def has_segment_tables(xmldoc, name = None):
    """
    Return True if the document contains a complete set of segment
    tables.  Returns False otherwise.  If name is given and not None
    then the return value is True only if the document's segment
    tables, if present, contain a segment list by that name.
    """
    try:
        names = lsctables.SegmentDefTable.get_table(xmldoc).getColumnByName("name")
        lsctables.SegmentTable.get_table(xmldoc)
        lsctables.SegmentSumTable.get_table(xmldoc)
    except (ValueError, KeyError):
        return False
    return name is None or name in names
[ "def", "has_segment_tables", "(", "xmldoc", ",", "name", "=", "None", ")", ":", "try", ":", "names", "=", "lsctables", ".", "SegmentDefTable", ".", "get_table", "(", "xmldoc", ")", ".", "getColumnByName", "(", "\"name\"", ")", "lsctables", ".", "SegmentTable", ".", "get_table", "(", "xmldoc", ")", "lsctables", ".", "SegmentSumTable", ".", "get_table", "(", "xmldoc", ")", "except", "(", "ValueError", ",", "KeyError", ")", ":", "return", "False", "return", "name", "is", "None", "or", "name", "in", "names" ]
Return True if the document contains a complete set of segment tables. Returns False otherwise. If name is given and not None then the return value is True only if the document's segment tables, if present, contain a segment list by that name.
[ "Return", "True", "if", "the", "document", "contains", "a", "complete", "set", "of", "segment", "tables", ".", "Returns", "False", "otherwise", ".", "If", "name", "is", "given", "and", "not", "None", "then", "the", "return", "value", "is", "True", "only", "if", "the", "document", "s", "segment", "tables", "if", "present", "contain", "a", "segment", "list", "by", "that", "name", "." ]
python
train
EpistasisLab/tpot
tpot/builtins/feature_set_selector.py
https://github.com/EpistasisLab/tpot/blob/b626271e6b5896a73fb9d7d29bebc7aa9100772e/tpot/builtins/feature_set_selector.py#L66-L114
def fit(self, X, y=None): """Fit FeatureSetSelector for feature selection Parameters ---------- X: array-like of shape (n_samples, n_features) The training input samples. y: array-like, shape (n_samples,) The target values (integers that correspond to classes in classification, real numbers in regression). Returns ------- self: object Returns a copy of the estimator """ subset_df = pd.read_csv(self.subset_list, header=0, index_col=0) if isinstance(self.sel_subset, int): self.sel_subset_name = subset_df.index[self.sel_subset] elif isinstance(self.sel_subset, str): self.sel_subset_name = self.sel_subset else: # list or tuple self.sel_subset_name = [] for s in self.sel_subset: if isinstance(s, int): self.sel_subset_name.append(subset_df.index[s]) else: self.sel_subset_name.append(s) sel_features = subset_df.loc[self.sel_subset_name, 'Features'] if not isinstance(sel_features, str): sel_features = ";".join(sel_features.tolist()) sel_uniq_features = set(sel_features.split(';')) if isinstance(X, pd.DataFrame): # use columns' names self.feature_names = list(X.columns.values) self.feat_list = sorted(list(set(sel_uniq_features).intersection(set(self.feature_names)))) self.feat_list_idx = [list(X.columns).index(feat_name) for feat_name in self.feat_list] elif isinstance(X, np.ndarray): # use index self.feature_names = list(range(X.shape[1])) sel_uniq_features = [int(val) for val in sel_uniq_features] self.feat_list = sorted(list(set(sel_uniq_features).intersection(set(self.feature_names)))) self.feat_list_idx = self.feat_list if not len(self.feat_list): raise ValueError('No feature is found on the subset list!') return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "subset_df", "=", "pd", ".", "read_csv", "(", "self", ".", "subset_list", ",", "header", "=", "0", ",", "index_col", "=", "0", ")", "if", "isinstance", "(", "self", ".", "sel_subset", ",", "int", ")", ":", "self", ".", "sel_subset_name", "=", "subset_df", ".", "index", "[", "self", ".", "sel_subset", "]", "elif", "isinstance", "(", "self", ".", "sel_subset", ",", "str", ")", ":", "self", ".", "sel_subset_name", "=", "self", ".", "sel_subset", "else", ":", "# list or tuple", "self", ".", "sel_subset_name", "=", "[", "]", "for", "s", "in", "self", ".", "sel_subset", ":", "if", "isinstance", "(", "s", ",", "int", ")", ":", "self", ".", "sel_subset_name", ".", "append", "(", "subset_df", ".", "index", "[", "s", "]", ")", "else", ":", "self", ".", "sel_subset_name", ".", "append", "(", "s", ")", "sel_features", "=", "subset_df", ".", "loc", "[", "self", ".", "sel_subset_name", ",", "'Features'", "]", "if", "not", "isinstance", "(", "sel_features", ",", "str", ")", ":", "sel_features", "=", "\";\"", ".", "join", "(", "sel_features", ".", "tolist", "(", ")", ")", "sel_uniq_features", "=", "set", "(", "sel_features", ".", "split", "(", "';'", ")", ")", "if", "isinstance", "(", "X", ",", "pd", ".", "DataFrame", ")", ":", "# use columns' names", "self", ".", "feature_names", "=", "list", "(", "X", ".", "columns", ".", "values", ")", "self", ".", "feat_list", "=", "sorted", "(", "list", "(", "set", "(", "sel_uniq_features", ")", ".", "intersection", "(", "set", "(", "self", ".", "feature_names", ")", ")", ")", ")", "self", ".", "feat_list_idx", "=", "[", "list", "(", "X", ".", "columns", ")", ".", "index", "(", "feat_name", ")", "for", "feat_name", "in", "self", ".", "feat_list", "]", "elif", "isinstance", "(", "X", ",", "np", ".", "ndarray", ")", ":", "# use index", "self", ".", "feature_names", "=", "list", "(", "range", "(", "X", ".", "shape", "[", "1", "]", ")", ")", "sel_uniq_features", "=", "[", "int", "(", "val", ")", "for", "val", "in", "sel_uniq_features", "]", "self", ".", "feat_list", "=", "sorted", "(", "list", "(", "set", "(", "sel_uniq_features", ")", ".", "intersection", "(", "set", "(", "self", ".", "feature_names", ")", ")", ")", ")", "self", ".", "feat_list_idx", "=", "self", ".", "feat_list", "if", "not", "len", "(", "self", ".", "feat_list", ")", ":", "raise", "ValueError", "(", "'No feature is found on the subset list!'", ")", "return", "self" ]
Fit FeatureSetSelector for feature selection Parameters ---------- X: array-like of shape (n_samples, n_features) The training input samples. y: array-like, shape (n_samples,) The target values (integers that correspond to classes in classification, real numbers in regression). Returns ------- self: object Returns a copy of the estimator
[ "Fit", "FeatureSetSelector", "for", "feature", "selection" ]
python
train
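A hedged usage sketch of the subset-list format fit() reads: an index column of subset names plus a 'Features' column of semicolon-separated feature names. The subset names, feature names, and data below are invented for illustration; only the CSV layout follows the code above:

# Hedged sketch of the subset CSV format consumed by FeatureSetSelector.fit().
import pandas as pd
from io import StringIO

subset_csv = StringIO(
    "Subset,Size,Features\n"
    "clinical,2,age;bmi\n"
    "labs,2,glucose;hdl\n")
subset_df = pd.read_csv(subset_csv, header=0, index_col=0)

X = pd.DataFrame({'age': [30, 40], 'bmi': [22.0, 27.5], 'glucose': [90, 110]})

# Same selection logic as fit(): split the ';'-separated list and keep only
# the features that actually exist in X.
sel_features = subset_df.loc['clinical', 'Features']
feat_list = sorted(set(sel_features.split(';')) & set(X.columns))
print(feat_list)              # ['age', 'bmi']
print(X[feat_list].shape)     # (2, 2)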
log2timeline/plaso
plaso/storage/interface.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/storage/interface.py#L1683-L1701
def ReadPreprocessingInformation(self, knowledge_base):
    """Reads preprocessing information.

    The preprocessing information contains the system configuration which
    contains information about various system specific configuration data,
    for example the user accounts.

    Args:
      knowledge_base (KnowledgeBase): is used to store the preprocessing
          information.

    Raises:
      IOError: when the storage writer is closed.
      OSError: when the storage writer is closed.
    """
    if not self._storage_file:
        raise IOError('Unable to read from closed storage writer.')

    self._storage_file.ReadPreprocessingInformation(knowledge_base)
[ "def", "ReadPreprocessingInformation", "(", "self", ",", "knowledge_base", ")", ":", "if", "not", "self", ".", "_storage_file", ":", "raise", "IOError", "(", "'Unable to read from closed storage writer.'", ")", "self", ".", "_storage_file", ".", "ReadPreprocessingInformation", "(", "knowledge_base", ")" ]
Reads preprocessing information. The preprocessing information contains the system configuration which contains information about various system specific configuration data, for example the user accounts. Args: knowledge_base (KnowledgeBase): is used to store the preprocessing information. Raises: IOError: when the storage writer is closed. OSError: when the storage writer is closed.
[ "Reads", "preprocessing", "information", "." ]
python
train
apache/spark
python/pyspark/sql/context.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/context.py#L194-L203
def registerJavaFunction(self, name, javaClassName, returnType=None):
    """An alias for :func:`spark.udf.registerJavaFunction`.
    See :meth:`pyspark.sql.UDFRegistration.registerJavaFunction`.

    .. note:: Deprecated in 2.3.0. Use :func:`spark.udf.registerJavaFunction` instead.
    """
    warnings.warn(
        "Deprecated in 2.3.0. Use spark.udf.registerJavaFunction instead.",
        DeprecationWarning)
    return self.sparkSession.udf.registerJavaFunction(name, javaClassName, returnType)
[ "def", "registerJavaFunction", "(", "self", ",", "name", ",", "javaClassName", ",", "returnType", "=", "None", ")", ":", "warnings", ".", "warn", "(", "\"Deprecated in 2.3.0. Use spark.udf.registerJavaFunction instead.\"", ",", "DeprecationWarning", ")", "return", "self", ".", "sparkSession", ".", "udf", ".", "registerJavaFunction", "(", "name", ",", "javaClassName", ",", "returnType", ")" ]
An alias for :func:`spark.udf.registerJavaFunction`. See :meth:`pyspark.sql.UDFRegistration.registerJavaFunction`. .. note:: Deprecated in 2.3.0. Use :func:`spark.udf.registerJavaFunction` instead.
[ "An", "alias", "for", ":", "func", ":", "spark", ".", "udf", ".", "registerJavaFunction", ".", "See", ":", "meth", ":", "pyspark", ".", "sql", ".", "UDFRegistration", ".", "registerJavaFunction", "." ]
python
train
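A hedged usage sketch of the non-deprecated path the docstring points to. The Java class name is a placeholder: it would have to be a UDF implementation available on the Spark JVM classpath, and a running Spark installation is assumed:

# Hedged sketch of the replacement API the deprecation warning points to.
# 'com.example.StringLengthUDF' is a placeholder Java class.
from pyspark.sql import SparkSession
from pyspark.sql.types import IntegerType

spark = SparkSession.builder.appName("udf-demo").getOrCreate()
spark.udf.registerJavaFunction("strLen", "com.example.StringLengthUDF",
                               IntegerType())
spark.sql("SELECT strLen('hello')").show()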
b3j0f/utils
b3j0f/utils/runtime.py
https://github.com/b3j0f/utils/blob/793871b98e90fd1c7ce9ef0dce839cc18fcbc6ff/b3j0f/utils/runtime.py#L279-L309
def getcodeobj(consts, intcode, newcodeobj, oldcodeobj): """Get code object from decompiled code. :param list consts: constants to add in the result. :param list intcode: list of byte code to use. :param newcodeobj: new code object with empty body. :param oldcodeobj: old code object. :return: new code object to produce.""" # get code string if PY3: codestr = bytes(intcode) else: codestr = reduce(lambda x, y: x + y, (chr(b) for b in intcode)) # get vargs vargs = [ newcodeobj.co_argcount, newcodeobj.co_nlocals, newcodeobj.co_stacksize, newcodeobj.co_flags, codestr, tuple(consts), newcodeobj.co_names, newcodeobj.co_varnames, newcodeobj.co_filename, newcodeobj.co_name, newcodeobj.co_firstlineno, newcodeobj.co_lnotab, oldcodeobj.co_freevars, newcodeobj.co_cellvars ] if PY3: vargs.insert(1, newcodeobj.co_kwonlyargcount) # instanciate a new newcodeobj object result = type(newcodeobj)(*vargs) return result
[ "def", "getcodeobj", "(", "consts", ",", "intcode", ",", "newcodeobj", ",", "oldcodeobj", ")", ":", "# get code string", "if", "PY3", ":", "codestr", "=", "bytes", "(", "intcode", ")", "else", ":", "codestr", "=", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "+", "y", ",", "(", "chr", "(", "b", ")", "for", "b", "in", "intcode", ")", ")", "# get vargs", "vargs", "=", "[", "newcodeobj", ".", "co_argcount", ",", "newcodeobj", ".", "co_nlocals", ",", "newcodeobj", ".", "co_stacksize", ",", "newcodeobj", ".", "co_flags", ",", "codestr", ",", "tuple", "(", "consts", ")", ",", "newcodeobj", ".", "co_names", ",", "newcodeobj", ".", "co_varnames", ",", "newcodeobj", ".", "co_filename", ",", "newcodeobj", ".", "co_name", ",", "newcodeobj", ".", "co_firstlineno", ",", "newcodeobj", ".", "co_lnotab", ",", "oldcodeobj", ".", "co_freevars", ",", "newcodeobj", ".", "co_cellvars", "]", "if", "PY3", ":", "vargs", ".", "insert", "(", "1", ",", "newcodeobj", ".", "co_kwonlyargcount", ")", "# instanciate a new newcodeobj object", "result", "=", "type", "(", "newcodeobj", ")", "(", "*", "vargs", ")", "return", "result" ]
Get code object from decompiled code. :param list consts: constants to add in the result. :param list intcode: list of byte code to use. :param newcodeobj: new code object with empty body. :param oldcodeobj: old code object. :return: new code object to produce.
[ "Get", "code", "object", "from", "decompiled", "code", "." ]
python
train
allenai/allennlp
allennlp/semparse/domain_languages/nlvr_language.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/nlvr_language.py#L357-L367
def bottom(self, objects: Set[Object]) -> Set[Object]:
    """
    Return the bottom most objects(i.e. maximum y_loc). The comparison
    is done separately for each box.
    """
    objects_per_box = self._separate_objects_by_boxes(objects)
    return_set: Set[Object] = set()
    for _, box_objects in objects_per_box.items():
        max_y_loc = max([obj.y_loc for obj in box_objects])
        return_set.update(set([obj for obj in box_objects if obj.y_loc == max_y_loc]))
    return return_set
[ "def", "bottom", "(", "self", ",", "objects", ":", "Set", "[", "Object", "]", ")", "->", "Set", "[", "Object", "]", ":", "objects_per_box", "=", "self", ".", "_separate_objects_by_boxes", "(", "objects", ")", "return_set", ":", "Set", "[", "Object", "]", "=", "set", "(", ")", "for", "_", ",", "box_objects", "in", "objects_per_box", ".", "items", "(", ")", ":", "max_y_loc", "=", "max", "(", "[", "obj", ".", "y_loc", "for", "obj", "in", "box_objects", "]", ")", "return_set", ".", "update", "(", "set", "(", "[", "obj", "for", "obj", "in", "box_objects", "if", "obj", ".", "y_loc", "==", "max_y_loc", "]", ")", ")", "return", "return_set" ]
Return the bottom most objects(i.e. maximum y_loc). The comparison is done separately for each box.
[ "Return", "the", "bottom", "most", "objects", "(", "i", ".", "e", ".", "maximum", "y_loc", ")", ".", "The", "comparison", "is", "done", "separately", "for", "each", "box", "." ]
python
train
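A simplified, standalone reimplementation of the per-box "bottom-most" grouping, using namedtuples instead of AllenNLP's Object/Box classes; the Obj fields and values are illustrative only:

# Simplified stand-in: per box, keep only the objects with the maximum y_loc
# (screen coordinates grow downward, so the largest y_loc is the bottom-most).
from collections import defaultdict, namedtuple

Obj = namedtuple('Obj', ['box_id', 'y_loc'])

def bottom(objects):
    per_box = defaultdict(list)
    for obj in objects:
        per_box[obj.box_id].append(obj)
    result = set()
    for box_objects in per_box.values():
        max_y = max(o.y_loc for o in box_objects)
        result.update(o for o in box_objects if o.y_loc == max_y)
    return result

objs = {Obj(0, 10), Obj(0, 80), Obj(1, 55)}
print(sorted(bottom(objs)))
# [Obj(box_id=0, y_loc=80), Obj(box_id=1, y_loc=55)]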
openstack/networking-cisco
networking_cisco/ml2_drivers/nexus/mech_cisco_nexus.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/ml2_drivers/nexus/mech_cisco_nexus.py#L1452-L1522
def _delete_port_channel_resources(self, host_id, switch_ip, intf_type, nexus_port, port_id): '''This determines if port channel id needs to be freed.''' # if this connection is not a port-channel, nothing to do. if intf_type != 'port-channel': return # Check if this driver created it and its no longer needed. try: vpc = nxos_db.get_switch_vpc_alloc( switch_ip, nexus_port) except excep.NexusVPCAllocNotFound: # This can occur for non-baremetal configured # port-channels. Nothing more to do. LOG.debug("Switch %s portchannel %s vpc entry not " "found in vpcid alloc table.", switch_ip, nexus_port) return # if this isn't one which was allocated or learned, # don't do any further processing. if not vpc.active: LOG.debug("Switch %s portchannel %s vpc entry not " "active.", switch_ip, nexus_port) return # Is this port-channel still in use? # If so, nothing more to do. try: nxos_db.get_nexus_switchport_binding(port_id, switch_ip) LOG.debug("Switch %s portchannel %s port entries " "in use. Skipping port-channel clean-up.", switch_ip, nexus_port) return except excep.NexusPortBindingNotFound: pass # need to get ethernet interface name try: mapping = nxos_db.get_switch_and_host_mappings( host_id, switch_ip) eth_type, eth_port = nexus_help.split_interface_name( mapping[0].if_id) except excep.NexusHostMappingNotFound: LOG.warning("Switch %s hostid %s host_mapping not " "found. Skipping port-channel clean-up.", switch_ip, host_id) return # Remove the channel group from ethernet interface # and remove port channel from this switch. if not vpc.learned: self.driver.delete_ch_grp_to_interface( switch_ip, eth_type, eth_port, nexus_port) self.driver.delete_port_channel(switch_ip, nexus_port) try: nxos_db.free_vpcid_for_switch(nexus_port, switch_ip) LOG.info("Released portchannel %s resources for " "switch %s", nexus_port, switch_ip) except excep.NexusVPCAllocNotFound: # Not all learned port channels will be in this db when # they're outside the configured vpc_pool so # this exception may be possible. LOG.warning("Failed to free vpcid %s for switch %s " "since it did not exist in table.", nexus_port, switch_ip)
[ "def", "_delete_port_channel_resources", "(", "self", ",", "host_id", ",", "switch_ip", ",", "intf_type", ",", "nexus_port", ",", "port_id", ")", ":", "# if this connection is not a port-channel, nothing to do.", "if", "intf_type", "!=", "'port-channel'", ":", "return", "# Check if this driver created it and its no longer needed.", "try", ":", "vpc", "=", "nxos_db", ".", "get_switch_vpc_alloc", "(", "switch_ip", ",", "nexus_port", ")", "except", "excep", ".", "NexusVPCAllocNotFound", ":", "# This can occur for non-baremetal configured", "# port-channels. Nothing more to do.", "LOG", ".", "debug", "(", "\"Switch %s portchannel %s vpc entry not \"", "\"found in vpcid alloc table.\"", ",", "switch_ip", ",", "nexus_port", ")", "return", "# if this isn't one which was allocated or learned,", "# don't do any further processing.", "if", "not", "vpc", ".", "active", ":", "LOG", ".", "debug", "(", "\"Switch %s portchannel %s vpc entry not \"", "\"active.\"", ",", "switch_ip", ",", "nexus_port", ")", "return", "# Is this port-channel still in use?", "# If so, nothing more to do.", "try", ":", "nxos_db", ".", "get_nexus_switchport_binding", "(", "port_id", ",", "switch_ip", ")", "LOG", ".", "debug", "(", "\"Switch %s portchannel %s port entries \"", "\"in use. Skipping port-channel clean-up.\"", ",", "switch_ip", ",", "nexus_port", ")", "return", "except", "excep", ".", "NexusPortBindingNotFound", ":", "pass", "# need to get ethernet interface name", "try", ":", "mapping", "=", "nxos_db", ".", "get_switch_and_host_mappings", "(", "host_id", ",", "switch_ip", ")", "eth_type", ",", "eth_port", "=", "nexus_help", ".", "split_interface_name", "(", "mapping", "[", "0", "]", ".", "if_id", ")", "except", "excep", ".", "NexusHostMappingNotFound", ":", "LOG", ".", "warning", "(", "\"Switch %s hostid %s host_mapping not \"", "\"found. Skipping port-channel clean-up.\"", ",", "switch_ip", ",", "host_id", ")", "return", "# Remove the channel group from ethernet interface", "# and remove port channel from this switch.", "if", "not", "vpc", ".", "learned", ":", "self", ".", "driver", ".", "delete_ch_grp_to_interface", "(", "switch_ip", ",", "eth_type", ",", "eth_port", ",", "nexus_port", ")", "self", ".", "driver", ".", "delete_port_channel", "(", "switch_ip", ",", "nexus_port", ")", "try", ":", "nxos_db", ".", "free_vpcid_for_switch", "(", "nexus_port", ",", "switch_ip", ")", "LOG", ".", "info", "(", "\"Released portchannel %s resources for \"", "\"switch %s\"", ",", "nexus_port", ",", "switch_ip", ")", "except", "excep", ".", "NexusVPCAllocNotFound", ":", "# Not all learned port channels will be in this db when", "# they're outside the configured vpc_pool so", "# this exception may be possible.", "LOG", ".", "warning", "(", "\"Failed to free vpcid %s for switch %s \"", "\"since it did not exist in table.\"", ",", "nexus_port", ",", "switch_ip", ")" ]
This determines if port channel id needs to be freed.
[ "This", "determines", "if", "port", "channel", "id", "needs", "to", "be", "freed", "." ]
python
train
bibanon/BASC-py4chan
basc_py4chan/board.py
https://github.com/bibanon/BASC-py4chan/blob/88e4866d73853e1025e549fbbe9744e750522359/basc_py4chan/board.py#L145-L158
def thread_exists(self, thread_id):
    """Check if a thread exists or has 404'd.

    Args:
        thread_id (int): Thread ID

    Returns:
        bool: Whether the given thread exists on this board.
    """
    return self._requests_session.head(
        self._url.thread_api_url(
            thread_id=thread_id
        )
    ).ok
[ "def", "thread_exists", "(", "self", ",", "thread_id", ")", ":", "return", "self", ".", "_requests_session", ".", "head", "(", "self", ".", "_url", ".", "thread_api_url", "(", "thread_id", "=", "thread_id", ")", ")", ".", "ok" ]
Check if a thread exists or has 404'd. Args: thread_id (int): Thread ID Returns: bool: Whether the given thread exists on this board.
[ "Check", "if", "a", "thread", "exists", "or", "has", "404", "d", "." ]
python
train
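A hedged usage sketch of the library's Board API: thread_exists() issues a HEAD request against the thread's API URL and returns the response's ok status. The board name and thread ID below are placeholders, and live network access to the 4chan API is assumed:

# Hedged usage sketch; board name and thread ID are placeholders.
import basc_py4chan

board = basc_py4chan.Board('v')
thread_id = 123456789  # placeholder
print(board.thread_exists(thread_id))  # True if the thread is still up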
FelixSchwarz/pymta
pymta/command_parser.py
https://github.com/FelixSchwarz/pymta/blob/1884accc3311e6c2e89259784f9592314f6d34fc/pymta/command_parser.py#L92-L98
def multiline_push(self, code, lines):
    """Send a multi-message to the peer (using the correct SMTP line
    terminators (usually only called from the SMTPSession)."""
    for line in lines[:-1]:
        answer = '%s-%s' % (code, line)
        self.push(answer)
    self.push(code, lines[-1])
[ "def", "multiline_push", "(", "self", ",", "code", ",", "lines", ")", ":", "for", "line", "in", "lines", "[", ":", "-", "1", "]", ":", "answer", "=", "'%s-%s'", "%", "(", "code", ",", "line", ")", "self", ".", "push", "(", "answer", ")", "self", ".", "push", "(", "code", ",", "lines", "[", "-", "1", "]", ")" ]
Send a multi-message to the peer (using the correct SMTP line terminators (usually only called from the SMTPSession).
[ "Send", "a", "multi", "-", "message", "to", "the", "peer", "(", "using", "the", "correct", "SMTP", "line", "terminators", "(", "usually", "only", "called", "from", "the", "SMTPSession", ")", "." ]
python
train
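A standalone sketch of the multi-line SMTP reply convention the method emits: every line but the last uses "<code>-<text>", the final line uses "<code> <text>". The list below stands in for pymta's real push/transport layer:

# Standalone sketch of the multi-line SMTP reply format.
def multiline_reply(code, lines):
    pushed = ['%s-%s' % (code, line) for line in lines[:-1]]
    pushed.append('%s %s' % (code, lines[-1]))
    return pushed

for line in multiline_reply(250, ['mail.example.org', 'SIZE 10240000', 'HELP']):
    print(line)
# 250-mail.example.org
# 250-SIZE 10240000
# 250 HELP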
metachris/logzero
logzero/__init__.py
https://github.com/metachris/logzero/blob/b5d49fc2b118c370994c4ae5360d7c246d43ddc8/logzero/__init__.py#L320-L331
def reset_default_logger():
    """
    Resets the internal default logger to the initial configuration
    """
    global logger
    global _loglevel
    global _logfile
    global _formatter
    _loglevel = logging.DEBUG
    _logfile = None
    _formatter = None
    logger = setup_logger(name=LOGZERO_DEFAULT_LOGGER, logfile=_logfile, level=_loglevel, formatter=_formatter)
[ "def", "reset_default_logger", "(", ")", ":", "global", "logger", "global", "_loglevel", "global", "_logfile", "global", "_formatter", "_loglevel", "=", "logging", ".", "DEBUG", "_logfile", "=", "None", "_formatter", "=", "None", "logger", "=", "setup_logger", "(", "name", "=", "LOGZERO_DEFAULT_LOGGER", ",", "logfile", "=", "_logfile", ",", "level", "=", "_loglevel", ",", "formatter", "=", "_formatter", ")" ]
Resets the internal default logger to the initial configuration
[ "Resets", "the", "internal", "default", "logger", "to", "the", "initial", "configuration" ]
python
train
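A hedged usage sketch: reconfigure the shared logzero logger, then restore the initial configuration with reset_default_logger(). The loglevel() and logfile() helpers are part of logzero's public API; the log file path is a placeholder:

# Hedged usage sketch; the log file path is a placeholder.
import logging
import logzero

logzero.loglevel(logging.WARNING)        # raise the threshold
logzero.logfile('/tmp/demo.log')         # also write to a file
logzero.logger.warning("captured in /tmp/demo.log")

logzero.reset_default_logger()           # back to DEBUG, console only, no file
logzero.logger.debug("visible again after the reset")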
evhub/coconut
coconut/compiler/matching.py
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/matching.py#L549-L555
def match_set(self, tokens, item):
    """Matches a set."""
    match, = tokens
    self.add_check("_coconut.isinstance(" + item + ", _coconut.abc.Set)")
    self.add_check("_coconut.len(" + item + ") == " + str(len(match)))
    for const in match:
        self.add_check(const + " in " + item)
[ "def", "match_set", "(", "self", ",", "tokens", ",", "item", ")", ":", "match", ",", "=", "tokens", "self", ".", "add_check", "(", "\"_coconut.isinstance(\"", "+", "item", "+", "\", _coconut.abc.Set)\"", ")", "self", ".", "add_check", "(", "\"_coconut.len(\"", "+", "item", "+", "\") == \"", "+", "str", "(", "len", "(", "match", ")", ")", ")", "for", "const", "in", "match", ":", "self", ".", "add_check", "(", "const", "+", "\" in \"", "+", "item", ")" ]
Matches a set.
[ "Matches", "a", "set", "." ]
python
train
hiposfer/o2g
o2g/gtfs/gtfs_dummy.py
https://github.com/hiposfer/o2g/blob/1165ba75a5eb64b3091e9b71ebd589507ae1ebf3/o2g/gtfs/gtfs_dummy.py#L14-L39
def create_dummy_data(routes, stops): """Create `calendar`, `stop_times`, `trips` and `shapes`. :return: DummyData namedtuple """ # Build stops per route auxiliary map stops_per_route = defaultdict(lambda: []) stops_map = {} for s in stops: if not s.route_id: continue stops_per_route[s.route_id].append(s) stops_map[s.stop_id] = s calendar = _create_dummy_calendar() trips = \ _create_dummy_trips( routes, stops_per_route, calendar) stop_times = _create_dummy_stoptimes(trips, stops_per_route) frequencies = _create_dummy_frequencies(trips) return DummyData(calendar, stop_times, trips, frequencies)
[ "def", "create_dummy_data", "(", "routes", ",", "stops", ")", ":", "# Build stops per route auxiliary map", "stops_per_route", "=", "defaultdict", "(", "lambda", ":", "[", "]", ")", "stops_map", "=", "{", "}", "for", "s", "in", "stops", ":", "if", "not", "s", ".", "route_id", ":", "continue", "stops_per_route", "[", "s", ".", "route_id", "]", ".", "append", "(", "s", ")", "stops_map", "[", "s", ".", "stop_id", "]", "=", "s", "calendar", "=", "_create_dummy_calendar", "(", ")", "trips", "=", "_create_dummy_trips", "(", "routes", ",", "stops_per_route", ",", "calendar", ")", "stop_times", "=", "_create_dummy_stoptimes", "(", "trips", ",", "stops_per_route", ")", "frequencies", "=", "_create_dummy_frequencies", "(", "trips", ")", "return", "DummyData", "(", "calendar", ",", "stop_times", ",", "trips", ",", "frequencies", ")" ]
Create `calendar`, `stop_times`, `trips` and `shapes`. :return: DummyData namedtuple
[ "Create", "calendar", "stop_times", "trips", "and", "shapes", "." ]
python
test
JensRantil/rewind
rewind/server/eventstores.py
https://github.com/JensRantil/rewind/blob/7f645d20186c1db55cfe53a0310c9fd6292f91ea/rewind/server/eventstores.py#L575-L589
def add_event(self, key, event):
    """Add an event and its corresponding key to the store."""
    assert isinstance(key, str)
    assert isinstance(event, bytes)
    if all([char.isalnum() or char == '-' for char in key]):
        safe_key = key
    else:
        raise ValueError("Key must be alphanumeric or a dash (-):"
                         " {0}".format(key))
    safe_event = base64.encodestring(event).decode().strip()
    data = "{0}\t{1}\n".format(safe_key, safe_event)
    # Important to make a single atomic write here
    self._hasher.update(data.encode())
    self.f.write(data)
[ "def", "add_event", "(", "self", ",", "key", ",", "event", ")", ":", "assert", "isinstance", "(", "key", ",", "str", ")", "assert", "isinstance", "(", "event", ",", "bytes", ")", "if", "all", "(", "[", "char", ".", "isalnum", "(", ")", "or", "char", "==", "'-'", "for", "char", "in", "key", "]", ")", ":", "safe_key", "=", "key", "else", ":", "raise", "ValueError", "(", "\"Key must be alphanumeric or a dash (-):\"", "\" {0}\"", ".", "format", "(", "key", ")", ")", "safe_event", "=", "base64", ".", "encodestring", "(", "event", ")", ".", "decode", "(", ")", ".", "strip", "(", ")", "data", "=", "\"{0}\\t{1}\\n\"", ".", "format", "(", "safe_key", ",", "safe_event", ")", "# Important to make a single atomic write here", "self", ".", "_hasher", ".", "update", "(", "data", ".", "encode", "(", ")", ")", "self", ".", "f", ".", "write", "(", "data", ")" ]
Add an event and its corresponding key to the store.
[ "Add", "an", "event", "and", "its", "corresponding", "key", "to", "the", "store", "." ]
python
train
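A standalone sketch of the on-disk record format produced above: one line per event, "<key>\t<base64 payload>\n". It uses base64.encodebytes, the modern spelling of the deprecated encodestring call in the original; the key and payload are made up:

# Standalone sketch of the rewind event-store line format.
import base64

def format_record(key, event):
    assert isinstance(key, str) and isinstance(event, bytes)
    if not all(ch.isalnum() or ch == '-' for ch in key):
        raise ValueError("Key must be alphanumeric or a dash (-): %s" % key)
    safe_event = base64.encodebytes(event).decode().strip()
    return "{0}\t{1}\n".format(key, safe_event)

print(repr(format_record("event-1", b"hello world")))
# 'event-1\taGVsbG8gd29ybGQ=\n'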
pywbem/pywbem
pywbem/cim_operations.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/cim_operations.py#L2158-L2183
def _iparam_namespace_from_objectname(self, objectname, arg_name): # pylint: disable=invalid-name, """ Determine the namespace from an object name, that can be a class name string, a CIMClassName or CIMInstanceName object, or `None`. The default namespace of the connection object is used, if needed. Return the so determined namespace for use as an argument to imethodcall(). """ if isinstance(objectname, (CIMClassName, CIMInstanceName)): namespace = objectname.namespace elif isinstance(objectname, six.string_types): namespace = None elif objectname is None: namespace = objectname else: raise TypeError( _format("The {0!A} argument of the WBEMConnection operation " "has invalid type {1} (must be None, a string, a " "CIMClassName, or a CIMInstanceName)", arg_name, type(objectname))) if namespace is None: namespace = self.default_namespace return namespace
[ "def", "_iparam_namespace_from_objectname", "(", "self", ",", "objectname", ",", "arg_name", ")", ":", "# pylint: disable=invalid-name,", "if", "isinstance", "(", "objectname", ",", "(", "CIMClassName", ",", "CIMInstanceName", ")", ")", ":", "namespace", "=", "objectname", ".", "namespace", "elif", "isinstance", "(", "objectname", ",", "six", ".", "string_types", ")", ":", "namespace", "=", "None", "elif", "objectname", "is", "None", ":", "namespace", "=", "objectname", "else", ":", "raise", "TypeError", "(", "_format", "(", "\"The {0!A} argument of the WBEMConnection operation \"", "\"has invalid type {1} (must be None, a string, a \"", "\"CIMClassName, or a CIMInstanceName)\"", ",", "arg_name", ",", "type", "(", "objectname", ")", ")", ")", "if", "namespace", "is", "None", ":", "namespace", "=", "self", ".", "default_namespace", "return", "namespace" ]
Determine the namespace from an object name, that can be a class name string, a CIMClassName or CIMInstanceName object, or `None`. The default namespace of the connection object is used, if needed. Return the so determined namespace for use as an argument to imethodcall().
[ "Determine", "the", "namespace", "from", "an", "object", "name", "that", "can", "be", "a", "class", "name", "string", "a", "CIMClassName", "or", "CIMInstanceName", "object", "or", "None", ".", "The", "default", "namespace", "of", "the", "connection", "object", "is", "used", "if", "needed", "." ]
python
train
gwpy/gwpy
gwpy/io/mp.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/mp.py#L34-L106
def read_multi(flatten, cls, source, *args, **kwargs): """Read sources into a `cls` with multiprocessing This method should be called by `cls.read` and uses the `nproc` keyword to enable and handle pool-based multiprocessing of multiple source files, using `flatten` to combine the chunked data into a single object of the correct type. Parameters ---------- flatten : `callable` a method to take a list of ``cls`` instances, and combine them into a single ``cls`` instance cls : `type` the object type to read source : `str`, `list` of `str`, ... the input data source, can be of in many different forms *args positional arguments to pass to the reader **kwargs keyword arguments to pass to the reader """ verbose = kwargs.pop('verbose', False) # parse input as a list of files try: # try and map to a list of file-like objects files = file_list(source) except ValueError: # otherwise treat as single file files = [source] path = None # to pass to get_read_format() else: path = files[0] if files else None # determine input format (so we don't have to do it multiple times) if kwargs.get('format', None) is None: kwargs['format'] = get_read_format(cls, path, (source,) + args, kwargs) # calculate maximum number of processes nproc = min(kwargs.pop('nproc', 1), len(files)) # define multiprocessing method def _read_single_file(fobj): try: return fobj, io_read(cls, fobj, *args, **kwargs) # pylint: disable=broad-except,redefine-in-handler except Exception as exc: if nproc == 1: raise if isinstance(exc, SAXException): # SAXExceptions don't pickle return fobj, exc.getException() # pylint: disable=no-member return fobj, exc # format verbosity if verbose is True: verbose = 'Reading ({})'.format(kwargs['format']) # read files output = mp_utils.multiprocess_with_queues( nproc, _read_single_file, files, verbose=verbose, unit='files') # raise exceptions (from multiprocessing, single process raises inline) for fobj, exc in output: if isinstance(exc, Exception): exc.args = ('Failed to read %s: %s' % (fobj, str(exc)),) raise exc # return combined object _, out = zip(*output) return flatten(out)
[ "def", "read_multi", "(", "flatten", ",", "cls", ",", "source", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "verbose", "=", "kwargs", ".", "pop", "(", "'verbose'", ",", "False", ")", "# parse input as a list of files", "try", ":", "# try and map to a list of file-like objects", "files", "=", "file_list", "(", "source", ")", "except", "ValueError", ":", "# otherwise treat as single file", "files", "=", "[", "source", "]", "path", "=", "None", "# to pass to get_read_format()", "else", ":", "path", "=", "files", "[", "0", "]", "if", "files", "else", "None", "# determine input format (so we don't have to do it multiple times)", "if", "kwargs", ".", "get", "(", "'format'", ",", "None", ")", "is", "None", ":", "kwargs", "[", "'format'", "]", "=", "get_read_format", "(", "cls", ",", "path", ",", "(", "source", ",", ")", "+", "args", ",", "kwargs", ")", "# calculate maximum number of processes", "nproc", "=", "min", "(", "kwargs", ".", "pop", "(", "'nproc'", ",", "1", ")", ",", "len", "(", "files", ")", ")", "# define multiprocessing method", "def", "_read_single_file", "(", "fobj", ")", ":", "try", ":", "return", "fobj", ",", "io_read", "(", "cls", ",", "fobj", ",", "*", "args", ",", "*", "*", "kwargs", ")", "# pylint: disable=broad-except,redefine-in-handler", "except", "Exception", "as", "exc", ":", "if", "nproc", "==", "1", ":", "raise", "if", "isinstance", "(", "exc", ",", "SAXException", ")", ":", "# SAXExceptions don't pickle", "return", "fobj", ",", "exc", ".", "getException", "(", ")", "# pylint: disable=no-member", "return", "fobj", ",", "exc", "# format verbosity", "if", "verbose", "is", "True", ":", "verbose", "=", "'Reading ({})'", ".", "format", "(", "kwargs", "[", "'format'", "]", ")", "# read files", "output", "=", "mp_utils", ".", "multiprocess_with_queues", "(", "nproc", ",", "_read_single_file", ",", "files", ",", "verbose", "=", "verbose", ",", "unit", "=", "'files'", ")", "# raise exceptions (from multiprocessing, single process raises inline)", "for", "fobj", ",", "exc", "in", "output", ":", "if", "isinstance", "(", "exc", ",", "Exception", ")", ":", "exc", ".", "args", "=", "(", "'Failed to read %s: %s'", "%", "(", "fobj", ",", "str", "(", "exc", ")", ")", ",", ")", "raise", "exc", "# return combined object", "_", ",", "out", "=", "zip", "(", "*", "output", ")", "return", "flatten", "(", "out", ")" ]
Read sources into a `cls` with multiprocessing This method should be called by `cls.read` and uses the `nproc` keyword to enable and handle pool-based multiprocessing of multiple source files, using `flatten` to combine the chunked data into a single object of the correct type. Parameters ---------- flatten : `callable` a method to take a list of ``cls`` instances, and combine them into a single ``cls`` instance cls : `type` the object type to read source : `str`, `list` of `str`, ... the input data source, can be of in many different forms *args positional arguments to pass to the reader **kwargs keyword arguments to pass to the reader
[ "Read", "sources", "into", "a", "cls", "with", "multiprocessing" ]
python
train
nanoporetech/ont_fast5_api
ont_fast5_api/fast5_file.py
https://github.com/nanoporetech/ont_fast5_api/blob/352b3903155fcf4f19234c4f429dcefaa6d6bc4a/ont_fast5_api/fast5_file.py#L706-L714
def _add_group(self, group, attrs):
    """
    :param group: group_name
    :param attrs:
    :return:
    """
    self.handle.create_group(group)
    if attrs is not None:
        self._add_attributes(group, attrs)
[ "def", "_add_group", "(", "self", ",", "group", ",", "attrs", ")", ":", "self", ".", "handle", ".", "create_group", "(", "group", ")", "if", "attrs", "is", "not", "None", ":", "self", ".", "_add_attributes", "(", "group", ",", "attrs", ")" ]
:param group: group_name :param attrs: :return:
[ ":", "param", "group", ":", "group_name", ":", "param", "attrs", ":", ":", "return", ":" ]
python
train
mosdef-hub/mbuild
mbuild/formats/hoomdxml.py
https://github.com/mosdef-hub/mbuild/blob/dcb80a2becd5d0e6a7e3e7bcb1b59793c46a2dd3/mbuild/formats/hoomdxml.py#L311-L329
def _write_rigid_information(xml_file, rigid_bodies):
    """Write rigid body information.

    Parameters
    ----------
    xml_file : file object
        The file object of the hoomdxml file being written
    rigid_bodies : list, len=n_particles
        The rigid body that each particle belongs to (-1 for none)
    """
    if not all(body is None for body in rigid_bodies):
        xml_file.write('<body>\n')
        for body in rigid_bodies:
            if body is None:
                body = -1
            xml_file.write('{}\n'.format(int(body)))
        xml_file.write('</body>\n')
[ "def", "_write_rigid_information", "(", "xml_file", ",", "rigid_bodies", ")", ":", "if", "not", "all", "(", "body", "is", "None", "for", "body", "in", "rigid_bodies", ")", ":", "xml_file", ".", "write", "(", "'<body>\\n'", ")", "for", "body", "in", "rigid_bodies", ":", "if", "body", "is", "None", ":", "body", "=", "-", "1", "xml_file", ".", "write", "(", "'{}\\n'", ".", "format", "(", "int", "(", "body", ")", ")", ")", "xml_file", ".", "write", "(", "'</body>\\n'", ")" ]
Write rigid body information. Parameters ---------- xml_file : file object The file object of the hoomdxml file being written rigid_bodies : list, len=n_particles The rigid body that each particle belongs to (-1 for none)
[ "Write", "rigid", "body", "information", "." ]
python
train
tcalmant/ipopo
pelix/ldapfilter.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ldapfilter.py#L660-L704
def _compute_comparator(string, idx): # type: (str, int) -> Optional[Callable[[Any, Any], bool]] """ Tries to compute the LDAP comparator at the given index Valid operators are : * = : equality * <= : less than * >= : greater than * ~= : approximate :param string: A LDAP filter string :param idx: An index in the given string :return: The corresponding operator, None if unknown """ part1 = string[idx] try: part2 = string[idx + 1] except IndexError: # String is too short (no comparison) return None if part1 == "=": # Equality return _comparator_eq elif part2 != "=": # It's a "strict" operator if part1 == "<": # Strictly lesser return _comparator_lt elif part1 == ">": # Strictly greater return _comparator_gt else: if part1 == "<": # Less or equal return _comparator_le elif part1 == ">": # Greater or equal return _comparator_ge elif part1 == "~": # Approximate equality return _comparator_approximate return None
[ "def", "_compute_comparator", "(", "string", ",", "idx", ")", ":", "# type: (str, int) -> Optional[Callable[[Any, Any], bool]]", "part1", "=", "string", "[", "idx", "]", "try", ":", "part2", "=", "string", "[", "idx", "+", "1", "]", "except", "IndexError", ":", "# String is too short (no comparison)", "return", "None", "if", "part1", "==", "\"=\"", ":", "# Equality", "return", "_comparator_eq", "elif", "part2", "!=", "\"=\"", ":", "# It's a \"strict\" operator", "if", "part1", "==", "\"<\"", ":", "# Strictly lesser", "return", "_comparator_lt", "elif", "part1", "==", "\">\"", ":", "# Strictly greater", "return", "_comparator_gt", "else", ":", "if", "part1", "==", "\"<\"", ":", "# Less or equal", "return", "_comparator_le", "elif", "part1", "==", "\">\"", ":", "# Greater or equal", "return", "_comparator_ge", "elif", "part1", "==", "\"~\"", ":", "# Approximate equality", "return", "_comparator_approximate", "return", "None" ]
Tries to compute the LDAP comparator at the given index Valid operators are : * = : equality * <= : less than * >= : greater than * ~= : approximate :param string: A LDAP filter string :param idx: An index in the given string :return: The corresponding operator, None if unknown
[ "Tries", "to", "compute", "the", "LDAP", "comparator", "at", "the", "given", "index" ]
python
train
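A standalone demonstration of the two-character lookahead used above to classify the comparison operator in an LDAP filter. It returns plain string labels instead of iPOPO's internal comparator callables; the filter strings and indices are made up:

# Standalone sketch of the operator lookahead in _compute_comparator().
def classify_operator(string, idx):
    part1 = string[idx]
    try:
        part2 = string[idx + 1]
    except IndexError:
        return None  # string too short for a comparison
    if part1 == "=":
        return "eq"
    elif part2 != "=":
        return {"<": "lt", ">": "gt"}.get(part1)
    else:
        return {"<": "le", ">": "ge", "~": "approx"}.get(part1)

for flt, idx in [("(a=1)", 2), ("(a<=1)", 2), ("(a>1)", 2), ("(a~=1)", 2)]:
    print(flt, "->", classify_operator(flt, idx))
# (a=1) -> eq
# (a<=1) -> le
# (a>1) -> gt
# (a~=1) -> approx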
has2k1/plydata
plydata/types.py
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/types.py#L49-L61
def group_indices(self):
    """
    Return group indices
    """
    # No groups
    if not self.plydata_groups:
        return np.ones(len(self), dtype=int)

    grouper = self.groupby()
    indices = np.empty(len(self), dtype=int)
    for i, (_, idx) in enumerate(sorted(grouper.indices.items())):
        indices[idx] = i
    return indices
[ "def", "group_indices", "(", "self", ")", ":", "# No groups", "if", "not", "self", ".", "plydata_groups", ":", "return", "np", ".", "ones", "(", "len", "(", "self", ")", ",", "dtype", "=", "int", ")", "grouper", "=", "self", ".", "groupby", "(", ")", "indices", "=", "np", ".", "empty", "(", "len", "(", "self", ")", ",", "dtype", "=", "int", ")", "for", "i", ",", "(", "_", ",", "idx", ")", "in", "enumerate", "(", "sorted", "(", "grouper", ".", "indices", ".", "items", "(", ")", ")", ")", ":", "indices", "[", "idx", "]", "=", "i", "return", "indices" ]
Return group indices
[ "Return", "group", "indices" ]
python
train
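A plain-pandas illustration of what the group-index computation yields: each row gets the integer id of its group, with groups numbered in sorted key order. This uses an ordinary DataFrame and an explicit groupby key rather than plydata's grouped DataFrame type:

# Pandas-level illustration of the group-index idea.
import numpy as np
import pandas as pd

df = pd.DataFrame({'g': ['b', 'a', 'b', 'a'], 'x': [1, 2, 3, 4]})

grouper = df.groupby('g')
indices = np.empty(len(df), dtype=int)
for i, (_, idx) in enumerate(sorted(grouper.indices.items())):
    indices[idx] = i

print(indices)  # [1 0 1 0]  -> 'a' is group 0, 'b' is group 1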
pandas-dev/pandas
pandas/core/sparse/frame.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/frame.py#L951-L960
def to_manager(sdf, columns, index):
    """ create and return the block manager from a dataframe of
        series, columns, index
    """
    # from BlockManager perspective
    axes = [ensure_index(columns), ensure_index(index)]

    return create_block_manager_from_arrays(
        [sdf[c] for c in columns], columns, axes)
[ "def", "to_manager", "(", "sdf", ",", "columns", ",", "index", ")", ":", "# from BlockManager perspective", "axes", "=", "[", "ensure_index", "(", "columns", ")", ",", "ensure_index", "(", "index", ")", "]", "return", "create_block_manager_from_arrays", "(", "[", "sdf", "[", "c", "]", "for", "c", "in", "columns", "]", ",", "columns", ",", "axes", ")" ]
create and return the block manager from a dataframe of series, columns, index
[ "create", "and", "return", "the", "block", "manager", "from", "a", "dataframe", "of", "series", "columns", "index" ]
python
train
saltstack/salt
salt/modules/win_lgpo.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_lgpo.py#L4553-L4568
def _driver_signing_reg_reverse_conversion(cls, val, **kwargs):
    '''
    converts the string value seen in the GUI to the correct registry
    value for secedit
    '''
    if val is not None:
        if val.upper() == 'SILENTLY SUCCEED':
            return ','.join(['3', '0'])
        elif val.upper() == 'WARN BUT ALLOW INSTALLATION':
            return ','.join(['3', chr(1)])
        elif val.upper() == 'DO NOT ALLOW INSTALLATION':
            return ','.join(['3', chr(2)])
        else:
            return 'Invalid Value'
    else:
        return 'Not Defined'
[ "def", "_driver_signing_reg_reverse_conversion", "(", "cls", ",", "val", ",", "*", "*", "kwargs", ")", ":", "if", "val", "is", "not", "None", ":", "if", "val", ".", "upper", "(", ")", "==", "'SILENTLY SUCCEED'", ":", "return", "','", ".", "join", "(", "[", "'3'", ",", "'0'", "]", ")", "elif", "val", ".", "upper", "(", ")", "==", "'WARN BUT ALLOW INSTALLATION'", ":", "return", "','", ".", "join", "(", "[", "'3'", ",", "chr", "(", "1", ")", "]", ")", "elif", "val", ".", "upper", "(", ")", "==", "'DO NOT ALLOW INSTALLATION'", ":", "return", "','", ".", "join", "(", "[", "'3'", ",", "chr", "(", "2", ")", "]", ")", "else", ":", "return", "'Invalid Value'", "else", ":", "return", "'Not Defined'" ]
converts the string value seen in the GUI to the correct registry value for secedit
[ "converts", "the", "string", "value", "seen", "in", "the", "GUI", "to", "the", "correct", "registry", "value", "for", "secedit" ]
python
train
scanny/python-pptx
pptx/parts/image.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/parts/image.py#L161-L179
def from_file(cls, image_file): """ Return a new |Image| object loaded from *image_file*, which can be either a path (string) or a file-like object. """ if is_string(image_file): # treat image_file as a path with open(image_file, 'rb') as f: blob = f.read() filename = os.path.basename(image_file) else: # assume image_file is a file-like object # ---reposition file cursor if it has one--- if callable(getattr(image_file, 'seek')): image_file.seek(0) blob = image_file.read() filename = None return cls.from_blob(blob, filename)
[ "def", "from_file", "(", "cls", ",", "image_file", ")", ":", "if", "is_string", "(", "image_file", ")", ":", "# treat image_file as a path", "with", "open", "(", "image_file", ",", "'rb'", ")", "as", "f", ":", "blob", "=", "f", ".", "read", "(", ")", "filename", "=", "os", ".", "path", ".", "basename", "(", "image_file", ")", "else", ":", "# assume image_file is a file-like object", "# ---reposition file cursor if it has one---", "if", "callable", "(", "getattr", "(", "image_file", ",", "'seek'", ")", ")", ":", "image_file", ".", "seek", "(", "0", ")", "blob", "=", "image_file", ".", "read", "(", ")", "filename", "=", "None", "return", "cls", ".", "from_blob", "(", "blob", ",", "filename", ")" ]
Return a new |Image| object loaded from *image_file*, which can be either a path (string) or a file-like object.
[ "Return", "a", "new", "|Image|", "object", "loaded", "from", "*", "image_file", "*", "which", "can", "be", "either", "a", "path", "(", "string", ")", "or", "a", "file", "-", "like", "object", "." ]
python
train
TrafficSenseMSD/SumoTools
sumolib/net/lane.py
https://github.com/TrafficSenseMSD/SumoTools/blob/8607b4f885f1d1798e43240be643efe6dccccdaa/sumolib/net/lane.py#L70-L78
def addJunctionPos(shape, fromPos, toPos): """Extends shape with the given positions in case they differ from the existing endpoints. assumes that shape and positions have the same dimensionality""" result = list(shape) if fromPos != shape[0]: result = [fromPos] + result if toPos != shape[-1]: result.append(toPos) return result
[ "def", "addJunctionPos", "(", "shape", ",", "fromPos", ",", "toPos", ")", ":", "result", "=", "list", "(", "shape", ")", "if", "fromPos", "!=", "shape", "[", "0", "]", ":", "result", "=", "[", "fromPos", "]", "+", "result", "if", "toPos", "!=", "shape", "[", "-", "1", "]", ":", "result", ".", "append", "(", "toPos", ")", "return", "result" ]
Extends shape with the given positions in case they differ from the existing endpoints. assumes that shape and positions have the same dimensionality
[ "Extends", "shape", "with", "the", "given", "positions", "in", "case", "they", "differ", "from", "the", "existing", "endpoints", ".", "assumes", "that", "shape", "and", "positions", "have", "the", "same", "dimensionality" ]
python
train
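The helper above is a pure function, so it can be exercised directly; the coordinates below are made up:

shape = [(0.0, 0.0), (10.0, 0.0)]
addJunctionPos(shape, (-1.0, 0.0), (11.0, 0.0))  # [(-1.0, 0.0), (0.0, 0.0), (10.0, 0.0), (11.0, 0.0)]
addJunctionPos(shape, (0.0, 0.0), (11.0, 0.0))   # fromPos matches shape[0], so only toPos is appended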
phoebe-project/phoebe2
phoebe/parameters/constraint.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/parameters/constraint.py#L798-L857
def comp_sma(b, component, solve_for=None, **kwargs): """ Create a constraint for the star's semi-major axes WITHIN its parent orbit. This is NOT the same as the semi-major axes OF the parent orbit If 'sma' does not exist in the component, it will be created :parameter b: the :class:`phoebe.frontend.bundle.Bundle` :parameter str component: the label of the star in which this constraint should be built :parameter str solve_for: if 'sma@star' should not be the derived/constrained parameter, provide which other parameter should be derived (ie 'sma@orbit', 'q') :returns: lhs (Parameter), rhs (ConstraintParameter), args (list of arguments that were passed to this function) """ hier = b.get_hierarchy() if not len(hier.get_value()): # TODO: change to custom error type to catch in bundle.add_component # TODO: check whether the problem is 0 hierarchies or more than 1 raise NotImplementedError("constraint for comp_sma requires hierarchy") component_ps = _get_system_ps(b, component) parentorbit = hier.get_parent_of(component) parentorbit_ps = _get_system_ps(b, parentorbit) metawargs = component_ps.meta metawargs.pop('qualifier') compsma_def = FloatParameter(qualifier='sma', value=4.0, default_unit=u.solRad, description='Semi major axis of the component in the orbit') compsma, created = b.get_or_create('sma', compsma_def, **metawargs) metawargs = parentorbit_ps.meta metawargs.pop('qualifier') sma = b.get_parameter(qualifier='sma', **metawargs) q = b.get_parameter(qualifier='q', **metawargs) # NOTE: similar logic is also in dynamics.keplerian.dynamics_from_bundle to # handle nested hierarchical orbits. If changing any of the logic here, # it should be changed there as well. if hier.get_primary_or_secondary(component) == 'primary': qthing = (1. + 1./q) else: qthing = (1. + q) if solve_for in [None, compsma]: lhs = compsma rhs = sma / qthing elif solve_for == sma: lhs = sma rhs = compsma * qthing else: raise NotImplementedError return lhs, rhs, {'component': component}
[ "def", "comp_sma", "(", "b", ",", "component", ",", "solve_for", "=", "None", ",", "*", "*", "kwargs", ")", ":", "hier", "=", "b", ".", "get_hierarchy", "(", ")", "if", "not", "len", "(", "hier", ".", "get_value", "(", ")", ")", ":", "# TODO: change to custom error type to catch in bundle.add_component", "# TODO: check whether the problem is 0 hierarchies or more than 1", "raise", "NotImplementedError", "(", "\"constraint for comp_sma requires hierarchy\"", ")", "component_ps", "=", "_get_system_ps", "(", "b", ",", "component", ")", "parentorbit", "=", "hier", ".", "get_parent_of", "(", "component", ")", "parentorbit_ps", "=", "_get_system_ps", "(", "b", ",", "parentorbit", ")", "metawargs", "=", "component_ps", ".", "meta", "metawargs", ".", "pop", "(", "'qualifier'", ")", "compsma_def", "=", "FloatParameter", "(", "qualifier", "=", "'sma'", ",", "value", "=", "4.0", ",", "default_unit", "=", "u", ".", "solRad", ",", "description", "=", "'Semi major axis of the component in the orbit'", ")", "compsma", ",", "created", "=", "b", ".", "get_or_create", "(", "'sma'", ",", "compsma_def", ",", "*", "*", "metawargs", ")", "metawargs", "=", "parentorbit_ps", ".", "meta", "metawargs", ".", "pop", "(", "'qualifier'", ")", "sma", "=", "b", ".", "get_parameter", "(", "qualifier", "=", "'sma'", ",", "*", "*", "metawargs", ")", "q", "=", "b", ".", "get_parameter", "(", "qualifier", "=", "'q'", ",", "*", "*", "metawargs", ")", "# NOTE: similar logic is also in dynamics.keplerian.dynamics_from_bundle to", "# handle nested hierarchical orbits. If changing any of the logic here,", "# it should be changed there as well.", "if", "hier", ".", "get_primary_or_secondary", "(", "component", ")", "==", "'primary'", ":", "qthing", "=", "(", "1.", "+", "1.", "/", "q", ")", "else", ":", "qthing", "=", "(", "1.", "+", "q", ")", "if", "solve_for", "in", "[", "None", ",", "compsma", "]", ":", "lhs", "=", "compsma", "rhs", "=", "sma", "/", "qthing", "elif", "solve_for", "==", "sma", ":", "lhs", "=", "sma", "rhs", "=", "compsma", "*", "qthing", "else", ":", "raise", "NotImplementedError", "return", "lhs", ",", "rhs", ",", "{", "'component'", ":", "component", "}" ]
Create a constraint for the star's semi-major axes WITHIN its parent orbit. This is NOT the same as the semi-major axes OF the parent orbit If 'sma' does not exist in the component, it will be created :parameter b: the :class:`phoebe.frontend.bundle.Bundle` :parameter str component: the label of the star in which this constraint should be built :parameter str solve_for: if 'sma@star' should not be the derived/constrained parameter, provide which other parameter should be derived (ie 'sma@orbit', 'q') :returns: lhs (Parameter), rhs (ConstraintParameter), args (list of arguments that were passed to this function)
[ "Create", "a", "constraint", "for", "the", "star", "s", "semi", "-", "major", "axes", "WITHIN", "its", "parent", "orbit", ".", "This", "is", "NOT", "the", "same", "as", "the", "semi", "-", "major", "axes", "OF", "the", "parent", "orbit" ]
python
train
wummel/dosage
dosagelib/director.py
https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/dosagelib/director.py#L183-L189
def finish(): """Print warning about interrupt and empty the job queue.""" out.warn("Interrupted!") for t in threads: t.stop() jobs.clear() out.warn("Waiting for download threads to finish.")
[ "def", "finish", "(", ")", ":", "out", ".", "warn", "(", "\"Interrupted!\"", ")", "for", "t", "in", "threads", ":", "t", ".", "stop", "(", ")", "jobs", ".", "clear", "(", ")", "out", ".", "warn", "(", "\"Waiting for download threads to finish.\"", ")" ]
Print warning about interrupt and empty the job queue.
[ "Print", "warning", "about", "interrupt", "and", "empty", "the", "job", "queue", "." ]
python
train
ga4gh/ga4gh-server
ga4gh/server/datamodel/genotype_phenotype.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/genotype_phenotype.py#L208-L224
def _formatOntologyTermObject(self, terms, element_type): """ Formats the ontology term object for query """ elementClause = None if not isinstance(terms, collections.Iterable): terms = [terms] elements = [] for term in terms: if term.term_id: elements.append('?{} = <{}> '.format( element_type, term.term_id)) else: elements.append('?{} = <{}> '.format( element_type, self._toNamespaceURL(term.term))) elementClause = "({})".format(" || ".join(elements)) return elementClause
[ "def", "_formatOntologyTermObject", "(", "self", ",", "terms", ",", "element_type", ")", ":", "elementClause", "=", "None", "if", "not", "isinstance", "(", "terms", ",", "collections", ".", "Iterable", ")", ":", "terms", "=", "[", "terms", "]", "elements", "=", "[", "]", "for", "term", "in", "terms", ":", "if", "term", ".", "term_id", ":", "elements", ".", "append", "(", "'?{} = <{}> '", ".", "format", "(", "element_type", ",", "term", ".", "term_id", ")", ")", "else", ":", "elements", ".", "append", "(", "'?{} = <{}> '", ".", "format", "(", "element_type", ",", "self", ".", "_toNamespaceURL", "(", "term", ".", "term", ")", ")", ")", "elementClause", "=", "\"({})\"", ".", "format", "(", "\" || \"", ".", "join", "(", "elements", ")", ")", "return", "elementClause" ]
Formats the ontology term object for query
[ "Formats", "the", "ontology", "term", "object", "for", "query" ]
python
train
bykof/billomapy
billomapy/billomapy.py
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L1120-L1133
def get_all_items_of_invoice(self, invoice_id): """ Get all items of invoice This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param invoice_id: the invoice id :return: list """ return self._iterate_through_pages( get_function=self.get_items_of_invoice_per_page, resource=INVOICE_ITEMS, **{'invoice_id': invoice_id} )
[ "def", "get_all_items_of_invoice", "(", "self", ",", "invoice_id", ")", ":", "return", "self", ".", "_iterate_through_pages", "(", "get_function", "=", "self", ".", "get_items_of_invoice_per_page", ",", "resource", "=", "INVOICE_ITEMS", ",", "*", "*", "{", "'invoice_id'", ":", "invoice_id", "}", ")" ]
Get all items of invoice This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param invoice_id: the invoice id :return: list
[ "Get", "all", "items", "of", "invoice", "This", "will", "iterate", "over", "all", "pages", "until", "it", "gets", "all", "elements", ".", "So", "if", "the", "rate", "limit", "exceeded", "it", "will", "throw", "an", "Exception", "and", "you", "will", "get", "nothing" ]
python
train
deepmind/pysc2
pysc2/env/lan_sc2_env.py
https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/env/lan_sc2_env.py#L142-L154
def read_tcp_size(conn, size): """Read `size` number of bytes from `conn`, retrying as needed.""" chunks = [] bytes_read = 0 while bytes_read < size: chunk = conn.recv(size - bytes_read) if not chunk: if bytes_read > 0: logging.warning("Incomplete read: %s of %s.", bytes_read, size) return chunks.append(chunk) bytes_read += len(chunk) return b"".join(chunks)
[ "def", "read_tcp_size", "(", "conn", ",", "size", ")", ":", "chunks", "=", "[", "]", "bytes_read", "=", "0", "while", "bytes_read", "<", "size", ":", "chunk", "=", "conn", ".", "recv", "(", "size", "-", "bytes_read", ")", "if", "not", "chunk", ":", "if", "bytes_read", ">", "0", ":", "logging", ".", "warning", "(", "\"Incomplete read: %s of %s.\"", ",", "bytes_read", ",", "size", ")", "return", "chunks", ".", "append", "(", "chunk", ")", "bytes_read", "+=", "len", "(", "chunk", ")", "return", "b\"\"", ".", "join", "(", "chunks", ")" ]
Read `size` number of bytes from `conn`, retrying as needed.
[ "Read", "size", "number", "of", "bytes", "from", "conn", "retrying", "as", "needed", "." ]
python
train
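A small sketch exercising the retry loop above over a local socket pair; it assumes read_tcp_size from this record is in scope (along with its logging import):

import socket

a, b = socket.socketpair()
a.sendall(b'hello world')
assert read_tcp_size(b, 11) == b'hello world'  # loops on recv until all 11 bytes arrive
a.close(); b.close()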
basecrm/basecrm-python
basecrm/services.py
https://github.com/basecrm/basecrm-python/blob/7c1cf97dbaba8aeb9ff89f8a54f945a8702349f6/basecrm/services.py#L313-L328
def retrieve(self, id) : """ Retrieve a single deal Returns a single deal available to the user, according to the unique deal ID provided If the specified deal does not exist, the request will return an error :calls: ``get /deals/{id}`` :param int id: Unique identifier of a Deal. :return: Dictionary that supports attribute-style access and represents a Deal resource. :rtype: dict """ _, _, deal = self.http_client.get("/deals/{id}".format(id=id)) deal["value"] = Coercion.to_decimal(deal["value"]) return deal
[ "def", "retrieve", "(", "self", ",", "id", ")", ":", "_", ",", "_", ",", "deal", "=", "self", ".", "http_client", ".", "get", "(", "\"/deals/{id}\"", ".", "format", "(", "id", "=", "id", ")", ")", "deal", "[", "\"value\"", "]", "=", "Coercion", ".", "to_decimal", "(", "deal", "[", "\"value\"", "]", ")", "return", "deal" ]
Retrieve a single deal Returns a single deal available to the user, according to the unique deal ID provided If the specified deal does not exist, the request will return an error :calls: ``get /deals/{id}`` :param int id: Unique identifier of a Deal. :return: Dictionary that supports attribute-style access and represents a Deal resource. :rtype: dict
[ "Retrieve", "a", "single", "deal" ]
python
train
saltstack/salt
salt/modules/keystoneng.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/keystoneng.py#L680-L692
def endpoint_list(auth=None, **kwargs): ''' List endpoints CLI Example: .. code-block:: bash salt '*' keystoneng.endpoint_list ''' cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.list_endpoints(**kwargs)
[ "def", "endpoint_list", "(", "auth", "=", "None", ",", "*", "*", "kwargs", ")", ":", "cloud", "=", "get_operator_cloud", "(", "auth", ")", "kwargs", "=", "_clean_kwargs", "(", "*", "*", "kwargs", ")", "return", "cloud", ".", "list_endpoints", "(", "*", "*", "kwargs", ")" ]
List endpoints CLI Example: .. code-block:: bash salt '*' keystoneng.endpoint_list
[ "List", "endpoints" ]
python
train
e7dal/bubble3
bubble3/util/cli_misc.py
https://github.com/e7dal/bubble3/blob/59c735281a95b44f6263a25f4d6ce24fca520082/bubble3/util/cli_misc.py#L217-L264
def make_uniq_for_step(ctx, ukeys, step, stage, full_data, clean_missing_after_seconds, to_uniq): """initially just a copy from UNIQ_PULL""" # TODO: # this still seems to work ok for Storage types json/bubble, # for DS we need to reload de dumped step to uniqify if not ukeys: return to_uniq else: uniq_data = bubble_lod_load(ctx, step, stage) ctx.say('Creating uniq identifiers for [' + step + '] information', 0) ctx.gbc.say('uniq_data:', stuff=uniq_data, verbosity=1000) # TODO:make: data->keyed.items uniq_step_res = make_uniq(ctx=ctx, ldict=to_uniq, keyed=uniq_data, uniqstr=ukeys, tag=step, full_data=full_data, remove_missing_after_seconds=clean_missing_after_seconds) ctx.gbc.say('uniq_step_res:', stuff=uniq_step_res, verbosity=1000) to_uniq_newest = get_newest_uniq(ctx.gbc, uniq_step_res) # TODO: selected pulled only from slice of uniq # PROBLEM: slice of pull is not equal to slice of newest uniq, # can only select keys from newest, from slice of pulled # need a uid list from to_transform # to_transform = get_gen_slice(gbc, to_transform_newest, amount, index) # for now not a big problem, as with 'pump' there should be no problem to_uniq = to_uniq_newest # todo make keyed.items->data uniq_res_list = get_uniq_list(ctx.gbc, uniq_step_res) reset = True pfr = bubble_lod_dump(ctx=ctx, step=step, stage=stage, full_data=full_data, reset=reset, data_gen=uniq_res_list) ctx.gbc.say('saved uniq ' + step + ' data res:', stuff=pfr, verbosity=700) return to_uniq
[ "def", "make_uniq_for_step", "(", "ctx", ",", "ukeys", ",", "step", ",", "stage", ",", "full_data", ",", "clean_missing_after_seconds", ",", "to_uniq", ")", ":", "# TODO:", "# this still seems to work ok for Storage types json/bubble,", "# for DS we need to reload de dumped step to uniqify", "if", "not", "ukeys", ":", "return", "to_uniq", "else", ":", "uniq_data", "=", "bubble_lod_load", "(", "ctx", ",", "step", ",", "stage", ")", "ctx", ".", "say", "(", "'Creating uniq identifiers for ['", "+", "step", "+", "'] information'", ",", "0", ")", "ctx", ".", "gbc", ".", "say", "(", "'uniq_data:'", ",", "stuff", "=", "uniq_data", ",", "verbosity", "=", "1000", ")", "# TODO:make: data->keyed.items", "uniq_step_res", "=", "make_uniq", "(", "ctx", "=", "ctx", ",", "ldict", "=", "to_uniq", ",", "keyed", "=", "uniq_data", ",", "uniqstr", "=", "ukeys", ",", "tag", "=", "step", ",", "full_data", "=", "full_data", ",", "remove_missing_after_seconds", "=", "clean_missing_after_seconds", ")", "ctx", ".", "gbc", ".", "say", "(", "'uniq_step_res:'", ",", "stuff", "=", "uniq_step_res", ",", "verbosity", "=", "1000", ")", "to_uniq_newest", "=", "get_newest_uniq", "(", "ctx", ".", "gbc", ",", "uniq_step_res", ")", "# TODO: selected pulled only from slice of uniq", "# PROBLEM: slice of pull is not equal to slice of newest uniq,", "# can only select keys from newest, from slice of pulled", "# need a uid list from to_transform", "# to_transform = get_gen_slice(gbc, to_transform_newest, amount, index)", "# for now not a big problem, as with 'pump' there should be no problem", "to_uniq", "=", "to_uniq_newest", "# todo make keyed.items->data", "uniq_res_list", "=", "get_uniq_list", "(", "ctx", ".", "gbc", ",", "uniq_step_res", ")", "reset", "=", "True", "pfr", "=", "bubble_lod_dump", "(", "ctx", "=", "ctx", ",", "step", "=", "step", ",", "stage", "=", "stage", ",", "full_data", "=", "full_data", ",", "reset", "=", "reset", ",", "data_gen", "=", "uniq_res_list", ")", "ctx", ".", "gbc", ".", "say", "(", "'saved uniq '", "+", "step", "+", "' data res:'", ",", "stuff", "=", "pfr", ",", "verbosity", "=", "700", ")", "return", "to_uniq" ]
initially just a copy from UNIQ_PULL
[ "initially", "just", "a", "copy", "from", "UNIQ_PULL" ]
python
train
jazzband/django-ddp
dddp/api.py
https://github.com/jazzband/django-ddp/blob/1e1954b06fe140346acea43582515991685e4e01/dddp/api.py#L54-L103
def api_endpoint(path_or_func=None, decorate=True): """ Decorator to mark a method as an API endpoint for later registration. Args: path_or_func: either the function to be decorated or its API path. decorate (bool): Apply API_ENDPOINT_DECORATORS if True (default). Returns: Callable: Decorated function (with optionally applied decorators). Examples: >>> from dddp.api import APIMixin, api_endpoint >>> class Counter(APIMixin): ... value = 0 ... ... # default API path matches function name 'increment'. ... @api_endpoint ... def increment(self, amount): ... '''Increment counter value by `amount`.''' ... self.value += amount ... return self.value ... ... # excplicitly set API path to 'Decrement'. ... @api_endpoint('Decrement') ... def decrement(self, amount): ... '''Decrement counter value by `amount`.''' ... self.value -= amount ... return self.value """ def maybe_decorated(func): """Apply API_ENDPOINT_DECORATORS to func.""" if decorate: for decorator in API_ENDPOINT_DECORATORS: func = decorator()(func) return func if callable(path_or_func): path_or_func.api_path = path_or_func.__name__ return maybe_decorated(path_or_func) else: def _api_endpoint(func): """Decorator inner.""" if path_or_func is None: func.api_path = func.__name__ else: func.api_path = path_or_func return maybe_decorated(func) return _api_endpoint
[ "def", "api_endpoint", "(", "path_or_func", "=", "None", ",", "decorate", "=", "True", ")", ":", "def", "maybe_decorated", "(", "func", ")", ":", "\"\"\"Apply API_ENDPOINT_DECORATORS to func.\"\"\"", "if", "decorate", ":", "for", "decorator", "in", "API_ENDPOINT_DECORATORS", ":", "func", "=", "decorator", "(", ")", "(", "func", ")", "return", "func", "if", "callable", "(", "path_or_func", ")", ":", "path_or_func", ".", "api_path", "=", "path_or_func", ".", "__name__", "return", "maybe_decorated", "(", "path_or_func", ")", "else", ":", "def", "_api_endpoint", "(", "func", ")", ":", "\"\"\"Decorator inner.\"\"\"", "if", "path_or_func", "is", "None", ":", "func", ".", "api_path", "=", "func", ".", "__name__", "else", ":", "func", ".", "api_path", "=", "path_or_func", "return", "maybe_decorated", "(", "func", ")", "return", "_api_endpoint" ]
Decorator to mark a method as an API endpoint for later registration. Args: path_or_func: either the function to be decorated or its API path. decorate (bool): Apply API_ENDPOINT_DECORATORS if True (default). Returns: Callable: Decorated function (with optionally applied decorators). Examples: >>> from dddp.api import APIMixin, api_endpoint >>> class Counter(APIMixin): ... value = 0 ... ... # default API path matches function name 'increment'. ... @api_endpoint ... def increment(self, amount): ... '''Increment counter value by `amount`.''' ... self.value += amount ... return self.value ... ... # excplicitly set API path to 'Decrement'. ... @api_endpoint('Decrement') ... def decrement(self, amount): ... '''Decrement counter value by `amount`.''' ... self.value -= amount ... return self.value
[ "Decorator", "to", "mark", "a", "method", "as", "an", "API", "endpoint", "for", "later", "registration", "." ]
python
test
LEMS/pylems
lems/sim/runnable.py
https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/sim/runnable.py#L597-L793
def copy(self): """ Make a copy of this runnable. @return: Copy of this runnable. @rtype: lems.sim.runnable.Runnable """ if self.debug: print("Coping....."+self.id) r = Runnable(self.id, self.component, self.parent) copies = dict() # Copy simulation time parameters r.time_step = self.time_step r.time_completed = self.time_completed r.time_total = self.time_total # Plasticity and state stack (?) r.plastic = self.plastic r.state_stack = Stack() # Copy variables (GG - Faster using the add_* methods?) for v in self.instance_variables: r.instance_variables.append(v) r.__dict__[v] = self.__dict__[v] r.__dict__[v + '_shadow'] = self.__dict__[v + '_shadow'] for v in self.derived_variables: r.derived_variables.append(v) r.__dict__[v] = self.__dict__[v] r.__dict__[v + '_shadow'] = self.__dict__[v + '_shadow'] # Copy array elements for child in self.array: child_copy = child.copy() child_copy.parent = r r.array.append(child_copy) copies[child.uid] = child_copy # Copy attachment def for att in self.attachments: atn = self.attachments[att] r.attachments[att] = atn r.__dict__[atn] = [] # Copy children for uid in self.uchildren: child = self.uchildren[uid] child_copy = child.copy() child_copy.parent = r copies[child.uid] = child_copy r.add_child(child_copy.id, child_copy) # For typerefs try: idx = [k for k in self.__dict__ if self.__dict__[k] == child][0] r.__dict__[idx] = child_copy except: pass # For groups and attachments: try: idx = [k for k in self.__dict__ if child in self.__dict__[k]][0] if idx not in r.__dict__: r.__dict__[idx] = [] r.__dict__[idx].append(child_copy) except: pass # Copy event ports for port in self.event_in_ports: r.event_in_ports.append(port) r.event_in_counters[port] = 0 for port in self.event_out_ports: r.event_out_ports.append(port) r.event_out_callbacks[port] = self.event_out_callbacks[port] for ec in r.component.structure.event_connections: if self.debug: print("--- Fixing event_connection: %s in %s"%(ec.toxml(), id(r))) source = r.parent.resolve_path(ec.from_) target = r.parent.resolve_path(ec.to) if ec.receiver: # Will throw error... 
receiver_template = self.build_runnable(ec.receiver, target) #receiver = copy.deepcopy(receiver_template) receiver = receiver_template.copy() receiver.id = "{0}__{1}__".format(component.id, receiver_template.id) if ec.receiver_container: target.add_attachment(receiver, ec.receiver_container) target.add_child(receiver_template.id, receiver) target = receiver else: source = r.resolve_path(ec.from_) target = r.resolve_path(ec.to) source_port = ec.source_port target_port = ec.target_port if not source_port: if len(source.event_out_ports) == 1: source_port = source.event_out_ports[0] else: raise SimBuildError(("No source event port " "uniquely identifiable" " in '{0}'").format(source.id)) if not target_port: if len(target.event_in_ports) == 1: target_port = target.event_in_ports[0] else: raise SimBuildError(("No destination event port " "uniquely identifiable " "in '{0}'").format(target)) if self.debug: print("register_event_out_callback\n Source: %s, %s (port: %s) \n -> %s, %s (port: %s)"%(source, id(source), source_port, target, id(target), target_port)) source.register_event_out_callback(\ source_port, lambda: target.inc_event_in(target_port)) # Copy methods if getattr(self, "update_kinetic_scheme", None): r.update_kinetic_scheme = self.update_kinetic_scheme if getattr(self, "run_startup_event_handlers", None): r.run_startup_event_handlers = self.run_startup_event_handlers if getattr(self, "run_preprocessing_event_handlers", None): r.run_preprocessing_event_handlers = self.run_preprocessing_event_handlers if getattr(self, "run_postprocessing_event_handlers", None): r.run_postprocessing_event_handlers = self.run_postprocessing_event_handlers if getattr(self, "update_state_variables", None): r.update_state_variables = self.update_state_variables if getattr(self, "update_derived_variables", None): r.update_derived_variables = self.update_derived_variables #r.update_shadow_variables = self.update_shadow_variables if getattr(self, "update_derived_parameters", None): r.update_derived_parameters = self.update_derived_parameters for rn in self.regimes: r.add_regime(self.regimes[rn]) r.current_regime = self.current_regime # Copy groups for gn in self.groups: g = self.__dict__[gn] for c in g: if c.uid in copies: r.add_child_to_group(gn, copies[c.uid]) else: c2 = c.copy() c2.parent = r copies[c.uid] = c2 r.add_child_to_group(gn, c2) # Copy remaining runnable references. for k in self.__dict__: if k == 'parent': continue c = self.__dict__[k] if isinstance(c, Runnable): if c.uid in copies: r.__dict__[k] = copies[c.uid] else: c2 = c.copy() c2.parent = r copies[c.uid] = c2 r.__dict__[k] = c2 # Copy text fields for k in self.__dict__: if not k in r.__dict__: c = self.__dict__[k] if self.debug: print("Adding remaining field: %s = %s"%(k,c)) r.__dict__[k] = c if self.debug: print('########################################') keys = list(self.__dict__.keys()) keys.sort() print(len(keys)) for k in keys: print(k, self.__dict__[k]) print('----------------------------------------') keys = list(r.__dict__.keys()) keys.sort() print(len(keys)) for k in keys: print(k, r.__dict__[k]) print('########################################') print('') print('') print('') print('') if self.debug: print("Finished coping..."+self.id) return r
[ "def", "copy", "(", "self", ")", ":", "if", "self", ".", "debug", ":", "print", "(", "\"Coping.....\"", "+", "self", ".", "id", ")", "r", "=", "Runnable", "(", "self", ".", "id", ",", "self", ".", "component", ",", "self", ".", "parent", ")", "copies", "=", "dict", "(", ")", "# Copy simulation time parameters", "r", ".", "time_step", "=", "self", ".", "time_step", "r", ".", "time_completed", "=", "self", ".", "time_completed", "r", ".", "time_total", "=", "self", ".", "time_total", "# Plasticity and state stack (?)", "r", ".", "plastic", "=", "self", ".", "plastic", "r", ".", "state_stack", "=", "Stack", "(", ")", "# Copy variables (GG - Faster using the add_* methods?)", "for", "v", "in", "self", ".", "instance_variables", ":", "r", ".", "instance_variables", ".", "append", "(", "v", ")", "r", ".", "__dict__", "[", "v", "]", "=", "self", ".", "__dict__", "[", "v", "]", "r", ".", "__dict__", "[", "v", "+", "'_shadow'", "]", "=", "self", ".", "__dict__", "[", "v", "+", "'_shadow'", "]", "for", "v", "in", "self", ".", "derived_variables", ":", "r", ".", "derived_variables", ".", "append", "(", "v", ")", "r", ".", "__dict__", "[", "v", "]", "=", "self", ".", "__dict__", "[", "v", "]", "r", ".", "__dict__", "[", "v", "+", "'_shadow'", "]", "=", "self", ".", "__dict__", "[", "v", "+", "'_shadow'", "]", "# Copy array elements", "for", "child", "in", "self", ".", "array", ":", "child_copy", "=", "child", ".", "copy", "(", ")", "child_copy", ".", "parent", "=", "r", "r", ".", "array", ".", "append", "(", "child_copy", ")", "copies", "[", "child", ".", "uid", "]", "=", "child_copy", "# Copy attachment def", "for", "att", "in", "self", ".", "attachments", ":", "atn", "=", "self", ".", "attachments", "[", "att", "]", "r", ".", "attachments", "[", "att", "]", "=", "atn", "r", ".", "__dict__", "[", "atn", "]", "=", "[", "]", "# Copy children", "for", "uid", "in", "self", ".", "uchildren", ":", "child", "=", "self", ".", "uchildren", "[", "uid", "]", "child_copy", "=", "child", ".", "copy", "(", ")", "child_copy", ".", "parent", "=", "r", "copies", "[", "child", ".", "uid", "]", "=", "child_copy", "r", ".", "add_child", "(", "child_copy", ".", "id", ",", "child_copy", ")", "# For typerefs", "try", ":", "idx", "=", "[", "k", "for", "k", "in", "self", ".", "__dict__", "if", "self", ".", "__dict__", "[", "k", "]", "==", "child", "]", "[", "0", "]", "r", ".", "__dict__", "[", "idx", "]", "=", "child_copy", "except", ":", "pass", "# For groups and attachments:", "try", ":", "idx", "=", "[", "k", "for", "k", "in", "self", ".", "__dict__", "if", "child", "in", "self", ".", "__dict__", "[", "k", "]", "]", "[", "0", "]", "if", "idx", "not", "in", "r", ".", "__dict__", ":", "r", ".", "__dict__", "[", "idx", "]", "=", "[", "]", "r", ".", "__dict__", "[", "idx", "]", ".", "append", "(", "child_copy", ")", "except", ":", "pass", "# Copy event ports", "for", "port", "in", "self", ".", "event_in_ports", ":", "r", ".", "event_in_ports", ".", "append", "(", "port", ")", "r", ".", "event_in_counters", "[", "port", "]", "=", "0", "for", "port", "in", "self", ".", "event_out_ports", ":", "r", ".", "event_out_ports", ".", "append", "(", "port", ")", "r", ".", "event_out_callbacks", "[", "port", "]", "=", "self", ".", "event_out_callbacks", "[", "port", "]", "for", "ec", "in", "r", ".", "component", ".", "structure", ".", "event_connections", ":", "if", "self", ".", "debug", ":", "print", "(", "\"--- Fixing event_connection: %s in %s\"", "%", "(", "ec", ".", "toxml", "(", ")", ",", "id", "(", "r", ")", ")", ")", "source", 
"=", "r", ".", "parent", ".", "resolve_path", "(", "ec", ".", "from_", ")", "target", "=", "r", ".", "parent", ".", "resolve_path", "(", "ec", ".", "to", ")", "if", "ec", ".", "receiver", ":", "# Will throw error...", "receiver_template", "=", "self", ".", "build_runnable", "(", "ec", ".", "receiver", ",", "target", ")", "#receiver = copy.deepcopy(receiver_template)", "receiver", "=", "receiver_template", ".", "copy", "(", ")", "receiver", ".", "id", "=", "\"{0}__{1}__\"", ".", "format", "(", "component", ".", "id", ",", "receiver_template", ".", "id", ")", "if", "ec", ".", "receiver_container", ":", "target", ".", "add_attachment", "(", "receiver", ",", "ec", ".", "receiver_container", ")", "target", ".", "add_child", "(", "receiver_template", ".", "id", ",", "receiver", ")", "target", "=", "receiver", "else", ":", "source", "=", "r", ".", "resolve_path", "(", "ec", ".", "from_", ")", "target", "=", "r", ".", "resolve_path", "(", "ec", ".", "to", ")", "source_port", "=", "ec", ".", "source_port", "target_port", "=", "ec", ".", "target_port", "if", "not", "source_port", ":", "if", "len", "(", "source", ".", "event_out_ports", ")", "==", "1", ":", "source_port", "=", "source", ".", "event_out_ports", "[", "0", "]", "else", ":", "raise", "SimBuildError", "(", "(", "\"No source event port \"", "\"uniquely identifiable\"", "\" in '{0}'\"", ")", ".", "format", "(", "source", ".", "id", ")", ")", "if", "not", "target_port", ":", "if", "len", "(", "target", ".", "event_in_ports", ")", "==", "1", ":", "target_port", "=", "target", ".", "event_in_ports", "[", "0", "]", "else", ":", "raise", "SimBuildError", "(", "(", "\"No destination event port \"", "\"uniquely identifiable \"", "\"in '{0}'\"", ")", ".", "format", "(", "target", ")", ")", "if", "self", ".", "debug", ":", "print", "(", "\"register_event_out_callback\\n Source: %s, %s (port: %s) \\n -> %s, %s (port: %s)\"", "%", "(", "source", ",", "id", "(", "source", ")", ",", "source_port", ",", "target", ",", "id", "(", "target", ")", ",", "target_port", ")", ")", "source", ".", "register_event_out_callback", "(", "source_port", ",", "lambda", ":", "target", ".", "inc_event_in", "(", "target_port", ")", ")", "# Copy methods", "if", "getattr", "(", "self", ",", "\"update_kinetic_scheme\"", ",", "None", ")", ":", "r", ".", "update_kinetic_scheme", "=", "self", ".", "update_kinetic_scheme", "if", "getattr", "(", "self", ",", "\"run_startup_event_handlers\"", ",", "None", ")", ":", "r", ".", "run_startup_event_handlers", "=", "self", ".", "run_startup_event_handlers", "if", "getattr", "(", "self", ",", "\"run_preprocessing_event_handlers\"", ",", "None", ")", ":", "r", ".", "run_preprocessing_event_handlers", "=", "self", ".", "run_preprocessing_event_handlers", "if", "getattr", "(", "self", ",", "\"run_postprocessing_event_handlers\"", ",", "None", ")", ":", "r", ".", "run_postprocessing_event_handlers", "=", "self", ".", "run_postprocessing_event_handlers", "if", "getattr", "(", "self", ",", "\"update_state_variables\"", ",", "None", ")", ":", "r", ".", "update_state_variables", "=", "self", ".", "update_state_variables", "if", "getattr", "(", "self", ",", "\"update_derived_variables\"", ",", "None", ")", ":", "r", ".", "update_derived_variables", "=", "self", ".", "update_derived_variables", "#r.update_shadow_variables = self.update_shadow_variables", "if", "getattr", "(", "self", ",", "\"update_derived_parameters\"", ",", "None", ")", ":", "r", ".", "update_derived_parameters", "=", "self", ".", "update_derived_parameters", "for", "rn", "in", 
"self", ".", "regimes", ":", "r", ".", "add_regime", "(", "self", ".", "regimes", "[", "rn", "]", ")", "r", ".", "current_regime", "=", "self", ".", "current_regime", "# Copy groups", "for", "gn", "in", "self", ".", "groups", ":", "g", "=", "self", ".", "__dict__", "[", "gn", "]", "for", "c", "in", "g", ":", "if", "c", ".", "uid", "in", "copies", ":", "r", ".", "add_child_to_group", "(", "gn", ",", "copies", "[", "c", ".", "uid", "]", ")", "else", ":", "c2", "=", "c", ".", "copy", "(", ")", "c2", ".", "parent", "=", "r", "copies", "[", "c", ".", "uid", "]", "=", "c2", "r", ".", "add_child_to_group", "(", "gn", ",", "c2", ")", "# Copy remaining runnable references.", "for", "k", "in", "self", ".", "__dict__", ":", "if", "k", "==", "'parent'", ":", "continue", "c", "=", "self", ".", "__dict__", "[", "k", "]", "if", "isinstance", "(", "c", ",", "Runnable", ")", ":", "if", "c", ".", "uid", "in", "copies", ":", "r", ".", "__dict__", "[", "k", "]", "=", "copies", "[", "c", ".", "uid", "]", "else", ":", "c2", "=", "c", ".", "copy", "(", ")", "c2", ".", "parent", "=", "r", "copies", "[", "c", ".", "uid", "]", "=", "c2", "r", ".", "__dict__", "[", "k", "]", "=", "c2", "# Copy text fields", "for", "k", "in", "self", ".", "__dict__", ":", "if", "not", "k", "in", "r", ".", "__dict__", ":", "c", "=", "self", ".", "__dict__", "[", "k", "]", "if", "self", ".", "debug", ":", "print", "(", "\"Adding remaining field: %s = %s\"", "%", "(", "k", ",", "c", ")", ")", "r", ".", "__dict__", "[", "k", "]", "=", "c", "if", "self", ".", "debug", ":", "print", "(", "'########################################'", ")", "keys", "=", "list", "(", "self", ".", "__dict__", ".", "keys", "(", ")", ")", "keys", ".", "sort", "(", ")", "print", "(", "len", "(", "keys", ")", ")", "for", "k", "in", "keys", ":", "print", "(", "k", ",", "self", ".", "__dict__", "[", "k", "]", ")", "print", "(", "'----------------------------------------'", ")", "keys", "=", "list", "(", "r", ".", "__dict__", ".", "keys", "(", ")", ")", "keys", ".", "sort", "(", ")", "print", "(", "len", "(", "keys", ")", ")", "for", "k", "in", "keys", ":", "print", "(", "k", ",", "r", ".", "__dict__", "[", "k", "]", ")", "print", "(", "'########################################'", ")", "print", "(", "''", ")", "print", "(", "''", ")", "print", "(", "''", ")", "print", "(", "''", ")", "if", "self", ".", "debug", ":", "print", "(", "\"Finished coping...\"", "+", "self", ".", "id", ")", "return", "r" ]
Make a copy of this runnable. @return: Copy of this runnable. @rtype: lems.sim.runnable.Runnable
[ "Make", "a", "copy", "of", "this", "runnable", "." ]
python
train
odlgroup/odl
odl/operator/pspace_ops.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/pspace_ops.py#L230-L283
def _convert_to_spmatrix(operators): """Convert an array-like object of operators to a sparse matrix.""" # Lazy import to improve `import odl` time import scipy.sparse # Convert ops to sparse representation. This is not trivial because # operators can be indexable themselves and give the wrong impression # of an extra dimension. So we have to infer the shape manually # first and extract the indices of nonzero positions. nrows = len(operators) ncols = None irow, icol, data = [], [], [] for i, row in enumerate(operators): try: iter(row) except TypeError: raise ValueError( '`operators` must be a matrix of `Operator` objects, `0` ' 'or `None`, got {!r} (row {} = {!r} is not iterable)' ''.format(operators, i, row)) if isinstance(row, Operator): raise ValueError( '`operators` must be a matrix of `Operator` objects, `0` ' 'or `None`, but row {} is an `Operator` {!r}' ''.format(i, row)) if ncols is None: ncols = len(row) elif len(row) != ncols: raise ValueError( 'all rows in `operators` must have the same length, but ' 'length {} of row {} differs from previous common length ' '{}'.format(len(row), i, ncols)) for j, col in enumerate(row): if col is None or col is 0: pass elif isinstance(col, Operator): irow.append(i) icol.append(j) data.append(col) else: raise ValueError( '`operators` must be a matrix of `Operator` objects, ' '`0` or `None`, got entry {!r} at ({}, {})' ''.format(col, i, j)) # Create object array explicitly, threby avoiding erroneous conversion # in `coo_matrix.__init__` data_arr = np.empty(len(data), dtype=object) data_arr[:] = data return scipy.sparse.coo_matrix((data_arr, (irow, icol)), shape=(nrows, ncols))
[ "def", "_convert_to_spmatrix", "(", "operators", ")", ":", "# Lazy import to improve `import odl` time", "import", "scipy", ".", "sparse", "# Convert ops to sparse representation. This is not trivial because", "# operators can be indexable themselves and give the wrong impression", "# of an extra dimension. So we have to infer the shape manually", "# first and extract the indices of nonzero positions.", "nrows", "=", "len", "(", "operators", ")", "ncols", "=", "None", "irow", ",", "icol", ",", "data", "=", "[", "]", ",", "[", "]", ",", "[", "]", "for", "i", ",", "row", "in", "enumerate", "(", "operators", ")", ":", "try", ":", "iter", "(", "row", ")", "except", "TypeError", ":", "raise", "ValueError", "(", "'`operators` must be a matrix of `Operator` objects, `0` '", "'or `None`, got {!r} (row {} = {!r} is not iterable)'", "''", ".", "format", "(", "operators", ",", "i", ",", "row", ")", ")", "if", "isinstance", "(", "row", ",", "Operator", ")", ":", "raise", "ValueError", "(", "'`operators` must be a matrix of `Operator` objects, `0` '", "'or `None`, but row {} is an `Operator` {!r}'", "''", ".", "format", "(", "i", ",", "row", ")", ")", "if", "ncols", "is", "None", ":", "ncols", "=", "len", "(", "row", ")", "elif", "len", "(", "row", ")", "!=", "ncols", ":", "raise", "ValueError", "(", "'all rows in `operators` must have the same length, but '", "'length {} of row {} differs from previous common length '", "'{}'", ".", "format", "(", "len", "(", "row", ")", ",", "i", ",", "ncols", ")", ")", "for", "j", ",", "col", "in", "enumerate", "(", "row", ")", ":", "if", "col", "is", "None", "or", "col", "is", "0", ":", "pass", "elif", "isinstance", "(", "col", ",", "Operator", ")", ":", "irow", ".", "append", "(", "i", ")", "icol", ".", "append", "(", "j", ")", "data", ".", "append", "(", "col", ")", "else", ":", "raise", "ValueError", "(", "'`operators` must be a matrix of `Operator` objects, '", "'`0` or `None`, got entry {!r} at ({}, {})'", "''", ".", "format", "(", "col", ",", "i", ",", "j", ")", ")", "# Create object array explicitly, threby avoiding erroneous conversion", "# in `coo_matrix.__init__`", "data_arr", "=", "np", ".", "empty", "(", "len", "(", "data", ")", ",", "dtype", "=", "object", ")", "data_arr", "[", ":", "]", "=", "data", "return", "scipy", ".", "sparse", ".", "coo_matrix", "(", "(", "data_arr", ",", "(", "irow", ",", "icol", ")", ")", ",", "shape", "=", "(", "nrows", ",", "ncols", ")", ")" ]
Convert an array-like object of operators to a sparse matrix.
[ "Convert", "an", "array", "-", "like", "object", "of", "operators", "to", "a", "sparse", "matrix", "." ]
python
train
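The key trick in the helper above is pre-allocating an object-dtype array before handing it to scipy, so the operators are not coerced; a standalone sketch with strings standing in for Operator instances:

import numpy as np
import scipy.sparse

ops = ['A', 'B']                      # stand-ins for Operator objects
irow, icol = [0, 1], [1, 0]
data = np.empty(len(ops), dtype=object)
data[:] = ops                         # explicit object array, as in the record above
mat = scipy.sparse.coo_matrix((data, (irow, icol)), shape=(2, 2))
# mat.data -> array(['A', 'B'], dtype=object)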
CivicSpleen/ambry
ambry/orm/partition.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/partition.py#L598-L609
def local_datafile(self): """Return the datafile for this partition, from the build directory, the remote, or the warehouse""" from ambry_sources import MPRowsFile from fs.errors import ResourceNotFoundError from ambry.orm.exc import NotFoundError try: return MPRowsFile(self._bundle.build_fs, self.cache_key) except ResourceNotFoundError: raise NotFoundError( 'Could not locate data file for partition {} (local)'.format(self.identity.fqname))
[ "def", "local_datafile", "(", "self", ")", ":", "from", "ambry_sources", "import", "MPRowsFile", "from", "fs", ".", "errors", "import", "ResourceNotFoundError", "from", "ambry", ".", "orm", ".", "exc", "import", "NotFoundError", "try", ":", "return", "MPRowsFile", "(", "self", ".", "_bundle", ".", "build_fs", ",", "self", ".", "cache_key", ")", "except", "ResourceNotFoundError", ":", "raise", "NotFoundError", "(", "'Could not locate data file for partition {} (local)'", ".", "format", "(", "self", ".", "identity", ".", "fqname", ")", ")" ]
Return the datafile for this partition, from the build directory, the remote, or the warehouse
[ "Return", "the", "datafile", "for", "this", "partition", "from", "the", "build", "directory", "the", "remote", "or", "the", "warehouse" ]
python
train
andycasey/sick
sick/models/base.py
https://github.com/andycasey/sick/blob/6c37686182794c4cafea45abf7062b30b789b1a2/sick/models/base.py#L277-L284
def _format_data(self, data): """ Sort the data in blue wavelengths to red, and ignore any spectra that have entirely non-finite or negative fluxes. """ return [spectrum for spectrum in \ sorted(data if isinstance(data, (list, tuple)) else [data], key=lambda x: x.disp[0]) if np.any(np.isfinite(spectrum.flux))]
[ "def", "_format_data", "(", "self", ",", "data", ")", ":", "return", "[", "spectrum", "for", "spectrum", "in", "sorted", "(", "data", "if", "isinstance", "(", "data", ",", "(", "list", ",", "tuple", ")", ")", "else", "[", "data", "]", ",", "key", "=", "lambda", "x", ":", "x", ".", "disp", "[", "0", "]", ")", "if", "np", ".", "any", "(", "np", ".", "isfinite", "(", "spectrum", ".", "flux", ")", ")", "]" ]
Sort the data in blue wavelengths to red, and ignore any spectra that have entirely non-finite or negative fluxes.
[ "Sort", "the", "data", "in", "blue", "wavelengths", "to", "red", "and", "ignore", "any", "spectra", "that", "have", "entirely", "non", "-", "finite", "or", "negative", "fluxes", "." ]
python
train
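A standalone sketch of the sort-and-filter logic above; SimpleNamespace stands in for the spectrum objects (which only need disp and flux arrays), and the wavelengths are invented:

import numpy as np
from types import SimpleNamespace

spectra = [
    SimpleNamespace(disp=np.array([650.0, 660.0]), flux=np.array([np.nan, np.nan])),  # dropped: no finite flux
    SimpleNamespace(disp=np.array([400.0, 410.0]), flux=np.array([1.0, 1.2])),
]
kept = [s for s in sorted(spectra, key=lambda x: x.disp[0]) if np.any(np.isfinite(s.flux))]
# kept holds only the 400 nm spectrum, ordered blue to red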
bgyori/pykqml
kqml/kqml_list.py
https://github.com/bgyori/pykqml/blob/c18b39868626215deb634567c6bd7c0838e443c0/kqml/kqml_list.py#L44-L72
def get(self, keyword): """Return the element of the list after the given keyword. Parameters ---------- keyword : str The keyword parameter to find in the list. Putting a colon before the keyword is optional, if no colon is given, it is added automatically (e.g. "keyword" will be found as ":keyword" in the list). Returns ------- obj : KQMLObject The object corresponding to the keyword parameter Example: kl = KQMLList.from_string('(FAILURE :reason INVALID_PARAMETER)') kl.get('reason') # KQMLToken('INVALID_PARAMETER') """ if not keyword.startswith(':'): keyword = ':' + keyword for i, s in enumerate(self.data): if s.to_string().upper() == keyword.upper(): if i < len(self.data)-1: return self.data[i+1] else: return None return None
[ "def", "get", "(", "self", ",", "keyword", ")", ":", "if", "not", "keyword", ".", "startswith", "(", "':'", ")", ":", "keyword", "=", "':'", "+", "keyword", "for", "i", ",", "s", "in", "enumerate", "(", "self", ".", "data", ")", ":", "if", "s", ".", "to_string", "(", ")", ".", "upper", "(", ")", "==", "keyword", ".", "upper", "(", ")", ":", "if", "i", "<", "len", "(", "self", ".", "data", ")", "-", "1", ":", "return", "self", ".", "data", "[", "i", "+", "1", "]", "else", ":", "return", "None", "return", "None" ]
Return the element of the list after the given keyword. Parameters ---------- keyword : str The keyword parameter to find in the list. Putting a colon before the keyword is optional, if no colon is given, it is added automatically (e.g. "keyword" will be found as ":keyword" in the list). Returns ------- obj : KQMLObject The object corresponding to the keyword parameter Example: kl = KQMLList.from_string('(FAILURE :reason INVALID_PARAMETER)') kl.get('reason') # KQMLToken('INVALID_PARAMETER')
[ "Return", "the", "element", "of", "the", "list", "after", "the", "given", "keyword", "." ]
python
train
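The docstring's example expands to a runnable sketch; the import path follows the record's module path:

from kqml.kqml_list import KQMLList

kl = KQMLList.from_string('(FAILURE :reason INVALID_PARAMETER)')
print(kl.get('reason'))    # INVALID_PARAMETER (a KQMLToken)
print(kl.get(':reason'))   # the leading colon is optional, same result
print(kl.get('missing'))   # None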
LionelAuroux/pyrser
pyrser/passes/topython.py
https://github.com/LionelAuroux/pyrser/blob/f153a97ef2b6bf915a1ed468c0252a9a59b754d5/pyrser/passes/topython.py#L49-L62
def visit_Call(self, node: parsing.Call) -> ast.expr: """Generates python code calling the function. fn(*args) """ return ast.Call( ast.Attribute( ast.Name('self', ast.Load), node.callObject.__name__, ast.Load()), [ast.Str(param) for param in node.params], [], None, None)
[ "def", "visit_Call", "(", "self", ",", "node", ":", "parsing", ".", "Call", ")", "->", "ast", ".", "expr", ":", "return", "ast", ".", "Call", "(", "ast", ".", "Attribute", "(", "ast", ".", "Name", "(", "'self'", ",", "ast", ".", "Load", ")", ",", "node", ".", "callObject", ".", "__name__", ",", "ast", ".", "Load", "(", ")", ")", ",", "[", "ast", ".", "Str", "(", "param", ")", "for", "param", "in", "node", ".", "params", "]", ",", "[", "]", ",", "None", ",", "None", ")" ]
Generates python code calling the function. fn(*args)
[ "Generates", "python", "code", "calling", "the", "function", "." ]
python
test
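For orientation, the same self.<name>(*string_args) call shape can be built and rendered with the modern ast API (Python 3.9+ for ast.unparse; ast.Constant replaces the deprecated ast.Str used in the record, and 'read_char' is a made-up method name):

import ast

call = ast.Call(
    func=ast.Attribute(value=ast.Name(id='self', ctx=ast.Load()), attr='read_char', ctx=ast.Load()),
    args=[ast.Constant('a')],
    keywords=[],
)
print(ast.unparse(call))   # self.read_char('a')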
seung-lab/cloud-volume
cloudvolume/compression.py
https://github.com/seung-lab/cloud-volume/blob/d2fd4500333f1bc3cd3e3919a8b649cec5d8e214/cloudvolume/compression.py#L9-L34
def decompress(content, encoding, filename='N/A'): """ Decompress file content. Required: content (bytes): a file to be compressed encoding: None (no compression) or 'gzip' Optional: filename (str:default:'N/A'): Used for debugging messages Raises: NotImplementedError if an unsupported codec is specified. compression.EncodeError if the encoder has an issue Return: decompressed content """ try: encoding = (encoding or '').lower() if encoding == '': return content elif encoding == 'gzip': return gunzip(content) except DecompressionError as err: print("Filename: " + str(filename)) raise raise NotImplementedError(str(encoding) + ' is not currently supported. Supported Options: None, gzip')
[ "def", "decompress", "(", "content", ",", "encoding", ",", "filename", "=", "'N/A'", ")", ":", "try", ":", "encoding", "=", "(", "encoding", "or", "''", ")", ".", "lower", "(", ")", "if", "encoding", "==", "''", ":", "return", "content", "elif", "encoding", "==", "'gzip'", ":", "return", "gunzip", "(", "content", ")", "except", "DecompressionError", "as", "err", ":", "print", "(", "\"Filename: \"", "+", "str", "(", "filename", ")", ")", "raise", "raise", "NotImplementedError", "(", "str", "(", "encoding", ")", "+", "' is not currently supported. Supported Options: None, gzip'", ")" ]
Decompress file content. Required: content (bytes): a file to be decompressed encoding: None (no compression) or 'gzip' Optional: filename (str:default:'N/A'): Used for debugging messages Raises: NotImplementedError if an unsupported codec is specified. compression.DecompressionError if the decoder has an issue Return: decompressed content
[ "Decompress", "file", "content", "." ]
python
train
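A usage sketch, importing from the record's module path and using the standard gzip module to produce input (the payload bytes are arbitrary; gunzip and DecompressionError are assumed to live in the same module):

import gzip
from cloudvolume.compression import decompress  # module path taken from the record above

payload = gzip.compress(b'raw bytes')
decompress(payload, 'gzip')     # b'raw bytes'
decompress(b'raw bytes', None)  # passed through unchanged when encoding is None or ''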
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L16915-L16952
def insert_usb_device_filter(self, position, filter_p): """Inserts the given USB device to the specified position in the list of filters. Positions are numbered starting from @c 0. If the specified position is equal to or greater than the number of elements in the list, the filter is added at the end of the collection. Duplicates are not allowed, so an attempt to insert a filter already in the list is an error. If USB functionality is not available in the given edition of VirtualBox, this method will set the result code to @c E_NOTIMPL. :py:func:`usb_device_filters` in position of type int Position to insert the filter to. in filter_p of type :class:`IHostUSBDeviceFilter` USB device filter to insert. raises :class:`VBoxErrorInvalidObjectState` USB device filter is not created within this VirtualBox instance. raises :class:`OleErrorInvalidarg` USB device filter already in list. """ if not isinstance(position, baseinteger): raise TypeError("position can only be an instance of type baseinteger") if not isinstance(filter_p, IHostUSBDeviceFilter): raise TypeError("filter_p can only be an instance of type IHostUSBDeviceFilter") self._call("insertUSBDeviceFilter", in_p=[position, filter_p])
[ "def", "insert_usb_device_filter", "(", "self", ",", "position", ",", "filter_p", ")", ":", "if", "not", "isinstance", "(", "position", ",", "baseinteger", ")", ":", "raise", "TypeError", "(", "\"position can only be an instance of type baseinteger\"", ")", "if", "not", "isinstance", "(", "filter_p", ",", "IHostUSBDeviceFilter", ")", ":", "raise", "TypeError", "(", "\"filter_p can only be an instance of type IHostUSBDeviceFilter\"", ")", "self", ".", "_call", "(", "\"insertUSBDeviceFilter\"", ",", "in_p", "=", "[", "position", ",", "filter_p", "]", ")" ]
Inserts the given USB device to the specified position in the list of filters. Positions are numbered starting from @c 0. If the specified position is equal to or greater than the number of elements in the list, the filter is added at the end of the collection. Duplicates are not allowed, so an attempt to insert a filter already in the list is an error. If USB functionality is not available in the given edition of VirtualBox, this method will set the result code to @c E_NOTIMPL. :py:func:`usb_device_filters` in position of type int Position to insert the filter to. in filter_p of type :class:`IHostUSBDeviceFilter` USB device filter to insert. raises :class:`VBoxErrorInvalidObjectState` USB device filter is not created within this VirtualBox instance. raises :class:`OleErrorInvalidarg` USB device filter already in list.
[ "Inserts", "the", "given", "USB", "device", "to", "the", "specified", "position", "in", "the", "list", "of", "filters", ".", "Positions", "are", "numbered", "starting", "from", "@c", "0", ".", "If", "the", "specified", "position", "is", "equal", "to", "or", "greater", "than", "the", "number", "of", "elements", "in", "the", "list", "the", "filter", "is", "added", "at", "the", "end", "of", "the", "collection", ".", "Duplicates", "are", "not", "allowed", "so", "an", "attempt", "to", "insert", "a", "filter", "already", "in", "the", "list", "is", "an", "error", ".", "If", "USB", "functionality", "is", "not", "available", "in", "the", "given", "edition", "of", "VirtualBox", "this", "method", "will", "set", "the", "result", "code", "to", "@c", "E_NOTIMPL", ".", ":", "py", ":", "func", ":", "usb_device_filters" ]
python
train
The-Politico/politico-civic-ap-loader
aploader/management/commands/initialize_election_date.py
https://github.com/The-Politico/politico-civic-ap-loader/blob/4afeebb62da4b8f22da63711e7176bf4527bccfb/aploader/management/commands/initialize_election_date.py#L195-L213
def get_or_create_party(self, row): """ Gets or creates the Party object based on AP code of the row of election data. All parties that aren't Democratic or Republican are aggregable. """ if row["party"] in ["Dem", "GOP"]: aggregable = False else: aggregable = True defaults = {"label": row["party"], "aggregate_candidates": aggregable} party, created = government.Party.objects.get_or_create( ap_code=row["party"], defaults=defaults ) return party
[ "def", "get_or_create_party", "(", "self", ",", "row", ")", ":", "if", "row", "[", "\"party\"", "]", "in", "[", "\"Dem\"", ",", "\"GOP\"", "]", ":", "aggregable", "=", "False", "else", ":", "aggregable", "=", "True", "defaults", "=", "{", "\"label\"", ":", "row", "[", "\"party\"", "]", ",", "\"aggregate_candidates\"", ":", "aggregable", "}", "party", ",", "created", "=", "government", ".", "Party", ".", "objects", ".", "get_or_create", "(", "ap_code", "=", "row", "[", "\"party\"", "]", ",", "defaults", "=", "defaults", ")", "return", "party" ]
Gets or creates the Party object based on AP code of the row of election data. All parties that aren't Democratic or Republican are aggregable.
[ "Gets", "or", "creates", "the", "Party", "object", "based", "on", "AP", "code", "of", "the", "row", "of", "election", "data", "." ]
python
train
saltstack/salt
salt/client/mixins.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/mixins.py#L525-L540
def asynchronous(self, fun, low, user='UNKNOWN', pub=None): ''' Execute the function in a multiprocess and return the event tag to use to watch for the return ''' async_pub = pub if pub is not None else self._gen_async_pub() proc = salt.utils.process.SignalHandlingMultiprocessingProcess( target=self._proc_function, args=(fun, low, user, async_pub['tag'], async_pub['jid'])) with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers proc.start() proc.join() # MUST join, otherwise we leave zombies all over return async_pub
[ "def", "asynchronous", "(", "self", ",", "fun", ",", "low", ",", "user", "=", "'UNKNOWN'", ",", "pub", "=", "None", ")", ":", "async_pub", "=", "pub", "if", "pub", "is", "not", "None", "else", "self", ".", "_gen_async_pub", "(", ")", "proc", "=", "salt", ".", "utils", ".", "process", ".", "SignalHandlingMultiprocessingProcess", "(", "target", "=", "self", ".", "_proc_function", ",", "args", "=", "(", "fun", ",", "low", ",", "user", ",", "async_pub", "[", "'tag'", "]", ",", "async_pub", "[", "'jid'", "]", ")", ")", "with", "salt", ".", "utils", ".", "process", ".", "default_signals", "(", "signal", ".", "SIGINT", ",", "signal", ".", "SIGTERM", ")", ":", "# Reset current signals before starting the process in", "# order not to inherit the current signal handlers", "proc", ".", "start", "(", ")", "proc", ".", "join", "(", ")", "# MUST join, otherwise we leave zombies all over", "return", "async_pub" ]
Execute the function in a multiprocess and return the event tag to use to watch for the return
[ "Execute", "the", "function", "in", "a", "multiprocess", "and", "return", "the", "event", "tag", "to", "use", "to", "watch", "for", "the", "return" ]
python
train
avelkoski/FRB
fred/utils/__init__.py
https://github.com/avelkoski/FRB/blob/692bcf576e17bd1a81db2b7644f4f61aeb39e5c7/fred/utils/__init__.py#L4-L19
def query_params(*frb_fred_params):
    """
    Decorator that pops all accepted parameters from method's kwargs and puts
    them in the params argument. Modeled after elasticsearch-py client utils strategy.
    See https://github.com/elastic/elasticsearch-py/blob/3400179153cc13b6ae2c26734337202569bdfd80/elasticsearch/client/utils.py
    """
    def _wrapper(func):
        @wraps(func)
        def _wrapped(*args, **kwargs):
            params = kwargs.pop('params', {})
            for p in frb_fred_params:
                if p in kwargs:
                    params[p] = kwargs.pop(p)
            return func(*args,params=params,**kwargs)
        return _wrapped
    return _wrapper
[ "def", "query_params", "(", "*", "frb_fred_params", ")", ":", "def", "_wrapper", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "_wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "params", "=", "kwargs", ".", "pop", "(", "'params'", ",", "{", "}", ")", "for", "p", "in", "frb_fred_params", ":", "if", "p", "in", "kwargs", ":", "params", "[", "p", "]", "=", "kwargs", ".", "pop", "(", "p", ")", "return", "func", "(", "*", "args", ",", "params", "=", "params", ",", "*", "*", "kwargs", ")", "return", "_wrapped", "return", "_wrapper" ]
Decorator that pops all accepted parameters from method's kwargs and puts them in the params argument. Modeled after elasticsearch-py client utils strategy. See https://github.com/elastic/elasticsearch-py/blob/3400179153cc13b6ae2c26734337202569bdfd80/elasticsearch/client/utils.py
[ "Decorator", "that", "pops", "all", "accepted", "parameters", "from", "method", "s", "kwargs", "and", "puts", "them", "in", "the", "params", "argument", ".", "Modeled", "after", "elasticsearch", "-", "py", "client", "utils", "strategy", ".", "See", "https", ":", "//", "github", ".", "com", "/", "elastic", "/", "elasticsearch", "-", "py", "/", "blob", "/", "3400179153cc13b6ae2c26734337202569bdfd80", "/", "elasticsearch", "/", "client", "/", "utils", ".", "py" ]
python
train
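Editor's note: a minimal usage sketch for the query_params decorator in the record above. The DemoClient class and the 'limit'/'sort_order' parameter names are hypothetical illustrations, not part of the FRB package; the import path is assumed from the record's path field.

from fred.utils import query_params   # assumed import path

class DemoClient:
    # Hypothetical method: accepted keyword arguments are collected into `params`.
    @query_params('limit', 'sort_order')
    def series(self, series_id, params=None, **kwargs):
        return {'series_id': series_id, 'params': params}

client = DemoClient()
print(client.series('GDP', limit=5, sort_order='asc'))
# {'series_id': 'GDP', 'params': {'limit': 5, 'sort_order': 'asc'}}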
thumbor/thumbor
thumbor/handlers/__init__.py
https://github.com/thumbor/thumbor/blob/558ccdd6e3bc29e1c9ee3687372c4b3eb05ac607/thumbor/handlers/__init__.py#L146-L224
def get_image(self):
        """
        This function is called after the PRE_LOAD filters have been applied.
        It applies the AFTER_LOAD filters on the result, then crops the image.
        """
        try:
            result = yield self._fetch(
                self.context.request.image_url
            )

            if not result.successful:
                if result.loader_error == LoaderResult.ERROR_NOT_FOUND:
                    self._error(404)
                    return
                elif result.loader_error == LoaderResult.ERROR_UPSTREAM:
                    # Return a Bad Gateway status if the error came from upstream
                    self._error(502)
                    return
                elif result.loader_error == LoaderResult.ERROR_TIMEOUT:
                    # Return a Gateway Timeout status if upstream timed out (i.e. 599)
                    self._error(504)
                    return
                elif isinstance(result.loader_error, int):
                    self._error(result.loader_error)
                    return
                elif hasattr(result, 'engine_error') and result.engine_error == EngineResult.COULD_NOT_LOAD_IMAGE:
                    self._error(400)
                    return
                else:
                    self._error(500)
                    return

        except Exception as e:
            msg = '[BaseHandler] get_image failed for url `{url}`. error: `{error}`'.format(
                url=self.context.request.image_url, error=e
            )

            self.log_exception(*sys.exc_info())

            if 'cannot identify image file' in e.message:
                logger.warning(msg)
                self._error(400)
            else:
                logger.error(msg)
                self._error(500)
            return

        normalized = result.normalized
        buffer = result.buffer
        engine = result.engine

        req = self.context.request

        if engine is None:
            if buffer is None:
                self._error(504)
                return

            engine = self.context.request.engine

            try:
                engine.load(buffer, self.context.request.extension)
            except Exception:
                self._error(504)
                return

        self.context.transformer = Transformer(self.context)

        def transform():
            self.normalize_crops(normalized, req, engine)

            if req.meta:
                self.context.transformer.engine = \
                    self.context.request.engine = \
                    JSONEngine(engine, req.image_url, req.meta_callback)

            self.context.transformer.transform(self.after_transform)

        self.filters_runner.apply_filters(thumbor.filters.PHASE_AFTER_LOAD, transform)
[ "def", "get_image", "(", "self", ")", ":", "try", ":", "result", "=", "yield", "self", ".", "_fetch", "(", "self", ".", "context", ".", "request", ".", "image_url", ")", "if", "not", "result", ".", "successful", ":", "if", "result", ".", "loader_error", "==", "LoaderResult", ".", "ERROR_NOT_FOUND", ":", "self", ".", "_error", "(", "404", ")", "return", "elif", "result", ".", "loader_error", "==", "LoaderResult", ".", "ERROR_UPSTREAM", ":", "# Return a Bad Gateway status if the error came from upstream", "self", ".", "_error", "(", "502", ")", "return", "elif", "result", ".", "loader_error", "==", "LoaderResult", ".", "ERROR_TIMEOUT", ":", "# Return a Gateway Timeout status if upstream timed out (i.e. 599)", "self", ".", "_error", "(", "504", ")", "return", "elif", "isinstance", "(", "result", ".", "loader_error", ",", "int", ")", ":", "self", ".", "_error", "(", "result", ".", "loader_error", ")", "return", "elif", "hasattr", "(", "result", ",", "'engine_error'", ")", "and", "result", ".", "engine_error", "==", "EngineResult", ".", "COULD_NOT_LOAD_IMAGE", ":", "self", ".", "_error", "(", "400", ")", "return", "else", ":", "self", ".", "_error", "(", "500", ")", "return", "except", "Exception", "as", "e", ":", "msg", "=", "'[BaseHandler] get_image failed for url `{url}`. error: `{error}`'", ".", "format", "(", "url", "=", "self", ".", "context", ".", "request", ".", "image_url", ",", "error", "=", "e", ")", "self", ".", "log_exception", "(", "*", "sys", ".", "exc_info", "(", ")", ")", "if", "'cannot identify image file'", "in", "e", ".", "message", ":", "logger", ".", "warning", "(", "msg", ")", "self", ".", "_error", "(", "400", ")", "else", ":", "logger", ".", "error", "(", "msg", ")", "self", ".", "_error", "(", "500", ")", "return", "normalized", "=", "result", ".", "normalized", "buffer", "=", "result", ".", "buffer", "engine", "=", "result", ".", "engine", "req", "=", "self", ".", "context", ".", "request", "if", "engine", "is", "None", ":", "if", "buffer", "is", "None", ":", "self", ".", "_error", "(", "504", ")", "return", "engine", "=", "self", ".", "context", ".", "request", ".", "engine", "try", ":", "engine", ".", "load", "(", "buffer", ",", "self", ".", "context", ".", "request", ".", "extension", ")", "except", "Exception", ":", "self", ".", "_error", "(", "504", ")", "return", "self", ".", "context", ".", "transformer", "=", "Transformer", "(", "self", ".", "context", ")", "def", "transform", "(", ")", ":", "self", ".", "normalize_crops", "(", "normalized", ",", "req", ",", "engine", ")", "if", "req", ".", "meta", ":", "self", ".", "context", ".", "transformer", ".", "engine", "=", "self", ".", "context", ".", "request", ".", "engine", "=", "JSONEngine", "(", "engine", ",", "req", ".", "image_url", ",", "req", ".", "meta_callback", ")", "self", ".", "context", ".", "transformer", ".", "transform", "(", "self", ".", "after_transform", ")", "self", ".", "filters_runner", ".", "apply_filters", "(", "thumbor", ".", "filters", ".", "PHASE_AFTER_LOAD", ",", "transform", ")" ]
This function is called after the PRE_LOAD filters have been applied. It applies the AFTER_LOAD filters on the result, then crops the image.
[ "This", "function", "is", "called", "after", "the", "PRE_LOAD", "filters", "have", "been", "applied", ".", "It", "applies", "the", "AFTER_LOAD", "filters", "on", "the", "result", "then", "crops", "the", "image", "." ]
python
train
rameshg87/pyremotevbox
pyremotevbox/ZSI/wstools/XMLSchema.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/wstools/XMLSchema.py#L2059-L2066
def getElementDeclaration(self, attribute=None):
        """If attribute is None, "ref" is assumed, return the corresponding
        representation of the global element declaration (ElementDeclaration),
        To maintain backwards compat, if attribute is provided call base class method.
        """
        if attribute:
            return XMLSchemaComponent.getElementDeclaration(self, attribute)
        return XMLSchemaComponent.getElementDeclaration(self, 'ref')
[ "def", "getElementDeclaration", "(", "self", ",", "attribute", "=", "None", ")", ":", "if", "attribute", ":", "return", "XMLSchemaComponent", ".", "getElementDeclaration", "(", "self", ",", "attribute", ")", "return", "XMLSchemaComponent", ".", "getElementDeclaration", "(", "self", ",", "'ref'", ")" ]
If attribute is None, "ref" is assumed, return the corresponding representation of the global element declaration (ElementDeclaration), To maintain backwards compat, if attribute is provided call base class method.
[ "If", "attribute", "is", "None", "ref", "is", "assumed", "return", "the", "corresponding", "representation", "of", "the", "global", "element", "declaration", "(", "ElementDeclaration", ")", "To", "maintain", "backwards", "compat", "if", "attribute", "is", "provided", "call", "base", "class", "method", "." ]
python
train
RJT1990/pyflux
pyflux/gas/gasrank.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/gas/gasrank.py#L258-L292
def _model_abilities_one_components(self,beta):
        """ Creates the structure of the model - store abilities

        Parameters
        ----------
        beta : np.array
            Contains untransformed starting values for latent variables

        Returns
        ----------
        theta : np.array
            Contains the predicted values for the time series

        Y : np.array
            Contains the length-adjusted time series (accounting for lags)

        scores : np.array
            Contains the scores for the time series
        """
        parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
        scale, shape, skewness = self._get_scale_and_shape(parm)

        state_vectors = np.zeros(shape=(self.max_team+1))
        state_vectors_store = np.zeros(shape=(int(np.max(self.home_count)+50),int(self.max_team+1)))
        theta = np.zeros(shape=(self.data.shape[0]))

        for t in range(0,self.data.shape[0]):
            theta[t] = parm[0] + state_vectors[self.home_id[t]] - state_vectors[self.away_id[t]]
            state_vectors[self.home_id[t]] += parm[1]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness)
            state_vectors[self.away_id[t]] += -parm[1]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness)
            state_vectors_store[int(self.home_count[t]), self.home_id[t]] = state_vectors_store[max(0,int(self.home_count[t])-1), self.home_id[t]] + parm[1]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness)
            state_vectors_store[int(self.away_count[t]), self.away_id[t]] = state_vectors_store[max(0,int(self.away_count[t])-1), self.away_id[t]] -parm[1]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness)

        return state_vectors_store
[ "def", "_model_abilities_one_components", "(", "self", ",", "beta", ")", ":", "parm", "=", "np", ".", "array", "(", "[", "self", ".", "latent_variables", ".", "z_list", "[", "k", "]", ".", "prior", ".", "transform", "(", "beta", "[", "k", "]", ")", "for", "k", "in", "range", "(", "beta", ".", "shape", "[", "0", "]", ")", "]", ")", "scale", ",", "shape", ",", "skewness", "=", "self", ".", "_get_scale_and_shape", "(", "parm", ")", "state_vectors", "=", "np", ".", "zeros", "(", "shape", "=", "(", "self", ".", "max_team", "+", "1", ")", ")", "state_vectors_store", "=", "np", ".", "zeros", "(", "shape", "=", "(", "int", "(", "np", ".", "max", "(", "self", ".", "home_count", ")", "+", "50", ")", ",", "int", "(", "self", ".", "max_team", "+", "1", ")", ")", ")", "theta", "=", "np", ".", "zeros", "(", "shape", "=", "(", "self", ".", "data", ".", "shape", "[", "0", "]", ")", ")", "for", "t", "in", "range", "(", "0", ",", "self", ".", "data", ".", "shape", "[", "0", "]", ")", ":", "theta", "[", "t", "]", "=", "parm", "[", "0", "]", "+", "state_vectors", "[", "self", ".", "home_id", "[", "t", "]", "]", "-", "state_vectors", "[", "self", ".", "away_id", "[", "t", "]", "]", "state_vectors", "[", "self", ".", "home_id", "[", "t", "]", "]", "+=", "parm", "[", "1", "]", "*", "self", ".", "family", ".", "score_function", "(", "self", ".", "data", "[", "t", "]", ",", "self", ".", "link", "(", "theta", "[", "t", "]", ")", ",", "scale", ",", "shape", ",", "skewness", ")", "state_vectors", "[", "self", ".", "away_id", "[", "t", "]", "]", "+=", "-", "parm", "[", "1", "]", "*", "self", ".", "family", ".", "score_function", "(", "self", ".", "data", "[", "t", "]", ",", "self", ".", "link", "(", "theta", "[", "t", "]", ")", ",", "scale", ",", "shape", ",", "skewness", ")", "state_vectors_store", "[", "int", "(", "self", ".", "home_count", "[", "t", "]", ")", ",", "self", ".", "home_id", "[", "t", "]", "]", "=", "state_vectors_store", "[", "max", "(", "0", ",", "int", "(", "self", ".", "home_count", "[", "t", "]", ")", "-", "1", ")", ",", "self", ".", "home_id", "[", "t", "]", "]", "+", "parm", "[", "1", "]", "*", "self", ".", "family", ".", "score_function", "(", "self", ".", "data", "[", "t", "]", ",", "self", ".", "link", "(", "theta", "[", "t", "]", ")", ",", "scale", ",", "shape", ",", "skewness", ")", "state_vectors_store", "[", "int", "(", "self", ".", "away_count", "[", "t", "]", ")", ",", "self", ".", "away_id", "[", "t", "]", "]", "=", "state_vectors_store", "[", "max", "(", "0", ",", "int", "(", "self", ".", "away_count", "[", "t", "]", ")", "-", "1", ")", ",", "self", ".", "away_id", "[", "t", "]", "]", "-", "parm", "[", "1", "]", "*", "self", ".", "family", ".", "score_function", "(", "self", ".", "data", "[", "t", "]", ",", "self", ".", "link", "(", "theta", "[", "t", "]", ")", ",", "scale", ",", "shape", ",", "skewness", ")", "return", "state_vectors_store" ]
Creates the structure of the model - store abilities Parameters ---------- beta : np.array Contains untransformed starting values for latent variables Returns ---------- theta : np.array Contains the predicted values for the time series Y : np.array Contains the length-adjusted time series (accounting for lags) scores : np.array Contains the scores for the time series
[ "Creates", "the", "structure", "of", "the", "model", "-", "store", "abilities" ]
python
train
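Editor's note: the update loop above behaves like a rating system, so a toy, self-contained sketch may help. Everything below (team count, matches, the logistic score, the constants mu and lr) is made up for illustration and does not reproduce PyFlux's actual family score functions.

import numpy as np

abilities = np.zeros(4)                              # four hypothetical teams
matches = [(0, 1, 1.0), (2, 3, 0.0), (1, 2, 1.0)]    # (home_id, away_id, outcome)
mu, lr = 0.1, 0.05                                   # hypothetical constant and learning rate

for home, away, outcome in matches:
    theta = mu + abilities[home] - abilities[away]   # predicted home advantage
    score = outcome - 1.0 / (1.0 + np.exp(-theta))   # illustrative logistic score
    abilities[home] += lr * score                    # home rating moves with the surprise
    abilities[away] -= lr * score                    # away rating moves the opposite way

print(abilities)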
Azure/blobxfer
blobxfer/operations/azure/__init__.py
https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/operations/azure/__init__.py#L620-L683
def _populate_from_list_blobs(self, creds, options, dry_run):
        # type: (SourcePath, StorageCredentials, Any, bool) -> StorageEntity
        """Internal generator for Azure remote blobs
        :param SourcePath self: this
        :param StorageCredentials creds: storage creds
        :param object options: download or synccopy options
        :param bool dry_run: dry run
        :rtype: StorageEntity
        :return: Azure storage entity object
        """
        is_synccopy = isinstance(options, blobxfer.models.options.SyncCopy)
        for _path in self._paths:
            rpath = str(_path)
            sa = creds.get_storage_account(self.lookup_storage_account(rpath))
            # ensure at least read permissions
            if not sa.can_read_object:
                raise RuntimeError(
                    'unable to populate sources for remote path {} as '
                    'credential for storage account {} does not permit read '
                    'access'.format(rpath, sa.name))
            cont, dir = blobxfer.util.explode_azure_path(rpath)
            if sa.can_list_container_objects:
                for blob in blobxfer.operations.azure.blob.list_blobs(
                        sa.block_blob_client, cont, dir, options.mode,
                        options.recursive):
                    # check for virtual directory placeholder
                    if not is_synccopy:
                        try:
                            if (blob.metadata[
                                    _METADATA_VIRTUAL_DIRECTORY] == 'true'):
                                continue
                        except KeyError:
                            pass
                    if not self._inclusion_check(blob.name):
                        if dry_run:
                            logger.info(
                                '[DRY RUN] skipping due to filters: '
                                '{}/{}'.format(cont, blob.name))
                        continue
                    for ase in self._handle_vectored_io_stripe(
                            creds, options, is_synccopy, sa, blob, False,
                            cont):
                        if ase is None:
                            continue
                        yield ase
            else:
                blob = blobxfer.operations.azure.blob.get_blob_properties(
                    sa.block_blob_client, cont, dir, options.mode)
                if blob is None:
                    logger.error(
                        'blob {} not found in storage account {}'.format(
                            rpath, sa.name))
                    return
                if not self._inclusion_check(blob.name):
                    if dry_run:
                        logger.info(
                            '[DRY RUN] skipping due to filters: {}/{}'.format(
                                cont, blob.name))
                    return
                for ase in self._handle_vectored_io_stripe(
                        creds, options, is_synccopy, sa, blob, False, cont):
                    if ase is None:
                        continue
                    yield ase
[ "def", "_populate_from_list_blobs", "(", "self", ",", "creds", ",", "options", ",", "dry_run", ")", ":", "# type: (SourcePath, StorageCredentials, Any, bool) -> StorageEntity", "is_synccopy", "=", "isinstance", "(", "options", ",", "blobxfer", ".", "models", ".", "options", ".", "SyncCopy", ")", "for", "_path", "in", "self", ".", "_paths", ":", "rpath", "=", "str", "(", "_path", ")", "sa", "=", "creds", ".", "get_storage_account", "(", "self", ".", "lookup_storage_account", "(", "rpath", ")", ")", "# ensure at least read permissions", "if", "not", "sa", ".", "can_read_object", ":", "raise", "RuntimeError", "(", "'unable to populate sources for remote path {} as '", "'credential for storage account {} does not permit read '", "'access'", ".", "format", "(", "rpath", ",", "sa", ".", "name", ")", ")", "cont", ",", "dir", "=", "blobxfer", ".", "util", ".", "explode_azure_path", "(", "rpath", ")", "if", "sa", ".", "can_list_container_objects", ":", "for", "blob", "in", "blobxfer", ".", "operations", ".", "azure", ".", "blob", ".", "list_blobs", "(", "sa", ".", "block_blob_client", ",", "cont", ",", "dir", ",", "options", ".", "mode", ",", "options", ".", "recursive", ")", ":", "# check for virtual directory placeholder", "if", "not", "is_synccopy", ":", "try", ":", "if", "(", "blob", ".", "metadata", "[", "_METADATA_VIRTUAL_DIRECTORY", "]", "==", "'true'", ")", ":", "continue", "except", "KeyError", ":", "pass", "if", "not", "self", ".", "_inclusion_check", "(", "blob", ".", "name", ")", ":", "if", "dry_run", ":", "logger", ".", "info", "(", "'[DRY RUN] skipping due to filters: '", "'{}/{}'", ".", "format", "(", "cont", ",", "blob", ".", "name", ")", ")", "continue", "for", "ase", "in", "self", ".", "_handle_vectored_io_stripe", "(", "creds", ",", "options", ",", "is_synccopy", ",", "sa", ",", "blob", ",", "False", ",", "cont", ")", ":", "if", "ase", "is", "None", ":", "continue", "yield", "ase", "else", ":", "blob", "=", "blobxfer", ".", "operations", ".", "azure", ".", "blob", ".", "get_blob_properties", "(", "sa", ".", "block_blob_client", ",", "cont", ",", "dir", ",", "options", ".", "mode", ")", "if", "blob", "is", "None", ":", "logger", ".", "error", "(", "'blob {} not found in storage account {}'", ".", "format", "(", "rpath", ",", "sa", ".", "name", ")", ")", "return", "if", "not", "self", ".", "_inclusion_check", "(", "blob", ".", "name", ")", ":", "if", "dry_run", ":", "logger", ".", "info", "(", "'[DRY RUN] skipping due to filters: {}/{}'", ".", "format", "(", "cont", ",", "blob", ".", "name", ")", ")", "return", "for", "ase", "in", "self", ".", "_handle_vectored_io_stripe", "(", "creds", ",", "options", ",", "is_synccopy", ",", "sa", ",", "blob", ",", "False", ",", "cont", ")", ":", "if", "ase", "is", "None", ":", "continue", "yield", "ase" ]
Internal generator for Azure remote blobs :param SourcePath self: this :param StorageCredentials creds: storage creds :param object options: download or synccopy options :param bool dry_run: dry run :rtype: StorageEntity :return: Azure storage entity object
[ "Internal", "generator", "for", "Azure", "remote", "blobs", ":", "param", "SourcePath", "self", ":", "this", ":", "param", "StorageCredentials", "creds", ":", "storage", "creds", ":", "param", "object", "options", ":", "download", "or", "synccopy", "options", ":", "param", "bool", "dry_run", ":", "dry", "run", ":", "rtype", ":", "StorageEntity", ":", "return", ":", "Azure", "storage", "entity", "object" ]
python
train
FujiMakoto/AgentML
agentml/parser/element.py
https://github.com/FujiMakoto/AgentML/blob/c8cb64b460d876666bf29ea2c682189874c7c403/agentml/parser/element.py#L107-L125
def _parse_chance(self, element):
        """
        Parse a chance element
        :param element: The XML Element object
        :type  element: etree._Element
        """
        try:
            chance = float(element.text)
        except (ValueError, TypeError, AttributeError):
            self._log.warn('Invalid Chance string: {chance}'.format(chance=element.text))
            return

        # Make sure the chance is a valid percentage
        if not (0 <= chance <= 100):
            self._log.warn('Chance percent must contain an integer or float between 0 and 100')
            return

        self.chance = chance
        self.chance_blocking = bool_attribute(element, 'blocking', self.chance_blocking)
[ "def", "_parse_chance", "(", "self", ",", "element", ")", ":", "try", ":", "chance", "=", "float", "(", "element", ".", "text", ")", "except", "(", "ValueError", ",", "TypeError", ",", "AttributeError", ")", ":", "self", ".", "_log", ".", "warn", "(", "'Invalid Chance string: {chance}'", ".", "format", "(", "chance", "=", "element", ".", "text", ")", ")", "return", "# Make sure the chance is a valid percentage", "if", "not", "(", "0", "<=", "chance", "<=", "100", ")", ":", "self", ".", "_log", ".", "warn", "(", "'Chance percent must contain an integer or float between 0 and 100'", ")", "return", "self", ".", "chance", "=", "chance", "self", ".", "chance_blocking", "=", "bool_attribute", "(", "element", ",", "'blocking'", ",", "self", ".", "chance_blocking", ")" ]
Parse a chance element :param element: The XML Element object :type element: etree._Element
[ "Parse", "a", "chance", "element", ":", "param", "element", ":", "The", "XML", "Element", "object", ":", "type", "element", ":", "etree", ".", "_Element" ]
python
train
shapiromatron/bmds
bmds/datasets.py
https://github.com/shapiromatron/bmds/blob/395c6ce84ad82876fd9fa4a89a3497fb61616de0/bmds/datasets.py#L494-L504
def as_dfile(self):
        """
        Return the dataset representation in BMDS .(d) file.
        """
        rows = ["Dose Response"]
        for dose, response in zip(self.individual_doses, self.responses):
            dose_idx = self.doses.index(dose)
            if dose_idx >= self.num_dose_groups:
                continue
            rows.append("%f %f" % (dose, response))
        return "\n".join(rows)
[ "def", "as_dfile", "(", "self", ")", ":", "rows", "=", "[", "\"Dose Response\"", "]", "for", "dose", ",", "response", "in", "zip", "(", "self", ".", "individual_doses", ",", "self", ".", "responses", ")", ":", "dose_idx", "=", "self", ".", "doses", ".", "index", "(", "dose", ")", "if", "dose_idx", ">=", "self", ".", "num_dose_groups", ":", "continue", "rows", ".", "append", "(", "\"%f %f\"", "%", "(", "dose", ",", "response", ")", ")", "return", "\"\\n\"", ".", "join", "(", "rows", ")" ]
Return the dataset representation in BMDS .(d) file.
[ "Return", "the", "dataset", "representation", "in", "BMDS", ".", "(", "d", ")", "file", "." ]
python
train
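Editor's note: a standalone sketch of the same row-building logic with made-up numbers, to show what the .(d) text looks like; the real method reads these values from the dataset object rather than local variables.

individual_doses = [0.0, 0.0, 1.0, 1.0, 10.0]   # hypothetical data
responses = [4.1, 3.9, 5.2, 5.0, 7.3]
doses = [0.0, 1.0, 10.0]                        # unique dose groups, in order
num_dose_groups = 3

rows = ["Dose Response"]
for dose, response in zip(individual_doses, responses):
    if doses.index(dose) >= num_dose_groups:
        continue
    rows.append("%f %f" % (dose, response))

print("\n".join(rows))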
goldsborough/li
scripts/bump.py
https://github.com/goldsborough/li/blob/8ea4f8b55183aadaa96bc70cfcfcb7b198874319/scripts/bump.py#L11-L26
def bump(match):
    """Bumps the version"""
    before, old_version, after = match.groups()
    major, minor, patch = map(int, old_version.split('.'))
    patch += 1
    if patch == 10:
        patch = 0
        minor += 1
        if minor == 10:
            minor = 0
            major += 1
    new_version = '{0}.{1}.{2}'.format(major, minor, patch)
    print('{0} => {1}'.format(old_version, new_version))

    return before + new_version + after
[ "def", "bump", "(", "match", ")", ":", "before", ",", "old_version", ",", "after", "=", "match", ".", "groups", "(", ")", "major", ",", "minor", ",", "patch", "=", "map", "(", "int", ",", "old_version", ".", "split", "(", "'.'", ")", ")", "patch", "+=", "1", "if", "patch", "==", "10", ":", "patch", "=", "0", "minor", "+=", "1", "if", "minor", "==", "10", ":", "minor", "=", "0", "major", "+=", "1", "new_version", "=", "'{0}.{1}.{2}'", ".", "format", "(", "major", ",", "minor", ",", "patch", ")", "print", "(", "'{0} => {1}'", ".", "format", "(", "old_version", ",", "new_version", ")", ")", "return", "before", "+", "new_version", "+", "after" ]
Bumps the version
[ "Bumps", "the", "version" ]
python
train
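Editor's note: bump() is written as a re.sub callback, so a small driver may help. The version regex below is a simplified stand-in (the real script defines its own pattern), and bump() from the record above is assumed to be in scope.

import re

line = "version = '0.1.9'"
new_line = re.sub(r"(version = ')(\d+\.\d+\.\d+)(')", bump, line)
# bump() prints "0.1.9 => 0.2.0"; new_line is "version = '0.2.0'"
print(new_line)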
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L900-L916
def _do_anchor(self, anchor):
        """ Collects preposition anchors and attachments in a dictionary.
            Once the dictionary has an entry for both the anchor and the attachment, they are linked.
        """
        if anchor:
            for x in anchor.split("-"):
                A, P = None, None
                if x.startswith("A") and len(self.chunks) > 0: # anchor
                    A, P = x, x.replace("A","P")
                    self._anchors[A] = self.chunks[-1]
                if x.startswith("P") and len(self.pnp) > 0: # attachment (PNP)
                    A, P = x.replace("P","A"), x
                    self._anchors[P] = self.pnp[-1]
                if A in self._anchors and P in self._anchors and not self._anchors[P].anchor:
                    pnp = self._anchors[P]
                    pnp.anchor = self._anchors[A]
                    pnp.anchor.attachments.append(pnp)
[ "def", "_do_anchor", "(", "self", ",", "anchor", ")", ":", "if", "anchor", ":", "for", "x", "in", "anchor", ".", "split", "(", "\"-\"", ")", ":", "A", ",", "P", "=", "None", ",", "None", "if", "x", ".", "startswith", "(", "\"A\"", ")", "and", "len", "(", "self", ".", "chunks", ")", ">", "0", ":", "# anchor", "A", ",", "P", "=", "x", ",", "x", ".", "replace", "(", "\"A\"", ",", "\"P\"", ")", "self", ".", "_anchors", "[", "A", "]", "=", "self", ".", "chunks", "[", "-", "1", "]", "if", "x", ".", "startswith", "(", "\"P\"", ")", "and", "len", "(", "self", ".", "pnp", ")", ">", "0", ":", "# attachment (PNP)", "A", ",", "P", "=", "x", ".", "replace", "(", "\"P\"", ",", "\"A\"", ")", ",", "x", "self", ".", "_anchors", "[", "P", "]", "=", "self", ".", "pnp", "[", "-", "1", "]", "if", "A", "in", "self", ".", "_anchors", "and", "P", "in", "self", ".", "_anchors", "and", "not", "self", ".", "_anchors", "[", "P", "]", ".", "anchor", ":", "pnp", "=", "self", ".", "_anchors", "[", "P", "]", "pnp", ".", "anchor", "=", "self", ".", "_anchors", "[", "A", "]", "pnp", ".", "anchor", ".", "attachments", ".", "append", "(", "pnp", ")" ]
Collects preposition anchors and attachments in a dictionary. Once the dictionary has an entry for both the anchor and the attachment, they are linked.
[ "Collects", "preposition", "anchors", "and", "attachments", "in", "a", "dictionary", ".", "Once", "the", "dictionary", "has", "an", "entry", "for", "both", "the", "anchor", "and", "the", "attachment", "they", "are", "linked", "." ]
python
train
d0ugal/home
home/collect/handlers.py
https://github.com/d0ugal/home/blob/e984716ae6c74dc8e40346584668ac5cfeaaf520/home/collect/handlers.py#L26-L69
def load_handlers(handler_mapping):
    """
    Given a dictionary mapping which looks like the following, import the
    objects based on the dotted path and yield the packet type and handler
    as pairs.

    If the special string '*' is passed, don't process that, pass it on as
    it is a wildcard. If a non-string object is given for either packet or
    handler (key or value) assume these are the objects to use and yield them.

    ::

        {
            'rfxcom.protocol.Status': 'home.collect.logging_handler',
            'rfxcom.protocol.Elec': 'home.collect.elec_handler',
            'rfxcom.protocol.TempHumidity': 'home.collect.temp_humidity_handler',
            '*': 'home.collect.logging_handler'
        }
    """
    handlers = {}

    for packet_type, handler in handler_mapping.items():

        if packet_type == '*':
            Packet = packet_type
        elif isinstance(packet_type, str):
            Packet = importer(packet_type)
        else:
            Packet = packet_type

        if isinstance(handler, str):
            Handler = importer(handler)
        else:
            Handler = handler

        if Packet in handlers:
            raise HandlerConfigError(
                "Handler already provided for packet %s" % Packet)

        handlers[Packet] = Handler

    return handlers
[ "def", "load_handlers", "(", "handler_mapping", ")", ":", "handlers", "=", "{", "}", "for", "packet_type", ",", "handler", "in", "handler_mapping", ".", "items", "(", ")", ":", "if", "packet_type", "==", "'*'", ":", "Packet", "=", "packet_type", "elif", "isinstance", "(", "packet_type", ",", "str", ")", ":", "Packet", "=", "importer", "(", "packet_type", ")", "else", ":", "Packet", "=", "packet_type", "if", "isinstance", "(", "handler", ",", "str", ")", ":", "Handler", "=", "importer", "(", "handler", ")", "else", ":", "Handler", "=", "handler", "if", "Packet", "in", "handlers", ":", "raise", "HandlerConfigError", "(", "\"Handler already provided for packet %s\"", "%", "Packet", ")", "handlers", "[", "Packet", "]", "=", "Handler", "return", "handlers" ]
Given a dictionary mapping which looks like the following, import the objects based on the dotted path and yield the packet type and handler as pairs. If the special string '*' is passed, don't process that, pass it on as it is a wildcard. If a non-string object is given for either packet or handler (key or value) assume these are the objects to use and yield them. :: { 'rfxcom.protocol.Status': 'home.collect.logging_handler', 'rfxcom.protocol.Elec': 'home.collect.elec_handler', 'rfxcom.protocol.TempHumidity': 'home.collect.temp_humidity_handler', '*': 'home.collect.logging_handler' }
[ "Given", "a", "dictionary", "mapping", "which", "looks", "like", "the", "following", "import", "the", "objects", "based", "on", "the", "dotted", "path", "and", "yield", "the", "packet", "type", "and", "handler", "as", "pairs", "." ]
python
test
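Editor's note: an illustrative call to load_handlers, assuming the rfxcom and home packages referenced in the docstring are importable in your environment and that the function above is in scope; '*' passes through as the wildcard entry.

mapping = {
    'rfxcom.protocol.Elec': 'home.collect.elec_handler',
    '*': 'home.collect.logging_handler',
}
handlers = load_handlers(mapping)
for packet_cls, handler in handlers.items():
    print(packet_cls, '->', handler)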
aguinane/nem-reader
nemreader/nem_reader.py
https://github.com/aguinane/nem-reader/blob/5405a5cba4bb8ebdad05c28455d12bb34a6d3ce5/nemreader/nem_reader.py#L186-L201
def parse_300_row(row: list, interval: int, uom: str) -> IntervalRecord:
    """ Interval data record (300) """

    num_intervals = int(24 * 60 / interval)
    interval_date = parse_datetime(row[1])
    last_interval = 2 + num_intervals
    quality_method = row[last_interval]
    interval_values = parse_interval_records(
        row[2:last_interval], interval_date, interval, uom, quality_method)
    return IntervalRecord(interval_date, interval_values,
                          row[last_interval + 0],
                          row[last_interval + 1],
                          row[last_interval + 2],
                          parse_datetime(row[last_interval + 3]),
                          parse_datetime(row[last_interval + 4]))
[ "def", "parse_300_row", "(", "row", ":", "list", ",", "interval", ":", "int", ",", "uom", ":", "str", ")", "->", "IntervalRecord", ":", "num_intervals", "=", "int", "(", "24", "*", "60", "/", "interval", ")", "interval_date", "=", "parse_datetime", "(", "row", "[", "1", "]", ")", "last_interval", "=", "2", "+", "num_intervals", "quality_method", "=", "row", "[", "last_interval", "]", "interval_values", "=", "parse_interval_records", "(", "row", "[", "2", ":", "last_interval", "]", ",", "interval_date", ",", "interval", ",", "uom", ",", "quality_method", ")", "return", "IntervalRecord", "(", "interval_date", ",", "interval_values", ",", "row", "[", "last_interval", "+", "0", "]", ",", "row", "[", "last_interval", "+", "1", "]", ",", "row", "[", "last_interval", "+", "2", "]", ",", "parse_datetime", "(", "row", "[", "last_interval", "+", "3", "]", ")", ",", "parse_datetime", "(", "row", "[", "last_interval", "+", "4", "]", ")", ")" ]
Interval data record (300)
[ "Interval", "data", "record", "(", "300", ")" ]
python
train
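Editor's note: the index arithmetic is the subtle part of the record above, so here it is in isolation for 30-minute data; this is not a full NEM12 300 row.

interval = 30
num_intervals = int(24 * 60 / interval)   # 48 readings per day
last_interval = 2 + num_intervals         # readings occupy row[2:50], quality flag at row[50]
print(num_intervals, last_interval)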
aboSamoor/polyglot
polyglot/__main__.py
https://github.com/aboSamoor/polyglot/blob/d0d2aa8d06cec4e03bd96618ae960030f7069a17/polyglot/__main__.py#L39-L49
def vocab_counter(args):
    """Calculate the vocabulary."""
    if isinstance(args.input, TextFiles):
        v = CountedVocabulary.from_textfiles(args.input, workers=args.workers)
    else:
        v = CountedVocabulary.from_textfile(args.input, workers=args.workers)
    if args.min_count > 1:
        v = v.min_count(args.min_count)
    if args.most_freq > 0:
        v = v.most_frequent(args.most_freq)
    print(v)
[ "def", "vocab_counter", "(", "args", ")", ":", "if", "isinstance", "(", "args", ".", "input", ",", "TextFiles", ")", ":", "v", "=", "CountedVocabulary", ".", "from_textfiles", "(", "args", ".", "input", ",", "workers", "=", "args", ".", "workers", ")", "else", ":", "v", "=", "CountedVocabulary", ".", "from_textfile", "(", "args", ".", "input", ",", "workers", "=", "args", ".", "workers", ")", "if", "args", ".", "min_count", ">", "1", ":", "v", "=", "v", ".", "min_count", "(", "args", ".", "min_count", ")", "if", "args", ".", "most_freq", ">", "0", ":", "v", "=", "v", ".", "most_frequent", "(", "args", ".", "most_freq", ")", "print", "(", "v", ")" ]
Calculate the vocabulary.
[ "Calculate", "the", "vocabulary", "." ]
python
train
tanghaibao/goatools
goatools/grouper/grprobj_init.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/grprobj_init.py#L49-L62
def _init_usrgos(self, goids):
        """Return user GO IDs which have GO Terms."""
        usrgos = set()
        goids_missing = set()
        _go2obj = self.gosubdag.go2obj
        for goid in goids:
            if goid in _go2obj:
                usrgos.add(goid)
            else:
                goids_missing.add(goid)
        if goids_missing:
            print("MISSING GO IDs: {GOs}".format(GOs=goids_missing))
            print("{N} of {M} GO IDs ARE MISSING".format(N=len(goids_missing), M=len(goids)))
        return usrgos
[ "def", "_init_usrgos", "(", "self", ",", "goids", ")", ":", "usrgos", "=", "set", "(", ")", "goids_missing", "=", "set", "(", ")", "_go2obj", "=", "self", ".", "gosubdag", ".", "go2obj", "for", "goid", "in", "goids", ":", "if", "goid", "in", "_go2obj", ":", "usrgos", ".", "add", "(", "goid", ")", "else", ":", "goids_missing", ".", "add", "(", "goid", ")", "if", "goids_missing", ":", "print", "(", "\"MISSING GO IDs: {GOs}\"", ".", "format", "(", "GOs", "=", "goids_missing", ")", ")", "print", "(", "\"{N} of {M} GO IDs ARE MISSING\"", ".", "format", "(", "N", "=", "len", "(", "goids_missing", ")", ",", "M", "=", "len", "(", "goids", ")", ")", ")", "return", "usrgos" ]
Return user GO IDs which have GO Terms.
[ "Return", "user", "GO", "IDs", "which", "have", "GO", "Terms", "." ]
python
train
Kozea/wdb
client/wdb/__init__.py
https://github.com/Kozea/wdb/blob/6af7901b02e866d76f8b0a697a8c078e5b70d1aa/client/wdb/__init__.py#L692-L707
def get_stack(self, f, t):
        """Build the stack from frame and traceback"""
        stack = []
        if t and t.tb_frame == f:
            t = t.tb_next
        while f is not None:
            stack.append((f, f.f_lineno))
            f = f.f_back
        stack.reverse()
        i = max(0, len(stack) - 1)
        while t is not None:
            stack.append((t.tb_frame, t.tb_lineno))
            t = t.tb_next
        if f is None:
            i = max(0, len(stack) - 1)
        return stack, i
[ "def", "get_stack", "(", "self", ",", "f", ",", "t", ")", ":", "stack", "=", "[", "]", "if", "t", "and", "t", ".", "tb_frame", "==", "f", ":", "t", "=", "t", ".", "tb_next", "while", "f", "is", "not", "None", ":", "stack", ".", "append", "(", "(", "f", ",", "f", ".", "f_lineno", ")", ")", "f", "=", "f", ".", "f_back", "stack", ".", "reverse", "(", ")", "i", "=", "max", "(", "0", ",", "len", "(", "stack", ")", "-", "1", ")", "while", "t", "is", "not", "None", ":", "stack", ".", "append", "(", "(", "t", ".", "tb_frame", ",", "t", ".", "tb_lineno", ")", ")", "t", "=", "t", ".", "tb_next", "if", "f", "is", "None", ":", "i", "=", "max", "(", "0", ",", "len", "(", "stack", ")", "-", "1", ")", "return", "stack", ",", "i" ]
Build the stack from frame and traceback
[ "Build", "the", "stack", "from", "frame", "and", "traceback" ]
python
train
usc-isi-i2/etk
etk/extractors/mailman_extractor.py
https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extractors/mailman_extractor.py#L31-L73
def old_format(self, content: BeautifulSoup) -> List[str]:
        """
        Extracts email message information if it uses the old Mailman format

        Args:
            content: BeautifulSoup

        Returns: List[str]
        """
        b = content.find('body')
        sender, date, nxt, rep_to = None, None, None, None
        strongs = b.findAll('strong', recursive=False)
        for s in strongs:
            field = str(s).split(">")[1].split("<")[0]
            if 'From' in field:
                sender = s.next_sibling.split("(")[0].strip()
            elif 'Date' in field:
                date_str = s.next_sibling.strip().replace("-","").replace(" "," ").strip()
                try:
                    date = parsedate_to_datetime(date_str).isoformat()[:19]
                except:
                    date = None
        sender = b.find('b').text if sender == None else sender
        sender = b.find('a').text if len(sender) == 0 else sender
        date = b.find('i').text[:19] if date == None else date

        try:
            nav = content.find('ul').findAll('li')
        except:
            nav = None
        if nav != None:
            for l in nav:
                s = l.text
                if 'Next in thread' in s:
                    nxt = '/'.join(self.email_url.split('/')[:-1]) + '/' + l.find('a')['href']
                    nxt = nxt[1:] if nxt[0] == '/' else nxt
                elif 'reply to' in s:
                    rep_to = '/'.join(self.email_url.split('/')[:-1]) + '/' + l.find('a')['href']
                    rep_to = rep_to[1:] if rep_to[0] == '/' else rep_to

        body = content.find('pre')
        body = body.text.strip() if body != None else None

        return [str(i) for i in [sender, date, body, nxt, rep_to]]
[ "def", "old_format", "(", "self", ",", "content", ":", "BeautifulSoup", ")", "->", "List", "[", "str", "]", ":", "b", "=", "content", ".", "find", "(", "'body'", ")", "sender", ",", "date", ",", "nxt", ",", "rep_to", "=", "None", ",", "None", ",", "None", ",", "None", "strongs", "=", "b", ".", "findAll", "(", "'strong'", ",", "recursive", "=", "False", ")", "for", "s", "in", "strongs", ":", "field", "=", "str", "(", "s", ")", ".", "split", "(", "\">\"", ")", "[", "1", "]", ".", "split", "(", "\"<\"", ")", "[", "0", "]", "if", "'From'", "in", "field", ":", "sender", "=", "s", ".", "next_sibling", ".", "split", "(", "\"(\"", ")", "[", "0", "]", ".", "strip", "(", ")", "elif", "'Date'", "in", "field", ":", "date_str", "=", "s", ".", "next_sibling", ".", "strip", "(", ")", ".", "replace", "(", "\"-\"", ",", "\"\"", ")", ".", "replace", "(", "\" \"", ",", "\" \"", ")", ".", "strip", "(", ")", "try", ":", "date", "=", "parsedate_to_datetime", "(", "date_str", ")", ".", "isoformat", "(", ")", "[", ":", "19", "]", "except", ":", "date", "=", "None", "sender", "=", "b", ".", "find", "(", "'b'", ")", ".", "text", "if", "sender", "==", "None", "else", "sender", "sender", "=", "b", ".", "find", "(", "'a'", ")", ".", "text", "if", "len", "(", "sender", ")", "==", "0", "else", "sender", "date", "=", "b", ".", "find", "(", "'i'", ")", ".", "text", "[", ":", "19", "]", "if", "date", "==", "None", "else", "date", "try", ":", "nav", "=", "content", ".", "find", "(", "'ul'", ")", ".", "findAll", "(", "'li'", ")", "except", ":", "nav", "=", "None", "if", "nav", "!=", "None", ":", "for", "l", "in", "nav", ":", "s", "=", "l", ".", "text", "if", "'Next in thread'", "in", "s", ":", "nxt", "=", "'/'", ".", "join", "(", "self", ".", "email_url", ".", "split", "(", "'/'", ")", "[", ":", "-", "1", "]", ")", "+", "'/'", "+", "l", ".", "find", "(", "'a'", ")", "[", "'href'", "]", "nxt", "=", "nxt", "[", "1", ":", "]", "if", "nxt", "[", "0", "]", "==", "'/'", "else", "nxt", "elif", "'reply to'", "in", "s", ":", "rep_to", "=", "'/'", ".", "join", "(", "self", ".", "email_url", ".", "split", "(", "'/'", ")", "[", ":", "-", "1", "]", ")", "+", "'/'", "+", "l", ".", "find", "(", "'a'", ")", "[", "'href'", "]", "rep_to", "=", "rep_to", "[", "1", ":", "]", "if", "rep_to", "[", "0", "]", "==", "'/'", "else", "rep_to", "body", "=", "content", ".", "find", "(", "'pre'", ")", "body", "=", "body", ".", "text", ".", "strip", "(", ")", "if", "body", "!=", "None", "else", "None", "return", "[", "str", "(", "i", ")", "for", "i", "in", "[", "sender", ",", "date", ",", "body", ",", "nxt", ",", "rep_to", "]", "]" ]
Extracts email message information if it uses the old Mailman format Args: content: BeautifulSoup Returns: List[str]
[ "Extracts", "email", "message", "information", "if", "it", "uses", "the", "old", "Mailman", "format", "Args", ":", "content", ":", "BeautifulSoup" ]
python
train
Alignak-monitoring/alignak
alignak/modulesmanager.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/modulesmanager.py#L90-L99
def set_daemon_name(self, daemon_name):
        """Set the daemon name of the daemon which this manager is attached to
        and propagate this daemon name to our managed modules

        :param daemon_name:
        :return:
        """
        self.daemon_name = daemon_name
        for instance in self.instances:
            instance.set_loaded_into(daemon_name)
[ "def", "set_daemon_name", "(", "self", ",", "daemon_name", ")", ":", "self", ".", "daemon_name", "=", "daemon_name", "for", "instance", "in", "self", ".", "instances", ":", "instance", ".", "set_loaded_into", "(", "daemon_name", ")" ]
Set the daemon name of the daemon which this manager is attached to and propagate this daemon name to our managed modules :param daemon_name: :return:
[ "Set", "the", "daemon", "name", "of", "the", "daemon", "which", "this", "manager", "is", "attached", "to", "and", "propagate", "this", "daemon", "name", "to", "our", "managed", "modules" ]
python
train
broadinstitute/fiss
firecloud/api.py
https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/api.py#L292-L308
def get_entity(namespace, workspace, etype, ename):
    """Request entity information.

    Gets entity metadata and attributes.

    Args:
        namespace (str): project to which workspace belongs
        workspace (str): Workspace name
        etype (str): Entity type
        ename (str): The entity's unique id

    Swagger:
        https://api.firecloud.org/#!/Entities/getEntity
    """
    uri = "workspaces/{0}/{1}/entities/{2}/{3}".format(namespace, workspace,
                                                       etype, ename)
    return __get(uri)
[ "def", "get_entity", "(", "namespace", ",", "workspace", ",", "etype", ",", "ename", ")", ":", "uri", "=", "\"workspaces/{0}/{1}/entities/{2}/{3}\"", ".", "format", "(", "namespace", ",", "workspace", ",", "etype", ",", "ename", ")", "return", "__get", "(", "uri", ")" ]
Request entity information. Gets entity metadata and attributes. Args: namespace (str): project to which workspace belongs workspace (str): Workspace name etype (str): Entity type ename (str): The entity's unique id Swagger: https://api.firecloud.org/#!/Entities/getEntity
[ "Request", "entity", "information", "." ]
python
train
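Editor's note: a hedged usage sketch of the helper above. The billing project, workspace, and sample names are made up, and the sketch assumes the call returns a requests-style response, as the module's __get wrapper suggests.

from firecloud import api as fapi

resp = fapi.get_entity("my-billing-project", "my-workspace", "sample", "SAMPLE_001")
if resp.status_code == 200:
    print(resp.json())   # entity metadata and attributes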
Azure/azure-cli-extensions
src/azure-firewall/azext_firewall/_validators.py
https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/azure-firewall/azext_firewall/_validators.py#L24-L49
def get_public_ip_validator():
    """ Retrieves a validator for public IP address. Accepting all defaults will perform a check
    for an existing name or ID with no ARM-required -type parameter. """
    from msrestazure.tools import is_valid_resource_id, resource_id

    def simple_validator(cmd, namespace):
        if namespace.public_ip_address:
            is_list = isinstance(namespace.public_ip_address, list)

            def _validate_name_or_id(public_ip):
                # determine if public_ip_address is name or ID
                is_id = is_valid_resource_id(public_ip)
                return public_ip if is_id else resource_id(
                    subscription=get_subscription_id(cmd.cli_ctx),
                    resource_group=namespace.resource_group_name,
                    namespace='Microsoft.Network',
                    type='publicIPAddresses',
                    name=public_ip)

            if is_list:
                for i, public_ip in enumerate(namespace.public_ip_address):
                    namespace.public_ip_address[i] = _validate_name_or_id(public_ip)
            else:
                namespace.public_ip_address = _validate_name_or_id(namespace.public_ip_address)

    return simple_validator
[ "def", "get_public_ip_validator", "(", ")", ":", "from", "msrestazure", ".", "tools", "import", "is_valid_resource_id", ",", "resource_id", "def", "simple_validator", "(", "cmd", ",", "namespace", ")", ":", "if", "namespace", ".", "public_ip_address", ":", "is_list", "=", "isinstance", "(", "namespace", ".", "public_ip_address", ",", "list", ")", "def", "_validate_name_or_id", "(", "public_ip", ")", ":", "# determine if public_ip_address is name or ID", "is_id", "=", "is_valid_resource_id", "(", "public_ip", ")", "return", "public_ip", "if", "is_id", "else", "resource_id", "(", "subscription", "=", "get_subscription_id", "(", "cmd", ".", "cli_ctx", ")", ",", "resource_group", "=", "namespace", ".", "resource_group_name", ",", "namespace", "=", "'Microsoft.Network'", ",", "type", "=", "'publicIPAddresses'", ",", "name", "=", "public_ip", ")", "if", "is_list", ":", "for", "i", ",", "public_ip", "in", "enumerate", "(", "namespace", ".", "public_ip_address", ")", ":", "namespace", ".", "public_ip_address", "[", "i", "]", "=", "_validate_name_or_id", "(", "public_ip", ")", "else", ":", "namespace", ".", "public_ip_address", "=", "_validate_name_or_id", "(", "namespace", ".", "public_ip_address", ")", "return", "simple_validator" ]
Retrieves a validator for public IP address. Accepting all defaults will perform a check for an existing name or ID with no ARM-required -type parameter.
[ "Retrieves", "a", "validator", "for", "public", "IP", "address", ".", "Accepting", "all", "defaults", "will", "perform", "a", "check", "for", "an", "existing", "name", "or", "ID", "with", "no", "ARM", "-", "required", "-", "type", "parameter", "." ]
python
train
trailofbits/protofuzz
protofuzz/protofuzz.py
https://github.com/trailofbits/protofuzz/blob/589492d34de9a0da6cc5554094e2588b893b2fd8/protofuzz/protofuzz.py#L71-L105
def _prototype_to_generator(descriptor, cls):
    'Helper to map a descriptor to a protofuzz generator'
    _fd = D.FieldDescriptor
    generator = None

    ints32 = [_fd.TYPE_INT32, _fd.TYPE_UINT32, _fd.TYPE_FIXED32,
              _fd.TYPE_SFIXED32, _fd.TYPE_SINT32]
    ints64 = [_fd.TYPE_INT64, _fd.TYPE_UINT64, _fd.TYPE_FIXED64,
              _fd.TYPE_SFIXED64, _fd.TYPE_SINT64]
    ints_signed = [_fd.TYPE_INT32, _fd.TYPE_SFIXED32, _fd.TYPE_SINT32,
                   _fd.TYPE_INT64, _fd.TYPE_SFIXED64, _fd.TYPE_SINT64]

    if descriptor.type in ints32+ints64:
        bitwidth = [32, 64][descriptor.type in ints64]
        unsigned = descriptor.type not in ints_signed
        generator = _int_generator(descriptor, bitwidth, unsigned)
    elif descriptor.type == _fd.TYPE_DOUBLE:
        generator = _float_generator(descriptor, 64)
    elif descriptor.type == _fd.TYPE_FLOAT:
        generator = _float_generator(descriptor, 32)
    elif descriptor.type == _fd.TYPE_STRING:
        generator = _string_generator(descriptor)
    elif descriptor.type == _fd.TYPE_BYTES:
        generator = _bytes_generator(descriptor)
    elif descriptor.type == _fd.TYPE_BOOL:
        generator = gen.IterValueGenerator(descriptor.name, [True, False])
    elif descriptor.type == _fd.TYPE_ENUM:
        generator = _enum_generator(descriptor)
    elif descriptor.type == _fd.TYPE_MESSAGE:
        generator = descriptor_to_generator(descriptor.message_type, cls)
        generator.set_name(descriptor.name)
    else:
        raise RuntimeError("type {} unsupported".format(descriptor.type))

    return generator
[ "def", "_prototype_to_generator", "(", "descriptor", ",", "cls", ")", ":", "_fd", "=", "D", ".", "FieldDescriptor", "generator", "=", "None", "ints32", "=", "[", "_fd", ".", "TYPE_INT32", ",", "_fd", ".", "TYPE_UINT32", ",", "_fd", ".", "TYPE_FIXED32", ",", "_fd", ".", "TYPE_SFIXED32", ",", "_fd", ".", "TYPE_SINT32", "]", "ints64", "=", "[", "_fd", ".", "TYPE_INT64", ",", "_fd", ".", "TYPE_UINT64", ",", "_fd", ".", "TYPE_FIXED64", ",", "_fd", ".", "TYPE_SFIXED64", ",", "_fd", ".", "TYPE_SINT64", "]", "ints_signed", "=", "[", "_fd", ".", "TYPE_INT32", ",", "_fd", ".", "TYPE_SFIXED32", ",", "_fd", ".", "TYPE_SINT32", ",", "_fd", ".", "TYPE_INT64", ",", "_fd", ".", "TYPE_SFIXED64", ",", "_fd", ".", "TYPE_SINT64", "]", "if", "descriptor", ".", "type", "in", "ints32", "+", "ints64", ":", "bitwidth", "=", "[", "32", ",", "64", "]", "[", "descriptor", ".", "type", "in", "ints64", "]", "unsigned", "=", "descriptor", ".", "type", "not", "in", "ints_signed", "generator", "=", "_int_generator", "(", "descriptor", ",", "bitwidth", ",", "unsigned", ")", "elif", "descriptor", ".", "type", "==", "_fd", ".", "TYPE_DOUBLE", ":", "generator", "=", "_float_generator", "(", "descriptor", ",", "64", ")", "elif", "descriptor", ".", "type", "==", "_fd", ".", "TYPE_FLOAT", ":", "generator", "=", "_float_generator", "(", "descriptor", ",", "32", ")", "elif", "descriptor", ".", "type", "==", "_fd", ".", "TYPE_STRING", ":", "generator", "=", "_string_generator", "(", "descriptor", ")", "elif", "descriptor", ".", "type", "==", "_fd", ".", "TYPE_BYTES", ":", "generator", "=", "_bytes_generator", "(", "descriptor", ")", "elif", "descriptor", ".", "type", "==", "_fd", ".", "TYPE_BOOL", ":", "generator", "=", "gen", ".", "IterValueGenerator", "(", "descriptor", ".", "name", ",", "[", "True", ",", "False", "]", ")", "elif", "descriptor", ".", "type", "==", "_fd", ".", "TYPE_ENUM", ":", "generator", "=", "_enum_generator", "(", "descriptor", ")", "elif", "descriptor", ".", "type", "==", "_fd", ".", "TYPE_MESSAGE", ":", "generator", "=", "descriptor_to_generator", "(", "descriptor", ".", "message_type", ",", "cls", ")", "generator", ".", "set_name", "(", "descriptor", ".", "name", ")", "else", ":", "raise", "RuntimeError", "(", "\"type {} unsupported\"", ".", "format", "(", "descriptor", ".", "type", ")", ")", "return", "generator" ]
Helper to map a descriptor to a protofuzz generator
[ "Helper", "to", "map", "a", "descriptor", "to", "a", "protofuzz", "generator" ]
python
train
AmesCornish/buttersink
buttersink/BestDiffs.py
https://github.com/AmesCornish/buttersink/blob/5cc37e30d9f8071fcf3497dca8b8a91b910321ea/buttersink/BestDiffs.py#L110-L153
def analyze(self, chunkSize, *sinks):
        """ Figure out the best diffs to use to reach all our required volumes. """
        measureSize = False
        if self.measureSize:
            for sink in sinks:
                if sink.isRemote:
                    measureSize = True

        # Use destination (already uploaded) edges first
        sinks = list(sinks)
        sinks.reverse()
        self.dest = sinks[0]

        def currentSize():
            return sum([
                n.diffSize
                for n in self.nodes.values()
                if n.diff is not None
                and n.diff.sink != self.dest
            ])

        while True:
            self._analyzeDontMeasure(chunkSize, measureSize, *sinks)

            if not measureSize:
                return

            estimatedSize = currentSize()

            # logger.info("Measuring any estimated diffs")

            for node in self.nodes.values():
                edge = node.diff
                if edge is not None and edge.sink != self.dest and edge.sizeIsEstimated:
                    edge.sink.measureSize(edge, chunkSize)

            actualSize = currentSize()

            logger.info(
                "measured size (%s), estimated size (%s)",
                humanize(actualSize),
                humanize(estimatedSize),
            )

            if actualSize <= 1.2 * estimatedSize:
                return
[ "def", "analyze", "(", "self", ",", "chunkSize", ",", "*", "sinks", ")", ":", "measureSize", "=", "False", "if", "self", ".", "measureSize", ":", "for", "sink", "in", "sinks", ":", "if", "sink", ".", "isRemote", ":", "measureSize", "=", "True", "# Use destination (already uploaded) edges first", "sinks", "=", "list", "(", "sinks", ")", "sinks", ".", "reverse", "(", ")", "self", ".", "dest", "=", "sinks", "[", "0", "]", "def", "currentSize", "(", ")", ":", "return", "sum", "(", "[", "n", ".", "diffSize", "for", "n", "in", "self", ".", "nodes", ".", "values", "(", ")", "if", "n", ".", "diff", "is", "not", "None", "and", "n", ".", "diff", ".", "sink", "!=", "self", ".", "dest", "]", ")", "while", "True", ":", "self", ".", "_analyzeDontMeasure", "(", "chunkSize", ",", "measureSize", ",", "*", "sinks", ")", "if", "not", "measureSize", ":", "return", "estimatedSize", "=", "currentSize", "(", ")", "# logger.info(\"Measuring any estimated diffs\")", "for", "node", "in", "self", ".", "nodes", ".", "values", "(", ")", ":", "edge", "=", "node", ".", "diff", "if", "edge", "is", "not", "None", "and", "edge", ".", "sink", "!=", "self", ".", "dest", "and", "edge", ".", "sizeIsEstimated", ":", "edge", ".", "sink", ".", "measureSize", "(", "edge", ",", "chunkSize", ")", "actualSize", "=", "currentSize", "(", ")", "logger", ".", "info", "(", "\"measured size (%s), estimated size (%s)\"", ",", "humanize", "(", "actualSize", ")", ",", "humanize", "(", "estimatedSize", ")", ",", ")", "if", "actualSize", "<=", "1.2", "*", "estimatedSize", ":", "return" ]
Figure out the best diffs to use to reach all our required volumes.
[ "Figure", "out", "the", "best", "diffs", "to", "use", "to", "reach", "all", "our", "required", "volumes", "." ]
python
train
pybel/pybel
src/pybel/manager/query_manager.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/manager/query_manager.py#L22-L29
def graph_from_edges(edges: Iterable[Edge], **kwargs) -> BELGraph:
    """Build a BEL graph from edges."""
    graph = BELGraph(**kwargs)

    for edge in edges:
        edge.insert_into_graph(graph)

    return graph
[ "def", "graph_from_edges", "(", "edges", ":", "Iterable", "[", "Edge", "]", ",", "*", "*", "kwargs", ")", "->", "BELGraph", ":", "graph", "=", "BELGraph", "(", "*", "*", "kwargs", ")", "for", "edge", "in", "edges", ":", "edge", ".", "insert_into_graph", "(", "graph", ")", "return", "graph" ]
Build a BEL graph from edges.
[ "Build", "a", "BEL", "graph", "from", "edges", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xorbtreewidget/xorbtreewidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbtreewidget/xorbtreewidget.py#L2049-L2062
def setRecords(self, records):
        """
        Manually sets the list of records that will be displayed in this tree.
        This is a shortcut method to creating a RecordSet with a list of records
        and assigning it to the tree.

        :param      records | [<orb.Table>, ..]
        """
        self._searchTerms = ''

        if not isinstance(records, RecordSet):
            records = RecordSet(records)

        self.setRecordSet(records)
[ "def", "setRecords", "(", "self", ",", "records", ")", ":", "self", ".", "_searchTerms", "=", "''", "if", "not", "isinstance", "(", "records", ",", "RecordSet", ")", ":", "records", "=", "RecordSet", "(", "records", ")", "self", ".", "setRecordSet", "(", "records", ")" ]
Manually sets the list of records that will be displayed in this tree. This is a shortcut method to creating a RecordSet with a list of records and assigning it to the tree. :param records | [<orb.Table>, ..]
[ "Manually", "sets", "the", "list", "of", "records", "that", "will", "be", "displayed", "in", "this", "tree", ".", "This", "is", "a", "shortcut", "method", "to", "creating", "a", "RecordSet", "with", "a", "list", "of", "records", "and", "assigning", "it", "to", "the", "tree", ".", ":", "param", "records", "|", "[", "<orb", ".", "Table", ">", "..", "]" ]
python
train
axialmarket/fsq
libexec/fsq/push.py
https://github.com/axialmarket/fsq/blob/43b84c292cb8a187599d86753b947cf73248f989/libexec/fsq/push.py#L26-L41
def usage(asked_for=0):
    '''Exit with a usage string, used for bad argument or with -h'''
    exit = fsq.const('FSQ_SUCCESS') if asked_for else\
           fsq.const('FSQ_FAIL_PERM')
    f = sys.stdout if asked_for else sys.stderr
    shout('{0} [opts] src_queue trg_queue host item_id [item_id [...]]'.format(
          os.path.basename(_PROG)), f)
    if asked_for:
        shout('{0} [-p|--protocol=jsonrpc] [-L|--no-lock] [-t|--trigger] '\
              '[-i|--ignore-listener] <proto>://<host>:<port>/url'\
              .format(os.path.basename(_PROG)), f)
        shout('{0} [-p|--protocol=jsonrpc] [-L|--no-lock] [-t|--trigger]'\
              '[-i|--ignore-listener] unix://var/sock/foo.sock'\
              .format(os.path.basename(_PROG)), f)
        shout(' src_queue trg_queue host_queue item [item [...]]', f)
    return exit
[ "def", "usage", "(", "asked_for", "=", "0", ")", ":", "exit", "=", "fsq", ".", "const", "(", "'FSQ_SUCCESS'", ")", "if", "asked_for", "else", "fsq", ".", "const", "(", "'FSQ_FAIL_PERM'", ")", "f", "=", "sys", ".", "stdout", "if", "asked_for", "else", "sys", ".", "stderr", "shout", "(", "'{0} [opts] src_queue trg_queue host item_id [item_id [...]]'", ".", "format", "(", "os", ".", "path", ".", "basename", "(", "_PROG", ")", ")", ",", "f", ")", "if", "asked_for", ":", "shout", "(", "'{0} [-p|--protocol=jsonrpc] [-L|--no-lock] [-t|--trigger] '", "'[-i|--ignore-listener] <proto>://<host>:<port>/url'", ".", "format", "(", "os", ".", "path", ".", "basename", "(", "_PROG", ")", ")", ",", "f", ")", "shout", "(", "'{0} [-p|--protocol=jsonrpc] [-L|--no-lock] [-t|--trigger]'", "'[-i|--ignore-listener] unix://var/sock/foo.sock'", ".", "format", "(", "os", ".", "path", ".", "basename", "(", "_PROG", ")", ")", ",", "f", ")", "shout", "(", "' src_queue trg_queue host_queue item [item [...]]'", ",", "f", ")", "return", "exit" ]
Exit with a usage string, used for bad argument or with -h
[ "Exit", "with", "a", "usage", "string", "used", "for", "bad", "argument", "or", "with", "-", "h" ]
python
train
MrYsLab/pymata-aio
pymata_aio/pymata_core.py
https://github.com/MrYsLab/pymata-aio/blob/015081a4628b9d47dfe3f8d6c698ff903f107810/pymata_aio/pymata_core.py#L1369-L1385
async def pixy_set_led(self, r, g, b):
        """
        Sends the setLed Pixy command.
        This method sets the RGB LED on front of Pixy.

        :param r: red range between 0 and 255

        :param g: green range between 0 and 255

        :param b: blue range between 0 and 255

        :returns: No return value.
        """
        data = [PrivateConstants.PIXY_SET_LED, r & 0x7f, (r >> 7) & 0x7f,
                g & 0x7f, (g >> 7) & 0x7f, b & 0x7f, (b >> 7) & 0x7f]
        await self._send_sysex(PrivateConstants.PIXY_CONFIG, data)
[ "async", "def", "pixy_set_led", "(", "self", ",", "r", ",", "g", ",", "b", ")", ":", "data", "=", "[", "PrivateConstants", ".", "PIXY_SET_LED", ",", "r", "&", "0x7f", ",", "(", "r", ">>", "7", ")", "&", "0x7f", ",", "g", "&", "0x7f", ",", "(", "g", ">>", "7", ")", "&", "0x7f", ",", "b", "&", "0x7f", ",", "(", "b", ">>", "7", ")", "&", "0x7f", "]", "await", "self", ".", "_send_sysex", "(", "PrivateConstants", ".", "PIXY_CONFIG", ",", "data", ")" ]
Sends the setLed Pixy command. This method sets the RGB LED on front of Pixy. :param r: red range between 0 and 255 :param g: green range between 0 and 255 :param b: blue range between 0 and 255 :returns: No return value.
[ "Sends", "the", "setLed", "Pixy", "command", ".", "This", "method", "sets", "the", "RGB", "LED", "on", "front", "of", "Pixy", "." ]
python
train
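A minimal usage sketch for the pixy_set_led record above; the already-connected PymataCore instance ("board") and the surrounding event loop are assumptions, only the method call itself comes from the record:

    # Sketch: assumes "board" is a started PymataCore instance on a Firmata board with Pixy support.
    async def pixy_led_red(board):
        # 255/0/0 -> solid red; the method splits each channel into two 7-bit sysex bytes
        await board.pixy_set_led(255, 0, 0)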
KelSolaar/Foundations
foundations/nodes.py
https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/nodes.py#L707-L729
def index_of(self, child):
    """
    Returns the given child index.

    Usage::

        >>> node_a = AbstractCompositeNode("MyNodeA")
        >>> node_b = AbstractCompositeNode("MyNodeB", node_a)
        >>> node_c = AbstractCompositeNode("MyNodeC", node_a)
        >>> node_a.index_of(node_b)
        0
        >>> node_a.index_of(node_c)
        1

    :param child: Child node.
    :type child: AbstractNode or AbstractCompositeNode or Object
    :return: Child index.
    :rtype: int
    """

    for i, item in enumerate(self.__children):
        if child is item:
            return i
[ "def", "index_of", "(", "self", ",", "child", ")", ":", "for", "i", ",", "item", "in", "enumerate", "(", "self", ".", "__children", ")", ":", "if", "child", "is", "item", ":", "return", "i" ]
Returns the given child index. Usage:: >>> node_a = AbstractCompositeNode("MyNodeA") >>> node_b = AbstractCompositeNode("MyNodeB", node_a) >>> node_c = AbstractCompositeNode("MyNodeC", node_a) >>> node_a.index_of(node_b) 0 >>> node_a.index_of(node_c) 1 :param child: Child node. :type child: AbstractNode or AbstractCompositeNode or Object :return: Child index. :rtype: int
[ "Returns", "the", "given", "child", "index", "." ]
python
train
gwastro/pycbc-glue
pycbc_glue/ligolw/utils/__init__.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/ligolw/utils/__init__.py#L95-L115
def local_path_from_url(url):
    """
    For URLs that point to locations in the local filesystem, extract and
    return the filesystem path of the object to which they point.  As a
    special case pass-through, if the URL is None, the return value is None.
    Raises ValueError if the URL is not None and does not point to a local
    file.

    Example:

    >>> print local_path_from_url(None)
    None
    >>> local_path_from_url("file:///home/me/somefile.xml.gz")
    '/home/me/somefile.xml.gz'
    """
    if url is None:
        return None
    scheme, host, path = urlparse.urlparse(url)[:3]
    if scheme.lower() not in ("", "file") or host.lower() not in ("", "localhost"):
        raise ValueError("%s is not a local file" % repr(url))
    return path
[ "def", "local_path_from_url", "(", "url", ")", ":", "if", "url", "is", "None", ":", "return", "None", "scheme", ",", "host", ",", "path", "=", "urlparse", ".", "urlparse", "(", "url", ")", "[", ":", "3", "]", "if", "scheme", ".", "lower", "(", ")", "not", "in", "(", "\"\"", ",", "\"file\"", ")", "or", "host", ".", "lower", "(", ")", "not", "in", "(", "\"\"", ",", "\"localhost\"", ")", ":", "raise", "ValueError", "(", "\"%s is not a local file\"", "%", "repr", "(", "url", ")", ")", "return", "path" ]
For URLs that point to locations in the local filesystem, extract and return the filesystem path of the object to which they point. As a special case pass-through, if the URL is None, the return value is None. Raises ValueError if the URL is not None and does not point to a local file. Example: >>> print local_path_from_url(None) None >>> local_path_from_url("file:///home/me/somefile.xml.gz") '/home/me/somefile.xml.gz'
[ "For", "URLs", "that", "point", "to", "locations", "in", "the", "local", "filesystem", "extract", "and", "return", "the", "filesystem", "path", "of", "the", "object", "to", "which", "they", "point", ".", "As", "a", "special", "case", "pass", "-", "through", "if", "the", "URL", "is", "None", "the", "return", "value", "is", "None", ".", "Raises", "ValueError", "if", "the", "URL", "is", "not", "None", "and", "does", "not", "point", "to", "a", "local", "file", "." ]
python
train
bintoro/overloading.py
overloading.py
https://github.com/bintoro/overloading.py/blob/d7b044d6f7e38043f0fc20f44f134baec84a5b32/overloading.py#L658-L691
def sig_cmp(sig1, sig2):
    """
    Compares two normalized type signatures for validation purposes.
    """
    types1 = sig1.required
    types2 = sig2.required
    if len(types1) != len(types2):
        return False
    dup_pos = []
    dup_kw = {}
    for t1, t2 in zip(types1, types2):
        match = type_cmp(t1, t2)
        if match:
            dup_pos.append(match)
        else:
            break
    else:
        return tuple(dup_pos)
    kw_range = slice(len(dup_pos), len(types1))
    kwds1 = sig1.parameters[kw_range]
    kwds2 = sig2.parameters[kw_range]
    if set(kwds1) != set(kwds2):
        return False
    kwtypes1 = dict(zip(sig1.parameters, types1))
    kwtypes2 = dict(zip(sig2.parameters, types2))
    for kw in kwds1:
        match = type_cmp(kwtypes1[kw], kwtypes2[kw])
        if match:
            dup_kw[kw] = match
        else:
            break
    else:
        return tuple(dup_pos), dup_kw
    return False
[ "def", "sig_cmp", "(", "sig1", ",", "sig2", ")", ":", "types1", "=", "sig1", ".", "required", "types2", "=", "sig2", ".", "required", "if", "len", "(", "types1", ")", "!=", "len", "(", "types2", ")", ":", "return", "False", "dup_pos", "=", "[", "]", "dup_kw", "=", "{", "}", "for", "t1", ",", "t2", "in", "zip", "(", "types1", ",", "types2", ")", ":", "match", "=", "type_cmp", "(", "t1", ",", "t2", ")", "if", "match", ":", "dup_pos", ".", "append", "(", "match", ")", "else", ":", "break", "else", ":", "return", "tuple", "(", "dup_pos", ")", "kw_range", "=", "slice", "(", "len", "(", "dup_pos", ")", ",", "len", "(", "types1", ")", ")", "kwds1", "=", "sig1", ".", "parameters", "[", "kw_range", "]", "kwds2", "=", "sig2", ".", "parameters", "[", "kw_range", "]", "if", "set", "(", "kwds1", ")", "!=", "set", "(", "kwds2", ")", ":", "return", "False", "kwtypes1", "=", "dict", "(", "zip", "(", "sig1", ".", "parameters", ",", "types1", ")", ")", "kwtypes2", "=", "dict", "(", "zip", "(", "sig2", ".", "parameters", ",", "types2", ")", ")", "for", "kw", "in", "kwds1", ":", "match", "=", "type_cmp", "(", "kwtypes1", "[", "kw", "]", ",", "kwtypes2", "[", "kw", "]", ")", "if", "match", ":", "dup_kw", "[", "kw", "]", "=", "match", "else", ":", "break", "else", ":", "return", "tuple", "(", "dup_pos", ")", ",", "dup_kw", "return", "False" ]
Compares two normalized type signatures for validation purposes.
[ "Compares", "two", "normalized", "type", "signatures", "for", "validation", "purposes", "." ]
python
train
wavycloud/pyboto3
pyboto3/opsworks.py
https://github.com/wavycloud/pyboto3/blob/924957ccf994303713a4eed90b775ff2ab95b2e5/pyboto3/opsworks.py#L152-L350
def clone_stack(SourceStackId=None, Name=None, Region=None, VpcId=None, Attributes=None, ServiceRoleArn=None, DefaultInstanceProfileArn=None, DefaultOs=None, HostnameTheme=None, DefaultAvailabilityZone=None, DefaultSubnetId=None, CustomJson=None, ConfigurationManager=None, ChefConfiguration=None, UseCustomCookbooks=None, UseOpsworksSecurityGroups=None, CustomCookbooksSource=None, DefaultSshKeyName=None, ClonePermissions=None, CloneAppIds=None, DefaultRootDeviceType=None, AgentVersion=None): """ Creates a clone of a specified stack. For more information, see Clone a Stack . By default, all parameters are set to the values used by the parent stack. See also: AWS API Documentation :example: response = client.clone_stack( SourceStackId='string', Name='string', Region='string', VpcId='string', Attributes={ 'string': 'string' }, ServiceRoleArn='string', DefaultInstanceProfileArn='string', DefaultOs='string', HostnameTheme='string', DefaultAvailabilityZone='string', DefaultSubnetId='string', CustomJson='string', ConfigurationManager={ 'Name': 'string', 'Version': 'string' }, ChefConfiguration={ 'ManageBerkshelf': True|False, 'BerkshelfVersion': 'string' }, UseCustomCookbooks=True|False, UseOpsworksSecurityGroups=True|False, CustomCookbooksSource={ 'Type': 'git'|'svn'|'archive'|'s3', 'Url': 'string', 'Username': 'string', 'Password': 'string', 'SshKey': 'string', 'Revision': 'string' }, DefaultSshKeyName='string', ClonePermissions=True|False, CloneAppIds=[ 'string', ], DefaultRootDeviceType='ebs'|'instance-store', AgentVersion='string' ) :type SourceStackId: string :param SourceStackId: [REQUIRED] The source stack ID. :type Name: string :param Name: The cloned stack name. :type Region: string :param Region: The cloned stack AWS region, such as 'ap-northeast-2'. For more information about AWS regions, see Regions and Endpoints . :type VpcId: string :param VpcId: The ID of the VPC that the cloned stack is to be launched into. It must be in the specified region. All instances are launched into this VPC, and you cannot change the ID later. If your account supports EC2 Classic, the default value is no VPC. If your account does not support EC2 Classic, the default value is the default VPC for the specified region. If the VPC ID corresponds to a default VPC and you have specified either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks Stacks infers the value of the other parameter. If you specify neither parameter, AWS OpsWorks Stacks sets these parameters to the first valid Availability Zone for the specified region and the corresponding default VPC subnet ID, respectively. If you specify a nondefault VPC ID, note the following: It must belong to a VPC in your account that is in the specified region. You must specify a value for DefaultSubnetId . For more information on how to use AWS OpsWorks Stacks with a VPC, see Running a Stack in a VPC . For more information on default VPC and EC2 Classic, see Supported Platforms . :type Attributes: dict :param Attributes: A list of stack attributes and values as key/value pairs to be added to the cloned stack. (string) -- (string) -- :type ServiceRoleArn: string :param ServiceRoleArn: [REQUIRED] The stack AWS Identity and Access Management (IAM) role, which allows AWS OpsWorks Stacks to work with AWS resources on your behalf. You must set this parameter to the Amazon Resource Name (ARN) for an existing IAM role. If you create a stack by using the AWS OpsWorks Stacks console, it creates the role for you. 
You can obtain an existing stack's IAM ARN programmatically by calling DescribePermissions . For more information about IAM ARNs, see Using Identifiers . Note You must set this parameter to a valid service role ARN or the action will fail; there is no default value. You can specify the source stack's service role ARN, if you prefer, but you must do so explicitly. :type DefaultInstanceProfileArn: string :param DefaultInstanceProfileArn: The Amazon Resource Name (ARN) of an IAM profile that is the default profile for all of the stack's EC2 instances. For more information about IAM ARNs, see Using Identifiers . :type DefaultOs: string :param DefaultOs: The stack's operating system, which must be set to one of the following. A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2016.09 , Amazon Linux 2016.03 , Amazon Linux 2015.09 , or Amazon Linux 2015.03 . A supported Ubuntu operating system, such as Ubuntu 16.04 LTS , Ubuntu 14.04 LTS , or Ubuntu 12.04 LTS . CentOS Linux 7 Red Hat Enterprise Linux 7 Microsoft Windows Server 2012 R2 Base , Microsoft Windows Server 2012 R2 with SQL Server Express , Microsoft Windows Server 2012 R2 with SQL Server Standard , or Microsoft Windows Server 2012 R2 with SQL Server Web . A custom AMI: Custom . You specify the custom AMI you want to use when you create instances. For more information on how to use custom AMIs with OpsWorks, see Using Custom AMIs . The default option is the parent stack's operating system. For more information on the supported operating systems, see AWS OpsWorks Stacks Operating Systems . Note You can specify a different Linux operating system for the cloned stack, but you cannot change from Linux to Windows or Windows to Linux. :type HostnameTheme: string :param HostnameTheme: The stack's host name theme, with spaces are replaced by underscores. The theme is used to generate host names for the stack's instances. By default, HostnameTheme is set to Layer_Dependent , which creates host names by appending integers to the layer's short name. The other themes are: Baked_Goods Clouds Europe_Cities Fruits Greek_Deities Legendary_creatures_from_Japan Planets_and_Moons Roman_Deities Scottish_Islands US_Cities Wild_Cats To obtain a generated host name, call GetHostNameSuggestion , which returns a host name based on the current theme. :type DefaultAvailabilityZone: string :param DefaultAvailabilityZone: The cloned stack's default Availability Zone, which must be in the specified region. For more information, see Regions and Endpoints . If you also specify a value for DefaultSubnetId , the subnet must be in the same zone. For more information, see the VpcId parameter description. :type DefaultSubnetId: string :param DefaultSubnetId: The stack's default VPC subnet ID. This parameter is required if you specify a value for the VpcId parameter. All instances are launched into this subnet unless you specify otherwise when you create the instance. If you also specify a value for DefaultAvailabilityZone , the subnet must be in that zone. For information on default values and when this parameter is required, see the VpcId parameter description. :type CustomJson: string :param CustomJson: A string that contains user-defined, custom JSON. It is used to override the corresponding default stack configuration JSON values. 
The string should be in the following format: '{\'key1\': \'value1\', \'key2\': \'value2\',...}' For more information on custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes :type ConfigurationManager: dict :param ConfigurationManager: The configuration manager. When you clone a stack we recommend that you use the configuration manager to specify the Chef version: 12, 11.10, or 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for Linux stacks is currently 12. Name (string) --The name. This parameter must be set to 'Chef'. Version (string) --The Chef version. This parameter must be set to 12, 11.10, or 11.4 for Linux stacks, and to 12.2 for Windows stacks. The default value for Linux stacks is 11.4. :type ChefConfiguration: dict :param ChefConfiguration: A ChefConfiguration object that specifies whether to enable Berkshelf and the Berkshelf version on Chef 11.10 stacks. For more information, see Create a New Stack . ManageBerkshelf (boolean) --Whether to enable Berkshelf. BerkshelfVersion (string) --The Berkshelf version. :type UseCustomCookbooks: boolean :param UseCustomCookbooks: Whether to use custom cookbooks. :type UseOpsworksSecurityGroups: boolean :param UseOpsworksSecurityGroups: Whether to associate the AWS OpsWorks Stacks built-in security groups with the stack's layers. AWS OpsWorks Stacks provides a standard set of built-in security groups, one for each layer, which are associated with layers by default. With UseOpsworksSecurityGroups you can instead provide your own custom security groups. UseOpsworksSecurityGroups has the following settings: True - AWS OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it but you cannot delete the built-in security group. False - AWS OpsWorks Stacks does not associate built-in security groups with layers. You must create appropriate Amazon Elastic Compute Cloud (Amazon EC2) security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on creation; custom security groups are required only for those layers that need custom settings. For more information, see Create a New Stack . :type CustomCookbooksSource: dict :param CustomCookbooksSource: Contains the information required to retrieve an app or cookbook from a repository. For more information, see Creating Apps or Custom Recipes and Cookbooks . Type (string) --The repository type. Url (string) --The source URL. Username (string) --This parameter depends on the repository type. For Amazon S3 bundles, set Username to the appropriate IAM access key ID. For HTTP bundles, Git repositories, and Subversion repositories, set Username to the user name. Password (string) --When included in a request, the parameter depends on the repository type. For Amazon S3 bundles, set Password to the appropriate IAM secret access key. For HTTP bundles and Subversion repositories, set Password to the password. For more information on how to safely handle IAM credentials, see http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html . In responses, AWS OpsWorks Stacks returns *****FILTERED***** instead of the actual value. SshKey (string) --In requests, the repository's SSH key. In responses, AWS OpsWorks Stacks returns *****FILTERED***** instead of the actual value. Revision (string) --The application's version. 
AWS OpsWorks Stacks enables you to easily deploy new versions of an application. One of the simplest approaches is to have branches or revisions in your repository that represent different versions that can potentially be deployed. :type DefaultSshKeyName: string :param DefaultSshKeyName: A default Amazon EC2 key pair name. The default value is none. If you specify a key pair name, AWS OpsWorks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access . You can override this setting by specifying a different key pair, or no key pair, when you create an instance . :type ClonePermissions: boolean :param ClonePermissions: Whether to clone the source stack's permissions. :type CloneAppIds: list :param CloneAppIds: A list of source stack app IDs to be included in the cloned stack. (string) -- :type DefaultRootDeviceType: string :param DefaultRootDeviceType: The default root device type. This value is used by default for all instances in the cloned stack, but you can override it when you create an instance. For more information, see Storage for the Root Device . :type AgentVersion: string :param AgentVersion: The default AWS OpsWorks Stacks agent version. You have the following options: Auto-update - Set this parameter to LATEST . AWS OpsWorks Stacks automatically installs new agent versions on the stack's instances as soon as they are available. Fixed version - Set this parameter to your preferred agent version. To update the agent version, you must edit the stack configuration and specify a new version. AWS OpsWorks Stacks then automatically installs that version on the stack's instances. The default setting is LATEST . To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions . AgentVersion cannot be set to Chef 12.2. Note You can also specify an agent version when you create or update an instance, which overrides the stack's default setting. :rtype: dict :return: { 'StackId': 'string' } """ pass
[ "def", "clone_stack", "(", "SourceStackId", "=", "None", ",", "Name", "=", "None", ",", "Region", "=", "None", ",", "VpcId", "=", "None", ",", "Attributes", "=", "None", ",", "ServiceRoleArn", "=", "None", ",", "DefaultInstanceProfileArn", "=", "None", ",", "DefaultOs", "=", "None", ",", "HostnameTheme", "=", "None", ",", "DefaultAvailabilityZone", "=", "None", ",", "DefaultSubnetId", "=", "None", ",", "CustomJson", "=", "None", ",", "ConfigurationManager", "=", "None", ",", "ChefConfiguration", "=", "None", ",", "UseCustomCookbooks", "=", "None", ",", "UseOpsworksSecurityGroups", "=", "None", ",", "CustomCookbooksSource", "=", "None", ",", "DefaultSshKeyName", "=", "None", ",", "ClonePermissions", "=", "None", ",", "CloneAppIds", "=", "None", ",", "DefaultRootDeviceType", "=", "None", ",", "AgentVersion", "=", "None", ")", ":", "pass" ]
Creates a clone of a specified stack. For more information, see Clone a Stack . By default, all parameters are set to the values used by the parent stack. See also: AWS API Documentation :example: response = client.clone_stack( SourceStackId='string', Name='string', Region='string', VpcId='string', Attributes={ 'string': 'string' }, ServiceRoleArn='string', DefaultInstanceProfileArn='string', DefaultOs='string', HostnameTheme='string', DefaultAvailabilityZone='string', DefaultSubnetId='string', CustomJson='string', ConfigurationManager={ 'Name': 'string', 'Version': 'string' }, ChefConfiguration={ 'ManageBerkshelf': True|False, 'BerkshelfVersion': 'string' }, UseCustomCookbooks=True|False, UseOpsworksSecurityGroups=True|False, CustomCookbooksSource={ 'Type': 'git'|'svn'|'archive'|'s3', 'Url': 'string', 'Username': 'string', 'Password': 'string', 'SshKey': 'string', 'Revision': 'string' }, DefaultSshKeyName='string', ClonePermissions=True|False, CloneAppIds=[ 'string', ], DefaultRootDeviceType='ebs'|'instance-store', AgentVersion='string' ) :type SourceStackId: string :param SourceStackId: [REQUIRED] The source stack ID. :type Name: string :param Name: The cloned stack name. :type Region: string :param Region: The cloned stack AWS region, such as 'ap-northeast-2'. For more information about AWS regions, see Regions and Endpoints . :type VpcId: string :param VpcId: The ID of the VPC that the cloned stack is to be launched into. It must be in the specified region. All instances are launched into this VPC, and you cannot change the ID later. If your account supports EC2 Classic, the default value is no VPC. If your account does not support EC2 Classic, the default value is the default VPC for the specified region. If the VPC ID corresponds to a default VPC and you have specified either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks Stacks infers the value of the other parameter. If you specify neither parameter, AWS OpsWorks Stacks sets these parameters to the first valid Availability Zone for the specified region and the corresponding default VPC subnet ID, respectively. If you specify a nondefault VPC ID, note the following: It must belong to a VPC in your account that is in the specified region. You must specify a value for DefaultSubnetId . For more information on how to use AWS OpsWorks Stacks with a VPC, see Running a Stack in a VPC . For more information on default VPC and EC2 Classic, see Supported Platforms . :type Attributes: dict :param Attributes: A list of stack attributes and values as key/value pairs to be added to the cloned stack. (string) -- (string) -- :type ServiceRoleArn: string :param ServiceRoleArn: [REQUIRED] The stack AWS Identity and Access Management (IAM) role, which allows AWS OpsWorks Stacks to work with AWS resources on your behalf. You must set this parameter to the Amazon Resource Name (ARN) for an existing IAM role. If you create a stack by using the AWS OpsWorks Stacks console, it creates the role for you. You can obtain an existing stack's IAM ARN programmatically by calling DescribePermissions . For more information about IAM ARNs, see Using Identifiers . Note You must set this parameter to a valid service role ARN or the action will fail; there is no default value. You can specify the source stack's service role ARN, if you prefer, but you must do so explicitly. 
:type DefaultInstanceProfileArn: string :param DefaultInstanceProfileArn: The Amazon Resource Name (ARN) of an IAM profile that is the default profile for all of the stack's EC2 instances. For more information about IAM ARNs, see Using Identifiers . :type DefaultOs: string :param DefaultOs: The stack's operating system, which must be set to one of the following. A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2016.09 , Amazon Linux 2016.03 , Amazon Linux 2015.09 , or Amazon Linux 2015.03 . A supported Ubuntu operating system, such as Ubuntu 16.04 LTS , Ubuntu 14.04 LTS , or Ubuntu 12.04 LTS . CentOS Linux 7 Red Hat Enterprise Linux 7 Microsoft Windows Server 2012 R2 Base , Microsoft Windows Server 2012 R2 with SQL Server Express , Microsoft Windows Server 2012 R2 with SQL Server Standard , or Microsoft Windows Server 2012 R2 with SQL Server Web . A custom AMI: Custom . You specify the custom AMI you want to use when you create instances. For more information on how to use custom AMIs with OpsWorks, see Using Custom AMIs . The default option is the parent stack's operating system. For more information on the supported operating systems, see AWS OpsWorks Stacks Operating Systems . Note You can specify a different Linux operating system for the cloned stack, but you cannot change from Linux to Windows or Windows to Linux. :type HostnameTheme: string :param HostnameTheme: The stack's host name theme, with spaces are replaced by underscores. The theme is used to generate host names for the stack's instances. By default, HostnameTheme is set to Layer_Dependent , which creates host names by appending integers to the layer's short name. The other themes are: Baked_Goods Clouds Europe_Cities Fruits Greek_Deities Legendary_creatures_from_Japan Planets_and_Moons Roman_Deities Scottish_Islands US_Cities Wild_Cats To obtain a generated host name, call GetHostNameSuggestion , which returns a host name based on the current theme. :type DefaultAvailabilityZone: string :param DefaultAvailabilityZone: The cloned stack's default Availability Zone, which must be in the specified region. For more information, see Regions and Endpoints . If you also specify a value for DefaultSubnetId , the subnet must be in the same zone. For more information, see the VpcId parameter description. :type DefaultSubnetId: string :param DefaultSubnetId: The stack's default VPC subnet ID. This parameter is required if you specify a value for the VpcId parameter. All instances are launched into this subnet unless you specify otherwise when you create the instance. If you also specify a value for DefaultAvailabilityZone , the subnet must be in that zone. For information on default values and when this parameter is required, see the VpcId parameter description. :type CustomJson: string :param CustomJson: A string that contains user-defined, custom JSON. It is used to override the corresponding default stack configuration JSON values. The string should be in the following format: '{\'key1\': \'value1\', \'key2\': \'value2\',...}' For more information on custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes :type ConfigurationManager: dict :param ConfigurationManager: The configuration manager. When you clone a stack we recommend that you use the configuration manager to specify the Chef version: 12, 11.10, or 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for Linux stacks is currently 12. Name (string) --The name. This parameter must be set to 'Chef'. 
Version (string) --The Chef version. This parameter must be set to 12, 11.10, or 11.4 for Linux stacks, and to 12.2 for Windows stacks. The default value for Linux stacks is 11.4. :type ChefConfiguration: dict :param ChefConfiguration: A ChefConfiguration object that specifies whether to enable Berkshelf and the Berkshelf version on Chef 11.10 stacks. For more information, see Create a New Stack . ManageBerkshelf (boolean) --Whether to enable Berkshelf. BerkshelfVersion (string) --The Berkshelf version. :type UseCustomCookbooks: boolean :param UseCustomCookbooks: Whether to use custom cookbooks. :type UseOpsworksSecurityGroups: boolean :param UseOpsworksSecurityGroups: Whether to associate the AWS OpsWorks Stacks built-in security groups with the stack's layers. AWS OpsWorks Stacks provides a standard set of built-in security groups, one for each layer, which are associated with layers by default. With UseOpsworksSecurityGroups you can instead provide your own custom security groups. UseOpsworksSecurityGroups has the following settings: True - AWS OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it but you cannot delete the built-in security group. False - AWS OpsWorks Stacks does not associate built-in security groups with layers. You must create appropriate Amazon Elastic Compute Cloud (Amazon EC2) security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on creation; custom security groups are required only for those layers that need custom settings. For more information, see Create a New Stack . :type CustomCookbooksSource: dict :param CustomCookbooksSource: Contains the information required to retrieve an app or cookbook from a repository. For more information, see Creating Apps or Custom Recipes and Cookbooks . Type (string) --The repository type. Url (string) --The source URL. Username (string) --This parameter depends on the repository type. For Amazon S3 bundles, set Username to the appropriate IAM access key ID. For HTTP bundles, Git repositories, and Subversion repositories, set Username to the user name. Password (string) --When included in a request, the parameter depends on the repository type. For Amazon S3 bundles, set Password to the appropriate IAM secret access key. For HTTP bundles and Subversion repositories, set Password to the password. For more information on how to safely handle IAM credentials, see http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html . In responses, AWS OpsWorks Stacks returns *****FILTERED***** instead of the actual value. SshKey (string) --In requests, the repository's SSH key. In responses, AWS OpsWorks Stacks returns *****FILTERED***** instead of the actual value. Revision (string) --The application's version. AWS OpsWorks Stacks enables you to easily deploy new versions of an application. One of the simplest approaches is to have branches or revisions in your repository that represent different versions that can potentially be deployed. :type DefaultSshKeyName: string :param DefaultSshKeyName: A default Amazon EC2 key pair name. The default value is none. If you specify a key pair name, AWS OpsWorks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. 
For more information, see Using SSH to Communicate with an Instance and Managing SSH Access . You can override this setting by specifying a different key pair, or no key pair, when you create an instance . :type ClonePermissions: boolean :param ClonePermissions: Whether to clone the source stack's permissions. :type CloneAppIds: list :param CloneAppIds: A list of source stack app IDs to be included in the cloned stack. (string) -- :type DefaultRootDeviceType: string :param DefaultRootDeviceType: The default root device type. This value is used by default for all instances in the cloned stack, but you can override it when you create an instance. For more information, see Storage for the Root Device . :type AgentVersion: string :param AgentVersion: The default AWS OpsWorks Stacks agent version. You have the following options: Auto-update - Set this parameter to LATEST . AWS OpsWorks Stacks automatically installs new agent versions on the stack's instances as soon as they are available. Fixed version - Set this parameter to your preferred agent version. To update the agent version, you must edit the stack configuration and specify a new version. AWS OpsWorks Stacks then automatically installs that version on the stack's instances. The default setting is LATEST . To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions . AgentVersion cannot be set to Chef 12.2. Note You can also specify an agent version when you create or update an instance, which overrides the stack's default setting. :rtype: dict :return: { 'StackId': 'string' }
[ "Creates", "a", "clone", "of", "a", "specified", "stack", ".", "For", "more", "information", "see", "Clone", "a", "Stack", ".", "By", "default", "all", "parameters", "are", "set", "to", "the", "values", "used", "by", "the", "parent", "stack", ".", "See", "also", ":", "AWS", "API", "Documentation", ":", "example", ":", "response", "=", "client", ".", "clone_stack", "(", "SourceStackId", "=", "string", "Name", "=", "string", "Region", "=", "string", "VpcId", "=", "string", "Attributes", "=", "{", "string", ":", "string", "}", "ServiceRoleArn", "=", "string", "DefaultInstanceProfileArn", "=", "string", "DefaultOs", "=", "string", "HostnameTheme", "=", "string", "DefaultAvailabilityZone", "=", "string", "DefaultSubnetId", "=", "string", "CustomJson", "=", "string", "ConfigurationManager", "=", "{", "Name", ":", "string", "Version", ":", "string", "}", "ChefConfiguration", "=", "{", "ManageBerkshelf", ":", "True|False", "BerkshelfVersion", ":", "string", "}", "UseCustomCookbooks", "=", "True|False", "UseOpsworksSecurityGroups", "=", "True|False", "CustomCookbooksSource", "=", "{", "Type", ":", "git", "|", "svn", "|", "archive", "|", "s3", "Url", ":", "string", "Username", ":", "string", "Password", ":", "string", "SshKey", ":", "string", "Revision", ":", "string", "}", "DefaultSshKeyName", "=", "string", "ClonePermissions", "=", "True|False", "CloneAppIds", "=", "[", "string", "]", "DefaultRootDeviceType", "=", "ebs", "|", "instance", "-", "store", "AgentVersion", "=", "string", ")", ":", "type", "SourceStackId", ":", "string", ":", "param", "SourceStackId", ":", "[", "REQUIRED", "]", "The", "source", "stack", "ID", "." ]
python
train
seleniumbase/SeleniumBase
seleniumbase/fixtures/base_case.py
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/fixtures/base_case.py#L2364-L2375
def wait_for_element_absent(self, selector, by=By.CSS_SELECTOR,
                            timeout=settings.LARGE_TIMEOUT):
    """ Waits for an element to no longer appear in the HTML of a page.
        A hidden element still counts as appearing in the page HTML.
        If an element with "hidden" status is acceptable,
        use wait_for_element_not_visible() instead. """
    if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    if page_utils.is_xpath_selector(selector):
        by = By.XPATH
    return page_actions.wait_for_element_absent(
        self.driver, selector, by, timeout)
[ "def", "wait_for_element_absent", "(", "self", ",", "selector", ",", "by", "=", "By", ".", "CSS_SELECTOR", ",", "timeout", "=", "settings", ".", "LARGE_TIMEOUT", ")", ":", "if", "self", ".", "timeout_multiplier", "and", "timeout", "==", "settings", ".", "LARGE_TIMEOUT", ":", "timeout", "=", "self", ".", "__get_new_timeout", "(", "timeout", ")", "if", "page_utils", ".", "is_xpath_selector", "(", "selector", ")", ":", "by", "=", "By", ".", "XPATH", "return", "page_actions", ".", "wait_for_element_absent", "(", "self", ".", "driver", ",", "selector", ",", "by", ",", "timeout", ")" ]
Waits for an element to no longer appear in the HTML of a page. A hidden element still counts as appearing in the page HTML. If an element with "hidden" status is acceptable, use wait_for_element_not_visible() instead.
[ "Waits", "for", "an", "element", "to", "no", "longer", "appear", "in", "the", "HTML", "of", "a", "page", ".", "A", "hidden", "element", "still", "counts", "as", "appearing", "in", "the", "page", "HTML", ".", "If", "an", "element", "with", "hidden", "status", "is", "acceptable", "use", "wait_for_element_not_visible", "()", "instead", "." ]
python
train
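A hedged usage sketch for wait_for_element_absent; the URL and selectors below are placeholders, and only the waited-on method is taken from the record above:

    from seleniumbase import BaseCase

    class BannerTests(BaseCase):
        def test_banner_is_removed(self):
            self.open("https://example.com/")      # placeholder URL
            self.click("#dismiss-banner")          # placeholder selector
            # Passes once no element matching the selector remains in the page HTML.
            self.wait_for_element_absent("#banner", timeout=10)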
johnnoone/json-spec
src/jsonspec/validators/__init__.py
https://github.com/johnnoone/json-spec/blob/f91981724cea0c366bd42a6670eb07bbe31c0e0c/src/jsonspec/validators/__init__.py#L21-L37
def load(schema, uri=None, spec=None, provider=None):
    """Scaffold a validator against a schema.

    :param schema: the schema to compile into a Validator
    :type schema: Mapping
    :param uri: the uri of the schema.
                it may be ignored in case of not cross referencing.
    :type uri: Pointer, str
    :param spec: fallback to this spec if the schema does not provides ts own
    :type spec: str
    :param provider: the other schemas, in case of cross referencing
    :type provider: Mapping, Provider...
    """

    factory = Factory(provider, spec)
    return factory(schema, uri or '#')
[ "def", "load", "(", "schema", ",", "uri", "=", "None", ",", "spec", "=", "None", ",", "provider", "=", "None", ")", ":", "factory", "=", "Factory", "(", "provider", ",", "spec", ")", "return", "factory", "(", "schema", ",", "uri", "or", "'#'", ")" ]
Scaffold a validator against a schema. :param schema: the schema to compile into a Validator :type schema: Mapping :param uri: the uri of the schema. it may be ignored in case of not cross referencing. :type uri: Pointer, str :param spec: fallback to this spec if the schema does not provides ts own :type spec: str :param provider: the other schemas, in case of cross referencing :type provider: Mapping, Provider...
[ "Scaffold", "a", "validator", "against", "a", "schema", "." ]
python
train
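A small sketch of how the load() scaffold above is typically used; the validate() call on the returned validator is an assumption based on the library's documented usage, not something shown in the record:

    from jsonspec.validators import load

    validator = load({
        'type': 'object',
        'properties': {'name': {'type': 'string'}},
        'required': ['name'],
    })
    validator.validate({'name': 'Alice'})   # assumed API: raises on invalid input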
spyder-ide/spyder
spyder/plugins/editor/panels/scrollflag.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/panels/scrollflag.py#L49-L61
def offset(self):
    """This property holds the vertical offset of the scroll flag area
    relative to the top of the text editor."""
    vsb = self.editor.verticalScrollBar()
    style = vsb.style()
    opt = QStyleOptionSlider()
    vsb.initStyleOption(opt)

    # Get the area in which the slider handle may move.
    groove_rect = style.subControlRect(
        QStyle.CC_ScrollBar, opt, QStyle.SC_ScrollBarGroove, self)

    return groove_rect.y()
[ "def", "offset", "(", "self", ")", ":", "vsb", "=", "self", ".", "editor", ".", "verticalScrollBar", "(", ")", "style", "=", "vsb", ".", "style", "(", ")", "opt", "=", "QStyleOptionSlider", "(", ")", "vsb", ".", "initStyleOption", "(", "opt", ")", "# Get the area in which the slider handle may move.", "groove_rect", "=", "style", ".", "subControlRect", "(", "QStyle", ".", "CC_ScrollBar", ",", "opt", ",", "QStyle", ".", "SC_ScrollBarGroove", ",", "self", ")", "return", "groove_rect", ".", "y", "(", ")" ]
This property holds the vertical offset of the scroll flag area relative to the top of the text editor.
[ "This", "property", "holds", "the", "vertical", "offset", "of", "the", "scroll", "flag", "area", "relative", "to", "the", "top", "of", "the", "text", "editor", "." ]
python
train
chaimleib/intervaltree
intervaltree/interval.py
https://github.com/chaimleib/intervaltree/blob/ffb2b1667f8b832e89324a75a175be8440504c9d/intervaltree/interval.py#L255-L268
def gt(self, other):
    """
    Strictly greater than. Returns True if no part of this Interval
    extends lower than or into other.
    :raises ValueError: if either self or other is a null Interval
    :param other: Interval or point
    :return: True or False
    :rtype: bool
    """
    self._raise_if_null(other)
    if hasattr(other, 'end'):
        return self.begin >= other.end
    else:
        return self.begin > other
[ "def", "gt", "(", "self", ",", "other", ")", ":", "self", ".", "_raise_if_null", "(", "other", ")", "if", "hasattr", "(", "other", ",", "'end'", ")", ":", "return", "self", ".", "begin", ">=", "other", ".", "end", "else", ":", "return", "self", ".", "begin", ">", "other" ]
Strictly greater than. Returns True if no part of this Interval extends lower than or into other. :raises ValueError: if either self or other is a null Interval :param other: Interval or point :return: True or False :rtype: bool
[ "Strictly", "greater", "than", ".", "Returns", "True", "if", "no", "part", "of", "this", "Interval", "extends", "lower", "than", "or", "into", "other", ".", ":", "raises", "ValueError", ":", "if", "either", "self", "or", "other", "is", "a", "null", "Interval", ":", "param", "other", ":", "Interval", "or", "point", ":", "return", ":", "True", "or", "False", ":", "rtype", ":", "bool" ]
python
train
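A quick illustration of the comparison rule in gt() above (begin >= other.end for intervals, begin > other for points); the import path is the package's public one:

    from intervaltree import Interval

    Interval(5, 10).gt(Interval(1, 5))   # True: self begins at 5, other ends at 5
    Interval(5, 10).gt(Interval(1, 6))   # False: other extends into [5, 10)
    Interval(5, 10).gt(4)                # True: point comparison, 5 > 4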
Kortemme-Lab/klab
klab/biblio/pubmed.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/biblio/pubmed.py#L43-L86
def convert(ids, from_type):
    '''Uses the NCBI IP Converter API to converts a list of publication IDs in the same format e.g. DOI identifiers
       to another format e.g. PubMed identifiers.
       ids is a list of IDs of the type from_type e.g. a from_type of 'doi' specifies DOI identifiers.
       The function returns a Python dict with the mappings from the input IDs to IDs of all other types.
    '''
    if from_type not in converter_types:
        raise PubMedConverterTypeException(from_type)

    # Avoid multiple requests of the same ID
    mapping = {}
    ids = list(set(ids))

    # Request the mapping from the server
    query_string = "?ids=%s&idtype=%s" % (urllib2.quote(",".join(ids), ''), from_type)
    xml = get_resource("www.ncbi.nlm.nih.gov", '/pmc/utils/idconv/v1.0/%s' % query_string).strip()

    # Parse the response
    try:
        _dom = parseString(xml)
        main_tag = _dom.getElementsByTagName("pmcids")
        assert(len(main_tag) == 1)
        main_tag = main_tag[0]
        request_status = main_tag.getAttribute('status')
    except Exception, e:
        raise PubMedIDRetrievalException('An error occurred retrieving the XML from the PubMed ID Converter API: %s.' % str(e))

    if request_status == 'ok':
        for record_tag in main_tag.getElementsByTagName("record"):
            attributes = record_tag.attributes
            record_keys = attributes.keys()
            assert('requested-id' in record_keys)
            from_key = attributes['requested-id'].value
            assert(from_key not in mapping)
            mapping[from_key] = {}
            for k in record_keys:
                if k != 'requested-id':
                    mapping[from_key][k] = attributes[k].value
    else:
        # todo: parse the error tag here to print more details
        raise PubMedIDRetrievalException('The request to the PubMed ID Converter API failed. Please check that the IDs are of the correct types.')

    return mapping
[ "def", "convert", "(", "ids", ",", "from_type", ")", ":", "if", "from_type", "not", "in", "converter_types", ":", "raise", "PubMedConverterTypeException", "(", "from_type", ")", "# Avoid multiple requests of the same ID", "mapping", "=", "{", "}", "ids", "=", "list", "(", "set", "(", "ids", ")", ")", "# Request the mapping from the server", "query_string", "=", "\"?ids=%s&idtype=%s\"", "%", "(", "urllib2", ".", "quote", "(", "\",\"", ".", "join", "(", "ids", ")", ",", "''", ")", ",", "from_type", ")", "xml", "=", "get_resource", "(", "\"www.ncbi.nlm.nih.gov\"", ",", "'/pmc/utils/idconv/v1.0/%s'", "%", "query_string", ")", ".", "strip", "(", ")", "# Parse the response", "try", ":", "_dom", "=", "parseString", "(", "xml", ")", "main_tag", "=", "_dom", ".", "getElementsByTagName", "(", "\"pmcids\"", ")", "assert", "(", "len", "(", "main_tag", ")", "==", "1", ")", "main_tag", "=", "main_tag", "[", "0", "]", "request_status", "=", "main_tag", ".", "getAttribute", "(", "'status'", ")", "except", "Exception", ",", "e", ":", "raise", "PubMedIDRetrievalException", "(", "'An error occurred retrieving the XML from the PubMed ID Converter API: %s.'", "%", "str", "(", "e", ")", ")", "if", "request_status", "==", "'ok'", ":", "for", "record_tag", "in", "main_tag", ".", "getElementsByTagName", "(", "\"record\"", ")", ":", "attributes", "=", "record_tag", ".", "attributes", "record_keys", "=", "attributes", ".", "keys", "(", ")", "assert", "(", "'requested-id'", "in", "record_keys", ")", "from_key", "=", "attributes", "[", "'requested-id'", "]", ".", "value", "assert", "(", "from_key", "not", "in", "mapping", ")", "mapping", "[", "from_key", "]", "=", "{", "}", "for", "k", "in", "record_keys", ":", "if", "k", "!=", "'requested-id'", ":", "mapping", "[", "from_key", "]", "[", "k", "]", "=", "attributes", "[", "k", "]", ".", "value", "else", ":", "# todo: parse the error tag here to print more details", "raise", "PubMedIDRetrievalException", "(", "'The request to the PubMed ID Converter API failed. Please check that the IDs are of the correct types.'", ")", "return", "mapping" ]
Uses the NCBI IP Converter API to converts a list of publication IDs in the same format e.g. DOI identifiers to another format e.g. PubMed identifiers. ids is a list of IDs of the type from_type e.g. a from_type of 'doi' specifies DOI identifiers. The function returns a Python dict with the mappings from the input IDs to IDs of all other types.
[ "Uses", "the", "NCBI", "IP", "Converter", "API", "to", "converts", "a", "list", "of", "publication", "IDs", "in", "the", "same", "format", "e", ".", "g", ".", "DOI", "identifiers", "to", "another", "format", "e", ".", "g", ".", "PubMed", "identifiers", ".", "ids", "is", "a", "list", "of", "IDs", "of", "the", "type", "from_type", "e", ".", "g", ".", "a", "from_type", "of", "doi", "specifies", "DOI", "identifiers", ".", "The", "function", "returns", "a", "Python", "dict", "with", "the", "mappings", "from", "the", "input", "IDs", "to", "IDs", "of", "all", "other", "types", "." ]
python
train
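A hedged example of calling convert(); it performs a live request to the NCBI ID Converter, and the exact keys of the nested dict ('pmid', 'pmcid', ...) depend on the service's response rather than on the code above:

    # Hypothetical call -- requires network access and a valid DOI (the one below is a placeholder).
    mapping = convert(['10.1371/journal.pone.0000000'], 'doi')
    # Expected shape: {'<requested doi>': {'pmid': '...', 'pmcid': '...', ...}}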
sbarham/dsrt
build/lib/dsrt/application/Application.py
https://github.com/sbarham/dsrt/blob/bc664739f2f52839461d3e72773b71146fd56a9a/build/lib/dsrt/application/Application.py#L69-L90
def corpus(self):
    '''Command to add a corpus to the dsrt library'''

    # Initialize the addcorpus subcommand's argparser
    description = '''The corpus subcommand has a number of subcommands of its own, including:
        list\t-\tlists all available corpora in dsrt's library
        add\t-\tadds a corpus to dsrt's library'''
    parser = argparse.ArgumentParser(description=description)
    self.init_corpus_args(parser)

    # parse the args we got
    args = parser.parse_args(sys.argv[2:3])

    corpus_command = 'corpus_' + args.corpus_command

    if not hasattr(self, corpus_command):
        print('Unrecognized corpus command.')
        parser.print_help()
        exit(1)

    getattr(self, corpus_command)()
[ "def", "corpus", "(", "self", ")", ":", "# Initialize the addcorpus subcommand's argparser", "description", "=", "'''The corpus subcommand has a number of subcommands of its own, including:\n list\\t-\\tlists all available corpora in dsrt's library\n add\\t-\\tadds a corpus to dsrt's library'''", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "description", ")", "self", ".", "init_corpus_args", "(", "parser", ")", "# parse the args we got", "args", "=", "parser", ".", "parse_args", "(", "sys", ".", "argv", "[", "2", ":", "3", "]", ")", "corpus_command", "=", "'corpus_'", "+", "args", ".", "corpus_command", "if", "not", "hasattr", "(", "self", ",", "corpus_command", ")", ":", "print", "(", "'Unrecognized corpus command.'", ")", "parser", ".", "print_help", "(", ")", "exit", "(", "1", ")", "getattr", "(", "self", ",", "corpus_command", ")", "(", ")" ]
Command to add a corpus to the dsrt library
[ "Command", "to", "add", "a", "corpus", "to", "the", "dsrt", "library" ]
python
train
Azure/azure-kusto-python
azure-kusto-ingest/azure/kusto/ingest/_ingestion_blob_info.py
https://github.com/Azure/azure-kusto-python/blob/92466a2ae175d6353d1dee3496a02517b2a71a86/azure-kusto-ingest/azure/kusto/ingest/_ingestion_blob_info.py#L70-L80
def _convert_dict_to_json(array):
    """ Converts array to a json string """
    return json.dumps(
        array,
        skipkeys=False,
        allow_nan=False,
        indent=None,
        separators=(",", ":"),
        sort_keys=True,
        default=lambda o: o.__dict__,
    )
[ "def", "_convert_dict_to_json", "(", "array", ")", ":", "return", "json", ".", "dumps", "(", "array", ",", "skipkeys", "=", "False", ",", "allow_nan", "=", "False", ",", "indent", "=", "None", ",", "separators", "=", "(", "\",\"", ",", "\":\"", ")", ",", "sort_keys", "=", "True", ",", "default", "=", "lambda", "o", ":", "o", ".", "__dict__", ",", ")" ]
Converts array to a json string
[ "Converts", "array", "to", "a", "json", "string" ]
python
train
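A short sketch of what the helper above produces: compact separators, sorted keys, and arbitrary objects serialized through their __dict__ (the call below assumes the function is imported from its private module):

    print(_convert_dict_to_json({'b': 1, 'a': {'c': 2}}))
    # {"a":{"c":2},"b":1}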
GuyAllard/markov_clustering
markov_clustering/mcl.py
https://github.com/GuyAllard/markov_clustering/blob/28787cf64ef06bf024ff915246008c767ea830cf/markov_clustering/mcl.py#L123-L137
def iterate(matrix, expansion, inflation):
    """
    Run a single iteration (expansion + inflation) of the mcl algorithm

    :param matrix: The matrix to perform the iteration on
    :param expansion: Cluster expansion factor
    :param inflation: Cluster inflation factor
    """

    # Expansion
    matrix = expand(matrix, expansion)

    # Inflation
    matrix = inflate(matrix, inflation)

    return matrix
[ "def", "iterate", "(", "matrix", ",", "expansion", ",", "inflation", ")", ":", "# Expansion", "matrix", "=", "expand", "(", "matrix", ",", "expansion", ")", "# Inflation", "matrix", "=", "inflate", "(", "matrix", ",", "inflation", ")", "return", "matrix" ]
Run a single iteration (expansion + inflation) of the mcl algorithm :param matrix: The matrix to perform the iteration on :param expansion: Cluster expansion factor :param inflation: Cluster inflation factor
[ "Run", "a", "single", "iteration", "(", "expansion", "+", "inflation", ")", "of", "the", "mcl", "algorithm", ":", "param", "matrix", ":", "The", "matrix", "to", "perform", "the", "iteration", "on", ":", "param", "expansion", ":", "Cluster", "expansion", "factor", ":", "param", "inflation", ":", "Cluster", "inflation", "factor" ]
python
train
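A minimal sketch of driving one MCL step with iterate(); whether a dense NumPy matrix is accepted depends on the expand/inflate helpers that are not shown in the record, so treat the input type as an assumption:

    import numpy as np
    from markov_clustering.mcl import iterate

    # Column-stochastic matrix for two disconnected pairs of nodes.
    m = np.array([[0.5, 0.5, 0.0, 0.0],
                  [0.5, 0.5, 0.0, 0.0],
                  [0.0, 0.0, 0.5, 0.5],
                  [0.0, 0.0, 0.5, 0.5]])
    m = iterate(m, expansion=2, inflation=2)   # one expansion + inflation pass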
radujica/baloo
baloo/core/frame.py
https://github.com/radujica/baloo/blob/f6e05e35b73a75e8a300754c6bdc575e5f2d53b9/baloo/core/frame.py#L982-L1024
def drop_duplicates(self, subset=None, keep='min'):
    """Return DataFrame with duplicate rows (excluding index) removed,
    optionally only considering subset columns.

    Note that the row order is NOT maintained due to hashing.

    Parameters
    ----------
    subset : list of str, optional
        Which columns to consider
    keep : {'+', '*', 'min', 'max'}, optional
        What to select from the duplicate rows.
        These correspond to the possible merge operations in Weld.
        Note that '+' and '-' might produce unexpected results for strings.

    Returns
    -------
    DataFrame
        DataFrame without duplicate rows.

    """
    subset = check_and_obtain_subset_columns(subset, self)

    df = self.reset_index()
    df_names = df._gather_column_names()
    subset_indices = [df_names.index(col_name) for col_name in subset]

    weld_objects = weld_drop_duplicates(df._gather_data_for_weld(),
                                        df._gather_weld_types(),
                                        subset_indices,
                                        keep)

    index_data = self.index._gather_data(name=None)
    new_index = [Index(weld_objects[i], v.dtype, k)
                 for i, k, v in zip(list(range(len(index_data))), index_data.keys(), index_data.values())]
    if len(new_index) > 1:
        new_index = MultiIndex(new_index, self.index._gather_names())
    else:
        new_index = new_index[0]

    new_data = OrderedDict((sr.name, Series(obj, new_index, sr.dtype, sr.name))
                           for sr, obj in zip(self._iter(), weld_objects[len(index_data):]))

    return DataFrame(new_data, new_index)
[ "def", "drop_duplicates", "(", "self", ",", "subset", "=", "None", ",", "keep", "=", "'min'", ")", ":", "subset", "=", "check_and_obtain_subset_columns", "(", "subset", ",", "self", ")", "df", "=", "self", ".", "reset_index", "(", ")", "df_names", "=", "df", ".", "_gather_column_names", "(", ")", "subset_indices", "=", "[", "df_names", ".", "index", "(", "col_name", ")", "for", "col_name", "in", "subset", "]", "weld_objects", "=", "weld_drop_duplicates", "(", "df", ".", "_gather_data_for_weld", "(", ")", ",", "df", ".", "_gather_weld_types", "(", ")", ",", "subset_indices", ",", "keep", ")", "index_data", "=", "self", ".", "index", ".", "_gather_data", "(", "name", "=", "None", ")", "new_index", "=", "[", "Index", "(", "weld_objects", "[", "i", "]", ",", "v", ".", "dtype", ",", "k", ")", "for", "i", ",", "k", ",", "v", "in", "zip", "(", "list", "(", "range", "(", "len", "(", "index_data", ")", ")", ")", ",", "index_data", ".", "keys", "(", ")", ",", "index_data", ".", "values", "(", ")", ")", "]", "if", "len", "(", "new_index", ")", ">", "1", ":", "new_index", "=", "MultiIndex", "(", "new_index", ",", "self", ".", "index", ".", "_gather_names", "(", ")", ")", "else", ":", "new_index", "=", "new_index", "[", "0", "]", "new_data", "=", "OrderedDict", "(", "(", "sr", ".", "name", ",", "Series", "(", "obj", ",", "new_index", ",", "sr", ".", "dtype", ",", "sr", ".", "name", ")", ")", "for", "sr", ",", "obj", "in", "zip", "(", "self", ".", "_iter", "(", ")", ",", "weld_objects", "[", "len", "(", "index_data", ")", ":", "]", ")", ")", "return", "DataFrame", "(", "new_data", ",", "new_index", ")" ]
Return DataFrame with duplicate rows (excluding index) removed, optionally only considering subset columns. Note that the row order is NOT maintained due to hashing. Parameters ---------- subset : list of str, optional Which columns to consider keep : {'+', '*', 'min', 'max'}, optional What to select from the duplicate rows. These correspond to the possible merge operations in Weld. Note that '+' and '-' might produce unexpected results for strings. Returns ------- DataFrame DataFrame without duplicate rows.
[ "Return", "DataFrame", "with", "duplicate", "rows", "(", "excluding", "index", ")", "removed", "optionally", "only", "considering", "subset", "columns", "." ]
python
train
The-Politico/politico-civic-election-night
electionnight/serializers/election.py
https://github.com/The-Politico/politico-civic-election-night/blob/a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6/electionnight/serializers/election.py#L128-L136
def get_override_winner(self, obj):
    """Winner marked in backend."""
    if obj.election.division.level.name == DivisionLevel.DISTRICT:
        division = obj.election.division.parent
    else:
        division = obj.election.division

    vote = obj.votes.filter(division=division).first()
    return vote.winning if vote else False
[ "def", "get_override_winner", "(", "self", ",", "obj", ")", ":", "if", "obj", ".", "election", ".", "division", ".", "level", ".", "name", "==", "DivisionLevel", ".", "DISTRICT", ":", "division", "=", "obj", ".", "election", ".", "division", ".", "parent", "else", ":", "division", "=", "obj", ".", "election", ".", "division", "vote", "=", "obj", ".", "votes", ".", "filter", "(", "division", "=", "division", ")", ".", "first", "(", ")", "return", "vote", ".", "winning", "if", "vote", "else", "False" ]
Winner marked in backend.
[ "Winner", "marked", "in", "backend", "." ]
python
train
tensorflow/lucid
lucid/optvis/objectives.py
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L442-L461
def class_logit(layer, label):
    """Like channel, but for softmax layers.

    Args:
        layer: A layer name string.
        label: Either a string (refering to a label in model.labels) or an int
            label position.

    Returns:
        Objective maximizing a logit.
    """
    def inner(T):
        if isinstance(label, int):
            class_n = label
        else:
            class_n = T("labels").index(label)
        logits = T(layer)
        logit = tf.reduce_sum(logits[:, class_n])
        return logit
    return inner
[ "def", "class_logit", "(", "layer", ",", "label", ")", ":", "def", "inner", "(", "T", ")", ":", "if", "isinstance", "(", "label", ",", "int", ")", ":", "class_n", "=", "label", "else", ":", "class_n", "=", "T", "(", "\"labels\"", ")", ".", "index", "(", "label", ")", "logits", "=", "T", "(", "layer", ")", "logit", "=", "tf", ".", "reduce_sum", "(", "logits", "[", ":", ",", "class_n", "]", ")", "return", "logit", "return", "inner" ]
Like channel, but for softmax layers. Args: layer: A layer name string. label: Either a string (refering to a label in model.labels) or an int label position. Returns: Objective maximizing a logit.
[ "Like", "channel", "but", "for", "softmax", "layers", "." ]
python
train
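A sketch of plugging the objective above into lucid's rendering loop; the model, layer name, and class index are placeholders, and the render_vis call follows lucid's usual pattern rather than anything shown in the record:

    from lucid.optvis import objectives, render
    import lucid.modelzoo.vision_models as models

    model = models.InceptionV1()
    model.load_graphdef()
    obj = objectives.class_logit("softmax2_pre_activation", 162)   # placeholder layer/class index
    images = render.render_vis(model, obj)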
letuananh/chirptext
chirptext/texttaglib.py
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/texttaglib.py#L265-L276
def add_concept(self, concept_obj):
    ''' Add a concept to current concept list '''
    if concept_obj is None:
        raise Exception("Concept object cannot be None")
    elif concept_obj in self.__concepts:
        raise Exception("Concept object is already inside")
    elif concept_obj.cidx in self.__concept_map:
        raise Exception("Duplicated concept ID ({})".format(concept_obj.cidx))
    self.__concepts.append(concept_obj)
    self.__concept_map[concept_obj.cidx] = concept_obj
    concept_obj.sent = self
    return concept_obj
[ "def", "add_concept", "(", "self", ",", "concept_obj", ")", ":", "if", "concept_obj", "is", "None", ":", "raise", "Exception", "(", "\"Concept object cannot be None\"", ")", "elif", "concept_obj", "in", "self", ".", "__concepts", ":", "raise", "Exception", "(", "\"Concept object is already inside\"", ")", "elif", "concept_obj", ".", "cidx", "in", "self", ".", "__concept_map", ":", "raise", "Exception", "(", "\"Duplicated concept ID ({})\"", ".", "format", "(", "concept_obj", ".", "cidx", ")", ")", "self", ".", "__concepts", ".", "append", "(", "concept_obj", ")", "self", ".", "__concept_map", "[", "concept_obj", ".", "cidx", "]", "=", "concept_obj", "concept_obj", ".", "sent", "=", "self", "return", "concept_obj" ]
Add a concept to current concept list
[ "Add", "a", "concept", "to", "current", "concept", "list" ]
python
train
ssato/python-anytemplate
anytemplate/utils.py
https://github.com/ssato/python-anytemplate/blob/3e56baa914bd47f044083b20e33100f836443596/anytemplate/utils.py#L44-L57
def uniq(items):
    """Remove duplicates in given list with its order kept.

    >>> uniq([])
    []
    >>> uniq([1, 4, 5, 1, 2, 3, 5, 10])
    [1, 4, 5, 2, 3, 10]
    """
    acc = items[:1]
    for item in items[1:]:
        if item not in acc:
            acc += [item]

    return acc
[ "def", "uniq", "(", "items", ")", ":", "acc", "=", "items", "[", ":", "1", "]", "for", "item", "in", "items", "[", "1", ":", "]", ":", "if", "item", "not", "in", "acc", ":", "acc", "+=", "[", "item", "]", "return", "acc" ]
Remove duplicates in given list with its order kept.

>>> uniq([])
[]
>>> uniq([1, 4, 5, 1, 2, 3, 5, 10])
[1, 4, 5, 2, 3, 10]
[ "Remove", "duplicates", "in", "given", "list", "with", "its", "order", "kept", "." ]
python
train
nicolargo/glances
glances/plugins/glances_cpu.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_cpu.py#L141-L188
def update_snmp(self):
    """Update CPU stats using SNMP."""
    # Init new stats
    stats = self.get_init_value()

    # Update stats using SNMP
    if self.short_system_name in ('windows', 'esxi'):
        # Windows or VMWare ESXi
        # You can find the CPU utilization of windows system by querying the oid
        # Give also the number of core (number of element in the table)
        try:
            cpu_stats = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name],
                                            bulk=True)
        except KeyError:
            self.reset()

        # Iter through CPU and compute the idle CPU stats
        stats['nb_log_core'] = 0
        stats['idle'] = 0
        for c in cpu_stats:
            if c.startswith('percent'):
                stats['idle'] += float(cpu_stats['percent.3'])
                stats['nb_log_core'] += 1
        if stats['nb_log_core'] > 0:
            stats['idle'] = stats['idle'] / stats['nb_log_core']
        stats['idle'] = 100 - stats['idle']
        stats['total'] = 100 - stats['idle']

    else:
        # Default behavor
        try:
            stats = self.get_stats_snmp(
                snmp_oid=snmp_oid[self.short_system_name])
        except KeyError:
            stats = self.get_stats_snmp(
                snmp_oid=snmp_oid['default'])

        if stats['idle'] == '':
            self.reset()
            return self.stats

        # Convert SNMP stats to float
        for key in iterkeys(stats):
            stats[key] = float(stats[key])
        stats['total'] = 100 - stats['idle']

    return stats
[ "def", "update_snmp", "(", "self", ")", ":", "# Init new stats", "stats", "=", "self", ".", "get_init_value", "(", ")", "# Update stats using SNMP", "if", "self", ".", "short_system_name", "in", "(", "'windows'", ",", "'esxi'", ")", ":", "# Windows or VMWare ESXi", "# You can find the CPU utilization of windows system by querying the oid", "# Give also the number of core (number of element in the table)", "try", ":", "cpu_stats", "=", "self", ".", "get_stats_snmp", "(", "snmp_oid", "=", "snmp_oid", "[", "self", ".", "short_system_name", "]", ",", "bulk", "=", "True", ")", "except", "KeyError", ":", "self", ".", "reset", "(", ")", "# Iter through CPU and compute the idle CPU stats", "stats", "[", "'nb_log_core'", "]", "=", "0", "stats", "[", "'idle'", "]", "=", "0", "for", "c", "in", "cpu_stats", ":", "if", "c", ".", "startswith", "(", "'percent'", ")", ":", "stats", "[", "'idle'", "]", "+=", "float", "(", "cpu_stats", "[", "'percent.3'", "]", ")", "stats", "[", "'nb_log_core'", "]", "+=", "1", "if", "stats", "[", "'nb_log_core'", "]", ">", "0", ":", "stats", "[", "'idle'", "]", "=", "stats", "[", "'idle'", "]", "/", "stats", "[", "'nb_log_core'", "]", "stats", "[", "'idle'", "]", "=", "100", "-", "stats", "[", "'idle'", "]", "stats", "[", "'total'", "]", "=", "100", "-", "stats", "[", "'idle'", "]", "else", ":", "# Default behavor", "try", ":", "stats", "=", "self", ".", "get_stats_snmp", "(", "snmp_oid", "=", "snmp_oid", "[", "self", ".", "short_system_name", "]", ")", "except", "KeyError", ":", "stats", "=", "self", ".", "get_stats_snmp", "(", "snmp_oid", "=", "snmp_oid", "[", "'default'", "]", ")", "if", "stats", "[", "'idle'", "]", "==", "''", ":", "self", ".", "reset", "(", ")", "return", "self", ".", "stats", "# Convert SNMP stats to float", "for", "key", "in", "iterkeys", "(", "stats", ")", ":", "stats", "[", "key", "]", "=", "float", "(", "stats", "[", "key", "]", ")", "stats", "[", "'total'", "]", "=", "100", "-", "stats", "[", "'idle'", "]", "return", "stats" ]
Update CPU stats using SNMP.
[ "Update", "CPU", "stats", "using", "SNMP", "." ]
python
train
materialsproject/pymatgen
pymatgen/core/xcfunc.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/xcfunc.py#L149-L175
def from_type_name(cls, typ, name):
    """Build the object from (type, name)."""
    # Try aliases first.
    for k, nt in cls.defined_aliases.items():
        if typ is not None and typ != nt.type: continue
        #print(name, nt.name)
        if name == nt.name:
            if len(k) == 1: return cls(xc=k)
            if len(k) == 2: return cls(x=k[0], c=k[1])
            raise ValueError("Wrong key: %s" % k)

    # At this point, we should have something in the form
    # name="GGA_X_PBE+GGA_C_PBE" or name=""LDA_XC_TETER93"
    if "+" in name:
        #if typ is not None: raise ValueError("typ: `%s` but name: `%s`" % (typ, name))
        x, c = (s.strip() for s in name.split("+"))
        x, c = LibxcFunc[x], LibxcFunc[c]
        return cls(x=x, c=c)
    else:
        #if typ is not None: raise ValueError("typ: `%s` but name: `%s`" % (typ, name))
        xc = LibxcFunc[name]
        return cls(xc=xc)

    if typ is None:
        raise ValueError("Cannot find name=%s in defined_aliases" % name)
    else:
        raise ValueError("Cannot find type=%s, name=%s in defined_aliases" % (typ, name))
[ "def", "from_type_name", "(", "cls", ",", "typ", ",", "name", ")", ":", "# Try aliases first.", "for", "k", ",", "nt", "in", "cls", ".", "defined_aliases", ".", "items", "(", ")", ":", "if", "typ", "is", "not", "None", "and", "typ", "!=", "nt", ".", "type", ":", "continue", "#print(name, nt.name)", "if", "name", "==", "nt", ".", "name", ":", "if", "len", "(", "k", ")", "==", "1", ":", "return", "cls", "(", "xc", "=", "k", ")", "if", "len", "(", "k", ")", "==", "2", ":", "return", "cls", "(", "x", "=", "k", "[", "0", "]", ",", "c", "=", "k", "[", "1", "]", ")", "raise", "ValueError", "(", "\"Wrong key: %s\"", "%", "k", ")", "# At this point, we should have something in the form", "# name=\"GGA_X_PBE+GGA_C_PBE\" or name=\"\"LDA_XC_TETER93\"", "if", "\"+\"", "in", "name", ":", "#if typ is not None: raise ValueError(\"typ: `%s` but name: `%s`\" % (typ, name))", "x", ",", "c", "=", "(", "s", ".", "strip", "(", ")", "for", "s", "in", "name", ".", "split", "(", "\"+\"", ")", ")", "x", ",", "c", "=", "LibxcFunc", "[", "x", "]", ",", "LibxcFunc", "[", "c", "]", "return", "cls", "(", "x", "=", "x", ",", "c", "=", "c", ")", "else", ":", "#if typ is not None: raise ValueError(\"typ: `%s` but name: `%s`\" % (typ, name))", "xc", "=", "LibxcFunc", "[", "name", "]", "return", "cls", "(", "xc", "=", "xc", ")", "if", "typ", "is", "None", ":", "raise", "ValueError", "(", "\"Cannot find name=%s in defined_aliases\"", "%", "name", ")", "else", ":", "raise", "ValueError", "(", "\"Cannot find type=%s, name=%s in defined_aliases\"", "%", "(", "typ", ",", "name", ")", ")" ]
Build the object from (type, name).
[ "Build", "the", "object", "from", "(", "type", "name", ")", "." ]
python
train
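A usage sketch; the class is assumed here to be XcFunc (the classmethod's owner in pymatgen/core/xcfunc.py), typ is passed as None to skip the type filter, and the functional names follow the libxc naming shown in the function's own comment:

from pymatgen.core.xcfunc import XcFunc

xc = XcFunc.from_type_name(None, "GGA_X_PBE+GGA_C_PBE")   # separate exchange + correlation
lda = XcFunc.from_type_name(None, "LDA_XC_TETER93")       # single combined functional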
blockstack/blockstack-core
blockstack/lib/fast_sync.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/fast_sync.py#L492-L509
def fast_sync_inspect_snapshot( snapshot_path ):
    """
    Inspect a snapshot
    Return useful information
    Return {'status': True, 'signatures': ..., 'payload_size': ..., 'sig_append_offset': ..., 'hash': ...} on success
    Return {'error': ...} on error
    """
    with open(snapshot_path, 'r') as f:
        info = fast_sync_inspect( f )
        if 'error' in info:
            log.error("Failed to inspect snapshot {}: {}".format(snapshot_path, info['error']))
            return {'error': 'Failed to inspect snapshot'}

        # get the hash of the file
        hash_hex = get_file_hash(f, hashlib.sha256, fd_len=info['payload_size'])
        info['hash'] = hash_hex

    return info
[ "def", "fast_sync_inspect_snapshot", "(", "snapshot_path", ")", ":", "with", "open", "(", "snapshot_path", ",", "'r'", ")", "as", "f", ":", "info", "=", "fast_sync_inspect", "(", "f", ")", "if", "'error'", "in", "info", ":", "log", ".", "error", "(", "\"Failed to inspect snapshot {}: {}\"", ".", "format", "(", "snapshot_path", ",", "info", "[", "'error'", "]", ")", ")", "return", "{", "'error'", ":", "'Failed to inspect snapshot'", "}", "# get the hash of the file ", "hash_hex", "=", "get_file_hash", "(", "f", ",", "hashlib", ".", "sha256", ",", "fd_len", "=", "info", "[", "'payload_size'", "]", ")", "info", "[", "'hash'", "]", "=", "hash_hex", "return", "info" ]
Inspect a snapshot Return useful information Return {'status': True, 'signatures': ..., 'payload_size': ..., 'sig_append_offset': ..., 'hash': ...} on success Return {'error': ...} on error
[ "Inspect", "a", "snapshot", "Return", "useful", "information", "Return", "{", "status", ":", "True", "signatures", ":", "...", "payload_size", ":", "...", "sig_append_offset", ":", "...", "hash", ":", "...", "}", "on", "success", "Return", "{", "error", ":", "...", "}", "on", "error" ]
python
train
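A usage sketch with a hypothetical snapshot path; the keys read below are the ones the docstring promises on success:

info = fast_sync_inspect_snapshot("/tmp/blockstack-snapshot.bsk")   # path is illustrative
if 'error' in info:
    print(info['error'])
else:
    print(info['hash'], info['payload_size'], len(info['signatures']))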
googleads/googleads-python-lib
googleads/ad_manager.py
https://github.com/googleads/googleads-python-lib/blob/aa3b1b474b0f9789ca55ca46f4b2b57aeae38874/googleads/ad_manager.py#L996-L1025
def _ConvertDateTimeToOffset(self, date_time_value):
    """Converts the PQL formatted response for a dateTime object.

    Output conforms to ISO 8601 format, e.g. 'YYYY-MM-DDTHH:MM:SSz.'

    Args:
      date_time_value: dict The date time value from the PQL response.

    Returns:
      str: A string representation of the date time value uniform to
           ReportService.
    """
    date_time_obj = datetime.datetime(int(date_time_value['date']['year']),
                                      int(date_time_value['date']['month']),
                                      int(date_time_value['date']['day']),
                                      int(date_time_value['hour']),
                                      int(date_time_value['minute']),
                                      int(date_time_value['second']))
    # v201808 is the last Ad Manager version to use timeZoneID.
    if self._version > 'v201808':
        time_zone_str = 'timeZoneId'
    else:
        time_zone_str = 'timeZoneID'
    date_time_str = pytz.timezone(
        date_time_value[time_zone_str]).localize(date_time_obj).isoformat()

    if date_time_str[-5:] == '00:00':
        return date_time_str[:-6] + 'Z'
    else:
        return date_time_str
[ "def", "_ConvertDateTimeToOffset", "(", "self", ",", "date_time_value", ")", ":", "date_time_obj", "=", "datetime", ".", "datetime", "(", "int", "(", "date_time_value", "[", "'date'", "]", "[", "'year'", "]", ")", ",", "int", "(", "date_time_value", "[", "'date'", "]", "[", "'month'", "]", ")", ",", "int", "(", "date_time_value", "[", "'date'", "]", "[", "'day'", "]", ")", ",", "int", "(", "date_time_value", "[", "'hour'", "]", ")", ",", "int", "(", "date_time_value", "[", "'minute'", "]", ")", ",", "int", "(", "date_time_value", "[", "'second'", "]", ")", ")", "# v201808 is the last Ad Manager version to use timeZoneID.", "if", "self", ".", "_version", ">", "'v201808'", ":", "time_zone_str", "=", "'timeZoneId'", "else", ":", "time_zone_str", "=", "'timeZoneID'", "date_time_str", "=", "pytz", ".", "timezone", "(", "date_time_value", "[", "time_zone_str", "]", ")", ".", "localize", "(", "date_time_obj", ")", ".", "isoformat", "(", ")", "if", "date_time_str", "[", "-", "5", ":", "]", "==", "'00:00'", ":", "return", "date_time_str", "[", ":", "-", "6", "]", "+", "'Z'", "else", ":", "return", "date_time_str" ]
Converts the PQL formatted response for a dateTime object.

Output conforms to ISO 8601 format, e.g. 'YYYY-MM-DDTHH:MM:SSz.'

Args:
  date_time_value: dict The date time value from the PQL response.

Returns:
  str: A string representation of the date time value uniform to
       ReportService.
[ "Converts", "the", "PQL", "formatted", "response", "for", "a", "dateTime", "object", "." ]
python
train
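A worked example of the conversion this private method performs, using hypothetical PQL values and calling pytz directly to show the intermediate string:

import datetime, pytz

value = {'date': {'year': 2019, 'month': 3, 'day': 14},
         'hour': 9, 'minute': 30, 'second': 0,
         'timeZoneId': 'America/New_York'}          # hypothetical PQL payload
dt = datetime.datetime(2019, 3, 14, 9, 30, 0)
print(pytz.timezone(value['timeZoneId']).localize(dt).isoformat())
# -> '2019-03-14T09:30:00-04:00'; a '+00:00' suffix would instead be rewritten as a trailing 'Z'.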
SBRG/ssbio
ssbio/pipeline/atlas2.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/atlas2.py#L242-L310
def filter_genes_and_strains(self, remove_genes_not_in_reference_model=True,
                             remove_strains_with_no_orthology=True,
                             remove_strains_with_no_differences=False,
                             custom_keep_strains=None, custom_keep_genes=None):
    """Filters the analysis by keeping a subset of strains or genes based on certain criteria.

    Args:
        remove_genes_not_in_reference_model (bool): Remove genes from reference model not in orthology matrix
        remove_strains_with_no_orthology (bool): Remove strains which have no orthologous genes found
        remove_strains_with_no_differences (bool): Remove strains which have all the same genes as the base model.
            Default is False because, since orthology is found using a PID cutoff, all genes may be present but
            differences may be on the sequence level.
        custom_keep_genes (list): List of gene IDs to keep in analysis
        custom_keep_strains (list): List of strain IDs to keep in analysis

    """
    if len(self.df_orthology_matrix) == 0:
        raise RuntimeError('Empty orthology matrix, please calculate first!')

    reference_strain_gene_ids = [x.id for x in self.reference_gempro.genes]
    initial_num_genes = len(reference_strain_gene_ids)

    initial_num_strains = len(self.strain_ids)

    # Gene filtering
    to_remove_genes = []
    if custom_keep_genes:
        to_remove_genes.extend([x for x in reference_strain_gene_ids if x not in custom_keep_genes])
    if remove_genes_not_in_reference_model:
        to_remove_genes.extend([x for x in reference_strain_gene_ids if x not in self.df_orthology_matrix.index.tolist()])

    to_remove_genes = list(set(to_remove_genes))
    if self.reference_gempro.model:
        cobra.manipulation.delete_model_genes(self.reference_gempro.model, to_remove_genes)
    else:
        for g_id in to_remove_genes:
            self.reference_gempro.genes.get_by_id(g_id).functional = False

    # Create new orthology matrix with only our genes of interest
    new_gene_subset = [x.id for x in self.reference_gempro.functional_genes]
    tmp_new_orthology_matrix = self.df_orthology_matrix[self.df_orthology_matrix.index.isin(new_gene_subset)]

    # Strain filtering
    if custom_keep_strains or remove_strains_with_no_orthology or remove_strains_with_no_differences:
        for strain_id in self.strain_ids:
            if custom_keep_strains:
                if strain_id not in custom_keep_strains:
                    self.strain_ids.remove(strain_id)
                    continue

            if remove_strains_with_no_orthology:
                if strain_id not in tmp_new_orthology_matrix.columns:
                    self.strain_ids.remove(strain_id)
                    log.info('{}: no orthologous genes found for this strain, removed from analysis.'.format(strain_id))
                    continue
                elif tmp_new_orthology_matrix[strain_id].isnull().all():
                    self.strain_ids.remove(strain_id)
                    log.info('{}: no orthologous genes found for this strain, removed from analysis.'.format(strain_id))
                    continue

            if remove_strains_with_no_differences:
                not_in_strain = tmp_new_orthology_matrix[pd.isnull(tmp_new_orthology_matrix[strain_id])][strain_id].index.tolist()
                if len(not_in_strain) == 0:
                    self.strain_ids.remove(strain_id)
                    log.info('{}: strain has no differences from the base, removed from analysis.')
                    continue

    log.info('{} genes to be analyzed, originally {}'.format(len(self.reference_gempro.functional_genes), initial_num_genes))
    log.info('{} strains to be analyzed, originally {}'.format(len(self.strain_ids), initial_num_strains))
[ "def", "filter_genes_and_strains", "(", "self", ",", "remove_genes_not_in_reference_model", "=", "True", ",", "remove_strains_with_no_orthology", "=", "True", ",", "remove_strains_with_no_differences", "=", "False", ",", "custom_keep_strains", "=", "None", ",", "custom_keep_genes", "=", "None", ")", ":", "if", "len", "(", "self", ".", "df_orthology_matrix", ")", "==", "0", ":", "raise", "RuntimeError", "(", "'Empty orthology matrix, please calculate first!'", ")", "reference_strain_gene_ids", "=", "[", "x", ".", "id", "for", "x", "in", "self", ".", "reference_gempro", ".", "genes", "]", "initial_num_genes", "=", "len", "(", "reference_strain_gene_ids", ")", "initial_num_strains", "=", "len", "(", "self", ".", "strain_ids", ")", "# Gene filtering", "to_remove_genes", "=", "[", "]", "if", "custom_keep_genes", ":", "to_remove_genes", ".", "extend", "(", "[", "x", "for", "x", "in", "reference_strain_gene_ids", "if", "x", "not", "in", "custom_keep_genes", "]", ")", "if", "remove_genes_not_in_reference_model", ":", "to_remove_genes", ".", "extend", "(", "[", "x", "for", "x", "in", "reference_strain_gene_ids", "if", "x", "not", "in", "self", ".", "df_orthology_matrix", ".", "index", ".", "tolist", "(", ")", "]", ")", "to_remove_genes", "=", "list", "(", "set", "(", "to_remove_genes", ")", ")", "if", "self", ".", "reference_gempro", ".", "model", ":", "cobra", ".", "manipulation", ".", "delete_model_genes", "(", "self", ".", "reference_gempro", ".", "model", ",", "to_remove_genes", ")", "else", ":", "for", "g_id", "in", "to_remove_genes", ":", "self", ".", "reference_gempro", ".", "genes", ".", "get_by_id", "(", "g_id", ")", ".", "functional", "=", "False", "# Create new orthology matrix with only our genes of interest", "new_gene_subset", "=", "[", "x", ".", "id", "for", "x", "in", "self", ".", "reference_gempro", ".", "functional_genes", "]", "tmp_new_orthology_matrix", "=", "self", ".", "df_orthology_matrix", "[", "self", ".", "df_orthology_matrix", ".", "index", ".", "isin", "(", "new_gene_subset", ")", "]", "# Strain filtering", "if", "custom_keep_strains", "or", "remove_strains_with_no_orthology", "or", "remove_strains_with_no_differences", ":", "for", "strain_id", "in", "self", ".", "strain_ids", ":", "if", "custom_keep_strains", ":", "if", "strain_id", "not", "in", "custom_keep_strains", ":", "self", ".", "strain_ids", ".", "remove", "(", "strain_id", ")", "continue", "if", "remove_strains_with_no_orthology", ":", "if", "strain_id", "not", "in", "tmp_new_orthology_matrix", ".", "columns", ":", "self", ".", "strain_ids", ".", "remove", "(", "strain_id", ")", "log", ".", "info", "(", "'{}: no orthologous genes found for this strain, removed from analysis.'", ".", "format", "(", "strain_id", ")", ")", "continue", "elif", "tmp_new_orthology_matrix", "[", "strain_id", "]", ".", "isnull", "(", ")", ".", "all", "(", ")", ":", "self", ".", "strain_ids", ".", "remove", "(", "strain_id", ")", "log", ".", "info", "(", "'{}: no orthologous genes found for this strain, removed from analysis.'", ".", "format", "(", "strain_id", ")", ")", "continue", "if", "remove_strains_with_no_differences", ":", "not_in_strain", "=", "tmp_new_orthology_matrix", "[", "pd", ".", "isnull", "(", "tmp_new_orthology_matrix", "[", "strain_id", "]", ")", "]", "[", "strain_id", "]", ".", "index", ".", "tolist", "(", ")", "if", "len", "(", "not_in_strain", ")", "==", "0", ":", "self", ".", "strain_ids", ".", "remove", "(", "strain_id", ")", "log", ".", "info", "(", "'{}: strain has no differences from the base, removed from 
analysis.'", ")", "continue", "log", ".", "info", "(", "'{} genes to be analyzed, originally {}'", ".", "format", "(", "len", "(", "self", ".", "reference_gempro", ".", "functional_genes", ")", ",", "initial_num_genes", ")", ")", "log", ".", "info", "(", "'{} strains to be analyzed, originally {}'", ".", "format", "(", "len", "(", "self", ".", "strain_ids", ")", ",", "initial_num_strains", ")", ")" ]
Filters the analysis by keeping a subset of strains or genes based on certain criteria.

Args:
    remove_genes_not_in_reference_model (bool): Remove genes from reference model not in orthology matrix
    remove_strains_with_no_orthology (bool): Remove strains which have no orthologous genes found
    remove_strains_with_no_differences (bool): Remove strains which have all the same genes as the base model.
        Default is False because, since orthology is found using a PID cutoff, all genes may be present but
        differences may be on the sequence level.
    custom_keep_genes (list): List of gene IDs to keep in analysis
    custom_keep_strains (list): List of strain IDs to keep in analysis
[ "Filters", "the", "analysis", "by", "keeping", "a", "subset", "of", "strains", "or", "genes", "based", "on", "certain", "criteria", "." ]
python
train
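A usage sketch; `atlas` is assumed to be an instance of the ATLAS-style class this method belongs to, with its orthology matrix already calculated (otherwise the RuntimeError above fires), and the strain IDs are hypothetical:

atlas.filter_genes_and_strains(
    remove_genes_not_in_reference_model=True,
    remove_strains_with_no_orthology=True,
    custom_keep_strains=['strain_A', 'strain_B'],   # hypothetical strain IDs
)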
nerdvegas/rez
src/rez/package_order.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/package_order.py#L141-L178
def to_pod(self):
    """
    Example (in yaml):

        type: per_family
        orderers:
        - packages: ['foo', 'bah']
          type: version_split
          first_version: '4.0.5'
        - packages: ['python']
          type: sorted
          descending: false
        default_order:
          type: sorted
          descending: true
    """
    orderers = {}
    packages = {}

    # group package fams by orderer they use
    for fam, orderer in self.order_dict.iteritems():
        k = id(orderer)
        orderers[k] = orderer
        packages.setdefault(k, set()).add(fam)

    orderlist = []
    for k, fams in packages.iteritems():
        orderer = orderers[k]
        data = to_pod(orderer)
        data["packages"] = sorted(fams)
        orderlist.append(data)

    result = {"orderers": orderlist}

    if self.default_order is not None:
        result["default_order"] = to_pod(self.default_order)

    return result
[ "def", "to_pod", "(", "self", ")", ":", "orderers", "=", "{", "}", "packages", "=", "{", "}", "# group package fams by orderer they use", "for", "fam", ",", "orderer", "in", "self", ".", "order_dict", ".", "iteritems", "(", ")", ":", "k", "=", "id", "(", "orderer", ")", "orderers", "[", "k", "]", "=", "orderer", "packages", ".", "setdefault", "(", "k", ",", "set", "(", ")", ")", ".", "add", "(", "fam", ")", "orderlist", "=", "[", "]", "for", "k", ",", "fams", "in", "packages", ".", "iteritems", "(", ")", ":", "orderer", "=", "orderers", "[", "k", "]", "data", "=", "to_pod", "(", "orderer", ")", "data", "[", "\"packages\"", "]", "=", "sorted", "(", "fams", ")", "orderlist", ".", "append", "(", "data", ")", "result", "=", "{", "\"orderers\"", ":", "orderlist", "}", "if", "self", ".", "default_order", "is", "not", "None", ":", "result", "[", "\"default_order\"", "]", "=", "to_pod", "(", "self", ".", "default_order", ")", "return", "result" ]
Example (in yaml):

    type: per_family
    orderers:
    - packages: ['foo', 'bah']
      type: version_split
      first_version: '4.0.5'
    - packages: ['python']
      type: sorted
      descending: false
    default_order:
      type: sorted
      descending: true
[ "Example", "(", "in", "yaml", ")", ":" ]
python
train
tschaume/ccsgp_get_started
ccsgp_get_started/examples/utils.py
https://github.com/tschaume/ccsgp_get_started/blob/e4e29844a3e6fc7574e9b4b8cd84131f28ddc3f2/ccsgp_get_started/examples/utils.py#L49-L52
def getEdges(npArr):
    """get np array of bin edges"""
    edges = np.concatenate(([0], npArr[:,0] + npArr[:,2]))
    return np.array([Decimal(str(i)) for i in edges])
[ "def", "getEdges", "(", "npArr", ")", ":", "edges", "=", "np", ".", "concatenate", "(", "(", "[", "0", "]", ",", "npArr", "[", ":", ",", "0", "]", "+", "npArr", "[", ":", ",", "2", "]", ")", ")", "return", "np", ".", "array", "(", "[", "Decimal", "(", "str", "(", "i", ")", ")", "for", "i", "in", "edges", "]", ")" ]
get np array of bin edges
[ "get", "np", "array", "of", "bin", "edges" ]
python
train
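A tiny worked example; reading column 0 as bin centers and column 2 as half-widths is an assumption about the caller's data layout, while the function itself only sums those two columns and prepends 0:

import numpy as np
from decimal import Decimal

arr = np.array([[0.5, 10.0, 0.5],
                [1.5, 12.0, 0.5],
                [2.5,  9.0, 0.5]])
print(getEdges(arr))   # Decimals 0.0, 1.0, 2.0, 3.0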
NuGrid/NuGridPy
nugridpy/ppn.py
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/ppn.py#L478-L540
def getCycleData(self, attri, fname, numtype='cycNum'):
    """
    In this method a column of data for the associated cycle
    attribute is returned.

    Parameters
    ----------
    attri : string
        The name of the attribute we are looking for.
    fname : string
        The name of the file we are getting the data from or the
        cycle number found in the filename.
    numtype : string, optional
        Determines whether fname is the name of a file or, the
        cycle number.  If it is 'file' it will then interpret it as
        a file, if it is 'cycNum' it will then interpret it as a
        cycle number.  The default is "cycNum".

    """
    fname=self.findFile(fname,numtype)

    if self.inputdir == '':
        self.inputdir = self.sldir   # This chunk of code changes into the directory where fname is,
    os.chdir(self.inputdir)          # and appends a '/' to the directory title so it accesses the
    self.sldir=os.getcwd() + '/'     # file correctly

    f=open(fname,'r')
    lines=f.readlines()

    if self.inputdir != './':        #This chunk of code changes back into the directory you started in.
        os.chdir(self.startdir)
        self.sldir = self.inputdir

    for i in range(len(lines)):
        lines[i]=lines[i].strip()

    for i in range(len(lines)):
        if lines[i].startswith('#'):
            lines[i]=lines[i].strip('#')
            tmp=lines[i].split()
            tmp1=[]
            for j in range(len(tmp)):
                if tmp[j] != '=' or '':
                    tmp1.append(tmp[j])
            tmp=tmp1
            for j in range(len(tmp)):
                if tmp[j]== attri:
                    try:
                        if '.' in tmp[j+1]:
                            return float(tmp[j+1])
                        else:
                            return int(tmp[j+1])
                    except ValueError:
                        return str(tmp[j+1])
        elif lines[i].startswith('H'):
            continue
        else:
            print('This cycle attribute does not exist')
            print('Returning None')
            return None
[ "def", "getCycleData", "(", "self", ",", "attri", ",", "fname", ",", "numtype", "=", "'cycNum'", ")", ":", "fname", "=", "self", ".", "findFile", "(", "fname", ",", "numtype", ")", "if", "self", ".", "inputdir", "==", "''", ":", "self", ".", "inputdir", "=", "self", ".", "sldir", "# This chunk of code changes into the directory where fname is,", "os", ".", "chdir", "(", "self", ".", "inputdir", ")", "# and appends a '/' to the directory title so it accesses the", "self", ".", "sldir", "=", "os", ".", "getcwd", "(", ")", "+", "'/'", "# file correctly", "f", "=", "open", "(", "fname", ",", "'r'", ")", "lines", "=", "f", ".", "readlines", "(", ")", "if", "self", ".", "inputdir", "!=", "'./'", ":", "#This chunk of code changes back into the directory you started in.", "os", ".", "chdir", "(", "self", ".", "startdir", ")", "self", ".", "sldir", "=", "self", ".", "inputdir", "for", "i", "in", "range", "(", "len", "(", "lines", ")", ")", ":", "lines", "[", "i", "]", "=", "lines", "[", "i", "]", ".", "strip", "(", ")", "for", "i", "in", "range", "(", "len", "(", "lines", ")", ")", ":", "if", "lines", "[", "i", "]", ".", "startswith", "(", "'#'", ")", ":", "lines", "[", "i", "]", "=", "lines", "[", "i", "]", ".", "strip", "(", "'#'", ")", "tmp", "=", "lines", "[", "i", "]", ".", "split", "(", ")", "tmp1", "=", "[", "]", "for", "j", "in", "range", "(", "len", "(", "tmp", ")", ")", ":", "if", "tmp", "[", "j", "]", "!=", "'='", "or", "''", ":", "tmp1", ".", "append", "(", "tmp", "[", "j", "]", ")", "tmp", "=", "tmp1", "for", "j", "in", "range", "(", "len", "(", "tmp", ")", ")", ":", "if", "tmp", "[", "j", "]", "==", "attri", ":", "try", ":", "if", "'.'", "in", "tmp", "[", "j", "+", "1", "]", ":", "return", "float", "(", "tmp", "[", "j", "+", "1", "]", ")", "else", ":", "return", "int", "(", "tmp", "[", "j", "+", "1", "]", ")", "except", "ValueError", ":", "return", "str", "(", "tmp", "[", "j", "+", "1", "]", ")", "elif", "lines", "[", "i", "]", ".", "startswith", "(", "'H'", ")", ":", "continue", "else", ":", "print", "(", "'This cycle attribute does not exist'", ")", "print", "(", "'Returning None'", ")", "return", "None" ]
In this method a column of data for the associated cycle
attribute is returned.

Parameters
----------
attri : string
    The name of the attribute we are looking for.
fname : string
    The name of the file we are getting the data from or the
    cycle number found in the filename.
numtype : string, optional
    Determines whether fname is the name of a file or, the
    cycle number.  If it is 'file' it will then interpret it as
    a file, if it is 'cycNum' it will then interpret it as a
    cycle number.  The default is "cycNum".
[ "In", "this", "method", "a", "column", "of", "data", "for", "the", "associated", "cycle", "attribute", "is", "returned", "." ]
python
train
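A usage sketch; `p` is assumed to be an instance of the ppn reader class that defines getCycleData, and the attribute and file names below are purely illustrative:

# look a value up in a named output file...
val = p.getCycleData('mass', 'iso_massf00000.DAT', numtype='file')
# ...or let the method locate the file for cycle 100 (numtype='cycNum' is the default)
t9 = p.getCycleData('t9', 100)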