Dataset columns:
repo: string (7 to 55 characters)
path: string (4 to 223 characters)
url: string (87 to 315 characters)
code: string (75 to 104k characters)
code_tokens: list
docstring: string (1 to 46.9k characters)
docstring_tokens: list
language: string (1 distinct value)
partition: string (3 distinct values)
avg_line_len: float64 (7.91 to 980)
amcfague/webunit2
webunit2/response.py
https://github.com/amcfague/webunit2/blob/3157e5837aad0810800628c1383f1fe11ee3e513/webunit2/response.py#L97-L99
def assertNotCookie(self, name, value=None, attrs={}, *args, **kwargs):
    """ Negation of :meth:`assertCookie`. """
    return not self.assertCookie(name, value, attrs)
[ "def", "assertNotCookie", "(", "self", ",", "name", ",", "value", "=", "None", ",", "attrs", "=", "{", "}", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "not", "self", ".", "assertCookie", "(", "name", ",", "value", ",", "attrs", ")" ]
Negation of :meth:`assertCookie`.
[ "Negation", "of", ":", "meth", ":", "assertCookie", "." ]
python
train
58.666667
SUNCAT-Center/CatHub
cathub/make_folders_template.py
https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/make_folders_template.py#L14-L225
def main( title, authors, year, email, journal='', volume='', number='', pages='', publisher='', doi='', tags=[], DFT_code='Quantum ESPRESSO', DFT_functionals=['BEEF-vdW'], reactions=[ {'reactants': ['2.0H2Ogas', '-1.5H2gas', 'star'], 'products': ['OOHstar@ontop']}], energy_corrections={}, bulk_compositions=['Pt', 'Ag'], crystal_structures=['fcc', 'hcp'], facets=['111'], custom_base=None): """Automatically generate an organized folder structure for a DFT calculation. Start by copying the script to a folder in your username and assign the right information to the arguments in the function. You can change the parameters and run the script several times if you, for example, are using different functionals or are doing different reactions on different surfaces. Remember to include the reaction that gives the adsorption energy of reaction intermediates, taking gas phase molecules as references (preferably H20, H2, CH4, CO, NH3). Parameters ---------- title : str Publication or working title if not yet published. authors : list Author names, e.g. ['Doe, John', 'Einstein, Albert'] year : str Year of (submission?) email : str email address of the person responsible for uploading. Login at catalysis-hub.org currently only supports @gmail or Slack login email addresses. journal : str Publications journal name volume : str Publications volume number number : str Publication number pages : str Publication page numbers publisher : str Publisher name doi : str, optional DOI of publication tags : list, optional User defined quire tags DFT_code : str e.g. 'Quantum ESPRESSO' DFT_functionals : list of str Calculator functional used, e.g. 'BEEF-vdW' reactions : list of dict A new dictionary is required for each reaction, and should include two lists, 'reactants' and 'products'. Remember to include a minus sign and prefactor in the name when relevant. If your reaction is not balanced, you will receive an error when running the script. Include the phase if mixing gas phase and surface phase. e.g. 'star' for empty site or adsorbed phase, 'gas' if in gas phase. Include the adsorption site if relevant. e.g. star@top or star@bridge. For example, we can write an entry for the adsorption of CH2: CH4(g) - H2(g) + * -> CH2* as: {'reactants': ['CH4gas', 'H2gas', 'star'], 'products': ['CH2star@bridge']} A complete entry could read: reactions = [ {'reactants': ['CH4gas', '-H2gas', 'star'], 'products': ['CH2star@bridge']}, {'reactants': ['CH4gas', '-0.5H2gas', 'star'], 'products': ['CH3star@top']}] energy_corrections : dict, optional e.g. {'H2gas': 0.1} bulk_compositions : list of str e.g. ['Pt', 'Ag'] crystal_structures : list of str e.g. ['fcc', 'hcp'] facets : list For complicated structures use term you would use in publication. e.g. 
['111'] custom_base : str TODO """ for reaction in reactions: check_reaction(reaction['reactants'], reaction['products']) # Set up directories if custom_base is not None: base = custom_base + '/' else: catbase = os.path.abspath(os.path.curdir) base = '%s/%s/' % (catbase, username) if not os.path.exists(base): os.mkdir(base) publication_shortname = get_pub_id(title, authors, year) publication_base = base + publication_shortname + '/' if not os.path.exists(publication_base): os.mkdir(publication_base) # save publication info to publications.txt publication_dict = {'title': title, 'authors': authors, 'journal': journal, 'volume': volume, 'number': number, 'pages': pages, 'year': year, 'email': email, 'publisher': publisher, 'doi': doi, 'tags': tags } pub_txt = publication_base + 'publication.txt' with open(pub_txt, 'w') as f: yaml.dump(publication_dict, f) if not len(energy_corrections.keys()) == 0: energy_txt = publication_base + 'energy_corrections.txt' with open(energy_txt, 'w') as fe: yaml.dump(energy_corrections, fe) def create(path): if not os.path.exists(path): os.mkdir(path) return path base = create(publication_base + DFT_code + '/') bulk_bases = [] gas_bases = [] for DFT_functional in DFT_functionals: bulk_bases += [create(base + DFT_functional + '/')] gas_bases += [create(base + DFT_functional + '/gas/')] gas_names = [] ads_names = [] for i in range(len(reactions)): rnames = [r.split('@')[0] for r in reactions[i]['reactants'] + reactions[i]['products']] states = [get_state(r) for r in rnames] gas_names += [clear_state(clear_prefactor(rnames[i])) for i in range(len(states)) if states[i] == 'gas'] for gas_base in gas_bases: for name in set(gas_names): with open(gas_base + 'MISSING:{}_gas'.format(name), 'w'): pass for bulk_base in bulk_bases: for bulk in bulk_compositions: for crystal_structure in crystal_structures: bulk_name = bulk + '_' + crystal_structure facet_base = create(bulk_base + bulk_name + '/') with open(facet_base + 'MISSING:{}_bulk'.format(bulk_name), 'w'): pass for facet in facets: reaction_base = create(facet_base + facet + '/') with open(reaction_base + 'MISSING:empty_slab' .format(bulk_name), 'w'): pass for i in range(len(reactions)): rname = '_'.join(reactions[i]['reactants']) pname = '_'.join(reactions[i]['products']) reaction_name = '__'.join([rname, pname]) base = create(reaction_base + reaction_name + '/') rnames = [r.split('@')[0] for r in reactions[i]['reactants'] + reactions[i]['products']] states = [get_state(r) for r in rnames] ads_names = [clear_prefactor(clear_state(rnames[i])) for i in range(len(states)) if states[i] == 'star'] for ads in ads_names: if ads == '': continue with open(base + 'MISSING:{}_slab'.format(ads), 'w'): pass with open(base + 'MISSING:TS?'.format(ads), 'w'): pass print('Folders were succesfully created under {}'.format(publication_base))
[ "def", "main", "(", "title", ",", "authors", ",", "year", ",", "email", ",", "journal", "=", "''", ",", "volume", "=", "''", ",", "number", "=", "''", ",", "pages", "=", "''", ",", "publisher", "=", "''", ",", "doi", "=", "''", ",", "tags", "=", "[", "]", ",", "DFT_code", "=", "'Quantum ESPRESSO'", ",", "DFT_functionals", "=", "[", "'BEEF-vdW'", "]", ",", "reactions", "=", "[", "{", "'reactants'", ":", "[", "'2.0H2Ogas'", ",", "'-1.5H2gas'", ",", "'star'", "]", ",", "'products'", ":", "[", "'OOHstar@ontop'", "]", "}", "]", ",", "energy_corrections", "=", "{", "}", ",", "bulk_compositions", "=", "[", "'Pt'", ",", "'Ag'", "]", ",", "crystal_structures", "=", "[", "'fcc'", ",", "'hcp'", "]", ",", "facets", "=", "[", "'111'", "]", ",", "custom_base", "=", "None", ")", ":", "for", "reaction", "in", "reactions", ":", "check_reaction", "(", "reaction", "[", "'reactants'", "]", ",", "reaction", "[", "'products'", "]", ")", "# Set up directories", "if", "custom_base", "is", "not", "None", ":", "base", "=", "custom_base", "+", "'/'", "else", ":", "catbase", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "curdir", ")", "base", "=", "'%s/%s/'", "%", "(", "catbase", ",", "username", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "base", ")", ":", "os", ".", "mkdir", "(", "base", ")", "publication_shortname", "=", "get_pub_id", "(", "title", ",", "authors", ",", "year", ")", "publication_base", "=", "base", "+", "publication_shortname", "+", "'/'", "if", "not", "os", ".", "path", ".", "exists", "(", "publication_base", ")", ":", "os", ".", "mkdir", "(", "publication_base", ")", "# save publication info to publications.txt", "publication_dict", "=", "{", "'title'", ":", "title", ",", "'authors'", ":", "authors", ",", "'journal'", ":", "journal", ",", "'volume'", ":", "volume", ",", "'number'", ":", "number", ",", "'pages'", ":", "pages", ",", "'year'", ":", "year", ",", "'email'", ":", "email", ",", "'publisher'", ":", "publisher", ",", "'doi'", ":", "doi", ",", "'tags'", ":", "tags", "}", "pub_txt", "=", "publication_base", "+", "'publication.txt'", "with", "open", "(", "pub_txt", ",", "'w'", ")", "as", "f", ":", "yaml", ".", "dump", "(", "publication_dict", ",", "f", ")", "if", "not", "len", "(", "energy_corrections", ".", "keys", "(", ")", ")", "==", "0", ":", "energy_txt", "=", "publication_base", "+", "'energy_corrections.txt'", "with", "open", "(", "energy_txt", ",", "'w'", ")", "as", "fe", ":", "yaml", ".", "dump", "(", "energy_corrections", ",", "fe", ")", "def", "create", "(", "path", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "os", ".", "mkdir", "(", "path", ")", "return", "path", "base", "=", "create", "(", "publication_base", "+", "DFT_code", "+", "'/'", ")", "bulk_bases", "=", "[", "]", "gas_bases", "=", "[", "]", "for", "DFT_functional", "in", "DFT_functionals", ":", "bulk_bases", "+=", "[", "create", "(", "base", "+", "DFT_functional", "+", "'/'", ")", "]", "gas_bases", "+=", "[", "create", "(", "base", "+", "DFT_functional", "+", "'/gas/'", ")", "]", "gas_names", "=", "[", "]", "ads_names", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "reactions", ")", ")", ":", "rnames", "=", "[", "r", ".", "split", "(", "'@'", ")", "[", "0", "]", "for", "r", "in", "reactions", "[", "i", "]", "[", "'reactants'", "]", "+", "reactions", "[", "i", "]", "[", "'products'", "]", "]", "states", "=", "[", "get_state", "(", "r", ")", "for", "r", "in", "rnames", "]", "gas_names", "+=", "[", "clear_state", "(", "clear_prefactor", 
"(", "rnames", "[", "i", "]", ")", ")", "for", "i", "in", "range", "(", "len", "(", "states", ")", ")", "if", "states", "[", "i", "]", "==", "'gas'", "]", "for", "gas_base", "in", "gas_bases", ":", "for", "name", "in", "set", "(", "gas_names", ")", ":", "with", "open", "(", "gas_base", "+", "'MISSING:{}_gas'", ".", "format", "(", "name", ")", ",", "'w'", ")", ":", "pass", "for", "bulk_base", "in", "bulk_bases", ":", "for", "bulk", "in", "bulk_compositions", ":", "for", "crystal_structure", "in", "crystal_structures", ":", "bulk_name", "=", "bulk", "+", "'_'", "+", "crystal_structure", "facet_base", "=", "create", "(", "bulk_base", "+", "bulk_name", "+", "'/'", ")", "with", "open", "(", "facet_base", "+", "'MISSING:{}_bulk'", ".", "format", "(", "bulk_name", ")", ",", "'w'", ")", ":", "pass", "for", "facet", "in", "facets", ":", "reaction_base", "=", "create", "(", "facet_base", "+", "facet", "+", "'/'", ")", "with", "open", "(", "reaction_base", "+", "'MISSING:empty_slab'", ".", "format", "(", "bulk_name", ")", ",", "'w'", ")", ":", "pass", "for", "i", "in", "range", "(", "len", "(", "reactions", ")", ")", ":", "rname", "=", "'_'", ".", "join", "(", "reactions", "[", "i", "]", "[", "'reactants'", "]", ")", "pname", "=", "'_'", ".", "join", "(", "reactions", "[", "i", "]", "[", "'products'", "]", ")", "reaction_name", "=", "'__'", ".", "join", "(", "[", "rname", ",", "pname", "]", ")", "base", "=", "create", "(", "reaction_base", "+", "reaction_name", "+", "'/'", ")", "rnames", "=", "[", "r", ".", "split", "(", "'@'", ")", "[", "0", "]", "for", "r", "in", "reactions", "[", "i", "]", "[", "'reactants'", "]", "+", "reactions", "[", "i", "]", "[", "'products'", "]", "]", "states", "=", "[", "get_state", "(", "r", ")", "for", "r", "in", "rnames", "]", "ads_names", "=", "[", "clear_prefactor", "(", "clear_state", "(", "rnames", "[", "i", "]", ")", ")", "for", "i", "in", "range", "(", "len", "(", "states", ")", ")", "if", "states", "[", "i", "]", "==", "'star'", "]", "for", "ads", "in", "ads_names", ":", "if", "ads", "==", "''", ":", "continue", "with", "open", "(", "base", "+", "'MISSING:{}_slab'", ".", "format", "(", "ads", ")", ",", "'w'", ")", ":", "pass", "with", "open", "(", "base", "+", "'MISSING:TS?'", ".", "format", "(", "ads", ")", ",", "'w'", ")", ":", "pass", "print", "(", "'Folders were succesfully created under {}'", ".", "format", "(", "publication_base", ")", ")" ]
Automatically generate an organized folder structure for a DFT calculation. Start by copying the script to a folder in your username and assign the right information to the arguments in the function. You can change the parameters and run the script several times if you, for example, are using different functionals or are doing different reactions on different surfaces. Remember to include the reaction that gives the adsorption energy of reaction intermediates, taking gas phase molecules as references (preferably H20, H2, CH4, CO, NH3). Parameters ---------- title : str Publication or working title if not yet published. authors : list Author names, e.g. ['Doe, John', 'Einstein, Albert'] year : str Year of (submission?) email : str email address of the person responsible for uploading. Login at catalysis-hub.org currently only supports @gmail or Slack login email addresses. journal : str Publications journal name volume : str Publications volume number number : str Publication number pages : str Publication page numbers publisher : str Publisher name doi : str, optional DOI of publication tags : list, optional User defined quire tags DFT_code : str e.g. 'Quantum ESPRESSO' DFT_functionals : list of str Calculator functional used, e.g. 'BEEF-vdW' reactions : list of dict A new dictionary is required for each reaction, and should include two lists, 'reactants' and 'products'. Remember to include a minus sign and prefactor in the name when relevant. If your reaction is not balanced, you will receive an error when running the script. Include the phase if mixing gas phase and surface phase. e.g. 'star' for empty site or adsorbed phase, 'gas' if in gas phase. Include the adsorption site if relevant. e.g. star@top or star@bridge. For example, we can write an entry for the adsorption of CH2: CH4(g) - H2(g) + * -> CH2* as: {'reactants': ['CH4gas', 'H2gas', 'star'], 'products': ['CH2star@bridge']} A complete entry could read: reactions = [ {'reactants': ['CH4gas', '-H2gas', 'star'], 'products': ['CH2star@bridge']}, {'reactants': ['CH4gas', '-0.5H2gas', 'star'], 'products': ['CH3star@top']}] energy_corrections : dict, optional e.g. {'H2gas': 0.1} bulk_compositions : list of str e.g. ['Pt', 'Ag'] crystal_structures : list of str e.g. ['fcc', 'hcp'] facets : list For complicated structures use term you would use in publication. e.g. ['111'] custom_base : str TODO
[ "Automatically", "generate", "an", "organized", "folder", "structure", "for", "a", "DFT", "calculation", "." ]
python
train
35.212264
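The docstring above is essentially a how-to, so a minimal call sketch may help. It assumes CatHub is installed, that `main` is importable from `cathub.make_folders_template` (inferred from the file path above), and every argument value below is a placeholder rather than real publication data.

from cathub.make_folders_template import main  # import path inferred from the file path above

main(
    title='CO2 reduction on transition metals',      # placeholder working title
    authors=['Doe, John', 'Einstein, Albert'],
    year='2019',
    email='doe@gmail.com',
    DFT_code='Quantum ESPRESSO',
    DFT_functionals=['BEEF-vdW'],
    reactions=[
        {'reactants': ['CH4gas', '-H2gas', 'star'],
         'products': ['CH2star@bridge']},
    ],
    bulk_compositions=['Pt', 'Ag'],
    crystal_structures=['fcc', 'hcp'],
    facets=['111'],
    custom_base='organized_folders',                 # write under ./organized_folders instead of the user dir
)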
The-Politico/politico-civic-demography
demography/management/commands/bootstrap/fetch/__init__.py
https://github.com/The-Politico/politico-civic-demography/blob/080bb964b64b06db7fd04386530e893ceed1cf98/demography/management/commands/bootstrap/fetch/__init__.py#L20-L55
def fetch_state_data(self, states): """ Fetch census estimates from table. """ print("Fetching census data") for table in CensusTable.objects.all(): api = self.get_series(table.series) for variable in table.variables.all(): estimate = "{}_{}".format(table.code, variable.code) print( ">> Fetching {} {} {}".format( table.year, table.series, estimate ) ) for state in tqdm(states): self.get_state_estimates_by_state( api=api, table=table, variable=variable, estimate=estimate, state=state, ) self.get_county_estimates_by_state( api=api, table=table, variable=variable, estimate=estimate, state=state, ) self.get_district_estimates_by_state( api=api, table=table, variable=variable, estimate=estimate, state=state, )
[ "def", "fetch_state_data", "(", "self", ",", "states", ")", ":", "print", "(", "\"Fetching census data\"", ")", "for", "table", "in", "CensusTable", ".", "objects", ".", "all", "(", ")", ":", "api", "=", "self", ".", "get_series", "(", "table", ".", "series", ")", "for", "variable", "in", "table", ".", "variables", ".", "all", "(", ")", ":", "estimate", "=", "\"{}_{}\"", ".", "format", "(", "table", ".", "code", ",", "variable", ".", "code", ")", "print", "(", "\">> Fetching {} {} {}\"", ".", "format", "(", "table", ".", "year", ",", "table", ".", "series", ",", "estimate", ")", ")", "for", "state", "in", "tqdm", "(", "states", ")", ":", "self", ".", "get_state_estimates_by_state", "(", "api", "=", "api", ",", "table", "=", "table", ",", "variable", "=", "variable", ",", "estimate", "=", "estimate", ",", "state", "=", "state", ",", ")", "self", ".", "get_county_estimates_by_state", "(", "api", "=", "api", ",", "table", "=", "table", ",", "variable", "=", "variable", ",", "estimate", "=", "estimate", ",", "state", "=", "state", ",", ")", "self", ".", "get_district_estimates_by_state", "(", "api", "=", "api", ",", "table", "=", "table", ",", "variable", "=", "variable", ",", "estimate", "=", "estimate", ",", "state", "=", "state", ",", ")" ]
Fetch census estimates from table.
[ "Fetch", "census", "estimates", "from", "table", "." ]
python
train
37.527778
palantir/typedjsonrpc
typedjsonrpc/server.py
https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/server.py#L114-L127
def run(self, host, port, **options):
    """For debugging purposes, you can run this as a standalone server.

    .. WARNING:: **Security vulnerability**

        This uses :class:`DebuggedJsonRpcApplication` to assist debugging.
        If you want to use this in production, you should run :class:`Server`
        as a standard WSGI app with
        `uWSGI <https://uwsgi-docs.readthedocs.org/en/latest/>`_ or another
        similar WSGI server.

    .. versionadded:: 0.1.0
    """
    self.registry.debug = True
    debugged = DebuggedJsonRpcApplication(self, evalex=True)
    run_simple(host, port, debugged, use_reloader=True, **options)
[ "def", "run", "(", "self", ",", "host", ",", "port", ",", "*", "*", "options", ")", ":", "self", ".", "registry", ".", "debug", "=", "True", "debugged", "=", "DebuggedJsonRpcApplication", "(", "self", ",", "evalex", "=", "True", ")", "run_simple", "(", "host", ",", "port", ",", "debugged", ",", "use_reloader", "=", "True", ",", "*", "*", "options", ")" ]
For debugging purposes, you can run this as a standalone server. .. WARNING:: **Security vulnerability** This uses :class:`DebuggedJsonRpcApplication` to assist debugging. If you want to use this in production, you should run :class:`Server` as a standard WSGI app with `uWSGI <https://uwsgi-docs.readthedocs.org/en/latest/>`_ or another similar WSGI server. .. versionadded:: 0.1.0
[ "For", "debugging", "purposes", "you", "can", "run", "this", "as", "a", "standalone", "server", "." ]
python
train
46.857143
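For context, a sketch of how `run` is typically reached, following the Registry-plus-Server wiring described in typedjsonrpc's README; treat the registration details as assumptions if your version differs. As the warning above says, this debug server is not meant for production.

from typedjsonrpc.registry import Registry
from typedjsonrpc.server import Server

registry = Registry()

@registry.method(returns=int, a=int, b=int)
def add(a, b):
    return a + b

server = Server(registry)
server.run("127.0.0.1", 3031)   # debug-only server with the reloader enabled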
ruipgil/changepy
changepy/costs.py
https://github.com/ruipgil/changepy/blob/95a903a24d532d658d4f1775d298c7fd51cdf47c/changepy/costs.py#L3-L41
def normal_mean(data, variance):
    """ Creates a segment cost function for a time series with a
    Normal distribution with changing mean

    Args:
        data (:obj:`list` of float): 1D time series data
        variance (float): variance
    Returns:
        function: Function with signature (int, int) -> float
            where the first arg is the starting index, and the second
            is the last arg. Returns the cost of that segment
    """
    if not isinstance(data, np.ndarray):
        data = np.array(data)

    i_variance_2 = 1 / (variance ** 2)
    cmm = [0.0]
    cmm.extend(np.cumsum(data))

    cmm2 = [0.0]
    cmm2.extend(np.cumsum(np.abs(data)))

    def cost(start, end):
        """ Cost function for normal distribution with variable mean

        Args:
            start (int): start index
            end (int): end index
        Returns:
            float: Cost, from start to end
        """
        cmm2_diff = cmm2[end] - cmm2[start]
        cmm_diff = pow(cmm[end] - cmm[start], 2)
        i_diff = end - start
        diff = cmm2_diff - cmm_diff
        return (diff/i_diff) * i_variance_2

    return cost
[ "def", "normal_mean", "(", "data", ",", "variance", ")", ":", "if", "not", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", ":", "data", "=", "np", ".", "array", "(", "data", ")", "i_variance_2", "=", "1", "/", "(", "variance", "**", "2", ")", "cmm", "=", "[", "0.0", "]", "cmm", ".", "extend", "(", "np", ".", "cumsum", "(", "data", ")", ")", "cmm2", "=", "[", "0.0", "]", "cmm2", ".", "extend", "(", "np", ".", "cumsum", "(", "np", ".", "abs", "(", "data", ")", ")", ")", "def", "cost", "(", "start", ",", "end", ")", ":", "\"\"\" Cost function for normal distribution with variable mean\n\n Args:\n start (int): start index\n end (int): end index\n Returns:\n float: Cost, from start to end\n \"\"\"", "cmm2_diff", "=", "cmm2", "[", "end", "]", "-", "cmm2", "[", "start", "]", "cmm_diff", "=", "pow", "(", "cmm", "[", "end", "]", "-", "cmm", "[", "start", "]", ",", "2", ")", "i_diff", "=", "end", "-", "start", "diff", "=", "cmm2_diff", "-", "cmm_diff", "return", "(", "diff", "/", "i_diff", ")", "*", "i_variance_2", "return", "cost" ]
Creates a segment cost function for a time series with a Normal distribution with changing mean Args: data (:obj:`list` of float): 1D time series data variance (float): variance Returns: function: Function with signature (int, int) -> float where the first arg is the starting index, and the second is the last arg. Returns the cost of that segment
[ "Creates", "a", "segment", "cost", "function", "for", "a", "time", "series", "with", "a", "Normal", "distribution", "with", "changing", "mean" ]
python
train
28.871795
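A small usage sketch, roughly following the changepy README: the returned cost function is handed to `pelt` together with the series length (the `pelt` signature is assumed from the README, not from the snippet above).

import numpy as np
from changepy import pelt
from changepy.costs import normal_mean

var = 0.1
data = np.append(np.random.normal(0.0, var, 100),    # mean shifts from 0.0 ...
                 np.random.normal(10.0, var, 100))   # ... to 10.0 at index 100

changepoints = pelt(normal_mean(data, var), len(data))
print(changepoints)   # expected to include the shift near index 100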
DistrictDataLabs/yellowbrick
yellowbrick/cluster/silhouette.py
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/cluster/silhouette.py#L184-L217
def finalize(self):
    """
    Prepare the figure for rendering by setting the title and adjusting
    the limits on the axes, adding labels and a legend.
    """
    # Set the title
    self.set_title((
        "Silhouette Plot of {} Clustering for {} Samples in {} Centers"
    ).format(
        self.name, self.n_samples_, self.n_clusters_
    ))

    # Set the X and Y limits
    # The silhouette coefficient can range from -1, 1;
    # but here we scale the plot according to our visualizations
    # l_xlim and u_xlim are lower and upper limits of the x-axis,
    # set according to our calculated maximum and minimum silhouette score along with necessary padding
    l_xlim = max(-1, min(-0.1, round(min(self.silhouette_samples_) - 0.1, 1)))
    u_xlim = min(1, round(max(self.silhouette_samples_) + 0.1, 1))
    self.ax.set_xlim([l_xlim, u_xlim])

    # The (n_clusters_+1)*10 is for inserting blank space between
    # silhouette plots of individual clusters, to demarcate them clearly.
    self.ax.set_ylim([0, self.n_samples_ + (self.n_clusters_ + 1) * 10])

    # Set the x and y labels
    self.ax.set_xlabel("silhouette coefficient values")
    self.ax.set_ylabel("cluster label")

    # Set the ticks on the axis object.
    self.ax.set_yticks([])  # Clear the yaxis labels / ticks
    self.ax.xaxis.set_major_locator(ticker.MultipleLocator(0.1))
[ "def", "finalize", "(", "self", ")", ":", "# Set the title", "self", ".", "set_title", "(", "(", "\"Silhouette Plot of {} Clustering for {} Samples in {} Centers\"", ")", ".", "format", "(", "self", ".", "name", ",", "self", ".", "n_samples_", ",", "self", ".", "n_clusters_", ")", ")", "# Set the X and Y limits", "# The silhouette coefficient can range from -1, 1;", "# but here we scale the plot according to our visualizations", "# l_xlim and u_xlim are lower and upper limits of the x-axis,", "# set according to our calculated maximum and minimum silhouette score along with necessary padding", "l_xlim", "=", "max", "(", "-", "1", ",", "min", "(", "-", "0.1", ",", "round", "(", "min", "(", "self", ".", "silhouette_samples_", ")", "-", "0.1", ",", "1", ")", ")", ")", "u_xlim", "=", "min", "(", "1", ",", "round", "(", "max", "(", "self", ".", "silhouette_samples_", ")", "+", "0.1", ",", "1", ")", ")", "self", ".", "ax", ".", "set_xlim", "(", "[", "l_xlim", ",", "u_xlim", "]", ")", "# The (n_clusters_+1)*10 is for inserting blank space between", "# silhouette plots of individual clusters, to demarcate them clearly.", "self", ".", "ax", ".", "set_ylim", "(", "[", "0", ",", "self", ".", "n_samples_", "+", "(", "self", ".", "n_clusters_", "+", "1", ")", "*", "10", "]", ")", "# Set the x and y labels", "self", ".", "ax", ".", "set_xlabel", "(", "\"silhouette coefficient values\"", ")", "self", ".", "ax", ".", "set_ylabel", "(", "\"cluster label\"", ")", "# Set the ticks on the axis object.", "self", ".", "ax", ".", "set_yticks", "(", "[", "]", ")", "# Clear the yaxis labels / ticks", "self", ".", "ax", ".", "xaxis", ".", "set_major_locator", "(", "ticker", ".", "MultipleLocator", "(", "0.1", ")", ")" ]
Prepare the figure for rendering by setting the title and adjusting the limits on the axes, adding labels and a legend.
[ "Prepare", "the", "figure", "for", "rendering", "by", "setting", "the", "title", "and", "adjusting", "the", "limits", "on", "the", "axes", "adding", "labels", "and", "a", "legend", "." ]
python
train
42.323529
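The x-limit padding in `finalize` is easier to see with numbers; a standalone sketch with made-up silhouette scores:

scores = [-0.12, 0.10, 0.35, 0.62, 0.81]   # made-up silhouette coefficients

# Pad by 0.1 on each side, always show down to at least -0.1, and clamp to [-1, 1].
l_xlim = max(-1, min(-0.1, round(min(scores) - 0.1, 1)))   # -0.2 for these scores
u_xlim = min(1, round(max(scores) + 0.1, 1))               # 0.9 for these scores
print(l_xlim, u_xlim)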
zeaphoo/budoc
budoc/pydoc.py
https://github.com/zeaphoo/budoc/blob/28f3aea4ad72ac90605ced012ed20e61af90c23a/budoc/pydoc.py#L663-L674
def methods(self):
    """
    Returns all documented methods as `pydoc.Function` objects in
    the class, sorted alphabetically with `__init__` always coming
    first.

    Unfortunately, this also includes class methods.
    """
    p = lambda o: (isinstance(o, Function)
                   and o.method
                   and self.module._docfilter(o))
    return filter(p, self.doc.values())
[ "def", "methods", "(", "self", ")", ":", "p", "=", "lambda", "o", ":", "(", "isinstance", "(", "o", ",", "Function", ")", "and", "o", ".", "method", "and", "self", ".", "module", ".", "_docfilter", "(", "o", ")", ")", "return", "filter", "(", "p", ",", "self", ".", "doc", ".", "values", "(", ")", ")" ]
Returns all documented methods as `pydoc.Function` objects in the class, sorted alphabetically with `__init__` always coming first. Unfortunately, this also includes class methods.
[ "Returns", "all", "documented", "methods", "as", "pydoc", ".", "Function", "objects", "in", "the", "class", "sorted", "alphabetically", "with", "__init__", "always", "coming", "first", "." ]
python
train
35.5
openego/eDisGo
edisgo/tools/tools.py
https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/tools/tools.py#L71-L135
def assign_load_feedin_case(network): """ For each time step evaluate whether it is a feed-in or a load case. Feed-in and load case are identified based on the generation and load time series and defined as follows: 1. Load case: positive (load - generation) at HV/MV substation 2. Feed-in case: negative (load - generation) at HV/MV substation Output of this function is written to `timesteps_load_feedin_case` attribute of the network.timeseries (see :class:`~.grid.network.TimeSeries`). Parameters ---------- network : :class:`~.grid.network.Network` Network for which worst-case snapshots are identified. Returns -------- :pandas:`pandas.DataFrame<dataframe>` Dataframe with information on whether time step is handled as load case ('load_case') or feed-in case ('feedin_case') for each time step in `timeindex` attribute of network.timeseries. Index of the dataframe is network.timeseries.timeindex. Columns of the dataframe are 'residual_load' with (load - generation) in kW at HV/MV substation and 'case' with 'load_case' for positive residual load and 'feedin_case' for negative residual load. """ if network.pypsa is not None: residual_load = get_residual_load_from_pypsa_network(network.pypsa) * \ 1e3 else: grids = [network.mv_grid] + list(network.mv_grid.lv_grids) gens = [] loads = [] for grid in grids: gens.extend(grid.generators) gens.extend(list(grid.graph.nodes_by_attribute('storage'))) loads.extend(list(grid.graph.nodes_by_attribute('load'))) generation_timeseries = pd.Series( 0, index=network.timeseries.timeindex) for gen in gens: generation_timeseries += gen.timeseries.p load_timeseries = pd.Series(0, index=network.timeseries.timeindex) for load in loads: load_timeseries += load.timeseries.p residual_load = load_timeseries - generation_timeseries timeseries_load_feedin_case = residual_load.rename( 'residual_load').to_frame() timeseries_load_feedin_case['case'] = \ timeseries_load_feedin_case.residual_load.apply( lambda _: 'feedin_case' if _ < 0 else 'load_case') return timeseries_load_feedin_case
[ "def", "assign_load_feedin_case", "(", "network", ")", ":", "if", "network", ".", "pypsa", "is", "not", "None", ":", "residual_load", "=", "get_residual_load_from_pypsa_network", "(", "network", ".", "pypsa", ")", "*", "1e3", "else", ":", "grids", "=", "[", "network", ".", "mv_grid", "]", "+", "list", "(", "network", ".", "mv_grid", ".", "lv_grids", ")", "gens", "=", "[", "]", "loads", "=", "[", "]", "for", "grid", "in", "grids", ":", "gens", ".", "extend", "(", "grid", ".", "generators", ")", "gens", ".", "extend", "(", "list", "(", "grid", ".", "graph", ".", "nodes_by_attribute", "(", "'storage'", ")", ")", ")", "loads", ".", "extend", "(", "list", "(", "grid", ".", "graph", ".", "nodes_by_attribute", "(", "'load'", ")", ")", ")", "generation_timeseries", "=", "pd", ".", "Series", "(", "0", ",", "index", "=", "network", ".", "timeseries", ".", "timeindex", ")", "for", "gen", "in", "gens", ":", "generation_timeseries", "+=", "gen", ".", "timeseries", ".", "p", "load_timeseries", "=", "pd", ".", "Series", "(", "0", ",", "index", "=", "network", ".", "timeseries", ".", "timeindex", ")", "for", "load", "in", "loads", ":", "load_timeseries", "+=", "load", ".", "timeseries", ".", "p", "residual_load", "=", "load_timeseries", "-", "generation_timeseries", "timeseries_load_feedin_case", "=", "residual_load", ".", "rename", "(", "'residual_load'", ")", ".", "to_frame", "(", ")", "timeseries_load_feedin_case", "[", "'case'", "]", "=", "timeseries_load_feedin_case", ".", "residual_load", ".", "apply", "(", "lambda", "_", ":", "'feedin_case'", "if", "_", "<", "0", "else", "'load_case'", ")", "return", "timeseries_load_feedin_case" ]
For each time step evaluate whether it is a feed-in or a load case. Feed-in and load case are identified based on the generation and load time series and defined as follows: 1. Load case: positive (load - generation) at HV/MV substation 2. Feed-in case: negative (load - generation) at HV/MV substation Output of this function is written to `timesteps_load_feedin_case` attribute of the network.timeseries (see :class:`~.grid.network.TimeSeries`). Parameters ---------- network : :class:`~.grid.network.Network` Network for which worst-case snapshots are identified. Returns -------- :pandas:`pandas.DataFrame<dataframe>` Dataframe with information on whether time step is handled as load case ('load_case') or feed-in case ('feedin_case') for each time step in `timeindex` attribute of network.timeseries. Index of the dataframe is network.timeseries.timeindex. Columns of the dataframe are 'residual_load' with (load - generation) in kW at HV/MV substation and 'case' with 'load_case' for positive residual load and 'feedin_case' for negative residual load.
[ "For", "each", "time", "step", "evaluate", "whether", "it", "is", "a", "feed", "-", "in", "or", "a", "load", "case", "." ]
python
train
35.892308
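The final classification step stands on its own; a minimal pandas sketch with a made-up residual-load series (positive means load case, negative means feed-in case), mirroring the lambda in the function above:

import pandas as pd

# Toy residual load (load - generation) in kW; negative values mean feed-in.
residual_load = pd.Series([120.0, -35.0, 0.0, -5.0],
                          index=pd.date_range('2019-01-01', periods=4))

df = residual_load.rename('residual_load').to_frame()
df['case'] = df.residual_load.apply(lambda x: 'feedin_case' if x < 0 else 'load_case')
print(df)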
inspirehep/harvesting-kit
harvestingkit/utils.py
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/utils.py#L343-L361
def punctuate_authorname(an):
    """Punctuate author names properly.

    Expects input in the form 'Bloggs, J K' and will return 'Bloggs, J. K.'.
    """
    name = an.strip()
    parts = [x for x in name.split(',') if x != '']
    ret_str = ''
    for idx, part in enumerate(parts):
        subparts = part.strip().split(' ')
        for sidx, substr in enumerate(subparts):
            ret_str += substr
            if len(substr) == 1:
                ret_str += '.'
            if sidx < (len(subparts) - 1):
                ret_str += ' '
        if idx < (len(parts) - 1):
            ret_str += ', '
    return ret_str.strip()
[ "def", "punctuate_authorname", "(", "an", ")", ":", "name", "=", "an", ".", "strip", "(", ")", "parts", "=", "[", "x", "for", "x", "in", "name", ".", "split", "(", "','", ")", "if", "x", "!=", "''", "]", "ret_str", "=", "''", "for", "idx", ",", "part", "in", "enumerate", "(", "parts", ")", ":", "subparts", "=", "part", ".", "strip", "(", ")", ".", "split", "(", "' '", ")", "for", "sidx", ",", "substr", "in", "enumerate", "(", "subparts", ")", ":", "ret_str", "+=", "substr", "if", "len", "(", "substr", ")", "==", "1", ":", "ret_str", "+=", "'.'", "if", "sidx", "<", "(", "len", "(", "subparts", ")", "-", "1", ")", ":", "ret_str", "+=", "' '", "if", "idx", "<", "(", "len", "(", "parts", ")", "-", "1", ")", ":", "ret_str", "+=", "', '", "return", "ret_str", ".", "strip", "(", ")" ]
Punctuate author names properly. Expects input in the form 'Bloggs, J K' and will return 'Bloggs, J. K.'.
[ "Punctuate", "author", "names", "properly", "." ]
python
valid
32.473684
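Two quick calls showing the documented behaviour; the import path is inferred from harvestingkit/utils.py above and assumes harvesting-kit is installed.

from harvestingkit.utils import punctuate_authorname  # path inferred from the file listed above

print(punctuate_authorname('Bloggs, J K'))   # Bloggs, J. K.
print(punctuate_authorname('Smith, A'))      # Smith, A.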
akfullfo/taskforce
taskforce/task.py
https://github.com/akfullfo/taskforce/blob/bc6dd744bd33546447d085dbd18a350532220193/taskforce/task.py#L151-L369
def _exec_process(cmd_list, base_context, instance=0, log=None): """ Process execution tool. The forks and execs a process with args formatted according to a context. This is implemented as a module function to make it available to event_targets, legion and tasks. The args are: cmd_list - The path and arg vector context - Task's context instance - An integer instance number used with multi-process tasks log - Logging object (default is nothing logged). The context is used to format command args. In addition, these values will be used to change the process execution environment: procname - Changes the process name of the executed command (but not the path executed). user - Does a setuid for the process group - Does a setgid for the process cwd - Does a chdir before executing The passed context is extended to include these specific runtime values which are only available for cmd_list substitution. context_prefix+'pid' - The process ID of the child process context_prefix+'instance' - The instance number (0 if not provided) context_prefix+'uid' - The numeric uid (based on 'user' if set, getuid() otherwise) context_prefix+'gid' - The numeric gid (based on 'group' if set, getgid() otherwise) """ if not log: # pragma: no cover log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) # Get a copy of the context so changes here will not affect the # task's base context. # context = base_context.copy() # Make sure we have a normalized clone of the cmd_list # cmd_list = list(cmd_list) name = context.get(context_prefix+'name', cmd_list[0]) log.debug("Starting %s instance %d", name, instance) procname = _fmt_context(context.get(context_prefix+'procname'), context) user = _fmt_context(context.get(context_prefix+'user'), context) group = _fmt_context(context.get(context_prefix+'group'), context) cwd = _fmt_context(context.get(context_prefix+'cwd'), context) # Do the user setup early so we can throw an Exception on failure. # Identity errors are considered fatal as we do not want to run # a process at a higher priv if it was explicitly set to something # else. # proc_uid = os.geteuid() proc_gid = os.getegid() do_setuid = (proc_uid != os.getuid()) do_setgid = (proc_gid != os.getgid()) if user is not None: pw = None try: uid = int(user) try: pw = pwd.getpwuid(uid) except: pass # pragma: no cover except: pass if pw is None: try: pw = pwd.getpwnam(user) except Exception as e: raise TaskError(name, "Bad user %r -- %s" % (user, e)) if proc_uid != pw.pw_uid: proc_uid = pw.pw_uid do_setuid = True if proc_gid != pw.pw_gid: proc_gid = pw.pw_gid do_setgid = True if group is not None: gr = None try: gid = int(group) try: gr = grp.getgrgid(gid) except: pass except: pass if gr is None: try: gr = grp.getgrnam(group) except Exception as e: raise TaskError(name, "Bad group %r -- %s" % (group, e)) if proc_uid is not None and proc_gid != gr.gr_gid: log.info("gid for user %r (%d) overridden by group %r (%d)", user, proc_gid, group, gr.gr_gid) proc_gid = gr.gr_gid do_setgid = True if cwd is not None and not os.path.isdir(cwd): raise TaskError(name, "Directory for cwd setting '%s' does not exist" % (cwd,)) # Add in per-process context # context[context_prefix+'instance'] = instance context[context_prefix+'started'] = time.time() context[context_prefix+'uid'] = proc_uid context[context_prefix+'gid'] = proc_gid pid = os.fork() # Parent just returns pid if pid > 0: return pid # This section is processing the child. 
Exceptions from this point must # never escape to outside handlers or we might create zombie init tasks. # try: # Add the pid to the context now that we have it. # context[context_prefix+'pid'] = os.getpid() # Set up the requested process environment # if do_setgid: try: os.setgid(proc_gid) log.debug("Setgid to %d succeeded in child '%s', instance %d", proc_gid, name, instance) except Exception as e: log.error("Setgid to %d failed in child %r, instance %d -- %s", proc_gid, name, instance, e, exc_info=log.isEnabledFor(logging.DEBUG)) os._exit(81) if do_setuid: try: os.setuid(proc_uid) log.debug("Setuid to %d succeeded in child '%s', instance %d", proc_uid, name, instance) except Exception as e: log.error("Setuid to %d failed in child %r, instance %d -- %s", proc_uid, name, instance, e, exc_info=log.isEnabledFor(logging.DEBUG)) os._exit(82) if cwd is not None: try: os.chdir(cwd) log.debug("Chdir to '%s' succeeded in child '%s', instance %d", cwd, name, instance) except Exception as e: log.error("Chdir to '%s' failed in child %r, instance %d -- %s", cwd, name, instance, e, exc_info=log.isEnabledFor(logging.DEBUG)) os._exit(83) # Build formatted command # prog = _fmt_context(cmd_list[0], context) cmd = [] if procname: cmd_list.pop(0) cmd.append(_fmt_context(context['procname'], context)) for a in cmd_list: cmd.append(_fmt_context(a, context)) log.info("child, Execing: %s <%s>", prog, utils.format_cmd(cmd)) except Exception as e: # Log any exceptions here while we still can. After the closeall, # bets are off. # log.error("Child processing failed for task %r, instance %d -- %s", name, instance, e, exc_info=log.isEnabledFor(logging.DEBUG)) os._exit(84) try: retain_fds = [0,1,2] for log_fd in utils.log_filenos(log): if log_fd not in retain_fds: retain_fds.append(log_fd) utils.closeall(exclude=retain_fds) fd = None try: os.close(0) except: pass try: fd = os.open(std_process_dest, os.O_RDONLY) except Exception as e: log.error("child read open of %s failed -- %s", std_process_dest, e) if fd != 0: log.error("child failed to redirect stdin to %s", std_process_dest) try: os.close(1) except: pass try: fd = os.open('/dev/null', os.O_WRONLY) except Exception as e: log.error("child write open of %s failed -- %s", std_process_dest, e) if fd != 1: log.error("child failed to redirect stdout to %s", std_process_dest) # Build a fresh environment based on context, with None values excluded and # all other values as strings, formatted where appropriate: # env = {} for tag, val in context.items(): if val is None: continue val = _fmt_context(str(val), context) if val is not None: env[tag] = val except Exception as e: # At this point we can still send logs to stderr, so log these # too, just in case. # log.error("Child processing failed for task %r, instance %d -- %s", name, instance, e, exc_info=log.isEnabledFor(logging.DEBUG)) os._exit(85) try: try: os.close(2) except: pass try: os.dup(1) except: pass os.execvpe(prog, cmd, env) except: pass # There is no way to report an exception here, so hopefully the exit code will # be evidence enough. When child output logging is supported, this can be reworked. # os._exit(86)
[ "def", "_exec_process", "(", "cmd_list", ",", "base_context", ",", "instance", "=", "0", ",", "log", "=", "None", ")", ":", "if", "not", "log", ":", "# pragma: no cover", "log", "=", "logging", ".", "getLogger", "(", "__name__", ")", "log", ".", "addHandler", "(", "logging", ".", "NullHandler", "(", ")", ")", "# Get a copy of the context so changes here will not affect the", "# task's base context.", "#", "context", "=", "base_context", ".", "copy", "(", ")", "# Make sure we have a normalized clone of the cmd_list", "#", "cmd_list", "=", "list", "(", "cmd_list", ")", "name", "=", "context", ".", "get", "(", "context_prefix", "+", "'name'", ",", "cmd_list", "[", "0", "]", ")", "log", ".", "debug", "(", "\"Starting %s instance %d\"", ",", "name", ",", "instance", ")", "procname", "=", "_fmt_context", "(", "context", ".", "get", "(", "context_prefix", "+", "'procname'", ")", ",", "context", ")", "user", "=", "_fmt_context", "(", "context", ".", "get", "(", "context_prefix", "+", "'user'", ")", ",", "context", ")", "group", "=", "_fmt_context", "(", "context", ".", "get", "(", "context_prefix", "+", "'group'", ")", ",", "context", ")", "cwd", "=", "_fmt_context", "(", "context", ".", "get", "(", "context_prefix", "+", "'cwd'", ")", ",", "context", ")", "# Do the user setup early so we can throw an Exception on failure.", "# Identity errors are considered fatal as we do not want to run", "# a process at a higher priv if it was explicitly set to something", "# else.", "#", "proc_uid", "=", "os", ".", "geteuid", "(", ")", "proc_gid", "=", "os", ".", "getegid", "(", ")", "do_setuid", "=", "(", "proc_uid", "!=", "os", ".", "getuid", "(", ")", ")", "do_setgid", "=", "(", "proc_gid", "!=", "os", ".", "getgid", "(", ")", ")", "if", "user", "is", "not", "None", ":", "pw", "=", "None", "try", ":", "uid", "=", "int", "(", "user", ")", "try", ":", "pw", "=", "pwd", ".", "getpwuid", "(", "uid", ")", "except", ":", "pass", "# pragma: no cover", "except", ":", "pass", "if", "pw", "is", "None", ":", "try", ":", "pw", "=", "pwd", ".", "getpwnam", "(", "user", ")", "except", "Exception", "as", "e", ":", "raise", "TaskError", "(", "name", ",", "\"Bad user %r -- %s\"", "%", "(", "user", ",", "e", ")", ")", "if", "proc_uid", "!=", "pw", ".", "pw_uid", ":", "proc_uid", "=", "pw", ".", "pw_uid", "do_setuid", "=", "True", "if", "proc_gid", "!=", "pw", ".", "pw_gid", ":", "proc_gid", "=", "pw", ".", "pw_gid", "do_setgid", "=", "True", "if", "group", "is", "not", "None", ":", "gr", "=", "None", "try", ":", "gid", "=", "int", "(", "group", ")", "try", ":", "gr", "=", "grp", ".", "getgrgid", "(", "gid", ")", "except", ":", "pass", "except", ":", "pass", "if", "gr", "is", "None", ":", "try", ":", "gr", "=", "grp", ".", "getgrnam", "(", "group", ")", "except", "Exception", "as", "e", ":", "raise", "TaskError", "(", "name", ",", "\"Bad group %r -- %s\"", "%", "(", "group", ",", "e", ")", ")", "if", "proc_uid", "is", "not", "None", "and", "proc_gid", "!=", "gr", ".", "gr_gid", ":", "log", ".", "info", "(", "\"gid for user %r (%d) overridden by group %r (%d)\"", ",", "user", ",", "proc_gid", ",", "group", ",", "gr", ".", "gr_gid", ")", "proc_gid", "=", "gr", ".", "gr_gid", "do_setgid", "=", "True", "if", "cwd", "is", "not", "None", "and", "not", "os", ".", "path", ".", "isdir", "(", "cwd", ")", ":", "raise", "TaskError", "(", "name", ",", "\"Directory for cwd setting '%s' does not exist\"", "%", "(", "cwd", ",", ")", ")", "# Add in per-process context", "#", "context", "[", "context_prefix", "+", "'instance'", "]", "=", 
"instance", "context", "[", "context_prefix", "+", "'started'", "]", "=", "time", ".", "time", "(", ")", "context", "[", "context_prefix", "+", "'uid'", "]", "=", "proc_uid", "context", "[", "context_prefix", "+", "'gid'", "]", "=", "proc_gid", "pid", "=", "os", ".", "fork", "(", ")", "# Parent just returns pid", "if", "pid", ">", "0", ":", "return", "pid", "# This section is processing the child. Exceptions from this point must", "# never escape to outside handlers or we might create zombie init tasks.", "#", "try", ":", "# Add the pid to the context now that we have it.", "#", "context", "[", "context_prefix", "+", "'pid'", "]", "=", "os", ".", "getpid", "(", ")", "# Set up the requested process environment", "#", "if", "do_setgid", ":", "try", ":", "os", ".", "setgid", "(", "proc_gid", ")", "log", ".", "debug", "(", "\"Setgid to %d succeeded in child '%s', instance %d\"", ",", "proc_gid", ",", "name", ",", "instance", ")", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "\"Setgid to %d failed in child %r, instance %d -- %s\"", ",", "proc_gid", ",", "name", ",", "instance", ",", "e", ",", "exc_info", "=", "log", ".", "isEnabledFor", "(", "logging", ".", "DEBUG", ")", ")", "os", ".", "_exit", "(", "81", ")", "if", "do_setuid", ":", "try", ":", "os", ".", "setuid", "(", "proc_uid", ")", "log", ".", "debug", "(", "\"Setuid to %d succeeded in child '%s', instance %d\"", ",", "proc_uid", ",", "name", ",", "instance", ")", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "\"Setuid to %d failed in child %r, instance %d -- %s\"", ",", "proc_uid", ",", "name", ",", "instance", ",", "e", ",", "exc_info", "=", "log", ".", "isEnabledFor", "(", "logging", ".", "DEBUG", ")", ")", "os", ".", "_exit", "(", "82", ")", "if", "cwd", "is", "not", "None", ":", "try", ":", "os", ".", "chdir", "(", "cwd", ")", "log", ".", "debug", "(", "\"Chdir to '%s' succeeded in child '%s', instance %d\"", ",", "cwd", ",", "name", ",", "instance", ")", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "\"Chdir to '%s' failed in child %r, instance %d -- %s\"", ",", "cwd", ",", "name", ",", "instance", ",", "e", ",", "exc_info", "=", "log", ".", "isEnabledFor", "(", "logging", ".", "DEBUG", ")", ")", "os", ".", "_exit", "(", "83", ")", "# Build formatted command", "#", "prog", "=", "_fmt_context", "(", "cmd_list", "[", "0", "]", ",", "context", ")", "cmd", "=", "[", "]", "if", "procname", ":", "cmd_list", ".", "pop", "(", "0", ")", "cmd", ".", "append", "(", "_fmt_context", "(", "context", "[", "'procname'", "]", ",", "context", ")", ")", "for", "a", "in", "cmd_list", ":", "cmd", ".", "append", "(", "_fmt_context", "(", "a", ",", "context", ")", ")", "log", ".", "info", "(", "\"child, Execing: %s <%s>\"", ",", "prog", ",", "utils", ".", "format_cmd", "(", "cmd", ")", ")", "except", "Exception", "as", "e", ":", "# Log any exceptions here while we still can. 
After the closeall,", "# bets are off.", "#", "log", ".", "error", "(", "\"Child processing failed for task %r, instance %d -- %s\"", ",", "name", ",", "instance", ",", "e", ",", "exc_info", "=", "log", ".", "isEnabledFor", "(", "logging", ".", "DEBUG", ")", ")", "os", ".", "_exit", "(", "84", ")", "try", ":", "retain_fds", "=", "[", "0", ",", "1", ",", "2", "]", "for", "log_fd", "in", "utils", ".", "log_filenos", "(", "log", ")", ":", "if", "log_fd", "not", "in", "retain_fds", ":", "retain_fds", ".", "append", "(", "log_fd", ")", "utils", ".", "closeall", "(", "exclude", "=", "retain_fds", ")", "fd", "=", "None", "try", ":", "os", ".", "close", "(", "0", ")", "except", ":", "pass", "try", ":", "fd", "=", "os", ".", "open", "(", "std_process_dest", ",", "os", ".", "O_RDONLY", ")", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "\"child read open of %s failed -- %s\"", ",", "std_process_dest", ",", "e", ")", "if", "fd", "!=", "0", ":", "log", ".", "error", "(", "\"child failed to redirect stdin to %s\"", ",", "std_process_dest", ")", "try", ":", "os", ".", "close", "(", "1", ")", "except", ":", "pass", "try", ":", "fd", "=", "os", ".", "open", "(", "'/dev/null'", ",", "os", ".", "O_WRONLY", ")", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "\"child write open of %s failed -- %s\"", ",", "std_process_dest", ",", "e", ")", "if", "fd", "!=", "1", ":", "log", ".", "error", "(", "\"child failed to redirect stdout to %s\"", ",", "std_process_dest", ")", "# Build a fresh environment based on context, with None values excluded and", "# all other values as strings, formatted where appropriate:", "#", "env", "=", "{", "}", "for", "tag", ",", "val", "in", "context", ".", "items", "(", ")", ":", "if", "val", "is", "None", ":", "continue", "val", "=", "_fmt_context", "(", "str", "(", "val", ")", ",", "context", ")", "if", "val", "is", "not", "None", ":", "env", "[", "tag", "]", "=", "val", "except", "Exception", "as", "e", ":", "# At this point we can still send logs to stderr, so log these", "# too, just in case.", "#", "log", ".", "error", "(", "\"Child processing failed for task %r, instance %d -- %s\"", ",", "name", ",", "instance", ",", "e", ",", "exc_info", "=", "log", ".", "isEnabledFor", "(", "logging", ".", "DEBUG", ")", ")", "os", ".", "_exit", "(", "85", ")", "try", ":", "try", ":", "os", ".", "close", "(", "2", ")", "except", ":", "pass", "try", ":", "os", ".", "dup", "(", "1", ")", "except", ":", "pass", "os", ".", "execvpe", "(", "prog", ",", "cmd", ",", "env", ")", "except", ":", "pass", "# There is no way to report an exception here, so hopefully the exit code will", "# be evidence enough. When child output logging is supported, this can be reworked.", "#", "os", ".", "_exit", "(", "86", ")" ]
Process execution tool. The forks and execs a process with args formatted according to a context. This is implemented as a module function to make it available to event_targets, legion and tasks. The args are: cmd_list - The path and arg vector context - Task's context instance - An integer instance number used with multi-process tasks log - Logging object (default is nothing logged). The context is used to format command args. In addition, these values will be used to change the process execution environment: procname - Changes the process name of the executed command (but not the path executed). user - Does a setuid for the process group - Does a setgid for the process cwd - Does a chdir before executing The passed context is extended to include these specific runtime values which are only available for cmd_list substitution. context_prefix+'pid' - The process ID of the child process context_prefix+'instance' - The instance number (0 if not provided) context_prefix+'uid' - The numeric uid (based on 'user' if set, getuid() otherwise) context_prefix+'gid' - The numeric gid (based on 'group' if set, getgid() otherwise)
[ "Process", "execution", "tool", "." ]
python
train
37.794521
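The overall shape of `_exec_process` (fork, parent returns the pid, child sets itself up and replaces itself with execvpe, exiting with `os._exit` so no exception ever escapes) can be shown in a few lines. This is a generic POSIX sketch, not taskforce's API:

import os

def spawn(cmd, env=None):
    """Fork and exec cmd; return the child's pid to the caller."""
    pid = os.fork()
    if pid > 0:
        return pid                      # parent: nothing else to do here
    try:
        # child: per-process setup would go here (cwd, uid/gid, fd redirection, ...)
        os.execvpe(cmd[0], cmd, env if env is not None else dict(os.environ))
    except Exception:
        pass
    os._exit(86)                        # reached only if exec failed; never raise out of the child

if __name__ == '__main__':
    child = spawn(['echo', 'hello from the child'])
    os.waitpid(child, 0)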
pjmark/NIMPA
niftypet/nimpa/prc/imio.py
https://github.com/pjmark/NIMPA/blob/3f4231fed2934a1d92e4cd8e9e153b0118e29d86/niftypet/nimpa/prc/imio.py#L220-L233
def nii_gzip(imfile, outpath=''):
    '''Compress *.gz file'''
    import gzip
    with open(imfile, 'rb') as f:
        d = f.read()
    # Now store the compressed data
    if outpath=='':
        fout = imfile+'.gz'
    else:
        fout = os.path.join(outpath, os.path.basename(imfile)+'.gz')
    # store compressed file data from 'd' variable
    with gzip.open(fout, 'wb') as f:
        f.write(d)
    return fout
[ "def", "nii_gzip", "(", "imfile", ",", "outpath", "=", "''", ")", ":", "import", "gzip", "with", "open", "(", "imfile", ",", "'rb'", ")", "as", "f", ":", "d", "=", "f", ".", "read", "(", ")", "# Now store the compressed data", "if", "outpath", "==", "''", ":", "fout", "=", "imfile", "+", "'.gz'", "else", ":", "fout", "=", "os", ".", "path", ".", "join", "(", "outpath", ",", "os", ".", "path", ".", "basename", "(", "imfile", ")", "+", "'.gz'", ")", "# store compressed file data from 'd' variable", "with", "gzip", ".", "open", "(", "fout", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "d", ")", "return", "fout" ]
Compress *.gz file
[ "Compress", "*", ".", "gz", "file" ]
python
train
29
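A usage sketch, assuming NIMPA is installed, that the function is importable from `niftypet.nimpa.prc.imio` as the path above suggests, and that 'image.nii' is a placeholder filename.

from niftypet.nimpa.prc.imio import nii_gzip   # import path inferred from the file path above

fout = nii_gzip('image.nii')                   # writes image.nii.gz next to the input
fout = nii_gzip('image.nii', outpath='/tmp')   # or /tmp/image.nii.gz
print(fout)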
Dentosal/python-sc2
sc2/bot_ai.py
https://github.com/Dentosal/python-sc2/blob/608bd25f04e89d39cef68b40101d8e9a8a7f1634/sc2/bot_ai.py#L366-L386
def already_pending_upgrade(self, upgrade_type: UpgradeId) -> Union[int, float]:
    """ Check if an upgrade is being researched
    Return values:
    0: not started
    0 < x < 1: researching
    1: finished
    """
    assert isinstance(upgrade_type, UpgradeId)
    if upgrade_type in self.state.upgrades:
        return 1
    level = None
    if "LEVEL" in upgrade_type.name:
        level = upgrade_type.name[-1]
    creationAbilityID = self._game_data.upgrades[upgrade_type.value].research_ability.id
    for structure in self.units.structure.ready:
        for order in structure.orders:
            if order.ability.id is creationAbilityID:
                if level and order.ability.button_name[-1] != level:
                    return 0
                return order.progress
    return 0
[ "def", "already_pending_upgrade", "(", "self", ",", "upgrade_type", ":", "UpgradeId", ")", "->", "Union", "[", "int", ",", "float", "]", ":", "assert", "isinstance", "(", "upgrade_type", ",", "UpgradeId", ")", "if", "upgrade_type", "in", "self", ".", "state", ".", "upgrades", ":", "return", "1", "level", "=", "None", "if", "\"LEVEL\"", "in", "upgrade_type", ".", "name", ":", "level", "=", "upgrade_type", ".", "name", "[", "-", "1", "]", "creationAbilityID", "=", "self", ".", "_game_data", ".", "upgrades", "[", "upgrade_type", ".", "value", "]", ".", "research_ability", ".", "id", "for", "structure", "in", "self", ".", "units", ".", "structure", ".", "ready", ":", "for", "order", "in", "structure", ".", "orders", ":", "if", "order", ".", "ability", ".", "id", "is", "creationAbilityID", ":", "if", "level", "and", "order", ".", "ability", ".", "button_name", "[", "-", "1", "]", "!=", "level", ":", "return", "0", "return", "order", ".", "progress", "return", "0" ]
Check if an upgrade is being researched Return values: 0: not started 0 < x < 1: researching 1: finished
[ "Check", "if", "an", "upgrade", "is", "being", "researched", "Return", "values", ":", "0", ":", "not", "started", "0", "<", "x", "<", "1", ":", "researching", "1", ":", "finished" ]
python
train
40.809524
DataBiosphere/toil
attic/toil-sort-example.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/attic/toil-sort-example.py#L24-L60
def down(job, input_file_id, n, down_checkpoints): """Input is a file and a range into that file to sort and an output location in which to write the sorted file. If the range is larger than a threshold N the range is divided recursively and a follow on job is then created which merges back the results. Otherwise, the file is sorted and placed in the output. """ # Read the file input_file = job.fileStore.readGlobalFile(input_file_id, cache=False) length = os.path.getsize(input_file) if length > n: # We will subdivide the file job.fileStore.logToMaster("Splitting file: %s of size: %s" % (input_file_id, length), level=logging.CRITICAL) # Split the file into two copies mid_point = get_midpoint(input_file, 0, length) t1 = job.fileStore.getLocalTempFile() with open(t1, 'w') as fH: copy_subrange_of_file(input_file, 0, mid_point + 1, fH) t2 = job.fileStore.getLocalTempFile() with open(t2, 'w') as fH: copy_subrange_of_file(input_file, mid_point + 1, length, fH) # Call the down function recursively return job.addFollowOnJobFn(up, job.addChildJobFn(down, job.fileStore.writeGlobalFile(t1), n, down_checkpoints=down_checkpoints, memory='600M').rv(), job.addChildJobFn(down, job.fileStore.writeGlobalFile(t2), n, down_checkpoints=down_checkpoints, memory='600M').rv()).rv() else: # We can sort this bit of the file job.fileStore.logToMaster("Sorting file: %s of size: %s" % (input_file_id, length), level=logging.CRITICAL) # Sort the copy and write back to the fileStore output_file = job.fileStore.getLocalTempFile() sort(input_file, output_file) return job.fileStore.writeGlobalFile(output_file)
[ "def", "down", "(", "job", ",", "input_file_id", ",", "n", ",", "down_checkpoints", ")", ":", "# Read the file", "input_file", "=", "job", ".", "fileStore", ".", "readGlobalFile", "(", "input_file_id", ",", "cache", "=", "False", ")", "length", "=", "os", ".", "path", ".", "getsize", "(", "input_file", ")", "if", "length", ">", "n", ":", "# We will subdivide the file", "job", ".", "fileStore", ".", "logToMaster", "(", "\"Splitting file: %s of size: %s\"", "%", "(", "input_file_id", ",", "length", ")", ",", "level", "=", "logging", ".", "CRITICAL", ")", "# Split the file into two copies", "mid_point", "=", "get_midpoint", "(", "input_file", ",", "0", ",", "length", ")", "t1", "=", "job", ".", "fileStore", ".", "getLocalTempFile", "(", ")", "with", "open", "(", "t1", ",", "'w'", ")", "as", "fH", ":", "copy_subrange_of_file", "(", "input_file", ",", "0", ",", "mid_point", "+", "1", ",", "fH", ")", "t2", "=", "job", ".", "fileStore", ".", "getLocalTempFile", "(", ")", "with", "open", "(", "t2", ",", "'w'", ")", "as", "fH", ":", "copy_subrange_of_file", "(", "input_file", ",", "mid_point", "+", "1", ",", "length", ",", "fH", ")", "# Call the down function recursively", "return", "job", ".", "addFollowOnJobFn", "(", "up", ",", "job", ".", "addChildJobFn", "(", "down", ",", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "t1", ")", ",", "n", ",", "down_checkpoints", "=", "down_checkpoints", ",", "memory", "=", "'600M'", ")", ".", "rv", "(", ")", ",", "job", ".", "addChildJobFn", "(", "down", ",", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "t2", ")", ",", "n", ",", "down_checkpoints", "=", "down_checkpoints", ",", "memory", "=", "'600M'", ")", ".", "rv", "(", ")", ")", ".", "rv", "(", ")", "else", ":", "# We can sort this bit of the file", "job", ".", "fileStore", ".", "logToMaster", "(", "\"Sorting file: %s of size: %s\"", "%", "(", "input_file_id", ",", "length", ")", ",", "level", "=", "logging", ".", "CRITICAL", ")", "# Sort the copy and write back to the fileStore", "output_file", "=", "job", ".", "fileStore", ".", "getLocalTempFile", "(", ")", "sort", "(", "input_file", ",", "output_file", ")", "return", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "output_file", ")" ]
Input is a file and a range into that file to sort and an output location in which to write the sorted file. If the range is larger than a threshold N the range is divided recursively and a follow on job is then created which merges back the results. Otherwise, the file is sorted and placed in the output.
[ "Input", "is", "a", "file", "and", "a", "range", "into", "that", "file", "to", "sort", "and", "an", "output", "location", "in", "which", "to", "write", "the", "sorted", "file", ".", "If", "the", "range", "is", "larger", "than", "a", "threshold", "N", "the", "range", "is", "divided", "recursively", "and", "a", "follow", "on", "job", "is", "then", "created", "which", "merges", "back", "the", "results", ".", "Otherwise", "the", "file", "is", "sorted", "and", "placed", "in", "the", "output", "." ]
python
train
54.324324
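The down/up pair above is a recursive merge sort expressed as a Toil job graph. A minimal plain-Python sketch of the same split-then-merge control flow, with lists standing in for the file IDs and child/follow-on jobs (nothing below is Toil API):

    def recursive_sort(items, threshold=4):
        # Small enough: sort directly (the 'else' branch of down()).
        if len(items) <= threshold:
            return sorted(items)
        # Otherwise split at the midpoint and recurse (the two child jobs).
        mid = len(items) // 2
        left = recursive_sort(items[:mid], threshold)
        right = recursive_sort(items[mid:], threshold)
        # Merge the two sorted halves (the follow-on 'up' job).
        merged, i, j = [], 0, 0
        while i < len(left) and j < len(right):
            if left[i] <= right[j]:
                merged.append(left[i]); i += 1
            else:
                merged.append(right[j]); j += 1
        return merged + left[i:] + right[j:]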
uuazed/numerapi
numerapi/cli.py
https://github.com/uuazed/numerapi/blob/fc9dcc53b32ede95bfda1ceeb62aec1d67d26697/numerapi/cli.py#L131-L133
def check_new_round(hours=24, tournament=1): """Check if a new round has started within the last `hours`.""" click.echo(int(napi.check_new_round(hours=hours, tournament=tournament)))
[ "def", "check_new_round", "(", "hours", "=", "24", ",", "tournament", "=", "1", ")", ":", "click", ".", "echo", "(", "int", "(", "napi", ".", "check_new_round", "(", "hours", "=", "hours", ",", "tournament", "=", "tournament", ")", ")", ")" ]
Check if a new round has started within the last `hours`.
[ "Check", "if", "a", "new", "round", "has", "started", "within", "the", "last", "hours", "." ]
python
train
62.666667
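A hedged usage sketch of the client call this command wraps; the NumerAPI construction is assumed from the package's public interface, and the CLI simply echoes the integer form of the boolean:

    import numerapi

    napi = numerapi.NumerAPI()  # no credentials needed for this read-only query
    # 1 if a round opened within the last 24 hours for tournament 1, else 0
    print(int(napi.check_new_round(hours=24, tournament=1)))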
pyhys/minimalmodbus
dummy_serial.py
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/dummy_serial.py#L129-L138
def close(self): """Close a port on dummy_serial.""" if VERBOSE: _print_out('\nDummy_serial: Closing port\n') if not self._isOpen: raise IOError('Dummy_serial: The port is already closed') self._isOpen = False self.port = None
[ "def", "close", "(", "self", ")", ":", "if", "VERBOSE", ":", "_print_out", "(", "'\\nDummy_serial: Closing port\\n'", ")", "if", "not", "self", ".", "_isOpen", ":", "raise", "IOError", "(", "'Dummy_serial: The port is already closed'", ")", "self", ".", "_isOpen", "=", "False", "self", ".", "port", "=", "None" ]
Close a port on dummy_serial.
[ "Close", "a", "port", "on", "dummy_serial", "." ]
python
train
29.5
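The close() above treats a second close as a caller error rather than a no-op. A small self-contained sketch of that guard pattern (the class and attribute names here are invented for illustration):

    class FakePort:
        def __init__(self):
            self._isOpen = True
            self.port = 'COM1'

        def close(self):
            # Mirrors the dummy port: double-closing raises instead of passing silently.
            if not self._isOpen:
                raise IOError('The port is already closed')
            self._isOpen = False
            self.port = None

    p = FakePort()
    p.close()        # fine
    # p.close()      # would raise IOError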
crackinglandia/pype32
pype32/utils.py
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/utils.py#L242-L251
def readQword(self): """ Reads a qword value from the L{ReadData} stream object. @rtype: int @return: The qword value read from the L{ReadData} stream. """ qword = unpack(self.endianness + ('Q' if not self.signed else 'q'), self.readAt(self.offset, 8))[0] self.offset += 8 return qword
[ "def", "readQword", "(", "self", ")", ":", "qword", "=", "unpack", "(", "self", ".", "endianness", "+", "(", "'Q'", "if", "not", "self", ".", "signed", "else", "'b'", ")", ",", "self", ".", "readAt", "(", "self", ".", "offset", ",", "8", ")", ")", "[", "0", "]", "self", ".", "offset", "+=", "8", "return", "qword" ]
Reads a qword value from the L{ReadData} stream object. @rtype: int @return: The qword value read from the L{ReadData} stream.
[ "Reads", "a", "qword", "value", "from", "the", "L", "{", "ReadData", "}", "stream", "object", "." ]
python
train
35
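For reference, the struct format character for an 8-byte integer is 'Q' unsigned and 'q' signed (the one-byte 'b' code cannot unpack an eight-byte buffer); a quick standard-library illustration of unpacking the same bytes both ways:

    import struct

    raw = b'\xff' * 8                 # eight bytes, all ones
    struct.unpack('<Q', raw)[0]       # 18446744073709551615 (unsigned, little-endian)
    struct.unpack('<q', raw)[0]       # -1 (signed, little-endian)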
saltstack/salt
salt/client/ssh/wrapper/state.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/ssh/wrapper/state.py#L1079-L1198
def single(fun, name, test=None, **kwargs): ''' .. versionadded:: 2015.5.0 Execute a single state function with the named kwargs, returns False if insufficient data is sent to the command By default, the values of the kwargs will be parsed as YAML. So, you can specify lists values, or lists of single entry key-value maps, as you would in a YAML salt file. Alternatively, JSON format of keyword values is also supported. CLI Example: .. code-block:: bash salt '*' state.single pkg.installed name=vim ''' st_kwargs = __salt__.kwargs __opts__['grains'] = __grains__ # state.fun -> [state, fun] comps = fun.split('.') if len(comps) < 2: __context__['retcode'] = 1 return 'Invalid function passed' # Create the low chunk, using kwargs as a base kwargs.update({'state': comps[0], 'fun': comps[1], '__id__': name, 'name': name}) opts = salt.utils.state.get_sls_opts(__opts__, **kwargs) # Set test mode if salt.utils.args.test_mode(test=test, **kwargs): opts['test'] = True else: opts['test'] = __opts__.get('test', None) # Get the override pillar data __pillar__.update(kwargs.get('pillar', {})) # Create the State environment st_ = salt.client.ssh.state.SSHState(opts, __pillar__) # Verify the low chunk err = st_.verify_data(kwargs) if err: __context__['retcode'] = 1 return err # Must be a list of low-chunks chunks = [kwargs] # Retrieve file refs for the state run, so we can copy relevant files down # to the minion before executing the state file_refs = salt.client.ssh.state.lowstate_file_refs( chunks, _merge_extra_filerefs( kwargs.get('extra_filerefs', ''), opts.get('extra_filerefs', '') ) ) roster = salt.roster.Roster(opts, opts.get('roster', 'flat')) roster_grains = roster.opts['grains'] # Create the tar containing the state pkg and relevant files. trans_tar = salt.client.ssh.state.prep_trans_tar( __context__['fileclient'], chunks, file_refs, __pillar__, st_kwargs['id_'], roster_grains) # Create a hash so we can verify the tar on the target system trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, opts['hash_type']) # We use state.pkg to execute the "state package" cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format( opts['thin_dir'], test, trans_tar_sum, opts['hash_type']) # Create a salt-ssh Single object to actually do the ssh work single = salt.client.ssh.Single( opts, cmd, fsclient=__context__['fileclient'], minion_opts=__salt__.minion_opts, **st_kwargs) # Copy the tar down single.shell.send( trans_tar, '{0}/salt_state.tgz'.format(opts['thin_dir'])) # Run the state.pkg command on the target stdout, stderr, _ = single.cmd_block() # Clean up our tar try: os.remove(trans_tar) except (OSError, IOError): pass # Read in the JSON data and return the data structure try: return salt.utils.json.loads(stdout) except Exception as e: log.error("JSON Render failed for: %s\n%s", stdout, stderr) log.error(six.text_type(e)) # If for some reason the json load fails, return the stdout return stdout
[ "def", "single", "(", "fun", ",", "name", ",", "test", "=", "None", ",", "*", "*", "kwargs", ")", ":", "st_kwargs", "=", "__salt__", ".", "kwargs", "__opts__", "[", "'grains'", "]", "=", "__grains__", "# state.fun -> [state, fun]", "comps", "=", "fun", ".", "split", "(", "'.'", ")", "if", "len", "(", "comps", ")", "<", "2", ":", "__context__", "[", "'retcode'", "]", "=", "1", "return", "'Invalid function passed'", "# Create the low chunk, using kwargs as a base", "kwargs", ".", "update", "(", "{", "'state'", ":", "comps", "[", "0", "]", ",", "'fun'", ":", "comps", "[", "1", "]", ",", "'__id__'", ":", "name", ",", "'name'", ":", "name", "}", ")", "opts", "=", "salt", ".", "utils", ".", "state", ".", "get_sls_opts", "(", "__opts__", ",", "*", "*", "kwargs", ")", "# Set test mode", "if", "salt", ".", "utils", ".", "args", ".", "test_mode", "(", "test", "=", "test", ",", "*", "*", "kwargs", ")", ":", "opts", "[", "'test'", "]", "=", "True", "else", ":", "opts", "[", "'test'", "]", "=", "__opts__", ".", "get", "(", "'test'", ",", "None", ")", "# Get the override pillar data", "__pillar__", ".", "update", "(", "kwargs", ".", "get", "(", "'pillar'", ",", "{", "}", ")", ")", "# Create the State environment", "st_", "=", "salt", ".", "client", ".", "ssh", ".", "state", ".", "SSHState", "(", "opts", ",", "__pillar__", ")", "# Verify the low chunk", "err", "=", "st_", ".", "verify_data", "(", "kwargs", ")", "if", "err", ":", "__context__", "[", "'retcode'", "]", "=", "1", "return", "err", "# Must be a list of low-chunks", "chunks", "=", "[", "kwargs", "]", "# Retrieve file refs for the state run, so we can copy relevant files down", "# to the minion before executing the state", "file_refs", "=", "salt", ".", "client", ".", "ssh", ".", "state", ".", "lowstate_file_refs", "(", "chunks", ",", "_merge_extra_filerefs", "(", "kwargs", ".", "get", "(", "'extra_filerefs'", ",", "''", ")", ",", "opts", ".", "get", "(", "'extra_filerefs'", ",", "''", ")", ")", ")", "roster", "=", "salt", ".", "roster", ".", "Roster", "(", "opts", ",", "opts", ".", "get", "(", "'roster'", ",", "'flat'", ")", ")", "roster_grains", "=", "roster", ".", "opts", "[", "'grains'", "]", "# Create the tar containing the state pkg and relevant files.", "trans_tar", "=", "salt", ".", "client", ".", "ssh", ".", "state", ".", "prep_trans_tar", "(", "__context__", "[", "'fileclient'", "]", ",", "chunks", ",", "file_refs", ",", "__pillar__", ",", "st_kwargs", "[", "'id_'", "]", ",", "roster_grains", ")", "# Create a hash so we can verify the tar on the target system", "trans_tar_sum", "=", "salt", ".", "utils", ".", "hashutils", ".", "get_hash", "(", "trans_tar", ",", "opts", "[", "'hash_type'", "]", ")", "# We use state.pkg to execute the \"state package\"", "cmd", "=", "'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'", ".", "format", "(", "opts", "[", "'thin_dir'", "]", ",", "test", ",", "trans_tar_sum", ",", "opts", "[", "'hash_type'", "]", ")", "# Create a salt-ssh Single object to actually do the ssh work", "single", "=", "salt", ".", "client", ".", "ssh", ".", "Single", "(", "opts", ",", "cmd", ",", "fsclient", "=", "__context__", "[", "'fileclient'", "]", ",", "minion_opts", "=", "__salt__", ".", "minion_opts", ",", "*", "*", "st_kwargs", ")", "# Copy the tar down", "single", ".", "shell", ".", "send", "(", "trans_tar", ",", "'{0}/salt_state.tgz'", ".", "format", "(", "opts", "[", "'thin_dir'", "]", ")", ")", "# Run the state.pkg command on the target", "stdout", ",", "stderr", ",", "_", "=", "single", ".", 
"cmd_block", "(", ")", "# Clean up our tar", "try", ":", "os", ".", "remove", "(", "trans_tar", ")", "except", "(", "OSError", ",", "IOError", ")", ":", "pass", "# Read in the JSON data and return the data structure", "try", ":", "return", "salt", ".", "utils", ".", "json", ".", "loads", "(", "stdout", ")", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "\"JSON Render failed for: %s\\n%s\"", ",", "stdout", ",", "stderr", ")", "log", ".", "error", "(", "six", ".", "text_type", "(", "e", ")", ")", "# If for some reason the json load fails, return the stdout", "return", "stdout" ]
.. versionadded:: 2015.5.0 Execute a single state function with the named kwargs, returns False if insufficient data is sent to the command By default, the values of the kwargs will be parsed as YAML. So, you can specify lists values, or lists of single entry key-value maps, as you would in a YAML salt file. Alternatively, JSON format of keyword values is also supported. CLI Example: .. code-block:: bash salt '*' state.single pkg.installed name=vim
[ "..", "versionadded", "::", "2015", ".", "5", ".", "0" ]
python
train
29.341667
CodyKochmann/graphdb
graphdb/SQLiteGraphDB.py
https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/SQLiteGraphDB.py#L212-L222
def delete_item(self, item): ''' removes an item from the db ''' for relation in self.relations_of(item): self.delete_relation(item, relation) for origin, relation in self.relations_to(item, True): self.delete_relation(origin, relation, item) with self._write_lock: self._execute(''' DELETE from objects where code=? ''', (self.serialize(item),)) self.autocommit()
[ "def", "delete_item", "(", "self", ",", "item", ")", ":", "for", "relation", "in", "self", ".", "relations_of", "(", "item", ")", ":", "self", ".", "delete_relation", "(", "item", ",", "relation", ")", "for", "origin", ",", "relation", "in", "self", ".", "relations_to", "(", "item", ",", "True", ")", ":", "self", ".", "delete_relation", "(", "origin", ",", "relation", ",", "item", ")", "with", "self", ".", "_write_lock", ":", "self", ".", "_execute", "(", "'''\n DELETE from objects where code=?\n '''", ",", "(", "self", ".", "serialize", "(", "item", ")", ",", ")", ")", "self", ".", "autocommit", "(", ")" ]
removes an item from the db
[ "removes", "an", "item", "from", "the", "db" ]
python
train
42
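A rough sqlite3 sketch of the same cascade (delete the relations touching an item before the item row itself); the table layout below is invented for illustration and is not the library's actual schema:

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.executescript('''
        CREATE TABLE objects   (id INTEGER PRIMARY KEY, code TEXT);
        CREATE TABLE relations (src INTEGER, name TEXT, dst INTEGER);
    ''')

    def delete_item(item_id):
        # Drop relations in both directions first, then the object row itself.
        conn.execute('DELETE FROM relations WHERE src=? OR dst=?', (item_id, item_id))
        conn.execute('DELETE FROM objects WHERE id=?', (item_id,))
        conn.commit()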
CenturyLinkCloud/clc-python-sdk
src/clc/APIv2/anti_affinity.py
https://github.com/CenturyLinkCloud/clc-python-sdk/blob/f4dba40c627cb08dd4b7d0d277e8d67578010b05/src/clc/APIv2/anti_affinity.py#L28-L49
def GetAll(alias=None,location=None,session=None): """Gets a list of anti-affinity policies within a given account. https://t3n.zendesk.com/entries/44657214-Get-Anti-Affinity-Policies >>> clc.v2.AntiAffinity.GetAll() [<clc.APIv2.anti_affinity.AntiAffinity object at 0x10c65e910>, <clc.APIv2.anti_affinity.AntiAffinity object at 0x10c65ec90>] """ if not alias: alias = clc.v2.Account.GetAlias(session=session) policies = [] policy_resp = clc.v2.API.Call('GET','antiAffinityPolicies/%s' % alias,{},session=session) for k in policy_resp: r_val = policy_resp[k] for r in r_val: if r.get('location'): if location and r['location'].lower()!=location.lower(): continue servers = [obj['id'] for obj in r['links'] if obj['rel'] == "server"] policies.append(AntiAffinity(id=r['id'],name=r['name'],location=r['location'],servers=servers,session=session)) return(policies)
[ "def", "GetAll", "(", "alias", "=", "None", ",", "location", "=", "None", ",", "session", "=", "None", ")", ":", "if", "not", "alias", ":", "alias", "=", "clc", ".", "v2", ".", "Account", ".", "GetAlias", "(", "session", "=", "session", ")", "policies", "=", "[", "]", "policy_resp", "=", "clc", ".", "v2", ".", "API", ".", "Call", "(", "'GET'", ",", "'antiAffinityPolicies/%s'", "%", "alias", ",", "{", "}", ",", "session", "=", "session", ")", "for", "k", "in", "policy_resp", ":", "r_val", "=", "policy_resp", "[", "k", "]", "for", "r", "in", "r_val", ":", "if", "r", ".", "get", "(", "'location'", ")", ":", "if", "location", "and", "r", "[", "'location'", "]", ".", "lower", "(", ")", "!=", "location", ".", "lower", "(", ")", ":", "continue", "servers", "=", "[", "obj", "[", "'id'", "]", "for", "obj", "in", "r", "[", "'links'", "]", "if", "obj", "[", "'rel'", "]", "==", "\"server\"", "]", "policies", ".", "append", "(", "AntiAffinity", "(", "id", "=", "r", "[", "'id'", "]", ",", "name", "=", "r", "[", "'name'", "]", ",", "location", "=", "r", "[", "'location'", "]", ",", "servers", "=", "servers", ",", "session", "=", "session", ")", ")", "return", "(", "policies", ")" ]
Gets a list of anti-affinity policies within a given account. https://t3n.zendesk.com/entries/44657214-Get-Anti-Affinity-Policies >>> clc.v2.AntiAffinity.GetAll() [<clc.APIv2.anti_affinity.AntiAffinity object at 0x10c65e910>, <clc.APIv2.anti_affinity.AntiAffinity object at 0x10c65ec90>]
[ "Gets", "a", "list", "of", "anti", "-", "affinity", "policies", "within", "a", "given", "account", "." ]
python
train
40.5
pvlib/pvlib-python
pvlib/irradiance.py
https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/irradiance.py#L1007-L1197
def perez(surface_tilt, surface_azimuth, dhi, dni, dni_extra, solar_zenith, solar_azimuth, airmass, model='allsitescomposite1990', return_components=False): ''' Determine diffuse irradiance from the sky on a tilted surface using one of the Perez models. Perez models determine the diffuse irradiance from the sky (ground reflected irradiance is not included in this algorithm) on a tilted surface using the surface tilt angle, surface azimuth angle, diffuse horizontal irradiance, direct normal irradiance, extraterrestrial irradiance, sun zenith angle, sun azimuth angle, and relative (not pressure-corrected) airmass. Optionally a selector may be used to use any of Perez's model coefficient sets. Parameters ---------- surface_tilt : numeric Surface tilt angles in decimal degrees. surface_tilt must be >=0 and <=180. The tilt angle is defined as degrees from horizontal (e.g. surface facing up = 0, surface facing horizon = 90) surface_azimuth : numeric Surface azimuth angles in decimal degrees. surface_azimuth must be >=0 and <=360. The azimuth convention is defined as degrees east of north (e.g. North = 0, South=180 East = 90, West = 270). dhi : numeric Diffuse horizontal irradiance in W/m^2. DHI must be >=0. dni : numeric Direct normal irradiance in W/m^2. DNI must be >=0. dni_extra : numeric Extraterrestrial normal irradiance in W/m^2. solar_zenith : numeric apparent (refraction-corrected) zenith angles in decimal degrees. solar_zenith must be >=0 and <=180. solar_azimuth : numeric Sun azimuth angles in decimal degrees. solar_azimuth must be >=0 and <=360. The azimuth convention is defined as degrees east of north (e.g. North = 0, East = 90, West = 270). airmass : numeric Relative (not pressure-corrected) airmass values. If AM is a DataFrame it must be of the same size as all other DataFrame inputs. AM must be >=0 (careful using the 1/sec(z) model of AM generation) model : string (optional, default='allsitescomposite1990') A string which selects the desired set of Perez coefficients. If model is not provided as an input, the default, '1990' will be used. All possible model selections are: * '1990' * 'allsitescomposite1990' (same as '1990') * 'allsitescomposite1988' * 'sandiacomposite1988' * 'usacomposite1988' * 'france1988' * 'phoenix1988' * 'elmonte1988' * 'osage1988' * 'albuquerque1988' * 'capecanaveral1988' * 'albany1988' return_components: bool (optional, default=False) Flag used to decide whether to return the calculated diffuse components or not. Returns -------- numeric, OrderedDict, or DataFrame Return type controlled by `return_components` argument. If ``return_components=False``, `sky_diffuse` is returned. If ``return_components=True``, `diffuse_components` is returned. sky_diffuse : numeric The sky diffuse component of the solar radiation on a tilted surface. diffuse_components : OrderedDict (array input) or DataFrame (Series input) Keys/columns are: * sky_diffuse: Total sky diffuse * isotropic * circumsolar * horizon References ---------- [1] Loutzenhiser P.G. et. al. "Empirical validation of models to compute solar irradiance on inclined surfaces for building energy simulation" 2007, Solar Energy vol. 81. pp. 254-267 [2] Perez, R., Seals, R., Ineichen, P., Stewart, R., Menicucci, D., 1987. A new simplified version of the Perez diffuse irradiance model for tilted surfaces. Solar Energy 39(3), 221-232. [3] Perez, R., Ineichen, P., Seals, R., Michalsky, J., Stewart, R., 1990. Modeling daylight availability and irradiance components from direct and global irradiance. 
Solar Energy 44 (5), 271-289. [4] Perez, R. et. al 1988. "The Development and Verification of the Perez Diffuse Radiation Model". SAND88-7030 ''' kappa = 1.041 # for solar_zenith in radians z = np.radians(solar_zenith) # convert to radians # delta is the sky's "brightness" delta = dhi * airmass / dni_extra # epsilon is the sky's "clearness" with np.errstate(invalid='ignore'): eps = ((dhi + dni) / dhi + kappa * (z ** 3)) / (1 + kappa * (z ** 3)) # numpy indexing below will not work with a Series if isinstance(eps, pd.Series): eps = eps.values # Perez et al define clearness bins according to the following # rules. 1 = overcast ... 8 = clear (these names really only make # sense for small zenith angles, but...) these values will # eventually be used as indicies for coeffecient look ups ebin = np.digitize(eps, (0., 1.065, 1.23, 1.5, 1.95, 2.8, 4.5, 6.2)) ebin = np.array(ebin) # GH 642 ebin[np.isnan(eps)] = 0 # correct for 0 indexing in coeffecient lookup # later, ebin = -1 will yield nan coefficients ebin -= 1 # The various possible sets of Perez coefficients are contained # in a subfunction to clean up the code. F1c, F2c = _get_perez_coefficients(model) # results in invalid eps (ebin = -1) being mapped to nans nans = np.array([np.nan, np.nan, np.nan]) F1c = np.vstack((F1c, nans)) F2c = np.vstack((F2c, nans)) F1 = (F1c[ebin, 0] + F1c[ebin, 1] * delta + F1c[ebin, 2] * z) F1 = np.maximum(F1, 0) F2 = (F2c[ebin, 0] + F2c[ebin, 1] * delta + F2c[ebin, 2] * z) F2 = np.maximum(F2, 0) A = aoi_projection(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth) A = np.maximum(A, 0) B = tools.cosd(solar_zenith) B = np.maximum(B, tools.cosd(85)) # Calculate Diffuse POA from sky dome term1 = 0.5 * (1 - F1) * (1 + tools.cosd(surface_tilt)) term2 = F1 * A / B term3 = F2 * tools.sind(surface_tilt) sky_diffuse = np.maximum(dhi * (term1 + term2 + term3), 0) # we've preserved the input type until now, so don't ruin it! if isinstance(sky_diffuse, pd.Series): sky_diffuse[np.isnan(airmass)] = 0 else: sky_diffuse = np.where(np.isnan(airmass), 0, sky_diffuse) if return_components: diffuse_components = OrderedDict() diffuse_components['sky_diffuse'] = sky_diffuse # Calculate the different components diffuse_components['isotropic'] = dhi * term1 diffuse_components['circumsolar'] = dhi * term2 diffuse_components['horizon'] = dhi * term3 # Set values of components to 0 when sky_diffuse is 0 mask = sky_diffuse == 0 if isinstance(sky_diffuse, pd.Series): diffuse_components = pd.DataFrame(diffuse_components) diffuse_components.loc[mask] = 0 else: diffuse_components = {k: np.where(mask, 0, v) for k, v in diffuse_components.items()} return diffuse_components else: return sky_diffuse
[ "def", "perez", "(", "surface_tilt", ",", "surface_azimuth", ",", "dhi", ",", "dni", ",", "dni_extra", ",", "solar_zenith", ",", "solar_azimuth", ",", "airmass", ",", "model", "=", "'allsitescomposite1990'", ",", "return_components", "=", "False", ")", ":", "kappa", "=", "1.041", "# for solar_zenith in radians", "z", "=", "np", ".", "radians", "(", "solar_zenith", ")", "# convert to radians", "# delta is the sky's \"brightness\"", "delta", "=", "dhi", "*", "airmass", "/", "dni_extra", "# epsilon is the sky's \"clearness\"", "with", "np", ".", "errstate", "(", "invalid", "=", "'ignore'", ")", ":", "eps", "=", "(", "(", "dhi", "+", "dni", ")", "/", "dhi", "+", "kappa", "*", "(", "z", "**", "3", ")", ")", "/", "(", "1", "+", "kappa", "*", "(", "z", "**", "3", ")", ")", "# numpy indexing below will not work with a Series", "if", "isinstance", "(", "eps", ",", "pd", ".", "Series", ")", ":", "eps", "=", "eps", ".", "values", "# Perez et al define clearness bins according to the following", "# rules. 1 = overcast ... 8 = clear (these names really only make", "# sense for small zenith angles, but...) these values will", "# eventually be used as indicies for coeffecient look ups", "ebin", "=", "np", ".", "digitize", "(", "eps", ",", "(", "0.", ",", "1.065", ",", "1.23", ",", "1.5", ",", "1.95", ",", "2.8", ",", "4.5", ",", "6.2", ")", ")", "ebin", "=", "np", ".", "array", "(", "ebin", ")", "# GH 642", "ebin", "[", "np", ".", "isnan", "(", "eps", ")", "]", "=", "0", "# correct for 0 indexing in coeffecient lookup", "# later, ebin = -1 will yield nan coefficients", "ebin", "-=", "1", "# The various possible sets of Perez coefficients are contained", "# in a subfunction to clean up the code.", "F1c", ",", "F2c", "=", "_get_perez_coefficients", "(", "model", ")", "# results in invalid eps (ebin = -1) being mapped to nans", "nans", "=", "np", ".", "array", "(", "[", "np", ".", "nan", ",", "np", ".", "nan", ",", "np", ".", "nan", "]", ")", "F1c", "=", "np", ".", "vstack", "(", "(", "F1c", ",", "nans", ")", ")", "F2c", "=", "np", ".", "vstack", "(", "(", "F2c", ",", "nans", ")", ")", "F1", "=", "(", "F1c", "[", "ebin", ",", "0", "]", "+", "F1c", "[", "ebin", ",", "1", "]", "*", "delta", "+", "F1c", "[", "ebin", ",", "2", "]", "*", "z", ")", "F1", "=", "np", ".", "maximum", "(", "F1", ",", "0", ")", "F2", "=", "(", "F2c", "[", "ebin", ",", "0", "]", "+", "F2c", "[", "ebin", ",", "1", "]", "*", "delta", "+", "F2c", "[", "ebin", ",", "2", "]", "*", "z", ")", "F2", "=", "np", ".", "maximum", "(", "F2", ",", "0", ")", "A", "=", "aoi_projection", "(", "surface_tilt", ",", "surface_azimuth", ",", "solar_zenith", ",", "solar_azimuth", ")", "A", "=", "np", ".", "maximum", "(", "A", ",", "0", ")", "B", "=", "tools", ".", "cosd", "(", "solar_zenith", ")", "B", "=", "np", ".", "maximum", "(", "B", ",", "tools", ".", "cosd", "(", "85", ")", ")", "# Calculate Diffuse POA from sky dome", "term1", "=", "0.5", "*", "(", "1", "-", "F1", ")", "*", "(", "1", "+", "tools", ".", "cosd", "(", "surface_tilt", ")", ")", "term2", "=", "F1", "*", "A", "/", "B", "term3", "=", "F2", "*", "tools", ".", "sind", "(", "surface_tilt", ")", "sky_diffuse", "=", "np", ".", "maximum", "(", "dhi", "*", "(", "term1", "+", "term2", "+", "term3", ")", ",", "0", ")", "# we've preserved the input type until now, so don't ruin it!", "if", "isinstance", "(", "sky_diffuse", ",", "pd", ".", "Series", ")", ":", "sky_diffuse", "[", "np", ".", "isnan", "(", "airmass", ")", "]", "=", "0", "else", ":", "sky_diffuse", "=", "np", ".", "where", "(", "np", 
".", "isnan", "(", "airmass", ")", ",", "0", ",", "sky_diffuse", ")", "if", "return_components", ":", "diffuse_components", "=", "OrderedDict", "(", ")", "diffuse_components", "[", "'sky_diffuse'", "]", "=", "sky_diffuse", "# Calculate the different components", "diffuse_components", "[", "'isotropic'", "]", "=", "dhi", "*", "term1", "diffuse_components", "[", "'circumsolar'", "]", "=", "dhi", "*", "term2", "diffuse_components", "[", "'horizon'", "]", "=", "dhi", "*", "term3", "# Set values of components to 0 when sky_diffuse is 0", "mask", "=", "sky_diffuse", "==", "0", "if", "isinstance", "(", "sky_diffuse", ",", "pd", ".", "Series", ")", ":", "diffuse_components", "=", "pd", ".", "DataFrame", "(", "diffuse_components", ")", "diffuse_components", ".", "loc", "[", "mask", "]", "=", "0", "else", ":", "diffuse_components", "=", "{", "k", ":", "np", ".", "where", "(", "mask", ",", "0", ",", "v", ")", "for", "k", ",", "v", "in", "diffuse_components", ".", "items", "(", ")", "}", "return", "diffuse_components", "else", ":", "return", "sky_diffuse" ]
Determine diffuse irradiance from the sky on a tilted surface using one of the Perez models. Perez models determine the diffuse irradiance from the sky (ground reflected irradiance is not included in this algorithm) on a tilted surface using the surface tilt angle, surface azimuth angle, diffuse horizontal irradiance, direct normal irradiance, extraterrestrial irradiance, sun zenith angle, sun azimuth angle, and relative (not pressure-corrected) airmass. Optionally a selector may be used to use any of Perez's model coefficient sets. Parameters ---------- surface_tilt : numeric Surface tilt angles in decimal degrees. surface_tilt must be >=0 and <=180. The tilt angle is defined as degrees from horizontal (e.g. surface facing up = 0, surface facing horizon = 90) surface_azimuth : numeric Surface azimuth angles in decimal degrees. surface_azimuth must be >=0 and <=360. The azimuth convention is defined as degrees east of north (e.g. North = 0, South=180 East = 90, West = 270). dhi : numeric Diffuse horizontal irradiance in W/m^2. DHI must be >=0. dni : numeric Direct normal irradiance in W/m^2. DNI must be >=0. dni_extra : numeric Extraterrestrial normal irradiance in W/m^2. solar_zenith : numeric apparent (refraction-corrected) zenith angles in decimal degrees. solar_zenith must be >=0 and <=180. solar_azimuth : numeric Sun azimuth angles in decimal degrees. solar_azimuth must be >=0 and <=360. The azimuth convention is defined as degrees east of north (e.g. North = 0, East = 90, West = 270). airmass : numeric Relative (not pressure-corrected) airmass values. If AM is a DataFrame it must be of the same size as all other DataFrame inputs. AM must be >=0 (careful using the 1/sec(z) model of AM generation) model : string (optional, default='allsitescomposite1990') A string which selects the desired set of Perez coefficients. If model is not provided as an input, the default, '1990' will be used. All possible model selections are: * '1990' * 'allsitescomposite1990' (same as '1990') * 'allsitescomposite1988' * 'sandiacomposite1988' * 'usacomposite1988' * 'france1988' * 'phoenix1988' * 'elmonte1988' * 'osage1988' * 'albuquerque1988' * 'capecanaveral1988' * 'albany1988' return_components: bool (optional, default=False) Flag used to decide whether to return the calculated diffuse components or not. Returns -------- numeric, OrderedDict, or DataFrame Return type controlled by `return_components` argument. If ``return_components=False``, `sky_diffuse` is returned. If ``return_components=True``, `diffuse_components` is returned. sky_diffuse : numeric The sky diffuse component of the solar radiation on a tilted surface. diffuse_components : OrderedDict (array input) or DataFrame (Series input) Keys/columns are: * sky_diffuse: Total sky diffuse * isotropic * circumsolar * horizon References ---------- [1] Loutzenhiser P.G. et. al. "Empirical validation of models to compute solar irradiance on inclined surfaces for building energy simulation" 2007, Solar Energy vol. 81. pp. 254-267 [2] Perez, R., Seals, R., Ineichen, P., Stewart, R., Menicucci, D., 1987. A new simplified version of the Perez diffuse irradiance model for tilted surfaces. Solar Energy 39(3), 221-232. [3] Perez, R., Ineichen, P., Seals, R., Michalsky, J., Stewart, R., 1990. Modeling daylight availability and irradiance components from direct and global irradiance. Solar Energy 44 (5), 271-289. [4] Perez, R. et. al 1988. "The Development and Verification of the Perez Diffuse Radiation Model". SAND88-7030
[ "Determine", "diffuse", "irradiance", "from", "the", "sky", "on", "a", "tilted", "surface", "using", "one", "of", "the", "Perez", "models", "." ]
python
train
36.832461
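The opening lines of perez() compute the sky brightness and clearness indices described in the references. A short numpy sketch mirroring just those two expressions, with made-up sample irradiance values:

    import numpy as np

    kappa = 1.041
    dhi, dni, dni_extra = 120.0, 600.0, 1400.0   # W/m^2, illustrative values only
    airmass = 1.5
    z = np.radians(40.0)                         # solar zenith in radians

    delta = dhi * airmass / dni_extra                              # sky "brightness"
    eps = ((dhi + dni) / dhi + kappa * z**3) / (1 + kappa * z**3)  # sky "clearness"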
openego/ding0
ding0/core/__init__.py
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/core/__init__.py#L106-L268
def run_ding0(self, session, mv_grid_districts_no=None, debug=False, export_figures=False): """ Let DING0 run by shouting at this method (or just call it from NetworkDing0 instance). This method is a wrapper for the main functionality of DING0. Parameters ---------- session : sqlalchemy.orm.session.Session Database session mv_grid_districts_no : List of Integers List of MV grid_districts/stations to be imported (if empty, all grid_districts & stations are imported) debug : bool, defaults to False If True, information is printed during process export_figures : bool, defaults to False If True, figures are shown or exported (default path: ~/.ding0/) during run. Returns ------- msg : str Message of invalidity of a grid district Notes ----- The steps performed in this method are to be kept in the given order since there are hard dependencies between them. Short description of all steps performed: * STEP 1: Import MV Grid Districts and subjacent objects Imports MV Grid Districts, HV-MV stations, Load Areas, LV Grid Districts and MV-LV stations, instantiates and initiates objects. * STEP 2: Import generators Conventional and renewable generators of voltage levels 4..7 are imported and added to corresponding grid. * STEP 3: Parametrize grid Parameters of MV grid are set such as voltage level and cable/line types according to MV Grid District's characteristics. * STEP 4: Validate MV Grid Districts Tests MV grid districts for validity concerning imported data such as count of Load Areas. * STEP 5: Build LV grids Builds LV grids for every non-aggregated LA in every MV Grid District using model grids. * STEP 6: Build MV grids Builds MV grid by performing a routing on Load Area centres to build ring topology. * STEP 7: Connect MV and LV generators Generators are connected to grids, used approach depends on voltage level. * STEP 8: Set IDs for all branches in MV and LV grids While IDs of imported objects can be derived from dataset's ID, branches are created in steps 5+6 and need unique IDs (e.g. for PF calculation). * STEP 9: Relocate switch disconnectors in MV grid Switch disconnectors are set during routing process (step 6) according to the load distribution within a ring. After further modifications of the grid within step 6+7 they have to be relocated (note: switch disconnectors are called circuit breakers in DING0 for historical reasons). * STEP 10: Open all switch disconnectors in MV grid Under normal conditions, rings are operated in open state (half-rings). Furthermore, this is required to allow powerflow for MV grid. * STEP 11: Do power flow analysis of MV grid The technically working MV grid created in step 6 was extended by satellite loads and generators. It is finally tested again using powerflow calculation. * STEP 12: Reinforce MV grid MV grid is eventually reinforced persuant to results from step 11. STEP 13: Close all switch disconnectors in MV grid The rings are finally closed to hold a complete graph (if the SDs are open, the edges adjacent to a SD will not be exported!) 
""" if debug: start = time.time() # STEP 1: Import MV Grid Districts and subjacent objects self.import_mv_grid_districts(session, mv_grid_districts_no=mv_grid_districts_no) # STEP 2: Import generators self.import_generators(session, debug=debug) # STEP 3: Parametrize MV grid self.mv_parametrize_grid(debug=debug) # STEP 4: Validate MV Grid Districts msg = self.validate_grid_districts() # STEP 5: Build LV grids self.build_lv_grids() # STEP 6: Build MV grids self.mv_routing(debug=False) if export_figures: grid = self._mv_grid_districts[0].mv_grid plot_mv_topology(grid, subtitle='Routing completed', filename='1_routing_completed.png') # STEP 7: Connect MV and LV generators self.connect_generators(debug=False) if export_figures: plot_mv_topology(grid, subtitle='Generators connected', filename='2_generators_connected.png') # STEP 8: Set IDs for all branches in MV and LV grids self.set_branch_ids() # STEP 9: Relocate switch disconnectors in MV grid self.set_circuit_breakers(debug=debug) if export_figures: plot_mv_topology(grid, subtitle='Circuit breakers relocated', filename='3_circuit_breakers_relocated.png') # STEP 10: Open all switch disconnectors in MV grid self.control_circuit_breakers(mode='open') # STEP 11: Do power flow analysis of MV grid self.run_powerflow(session, method='onthefly', export_pypsa=False, debug=debug) if export_figures: plot_mv_topology(grid, subtitle='PF result (load case)', filename='4_PF_result_load.png', line_color='loading', node_color='voltage', testcase='load') plot_mv_topology(grid, subtitle='PF result (feedin case)', filename='5_PF_result_feedin.png', line_color='loading', node_color='voltage', testcase='feedin') # STEP 12: Reinforce MV grid self.reinforce_grid() # STEP 13: Close all switch disconnectors in MV grid self.control_circuit_breakers(mode='close') if export_figures: plot_mv_topology(grid, subtitle='Final grid PF result (load case)', filename='6_final_grid_PF_result_load.png', line_color='loading', node_color='voltage', testcase='load') plot_mv_topology(grid, subtitle='Final grid PF result (feedin case)', filename='7_final_grid_PF_result_feedin.png', line_color='loading', node_color='voltage', testcase='feedin') if debug: logger.info('Elapsed time for {0} MV Grid Districts (seconds): {1}'.format( str(len(mv_grid_districts_no)), time.time() - start)) return msg
[ "def", "run_ding0", "(", "self", ",", "session", ",", "mv_grid_districts_no", "=", "None", ",", "debug", "=", "False", ",", "export_figures", "=", "False", ")", ":", "if", "debug", ":", "start", "=", "time", ".", "time", "(", ")", "# STEP 1: Import MV Grid Districts and subjacent objects", "self", ".", "import_mv_grid_districts", "(", "session", ",", "mv_grid_districts_no", "=", "mv_grid_districts_no", ")", "# STEP 2: Import generators", "self", ".", "import_generators", "(", "session", ",", "debug", "=", "debug", ")", "# STEP 3: Parametrize MV grid", "self", ".", "mv_parametrize_grid", "(", "debug", "=", "debug", ")", "# STEP 4: Validate MV Grid Districts", "msg", "=", "self", ".", "validate_grid_districts", "(", ")", "# STEP 5: Build LV grids", "self", ".", "build_lv_grids", "(", ")", "# STEP 6: Build MV grids", "self", ".", "mv_routing", "(", "debug", "=", "False", ")", "if", "export_figures", ":", "grid", "=", "self", ".", "_mv_grid_districts", "[", "0", "]", ".", "mv_grid", "plot_mv_topology", "(", "grid", ",", "subtitle", "=", "'Routing completed'", ",", "filename", "=", "'1_routing_completed.png'", ")", "# STEP 7: Connect MV and LV generators", "self", ".", "connect_generators", "(", "debug", "=", "False", ")", "if", "export_figures", ":", "plot_mv_topology", "(", "grid", ",", "subtitle", "=", "'Generators connected'", ",", "filename", "=", "'2_generators_connected.png'", ")", "# STEP 8: Set IDs for all branches in MV and LV grids", "self", ".", "set_branch_ids", "(", ")", "# STEP 9: Relocate switch disconnectors in MV grid", "self", ".", "set_circuit_breakers", "(", "debug", "=", "debug", ")", "if", "export_figures", ":", "plot_mv_topology", "(", "grid", ",", "subtitle", "=", "'Circuit breakers relocated'", ",", "filename", "=", "'3_circuit_breakers_relocated.png'", ")", "# STEP 10: Open all switch disconnectors in MV grid", "self", ".", "control_circuit_breakers", "(", "mode", "=", "'open'", ")", "# STEP 11: Do power flow analysis of MV grid", "self", ".", "run_powerflow", "(", "session", ",", "method", "=", "'onthefly'", ",", "export_pypsa", "=", "False", ",", "debug", "=", "debug", ")", "if", "export_figures", ":", "plot_mv_topology", "(", "grid", ",", "subtitle", "=", "'PF result (load case)'", ",", "filename", "=", "'4_PF_result_load.png'", ",", "line_color", "=", "'loading'", ",", "node_color", "=", "'voltage'", ",", "testcase", "=", "'load'", ")", "plot_mv_topology", "(", "grid", ",", "subtitle", "=", "'PF result (feedin case)'", ",", "filename", "=", "'5_PF_result_feedin.png'", ",", "line_color", "=", "'loading'", ",", "node_color", "=", "'voltage'", ",", "testcase", "=", "'feedin'", ")", "# STEP 12: Reinforce MV grid", "self", ".", "reinforce_grid", "(", ")", "# STEP 13: Close all switch disconnectors in MV grid", "self", ".", "control_circuit_breakers", "(", "mode", "=", "'close'", ")", "if", "export_figures", ":", "plot_mv_topology", "(", "grid", ",", "subtitle", "=", "'Final grid PF result (load case)'", ",", "filename", "=", "'6_final_grid_PF_result_load.png'", ",", "line_color", "=", "'loading'", ",", "node_color", "=", "'voltage'", ",", "testcase", "=", "'load'", ")", "plot_mv_topology", "(", "grid", ",", "subtitle", "=", "'Final grid PF result (feedin case)'", ",", "filename", "=", "'7_final_grid_PF_result_feedin.png'", ",", "line_color", "=", "'loading'", ",", "node_color", "=", "'voltage'", ",", "testcase", "=", "'feedin'", ")", "if", "debug", ":", "logger", ".", "info", "(", "'Elapsed time for {0} MV Grid Districts (seconds): {1}'", ".", "format", "(", "str", 
"(", "len", "(", "mv_grid_districts_no", ")", ")", ",", "time", ".", "time", "(", ")", "-", "start", ")", ")", "return", "msg" ]
Let DING0 run by shouting at this method (or just call it from NetworkDing0 instance). This method is a wrapper for the main functionality of DING0. Parameters ---------- session : sqlalchemy.orm.session.Session Database session mv_grid_districts_no : List of Integers List of MV grid_districts/stations to be imported (if empty, all grid_districts & stations are imported) debug : bool, defaults to False If True, information is printed during process export_figures : bool, defaults to False If True, figures are shown or exported (default path: ~/.ding0/) during run. Returns ------- msg : str Message of invalidity of a grid district Notes ----- The steps performed in this method are to be kept in the given order since there are hard dependencies between them. Short description of all steps performed: * STEP 1: Import MV Grid Districts and subjacent objects Imports MV Grid Districts, HV-MV stations, Load Areas, LV Grid Districts and MV-LV stations, instantiates and initiates objects. * STEP 2: Import generators Conventional and renewable generators of voltage levels 4..7 are imported and added to corresponding grid. * STEP 3: Parametrize grid Parameters of MV grid are set such as voltage level and cable/line types according to MV Grid District's characteristics. * STEP 4: Validate MV Grid Districts Tests MV grid districts for validity concerning imported data such as count of Load Areas. * STEP 5: Build LV grids Builds LV grids for every non-aggregated LA in every MV Grid District using model grids. * STEP 6: Build MV grids Builds MV grid by performing a routing on Load Area centres to build ring topology. * STEP 7: Connect MV and LV generators Generators are connected to grids, used approach depends on voltage level. * STEP 8: Set IDs for all branches in MV and LV grids While IDs of imported objects can be derived from dataset's ID, branches are created in steps 5+6 and need unique IDs (e.g. for PF calculation). * STEP 9: Relocate switch disconnectors in MV grid Switch disconnectors are set during routing process (step 6) according to the load distribution within a ring. After further modifications of the grid within step 6+7 they have to be relocated (note: switch disconnectors are called circuit breakers in DING0 for historical reasons). * STEP 10: Open all switch disconnectors in MV grid Under normal conditions, rings are operated in open state (half-rings). Furthermore, this is required to allow powerflow for MV grid. * STEP 11: Do power flow analysis of MV grid The technically working MV grid created in step 6 was extended by satellite loads and generators. It is finally tested again using powerflow calculation. * STEP 12: Reinforce MV grid MV grid is eventually reinforced persuant to results from step 11. STEP 13: Close all switch disconnectors in MV grid The rings are finally closed to hold a complete graph (if the SDs are open, the edges adjacent to a SD will not be exported!)
[ "Let", "DING0", "run", "by", "shouting", "at", "this", "method", "(", "or", "just", "call", "it", "from", "NetworkDing0", "instance", ")", ".", "This", "method", "is", "a", "wrapper", "for", "the", "main", "functionality", "of", "DING0", "." ]
python
train
41.644172
Knoema/knoema-python-driver
knoema/data_reader.py
https://github.com/Knoema/knoema-python-driver/blob/e98b13db3e4df51c208c272e2977bfbe4c6e5532/knoema/data_reader.py#L465-L467
def get_pandas_series(self): """The function creates pandas series based on index and values""" return pandas.Series(self.values, self.index, name=self.name)
[ "def", "get_pandas_series", "(", "self", ")", ":", "return", "pandas", ".", "Series", "(", "self", ".", "values", ",", "self", ".", "index", ",", "name", "=", "self", ".", "name", ")" ]
The function creates pandas series based on index and values
[ "The", "function", "creates", "pandas", "series", "based", "on", "index", "and", "values" ]
python
train
57.666667
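A brief pandas example of the Series construction this helper performs (the index and values here are illustrative, not Knoema data):

    import pandas as pd

    index = pd.to_datetime(['2020-01-01', '2020-02-01', '2020-03-01'])
    values = [1.0, 2.5, 3.2]
    series = pd.Series(values, index, name='example-series')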
gamechanger/confluent_schema_registry_client
confluent_schema_registry_client/__init__.py
https://github.com/gamechanger/confluent_schema_registry_client/blob/ac9196e366724eeb2f19f1a169fd2f9a0c8d68ae/confluent_schema_registry_client/__init__.py#L60-L66
def get_subjects(self): """ Returns the list of subject names present in the schema registry. """ res = requests.get(self._url('/subjects')) raise_if_failed(res) return res.json()
[ "def", "get_subjects", "(", "self", ")", ":", "res", "=", "requests", ".", "get", "(", "self", ".", "_url", "(", "'/subjects'", ")", ")", "raise_if_failed", "(", "res", ")", "return", "res", ".", "json", "(", ")" ]
Returns the list of subject names present in the schema registry.
[ "Returns", "the", "list", "of", "subject", "names", "present", "in", "the", "schema", "registry", "." ]
python
train
31.571429
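A hedged sketch of the underlying HTTP call; the registry URL is assumed, and raise_for_status stands in for the client's raise_if_failed helper:

    import requests

    base_url = 'http://localhost:8081'        # assumed local schema registry
    res = requests.get(base_url + '/subjects')
    res.raise_for_status()
    subjects = res.json()                     # e.g. ['my-topic-key', 'my-topic-value']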
optimizely/python-sdk
optimizely/optimizely.py
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/optimizely.py#L564-L581
def get_feature_variable_string(self, feature_key, variable_key, user_id, attributes=None): """ Returns value for a certain string variable attached to a feature. Args: feature_key: Key of the feature whose variable's value is being accessed. variable_key: Key of the variable whose value is to be accessed. user_id: ID for user. attributes: Dict representing user attributes. Returns: String value of the variable. None if: - Feature key is invalid. - Variable key is invalid. - Mismatch with type of variable. """ variable_type = entities.Variable.Type.STRING return self._get_feature_variable_for_type(feature_key, variable_key, variable_type, user_id, attributes)
[ "def", "get_feature_variable_string", "(", "self", ",", "feature_key", ",", "variable_key", ",", "user_id", ",", "attributes", "=", "None", ")", ":", "variable_type", "=", "entities", ".", "Variable", ".", "Type", ".", "STRING", "return", "self", ".", "_get_feature_variable_for_type", "(", "feature_key", ",", "variable_key", ",", "variable_type", ",", "user_id", ",", "attributes", ")" ]
Returns value for a certain string variable attached to a feature. Args: feature_key: Key of the feature whose variable's value is being accessed. variable_key: Key of the variable whose value is to be accessed. user_id: ID for user. attributes: Dict representing user attributes. Returns: String value of the variable. None if: - Feature key is invalid. - Variable key is invalid. - Mismatch with type of variable.
[ "Returns", "value", "for", "a", "certain", "string", "variable", "attached", "to", "a", "feature", "." ]
python
train
40.277778
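A hedged usage sketch of the method above; the client construction follows the SDK's documented entry point, and the datafile, feature key, variable key, and attributes below are all hypothetical placeholders:

    from optimizely import optimizely

    datafile = '...'                           # project datafile JSON string, fetched elsewhere (placeholder)
    client = optimizely.Optimizely(datafile)
    value = client.get_feature_variable_string(
        'checkout_flow',                       # hypothetical feature key
        'button_text',                         # hypothetical variable key
        'user-123',
        attributes={'plan': 'pro'})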
vijayvarma392/surfinBH
surfinBH/_fit_evaluators/fit_3dq8.py
https://github.com/vijayvarma392/surfinBH/blob/9f2d25d00f894ee2ce9ffbb02f4e4a41fa7989eb/surfinBH/_fit_evaluators/fit_3dq8.py#L89-L94
def _load_fits(self, h5file): """ Loads fits from h5file and returns a dictionary of fits. """ fits = {} for key in ['mf', 'chifz', 'vfx', 'vfy']: fits[key] = self._load_scalar_fit(fit_key=key, h5file=h5file) return fits
[ "def", "_load_fits", "(", "self", ",", "h5file", ")", ":", "fits", "=", "{", "}", "for", "key", "in", "[", "'mf'", ",", "'chifz'", ",", "'vfx'", ",", "'vfy'", "]", ":", "fits", "[", "key", "]", "=", "self", ".", "_load_scalar_fit", "(", "fit_key", "=", "key", ",", "h5file", "=", "h5file", ")", "return", "fits" ]
Loads fits from h5file and returns a dictionary of fits.
[ "Loads", "fits", "from", "h5file", "and", "returns", "a", "dictionary", "of", "fits", "." ]
python
train
43.166667
ewels/MultiQC
multiqc/modules/qualimap/QM_BamQC.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/qualimap/QM_BamQC.py#L158-L193
def parse_insert_size(self, f): """ Parse the contents of the Qualimap BamQC Insert Size Histogram file """ # Get the sample name from the parent parent directory # Typical path: <sample name>/raw_data_qualimapReport/insert_size_histogram.txt s_name = self.get_s_name(f) d = dict() zero_insertsize = 0 for l in f['f']: if l.startswith('#'): continue insertsize, count = l.split(None, 1) insertsize = int(round(float(insertsize))) count = float(count) / 1000000 if(insertsize == 0): zero_insertsize = count else: d[insertsize] = count # Find median without importing anything to do it for us num_counts = sum(d.values()) cum_counts = 0 median_insert_size = None for thisins, thiscount in d.items(): cum_counts += thiscount if cum_counts >= num_counts/2: median_insert_size = thisins break # Add the median insert size to the general stats table self.general_stats_data[s_name]['median_insert_size'] = median_insert_size # Save results if s_name in self.qualimap_bamqc_insert_size_hist: log.debug("Duplicate insert size histogram sample name found! Overwriting: {}".format(s_name)) self.qualimap_bamqc_insert_size_hist[s_name] = d self.add_data_source(f, s_name=s_name, section='insert_size_histogram')
[ "def", "parse_insert_size", "(", "self", ",", "f", ")", ":", "# Get the sample name from the parent parent directory", "# Typical path: <sample name>/raw_data_qualimapReport/insert_size_histogram.txt", "s_name", "=", "self", ".", "get_s_name", "(", "f", ")", "d", "=", "dict", "(", ")", "zero_insertsize", "=", "0", "for", "l", "in", "f", "[", "'f'", "]", ":", "if", "l", ".", "startswith", "(", "'#'", ")", ":", "continue", "insertsize", ",", "count", "=", "l", ".", "split", "(", "None", ",", "1", ")", "insertsize", "=", "int", "(", "round", "(", "float", "(", "insertsize", ")", ")", ")", "count", "=", "float", "(", "count", ")", "/", "1000000", "if", "(", "insertsize", "==", "0", ")", ":", "zero_insertsize", "=", "count", "else", ":", "d", "[", "insertsize", "]", "=", "count", "# Find median without importing anything to do it for us", "num_counts", "=", "sum", "(", "d", ".", "values", "(", ")", ")", "cum_counts", "=", "0", "median_insert_size", "=", "None", "for", "thisins", ",", "thiscount", "in", "d", ".", "items", "(", ")", ":", "cum_counts", "+=", "thiscount", "if", "cum_counts", ">=", "num_counts", "/", "2", ":", "median_insert_size", "=", "thisins", "break", "# Add the median insert size to the general stats table", "self", ".", "general_stats_data", "[", "s_name", "]", "[", "'median_insert_size'", "]", "=", "median_insert_size", "# Save results", "if", "s_name", "in", "self", ".", "qualimap_bamqc_insert_size_hist", ":", "log", ".", "debug", "(", "\"Duplicate insert size histogram sample name found! Overwriting: {}\"", ".", "format", "(", "s_name", ")", ")", "self", ".", "qualimap_bamqc_insert_size_hist", "[", "s_name", "]", "=", "d", "self", ".", "add_data_source", "(", "f", ",", "s_name", "=", "s_name", ",", "section", "=", "'insert_size_histogram'", ")" ]
Parse the contents of the Qualimap BamQC Insert Size Histogram file
[ "Parse", "the", "contents", "of", "the", "Qualimap", "BamQC", "Insert", "Size", "Histogram", "file" ]
python
train
38.111111
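The median here is found by walking cumulative counts over the size histogram. A standalone sketch of that calculation (sizes are sorted explicitly for clarity; the values are made up):

    def histogram_median(hist):
        # hist maps insert size -> count (in millions, as parsed above)
        total = sum(hist.values())
        cumulative = 0
        for size in sorted(hist):
            cumulative += hist[size]
            if cumulative >= total / 2:
                return size

    histogram_median({100: 1, 200: 3, 300: 1})   # -> 200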
sentinel-hub/eo-learn
mask/eolearn/mask/cloud_mask.py
https://github.com/sentinel-hub/eo-learn/blob/b8c390b9f553c561612fe9eb64e720611633a035/mask/eolearn/mask/cloud_mask.py#L93-L106
def _get_wcs_request(self, bbox, time_interval, size_x, size_y, maxcc, time_difference, custom_url_params): """ Returns WCS request. """ return WcsRequest(layer=self.data_feature, bbox=bbox, time=time_interval, resx=size_x, resy=size_y, maxcc=maxcc, custom_url_params=custom_url_params, time_difference=time_difference, image_format=self.image_format, data_source=self.data_source, instance_id=self.instance_id)
[ "def", "_get_wcs_request", "(", "self", ",", "bbox", ",", "time_interval", ",", "size_x", ",", "size_y", ",", "maxcc", ",", "time_difference", ",", "custom_url_params", ")", ":", "return", "WcsRequest", "(", "layer", "=", "self", ".", "data_feature", ",", "bbox", "=", "bbox", ",", "time", "=", "time_interval", ",", "resx", "=", "size_x", ",", "resy", "=", "size_y", ",", "maxcc", "=", "maxcc", ",", "custom_url_params", "=", "custom_url_params", ",", "time_difference", "=", "time_difference", ",", "image_format", "=", "self", ".", "image_format", ",", "data_source", "=", "self", ".", "data_source", ",", "instance_id", "=", "self", ".", "instance_id", ")" ]
Returns WCS request.
[ "Returns", "WCS", "request", "." ]
python
train
47.428571
davidmogar/cucco
cucco/cucco.py
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cucco.py#L140-L159
def remove_accent_marks(text, excluded=None): """Remove accent marks from input text. This function removes accent marks in the text, but leaves unicode characters defined in the 'excluded' parameter. Args: text: The text to be processed. excluded: Set of unicode characters to exclude. Returns: The text without accent marks. """ if excluded is None: excluded = set() return unicodedata.normalize( 'NFKC', ''.join( c for c in unicodedata.normalize( 'NFKD', text) if unicodedata.category(c) != 'Mn' or c in excluded))
[ "def", "remove_accent_marks", "(", "text", ",", "excluded", "=", "None", ")", ":", "if", "excluded", "is", "None", ":", "excluded", "=", "set", "(", ")", "return", "unicodedata", ".", "normalize", "(", "'NFKC'", ",", "''", ".", "join", "(", "c", "for", "c", "in", "unicodedata", ".", "normalize", "(", "'NFKD'", ",", "text", ")", "if", "unicodedata", ".", "category", "(", "c", ")", "!=", "'Mn'", "or", "c", "in", "excluded", ")", ")" ]
Remove accent marks from input text. This function removes accent marks in the text, but leaves unicode characters defined in the 'excluded' parameter. Args: text: The text to be processed. excluded: Set of unicode characters to exclude. Returns: The text without accent marks.
[ "Remove", "accent", "marks", "from", "input", "text", "." ]
python
train
33.1
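A quick standard-library example of the same decompose/filter/recompose round trip, with no excluded characters:

    import unicodedata

    text = 'café señor'
    stripped = unicodedata.normalize(
        'NFKC',
        ''.join(c for c in unicodedata.normalize('NFKD', text)
                if unicodedata.category(c) != 'Mn'))
    # stripped == 'cafe senor'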
bcbio/bcbio-nextgen
bcbio/variation/validate.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L671-L680
def _get_validate_plotdata_yaml(grading_file, data): """Retrieve validation plot data from grading YAML file (old style). """ with open(grading_file) as in_handle: grade_stats = yaml.safe_load(in_handle) for sample_stats in grade_stats: sample = sample_stats["sample"] for vtype, cat, val in _flatten_grading(sample_stats): yield [sample, variant.get("variantcaller", ""), vtype, cat, val]
[ "def", "_get_validate_plotdata_yaml", "(", "grading_file", ",", "data", ")", ":", "with", "open", "(", "grading_file", ")", "as", "in_handle", ":", "grade_stats", "=", "yaml", ".", "safe_load", "(", "in_handle", ")", "for", "sample_stats", "in", "grade_stats", ":", "sample", "=", "sample_stats", "[", "\"sample\"", "]", "for", "vtype", ",", "cat", ",", "val", "in", "_flatten_grading", "(", "sample_stats", ")", ":", "yield", "[", "sample", ",", "variant", ".", "get", "(", "\"variantcaller\"", ",", "\"\"", ")", ",", "vtype", ",", "cat", ",", "val", "]" ]
Retrieve validation plot data from grading YAML file (old style).
[ "Retrieve", "validation", "plot", "data", "from", "grading", "YAML", "file", "(", "old", "style", ")", "." ]
python
train
45.1
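A rough sketch of reading and flattening such a grading file; the YAML layout below is only a guess at the old-style shape (the real _flatten_grading helper may expect something different), so treat it purely as illustration:

    import yaml

    grading_yaml = '''
    - sample: NA12878
      validate:
        snp:   {tp: 100, fp: 3, fn: 7}
        indel: {tp: 40,  fp: 5, fn: 9}
    '''

    for sample_stats in yaml.safe_load(grading_yaml):
        sample = sample_stats['sample']
        for vtype, cats in sample_stats['validate'].items():
            for cat, val in cats.items():
                print([sample, vtype, cat, val])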
NiklasRosenstein-Python/nr-deprecated
nr/path.py
https://github.com/NiklasRosenstein-Python/nr-deprecated/blob/f9f8b89ea1b084841a8ab65784eaf68852686b2a/nr/path.py#L246-L254
def rmvsuffix(subject): """ Remove the suffix from *subject*. """ index = subject.rfind('.') if index > subject.replace('\\', '/').rfind('/'): subject = subject[:index] return subject
[ "def", "rmvsuffix", "(", "subject", ")", ":", "index", "=", "subject", ".", "rfind", "(", "'.'", ")", "if", "index", ">", "subject", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", ".", "rfind", "(", "'/'", ")", ":", "subject", "=", "subject", "[", ":", "index", "]", "return", "subject" ]
Remove the suffix from *subject*.
[ "Remove", "the", "suffix", "from", "*", "subject", "*", "." ]
python
train
21.333333
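For reference, a few calls showing how the suffix test interacts with directory separators; the behavior follows directly from the rfind comparison above:

    rmvsuffix('archive.tar.gz')     # -> 'archive.tar'
    rmvsuffix('/tmp/no_suffix')     # -> '/tmp/no_suffix'  (no dot after the last slash)
    rmvsuffix('dir.v2/readme')      # -> 'dir.v2/readme'   (the only dot is in a directory name)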
akfullfo/taskforce
taskforce/watch_files.py
https://github.com/akfullfo/taskforce/blob/bc6dd744bd33546447d085dbd18a350532220193/taskforce/watch_files.py#L242-L252
def _close(self, fd): """ Close the descriptor used for a path regardless of mode. """ if self._mode == WF_INOTIFYX: try: pynotifyx.rm_watch(self._inx_fd, fd) except: pass else: try: os.close(fd) except: pass
[ "def", "_close", "(", "self", ",", "fd", ")", ":", "if", "self", ".", "_mode", "==", "WF_INOTIFYX", ":", "try", ":", "pynotifyx", ".", "rm_watch", "(", "self", ".", "_inx_fd", ",", "fd", ")", "except", ":", "pass", "else", ":", "try", ":", "os", ".", "close", "(", "fd", ")", "except", ":", "pass" ]
Close the descriptor used for a path regardless of mode.
[ "Close", "the", "descriptor", "used", "for", "a", "path", "regardless", "of", "mode", "." ]
python
train
26.363636
h2oai/h2o-3
h2o-py/h2o/model/metrics_base.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/model/metrics_base.py#L501-L507
def mean_per_class_error(self, thresholds=None): """ :param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used. :returns: mean per class error. """ return [[x[0], 1 - x[1]] for x in self.metric("mean_per_class_accuracy", thresholds=thresholds)]
[ "def", "mean_per_class_error", "(", "self", ",", "thresholds", "=", "None", ")", ":", "return", "[", "[", "x", "[", "0", "]", ",", "1", "-", "x", "[", "1", "]", "]", "for", "x", "in", "self", ".", "metric", "(", "\"mean_per_class_accuracy\"", ",", "thresholds", "=", "thresholds", ")", "]" ]
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used. :returns: mean per class error.
[ ":", "param", "thresholds", ":", "thresholds", "parameter", "must", "be", "a", "list", "(", "i", ".", "e", ".", "[", "0", ".", "01", "0", ".", "5", "0", ".", "99", "]", ")", ".", "If", "None", "then", "the", "thresholds", "in", "this", "set", "of", "metrics", "will", "be", "used", ".", ":", "returns", ":", "mean", "per", "class", "error", "." ]
python
test
54
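The conversion is simply one minus the per-class accuracy at each threshold; a tiny standalone illustration with made-up [threshold, accuracy] pairs:

    accuracy_pairs = [[0.01, 0.75], [0.5, 0.875], [0.99, 0.625]]
    error_pairs = [[t, 1 - acc] for t, acc in accuracy_pairs]
    # -> [[0.01, 0.25], [0.5, 0.125], [0.99, 0.375]]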
apache/incubator-mxnet
python/mxnet/ndarray/utils.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/utils.py#L149-L182
def load(fname): """Loads an array from file. See more details in ``save``. Parameters ---------- fname : str The filename. Returns ------- list of NDArray, RowSparseNDArray or CSRNDArray, or \ dict of str to NDArray, RowSparseNDArray or CSRNDArray Loaded data. """ if not isinstance(fname, string_types): raise TypeError('fname required to be a string') out_size = mx_uint() out_name_size = mx_uint() handles = ctypes.POINTER(NDArrayHandle)() names = ctypes.POINTER(ctypes.c_char_p)() check_call(_LIB.MXNDArrayLoad(c_str(fname), ctypes.byref(out_size), ctypes.byref(handles), ctypes.byref(out_name_size), ctypes.byref(names))) if out_name_size.value == 0: return [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(out_size.value)] else: assert out_name_size.value == out_size.value return dict( (py_str(names[i]), _ndarray_cls(NDArrayHandle(handles[i]))) for i in range(out_size.value))
[ "def", "load", "(", "fname", ")", ":", "if", "not", "isinstance", "(", "fname", ",", "string_types", ")", ":", "raise", "TypeError", "(", "'fname required to be a string'", ")", "out_size", "=", "mx_uint", "(", ")", "out_name_size", "=", "mx_uint", "(", ")", "handles", "=", "ctypes", ".", "POINTER", "(", "NDArrayHandle", ")", "(", ")", "names", "=", "ctypes", ".", "POINTER", "(", "ctypes", ".", "c_char_p", ")", "(", ")", "check_call", "(", "_LIB", ".", "MXNDArrayLoad", "(", "c_str", "(", "fname", ")", ",", "ctypes", ".", "byref", "(", "out_size", ")", ",", "ctypes", ".", "byref", "(", "handles", ")", ",", "ctypes", ".", "byref", "(", "out_name_size", ")", ",", "ctypes", ".", "byref", "(", "names", ")", ")", ")", "if", "out_name_size", ".", "value", "==", "0", ":", "return", "[", "_ndarray_cls", "(", "NDArrayHandle", "(", "handles", "[", "i", "]", ")", ")", "for", "i", "in", "range", "(", "out_size", ".", "value", ")", "]", "else", ":", "assert", "out_name_size", ".", "value", "==", "out_size", ".", "value", "return", "dict", "(", "(", "py_str", "(", "names", "[", "i", "]", ")", ",", "_ndarray_cls", "(", "NDArrayHandle", "(", "handles", "[", "i", "]", ")", ")", ")", "for", "i", "in", "range", "(", "out_size", ".", "value", ")", ")" ]
Loads an array from file. See more details in ``save``. Parameters ---------- fname : str The filename. Returns ------- list of NDArray, RowSparseNDArray or CSRNDArray, or \ dict of str to NDArray, RowSparseNDArray or CSRNDArray Loaded data.
[ "Loads", "an", "array", "from", "file", "." ]
python
train
33.588235
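A hedged round-trip sketch for the load record above; it assumes the companion mx.nd.save from the same module and uses an arbitrary file name:

    import mxnet as mx

    # Saving a dict of named arrays and loading them back returns a dict keyed by those names.
    params = {'weight': mx.nd.ones((2, 3)), 'bias': mx.nd.zeros((3,))}
    mx.nd.save('params.ndarray', params)
    restored = mx.nd.load('params.ndarray')
    print(sorted(restored.keys()))  # ['bias', 'weight']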
saulpw/visidata
visidata/vdtui.py
https://github.com/saulpw/visidata/blob/32771e0cea6c24fc7902683d14558391395c591f/visidata/vdtui.py#L644-L660
def toplevelTryFunc(func, *args, **kwargs): 'Thread entry-point for `func(*args, **kwargs)` with try/except wrapper' t = threading.current_thread() t.name = func.__name__ ret = None try: ret = func(*args, **kwargs) except EscapeException as e: # user aborted t.status += 'aborted by user' status('%s aborted' % t.name, priority=2) except Exception as e: t.exception = e exceptionCaught(e) if t.sheet: t.sheet.currentThreads.remove(t) return ret
[ "def", "toplevelTryFunc", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "t", "=", "threading", ".", "current_thread", "(", ")", "t", ".", "name", "=", "func", ".", "__name__", "ret", "=", "None", "try", ":", "ret", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "EscapeException", "as", "e", ":", "# user aborted", "t", ".", "status", "+=", "'aborted by user'", "status", "(", "'%s aborted'", "%", "t", ".", "name", ",", "priority", "=", "2", ")", "except", "Exception", "as", "e", ":", "t", ".", "exception", "=", "e", "exceptionCaught", "(", "e", ")", "if", "t", ".", "sheet", ":", "t", ".", "sheet", ".", "currentThreads", ".", "remove", "(", "t", ")", "return", "ret" ]
Thread entry-point for `func(*args, **kwargs)` with try/except wrapper
[ "Thread", "entry", "-", "point", "for", "func", "(", "*", "args", "**", "kwargs", ")", "with", "try", "/", "except", "wrapper" ]
python
train
33.764706
google/grr
appveyor/windows_templates/build_windows_templates.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/appveyor/windows_templates/build_windows_templates.py#L250-L282
def _RepackTemplates(self): """Repack templates with a dummy config.""" dummy_config = os.path.join( args.grr_src, "grr/test/grr_response_test/test_data/dummyconfig.yaml") if args.build_32: template_i386 = glob.glob(os.path.join(args.output_dir, "*_i386*.zip")).pop() template_amd64 = glob.glob(os.path.join(args.output_dir, "*_amd64*.zip")).pop() # We put the installers in the output dir so they get stored as build # artifacts and we can test the 32bit build manually. subprocess.check_call([ self.grr_client_build64, "--verbose", "--secondary_configs", dummy_config, "repack", "--template", template_amd64, "--output_dir", args.output_dir ]) subprocess.check_call([ self.grr_client_build64, "--verbose", "--context", "DebugClientBuild Context", "--secondary_configs", dummy_config, "repack", "--template", template_amd64, "--output_dir", args.output_dir ]) if args.build_32: subprocess.check_call([ self.grr_client_build32, "--verbose", "--secondary_configs", dummy_config, "repack", "--template", template_i386, "--output_dir", args.output_dir ]) subprocess.check_call([ self.grr_client_build32, "--verbose", "--context", "DebugClientBuild Context", "--secondary_configs", dummy_config, "repack", "--template", template_i386, "--output_dir", args.output_dir ])
[ "def", "_RepackTemplates", "(", "self", ")", ":", "dummy_config", "=", "os", ".", "path", ".", "join", "(", "args", ".", "grr_src", ",", "\"grr/test/grr_response_test/test_data/dummyconfig.yaml\"", ")", "if", "args", ".", "build_32", ":", "template_i386", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "args", ".", "output_dir", ",", "\"*_i386*.zip\"", ")", ")", ".", "pop", "(", ")", "template_amd64", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "args", ".", "output_dir", ",", "\"*_amd64*.zip\"", ")", ")", ".", "pop", "(", ")", "# We put the installers in the output dir so they get stored as build", "# artifacts and we can test the 32bit build manually.", "subprocess", ".", "check_call", "(", "[", "self", ".", "grr_client_build64", ",", "\"--verbose\"", ",", "\"--secondary_configs\"", ",", "dummy_config", ",", "\"repack\"", ",", "\"--template\"", ",", "template_amd64", ",", "\"--output_dir\"", ",", "args", ".", "output_dir", "]", ")", "subprocess", ".", "check_call", "(", "[", "self", ".", "grr_client_build64", ",", "\"--verbose\"", ",", "\"--context\"", ",", "\"DebugClientBuild Context\"", ",", "\"--secondary_configs\"", ",", "dummy_config", ",", "\"repack\"", ",", "\"--template\"", ",", "template_amd64", ",", "\"--output_dir\"", ",", "args", ".", "output_dir", "]", ")", "if", "args", ".", "build_32", ":", "subprocess", ".", "check_call", "(", "[", "self", ".", "grr_client_build32", ",", "\"--verbose\"", ",", "\"--secondary_configs\"", ",", "dummy_config", ",", "\"repack\"", ",", "\"--template\"", ",", "template_i386", ",", "\"--output_dir\"", ",", "args", ".", "output_dir", "]", ")", "subprocess", ".", "check_call", "(", "[", "self", ".", "grr_client_build32", ",", "\"--verbose\"", ",", "\"--context\"", ",", "\"DebugClientBuild Context\"", ",", "\"--secondary_configs\"", ",", "dummy_config", ",", "\"repack\"", ",", "\"--template\"", ",", "template_i386", ",", "\"--output_dir\"", ",", "args", ".", "output_dir", "]", ")" ]
Repack templates with a dummy config.
[ "Repack", "templates", "with", "a", "dummy", "config", "." ]
python
train
45.848485
huge-success/sanic
sanic/blueprint_group.py
https://github.com/huge-success/sanic/blob/6a4a3f617fdbe1d3ee8bdc9d1b12ad2d0b34acdd/sanic/blueprint_group.py#L102-L120
def middleware(self, *args, **kwargs): """ A decorator that can be used to implement a Middleware plugin to all of the Blueprints that belongs to this specific Blueprint Group. In case of nested Blueprint Groups, the same middleware is applied across each of the Blueprints recursively. :param args: Optional positional Parameters to be use middleware :param kwargs: Optional Keyword arg to use with Middleware :return: Partial function to apply the middleware """ kwargs["bp_group"] = True def register_middleware_for_blueprints(fn): for blueprint in self.blueprints: blueprint.middleware(fn, *args, **kwargs) return register_middleware_for_blueprints
[ "def", "middleware", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "\"bp_group\"", "]", "=", "True", "def", "register_middleware_for_blueprints", "(", "fn", ")", ":", "for", "blueprint", "in", "self", ".", "blueprints", ":", "blueprint", ".", "middleware", "(", "fn", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "register_middleware_for_blueprints" ]
A decorator that can be used to implement a Middleware plugin to all of the Blueprints that belongs to this specific Blueprint Group. In case of nested Blueprint Groups, the same middleware is applied across each of the Blueprints recursively. :param args: Optional positional Parameters to be use middleware :param kwargs: Optional Keyword arg to use with Middleware :return: Partial function to apply the middleware
[ "A", "decorator", "that", "can", "be", "used", "to", "implement", "a", "Middleware", "plugin", "to", "all", "of", "the", "Blueprints", "that", "belongs", "to", "this", "specific", "Blueprint", "Group", "." ]
python
train
40.105263
scanny/python-pptx
pptx/chart/xlsx.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/chart/xlsx.py#L125-L135
def _populate_worksheet(self, workbook, worksheet): """ Write the chart data contents to *worksheet* in category chart layout. Write categories starting in the first column starting in the second row, and proceeding one column per category level (for charts having multi-level categories). Write series as columns starting in the next following column, placing the series title in the first cell. """ self._write_categories(workbook, worksheet) self._write_series(workbook, worksheet)
[ "def", "_populate_worksheet", "(", "self", ",", "workbook", ",", "worksheet", ")", ":", "self", ".", "_write_categories", "(", "workbook", ",", "worksheet", ")", "self", ".", "_write_series", "(", "workbook", ",", "worksheet", ")" ]
Write the chart data contents to *worksheet* in category chart layout. Write categories starting in the first column starting in the second row, and proceeding one column per category level (for charts having multi-level categories). Write series as columns starting in the next following column, placing the series title in the first cell.
[ "Write", "the", "chart", "data", "contents", "to", "*", "worksheet", "*", "in", "category", "chart", "layout", ".", "Write", "categories", "starting", "in", "the", "first", "column", "starting", "in", "the", "second", "row", "and", "proceeding", "one", "column", "per", "category", "level", "(", "for", "charts", "having", "multi", "-", "level", "categories", ")", ".", "Write", "series", "as", "columns", "starting", "in", "the", "next", "following", "column", "placing", "the", "series", "title", "in", "the", "first", "cell", "." ]
python
train
50.363636
trailofbits/manticore
manticore/core/smtlib/solver.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/core/smtlib/solver.py#L490-L543
def get_value(self, constraints, expression): """ Ask the solver for one possible result of given expression using given set of constraints. """ if not issymbolic(expression): return expression assert isinstance(expression, (Bool, BitVec, Array)) with constraints as temp_cs: if isinstance(expression, Bool): var = temp_cs.new_bool() elif isinstance(expression, BitVec): var = temp_cs.new_bitvec(expression.size) elif isinstance(expression, Array): var = [] result = [] for i in range(expression.index_max): subvar = temp_cs.new_bitvec(expression.value_bits) var.append(subvar) temp_cs.add(subvar == simplify(expression[i])) self._reset(temp_cs) if not self._is_sat(): raise SolverError('Model is not available') for i in range(expression.index_max): self._send('(get-value (%s))' % var[i].name) ret = self._recv() assert ret.startswith('((') and ret.endswith('))') pattern, base = self._get_value_fmt m = pattern.match(ret) expr, value = m.group('expr'), m.group('value') result.append(int(value, base)) return bytes(result) temp_cs.add(var == expression) self._reset(temp_cs) if not self._is_sat(): raise SolverError('Model is not available') self._send('(get-value (%s))' % var.name) ret = self._recv() if not (ret.startswith('((') and ret.endswith('))')): raise SolverError('SMTLIB error parsing response: %s' % ret) if isinstance(expression, Bool): return {'true': True, 'false': False}[ret[2:-2].split(' ')[1]] if isinstance(expression, BitVec): pattern, base = self._get_value_fmt m = pattern.match(ret) expr, value = m.group('expr'), m.group('value') return int(value, base) raise NotImplementedError("get_value only implemented for Bool and BitVec")
[ "def", "get_value", "(", "self", ",", "constraints", ",", "expression", ")", ":", "if", "not", "issymbolic", "(", "expression", ")", ":", "return", "expression", "assert", "isinstance", "(", "expression", ",", "(", "Bool", ",", "BitVec", ",", "Array", ")", ")", "with", "constraints", "as", "temp_cs", ":", "if", "isinstance", "(", "expression", ",", "Bool", ")", ":", "var", "=", "temp_cs", ".", "new_bool", "(", ")", "elif", "isinstance", "(", "expression", ",", "BitVec", ")", ":", "var", "=", "temp_cs", ".", "new_bitvec", "(", "expression", ".", "size", ")", "elif", "isinstance", "(", "expression", ",", "Array", ")", ":", "var", "=", "[", "]", "result", "=", "[", "]", "for", "i", "in", "range", "(", "expression", ".", "index_max", ")", ":", "subvar", "=", "temp_cs", ".", "new_bitvec", "(", "expression", ".", "value_bits", ")", "var", ".", "append", "(", "subvar", ")", "temp_cs", ".", "add", "(", "subvar", "==", "simplify", "(", "expression", "[", "i", "]", ")", ")", "self", ".", "_reset", "(", "temp_cs", ")", "if", "not", "self", ".", "_is_sat", "(", ")", ":", "raise", "SolverError", "(", "'Model is not available'", ")", "for", "i", "in", "range", "(", "expression", ".", "index_max", ")", ":", "self", ".", "_send", "(", "'(get-value (%s))'", "%", "var", "[", "i", "]", ".", "name", ")", "ret", "=", "self", ".", "_recv", "(", ")", "assert", "ret", ".", "startswith", "(", "'(('", ")", "and", "ret", ".", "endswith", "(", "'))'", ")", "pattern", ",", "base", "=", "self", ".", "_get_value_fmt", "m", "=", "pattern", ".", "match", "(", "ret", ")", "expr", ",", "value", "=", "m", ".", "group", "(", "'expr'", ")", ",", "m", ".", "group", "(", "'value'", ")", "result", ".", "append", "(", "int", "(", "value", ",", "base", ")", ")", "return", "bytes", "(", "result", ")", "temp_cs", ".", "add", "(", "var", "==", "expression", ")", "self", ".", "_reset", "(", "temp_cs", ")", "if", "not", "self", ".", "_is_sat", "(", ")", ":", "raise", "SolverError", "(", "'Model is not available'", ")", "self", ".", "_send", "(", "'(get-value (%s))'", "%", "var", ".", "name", ")", "ret", "=", "self", ".", "_recv", "(", ")", "if", "not", "(", "ret", ".", "startswith", "(", "'(('", ")", "and", "ret", ".", "endswith", "(", "'))'", ")", ")", ":", "raise", "SolverError", "(", "'SMTLIB error parsing response: %s'", "%", "ret", ")", "if", "isinstance", "(", "expression", ",", "Bool", ")", ":", "return", "{", "'true'", ":", "True", ",", "'false'", ":", "False", "}", "[", "ret", "[", "2", ":", "-", "2", "]", ".", "split", "(", "' '", ")", "[", "1", "]", "]", "if", "isinstance", "(", "expression", ",", "BitVec", ")", ":", "pattern", ",", "base", "=", "self", ".", "_get_value_fmt", "m", "=", "pattern", ".", "match", "(", "ret", ")", "expr", ",", "value", "=", "m", ".", "group", "(", "'expr'", ")", ",", "m", ".", "group", "(", "'value'", ")", "return", "int", "(", "value", ",", "base", ")", "raise", "NotImplementedError", "(", "\"get_value only implemented for Bool and BitVec\"", ")" ]
Ask the solver for one possible result of given expression using given set of constraints.
[ "Ask", "the", "solver", "for", "one", "possible", "result", "of", "given", "expression", "using", "given", "set", "of", "constraints", "." ]
python
valid
41.407407
nephics/mat4py
mat4py/loadmat.py
https://github.com/nephics/mat4py/blob/6c1a2ad903937437cc5f24f3c3f5aa2c5a77a1c1/mat4py/loadmat.py#L146-L167
def read_element_tag(fd, endian): """Read data element tag: type and number of bytes. If tag is of the Small Data Element (SDE) type the element data is also returned. """ data = fd.read(8) mtpn = unpack(endian, 'I', data[:4]) # The most significant two bytes of mtpn will always be 0, # if they are not, this must be SDE format num_bytes = mtpn >> 16 if num_bytes > 0: # small data element format mtpn = mtpn & 0xFFFF if num_bytes > 4: raise ParseError('Error parsing Small Data Element (SDE) ' 'formatted data') data = data[4:4 + num_bytes] else: # regular element num_bytes = unpack(endian, 'I', data[4:]) data = None return (mtpn, num_bytes, data)
[ "def", "read_element_tag", "(", "fd", ",", "endian", ")", ":", "data", "=", "fd", ".", "read", "(", "8", ")", "mtpn", "=", "unpack", "(", "endian", ",", "'I'", ",", "data", "[", ":", "4", "]", ")", "# The most significant two bytes of mtpn will always be 0,", "# if they are not, this must be SDE format", "num_bytes", "=", "mtpn", ">>", "16", "if", "num_bytes", ">", "0", ":", "# small data element format", "mtpn", "=", "mtpn", "&", "0xFFFF", "if", "num_bytes", ">", "4", ":", "raise", "ParseError", "(", "'Error parsing Small Data Element (SDE) '", "'formatted data'", ")", "data", "=", "data", "[", "4", ":", "4", "+", "num_bytes", "]", "else", ":", "# regular element", "num_bytes", "=", "unpack", "(", "endian", ",", "'I'", ",", "data", "[", "4", ":", "]", ")", "data", "=", "None", "return", "(", "mtpn", ",", "num_bytes", ",", "data", ")" ]
Read data element tag: type and number of bytes. If tag is of the Small Data Element (SDE) type the element data is also returned.
[ "Read", "data", "element", "tag", ":", "type", "and", "number", "of", "bytes", ".", "If", "tag", "is", "of", "the", "Small", "Data", "Element", "(", "SDE", ")", "type", "the", "element", "data", "is", "also", "returned", "." ]
python
valid
35.227273
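A self-contained sketch of the 8-byte tag layout that read_element_tag parses: the lower 16 bits of the first word carry the data type and, for a Small Data Element, the upper 16 bits carry the byte count with the payload packed into the remaining four bytes (struct stands in for mat4py's own unpack helper):

    import io
    import struct

    def read_tag(fd, endian='<'):
        # Simplified re-reading of the element tag described in the record above.
        data = fd.read(8)
        mtpn = struct.unpack(endian + 'I', data[:4])[0]
        num_bytes = mtpn >> 16
        if num_bytes > 0:                       # Small Data Element: payload is inside the tag
            return mtpn & 0xFFFF, num_bytes, data[4:4 + num_bytes]
        num_bytes = struct.unpack(endian + 'I', data[4:])[0]
        return mtpn, num_bytes, None            # regular element: payload follows the tag

    # A made-up SDE tag: type code 2 with 3 bytes of inline data, padded to 8 bytes.
    sde = struct.pack('<HH', 2, 3) + b'abc' + b'\x00'
    print(read_tag(io.BytesIO(sde)))            # (2, 3, b'abc')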
jkocherhans/alto
alto/urlviz.py
https://github.com/jkocherhans/alto/blob/79edc058022636fece7902b946ecac710713dfa4/alto/urlviz.py#L142-L160
def get_decorators(func): """ Return a list of decorator names for this function. """ decorators = [] # Parse the source code of the function with ast to find the names of # all of its decorators. tree = ast.parse(inspect.getsource(func)) for node in ast.iter_child_nodes(tree): for dnode in node.decorator_list: if isinstance(dnode, ast.Name): decorator = func.func_globals[dnode.id] elif isinstance(dnode, ast.Attribute): module = func.func_globals[dnode.value.id] decorator = getattr(module, dnode.attr) else: raise Exception("Unable to handle decorator node: %s" % dnode) decorators.append(decorator) return decorators
[ "def", "get_decorators", "(", "func", ")", ":", "decorators", "=", "[", "]", "# Parse the source code of the function with ast to find the names of", "# all of its decorators.", "tree", "=", "ast", ".", "parse", "(", "inspect", ".", "getsource", "(", "func", ")", ")", "for", "node", "in", "ast", ".", "iter_child_nodes", "(", "tree", ")", ":", "for", "dnode", "in", "node", ".", "decorator_list", ":", "if", "isinstance", "(", "dnode", ",", "ast", ".", "Name", ")", ":", "decorator", "=", "func", ".", "func_globals", "[", "dnode", ".", "id", "]", "elif", "isinstance", "(", "dnode", ",", "ast", ".", "Attribute", ")", ":", "module", "=", "func", ".", "func_globals", "[", "dnode", ".", "value", ".", "id", "]", "decorator", "=", "getattr", "(", "module", ",", "dnode", ".", "attr", ")", "else", ":", "raise", "Exception", "(", "\"Unable to handle decorator node: %s\"", "%", "dnode", ")", "decorators", ".", "append", "(", "decorator", ")", "return", "decorators" ]
Return a list of decorator names for this function.
[ "Return", "a", "list", "of", "decorator", "names", "for", "this", "function", "." ]
python
train
40
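The record above relies on the Python 2-only func.func_globals attribute; a hedged Python 3 adaptation of the same idea, shown with a simple registration decorator so that inspect.getsource still sees the @-line:

    import ast
    import inspect

    def get_decorators(func):
        # Same approach as the record, with __globals__ replacing func_globals.
        decorators = []
        tree = ast.parse(inspect.getsource(func))
        for node in ast.iter_child_nodes(tree):
            for dnode in node.decorator_list:
                if isinstance(dnode, ast.Name):
                    decorators.append(func.__globals__[dnode.id])
                elif isinstance(dnode, ast.Attribute):
                    module = func.__globals__[dnode.value.id]
                    decorators.append(getattr(module, dnode.attr))
        return decorators

    def register(fn):
        # A decorator that returns the original function, so greet keeps its own source.
        return fn

    @register
    def greet():
        return 'hi'

    print(get_decorators(greet))  # [<function register ...>]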
vaexio/vaex
packages/vaex-core/vaex/utils.py
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/utils.py#L588-L595
def write_to(f, mode): """Flexible writing, where f can be a filename or f object, if filename, closed after writing""" if hasattr(f, 'write'): yield f else: f = open(f, mode) yield f f.close()
[ "def", "write_to", "(", "f", ",", "mode", ")", ":", "if", "hasattr", "(", "f", ",", "'write'", ")", ":", "yield", "f", "else", ":", "f", "=", "open", "(", "f", ",", "mode", ")", "yield", "f", "f", ".", "close", "(", ")" ]
Flexible writing, where f can be a filename or f object, if filename, closed after writing
[ "Flexible", "writing", "where", "f", "can", "be", "a", "filename", "or", "f", "object", "if", "filename", "closed", "after", "writing" ]
python
test
28.75
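write_to above is a generator that is presumably wrapped as a context manager in vaex (the decorator sits outside the excerpt); a self-contained sketch of the same pattern using contextlib:

    import sys
    from contextlib import contextmanager

    @contextmanager
    def write_to(f, mode):
        # Accept an already-open file object or a path; only close what this function opened.
        if hasattr(f, 'write'):
            yield f
        else:
            f = open(f, mode)
            yield f
            f.close()

    with write_to('out.txt', 'w') as fh:    # a path: opened here and closed afterwards
        fh.write('hello\n')

    with write_to(sys.stdout, 'w') as fh:   # an existing file object: passed through, left open
        fh.write('hello again\n')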
Naresh1318/crystal
crystal/app.py
https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/app.py#L104-L121
def get_projects(): """ Send a dictionary of projects that are available on the database. Usage description: This function is usually called to get and display the list of projects available in the database. :return: JSON, {<int_keys>: <project_name>} """ assert request.method == "GET", "GET request expected received {}".format(request.method) try: if request.method == 'GET': projects = utils.get_projects() return jsonify(projects) except Exception as e: logging.error(e) return jsonify({"0": "__EMPTY"})
[ "def", "get_projects", "(", ")", ":", "assert", "request", ".", "method", "==", "\"GET\"", ",", "\"GET request expected received {}\"", ".", "format", "(", "request", ".", "method", ")", "try", ":", "if", "request", ".", "method", "==", "'GET'", ":", "projects", "=", "utils", ".", "get_projects", "(", ")", "return", "jsonify", "(", "projects", ")", "except", "Exception", "as", "e", ":", "logging", ".", "error", "(", "e", ")", "return", "jsonify", "(", "{", "\"0\"", ":", "\"__EMPTY\"", "}", ")" ]
Send a dictionary of projects that are available on the database. Usage description: This function is usually called to get and display the list of projects available in the database. :return: JSON, {<int_keys>: <project_name>}
[ "Send", "a", "dictionary", "of", "projects", "that", "are", "available", "on", "the", "database", "." ]
python
train
31.888889
materialsproject/pymatgen
pymatgen/io/abinit/flows.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/flows.py#L473-L479
def errored_tasks(self): """List of errored tasks.""" etasks = [] for status in [self.S_ERROR, self.S_QCRITICAL, self.S_ABICRITICAL]: etasks.extend(list(self.iflat_tasks(status=status))) return set(etasks)
[ "def", "errored_tasks", "(", "self", ")", ":", "etasks", "=", "[", "]", "for", "status", "in", "[", "self", ".", "S_ERROR", ",", "self", ".", "S_QCRITICAL", ",", "self", ".", "S_ABICRITICAL", "]", ":", "etasks", ".", "extend", "(", "list", "(", "self", ".", "iflat_tasks", "(", "status", "=", "status", ")", ")", ")", "return", "set", "(", "etasks", ")" ]
List of errored tasks.
[ "List", "of", "errored", "tasks", "." ]
python
train
34.857143
saltstack/salt
salt/modules/azurearm_network.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/azurearm_network.py#L272-L406
def security_rule_create_or_update(name, access, direction, priority, protocol, security_group, resource_group, source_address_prefix=None, destination_address_prefix=None, source_port_range=None, destination_port_range=None, source_address_prefixes=None, destination_address_prefixes=None, source_port_ranges=None, destination_port_ranges=None, **kwargs): ''' .. versionadded:: 2019.2.0 Create or update a security rule within a specified network security group. :param name: The name of the security rule to create. :param access: 'allow' or 'deny' :param direction: 'inbound' or 'outbound' :param priority: Integer between 100 and 4096 used for ordering rule application. :param protocol: 'tcp', 'udp', or '*' :param destination_address_prefix: The CIDR or destination IP range. Asterix '*' can also be used to match all destination IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from. :param destination_port_range: The destination port or range. Integer or range between 0 and 65535. Asterix '*' can also be used to match all ports. :param source_address_prefix: The CIDR or source IP range. Asterix '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from. :param source_port_range: The source port or range. Integer or range between 0 and 65535. Asterix '*' can also be used to match all ports. :param destination_address_prefixes: A list of destination_address_prefix values. This parameter overrides destination_address_prefix and will cause any value entered there to be ignored. :param destination_port_ranges: A list of destination_port_range values. This parameter overrides destination_port_range and will cause any value entered there to be ignored. :param source_address_prefixes: A list of source_address_prefix values. This parameter overrides source_address_prefix and will cause any value entered there to be ignored. :param source_port_ranges: A list of source_port_range values. This parameter overrides source_port_range and will cause any value entered there to be ignored. :param security_group: The network security group containing the security rule. :param resource_group: The resource group name assigned to the network security group. CLI Example: .. 
code-block:: bash salt-call azurearm_network.security_rule_create_or_update testrule1 allow outbound 101 tcp testnsg testgroup \ source_address_prefix='*' destination_address_prefix=internet source_port_range='*' \ destination_port_range='1-1024' ''' exclusive_params = [ ('source_port_ranges', 'source_port_range'), ('source_address_prefixes', 'source_address_prefix'), ('destination_port_ranges', 'destination_port_range'), ('destination_address_prefixes', 'destination_address_prefix'), ] for params in exclusive_params: # pylint: disable=eval-used if not eval(params[0]) and not eval(params[1]): log.error( 'Either the %s or %s parameter must be provided!', params[0], params[1] ) return False # pylint: disable=eval-used if eval(params[0]): # pylint: disable=exec-used exec('{0} = None'.format(params[1])) netconn = __utils__['azurearm.get_client']('network', **kwargs) try: rulemodel = __utils__['azurearm.create_object_model']( 'network', 'SecurityRule', name=name, access=access, direction=direction, priority=priority, protocol=protocol, source_port_ranges=source_port_ranges, source_port_range=source_port_range, source_address_prefixes=source_address_prefixes, source_address_prefix=source_address_prefix, destination_port_ranges=destination_port_ranges, destination_port_range=destination_port_range, destination_address_prefixes=destination_address_prefixes, destination_address_prefix=destination_address_prefix, **kwargs ) except TypeError as exc: result = {'error': 'The object model could not be built. ({0})'.format(str(exc))} return result try: secrule = netconn.security_rules.create_or_update( resource_group_name=resource_group, network_security_group_name=security_group, security_rule_name=name, security_rule_parameters=rulemodel ) secrule.wait() secrule_result = secrule.result() result = secrule_result.as_dict() except CloudError as exc: __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs) result = {'error': str(exc)} except SerializationError as exc: result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))} return result
[ "def", "security_rule_create_or_update", "(", "name", ",", "access", ",", "direction", ",", "priority", ",", "protocol", ",", "security_group", ",", "resource_group", ",", "source_address_prefix", "=", "None", ",", "destination_address_prefix", "=", "None", ",", "source_port_range", "=", "None", ",", "destination_port_range", "=", "None", ",", "source_address_prefixes", "=", "None", ",", "destination_address_prefixes", "=", "None", ",", "source_port_ranges", "=", "None", ",", "destination_port_ranges", "=", "None", ",", "*", "*", "kwargs", ")", ":", "exclusive_params", "=", "[", "(", "'source_port_ranges'", ",", "'source_port_range'", ")", ",", "(", "'source_address_prefixes'", ",", "'source_address_prefix'", ")", ",", "(", "'destination_port_ranges'", ",", "'destination_port_range'", ")", ",", "(", "'destination_address_prefixes'", ",", "'destination_address_prefix'", ")", ",", "]", "for", "params", "in", "exclusive_params", ":", "# pylint: disable=eval-used", "if", "not", "eval", "(", "params", "[", "0", "]", ")", "and", "not", "eval", "(", "params", "[", "1", "]", ")", ":", "log", ".", "error", "(", "'Either the %s or %s parameter must be provided!'", ",", "params", "[", "0", "]", ",", "params", "[", "1", "]", ")", "return", "False", "# pylint: disable=eval-used", "if", "eval", "(", "params", "[", "0", "]", ")", ":", "# pylint: disable=exec-used", "exec", "(", "'{0} = None'", ".", "format", "(", "params", "[", "1", "]", ")", ")", "netconn", "=", "__utils__", "[", "'azurearm.get_client'", "]", "(", "'network'", ",", "*", "*", "kwargs", ")", "try", ":", "rulemodel", "=", "__utils__", "[", "'azurearm.create_object_model'", "]", "(", "'network'", ",", "'SecurityRule'", ",", "name", "=", "name", ",", "access", "=", "access", ",", "direction", "=", "direction", ",", "priority", "=", "priority", ",", "protocol", "=", "protocol", ",", "source_port_ranges", "=", "source_port_ranges", ",", "source_port_range", "=", "source_port_range", ",", "source_address_prefixes", "=", "source_address_prefixes", ",", "source_address_prefix", "=", "source_address_prefix", ",", "destination_port_ranges", "=", "destination_port_ranges", ",", "destination_port_range", "=", "destination_port_range", ",", "destination_address_prefixes", "=", "destination_address_prefixes", ",", "destination_address_prefix", "=", "destination_address_prefix", ",", "*", "*", "kwargs", ")", "except", "TypeError", "as", "exc", ":", "result", "=", "{", "'error'", ":", "'The object model could not be built. ({0})'", ".", "format", "(", "str", "(", "exc", ")", ")", "}", "return", "result", "try", ":", "secrule", "=", "netconn", ".", "security_rules", ".", "create_or_update", "(", "resource_group_name", "=", "resource_group", ",", "network_security_group_name", "=", "security_group", ",", "security_rule_name", "=", "name", ",", "security_rule_parameters", "=", "rulemodel", ")", "secrule", ".", "wait", "(", ")", "secrule_result", "=", "secrule", ".", "result", "(", ")", "result", "=", "secrule_result", ".", "as_dict", "(", ")", "except", "CloudError", "as", "exc", ":", "__utils__", "[", "'azurearm.log_cloud_error'", "]", "(", "'network'", ",", "str", "(", "exc", ")", ",", "*", "*", "kwargs", ")", "result", "=", "{", "'error'", ":", "str", "(", "exc", ")", "}", "except", "SerializationError", "as", "exc", ":", "result", "=", "{", "'error'", ":", "'The object model could not be parsed. ({0})'", ".", "format", "(", "str", "(", "exc", ")", ")", "}", "return", "result" ]
.. versionadded:: 2019.2.0 Create or update a security rule within a specified network security group. :param name: The name of the security rule to create. :param access: 'allow' or 'deny' :param direction: 'inbound' or 'outbound' :param priority: Integer between 100 and 4096 used for ordering rule application. :param protocol: 'tcp', 'udp', or '*' :param destination_address_prefix: The CIDR or destination IP range. Asterix '*' can also be used to match all destination IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from. :param destination_port_range: The destination port or range. Integer or range between 0 and 65535. Asterix '*' can also be used to match all ports. :param source_address_prefix: The CIDR or source IP range. Asterix '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from. :param source_port_range: The source port or range. Integer or range between 0 and 65535. Asterix '*' can also be used to match all ports. :param destination_address_prefixes: A list of destination_address_prefix values. This parameter overrides destination_address_prefix and will cause any value entered there to be ignored. :param destination_port_ranges: A list of destination_port_range values. This parameter overrides destination_port_range and will cause any value entered there to be ignored. :param source_address_prefixes: A list of source_address_prefix values. This parameter overrides source_address_prefix and will cause any value entered there to be ignored. :param source_port_ranges: A list of source_port_range values. This parameter overrides source_port_range and will cause any value entered there to be ignored. :param security_group: The network security group containing the security rule. :param resource_group: The resource group name assigned to the network security group. CLI Example: .. code-block:: bash salt-call azurearm_network.security_rule_create_or_update testrule1 allow outbound 101 tcp testnsg testgroup \ source_address_prefix='*' destination_address_prefix=internet source_port_range='*' \ destination_port_range='1-1024'
[ "..", "versionadded", "::", "2019", ".", "2", ".", "0" ]
python
train
40.266667
ioos/compliance-checker
compliance_checker/cf/cf.py
https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/cf/cf.py#L1151-L1240
def check_standard_name(self, ds): ''' Check a variables's standard_name attribute to ensure that it meets CF compliance. CF §3.3 A standard name is associated with a variable via the attribute standard_name which takes a string value comprised of a standard name optionally followed by one or more blanks and a standard name modifier :param netCDF4.Dataset ds: An open netCDF dataset :rtype: list :return: List of results ''' ret_val = [] coord_vars = self._find_coord_vars(ds) aux_coord_vars = self._find_aux_coord_vars(ds) axis_vars = cfutil.get_axis_variables(ds) flag_vars = cfutil.get_flag_variables(ds) geophysical_vars = self._find_geophysical_vars(ds) variables_requiring_standard_names = coord_vars + aux_coord_vars + axis_vars + flag_vars + geophysical_vars for name in set(variables_requiring_standard_names): # Compression indices used in reduced horizontal grids or # compression schemes do not require attributes other than compress if cfutil.is_compression_coordinate(ds, name): continue ncvar = ds.variables[name] # §9 doesn't explicitly allow instance variables as coordinates but # it's loosely implied. Just in case, skip it. if hasattr(ncvar, 'cf_role'): continue # Unfortunately, §6.1 allows for string types to be listed as # coordinates. if ncvar.dtype.char == 'S': continue standard_name = getattr(ncvar, 'standard_name', None) standard_name, standard_name_modifier = self._split_standard_name(standard_name) long_name = getattr(ncvar, 'long_name', None) long_or_std_name = TestCtx(BaseCheck.HIGH, self.section_titles['3.3']) if long_name is not None: long_name_present = True long_or_std_name.assert_true(isinstance(long_name, basestring), "Attribute long_name for variable {} must be a string".format(name)) else: long_name_present = False # §1.3 The long_name and standard_name attributes are used to # describe the content of each variable. For backwards # compatibility with COARDS neither is required, but use of at # least one of them is strongly recommended. # If standard_name is not defined but long_name is, don't continue # the check for this variable if standard_name is not None: standard_name_present = True valid_std_name = TestCtx(BaseCheck.HIGH, self.section_titles['3.3']) valid_std_name.assert_true(isinstance(standard_name, basestring), "Attribute standard_name for variable {} must be a string".format(name)) if isinstance(standard_name, basestring): valid_std_name.assert_true(standard_name in self._std_names, "standard_name {} is not defined in Standard Name Table v{}".format( standard_name or 'undefined', self._std_names._version)) ret_val.append(valid_std_name.to_result()) # 2) optional - if modifiers, should be in table if standard_name_modifier is not None: valid_modifier = TestCtx(BaseCheck.HIGH, self.section_titles["3.3"]) allowed = ['detection_minimum', 'number_of_observations', 'standard_error', 'status_flag'] valid_modifier.assert_true(standard_name_modifier in allowed, "standard_name modifier {} for variable {} is not a valid modifier " "according to appendix C".format(standard_name_modifier, name)) ret_val.append(valid_modifier.to_result()) else: standard_name_present = False long_or_std_name.assert_true(long_name_present or standard_name_present, "Attribute long_name or/and standard_name is highly recommended for variable {}".format(name)) ret_val.append(long_or_std_name.to_result()) return ret_val
[ "def", "check_standard_name", "(", "self", ",", "ds", ")", ":", "ret_val", "=", "[", "]", "coord_vars", "=", "self", ".", "_find_coord_vars", "(", "ds", ")", "aux_coord_vars", "=", "self", ".", "_find_aux_coord_vars", "(", "ds", ")", "axis_vars", "=", "cfutil", ".", "get_axis_variables", "(", "ds", ")", "flag_vars", "=", "cfutil", ".", "get_flag_variables", "(", "ds", ")", "geophysical_vars", "=", "self", ".", "_find_geophysical_vars", "(", "ds", ")", "variables_requiring_standard_names", "=", "coord_vars", "+", "aux_coord_vars", "+", "axis_vars", "+", "flag_vars", "+", "geophysical_vars", "for", "name", "in", "set", "(", "variables_requiring_standard_names", ")", ":", "# Compression indices used in reduced horizontal grids or", "# compression schemes do not require attributes other than compress", "if", "cfutil", ".", "is_compression_coordinate", "(", "ds", ",", "name", ")", ":", "continue", "ncvar", "=", "ds", ".", "variables", "[", "name", "]", "# §9 doesn't explicitly allow instance variables as coordinates but", "# it's loosely implied. Just in case, skip it.", "if", "hasattr", "(", "ncvar", ",", "'cf_role'", ")", ":", "continue", "# Unfortunately, §6.1 allows for string types to be listed as", "# coordinates.", "if", "ncvar", ".", "dtype", ".", "char", "==", "'S'", ":", "continue", "standard_name", "=", "getattr", "(", "ncvar", ",", "'standard_name'", ",", "None", ")", "standard_name", ",", "standard_name_modifier", "=", "self", ".", "_split_standard_name", "(", "standard_name", ")", "long_name", "=", "getattr", "(", "ncvar", ",", "'long_name'", ",", "None", ")", "long_or_std_name", "=", "TestCtx", "(", "BaseCheck", ".", "HIGH", ",", "self", ".", "section_titles", "[", "'3.3'", "]", ")", "if", "long_name", "is", "not", "None", ":", "long_name_present", "=", "True", "long_or_std_name", ".", "assert_true", "(", "isinstance", "(", "long_name", ",", "basestring", ")", ",", "\"Attribute long_name for variable {} must be a string\"", ".", "format", "(", "name", ")", ")", "else", ":", "long_name_present", "=", "False", "# §1.3 The long_name and standard_name attributes are used to", "# describe the content of each variable. 
For backwards", "# compatibility with COARDS neither is required, but use of at", "# least one of them is strongly recommended.", "# If standard_name is not defined but long_name is, don't continue", "# the check for this variable", "if", "standard_name", "is", "not", "None", ":", "standard_name_present", "=", "True", "valid_std_name", "=", "TestCtx", "(", "BaseCheck", ".", "HIGH", ",", "self", ".", "section_titles", "[", "'3.3'", "]", ")", "valid_std_name", ".", "assert_true", "(", "isinstance", "(", "standard_name", ",", "basestring", ")", ",", "\"Attribute standard_name for variable {} must be a string\"", ".", "format", "(", "name", ")", ")", "if", "isinstance", "(", "standard_name", ",", "basestring", ")", ":", "valid_std_name", ".", "assert_true", "(", "standard_name", "in", "self", ".", "_std_names", ",", "\"standard_name {} is not defined in Standard Name Table v{}\"", ".", "format", "(", "standard_name", "or", "'undefined'", ",", "self", ".", "_std_names", ".", "_version", ")", ")", "ret_val", ".", "append", "(", "valid_std_name", ".", "to_result", "(", ")", ")", "# 2) optional - if modifiers, should be in table", "if", "standard_name_modifier", "is", "not", "None", ":", "valid_modifier", "=", "TestCtx", "(", "BaseCheck", ".", "HIGH", ",", "self", ".", "section_titles", "[", "\"3.3\"", "]", ")", "allowed", "=", "[", "'detection_minimum'", ",", "'number_of_observations'", ",", "'standard_error'", ",", "'status_flag'", "]", "valid_modifier", ".", "assert_true", "(", "standard_name_modifier", "in", "allowed", ",", "\"standard_name modifier {} for variable {} is not a valid modifier \"", "\"according to appendix C\"", ".", "format", "(", "standard_name_modifier", ",", "name", ")", ")", "ret_val", ".", "append", "(", "valid_modifier", ".", "to_result", "(", ")", ")", "else", ":", "standard_name_present", "=", "False", "long_or_std_name", ".", "assert_true", "(", "long_name_present", "or", "standard_name_present", ",", "\"Attribute long_name or/and standard_name is highly recommended for variable {}\"", ".", "format", "(", "name", ")", ")", "ret_val", ".", "append", "(", "long_or_std_name", ".", "to_result", "(", ")", ")", "return", "ret_val" ]
Check a variables's standard_name attribute to ensure that it meets CF compliance. CF §3.3 A standard name is associated with a variable via the attribute standard_name which takes a string value comprised of a standard name optionally followed by one or more blanks and a standard name modifier :param netCDF4.Dataset ds: An open netCDF dataset :rtype: list :return: List of results
[ "Check", "a", "variables", "s", "standard_name", "attribute", "to", "ensure", "that", "it", "meets", "CF", "compliance", "." ]
python
train
50.822222
kkinder/NdbSearchableBase
NdbSearchableBase/SearchableModel.py
https://github.com/kkinder/NdbSearchableBase/blob/4f999336b464704a0929cec135c1f09fb1ddfb7c/NdbSearchableBase/SearchableModel.py#L159-L172
def from_urlsafe(cls, urlsafe): """ Returns an instance of the model from a urlsafe string. :param urlsafe: urlsafe key :return: Instance of cls """ try: key = ndb.Key(urlsafe=urlsafe) except: return None obj = key.get() if obj and isinstance(obj, cls): return obj
[ "def", "from_urlsafe", "(", "cls", ",", "urlsafe", ")", ":", "try", ":", "key", "=", "ndb", ".", "Key", "(", "urlsafe", "=", "urlsafe", ")", "except", ":", "return", "None", "obj", "=", "key", ".", "get", "(", ")", "if", "obj", "and", "isinstance", "(", "obj", ",", "cls", ")", ":", "return", "obj" ]
Returns an instance of the model from a urlsafe string. :param urlsafe: urlsafe key :return: Instance of cls
[ "Returns", "an", "instance", "of", "the", "model", "from", "a", "urlsafe", "string", "." ]
python
train
25.714286
secure-systems-lab/securesystemslib
securesystemslib/keys.py
https://github.com/secure-systems-lab/securesystemslib/blob/beb3109d5bb462e5a60eed88fb40ed1167bd354e/securesystemslib/keys.py#L744-L882
def verify_signature(key_dict, signature, data): """ <Purpose> Determine whether the private key belonging to 'key_dict' produced 'signature'. verify_signature() will use the public key found in 'key_dict', the 'sig' objects contained in 'signature', and 'data' to complete the verification. >>> ed25519_key = generate_ed25519_key() >>> data = 'The quick brown fox jumps over the lazy dog' >>> signature = create_signature(ed25519_key, data) >>> verify_signature(ed25519_key, signature, data) True >>> verify_signature(ed25519_key, signature, 'bad_data') False >>> rsa_key = generate_rsa_key() >>> signature = create_signature(rsa_key, data) >>> verify_signature(rsa_key, signature, data) True >>> verify_signature(rsa_key, signature, 'bad_data') False >>> ecdsa_key = generate_ecdsa_key() >>> signature = create_signature(ecdsa_key, data) >>> verify_signature(ecdsa_key, signature, data) True >>> verify_signature(ecdsa_key, signature, 'bad_data') False <Arguments> key_dict: A dictionary containing the keys and other identifying information. If 'key_dict' is an RSA key, it has the form: {'keytype': 'rsa', 'scheme': 'rsassa-pss-sha256', 'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...', 'keyval': {'public': '-----BEGIN RSA PUBLIC KEY----- ...', 'private': '-----BEGIN RSA PRIVATE KEY----- ...'}} The public and private keys are strings in PEM format. signature: The signature dictionary produced by one of the key generation functions. 'signature' has the form: {'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...', 'sig': sig}. Conformant to 'securesystemslib.formats.SIGNATURE_SCHEMA'. data: Data that the signature is expected to be over. This should be a bytes object; data should be encoded/serialized before it is passed here.) This is the same value that can be passed into securesystemslib.create_signature() in order to create the signature. <Exceptions> securesystemslib.exceptions.FormatError, raised if either 'key_dict' or 'signature' are improperly formatted. securesystemslib.exceptions.UnsupportedAlgorithmError, if 'key_dict' or 'signature' specifies an unsupported algorithm. securesystemslib.exceptions.CryptoError, if the KEYID in the given 'key_dict' does not match the KEYID in 'signature'. <Side Effects> The cryptography library specified in 'settings' called to do the actual verification. <Returns> Boolean. True if the signature is valid, False otherwise. """ # Does 'key_dict' have the correct format? # This check will ensure 'key_dict' has the appropriate number # of objects and object types, and that all dict keys are properly named. # Raise 'securesystemslib.exceptions.FormatError' if the check fails. securesystemslib.formats.ANYKEY_SCHEMA.check_match(key_dict) # Does 'signature' have the correct format? securesystemslib.formats.SIGNATURE_SCHEMA.check_match(signature) # Verify that the KEYID in 'key_dict' matches the KEYID listed in the # 'signature'. if key_dict['keyid'] != signature['keyid']: raise securesystemslib.exceptions.CryptoError('The KEYID (' ' ' + repr(key_dict['keyid']) + ' ) in the given key does not match' ' the KEYID ( ' + repr(signature['keyid']) + ' ) in the signature.') else: logger.debug('The KEYIDs of key_dict and the signature match.') # Using the public key belonging to 'key_dict' # (i.e., rsakey_dict['keyval']['public']), verify whether 'signature' # was produced by key_dict's corresponding private key # key_dict['keyval']['private']. 
sig = signature['sig'] sig = binascii.unhexlify(sig.encode('utf-8')) public = key_dict['keyval']['public'] keytype = key_dict['keytype'] scheme = key_dict['scheme'] valid_signature = False if keytype == 'rsa': if scheme == 'rsassa-pss-sha256': valid_signature = securesystemslib.pyca_crypto_keys.verify_rsa_signature(sig, scheme, public, data) else: raise securesystemslib.exceptions.UnsupportedAlgorithmError('Unsupported' ' signature scheme is specified: ' + repr(scheme)) elif keytype == 'ed25519': if scheme == 'ed25519': public = binascii.unhexlify(public.encode('utf-8')) valid_signature = securesystemslib.ed25519_keys.verify_signature(public, scheme, sig, data, use_pynacl=USE_PYNACL) else: raise securesystemslib.exceptions.UnsupportedAlgorithmError('Unsupported' ' signature scheme is specified: ' + repr(scheme)) elif keytype == 'ecdsa-sha2-nistp256': if scheme == 'ecdsa-sha2-nistp256': valid_signature = securesystemslib.ecdsa_keys.verify_signature(public, scheme, sig, data) else: raise securesystemslib.exceptions.UnsupportedAlgorithmError('Unsupported' ' signature scheme is specified: ' + repr(scheme)) # 'securesystemslib.formats.ANYKEY_SCHEMA' should have detected invalid key # types. This is a defensive check against an invalid key type. else: # pragma: no cover raise TypeError('Unsupported key type.') return valid_signature
[ "def", "verify_signature", "(", "key_dict", ",", "signature", ",", "data", ")", ":", "# Does 'key_dict' have the correct format?", "# This check will ensure 'key_dict' has the appropriate number", "# of objects and object types, and that all dict keys are properly named.", "# Raise 'securesystemslib.exceptions.FormatError' if the check fails.", "securesystemslib", ".", "formats", ".", "ANYKEY_SCHEMA", ".", "check_match", "(", "key_dict", ")", "# Does 'signature' have the correct format?", "securesystemslib", ".", "formats", ".", "SIGNATURE_SCHEMA", ".", "check_match", "(", "signature", ")", "# Verify that the KEYID in 'key_dict' matches the KEYID listed in the", "# 'signature'.", "if", "key_dict", "[", "'keyid'", "]", "!=", "signature", "[", "'keyid'", "]", ":", "raise", "securesystemslib", ".", "exceptions", ".", "CryptoError", "(", "'The KEYID ('", "' '", "+", "repr", "(", "key_dict", "[", "'keyid'", "]", ")", "+", "' ) in the given key does not match'", "' the KEYID ( '", "+", "repr", "(", "signature", "[", "'keyid'", "]", ")", "+", "' ) in the signature.'", ")", "else", ":", "logger", ".", "debug", "(", "'The KEYIDs of key_dict and the signature match.'", ")", "# Using the public key belonging to 'key_dict'", "# (i.e., rsakey_dict['keyval']['public']), verify whether 'signature'", "# was produced by key_dict's corresponding private key", "# key_dict['keyval']['private'].", "sig", "=", "signature", "[", "'sig'", "]", "sig", "=", "binascii", ".", "unhexlify", "(", "sig", ".", "encode", "(", "'utf-8'", ")", ")", "public", "=", "key_dict", "[", "'keyval'", "]", "[", "'public'", "]", "keytype", "=", "key_dict", "[", "'keytype'", "]", "scheme", "=", "key_dict", "[", "'scheme'", "]", "valid_signature", "=", "False", "if", "keytype", "==", "'rsa'", ":", "if", "scheme", "==", "'rsassa-pss-sha256'", ":", "valid_signature", "=", "securesystemslib", ".", "pyca_crypto_keys", ".", "verify_rsa_signature", "(", "sig", ",", "scheme", ",", "public", ",", "data", ")", "else", ":", "raise", "securesystemslib", ".", "exceptions", ".", "UnsupportedAlgorithmError", "(", "'Unsupported'", "' signature scheme is specified: '", "+", "repr", "(", "scheme", ")", ")", "elif", "keytype", "==", "'ed25519'", ":", "if", "scheme", "==", "'ed25519'", ":", "public", "=", "binascii", ".", "unhexlify", "(", "public", ".", "encode", "(", "'utf-8'", ")", ")", "valid_signature", "=", "securesystemslib", ".", "ed25519_keys", ".", "verify_signature", "(", "public", ",", "scheme", ",", "sig", ",", "data", ",", "use_pynacl", "=", "USE_PYNACL", ")", "else", ":", "raise", "securesystemslib", ".", "exceptions", ".", "UnsupportedAlgorithmError", "(", "'Unsupported'", "' signature scheme is specified: '", "+", "repr", "(", "scheme", ")", ")", "elif", "keytype", "==", "'ecdsa-sha2-nistp256'", ":", "if", "scheme", "==", "'ecdsa-sha2-nistp256'", ":", "valid_signature", "=", "securesystemslib", ".", "ecdsa_keys", ".", "verify_signature", "(", "public", ",", "scheme", ",", "sig", ",", "data", ")", "else", ":", "raise", "securesystemslib", ".", "exceptions", ".", "UnsupportedAlgorithmError", "(", "'Unsupported'", "' signature scheme is specified: '", "+", "repr", "(", "scheme", ")", ")", "# 'securesystemslib.formats.ANYKEY_SCHEMA' should have detected invalid key", "# types. This is a defensive check against an invalid key type.", "else", ":", "# pragma: no cover", "raise", "TypeError", "(", "'Unsupported key type.'", ")", "return", "valid_signature" ]
<Purpose> Determine whether the private key belonging to 'key_dict' produced 'signature'. verify_signature() will use the public key found in 'key_dict', the 'sig' objects contained in 'signature', and 'data' to complete the verification. >>> ed25519_key = generate_ed25519_key() >>> data = 'The quick brown fox jumps over the lazy dog' >>> signature = create_signature(ed25519_key, data) >>> verify_signature(ed25519_key, signature, data) True >>> verify_signature(ed25519_key, signature, 'bad_data') False >>> rsa_key = generate_rsa_key() >>> signature = create_signature(rsa_key, data) >>> verify_signature(rsa_key, signature, data) True >>> verify_signature(rsa_key, signature, 'bad_data') False >>> ecdsa_key = generate_ecdsa_key() >>> signature = create_signature(ecdsa_key, data) >>> verify_signature(ecdsa_key, signature, data) True >>> verify_signature(ecdsa_key, signature, 'bad_data') False <Arguments> key_dict: A dictionary containing the keys and other identifying information. If 'key_dict' is an RSA key, it has the form: {'keytype': 'rsa', 'scheme': 'rsassa-pss-sha256', 'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...', 'keyval': {'public': '-----BEGIN RSA PUBLIC KEY----- ...', 'private': '-----BEGIN RSA PRIVATE KEY----- ...'}} The public and private keys are strings in PEM format. signature: The signature dictionary produced by one of the key generation functions. 'signature' has the form: {'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...', 'sig': sig}. Conformant to 'securesystemslib.formats.SIGNATURE_SCHEMA'. data: Data that the signature is expected to be over. This should be a bytes object; data should be encoded/serialized before it is passed here.) This is the same value that can be passed into securesystemslib.create_signature() in order to create the signature. <Exceptions> securesystemslib.exceptions.FormatError, raised if either 'key_dict' or 'signature' are improperly formatted. securesystemslib.exceptions.UnsupportedAlgorithmError, if 'key_dict' or 'signature' specifies an unsupported algorithm. securesystemslib.exceptions.CryptoError, if the KEYID in the given 'key_dict' does not match the KEYID in 'signature'. <Side Effects> The cryptography library specified in 'settings' called to do the actual verification. <Returns> Boolean. True if the signature is valid, False otherwise.
[ "<Purpose", ">", "Determine", "whether", "the", "private", "key", "belonging", "to", "key_dict", "produced", "signature", ".", "verify_signature", "()", "will", "use", "the", "public", "key", "found", "in", "key_dict", "the", "sig", "objects", "contained", "in", "signature", "and", "data", "to", "complete", "the", "verification", "." ]
python
train
36.978417
saltstack/salt
salt/modules/zookeeper.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/zookeeper.py#L234-L268
def exists(path, profile=None, hosts=None, scheme=None, username=None, password=None, default_acl=None): ''' Check if path exists path path to check profile Configured Zookeeper profile to authenticate with (Default: None) hosts Lists of Zookeeper Hosts (Default: '127.0.0.1:2181) scheme Scheme to authenticate with (Default: 'digest') username Username to authenticate (Default: None) password Password to authenticate (Default: None) default_acl Default acls to assign if a node is created in this connection (Default: None) CLI Example: .. code-block:: bash salt minion1 zookeeper.exists /test/name profile=prod ''' conn = _get_zk_conn(profile=profile, hosts=hosts, scheme=scheme, username=username, password=password, default_acl=default_acl) return bool(conn.exists(path))
[ "def", "exists", "(", "path", ",", "profile", "=", "None", ",", "hosts", "=", "None", ",", "scheme", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ",", "default_acl", "=", "None", ")", ":", "conn", "=", "_get_zk_conn", "(", "profile", "=", "profile", ",", "hosts", "=", "hosts", ",", "scheme", "=", "scheme", ",", "username", "=", "username", ",", "password", "=", "password", ",", "default_acl", "=", "default_acl", ")", "return", "bool", "(", "conn", ".", "exists", "(", "path", ")", ")" ]
Check if path exists path path to check profile Configured Zookeeper profile to authenticate with (Default: None) hosts Lists of Zookeeper Hosts (Default: '127.0.0.1:2181) scheme Scheme to authenticate with (Default: 'digest') username Username to authenticate (Default: None) password Password to authenticate (Default: None) default_acl Default acls to assign if a node is created in this connection (Default: None) CLI Example: .. code-block:: bash salt minion1 zookeeper.exists /test/name profile=prod
[ "Check", "if", "path", "exists" ]
python
train
25.657143
philgyford/django-spectator
spectator/events/migrations/0039_populate_exhibitions.py
https://github.com/philgyford/django-spectator/blob/f3c72004f9caa1fde0f5a3b2f0d2bf285fc01ada/spectator/events/migrations/0039_populate_exhibitions.py#L11-L24
def generate_slug(value): """ Generates a slug using a Hashid of `value`. COPIED from spectator.core.models.SluggedModelMixin() because migrations don't make this happen automatically and perhaps the least bad thing is to copy the method here, ugh. """ alphabet = app_settings.SLUG_ALPHABET salt = app_settings.SLUG_SALT hashids = Hashids(alphabet=alphabet, salt=salt, min_length=5) return hashids.encode(value)
[ "def", "generate_slug", "(", "value", ")", ":", "alphabet", "=", "app_settings", ".", "SLUG_ALPHABET", "salt", "=", "app_settings", ".", "SLUG_SALT", "hashids", "=", "Hashids", "(", "alphabet", "=", "alphabet", ",", "salt", "=", "salt", ",", "min_length", "=", "5", ")", "return", "hashids", ".", "encode", "(", "value", ")" ]
Generates a slug using a Hashid of `value`. COPIED from spectator.core.models.SluggedModelMixin() because migrations don't make this happen automatically and perhaps the least bad thing is to copy the method here, ugh.
[ "Generates", "a", "slug", "using", "a", "Hashid", "of", "value", "." ]
python
train
31.5
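A minimal sketch of the Hashids encoding that generate_slug() above relies on, assuming only the third-party hashids package; the salt below is a made-up example value, and the real helper also passes a project-configured alphabet.

from hashids import Hashids

# Encode an integer primary key into a short, reversible slug, as the
# generate_slug() migration helper does. The salt here is an example value.
hashids = Hashids(salt="example-salt", min_length=5)
slug = hashids.encode(42)            # a string of at least 5 characters
assert hashids.decode(slug) == (42,)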
rpcope1/PythonConfluenceAPI
PythonConfluenceAPI/api.py
https://github.com/rpcope1/PythonConfluenceAPI/blob/b7f0ca2a390f964715fdf3a60b5b0c5ef7116d40/PythonConfluenceAPI/api.py#L273-L294
def get_content_macro_by_macro_id(self, content_id, version, macro_id, callback=None): """ Returns the body of a macro (in storage format) with the given id. This resource is primarily used by connect applications that require the body of macro to perform their work. When content is created, if no macroId is specified, then Confluence will generate a random id. The id is persisted as the content is saved and only modified by Confluence if there are conflicting IDs. To preserve backwards compatibility this resource will also match on the hash of the macro body, even if a macroId is found. This check will become redundant as pages get macroId's generated for them and transparently propagate out to all instances. :param content_id (string): A string containing the id of the content. :param version (int): The version of the content to search. :param macro_id (string): The macroID to find the corresponding macro. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. """ return self._service_get_request("rest/api/content/{id}/history/{version}/macro/id/{macro_id}" "".format(id=content_id, version=int(version), macro_id=macro_id), callback=callback)
[ "def", "get_content_macro_by_macro_id", "(", "self", ",", "content_id", ",", "version", ",", "macro_id", ",", "callback", "=", "None", ")", ":", "return", "self", ".", "_service_get_request", "(", "\"rest/api/content/{id}/history/{version}/macro/id/{macro_id}\"", "\"\"", ".", "format", "(", "id", "=", "content_id", ",", "version", "=", "int", "(", "version", ")", ",", "macro_id", "=", "macro_id", ")", ",", "callback", "=", "callback", ")" ]
Returns the body of a macro (in storage format) with the given id. This resource is primarily used by connect applications that require the body of macro to perform their work. When content is created, if no macroId is specified, then Confluence will generate a random id. The id is persisted as the content is saved and only modified by Confluence if there are conflicting IDs. To preserve backwards compatibility this resource will also match on the hash of the macro body, even if a macroId is found. This check will become redundant as pages get macroId's generated for them and transparently propagate out to all instances. :param content_id (string): A string containing the id of the content. :param version (int): The version of the content to search. :param macro_id (string): The macroID to find the corresponding macro. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially.
[ "Returns", "the", "body", "of", "a", "macro", "(", "in", "storage", "format", ")", "with", "the", "given", "id", ".", "This", "resource", "is", "primarily", "used", "by", "connect", "applications", "that", "require", "the", "body", "of", "macro", "to", "perform", "their", "work", "." ]
python
train
73.636364
saltstack/salt
salt/cloud/clouds/msazure.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/msazure.py#L1498-L1522
def list_disks(kwargs=None, conn=None, call=None): ''' .. versionadded:: 2015.8.0 List disks associated with the account CLI Example: .. code-block:: bash salt-cloud -f list_disks my-azure ''' if call != 'function': raise SaltCloudSystemExit( 'The list_disks function must be called with -f or --function.' ) if not conn: conn = get_conn() data = conn.list_disks() ret = {} for item in data.disks: ret[item.name] = object_to_dict(item) return ret
[ "def", "list_disks", "(", "kwargs", "=", "None", ",", "conn", "=", "None", ",", "call", "=", "None", ")", ":", "if", "call", "!=", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The list_disks function must be called with -f or --function.'", ")", "if", "not", "conn", ":", "conn", "=", "get_conn", "(", ")", "data", "=", "conn", ".", "list_disks", "(", ")", "ret", "=", "{", "}", "for", "item", "in", "data", ".", "disks", ":", "ret", "[", "item", ".", "name", "]", "=", "object_to_dict", "(", "item", ")", "return", "ret" ]
.. versionadded:: 2015.8.0 List disks associated with the account CLI Example: .. code-block:: bash salt-cloud -f list_disks my-azure
[ "..", "versionadded", "::", "2015", ".", "8", ".", "0" ]
python
train
21.12
cjdrake/pyeda
pyeda/logic/aes.py
https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/logic/aes.py#L202-L216
def add_round_key(state, rkey): """ Transformation in the Cipher and Inverse Cipher in which a Round Key is added to the State using an XOR operation. The length of a Round Key equals the size of the State (i.e., for Nb = 4, the Round Key length equals 128 bits/16 bytes). """ state = state.reshape(4, 32) rkey = rkey.reshape(4, 32) return fcat( state[0] ^ rkey[0], state[1] ^ rkey[1], state[2] ^ rkey[2], state[3] ^ rkey[3], )
[ "def", "add_round_key", "(", "state", ",", "rkey", ")", ":", "state", "=", "state", ".", "reshape", "(", "4", ",", "32", ")", "rkey", "=", "rkey", ".", "reshape", "(", "4", ",", "32", ")", "return", "fcat", "(", "state", "[", "0", "]", "^", "rkey", "[", "0", "]", ",", "state", "[", "1", "]", "^", "rkey", "[", "1", "]", ",", "state", "[", "2", "]", "^", "rkey", "[", "2", "]", ",", "state", "[", "3", "]", "^", "rkey", "[", "3", "]", ",", ")" ]
Transformation in the Cipher and Inverse Cipher in which a Round Key is added to the State using an XOR operation. The length of a Round Key equals the size of the State (i.e., for Nb = 4, the Round Key length equals 128 bits/16 bytes).
[ "Transformation", "in", "the", "Cipher", "and", "Inverse", "Cipher", "in", "which", "a", "Round", "Key", "is", "added", "to", "the", "State", "using", "an", "XOR", "operation", ".", "The", "length", "of", "a", "Round", "Key", "equals", "the", "size", "of", "the", "State", "(", "i", ".", "e", ".", "for", "Nb", "=", "4", "the", "Round", "Key", "length", "equals", "128", "bits", "/", "16", "bytes", ")", "." ]
python
train
32.333333
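For concreteness, a plain-Python sketch of the AddRoundKey operation described above, working on 16-byte values rather than the pyeda bit-vector expressions used in the record; the state and round-key bytes are arbitrary example values.

def add_round_key_bytes(state: bytes, rkey: bytes) -> bytes:
    # 128-bit state combined with a 128-bit round key by bitwise XOR,
    # the same transformation add_round_key() expresses symbolically.
    assert len(state) == 16 and len(rkey) == 16
    return bytes(s ^ k for s, k in zip(state, rkey))

state = bytes(range(16))                  # example state 00 01 ... 0f
rkey = bytes([0xFF] * 16)                 # example round key
out = add_round_key_bytes(state, rkey)
assert add_round_key_bytes(out, rkey) == state   # XOR is its own inverse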
joequant/cryptoexchange
cryptoexchange/util/bitmex-generate-api-key.py
https://github.com/joequant/cryptoexchange/blob/6690fbd9a2ba00e40d7484425808c84d44233f0c/cryptoexchange/util/bitmex-generate-api-key.py#L110-L120
def disable_key(self): """Disable an existing API Key.""" print("This command will disable a enabled key.") apiKeyID = input("API Key ID: ") try: key = self._curl_bitmex("/apiKey/disable", postdict={"apiKeyID": apiKeyID}) print("Key with ID %s disabled." % key["id"]) except: print("Unable to disable key, please try again.") self.disable_key()
[ "def", "disable_key", "(", "self", ")", ":", "print", "(", "\"This command will disable a enabled key.\"", ")", "apiKeyID", "=", "input", "(", "\"API Key ID: \"", ")", "try", ":", "key", "=", "self", ".", "_curl_bitmex", "(", "\"/apiKey/disable\"", ",", "postdict", "=", "{", "\"apiKeyID\"", ":", "apiKeyID", "}", ")", "print", "(", "\"Key with ID %s disabled.\"", "%", "key", "[", "\"id\"", "]", ")", "except", ":", "print", "(", "\"Unable to disable key, please try again.\"", ")", "self", ".", "disable_key", "(", ")" ]
Disable an existing API Key.
[ "Disable", "an", "existing", "API", "Key", "." ]
python
train
41.636364
zhmcclient/python-zhmcclient
zhmcclient_mock/_urihandler.py
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient_mock/_urihandler.py#L207-L226
def check_required_fields(method, uri, body, field_names): """ Check required fields in the request body. Raises: BadRequestError with reason 3: Missing request body BadRequestError with reason 5: Missing required field in request body """ # Check presence of request body if body is None: raise BadRequestError(method, uri, reason=3, message="Missing request body") # Check required input fields for field_name in field_names: if field_name not in body: raise BadRequestError(method, uri, reason=5, message="Missing required field in request " "body: {}".format(field_name))
[ "def", "check_required_fields", "(", "method", ",", "uri", ",", "body", ",", "field_names", ")", ":", "# Check presence of request body", "if", "body", "is", "None", ":", "raise", "BadRequestError", "(", "method", ",", "uri", ",", "reason", "=", "3", ",", "message", "=", "\"Missing request body\"", ")", "# Check required input fields", "for", "field_name", "in", "field_names", ":", "if", "field_name", "not", "in", "body", ":", "raise", "BadRequestError", "(", "method", ",", "uri", ",", "reason", "=", "5", ",", "message", "=", "\"Missing required field in request \"", "\"body: {}\"", ".", "format", "(", "field_name", ")", ")" ]
Check required fields in the request body. Raises: BadRequestError with reason 3: Missing request body BadRequestError with reason 5: Missing required field in request body
[ "Check", "required", "fields", "in", "the", "request", "body", "." ]
python
train
36.45
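A self-contained sketch of the request-body validation pattern shown above; the exception class and the example body are stand-ins, not the BadRequestError or URIs from zhmcclient_mock.

class BadRequest(Exception):
    def __init__(self, reason, message):
        super().__init__(message)
        self.reason = reason

def check_required(body, field_names):
    # Reject a missing body first, then any missing required field.
    if body is None:
        raise BadRequest(3, "Missing request body")
    for name in field_names:
        if name not in body:
            raise BadRequest(5, "Missing required field in request body: %s" % name)

check_required({"name": "part-1", "type": "linux"}, ["name", "type"])   # passes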
Bystroushaak/pyDHTMLParser
src/dhtmlparser/__init__.py
https://github.com/Bystroushaak/pyDHTMLParser/blob/4756f93dd048500b038ece2323fe26e46b6bfdea/src/dhtmlparser/__init__.py#L43-L150
def _raw_split(itxt): """ Parse HTML from text into array filled with tags end text. Source code is little bit unintutive, because it is state machine parser. For better understanding, look at http://bit.ly/1rXRcJj Example:: >>> dhtmlparser._raw_split('<html><tag params="true"></html>') ['<html>', '<tag params="true">', '</html>'] Args: itxt (str): Input HTML text, which will be parsed. Returns: list: List of strings (input splitted to tags and text). """ echr = "" buff = ["", "", "", ""] content = "" array = [] next_state = 0 inside_tag = False escaped = False COMMENT_START = ["-", "!", "<"] COMMENT_END = ["-", "-"] gc.disable() for c in itxt: # content if next_state == StateEnum.content: if c == "<": if content: array.append(content) content = c next_state = StateEnum.tag inside_tag = False else: content += c # html tag elif next_state == StateEnum.tag: if c == ">": array.append(content + c) content = "" next_state = StateEnum.content elif c == "'" or c == '"': echr = c content += c next_state = StateEnum.parameter elif c == "-" and buff[:3] == COMMENT_START: if content[:-3]: array.append(content[:-3]) content = content[-3:] + c next_state = StateEnum.comment else: if c == "<": # jump back into tag instead of content array.append(content) inside_tag = True content = "" content += c # quotes "" / '' elif next_state == StateEnum.parameter: if c == echr and not escaped: # end of quotes next_state = StateEnum.tag # unescaped end of line - this is good for invalid HTML like # <a href=something">..., because it allows recovery if c == "\n" and not escaped and buff[0] == ">": next_state = StateEnum.content inside_tag = False content += c escaped = not escaped if c == "\\" else False # html comments elif next_state == StateEnum.comment: if c == ">" and buff[:2] == COMMENT_END: next_state = StateEnum.tag if inside_tag else StateEnum.content inside_tag = False array.append(content + c) content = "" else: content += c # rotate buffer buff = _rotate_buff(buff) buff[0] = c gc.enable() if content: array.append(content) return array
[ "def", "_raw_split", "(", "itxt", ")", ":", "echr", "=", "\"\"", "buff", "=", "[", "\"\"", ",", "\"\"", ",", "\"\"", ",", "\"\"", "]", "content", "=", "\"\"", "array", "=", "[", "]", "next_state", "=", "0", "inside_tag", "=", "False", "escaped", "=", "False", "COMMENT_START", "=", "[", "\"-\"", ",", "\"!\"", ",", "\"<\"", "]", "COMMENT_END", "=", "[", "\"-\"", ",", "\"-\"", "]", "gc", ".", "disable", "(", ")", "for", "c", "in", "itxt", ":", "# content", "if", "next_state", "==", "StateEnum", ".", "content", ":", "if", "c", "==", "\"<\"", ":", "if", "content", ":", "array", ".", "append", "(", "content", ")", "content", "=", "c", "next_state", "=", "StateEnum", ".", "tag", "inside_tag", "=", "False", "else", ":", "content", "+=", "c", "# html tag", "elif", "next_state", "==", "StateEnum", ".", "tag", ":", "if", "c", "==", "\">\"", ":", "array", ".", "append", "(", "content", "+", "c", ")", "content", "=", "\"\"", "next_state", "=", "StateEnum", ".", "content", "elif", "c", "==", "\"'\"", "or", "c", "==", "'\"'", ":", "echr", "=", "c", "content", "+=", "c", "next_state", "=", "StateEnum", ".", "parameter", "elif", "c", "==", "\"-\"", "and", "buff", "[", ":", "3", "]", "==", "COMMENT_START", ":", "if", "content", "[", ":", "-", "3", "]", ":", "array", ".", "append", "(", "content", "[", ":", "-", "3", "]", ")", "content", "=", "content", "[", "-", "3", ":", "]", "+", "c", "next_state", "=", "StateEnum", ".", "comment", "else", ":", "if", "c", "==", "\"<\"", ":", "# jump back into tag instead of content", "array", ".", "append", "(", "content", ")", "inside_tag", "=", "True", "content", "=", "\"\"", "content", "+=", "c", "# quotes \"\" / ''", "elif", "next_state", "==", "StateEnum", ".", "parameter", ":", "if", "c", "==", "echr", "and", "not", "escaped", ":", "# end of quotes", "next_state", "=", "StateEnum", ".", "tag", "# unescaped end of line - this is good for invalid HTML like", "# <a href=something\">..., because it allows recovery", "if", "c", "==", "\"\\n\"", "and", "not", "escaped", "and", "buff", "[", "0", "]", "==", "\">\"", ":", "next_state", "=", "StateEnum", ".", "content", "inside_tag", "=", "False", "content", "+=", "c", "escaped", "=", "not", "escaped", "if", "c", "==", "\"\\\\\"", "else", "False", "# html comments", "elif", "next_state", "==", "StateEnum", ".", "comment", ":", "if", "c", "==", "\">\"", "and", "buff", "[", ":", "2", "]", "==", "COMMENT_END", ":", "next_state", "=", "StateEnum", ".", "tag", "if", "inside_tag", "else", "StateEnum", ".", "content", "inside_tag", "=", "False", "array", ".", "append", "(", "content", "+", "c", ")", "content", "=", "\"\"", "else", ":", "content", "+=", "c", "# rotate buffer", "buff", "=", "_rotate_buff", "(", "buff", ")", "buff", "[", "0", "]", "=", "c", "gc", ".", "enable", "(", ")", "if", "content", ":", "array", ".", "append", "(", "content", ")", "return", "array" ]
Parse HTML from text into array filled with tags end text. Source code is little bit unintutive, because it is state machine parser. For better understanding, look at http://bit.ly/1rXRcJj Example:: >>> dhtmlparser._raw_split('<html><tag params="true"></html>') ['<html>', '<tag params="true">', '</html>'] Args: itxt (str): Input HTML text, which will be parsed. Returns: list: List of strings (input splitted to tags and text).
[ "Parse", "HTML", "from", "text", "into", "array", "filled", "with", "tags", "end", "text", "." ]
python
train
26.342593
StackStorm/pybind
pybind/nos/v6_0_2f/snmp_server/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/snmp_server/__init__.py#L250-L271
def _set_host(self, v, load=False): """ Setter method for host, mapped from YANG variable /snmp_server/host (list) If this variable is read-only (config: false) in the source YANG file, then _set_host is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_host() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("ip community",host.host, yang_name="host", rest_name="host", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ip community', extensions={u'tailf-common': {u'info': u'Holds IP Address, community string, version\n(v1 | v2c), port number used to send traps\nand severity level', u'sort-priority': u'23', u'callpoint': u'snmphost', u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}), is_container='list', yang_name="host", rest_name="host", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Holds IP Address, community string, version\n(v1 | v2c), port number used to send traps\nand severity level', u'sort-priority': u'23', u'callpoint': u'snmphost', u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """host must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("ip community",host.host, yang_name="host", rest_name="host", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ip community', extensions={u'tailf-common': {u'info': u'Holds IP Address, community string, version\n(v1 | v2c), port number used to send traps\nand severity level', u'sort-priority': u'23', u'callpoint': u'snmphost', u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}), is_container='list', yang_name="host", rest_name="host", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Holds IP Address, community string, version\n(v1 | v2c), port number used to send traps\nand severity level', u'sort-priority': u'23', u'callpoint': u'snmphost', u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='list', is_config=True)""", }) self.__host = t if hasattr(self, '_set'): self._set()
[ "def", "_set_host", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGListType", "(", "\"ip community\"", ",", "host", ".", "host", ",", "yang_name", "=", "\"host\"", ",", "rest_name", "=", "\"host\"", ",", "parent", "=", "self", ",", "is_container", "=", "'list'", ",", "user_ordered", "=", "False", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "yang_keys", "=", "'ip community'", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Holds IP Address, community string, version\\n(v1 | v2c), port number used to send traps\\nand severity level'", ",", "u'sort-priority'", ":", "u'23'", ",", "u'callpoint'", ":", "u'snmphost'", ",", "u'cli-suppress-key-abbreviation'", ":", "None", ",", "u'cli-suppress-list-no'", ":", "None", "}", "}", ")", ",", "is_container", "=", "'list'", ",", "yang_name", "=", "\"host\"", ",", "rest_name", "=", "\"host\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Holds IP Address, community string, version\\n(v1 | v2c), port number used to send traps\\nand severity level'", ",", "u'sort-priority'", ":", "u'23'", ",", "u'callpoint'", ":", "u'snmphost'", ",", "u'cli-suppress-key-abbreviation'", ":", "None", ",", "u'cli-suppress-list-no'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-snmp'", ",", "defining_module", "=", "'brocade-snmp'", ",", "yang_type", "=", "'list'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"host must be of a type compatible with list\"\"\"", ",", "'defined-type'", ":", "\"list\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGListType(\"ip community\",host.host, yang_name=\"host\", rest_name=\"host\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ip community', extensions={u'tailf-common': {u'info': u'Holds IP Address, community string, version\\n(v1 | v2c), port number used to send traps\\nand severity level', u'sort-priority': u'23', u'callpoint': u'snmphost', u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}), is_container='list', yang_name=\"host\", rest_name=\"host\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Holds IP Address, community string, version\\n(v1 | v2c), port number used to send traps\\nand severity level', u'sort-priority': u'23', u'callpoint': u'snmphost', u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='list', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__host", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for host, mapped from YANG variable /snmp_server/host (list) If this variable is read-only (config: false) in the source YANG file, then _set_host is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_host() directly.
[ "Setter", "method", "for", "host", "mapped", "from", "YANG", "variable", "/", "snmp_server", "/", "host", "(", "list", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_host", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_host", "()", "directly", "." ]
python
train
124.772727
monarch-initiative/dipper
dipper/sources/Coriell.py
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Coriell.py#L150-L224
def fetch(self, is_dl_forced=False): """ Here we connect to the coriell sftp server using private connection details. They dump bi-weekly files with a timestamp in the filename. For each catalog, we ping the remote site and pull the most-recently updated file, renaming it to our local latest.csv. Be sure to have pg user/password connection details in your conf.yaml file, like: dbauth : {"coriell" : { "user" : "<username>", "password" : "<password>", "host" : <host>, "private_key"=path/to/rsa_key} } :param is_dl_forced: :return: """ host = config.get_config()['dbauth']['coriell']['host'] key = config.get_config()['dbauth']['coriell']['private_key'] user = config.get_config()['user']['coriell'] passwd = config.get_config()['keys'][user] with pysftp.Connection( host, username=user, password=passwd, private_key=key) as sftp: # check to make sure each file is in there # get the remote files remote_files = sftp.listdir_attr() files_by_repo = {} for attr in remote_files: # for each catalog, get the most-recent filename mch = re.match('(NIGMS|NIA|NHGRI|NINDS)', attr.filename) if mch is not None and len(mch.groups()) > 0: # there should just be one now files_by_repo[mch.group(1)] = attr # sort each array in hash, # & get the name and time of the most-recent file for each catalog for rmt in self.files: LOG.info("Checking on %s catalog file", rmt) fname = self.files[rmt]['file'] remotef = files_by_repo[rmt] target_name = '/'.join((self.rawdir, fname)) # check if the local file is out of date, if so, download. # otherwise, skip. # we rename (for simplicity) the original file fstat = None if os.path.exists(target_name): fstat = os.stat(target_name) LOG.info( "Local file date: %s", datetime.utcfromtimestamp(fstat[stat.ST_CTIME])) if fstat is None or remotef.st_mtime > fstat[stat.ST_CTIME]: if fstat is None: LOG.info("File does not exist locally; downloading...") else: LOG.info( "New version of %s catalog available; downloading...", rmt) sftp.get(remotef.filename, target_name) LOG.info( "Fetched remote %s -> %s", remotef.filename, target_name) fstat = os.stat(target_name) filedate = datetime.utcfromtimestamp( remotef.st_mtime).strftime("%Y-%m-%d") LOG.info( "New file date: %s", datetime.utcfromtimestamp(fstat[stat.ST_CTIME])) else: LOG.info("File %s exists; using local copy", fname) filedate = datetime.utcfromtimestamp( fstat[stat.ST_CTIME]).strftime("%Y-%m-%d") self.dataset.setFileAccessUrl(remotef.filename, True) self.dataset.setVersion(filedate) return
[ "def", "fetch", "(", "self", ",", "is_dl_forced", "=", "False", ")", ":", "host", "=", "config", ".", "get_config", "(", ")", "[", "'dbauth'", "]", "[", "'coriell'", "]", "[", "'host'", "]", "key", "=", "config", ".", "get_config", "(", ")", "[", "'dbauth'", "]", "[", "'coriell'", "]", "[", "'private_key'", "]", "user", "=", "config", ".", "get_config", "(", ")", "[", "'user'", "]", "[", "'coriell'", "]", "passwd", "=", "config", ".", "get_config", "(", ")", "[", "'keys'", "]", "[", "user", "]", "with", "pysftp", ".", "Connection", "(", "host", ",", "username", "=", "user", ",", "password", "=", "passwd", ",", "private_key", "=", "key", ")", "as", "sftp", ":", "# check to make sure each file is in there", "# get the remote files", "remote_files", "=", "sftp", ".", "listdir_attr", "(", ")", "files_by_repo", "=", "{", "}", "for", "attr", "in", "remote_files", ":", "# for each catalog, get the most-recent filename", "mch", "=", "re", ".", "match", "(", "'(NIGMS|NIA|NHGRI|NINDS)'", ",", "attr", ".", "filename", ")", "if", "mch", "is", "not", "None", "and", "len", "(", "mch", ".", "groups", "(", ")", ")", ">", "0", ":", "# there should just be one now", "files_by_repo", "[", "mch", ".", "group", "(", "1", ")", "]", "=", "attr", "# sort each array in hash,", "# & get the name and time of the most-recent file for each catalog", "for", "rmt", "in", "self", ".", "files", ":", "LOG", ".", "info", "(", "\"Checking on %s catalog file\"", ",", "rmt", ")", "fname", "=", "self", ".", "files", "[", "rmt", "]", "[", "'file'", "]", "remotef", "=", "files_by_repo", "[", "rmt", "]", "target_name", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "fname", ")", ")", "# check if the local file is out of date, if so, download.", "# otherwise, skip.", "# we rename (for simplicity) the original file", "fstat", "=", "None", "if", "os", ".", "path", ".", "exists", "(", "target_name", ")", ":", "fstat", "=", "os", ".", "stat", "(", "target_name", ")", "LOG", ".", "info", "(", "\"Local file date: %s\"", ",", "datetime", ".", "utcfromtimestamp", "(", "fstat", "[", "stat", ".", "ST_CTIME", "]", ")", ")", "if", "fstat", "is", "None", "or", "remotef", ".", "st_mtime", ">", "fstat", "[", "stat", ".", "ST_CTIME", "]", ":", "if", "fstat", "is", "None", ":", "LOG", ".", "info", "(", "\"File does not exist locally; downloading...\"", ")", "else", ":", "LOG", ".", "info", "(", "\"New version of %s catalog available; downloading...\"", ",", "rmt", ")", "sftp", ".", "get", "(", "remotef", ".", "filename", ",", "target_name", ")", "LOG", ".", "info", "(", "\"Fetched remote %s -> %s\"", ",", "remotef", ".", "filename", ",", "target_name", ")", "fstat", "=", "os", ".", "stat", "(", "target_name", ")", "filedate", "=", "datetime", ".", "utcfromtimestamp", "(", "remotef", ".", "st_mtime", ")", ".", "strftime", "(", "\"%Y-%m-%d\"", ")", "LOG", ".", "info", "(", "\"New file date: %s\"", ",", "datetime", ".", "utcfromtimestamp", "(", "fstat", "[", "stat", ".", "ST_CTIME", "]", ")", ")", "else", ":", "LOG", ".", "info", "(", "\"File %s exists; using local copy\"", ",", "fname", ")", "filedate", "=", "datetime", ".", "utcfromtimestamp", "(", "fstat", "[", "stat", ".", "ST_CTIME", "]", ")", ".", "strftime", "(", "\"%Y-%m-%d\"", ")", "self", ".", "dataset", ".", "setFileAccessUrl", "(", "remotef", ".", "filename", ",", "True", ")", "self", ".", "dataset", ".", "setVersion", "(", "filedate", ")", "return" ]
Here we connect to the coriell sftp server using private connection details. They dump bi-weekly files with a timestamp in the filename. For each catalog, we ping the remote site and pull the most-recently updated file, renaming it to our local latest.csv. Be sure to have pg user/password connection details in your conf.yaml file, like: dbauth : {"coriell" : { "user" : "<username>", "password" : "<password>", "host" : <host>, "private_key"=path/to/rsa_key} } :param is_dl_forced: :return:
[ "Here", "we", "connect", "to", "the", "coriell", "sftp", "server", "using", "private", "connection", "details", ".", "They", "dump", "bi", "-", "weekly", "files", "with", "a", "timestamp", "in", "the", "filename", ".", "For", "each", "catalog", "we", "ping", "the", "remote", "site", "and", "pull", "the", "most", "-", "recently", "updated", "file", "renaming", "it", "to", "our", "local", "latest", ".", "csv", "." ]
python
train
46
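The core of the logic above is "download only when the remote copy is newer than the local one". Below is a standalone sketch of that check; the file name and remote mtime are hypothetical, no SFTP connection is made, and in fetch() the remote timestamp comes from a pysftp listdir_attr() entry.

import os
import stat
from datetime import datetime, timezone

def needs_download(local_path, remote_mtime):
    # Re-download when the local file is missing or older than the remote copy.
    if not os.path.exists(local_path):
        return True
    return remote_mtime > os.stat(local_path)[stat.ST_CTIME]

remote_mtime = datetime(2024, 1, 1, tzinfo=timezone.utc).timestamp()   # hypothetical
print(needs_download("raw/NIGMS_latest.csv", remote_mtime))            # hypothetical path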
rackerlabs/rackspace-python-neutronclient
neutronclient/v2_0/client.py
https://github.com/rackerlabs/rackspace-python-neutronclient/blob/5a5009a8fe078e3aa1d582176669f1b28ab26bef/neutronclient/v2_0/client.py#L968-L971
def update_loadbalancer(self, lbaas_loadbalancer, body=None): """Updates a load balancer.""" return self.put(self.lbaas_loadbalancer_path % (lbaas_loadbalancer), body=body)
[ "def", "update_loadbalancer", "(", "self", ",", "lbaas_loadbalancer", ",", "body", "=", "None", ")", ":", "return", "self", ".", "put", "(", "self", ".", "lbaas_loadbalancer_path", "%", "(", "lbaas_loadbalancer", ")", ",", "body", "=", "body", ")" ]
Updates a load balancer.
[ "Updates", "a", "load", "balancer", "." ]
python
train
52.25
photo/openphoto-python
trovebox/api/api_photo.py
https://github.com/photo/openphoto-python/blob/209a1da27c8d8c88dbcf4ea6c6f57031ea1bc44b/trovebox/api/api_photo.py#L73-L83
def delete_source(self, photo, **kwds): """ Endpoint: /photo/<id>/source/delete.json Delete the source files of a photo. Returns True if successful. Raises a TroveboxError if not. """ return self._client.post("/photo/%s/source/delete.json" % self._extract_id(photo), **kwds)["result"]
[ "def", "delete_source", "(", "self", ",", "photo", ",", "*", "*", "kwds", ")", ":", "return", "self", ".", "_client", ".", "post", "(", "\"/photo/%s/source/delete.json\"", "%", "self", ".", "_extract_id", "(", "photo", ")", ",", "*", "*", "kwds", ")", "[", "\"result\"", "]" ]
Endpoint: /photo/<id>/source/delete.json Delete the source files of a photo. Returns True if successful. Raises a TroveboxError if not.
[ "Endpoint", ":", "/", "photo", "/", "<id", ">", "/", "source", "/", "delete", ".", "json" ]
python
train
36.090909
publysher/rdflib-django
src/rdflib_django/store.py
https://github.com/publysher/rdflib-django/blob/e26992af75f96ef27a6ceaf820574e3bca645953/src/rdflib_django/store.py#L41-L48
def _get_named_graph(context): """ Returns the named graph for this context. """ if context is None: return None return models.NamedGraph.objects.get_or_create(identifier=context.identifier)[0]
[ "def", "_get_named_graph", "(", "context", ")", ":", "if", "context", "is", "None", ":", "return", "None", "return", "models", ".", "NamedGraph", ".", "objects", ".", "get_or_create", "(", "identifier", "=", "context", ".", "identifier", ")", "[", "0", "]" ]
Returns the named graph for this context.
[ "Returns", "the", "named", "graph", "for", "this", "context", "." ]
python
train
26.875
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L26448-L26459
def modify_log_flags(self, settings): """Modifies the debug or release logger flags. in settings of type str The flags settings string. See iprt/log.h for details. To target the release logger, prefix the string with "release:". """ if not isinstance(settings, basestring): raise TypeError("settings can only be an instance of type basestring") self._call("modifyLogFlags", in_p=[settings])
[ "def", "modify_log_flags", "(", "self", ",", "settings", ")", ":", "if", "not", "isinstance", "(", "settings", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"settings can only be an instance of type basestring\"", ")", "self", ".", "_call", "(", "\"modifyLogFlags\"", ",", "in_p", "=", "[", "settings", "]", ")" ]
Modifies the debug or release logger flags. in settings of type str The flags settings string. See iprt/log.h for details. To target the release logger, prefix the string with "release:".
[ "Modifies", "the", "debug", "or", "release", "logger", "flags", "." ]
python
train
39.833333
data-8/datascience
datascience/tables.py
https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1686-L1733
def relabeled(self, label, new_label): """Return a new table with ``label`` specifying column label(s) replaced by corresponding ``new_label``. Args: ``label`` -- (str or array of str) The label(s) of columns to be changed. ``new_label`` -- (str or array of str): The new label(s) of columns to be changed. Same number of elements as label. Raises: ``ValueError`` -- if ``label`` does not exist in table, or if the ``label`` and ``new_label`` are not not of equal length. Also, raised if ``label`` and/or ``new_label`` are not ``str``. Returns: New table with ``new_label`` in place of ``label``. >>> tiles = Table().with_columns('letter', make_array('c', 'd'), ... 'count', make_array(2, 4)) >>> tiles letter | count c | 2 d | 4 >>> tiles.relabeled('count', 'number') letter | number c | 2 d | 4 >>> tiles # original table unmodified letter | count c | 2 d | 4 >>> tiles.relabeled(make_array('letter', 'count'), ... make_array('column1', 'column2')) column1 | column2 c | 2 d | 4 >>> tiles.relabeled(make_array('letter', 'number'), ... make_array('column1', 'column2')) Traceback (most recent call last): ... ValueError: Invalid labels. Column labels must already exist in table in order to be replaced. """ copy = self.copy() copy.relabel(label, new_label) return copy
[ "def", "relabeled", "(", "self", ",", "label", ",", "new_label", ")", ":", "copy", "=", "self", ".", "copy", "(", ")", "copy", ".", "relabel", "(", "label", ",", "new_label", ")", "return", "copy" ]
Return a new table with ``label`` specifying column label(s) replaced by corresponding ``new_label``. Args: ``label`` -- (str or array of str) The label(s) of columns to be changed. ``new_label`` -- (str or array of str): The new label(s) of columns to be changed. Same number of elements as label. Raises: ``ValueError`` -- if ``label`` does not exist in table, or if the ``label`` and ``new_label`` are not not of equal length. Also, raised if ``label`` and/or ``new_label`` are not ``str``. Returns: New table with ``new_label`` in place of ``label``. >>> tiles = Table().with_columns('letter', make_array('c', 'd'), ... 'count', make_array(2, 4)) >>> tiles letter | count c | 2 d | 4 >>> tiles.relabeled('count', 'number') letter | number c | 2 d | 4 >>> tiles # original table unmodified letter | count c | 2 d | 4 >>> tiles.relabeled(make_array('letter', 'count'), ... make_array('column1', 'column2')) column1 | column2 c | 2 d | 4 >>> tiles.relabeled(make_array('letter', 'number'), ... make_array('column1', 'column2')) Traceback (most recent call last): ... ValueError: Invalid labels. Column labels must already exist in table in order to be replaced.
[ "Return", "a", "new", "table", "with", "label", "specifying", "column", "label", "(", "s", ")", "replaced", "by", "corresponding", "new_label", "." ]
python
train
34.854167
pyvisa/pyvisa
pyvisa/ctwrapper/functions.py
https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/ctwrapper/functions.py#L1360-L1394
def parse_resource_extended(library, session, resource_name): """Parse a resource string to get extended interface information. Corresponds to viParseRsrcEx function of the VISA library. :param library: the visa library wrapped by ctypes. :param session: Resource Manager session (should always be the Default Resource Manager for VISA returned from open_default_resource_manager()). :param resource_name: Unique symbolic name of a resource. :return: Resource information, return value of the library call. :rtype: :class:`pyvisa.highlevel.ResourceInfo`, :class:`pyvisa.constants.StatusCode` """ interface_type = ViUInt16() interface_board_number = ViUInt16() resource_class = create_string_buffer(constants.VI_FIND_BUFLEN) unaliased_expanded_resource_name = create_string_buffer(constants.VI_FIND_BUFLEN) alias_if_exists = create_string_buffer(constants.VI_FIND_BUFLEN) # [ViSession, ViRsrc, ViPUInt16, ViPUInt16, ViAChar, ViAChar, ViAChar] # ViRsrc converts from (str, unicode, bytes) to bytes ret = library.viParseRsrcEx(session, resource_name, byref(interface_type), byref(interface_board_number), resource_class, unaliased_expanded_resource_name, alias_if_exists) res = [buffer_to_text(val) for val in (resource_class, unaliased_expanded_resource_name, alias_if_exists)] if res[-1] == '': res[-1] = None return ResourceInfo(constants.InterfaceType(interface_type.value), interface_board_number.value, *res), ret
[ "def", "parse_resource_extended", "(", "library", ",", "session", ",", "resource_name", ")", ":", "interface_type", "=", "ViUInt16", "(", ")", "interface_board_number", "=", "ViUInt16", "(", ")", "resource_class", "=", "create_string_buffer", "(", "constants", ".", "VI_FIND_BUFLEN", ")", "unaliased_expanded_resource_name", "=", "create_string_buffer", "(", "constants", ".", "VI_FIND_BUFLEN", ")", "alias_if_exists", "=", "create_string_buffer", "(", "constants", ".", "VI_FIND_BUFLEN", ")", "# [ViSession, ViRsrc, ViPUInt16, ViPUInt16, ViAChar, ViAChar, ViAChar]", "# ViRsrc converts from (str, unicode, bytes) to bytes", "ret", "=", "library", ".", "viParseRsrcEx", "(", "session", ",", "resource_name", ",", "byref", "(", "interface_type", ")", ",", "byref", "(", "interface_board_number", ")", ",", "resource_class", ",", "unaliased_expanded_resource_name", ",", "alias_if_exists", ")", "res", "=", "[", "buffer_to_text", "(", "val", ")", "for", "val", "in", "(", "resource_class", ",", "unaliased_expanded_resource_name", ",", "alias_if_exists", ")", "]", "if", "res", "[", "-", "1", "]", "==", "''", ":", "res", "[", "-", "1", "]", "=", "None", "return", "ResourceInfo", "(", "constants", ".", "InterfaceType", "(", "interface_type", ".", "value", ")", ",", "interface_board_number", ".", "value", ",", "*", "res", ")", ",", "ret" ]
Parse a resource string to get extended interface information. Corresponds to viParseRsrcEx function of the VISA library. :param library: the visa library wrapped by ctypes. :param session: Resource Manager session (should always be the Default Resource Manager for VISA returned from open_default_resource_manager()). :param resource_name: Unique symbolic name of a resource. :return: Resource information, return value of the library call. :rtype: :class:`pyvisa.highlevel.ResourceInfo`, :class:`pyvisa.constants.StatusCode`
[ "Parse", "a", "resource", "string", "to", "get", "extended", "interface", "information", "." ]
python
train
47.685714
apache/incubator-mxnet
python/mxnet/image/detection.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/image/detection.py#L213-L217
def _calculate_areas(self, label): """Calculate areas for multiple labels""" heights = np.maximum(0, label[:, 3] - label[:, 1]) widths = np.maximum(0, label[:, 2] - label[:, 0]) return heights * widths
[ "def", "_calculate_areas", "(", "self", ",", "label", ")", ":", "heights", "=", "np", ".", "maximum", "(", "0", ",", "label", "[", ":", ",", "3", "]", "-", "label", "[", ":", ",", "1", "]", ")", "widths", "=", "np", ".", "maximum", "(", "0", ",", "label", "[", ":", ",", "2", "]", "-", "label", "[", ":", ",", "0", "]", ")", "return", "heights", "*", "widths" ]
Calculate areas for multiple labels
[ "Calculate", "areas", "for", "multiple", "labels" ]
python
train
45.8
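A standalone numpy illustration of the area computation above; the label rows are made-up boxes in [xmin, ymin, xmax, ymax] order, and np.maximum clips degenerate boxes to zero area exactly as _calculate_areas() does.

import numpy as np

label = np.array([[0.1, 0.2, 0.5, 0.6],    # 0.4 x 0.4 box
                  [0.3, 0.3, 0.3, 0.9],    # zero-width box
                  [0.7, 0.8, 0.2, 0.9]])   # negative width, clipped to 0
heights = np.maximum(0, label[:, 3] - label[:, 1])
widths = np.maximum(0, label[:, 2] - label[:, 0])
print(heights * widths)   # approximately [0.16, 0.0, 0.0]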
nugget/python-anthemav
anthemav/protocol.py
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/protocol.py#L852-L856
def dump_rawdata(self): """Return contents of transport object for debugging forensics.""" if hasattr(self, 'transport'): attrs = vars(self.transport) return ', '.join("%s: %s" % item for item in attrs.items())
[ "def", "dump_rawdata", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'transport'", ")", ":", "attrs", "=", "vars", "(", "self", ".", "transport", ")", "return", "', '", ".", "join", "(", "\"%s: %s\"", "%", "item", "for", "item", "in", "attrs", ".", "items", "(", ")", ")" ]
Return contents of transport object for debugging forensics.
[ "Return", "contents", "of", "transport", "object", "for", "debugging", "forensics", "." ]
python
train
49.2
pandas-dev/pandas
pandas/core/indexes/base.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4440-L4450
def get_indexer_for(self, target, **kwargs): """ Guaranteed return of an indexer even when non-unique. This dispatches to get_indexer or get_indexer_nonunique as appropriate. """ if self.is_unique: return self.get_indexer(target, **kwargs) indexer, _ = self.get_indexer_non_unique(target, **kwargs) return indexer
[ "def", "get_indexer_for", "(", "self", ",", "target", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "is_unique", ":", "return", "self", ".", "get_indexer", "(", "target", ",", "*", "*", "kwargs", ")", "indexer", ",", "_", "=", "self", ".", "get_indexer_non_unique", "(", "target", ",", "*", "*", "kwargs", ")", "return", "indexer" ]
Guaranteed return of an indexer even when non-unique. This dispatches to get_indexer or get_indexer_nonunique as appropriate.
[ "Guaranteed", "return", "of", "an", "indexer", "even", "when", "non", "-", "unique", "." ]
python
train
34.545455
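A small usage illustration of get_indexer_for() on a non-unique index, where plain get_indexer() would not be valid; the index values are arbitrary.

import pandas as pd

idx = pd.Index(['a', 'b', 'b', 'c'])
# Every position matching the targets is returned, duplicates included.
print(idx.get_indexer_for(['b', 'c']))   # [1 2 3]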
PonteIneptique/collatinus-python
pycollatinus/ch.py
https://github.com/PonteIneptique/collatinus-python/blob/fca37b0b77bc60f47d3c24ab42f6d0bdca6ba0f5/pycollatinus/ch.py#L20-L45
def listeI(l): """ Fonction importante permettant de renvoyer une liste d'entiers à partir d'une chaîne. La chaîne est une liste de sections séparées par des virgules. Une section peut être soit un entier, soit un intervalle d'entiers. On donne alors les limites inférieure et supérieure de l'intervale, séparées par le caractère '-'. Nombreux exemples d'intervalles dans le fichier data/modeles.la. :param l: Chaîne à transformer :type l: str :return: Liste des sections étendues :rtype: list of int """ result = [] lvirg = l.split(',') for virg in lvirg: if "-" in virg: deb, fin = tuple(virg.split("-")) result += [i for i in range(int(deb), int(fin) + 1)] else: result.append(int(virg)) return result
[ "def", "listeI", "(", "l", ")", ":", "result", "=", "[", "]", "lvirg", "=", "l", ".", "split", "(", "','", ")", "for", "virg", "in", "lvirg", ":", "if", "\"-\"", "in", "virg", ":", "deb", ",", "fin", "=", "tuple", "(", "virg", ".", "split", "(", "\"-\"", ")", ")", "result", "+=", "[", "i", "for", "i", "in", "range", "(", "int", "(", "deb", ")", ",", "int", "(", "fin", ")", "+", "1", ")", "]", "else", ":", "result", ".", "append", "(", "int", "(", "virg", ")", ")", "return", "result" ]
Important function that returns a list of integers from a string. The string is a list of sections separated by commas. A section can be either an integer or an interval of integers; in that case the lower and upper bounds of the interval are given, separated by the '-' character. Numerous examples of intervals can be found in the data/modeles.la file. :param l: String to transform :type l: str :return: List of expanded sections :rtype: list of int
[ "Fonction", "importante", "permettant", "de", "renvoyer", "une", "liste", "d", "entiers", "à", "partir", "d", "une", "chaîne", ".", "La", "chaîne", "est", "une", "liste", "de", "sections", "séparées", "par", "des", "virgules", ".", "Une", "section", "peut", "être", "soit", "un", "entier", "soit", "un", "intervalle", "d", "entiers", ".", "On", "donne", "alors", "les", "limites", "inférieure", "et", "supérieure", "de", "l", "intervale", "séparées", "par", "le", "caractère", "-", ".", "Nombreux", "exemples", "d", "intervalles", "dans", "le", "fichier", "data", "/", "modeles", ".", "la", "." ]
python
train
33.423077
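A standalone restatement of the interval expansion performed by listeI(), so the behaviour can be tried without pycollatinus; the input string is a made-up example in the style of data/modeles.la.

def expand_intervals(spec):
    # "1,3-5" style strings become flat lists of ints, as in listeI().
    result = []
    for part in spec.split(','):
        if '-' in part:
            low, high = part.split('-')
            result.extend(range(int(low), int(high) + 1))
        else:
            result.append(int(part))
    return result

print(expand_intervals("160-163,170,172-174"))
# [160, 161, 162, 163, 170, 172, 173, 174]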
googleapis/google-cloud-python
datastore/google/cloud/datastore/query.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/datastore/google/cloud/datastore/query.py#L184-L192
def ancestor(self, value): """Set the ancestor for the query :type value: :class:`~google.cloud.datastore.key.Key` :param value: the new ancestor key """ if not isinstance(value, Key): raise TypeError("Ancestor must be a Key") self._ancestor = value
[ "def", "ancestor", "(", "self", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "Key", ")", ":", "raise", "TypeError", "(", "\"Ancestor must be a Key\"", ")", "self", ".", "_ancestor", "=", "value" ]
Set the ancestor for the query :type value: :class:`~google.cloud.datastore.key.Key` :param value: the new ancestor key
[ "Set", "the", "ancestor", "for", "the", "query" ]
python
train
33.555556
googleapis/google-auth-library-python
google/auth/transport/urllib3.py
https://github.com/googleapis/google-auth-library-python/blob/2c6ad78917e936f38f87c946209c8031166dc96e/google/auth/transport/urllib3.py#L198-L246
def urlopen(self, method, url, body=None, headers=None, **kwargs): """Implementation of urllib3's urlopen.""" # pylint: disable=arguments-differ # We use kwargs to collect additional args that we don't need to # introspect here. However, we do explicitly collect the two # positional arguments. # Use a kwarg for this instead of an attribute to maintain # thread-safety. _credential_refresh_attempt = kwargs.pop( '_credential_refresh_attempt', 0) if headers is None: headers = self.headers # Make a copy of the headers. They will be modified by the credentials # and we want to pass the original headers if we recurse. request_headers = headers.copy() self.credentials.before_request( self._request, method, url, request_headers) response = self.http.urlopen( method, url, body=body, headers=request_headers, **kwargs) # If the response indicated that the credentials needed to be # refreshed, then refresh the credentials and re-attempt the # request. # A stored token may expire between the time it is retrieved and # the time the request is made, so we may need to try twice. # The reason urllib3's retries aren't used is because they # don't allow you to modify the request headers. :/ if (response.status in self._refresh_status_codes and _credential_refresh_attempt < self._max_refresh_attempts): _LOGGER.info( 'Refreshing credentials due to a %s response. Attempt %s/%s.', response.status, _credential_refresh_attempt + 1, self._max_refresh_attempts) self.credentials.refresh(self._request) # Recurse. Pass in the original headers, not our modified set. return self.urlopen( method, url, body=body, headers=headers, _credential_refresh_attempt=_credential_refresh_attempt + 1, **kwargs) return response
[ "def", "urlopen", "(", "self", ",", "method", ",", "url", ",", "body", "=", "None", ",", "headers", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=arguments-differ", "# We use kwargs to collect additional args that we don't need to", "# introspect here. However, we do explicitly collect the two", "# positional arguments.", "# Use a kwarg for this instead of an attribute to maintain", "# thread-safety.", "_credential_refresh_attempt", "=", "kwargs", ".", "pop", "(", "'_credential_refresh_attempt'", ",", "0", ")", "if", "headers", "is", "None", ":", "headers", "=", "self", ".", "headers", "# Make a copy of the headers. They will be modified by the credentials", "# and we want to pass the original headers if we recurse.", "request_headers", "=", "headers", ".", "copy", "(", ")", "self", ".", "credentials", ".", "before_request", "(", "self", ".", "_request", ",", "method", ",", "url", ",", "request_headers", ")", "response", "=", "self", ".", "http", ".", "urlopen", "(", "method", ",", "url", ",", "body", "=", "body", ",", "headers", "=", "request_headers", ",", "*", "*", "kwargs", ")", "# If the response indicated that the credentials needed to be", "# refreshed, then refresh the credentials and re-attempt the", "# request.", "# A stored token may expire between the time it is retrieved and", "# the time the request is made, so we may need to try twice.", "# The reason urllib3's retries aren't used is because they", "# don't allow you to modify the request headers. :/", "if", "(", "response", ".", "status", "in", "self", ".", "_refresh_status_codes", "and", "_credential_refresh_attempt", "<", "self", ".", "_max_refresh_attempts", ")", ":", "_LOGGER", ".", "info", "(", "'Refreshing credentials due to a %s response. Attempt %s/%s.'", ",", "response", ".", "status", ",", "_credential_refresh_attempt", "+", "1", ",", "self", ".", "_max_refresh_attempts", ")", "self", ".", "credentials", ".", "refresh", "(", "self", ".", "_request", ")", "# Recurse. Pass in the original headers, not our modified set.", "return", "self", ".", "urlopen", "(", "method", ",", "url", ",", "body", "=", "body", ",", "headers", "=", "headers", ",", "_credential_refresh_attempt", "=", "_credential_refresh_attempt", "+", "1", ",", "*", "*", "kwargs", ")", "return", "response" ]
Implementation of urllib3's urlopen.
[ "Implementation", "of", "urllib3", "s", "urlopen", "." ]
python
train
42.102041
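A generic sketch of the refresh-and-retry pattern implemented by urlopen() above; credentials, send, and the status codes are stand-ins rather than google-auth or urllib3 APIs.

REFRESH_STATUS_CODES = (401,)
MAX_REFRESH_ATTEMPTS = 2

def request_with_refresh(send, credentials, method, url, headers, attempt=0):
    # Sign a copy of the headers, send, and on an auth failure refresh the
    # credentials and replay the original (unsigned) headers, bounded in depth.
    signed = dict(headers)
    credentials.apply(signed)
    response = send(method, url, signed)
    if response.status in REFRESH_STATUS_CODES and attempt < MAX_REFRESH_ATTEMPTS:
        credentials.refresh()
        return request_with_refresh(send, credentials, method, url,
                                    headers, attempt + 1)
    return response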
thebigmunch/google-music
src/google_music/clients/mobileclient.py
https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L1840-L1864
def stations_iter(self, *, page_size=250): """Get a paged iterator of library stations. Parameters: page_size (int, Optional): The maximum number of results per returned page. Max allowed is ``49995``. Default: ``250`` Yields: list: Station dicts. """ start_token = None while True: response = self._call( mc_calls.RadioStation, max_results=page_size, start_token=start_token ) yield response.body.get('data', {}).get('items', []) start_token = response.body.get('nextPageToken') if start_token is None: break
[ "def", "stations_iter", "(", "self", ",", "*", ",", "page_size", "=", "250", ")", ":", "start_token", "=", "None", "while", "True", ":", "response", "=", "self", ".", "_call", "(", "mc_calls", ".", "RadioStation", ",", "max_results", "=", "page_size", ",", "start_token", "=", "start_token", ")", "yield", "response", ".", "body", ".", "get", "(", "'data'", ",", "{", "}", ")", ".", "get", "(", "'items'", ",", "[", "]", ")", "start_token", "=", "response", ".", "body", ".", "get", "(", "'nextPageToken'", ")", "if", "start_token", "is", "None", ":", "break" ]
Get a paged iterator of library stations. Parameters: page_size (int, Optional): The maximum number of results per returned page. Max allowed is ``49995``. Default: ``250`` Yields: list: Station dicts.
[ "Get", "a", "paged", "iterator", "of", "library", "stations", "." ]
python
train
21.88
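A generic sketch of the nextPageToken pagination loop that stations_iter() uses; fetch_page stands in for the API call and is fed canned example pages here.

def paginate(fetch_page, page_size=250):
    token = None
    while True:
        body = fetch_page(max_results=page_size, start_token=token)
        yield body.get('data', {}).get('items', [])
        token = body.get('nextPageToken')
        if token is None:
            break

pages = iter([
    {'data': {'items': ['station-1', 'station-2']}, 'nextPageToken': 'abc'},
    {'data': {'items': ['station-3']}},
])
for items in paginate(lambda **kwargs: next(pages)):
    print(items)   # ['station-1', 'station-2'], then ['station-3']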
genialis/resolwe
resolwe/flow/views/mixins.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/views/mixins.py#L30-L32
def define_contributor(self, request): """Define contributor by adding it to request.data.""" request.data['contributor'] = self.resolve_user(request.user).pk
[ "def", "define_contributor", "(", "self", ",", "request", ")", ":", "request", ".", "data", "[", "'contributor'", "]", "=", "self", ".", "resolve_user", "(", "request", ".", "user", ")", ".", "pk" ]
Define contributor by adding it to request.data.
[ "Define", "contributor", "by", "adding", "it", "to", "request", ".", "data", "." ]
python
train
57.333333
istresearch/scrapy-cluster
utils/scutils/zookeeper_watcher.py
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/zookeeper_watcher.py#L237-L255
def update_pointed(self): ''' Grabs the latest file contents based on the pointer uri ''' # only grab file if our pointer is still good (not None) if not self.pointed_at_expired: try: conf_string, stat2 = self.zoo_client.get(self.point_path, watch=self.watch_pointed) except ZookeeperError: self.old_data = '' self.set_valid(False) self.pointed_at_expired = True self.call_error(self.INVALID_PATH) return if self.compare_data(conf_string): self.call_config(conf_string) self.set_valid(True)
[ "def", "update_pointed", "(", "self", ")", ":", "# only grab file if our pointer is still good (not None)", "if", "not", "self", ".", "pointed_at_expired", ":", "try", ":", "conf_string", ",", "stat2", "=", "self", ".", "zoo_client", ".", "get", "(", "self", ".", "point_path", ",", "watch", "=", "self", ".", "watch_pointed", ")", "except", "ZookeeperError", ":", "self", ".", "old_data", "=", "''", "self", ".", "set_valid", "(", "False", ")", "self", ".", "pointed_at_expired", "=", "True", "self", ".", "call_error", "(", "self", ".", "INVALID_PATH", ")", "return", "if", "self", ".", "compare_data", "(", "conf_string", ")", ":", "self", ".", "call_config", "(", "conf_string", ")", "self", ".", "set_valid", "(", "True", ")" ]
Grabs the latest file contents based on the pointer uri
[ "Grabs", "the", "latest", "file", "contents", "based", "on", "the", "pointer", "uri" ]
python
train
38.157895
rkcosmos/deepcut
deepcut/train.py
https://github.com/rkcosmos/deepcut/blob/9a2729071d01972af805acede85d7aa9e7a6da30/deepcut/train.py#L39-L62
def create_char_dataframe(words): """ Give list of input tokenized words, create dataframe of characters where first character of the word is tagged as 1, otherwise 0 Example ======= ['กิน', 'หมด'] to dataframe of [{'char': 'ก', 'type': ..., 'target': 1}, ..., {'char': 'ด', 'type': ..., 'target': 0}] """ char_dict = [] for word in words: for i, char in enumerate(word): if i == 0: char_dict.append({'char': char, 'type': CHAR_TYPE_FLATTEN.get(char, 'o'), 'target': True}) else: char_dict.append({'char': char, 'type': CHAR_TYPE_FLATTEN.get(char, 'o'), 'target': False}) return pd.DataFrame(char_dict)
[ "def", "create_char_dataframe", "(", "words", ")", ":", "char_dict", "=", "[", "]", "for", "word", "in", "words", ":", "for", "i", ",", "char", "in", "enumerate", "(", "word", ")", ":", "if", "i", "==", "0", ":", "char_dict", ".", "append", "(", "{", "'char'", ":", "char", ",", "'type'", ":", "CHAR_TYPE_FLATTEN", ".", "get", "(", "char", ",", "'o'", ")", ",", "'target'", ":", "True", "}", ")", "else", ":", "char_dict", ".", "append", "(", "{", "'char'", ":", "char", ",", "'type'", ":", "CHAR_TYPE_FLATTEN", ".", "get", "(", "char", ",", "'o'", ")", ",", "'target'", ":", "False", "}", ")", "return", "pd", ".", "DataFrame", "(", "char_dict", ")" ]
Give list of input tokenized words, create dataframe of characters where first character of the word is tagged as 1, otherwise 0 Example ======= ['กิน', 'หมด'] to dataframe of [{'char': 'ก', 'type': ..., 'target': 1}, ..., {'char': 'ด', 'type': ..., 'target': 0}]
[ "Give", "list", "of", "input", "tokenized", "words", "create", "dataframe", "of", "characters", "where", "first", "character", "of", "the", "word", "is", "tagged", "as", "1", "otherwise", "0" ]
python
valid
34.708333
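An English-alphabet illustration of the per-character frame built by create_char_dataframe(); the empty char-type map is a stand-in for deepcut's CHAR_TYPE_FLATTEN, so every character falls back to type 'o'.

import pandas as pd

CHAR_TYPE = {}   # stand-in for CHAR_TYPE_FLATTEN

rows = []
for word in ['eat', 'all']:
    for i, char in enumerate(word):
        # Word-initial characters are the positive targets.
        rows.append({'char': char,
                     'type': CHAR_TYPE.get(char, 'o'),
                     'target': i == 0})
print(pd.DataFrame(rows))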
sernst/cauldron
cauldron/environ/logger.py
https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/environ/logger.py#L14-L27
def add_output_path(path: str = None) -> str: """ Adds the specified path to the output logging paths if it is not already in the listed paths. :param path: The path to add to the logging output paths. If the path is empty or no path is given, the current working directory will be used instead. """ cleaned = paths.clean(path or os.getcwd()) if cleaned not in _logging_paths: _logging_paths.append(cleaned) return cleaned
[ "def", "add_output_path", "(", "path", ":", "str", "=", "None", ")", "->", "str", ":", "cleaned", "=", "paths", ".", "clean", "(", "path", "or", "os", ".", "getcwd", "(", ")", ")", "if", "cleaned", "not", "in", "_logging_paths", ":", "_logging_paths", ".", "append", "(", "cleaned", ")", "return", "cleaned" ]
Adds the specified path to the output logging paths if it is not already in the listed paths. :param path: The path to add to the logging output paths. If the path is empty or no path is given, the current working directory will be used instead.
[ "Adds", "the", "specified", "path", "to", "the", "output", "logging", "paths", "if", "it", "is", "not", "already", "in", "the", "listed", "paths", "." ]
python
train
33.857143
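A standalone sketch of the same "register an output path once" pattern, assuming a module-level list as the registry and using os.path for the cleanup step (the project's own paths.clean helper is not reproduced here):

import os

_logging_paths = []  # module-level registry of output paths

def add_output_path(path=None):
    # Fall back to the current working directory when no path is given.
    cleaned = os.path.abspath(os.path.expanduser(path or os.getcwd()))
    if cleaned not in _logging_paths:
        _logging_paths.append(cleaned)
    return cleaned

print(add_output_path())          # registers the current directory
print(add_output_path('~/logs'))  # expands and registers ~/logs once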
odlgroup/odl
odl/discr/grid.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/grid.py#L234-L241
def shape(self): """Number of grid points per axis.""" try: return self.__shape except AttributeError: shape = tuple(len(vec) for vec in self.coord_vectors) self.__shape = shape return shape
[ "def", "shape", "(", "self", ")", ":", "try", ":", "return", "self", ".", "__shape", "except", "AttributeError", ":", "shape", "=", "tuple", "(", "len", "(", "vec", ")", "for", "vec", "in", "self", ".", "coord_vectors", ")", "self", ".", "__shape", "=", "shape", "return", "shape" ]
Number of grid points per axis.
[ "Number", "of", "grid", "points", "per", "axis", "." ]
python
train
31.875
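The shape property above caches its result on first access by catching AttributeError. A small self-contained sketch of that lazy-caching idiom, using a toy class rather than the ODL grid itself:

class Grid:
    def __init__(self, coord_vectors):
        self.coord_vectors = coord_vectors

    @property
    def shape(self):
        # Compute once, then reuse the cached tuple on later accesses.
        try:
            return self.__shape
        except AttributeError:
            self.__shape = tuple(len(vec) for vec in self.coord_vectors)
            return self.__shape

g = Grid(([0, 1, 2], [0.0, 0.5]))
print(g.shape)  # (3, 2), computed on first access
print(g.shape)  # (3, 2), served from the cache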
CitrineInformatics/pypif-sdk
pypif_sdk/interop/datacite.py
https://github.com/CitrineInformatics/pypif-sdk/blob/8b01d10d9a1426d5eef12e4b2f31c4657aa0fe59/pypif_sdk/interop/datacite.py#L42-L57
def datacite_to_pif_reference(dc): """ Parse a top-level datacite dictionary into a Reference :param dc: dictionary containing datacite metadata :return: Reference corresponding to that datacite entry """ ref = Reference() if dc.get('identifier', {}).get('identifierType') == "DOI": ref.doi = dc.get('identifier', {}).get('identifier') ref.title = dc.get('title') ref.publisher = dc.get('publisher') ref.year = dc.get('publicationYear') ref.authors = [creator_to_person(x).name for x in dc.get('creators', [])] or None return ref
[ "def", "datacite_to_pif_reference", "(", "dc", ")", ":", "ref", "=", "Reference", "(", ")", "if", "dc", ".", "get", "(", "'identifier'", ",", "{", "}", ")", ".", "get", "(", "'identifierType'", ")", "==", "\"DOI\"", ":", "ref", ".", "doi", "=", "dc", ".", "get", "(", "'identifier'", ",", "{", "}", ")", ".", "get", "(", "'identifier'", ")", "ref", ".", "title", "=", "dc", ".", "get", "(", "'title'", ")", "ref", ".", "publisher", "=", "dc", ".", "get", "(", "'publisher'", ")", "ref", ".", "year", "=", "dc", ".", "get", "(", "'publicationYear'", ")", "ref", ".", "authors", "=", "[", "creator_to_person", "(", "x", ")", ".", "name", "for", "x", "in", "dc", ".", "get", "(", "'creators'", ",", "[", "]", ")", "]", "or", "None", "return", "ref" ]
Parse a top-level datacite dictionary into a Reference :param dc: dictionary containing datacite metadata :return: Reference corresponding to that datacite entry
[ "Parse", "a", "top", "-", "level", "datacite", "dictionary", "into", "a", "Reference", ":", "param", "dc", ":", "dictionary", "containing", "datacite", "metadata", ":", "return", ":", "Reference", "corresponding", "to", "that", "datacite", "entry" ]
python
train
35.75
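A sketch of the same DataCite-to-reference mapping, returning a plain dict instead of the pypif Reference class; the sample record and its creatorName field are illustrative assumptions, not taken from the source:

def datacite_to_reference_dict(dc):
    # Only copy the DOI when the identifier is explicitly typed as one.
    identifier = dc.get('identifier', {})
    return {
        'doi': identifier.get('identifier') if identifier.get('identifierType') == 'DOI' else None,
        'title': dc.get('title'),
        'publisher': dc.get('publisher'),
        'year': dc.get('publicationYear'),
        'authors': [c.get('creatorName') for c in dc.get('creators', [])] or None,
    }

sample = {
    'identifier': {'identifier': '10.1234/example', 'identifierType': 'DOI'},
    'title': 'An example dataset',
    'publisher': 'Example Repo',
    'publicationYear': '2016',
    'creators': [{'creatorName': 'Doe, Jane'}],
}
print(datacite_to_reference_dict(sample))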
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L3554-L3558
def reshape_range(tensor, i, j, shape): """Reshapes a tensor between dimensions i and j.""" t_shape = common_layers.shape_list(tensor) target_shape = t_shape[:i] + shape + t_shape[j:] return tf.reshape(tensor, target_shape)
[ "def", "reshape_range", "(", "tensor", ",", "i", ",", "j", ",", "shape", ")", ":", "t_shape", "=", "common_layers", ".", "shape_list", "(", "tensor", ")", "target_shape", "=", "t_shape", "[", ":", "i", "]", "+", "shape", "+", "t_shape", "[", "j", ":", "]", "return", "tf", ".", "reshape", "(", "tensor", ",", "target_shape", ")" ]
Reshapes a tensor between dimensions i and j.
[ "Reshapes", "a", "tensor", "between", "dimensions", "i", "and", "j", "." ]
python
train
45.4
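The helper above splices a new shape in place of dimensions i..j of a tensor. A NumPy analogue (not the TensorFlow original) makes the index arithmetic concrete:

import numpy as np

def reshape_range(tensor, i, j, shape):
    # Replace dimensions i..j-1 with the given shape, keeping the rest.
    t_shape = list(tensor.shape)
    target_shape = t_shape[:i] + list(shape) + t_shape[j:]
    return tensor.reshape(target_shape)

x = np.zeros((2, 3, 4, 5))
# Merge axes 1 and 2 (sizes 3 and 4) into a single axis of size 12.
print(reshape_range(x, 1, 3, [12]).shape)  # (2, 12, 5)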
bitesofcode/projexui
projexui/widgets/xchart/xchart.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xchart/xchart.py#L353-L361
def pointAt(self, **axis_values): """ Returns the point on the chart where the inputed values are located. :return <QPointF> """ scene_point = self.renderer().pointAt(self.axes(), axis_values) chart_point = self.uiChartVIEW.mapFromScene(scene_point) return self.uiChartVIEW.mapToParent(chart_point)
[ "def", "pointAt", "(", "self", ",", "*", "*", "axis_values", ")", ":", "scene_point", "=", "self", ".", "renderer", "(", ")", ".", "pointAt", "(", "self", ".", "axes", "(", ")", ",", "axis_values", ")", "chart_point", "=", "self", ".", "uiChartVIEW", ".", "mapFromScene", "(", "scene_point", ")", "return", "self", ".", "uiChartVIEW", ".", "mapToParent", "(", "chart_point", ")" ]
Returns the point on the chart where the input values are located. :return <QPointF>
[ "Returns", "the", "point", "on", "the", "chart", "where", "the", "inputed", "values", "are", "located", ".", ":", "return", "<QPointF", ">" ]
python
train
40.777778
edeposit/edeposit.amqp.ftp
src/edeposit/amqp/ftp/passwd_reader.py
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/passwd_reader.py#L153-L167
def _encode_config(conf_dict): """Encode `conf_dict` to string.""" out = [] # get variables in order defined in settings._ALLOWED_MERGES for var in settings._ALLOWED_MERGES: out.append(conf_dict[var]) # convert bools to chars out = map( lambda x: "t" if x else "f", out ) return "".join(out)
[ "def", "_encode_config", "(", "conf_dict", ")", ":", "out", "=", "[", "]", "# get variables in order defined in settings._ALLOWED_MERGES", "for", "var", "in", "settings", ".", "_ALLOWED_MERGES", ":", "out", ".", "append", "(", "conf_dict", "[", "var", "]", ")", "# convert bools to chars", "out", "=", "map", "(", "lambda", "x", ":", "\"t\"", "if", "x", "else", "\"f\"", ",", "out", ")", "return", "\"\"", ".", "join", "(", "out", ")" ]
Encode `conf_dict` to string.
[ "Encode", "conf_dict", "to", "string", "." ]
python
train
22.4
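A self-contained sketch of the encoding step, with a hypothetical _ALLOWED_MERGES ordering standing in for the real settings module (the actual flag names are not shown in the record):

# Hypothetical stand-in for settings._ALLOWED_MERGES.
_ALLOWED_MERGES = ['interactive', 'big_files', 'same_name_dir']

def _encode_config(conf_dict):
    # Serialize the flags in a fixed order, one 't'/'f' character per flag.
    flags = [conf_dict[var] for var in _ALLOWED_MERGES]
    return ''.join('t' if flag else 'f' for flag in flags)

print(_encode_config({'interactive': True, 'big_files': False, 'same_name_dir': True}))  # 'tft'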
maceoutliner/django-fiction-outlines
fiction_outlines/models.py
https://github.com/maceoutliner/django-fiction-outlines/blob/6c58e356af3fbe7b23557643ba27e46eaef9d4e3/fiction_outlines/models.py#L829-L878
def impact_rating(self): ''' Returns the impact rating for this node. Impact rating is a measure of how powerful this moment in the story is by evaluting how many simultaneous arc elements are associated with it. There is also a generational bleed element, where the impact score creates shockwaves throughout their direct ancestor and descendant nodes. This echo fades fast, but the bigger the impact, the farther it goes. Currently, the impact bleed does not extend to sibling nodes. WARNING: Here be dragons. ''' if self.depth == 1: logger.debug('Root node. Skipping.') return 0 # pragma: no cover impact_bleed = { 'mile': 0.5, # A milestone extends it's influence by 50% per generation 'tf_beat': 0.25, } inherited_impact = 0 base_impact, add_impact, mile_impact = self._local_impact_rating() local_impact = base_impact + add_impact + mile_impact logger.debug("Local impact is %f" % local_impact) parents = self.get_ancestors().filter(depth__gt=1) children = self.get_descendants() logger.debug('Found %d parents and %d children' % (parents.count(), children.count())) for node in parents | children: if node.depth == 1: logger.debug("Skipping root node...") else: logger.debug('Checking a related node...') b, a, m = node._local_impact_rating() logger.debug('Related node has %f of additional impact and %f of milestone impact.' % (a, m)) if (a + m) > 0: if node.depth > self.depth: depth_diff = node.depth - self.depth else: depth_diff = self.depth - node.depth logger.debug('There is a generational difference of %f. Adjusting impact bleed.' % depth_diff) for x in range(depth_diff): a = a * impact_bleed['tf_beat'] m = m * impact_bleed['mile'] logger.debug('Additional impact bleed of %f. Milestone impact bleed of %f' % (a, m)) inherited_impact += a + m logger.debug('Final impact bleed of %f. Adding to inherited impact.' % inherited_impact) else: logger.debug('Node had 0 bleedworthy impact. Skipping...') logger.debug('Inherited impact of %f. Adding to local impact of %f' % (inherited_impact, local_impact)) return local_impact + inherited_impact
[ "def", "impact_rating", "(", "self", ")", ":", "if", "self", ".", "depth", "==", "1", ":", "logger", ".", "debug", "(", "'Root node. Skipping.'", ")", "return", "0", "# pragma: no cover", "impact_bleed", "=", "{", "'mile'", ":", "0.5", ",", "# A milestone extends it's influence by 50% per generation", "'tf_beat'", ":", "0.25", ",", "}", "inherited_impact", "=", "0", "base_impact", ",", "add_impact", ",", "mile_impact", "=", "self", ".", "_local_impact_rating", "(", ")", "local_impact", "=", "base_impact", "+", "add_impact", "+", "mile_impact", "logger", ".", "debug", "(", "\"Local impact is %f\"", "%", "local_impact", ")", "parents", "=", "self", ".", "get_ancestors", "(", ")", ".", "filter", "(", "depth__gt", "=", "1", ")", "children", "=", "self", ".", "get_descendants", "(", ")", "logger", ".", "debug", "(", "'Found %d parents and %d children'", "%", "(", "parents", ".", "count", "(", ")", ",", "children", ".", "count", "(", ")", ")", ")", "for", "node", "in", "parents", "|", "children", ":", "if", "node", ".", "depth", "==", "1", ":", "logger", ".", "debug", "(", "\"Skipping root node...\"", ")", "else", ":", "logger", ".", "debug", "(", "'Checking a related node...'", ")", "b", ",", "a", ",", "m", "=", "node", ".", "_local_impact_rating", "(", ")", "logger", ".", "debug", "(", "'Related node has %f of additional impact and %f of milestone impact.'", "%", "(", "a", ",", "m", ")", ")", "if", "(", "a", "+", "m", ")", ">", "0", ":", "if", "node", ".", "depth", ">", "self", ".", "depth", ":", "depth_diff", "=", "node", ".", "depth", "-", "self", ".", "depth", "else", ":", "depth_diff", "=", "self", ".", "depth", "-", "node", ".", "depth", "logger", ".", "debug", "(", "'There is a generational difference of %f. Adjusting impact bleed.'", "%", "depth_diff", ")", "for", "x", "in", "range", "(", "depth_diff", ")", ":", "a", "=", "a", "*", "impact_bleed", "[", "'tf_beat'", "]", "m", "=", "m", "*", "impact_bleed", "[", "'mile'", "]", "logger", ".", "debug", "(", "'Additional impact bleed of %f. Milestone impact bleed of %f'", "%", "(", "a", ",", "m", ")", ")", "inherited_impact", "+=", "a", "+", "m", "logger", ".", "debug", "(", "'Final impact bleed of %f. Adding to inherited impact.'", "%", "inherited_impact", ")", "else", ":", "logger", ".", "debug", "(", "'Node had 0 bleedworthy impact. Skipping...'", ")", "logger", ".", "debug", "(", "'Inherited impact of %f. Adding to local impact of %f'", "%", "(", "inherited_impact", ",", "local_impact", ")", ")", "return", "local_impact", "+", "inherited_impact" ]
Returns the impact rating for this node. Impact rating is a measure of how powerful this moment in the story is by evaluating how many simultaneous arc elements are associated with it. There is also a generational bleed element, where the impact score creates shockwaves throughout its direct ancestor and descendant nodes. This echo fades fast, but the bigger the impact, the farther it goes. Currently, the impact bleed does not extend to sibling nodes. WARNING: Here be dragons.
[ "Returns", "the", "impact", "rating", "for", "this", "node", ".", "Impact", "rating", "is", "a", "measure", "of", "how", "powerful", "this", "moment", "in", "the", "story", "is", "by", "evaluting", "how", "many", "simultaneous", "arc", "elements", "are", "associated", "with", "it", ".", "There", "is", "also", "a", "generational", "bleed", "element", "where", "the", "impact", "score", "creates", "shockwaves", "throughout", "their", "direct", "ancestor", "and", "descendant", "nodes", ".", "This", "echo", "fades", "fast", "but", "the", "bigger", "the", "impact", "the", "farther", "it", "goes", "." ]
python
train
52.44
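The generational bleed described above decays additional (beat) impact by a factor of 0.25 and milestone impact by 0.5 for each generation of distance. A few lines of arithmetic with made-up impact values show how quickly the echo fades:

def bled_impact(add_impact, mile_impact, depth_diff,
                tf_beat_bleed=0.25, mile_bleed=0.5):
    # Apply the per-generation decay once for each level of separation.
    for _ in range(depth_diff):
        add_impact *= tf_beat_bleed
        mile_impact *= mile_bleed
    return add_impact + mile_impact

# A related node with 2.0 beat impact and 4.0 milestone impact:
for distance in (1, 2, 3):
    print(distance, bled_impact(2.0, 4.0, distance))
# 1 -> 2.5, 2 -> 1.125, 3 -> 0.53125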
blackecho/Deep-Learning-TensorFlow
yadlt/models/boltzmann/deep_autoencoder.py
https://github.com/blackecho/Deep-Learning-TensorFlow/blob/ddeb1f2848da7b7bee166ad2152b4afc46bb2086/yadlt/models/boltzmann/deep_autoencoder.py#L237-L246
def _create_variables_pretrain(self): """Create model variables (previous unsupervised pretraining). :return: self """ for l, layer in enumerate(self.layers): self.encoding_w_[l] = tf.Variable( self.encoding_w_[l], name='enc-w-{}'.format(l)) self.encoding_b_[l] = tf.Variable( self.encoding_b_[l], name='enc-b-{}'.format(l))
[ "def", "_create_variables_pretrain", "(", "self", ")", ":", "for", "l", ",", "layer", "in", "enumerate", "(", "self", ".", "layers", ")", ":", "self", ".", "encoding_w_", "[", "l", "]", "=", "tf", ".", "Variable", "(", "self", ".", "encoding_w_", "[", "l", "]", ",", "name", "=", "'enc-w-{}'", ".", "format", "(", "l", ")", ")", "self", ".", "encoding_b_", "[", "l", "]", "=", "tf", ".", "Variable", "(", "self", ".", "encoding_b_", "[", "l", "]", ",", "name", "=", "'enc-b-{}'", ".", "format", "(", "l", ")", ")" ]
Create model variables (from previous unsupervised pretraining). :return: self
[ "Create", "model", "variables", "(", "previous", "unsupervised", "pretraining", ")", "." ]
python
train
40.4
ets-labs/python-dependency-injector
examples/providers/factory_delegation.py
https://github.com/ets-labs/python-dependency-injector/blob/d04fe41eb17f667da38b97525e2d16c8f2d272fe/examples/providers/factory_delegation.py#L20-L24
def main_photo(self): """Return user's main photo.""" if not self._main_photo: self._main_photo = self.photos_factory() return self._main_photo
[ "def", "main_photo", "(", "self", ")", ":", "if", "not", "self", ".", "_main_photo", ":", "self", ".", "_main_photo", "=", "self", ".", "photos_factory", "(", ")", "return", "self", ".", "_main_photo" ]
Return user's main photo.
[ "Return", "user", "s", "main", "photo", "." ]
python
train
35
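A toy version of the same lazy-initialization pattern, with a stub factory standing in for the injected photos_factory provider:

class User:
    def __init__(self, photos_factory):
        self.photos_factory = photos_factory
        self._main_photo = None

    @property
    def main_photo(self):
        # Create the photo on first access only, then reuse it.
        if not self._main_photo:
            self._main_photo = self.photos_factory()
        return self._main_photo

user = User(photos_factory=lambda: object())
assert user.main_photo is user.main_photo  # same object on every access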
spyder-ide/spyder
spyder/plugins/variableexplorer/widgets/collectionseditor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/variableexplorer/widgets/collectionseditor.py#L1311-L1314
def get_len(self, key): """Return sequence length""" data = self.model.get_data() return len(data[key])
[ "def", "get_len", "(", "self", ",", "key", ")", ":", "data", "=", "self", ".", "model", ".", "get_data", "(", ")", "return", "len", "(", "data", "[", "key", "]", ")" ]
Return sequence length
[ "Return", "sequence", "length" ]
python
train
31.75
sveetch/boussole
boussole/watcher.py
https://github.com/sveetch/boussole/blob/22cc644e9d633f41ebfc167d427a71c1726cee21/boussole/watcher.py#L265-L273
def compile_dependencies(self, sourcepath, include_self=True): """ Same as inherit method but the default value for keyword argument ``ìnclude_self`` is ``True``. """ return super(SassProjectEventHandler, self).compile_dependencies( sourcepath, include_self=include_self )
[ "def", "compile_dependencies", "(", "self", ",", "sourcepath", ",", "include_self", "=", "True", ")", ":", "return", "super", "(", "SassProjectEventHandler", ",", "self", ")", ".", "compile_dependencies", "(", "sourcepath", ",", "include_self", "=", "include_self", ")" ]
Same as the inherited method, but the default value for keyword argument ``include_self`` is ``True``.
[ "Same", "as", "inherit", "method", "but", "the", "default", "value", "for", "keyword", "argument", "ìnclude_self", "is", "True", "." ]
python
train
37.333333
ReFirmLabs/binwalk
src/binwalk/core/module.py
https://github.com/ReFirmLabs/binwalk/blob/a0c5315fd2bae167e5c3d8469ce95d5defc743c2/src/binwalk/core/module.py#L528-L540
def header(self): ''' Displays the scan header, as defined by self.HEADER and self.HEADER_FORMAT. Returns None. ''' self.config.display.format_strings(self.HEADER_FORMAT, self.RESULT_FORMAT) self.config.display.add_custom_header(self.VERBOSE_FORMAT, self.VERBOSE) if type(self.HEADER) == type([]): self.config.display.header(*self.HEADER, file_name=self.current_target_file_name) elif self.HEADER: self.config.display.header(self.HEADER, file_name=self.current_target_file_name)
[ "def", "header", "(", "self", ")", ":", "self", ".", "config", ".", "display", ".", "format_strings", "(", "self", ".", "HEADER_FORMAT", ",", "self", ".", "RESULT_FORMAT", ")", "self", ".", "config", ".", "display", ".", "add_custom_header", "(", "self", ".", "VERBOSE_FORMAT", ",", "self", ".", "VERBOSE", ")", "if", "type", "(", "self", ".", "HEADER", ")", "==", "type", "(", "[", "]", ")", ":", "self", ".", "config", ".", "display", ".", "header", "(", "*", "self", ".", "HEADER", ",", "file_name", "=", "self", ".", "current_target_file_name", ")", "elif", "self", ".", "HEADER", ":", "self", ".", "config", ".", "display", ".", "header", "(", "self", ".", "HEADER", ",", "file_name", "=", "self", ".", "current_target_file_name", ")" ]
Displays the scan header, as defined by self.HEADER and self.HEADER_FORMAT. Returns None.
[ "Displays", "the", "scan", "header", "as", "defined", "by", "self", ".", "HEADER", "and", "self", ".", "HEADER_FORMAT", "." ]
python
train
42.769231
offu/WeRoBot
werobot/client.py
https://github.com/offu/WeRoBot/blob/fd42109105b03f9acf45ebd9dcabb9d5cff98f3c/werobot/client.py#L97-L112
def get_access_token(self): """ 判断现有的token是否过期。 用户需要多进程或者多机部署可以手动重写这个函数 来自定义token的存储,刷新策略。 :return: 返回token """ if self._token: now = time.time() if self.token_expires_at - now > 60: return self._token json = self.grant_token() self._token = json["access_token"] self.token_expires_at = int(time.time()) + json["expires_in"] return self._token
[ "def", "get_access_token", "(", "self", ")", ":", "if", "self", ".", "_token", ":", "now", "=", "time", ".", "time", "(", ")", "if", "self", ".", "token_expires_at", "-", "now", ">", "60", ":", "return", "self", ".", "_token", "json", "=", "self", ".", "grant_token", "(", ")", "self", ".", "_token", "=", "json", "[", "\"access_token\"", "]", "self", ".", "token_expires_at", "=", "int", "(", "time", ".", "time", "(", ")", ")", "+", "json", "[", "\"expires_in\"", "]", "return", "self", ".", "_token" ]
Check whether the existing token has expired. Users who deploy across multiple processes or machines can override this method to customize how the token is stored and refreshed. :return: the token
[ "判断现有的token是否过期。", "用户需要多进程或者多机部署可以手动重写这个函数", "来自定义token的存储,刷新策略。" ]
python
train
28.5625
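A standalone sketch of the same cached-token pattern, with a stub grant_token callable standing in for the remote API call made by the original client:

import time

class TokenCache:
    def __init__(self, grant_token):
        # grant_token: callable returning {'access_token': ..., 'expires_in': ...}
        self.grant_token = grant_token
        self._token = None
        self.token_expires_at = 0

    def get_access_token(self):
        # Reuse the cached token while it still has more than 60 seconds to live.
        if self._token and self.token_expires_at - time.time() > 60:
            return self._token
        payload = self.grant_token()
        self._token = payload['access_token']
        self.token_expires_at = int(time.time()) + payload['expires_in']
        return self._token

cache = TokenCache(lambda: {'access_token': 'abc123', 'expires_in': 7200})
print(cache.get_access_token())  # fetched once
print(cache.get_access_token())  # served from the cache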
evhub/coconut
coconut/compiler/compiler.py
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/compiler.py#L392-L445
def bind(self): """Binds reference objects to the proper parse actions.""" self.endline <<= attach(self.endline_ref, self.endline_handle) self.moduledoc_item <<= trace(attach(self.moduledoc, self.set_docstring)) self.name <<= trace(attach(self.base_name, self.name_check)) # comments are evaluated greedily because we need to know about them even if we're going to suppress them self.comment <<= trace(attach(self.comment_ref, self.comment_handle, greedy=True)) self.set_literal <<= trace(attach(self.set_literal_ref, self.set_literal_handle)) self.set_letter_literal <<= trace(attach(self.set_letter_literal_ref, self.set_letter_literal_handle)) self.classlist <<= trace(attach(self.classlist_ref, self.classlist_handle)) self.import_stmt <<= trace(attach(self.import_stmt_ref, self.import_handle)) self.complex_raise_stmt <<= trace(attach(self.complex_raise_stmt_ref, self.complex_raise_stmt_handle)) self.augassign_stmt <<= trace(attach(self.augassign_stmt_ref, self.augassign_handle)) self.dict_comp <<= trace(attach(self.dict_comp_ref, self.dict_comp_handle)) self.destructuring_stmt <<= trace(attach(self.destructuring_stmt_ref, self.destructuring_stmt_handle)) self.name_match_funcdef <<= trace(attach(self.name_match_funcdef_ref, self.name_match_funcdef_handle)) self.op_match_funcdef <<= trace(attach(self.op_match_funcdef_ref, self.op_match_funcdef_handle)) self.yield_from <<= trace(attach(self.yield_from_ref, self.yield_from_handle)) self.exec_stmt <<= trace(attach(self.exec_stmt_ref, self.exec_stmt_handle)) self.stmt_lambdef <<= trace(attach(self.stmt_lambdef_ref, self.stmt_lambdef_handle)) self.typedef <<= trace(attach(self.typedef_ref, self.typedef_handle)) self.typedef_default <<= trace(attach(self.typedef_default_ref, self.typedef_handle)) self.unsafe_typedef_default <<= trace(attach(self.unsafe_typedef_default_ref, self.unsafe_typedef_handle)) self.return_typedef <<= trace(attach(self.return_typedef_ref, self.typedef_handle)) self.typed_assign_stmt <<= trace(attach(self.typed_assign_stmt_ref, self.typed_assign_stmt_handle)) self.datadef <<= trace(attach(self.datadef_ref, self.data_handle)) self.with_stmt <<= trace(attach(self.with_stmt_ref, self.with_stmt_handle)) self.await_item <<= trace(attach(self.await_item_ref, self.await_item_handle)) self.ellipsis <<= trace(attach(self.ellipsis_ref, self.ellipsis_handle)) self.case_stmt <<= trace(attach(self.case_stmt_ref, self.case_stmt_handle)) self.decoratable_normal_funcdef_stmt <<= trace(attach( self.decoratable_normal_funcdef_stmt_ref, self.decoratable_funcdef_stmt_handle, )) self.decoratable_async_funcdef_stmt <<= trace(attach( self.decoratable_async_funcdef_stmt_ref, partial(self.decoratable_funcdef_stmt_handle, is_async=True), )) self.u_string <<= attach(self.u_string_ref, self.u_string_check) self.matrix_at <<= attach(self.matrix_at_ref, self.matrix_at_check) self.nonlocal_stmt <<= attach(self.nonlocal_stmt_ref, self.nonlocal_check) self.star_assign_item <<= attach(self.star_assign_item_ref, self.star_assign_item_check) self.classic_lambdef <<= attach(self.classic_lambdef_ref, self.lambdef_check) self.star_expr <<= attach(self.star_expr_ref, self.star_expr_check) self.dubstar_expr <<= attach(self.dubstar_expr_ref, self.star_expr_check) self.star_sep_arg <<= attach(self.star_sep_arg_ref, self.star_sep_check) self.star_sep_vararg <<= attach(self.star_sep_vararg_ref, self.star_sep_check) self.endline_semicolon <<= attach(self.endline_semicolon_ref, self.endline_semicolon_check) self.async_stmt <<= attach(self.async_stmt_ref, 
self.async_stmt_check) self.async_comp_for <<= attach(self.async_comp_for_ref, self.async_comp_check) self.f_string <<= attach(self.f_string_ref, self.f_string_check)
[ "def", "bind", "(", "self", ")", ":", "self", ".", "endline", "<<=", "attach", "(", "self", ".", "endline_ref", ",", "self", ".", "endline_handle", ")", "self", ".", "moduledoc_item", "<<=", "trace", "(", "attach", "(", "self", ".", "moduledoc", ",", "self", ".", "set_docstring", ")", ")", "self", ".", "name", "<<=", "trace", "(", "attach", "(", "self", ".", "base_name", ",", "self", ".", "name_check", ")", ")", "# comments are evaluated greedily because we need to know about them even if we're going to suppress them", "self", ".", "comment", "<<=", "trace", "(", "attach", "(", "self", ".", "comment_ref", ",", "self", ".", "comment_handle", ",", "greedy", "=", "True", ")", ")", "self", ".", "set_literal", "<<=", "trace", "(", "attach", "(", "self", ".", "set_literal_ref", ",", "self", ".", "set_literal_handle", ")", ")", "self", ".", "set_letter_literal", "<<=", "trace", "(", "attach", "(", "self", ".", "set_letter_literal_ref", ",", "self", ".", "set_letter_literal_handle", ")", ")", "self", ".", "classlist", "<<=", "trace", "(", "attach", "(", "self", ".", "classlist_ref", ",", "self", ".", "classlist_handle", ")", ")", "self", ".", "import_stmt", "<<=", "trace", "(", "attach", "(", "self", ".", "import_stmt_ref", ",", "self", ".", "import_handle", ")", ")", "self", ".", "complex_raise_stmt", "<<=", "trace", "(", "attach", "(", "self", ".", "complex_raise_stmt_ref", ",", "self", ".", "complex_raise_stmt_handle", ")", ")", "self", ".", "augassign_stmt", "<<=", "trace", "(", "attach", "(", "self", ".", "augassign_stmt_ref", ",", "self", ".", "augassign_handle", ")", ")", "self", ".", "dict_comp", "<<=", "trace", "(", "attach", "(", "self", ".", "dict_comp_ref", ",", "self", ".", "dict_comp_handle", ")", ")", "self", ".", "destructuring_stmt", "<<=", "trace", "(", "attach", "(", "self", ".", "destructuring_stmt_ref", ",", "self", ".", "destructuring_stmt_handle", ")", ")", "self", ".", "name_match_funcdef", "<<=", "trace", "(", "attach", "(", "self", ".", "name_match_funcdef_ref", ",", "self", ".", "name_match_funcdef_handle", ")", ")", "self", ".", "op_match_funcdef", "<<=", "trace", "(", "attach", "(", "self", ".", "op_match_funcdef_ref", ",", "self", ".", "op_match_funcdef_handle", ")", ")", "self", ".", "yield_from", "<<=", "trace", "(", "attach", "(", "self", ".", "yield_from_ref", ",", "self", ".", "yield_from_handle", ")", ")", "self", ".", "exec_stmt", "<<=", "trace", "(", "attach", "(", "self", ".", "exec_stmt_ref", ",", "self", ".", "exec_stmt_handle", ")", ")", "self", ".", "stmt_lambdef", "<<=", "trace", "(", "attach", "(", "self", ".", "stmt_lambdef_ref", ",", "self", ".", "stmt_lambdef_handle", ")", ")", "self", ".", "typedef", "<<=", "trace", "(", "attach", "(", "self", ".", "typedef_ref", ",", "self", ".", "typedef_handle", ")", ")", "self", ".", "typedef_default", "<<=", "trace", "(", "attach", "(", "self", ".", "typedef_default_ref", ",", "self", ".", "typedef_handle", ")", ")", "self", ".", "unsafe_typedef_default", "<<=", "trace", "(", "attach", "(", "self", ".", "unsafe_typedef_default_ref", ",", "self", ".", "unsafe_typedef_handle", ")", ")", "self", ".", "return_typedef", "<<=", "trace", "(", "attach", "(", "self", ".", "return_typedef_ref", ",", "self", ".", "typedef_handle", ")", ")", "self", ".", "typed_assign_stmt", "<<=", "trace", "(", "attach", "(", "self", ".", "typed_assign_stmt_ref", ",", "self", ".", "typed_assign_stmt_handle", ")", ")", "self", ".", "datadef", "<<=", "trace", "(", "attach", "(", "self", ".", "datadef_ref", ",", "self", ".", "data_handle", 
")", ")", "self", ".", "with_stmt", "<<=", "trace", "(", "attach", "(", "self", ".", "with_stmt_ref", ",", "self", ".", "with_stmt_handle", ")", ")", "self", ".", "await_item", "<<=", "trace", "(", "attach", "(", "self", ".", "await_item_ref", ",", "self", ".", "await_item_handle", ")", ")", "self", ".", "ellipsis", "<<=", "trace", "(", "attach", "(", "self", ".", "ellipsis_ref", ",", "self", ".", "ellipsis_handle", ")", ")", "self", ".", "case_stmt", "<<=", "trace", "(", "attach", "(", "self", ".", "case_stmt_ref", ",", "self", ".", "case_stmt_handle", ")", ")", "self", ".", "decoratable_normal_funcdef_stmt", "<<=", "trace", "(", "attach", "(", "self", ".", "decoratable_normal_funcdef_stmt_ref", ",", "self", ".", "decoratable_funcdef_stmt_handle", ",", ")", ")", "self", ".", "decoratable_async_funcdef_stmt", "<<=", "trace", "(", "attach", "(", "self", ".", "decoratable_async_funcdef_stmt_ref", ",", "partial", "(", "self", ".", "decoratable_funcdef_stmt_handle", ",", "is_async", "=", "True", ")", ",", ")", ")", "self", ".", "u_string", "<<=", "attach", "(", "self", ".", "u_string_ref", ",", "self", ".", "u_string_check", ")", "self", ".", "matrix_at", "<<=", "attach", "(", "self", ".", "matrix_at_ref", ",", "self", ".", "matrix_at_check", ")", "self", ".", "nonlocal_stmt", "<<=", "attach", "(", "self", ".", "nonlocal_stmt_ref", ",", "self", ".", "nonlocal_check", ")", "self", ".", "star_assign_item", "<<=", "attach", "(", "self", ".", "star_assign_item_ref", ",", "self", ".", "star_assign_item_check", ")", "self", ".", "classic_lambdef", "<<=", "attach", "(", "self", ".", "classic_lambdef_ref", ",", "self", ".", "lambdef_check", ")", "self", ".", "star_expr", "<<=", "attach", "(", "self", ".", "star_expr_ref", ",", "self", ".", "star_expr_check", ")", "self", ".", "dubstar_expr", "<<=", "attach", "(", "self", ".", "dubstar_expr_ref", ",", "self", ".", "star_expr_check", ")", "self", ".", "star_sep_arg", "<<=", "attach", "(", "self", ".", "star_sep_arg_ref", ",", "self", ".", "star_sep_check", ")", "self", ".", "star_sep_vararg", "<<=", "attach", "(", "self", ".", "star_sep_vararg_ref", ",", "self", ".", "star_sep_check", ")", "self", ".", "endline_semicolon", "<<=", "attach", "(", "self", ".", "endline_semicolon_ref", ",", "self", ".", "endline_semicolon_check", ")", "self", ".", "async_stmt", "<<=", "attach", "(", "self", ".", "async_stmt_ref", ",", "self", ".", "async_stmt_check", ")", "self", ".", "async_comp_for", "<<=", "attach", "(", "self", ".", "async_comp_for_ref", ",", "self", ".", "async_comp_check", ")", "self", ".", "f_string", "<<=", "attach", "(", "self", ".", "f_string_ref", ",", "self", ".", "f_string_check", ")" ]
Binds reference objects to the proper parse actions.
[ "Binds", "reference", "objects", "to", "the", "proper", "parse", "actions", "." ]
python
train
75.277778
dims/etcd3-gateway
etcd3gw/client.py
https://github.com/dims/etcd3-gateway/blob/ad566c29cbde135aee20cfd32e0a4815ca3b5ee6/etcd3gw/client.py#L120-L127
def lock(self, id=str(uuid.uuid4()), ttl=DEFAULT_TIMEOUT): """Create a Lock object given an ID and timeout :param id: ID for the lock, creates a new uuid if not provided :param ttl: timeout :return: Lock object """ return Lock(id, ttl=ttl, client=self)
[ "def", "lock", "(", "self", ",", "id", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", ",", "ttl", "=", "DEFAULT_TIMEOUT", ")", ":", "return", "Lock", "(", "id", ",", "ttl", "=", "ttl", ",", "client", "=", "self", ")" ]
Create a Lock object given an ID and timeout :param id: ID for the lock, creates a new uuid if not provided :param ttl: timeout :return: Lock object
[ "Create", "a", "Lock", "object", "given", "an", "ID", "and", "timeout" ]
python
train
36.75
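One detail worth noting in the record above: id=str(uuid.uuid4()) in the signature is evaluated once, at function-definition time, so every call that omits id reuses the same UUID rather than minting a new one as the docstring suggests. A sketch of the usual fix (a None sentinel), using a plain dict as a stand-in for the real Lock object:

import uuid

DEFAULT_TIMEOUT = 60

def lock(id=None, ttl=DEFAULT_TIMEOUT):
    # Generate the UUID inside the call so each invocation gets a fresh one.
    if id is None:
        id = str(uuid.uuid4())
    return {'id': id, 'ttl': ttl}   # stand-in for Lock(id, ttl=ttl, client=...)

print(lock()['id'] != lock()['id'])  # True: two different lock IDs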
pydanny/simplicity
simplicity.py
https://github.com/pydanny/simplicity/blob/aef4ce39b0965b8d333c67c9d6ec5baecee9c617/simplicity.py#L24-L28
def text_cleanup(data, key, last_type): """ I strip extra whitespace off multi-line strings if they are ready to be stripped!""" if key in data and last_type == STRING_TYPE: data[key] = data[key].strip() return data
[ "def", "text_cleanup", "(", "data", ",", "key", ",", "last_type", ")", ":", "if", "key", "in", "data", "and", "last_type", "==", "STRING_TYPE", ":", "data", "[", "key", "]", "=", "data", "[", "key", "]", ".", "strip", "(", ")", "return", "data" ]
I strip extra whitespace off multi-line strings if they are ready to be stripped!
[ "I", "strip", "extra", "whitespace", "off", "multi", "-", "line", "strings", "if", "they", "are", "ready", "to", "be", "stripped!" ]
python
train
46.2
tensorpack/tensorpack
tensorpack/predict/concurrency.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/predict/concurrency.py#L110-L129
def fetch_batch(self): """ Fetch a batch of data without waiting""" inp, f = self.queue.get() nr_input_var = len(inp) batched, futures = [[] for _ in range(nr_input_var)], [] for k in range(nr_input_var): batched[k].append(inp[k]) futures.append(f) while len(futures) < self.batch_size: try: inp, f = self.queue.get_nowait() for k in range(nr_input_var): batched[k].append(inp[k]) futures.append(f) except queue.Empty: break # do not wait for k in range(nr_input_var): batched[k] = np.asarray(batched[k]) return batched, futures
[ "def", "fetch_batch", "(", "self", ")", ":", "inp", ",", "f", "=", "self", ".", "queue", ".", "get", "(", ")", "nr_input_var", "=", "len", "(", "inp", ")", "batched", ",", "futures", "=", "[", "[", "]", "for", "_", "in", "range", "(", "nr_input_var", ")", "]", ",", "[", "]", "for", "k", "in", "range", "(", "nr_input_var", ")", ":", "batched", "[", "k", "]", ".", "append", "(", "inp", "[", "k", "]", ")", "futures", ".", "append", "(", "f", ")", "while", "len", "(", "futures", ")", "<", "self", ".", "batch_size", ":", "try", ":", "inp", ",", "f", "=", "self", ".", "queue", ".", "get_nowait", "(", ")", "for", "k", "in", "range", "(", "nr_input_var", ")", ":", "batched", "[", "k", "]", ".", "append", "(", "inp", "[", "k", "]", ")", "futures", ".", "append", "(", "f", ")", "except", "queue", ".", "Empty", ":", "break", "# do not wait", "for", "k", "in", "range", "(", "nr_input_var", ")", ":", "batched", "[", "k", "]", "=", "np", ".", "asarray", "(", "batched", "[", "k", "]", ")", "return", "batched", ",", "futures" ]
Fetch a batch of data without waiting
[ "Fetch", "a", "batch", "of", "data", "without", "waiting" ]
python
train
35.8
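A self-contained sketch of the same greedy batching idea using the standard-library queue, without the futures bookkeeping or NumPy conversion of the original:

import queue

def fetch_batch(q, batch_size):
    # Block for the first item, then drain whatever else is already queued,
    # up to batch_size, without waiting for stragglers.
    batch = [q.get()]
    while len(batch) < batch_size:
        try:
            batch.append(q.get_nowait())
        except queue.Empty:
            break
    return batch

q = queue.Queue()
for item in range(5):
    q.put(item)
print(fetch_batch(q, batch_size=3))  # [0, 1, 2]
print(fetch_batch(q, batch_size=3))  # [3, 4]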
googleapis/google-cloud-python
spanner/google/cloud/spanner_v1/transaction.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/google/cloud/spanner_v1/transaction.py#L134-L163
def _make_params_pb(params, param_types): """Helper for :meth:`execute_update`. :type params: dict, {str -> column value} :param params: values for parameter replacement. Keys must match the names used in ``dml``. :type param_types: dict[str -> Union[dict, .types.Type]] :param param_types: (Optional) maps explicit types for one or more param values; required if parameters are passed. :rtype: Union[None, :class:`Struct`] :returns: a struct message for the passed params, or None :raises ValueError: If ``param_types`` is None but ``params`` is not None. :raises ValueError: If ``params`` is None but ``param_types`` is not None. """ if params is not None: if param_types is None: raise ValueError("Specify 'param_types' when passing 'params'.") return Struct( fields={key: _make_value_pb(value) for key, value in params.items()} ) else: if param_types is not None: raise ValueError("Specify 'params' when passing 'param_types'.") return None
[ "def", "_make_params_pb", "(", "params", ",", "param_types", ")", ":", "if", "params", "is", "not", "None", ":", "if", "param_types", "is", "None", ":", "raise", "ValueError", "(", "\"Specify 'param_types' when passing 'params'.\"", ")", "return", "Struct", "(", "fields", "=", "{", "key", ":", "_make_value_pb", "(", "value", ")", "for", "key", ",", "value", "in", "params", ".", "items", "(", ")", "}", ")", "else", ":", "if", "param_types", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Specify 'params' when passing 'param_types'.\"", ")", "return", "None" ]
Helper for :meth:`execute_update`. :type params: dict, {str -> column value} :param params: values for parameter replacement. Keys must match the names used in ``dml``. :type param_types: dict[str -> Union[dict, .types.Type]] :param param_types: (Optional) maps explicit types for one or more param values; required if parameters are passed. :rtype: Union[None, :class:`Struct`] :returns: a struct message for the passed params, or None :raises ValueError: If ``param_types`` is None but ``params`` is not None. :raises ValueError: If ``params`` is None but ``param_types`` is not None.
[ "Helper", "for", ":", "meth", ":", "execute_update", "." ]
python
train
39.7
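The mutual-exclusion check above is easy to exercise in isolation; a simplified version returns a plain dict instead of a protobuf Struct:

def make_params(params, param_types):
    # params and param_types must be given together or not at all.
    if params is not None:
        if param_types is None:
            raise ValueError("Specify 'param_types' when passing 'params'.")
        return dict(params)
    if param_types is not None:
        raise ValueError("Specify 'params' when passing 'param_types'.")
    return None

print(make_params({'name': 'Ada'}, {'name': 'STRING'}))  # {'name': 'Ada'}
print(make_params(None, None))                           # None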
dusktreader/flask-praetorian
flask_praetorian/base.py
https://github.com/dusktreader/flask-praetorian/blob/d530cf3ffeffd61bfff1b8c79e8b45e9bfa0db0c/flask_praetorian/base.py#L140-L165
def _validate_user_class(cls, user_class): """ Validates the supplied user_class to make sure that it has the class methods necessary to function correctly. Requirements: - ``lookup`` method. Accepts a string parameter, returns instance - ``identify`` method. Accepts an identity parameter, returns instance """ PraetorianError.require_condition( getattr(user_class, 'lookup', None) is not None, textwrap.dedent(""" The user_class must have a lookup class method: user_class.lookup(<str>) -> <user instance> """), ) PraetorianError.require_condition( getattr(user_class, 'identify', None) is not None, textwrap.dedent(""" The user_class must have an identify class method: user_class.identify(<identity>) -> <user instance> """), ) # TODO: Figure out how to check for an identity property return user_class
[ "def", "_validate_user_class", "(", "cls", ",", "user_class", ")", ":", "PraetorianError", ".", "require_condition", "(", "getattr", "(", "user_class", ",", "'lookup'", ",", "None", ")", "is", "not", "None", ",", "textwrap", ".", "dedent", "(", "\"\"\"\n The user_class must have a lookup class method:\n user_class.lookup(<str>) -> <user instance>\n \"\"\"", ")", ",", ")", "PraetorianError", ".", "require_condition", "(", "getattr", "(", "user_class", ",", "'identify'", ",", "None", ")", "is", "not", "None", ",", "textwrap", ".", "dedent", "(", "\"\"\"\n The user_class must have an identify class method:\n user_class.identify(<identity>) -> <user instance>\n \"\"\"", ")", ",", ")", "# TODO: Figure out how to check for an identity property", "return", "user_class" ]
Validates the supplied user_class to make sure that it has the class methods necessary to function correctly. Requirements: - ``lookup`` method. Accepts a string parameter, returns instance - ``identify`` method. Accepts an identity parameter, returns instance
[ "Validates", "the", "supplied", "user_class", "to", "make", "sure", "that", "it", "has", "the", "class", "methods", "necessary", "to", "function", "correctly", "." ]
python
train
39.346154
dslackw/sun
sun/utils.py
https://github.com/dslackw/sun/blob/ff3501757ce1cc2f0db195f7a6b1d23f601dce32/sun/utils.py#L38-L49
def urlopen(link): """Return urllib2 urlopen """ try: return urllib2.urlopen(link) except urllib2.URLError: pass except ValueError: return "" except KeyboardInterrupt: print("") raise SystemExit()
[ "def", "urlopen", "(", "link", ")", ":", "try", ":", "return", "urllib2", ".", "urlopen", "(", "link", ")", "except", "urllib2", ".", "URLError", ":", "pass", "except", "ValueError", ":", "return", "\"\"", "except", "KeyboardInterrupt", ":", "print", "(", "\"\"", ")", "raise", "SystemExit", "(", ")" ]
Return urllib2 urlopen
[ "Return", "urllib2", "urlopen" ]
python
train
20.75
zsimic/runez
src/runez/config.py
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/config.py#L62-L67
def set_providers(self, *providers): """Replace current providers with given ones""" if self.providers: self.clear() for provider in providers: self.add(provider)
[ "def", "set_providers", "(", "self", ",", "*", "providers", ")", ":", "if", "self", ".", "providers", ":", "self", ".", "clear", "(", ")", "for", "provider", "in", "providers", ":", "self", ".", "add", "(", "provider", ")" ]
Replace current providers with given ones
[ "Replace", "current", "providers", "with", "given", "ones" ]
python
train
34.166667
PyCQA/pylint
pylint/checkers/utils.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/utils.py#L538-L568
def collect_string_fields(format_string) -> Iterable[Optional[str]]: """ Given a format string, return an iterator of all the valid format fields. It handles nested fields as well. """ formatter = string.Formatter() try: parseiterator = formatter.parse(format_string) for result in parseiterator: if all(item is None for item in result[1:]): # not a replacement format continue name = result[1] nested = result[2] yield name if nested: for field in collect_string_fields(nested): yield field except ValueError as exc: # Probably the format string is invalid. if exc.args[0].startswith("cannot switch from manual"): # On Jython, parsing a string with both manual # and automatic positions will fail with a ValueError, # while on CPython it will simply return the fields, # the validation being done in the interpreter (?). # We're just returning two mixed fields in order # to trigger the format-combined-specification check. yield "" yield "1" return raise IncompleteFormatString(format_string)
[ "def", "collect_string_fields", "(", "format_string", ")", "->", "Iterable", "[", "Optional", "[", "str", "]", "]", ":", "formatter", "=", "string", ".", "Formatter", "(", ")", "try", ":", "parseiterator", "=", "formatter", ".", "parse", "(", "format_string", ")", "for", "result", "in", "parseiterator", ":", "if", "all", "(", "item", "is", "None", "for", "item", "in", "result", "[", "1", ":", "]", ")", ":", "# not a replacement format", "continue", "name", "=", "result", "[", "1", "]", "nested", "=", "result", "[", "2", "]", "yield", "name", "if", "nested", ":", "for", "field", "in", "collect_string_fields", "(", "nested", ")", ":", "yield", "field", "except", "ValueError", "as", "exc", ":", "# Probably the format string is invalid.", "if", "exc", ".", "args", "[", "0", "]", ".", "startswith", "(", "\"cannot switch from manual\"", ")", ":", "# On Jython, parsing a string with both manual", "# and automatic positions will fail with a ValueError,", "# while on CPython it will simply return the fields,", "# the validation being done in the interpreter (?).", "# We're just returning two mixed fields in order", "# to trigger the format-combined-specification check.", "yield", "\"\"", "yield", "\"1\"", "return", "raise", "IncompleteFormatString", "(", "format_string", ")" ]
Given a format string, return an iterator of all the valid format fields. It handles nested fields as well.
[ "Given", "a", "format", "string", "return", "an", "iterator", "of", "all", "the", "valid", "format", "fields", ".", "It", "handles", "nested", "fields", "as", "well", "." ]
python
test
40.903226
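A quick illustration of what the field collector yields, built directly on the standard-library string.Formatter the pylint helper also uses; the format strings are arbitrary examples and the sketch omits the Jython-specific error handling:

import string

def collect_string_fields(format_string):
    # Yield every replacement-field name, recursing into nested fields.
    for _, name, nested, _ in string.Formatter().parse(format_string):
        if name is None and nested is None:
            continue  # literal text only, no replacement field
        yield name
        if nested:
            yield from collect_string_fields(nested)

print(list(collect_string_fields("{0} and {name}")))          # ['0', 'name']
print(list(collect_string_fields("{:{width}.{precision}}")))  # ['', 'width', 'precision']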
adafruit/Adafruit_Python_BluefruitLE
Adafruit_BluefruitLE/corebluetooth/provider.py
https://github.com/adafruit/Adafruit_Python_BluefruitLE/blob/34fc6f596371b961628369d78ce836950514062f/Adafruit_BluefruitLE/corebluetooth/provider.py#L245-L270
def run_mainloop_with(self, target): """Start the OS's main loop to process asyncronous BLE events and then run the specified target function in a background thread. Target function should be a function that takes no parameters and optionally return an integer response code. When the target function stops executing or returns with value then the main loop will be stopped and the program will exit with the returned code. Note that an OS main loop is required to process asyncronous BLE events and this function is provided as a convenience for writing simple tools and scripts that don't need to be full-blown GUI applications. If you are writing a GUI application that has a main loop (a GTK glib main loop on Linux, or a Cocoa main loop on OSX) then you don't need to call this function. """ # Create background thread to run user code. self._user_thread = threading.Thread(target=self._user_thread_main, args=(target,)) self._user_thread.daemon = True self._user_thread.start() # Run main loop. This call will never return! try: AppHelper.runConsoleEventLoop(installInterrupt=True) except KeyboardInterrupt: AppHelper.stopEventLoop() sys.exit(0)
[ "def", "run_mainloop_with", "(", "self", ",", "target", ")", ":", "# Create background thread to run user code.", "self", ".", "_user_thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_user_thread_main", ",", "args", "=", "(", "target", ",", ")", ")", "self", ".", "_user_thread", ".", "daemon", "=", "True", "self", ".", "_user_thread", ".", "start", "(", ")", "# Run main loop. This call will never return!", "try", ":", "AppHelper", ".", "runConsoleEventLoop", "(", "installInterrupt", "=", "True", ")", "except", "KeyboardInterrupt", ":", "AppHelper", ".", "stopEventLoop", "(", ")", "sys", ".", "exit", "(", "0", ")" ]
Start the OS's main loop to process asynchronous BLE events and then run the specified target function in a background thread. The target function should take no parameters and optionally return an integer response code. When the target function stops executing or returns a value, the main loop will be stopped and the program will exit with the returned code. Note that an OS main loop is required to process asynchronous BLE events and this function is provided as a convenience for writing simple tools and scripts that don't need to be full-blown GUI applications. If you are writing a GUI application that has a main loop (a GTK glib main loop on Linux, or a Cocoa main loop on OSX) then you don't need to call this function.
[ "Start", "the", "OS", "s", "main", "loop", "to", "process", "asyncronous", "BLE", "events", "and", "then", "run", "the", "specified", "target", "function", "in", "a", "background", "thread", ".", "Target", "function", "should", "be", "a", "function", "that", "takes", "no", "parameters", "and", "optionally", "return", "an", "integer", "response", "code", ".", "When", "the", "target", "function", "stops", "executing", "or", "returns", "with", "value", "then", "the", "main", "loop", "will", "be", "stopped", "and", "the", "program", "will", "exit", "with", "the", "returned", "code", "." ]
python
valid
52.769231
bitesofcode/projexui
projexui/widgets/xdropzonewidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xdropzonewidget.py#L131-L142
def currentRegion(self): """ Returns the current region based on the current cursor position. :return <XDropZoneWidget> """ pos = QtGui.QCursor.pos() pos = self.mapFromGlobal(pos) for region in self.regions(): if region.testHovered(pos): return region return None
[ "def", "currentRegion", "(", "self", ")", ":", "pos", "=", "QtGui", ".", "QCursor", ".", "pos", "(", ")", "pos", "=", "self", ".", "mapFromGlobal", "(", "pos", ")", "for", "region", "in", "self", ".", "regions", "(", ")", ":", "if", "region", ".", "testHovered", "(", "pos", ")", ":", "return", "region", "return", "None" ]
Returns the current region based on the current cursor position. :return <XDropZoneWidget>
[ "Returns", "the", "current", "region", "based", "on", "the", "current", "cursor", "position", ".", ":", "return", "<XDropZoneWidget", ">" ]
python
train
30.666667
bcoe/secure-smtpd
secure_smtpd/smtp_channel.py
https://github.com/bcoe/secure-smtpd/blob/f3f76e72c7d610759097921405e88782a19129fe/secure_smtpd/smtp_channel.py#L13-L17
def decode_b64(data): '''Wrapper for b64decode, without having to struggle with bytestrings.''' byte_string = data.encode('utf-8') decoded = base64.b64decode(byte_string) return decoded.decode('utf-8')
[ "def", "decode_b64", "(", "data", ")", ":", "byte_string", "=", "data", ".", "encode", "(", "'utf-8'", ")", "decoded", "=", "base64", ".", "b64decode", "(", "byte_string", ")", "return", "decoded", ".", "decode", "(", "'utf-8'", ")" ]
Wrapper for b64decode, without having to struggle with bytestrings.
[ "Wrapper", "for", "b64decode", "without", "having", "to", "struggle", "with", "bytestrings", "." ]
python
train
42.6
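Usage is straightforward; the round trip below encodes a string with the standard library and feeds it back through the wrapper:

import base64

def decode_b64(data):
    # Accept a str, decode it as base64, and return a str again.
    return base64.b64decode(data.encode('utf-8')).decode('utf-8')

encoded = base64.b64encode('user@example.com'.encode('utf-8')).decode('utf-8')
print(encoded)              # 'dXNlckBleGFtcGxlLmNvbQ=='
print(decode_b64(encoded))  # 'user@example.com'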
UCL-INGI/INGInious
inginious/frontend/pages/course_admin/settings.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/pages/course_admin/settings.py#L22-L103
def POST_AUTH(self, courseid): # pylint: disable=arguments-differ """ POST request """ course, __ = self.get_course_and_check_rights(courseid, allow_all_staff=False) errors = [] course_content = {} try: data = web.input() course_content = self.course_factory.get_course_descriptor_content(courseid) course_content['name'] = data['name'] if course_content['name'] == "": errors.append(_('Invalid name')) course_content['description'] = data['description'] course_content['admins'] = list(map(str.strip, data['admins'].split(','))) if not self.user_manager.user_is_superadmin() and self.user_manager.session_username() not in course_content['admins']: errors.append(_('You cannot remove yourself from the administrators of this course')) course_content['tutors'] = list(map(str.strip, data['tutors'].split(','))) if len(course_content['tutors']) == 1 and course_content['tutors'][0].strip() == "": course_content['tutors'] = [] course_content['groups_student_choice'] = True if data["groups_student_choice"] == "true" else False if course_content.get('use_classrooms', True) != (data['use_classrooms'] == "true"): self.database.aggregations.delete_many({"courseid": course.get_id()}) course_content['use_classrooms'] = True if data["use_classrooms"] == "true" else False if data["accessible"] == "custom": course_content['accessible'] = "{}/{}".format(data["accessible_start"], data["accessible_end"]) elif data["accessible"] == "true": course_content['accessible'] = True else: course_content['accessible'] = False try: AccessibleTime(course_content['accessible']) except: errors.append(_('Invalid accessibility dates')) course_content['allow_unregister'] = True if data["allow_unregister"] == "true" else False course_content['allow_preview'] = True if data["allow_preview"] == "true" else False if data["registration"] == "custom": course_content['registration'] = "{}/{}".format(data["registration_start"], data["registration_end"]) elif data["registration"] == "true": course_content['registration'] = True else: course_content['registration'] = False try: AccessibleTime(course_content['registration']) except: errors.append(_('Invalid registration dates')) course_content['registration_password'] = data['registration_password'] if course_content['registration_password'] == "": course_content['registration_password'] = None course_content['registration_ac'] = data['registration_ac'] if course_content['registration_ac'] not in ["None", "username", "binding", "email"]: errors.append(_('Invalid ACL value')) if course_content['registration_ac'] == "None": course_content['registration_ac'] = None course_content['registration_ac_list'] = data['registration_ac_list'].splitlines() course_content['is_lti'] = 'lti' in data and data['lti'] == "true" course_content['lti_keys'] = dict([x.split(":") for x in data['lti_keys'].splitlines() if x]) for lti_key in course_content['lti_keys'].keys(): if not re.match("^[a-zA-Z0-9]*$", lti_key): errors.append(_("LTI keys must be alphanumerical.")) course_content['lti_send_back_grade'] = 'lti_send_back_grade' in data and data['lti_send_back_grade'] == "true" except: errors.append(_('User returned an invalid form.')) if len(errors) == 0: self.course_factory.update_course_descriptor_content(courseid, course_content) errors = None course, __ = self.get_course_and_check_rights(courseid, allow_all_staff=False) # don't forget to reload the modified course return self.page(course, errors, errors is None)
[ "def", "POST_AUTH", "(", "self", ",", "courseid", ")", ":", "# pylint: disable=arguments-differ", "course", ",", "__", "=", "self", ".", "get_course_and_check_rights", "(", "courseid", ",", "allow_all_staff", "=", "False", ")", "errors", "=", "[", "]", "course_content", "=", "{", "}", "try", ":", "data", "=", "web", ".", "input", "(", ")", "course_content", "=", "self", ".", "course_factory", ".", "get_course_descriptor_content", "(", "courseid", ")", "course_content", "[", "'name'", "]", "=", "data", "[", "'name'", "]", "if", "course_content", "[", "'name'", "]", "==", "\"\"", ":", "errors", ".", "append", "(", "_", "(", "'Invalid name'", ")", ")", "course_content", "[", "'description'", "]", "=", "data", "[", "'description'", "]", "course_content", "[", "'admins'", "]", "=", "list", "(", "map", "(", "str", ".", "strip", ",", "data", "[", "'admins'", "]", ".", "split", "(", "','", ")", ")", ")", "if", "not", "self", ".", "user_manager", ".", "user_is_superadmin", "(", ")", "and", "self", ".", "user_manager", ".", "session_username", "(", ")", "not", "in", "course_content", "[", "'admins'", "]", ":", "errors", ".", "append", "(", "_", "(", "'You cannot remove yourself from the administrators of this course'", ")", ")", "course_content", "[", "'tutors'", "]", "=", "list", "(", "map", "(", "str", ".", "strip", ",", "data", "[", "'tutors'", "]", ".", "split", "(", "','", ")", ")", ")", "if", "len", "(", "course_content", "[", "'tutors'", "]", ")", "==", "1", "and", "course_content", "[", "'tutors'", "]", "[", "0", "]", ".", "strip", "(", ")", "==", "\"\"", ":", "course_content", "[", "'tutors'", "]", "=", "[", "]", "course_content", "[", "'groups_student_choice'", "]", "=", "True", "if", "data", "[", "\"groups_student_choice\"", "]", "==", "\"true\"", "else", "False", "if", "course_content", ".", "get", "(", "'use_classrooms'", ",", "True", ")", "!=", "(", "data", "[", "'use_classrooms'", "]", "==", "\"true\"", ")", ":", "self", ".", "database", ".", "aggregations", ".", "delete_many", "(", "{", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", "}", ")", "course_content", "[", "'use_classrooms'", "]", "=", "True", "if", "data", "[", "\"use_classrooms\"", "]", "==", "\"true\"", "else", "False", "if", "data", "[", "\"accessible\"", "]", "==", "\"custom\"", ":", "course_content", "[", "'accessible'", "]", "=", "\"{}/{}\"", ".", "format", "(", "data", "[", "\"accessible_start\"", "]", ",", "data", "[", "\"accessible_end\"", "]", ")", "elif", "data", "[", "\"accessible\"", "]", "==", "\"true\"", ":", "course_content", "[", "'accessible'", "]", "=", "True", "else", ":", "course_content", "[", "'accessible'", "]", "=", "False", "try", ":", "AccessibleTime", "(", "course_content", "[", "'accessible'", "]", ")", "except", ":", "errors", ".", "append", "(", "_", "(", "'Invalid accessibility dates'", ")", ")", "course_content", "[", "'allow_unregister'", "]", "=", "True", "if", "data", "[", "\"allow_unregister\"", "]", "==", "\"true\"", "else", "False", "course_content", "[", "'allow_preview'", "]", "=", "True", "if", "data", "[", "\"allow_preview\"", "]", "==", "\"true\"", "else", "False", "if", "data", "[", "\"registration\"", "]", "==", "\"custom\"", ":", "course_content", "[", "'registration'", "]", "=", "\"{}/{}\"", ".", "format", "(", "data", "[", "\"registration_start\"", "]", ",", "data", "[", "\"registration_end\"", "]", ")", "elif", "data", "[", "\"registration\"", "]", "==", "\"true\"", ":", "course_content", "[", "'registration'", "]", "=", "True", "else", ":", "course_content", "[", 
"'registration'", "]", "=", "False", "try", ":", "AccessibleTime", "(", "course_content", "[", "'registration'", "]", ")", "except", ":", "errors", ".", "append", "(", "_", "(", "'Invalid registration dates'", ")", ")", "course_content", "[", "'registration_password'", "]", "=", "data", "[", "'registration_password'", "]", "if", "course_content", "[", "'registration_password'", "]", "==", "\"\"", ":", "course_content", "[", "'registration_password'", "]", "=", "None", "course_content", "[", "'registration_ac'", "]", "=", "data", "[", "'registration_ac'", "]", "if", "course_content", "[", "'registration_ac'", "]", "not", "in", "[", "\"None\"", ",", "\"username\"", ",", "\"binding\"", ",", "\"email\"", "]", ":", "errors", ".", "append", "(", "_", "(", "'Invalid ACL value'", ")", ")", "if", "course_content", "[", "'registration_ac'", "]", "==", "\"None\"", ":", "course_content", "[", "'registration_ac'", "]", "=", "None", "course_content", "[", "'registration_ac_list'", "]", "=", "data", "[", "'registration_ac_list'", "]", ".", "splitlines", "(", ")", "course_content", "[", "'is_lti'", "]", "=", "'lti'", "in", "data", "and", "data", "[", "'lti'", "]", "==", "\"true\"", "course_content", "[", "'lti_keys'", "]", "=", "dict", "(", "[", "x", ".", "split", "(", "\":\"", ")", "for", "x", "in", "data", "[", "'lti_keys'", "]", ".", "splitlines", "(", ")", "if", "x", "]", ")", "for", "lti_key", "in", "course_content", "[", "'lti_keys'", "]", ".", "keys", "(", ")", ":", "if", "not", "re", ".", "match", "(", "\"^[a-zA-Z0-9]*$\"", ",", "lti_key", ")", ":", "errors", ".", "append", "(", "_", "(", "\"LTI keys must be alphanumerical.\"", ")", ")", "course_content", "[", "'lti_send_back_grade'", "]", "=", "'lti_send_back_grade'", "in", "data", "and", "data", "[", "'lti_send_back_grade'", "]", "==", "\"true\"", "except", ":", "errors", ".", "append", "(", "_", "(", "'User returned an invalid form.'", ")", ")", "if", "len", "(", "errors", ")", "==", "0", ":", "self", ".", "course_factory", ".", "update_course_descriptor_content", "(", "courseid", ",", "course_content", ")", "errors", "=", "None", "course", ",", "__", "=", "self", ".", "get_course_and_check_rights", "(", "courseid", ",", "allow_all_staff", "=", "False", ")", "# don't forget to reload the modified course", "return", "self", ".", "page", "(", "course", ",", "errors", ",", "errors", "is", "None", ")" ]
POST request
[ "POST", "request" ]
python
train
51.426829
rosenbrockc/fortpy
fortpy/templates/genf90.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/templates/genf90.py#L366-L388
def fpy_interface(fpy, static, interface, typedict): """Splices the full list of subroutines and the module procedure list into the static.f90 file. :arg static: the string contents of the static.f90 file. :arg interface: the name of the interface *field* being replaced. :arg typedict: the dictionary of dtypes and their kind and suffix combos. """ modprocs = [] subtext = [] for dtype, combos in list(typedict.items()): for tcombo in combos: kind, suffix = tcombo xnames, sub = fpy_interface_sub(fpy, dtype, kind, suffix) modprocs.extend(xnames) subtext.append(sub) subtext.append("\n") #Next, chunk the names of the module procedures into blocks of five #so that they display nicely for human readability. from fortpy.printing.formatting import present_params splice = static.replace(interface, present_params(modprocs, 21)) return splice.replace(interface.replace("py", "xpy"), ''.join(subtext))
[ "def", "fpy_interface", "(", "fpy", ",", "static", ",", "interface", ",", "typedict", ")", ":", "modprocs", "=", "[", "]", "subtext", "=", "[", "]", "for", "dtype", ",", "combos", "in", "list", "(", "typedict", ".", "items", "(", ")", ")", ":", "for", "tcombo", "in", "combos", ":", "kind", ",", "suffix", "=", "tcombo", "xnames", ",", "sub", "=", "fpy_interface_sub", "(", "fpy", ",", "dtype", ",", "kind", ",", "suffix", ")", "modprocs", ".", "extend", "(", "xnames", ")", "subtext", ".", "append", "(", "sub", ")", "subtext", ".", "append", "(", "\"\\n\"", ")", "#Next, chunk the names of the module procedures into blocks of five", "#so that they display nicely for human readability.", "from", "fortpy", ".", "printing", ".", "formatting", "import", "present_params", "splice", "=", "static", ".", "replace", "(", "interface", ",", "present_params", "(", "modprocs", ",", "21", ")", ")", "return", "splice", ".", "replace", "(", "interface", ".", "replace", "(", "\"py\"", ",", "\"xpy\"", ")", ",", "''", ".", "join", "(", "subtext", ")", ")" ]
Splices the full list of subroutines and the module procedure list into the static.f90 file. :arg static: the string contents of the static.f90 file. :arg interface: the name of the interface *field* being replaced. :arg typedict: the dictionary of dtypes and their kind and suffix combos.
[ "Splices", "the", "full", "list", "of", "subroutines", "and", "the", "module", "procedure", "list", "into", "the", "static", ".", "f90", "file", "." ]
python
train
43.478261