Dataset columns:

  repo              string, lengths 7 to 54
  path              string, lengths 4 to 192
  url               string, lengths 87 to 284
  code              string, lengths 78 to 104k
  code_tokens       list
  docstring         string, lengths 1 to 46.9k
  docstring_tokens  list
  language          string, 1 distinct value
  partition         string, 3 distinct values
redcanari/canari3
src/canari/entrypoints.py
https://github.com/redcanari/canari3/blob/322d2bae4b49ac728229f418b786b51fcc227352/src/canari/entrypoints.py#L159-L162
def generate_entities_doc(ctx, out_path, package):
    """Create entities documentation from Canari python classes file."""
    from canari.commands.generate_entities_doc import generate_entities_doc
    generate_entities_doc(ctx.project, out_path, package)
[ "def", "generate_entities_doc", "(", "ctx", ",", "out_path", ",", "package", ")", ":", "from", "canari", ".", "commands", ".", "generate_entities_doc", "import", "generate_entities_doc", "generate_entities_doc", "(", "ctx", ".", "project", ",", "out_path", ",", "package", ")" ]
Create entities documentation from Canari python classes file.
[ "Create", "entities", "documentation", "from", "Canari", "python", "classes", "file", "." ]
python
train
saltstack/salt
salt/states/sqlite3.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/sqlite3.py#L383-L471
def table_present(name, db, schema, force=False):
    '''
    Make sure the specified table exists with the specified schema

    name
        The name of the table

    db
        The name of the database file

    schema
        The dictionary containing the schema information

    force
        If the name of the table exists and force is set to False,
        the state will fail.  If force is set to True, the existing
        table will be replaced with the new table
    '''
    changes = {'name': name,
               'changes': {},
               'result': None,
               'comment': ''}
    conn = None
    try:
        conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
        tables = _query(conn,
                        "SELECT sql FROM sqlite_master " +
                        "WHERE type='table' AND name=?", [name])

        if len(tables) == 1:
            sql = None
            if isinstance(schema, six.string_types):
                sql = schema.strip()
            else:
                sql = _get_sql_from_schema(name, schema)

            if sql != tables[0][0]:
                if force:
                    if __opts__['test']:
                        changes['result'] = True
                        changes['changes']['old'] = tables[0][0]
                        changes['changes']['new'] = sql
                        changes['comment'] = "'" + name + "' will be replaced"
                    else:
                        conn.execute("DROP TABLE `" + name + "`")
                        conn.execute(sql)
                        conn.commit()
                        changes['result'] = True
                        changes['changes']['old'] = tables[0][0]
                        changes['changes']['new'] = sql
                        changes['comment'] = "Replaced '" + name + "'"
                else:
                    changes['result'] = False
                    changes['comment'] = "Expected schema=" + sql + \
                                         "\nactual schema=" + tables[0][0]
            else:
                changes['result'] = True
                changes['comment'] = "'" + name + \
                                     "' exists with matching schema"
        elif not tables:
            # Create the table
            sql = None
            if isinstance(schema, six.string_types):
                sql = schema
            else:
                sql = _get_sql_from_schema(name, schema)

            if __opts__['test']:
                changes['result'] = True
                changes['changes']['new'] = sql
                changes['comment'] = "'" + name + "' will be created"
            else:
                conn.execute(sql)
                conn.commit()
                changes['result'] = True
                changes['changes']['new'] = sql
                changes['comment'] = "Created table '" + name + "'"
        else:
            changes['result'] = False
            changes['comment'] = 'Multiple tables with the same name=' + name
    except Exception as e:
        changes['result'] = False
        changes['comment'] = str(e)
    finally:
        if conn:
            conn.close()

    return changes
[ "def", "table_present", "(", "name", ",", "db", ",", "schema", ",", "force", "=", "False", ")", ":", "changes", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "None", ",", "'comment'", ":", "''", "}", "conn", "=", "None", "try", ":", "conn", "=", "sqlite3", ".", "connect", "(", "db", ",", "detect_types", "=", "sqlite3", ".", "PARSE_DECLTYPES", ")", "tables", "=", "_query", "(", "conn", ",", "\"SELECT sql FROM sqlite_master \"", "+", "\"WHERE type='table' AND name=?\"", ",", "[", "name", "]", ")", "if", "len", "(", "tables", ")", "==", "1", ":", "sql", "=", "None", "if", "isinstance", "(", "schema", ",", "six", ".", "string_types", ")", ":", "sql", "=", "schema", ".", "strip", "(", ")", "else", ":", "sql", "=", "_get_sql_from_schema", "(", "name", ",", "schema", ")", "if", "sql", "!=", "tables", "[", "0", "]", "[", "0", "]", ":", "if", "force", ":", "if", "__opts__", "[", "'test'", "]", ":", "changes", "[", "'result'", "]", "=", "True", "changes", "[", "'changes'", "]", "[", "'old'", "]", "=", "tables", "[", "0", "]", "[", "0", "]", "changes", "[", "'changes'", "]", "[", "'new'", "]", "=", "sql", "changes", "[", "'comment'", "]", "=", "\"'\"", "+", "name", "+", "\"' will be replaced\"", "else", ":", "conn", ".", "execute", "(", "\"DROP TABLE `\"", "+", "name", "+", "\"`\"", ")", "conn", ".", "execute", "(", "sql", ")", "conn", ".", "commit", "(", ")", "changes", "[", "'result'", "]", "=", "True", "changes", "[", "'changes'", "]", "[", "'old'", "]", "=", "tables", "[", "0", "]", "[", "0", "]", "changes", "[", "'changes'", "]", "[", "'new'", "]", "=", "sql", "changes", "[", "'comment'", "]", "=", "\"Replaced '\"", "+", "name", "+", "\"'\"", "else", ":", "changes", "[", "'result'", "]", "=", "False", "changes", "[", "'comment'", "]", "=", "\"Expected schema=\"", "+", "sql", "+", "\"\\nactual schema=\"", "+", "tables", "[", "0", "]", "[", "0", "]", "else", ":", "changes", "[", "'result'", "]", "=", "True", "changes", "[", "'comment'", "]", "=", "\"'\"", "+", "name", "+", "\"' exists with matching schema\"", "elif", "not", "tables", ":", "# Create the table", "sql", "=", "None", "if", "isinstance", "(", "schema", ",", "six", ".", "string_types", ")", ":", "sql", "=", "schema", "else", ":", "sql", "=", "_get_sql_from_schema", "(", "name", ",", "schema", ")", "if", "__opts__", "[", "'test'", "]", ":", "changes", "[", "'result'", "]", "=", "True", "changes", "[", "'changes'", "]", "[", "'new'", "]", "=", "sql", "changes", "[", "'comment'", "]", "=", "\"'\"", "+", "name", "+", "\"' will be created\"", "else", ":", "conn", ".", "execute", "(", "sql", ")", "conn", ".", "commit", "(", ")", "changes", "[", "'result'", "]", "=", "True", "changes", "[", "'changes'", "]", "[", "'new'", "]", "=", "sql", "changes", "[", "'comment'", "]", "=", "\"Created table '\"", "+", "name", "+", "\"'\"", "else", ":", "changes", "[", "'result'", "]", "=", "False", "changes", "[", "'comment'", "]", "=", "'Multiple tables with the same name='", "+", "name", "except", "Exception", "as", "e", ":", "changes", "[", "'result'", "]", "=", "False", "changes", "[", "'comment'", "]", "=", "str", "(", "e", ")", "finally", ":", "if", "conn", ":", "conn", ".", "close", "(", ")", "return", "changes" ]
Make sure the specified table exists with the specified schema

name
    The name of the table

db
    The name of the database file

schema
    The dictionary containing the schema information

force
    If the name of the table exists and force is set to False, the state
    will fail.  If force is set to True, the existing table will be
    replaced with the new table
[ "Make", "sure", "the", "specified", "table", "exists", "with", "the", "specified", "schema" ]
python
train
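The key trick in table_present above is that SQLite stores each table's original CREATE statement in sqlite_master, so the desired schema can be compared as plain text. A minimal standalone sketch of that comparison step, using only the standard-library sqlite3 module (Salt's _query and _get_sql_from_schema helpers are not needed for this):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)")
# sqlite_master keeps the CREATE statement verbatim; this is the string
# that table_present compares against the desired schema.
rows = conn.execute("SELECT sql FROM sqlite_master "
                    "WHERE type='table' AND name=?", ('users',)).fetchall()
print(rows[0][0])  # CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)
conn.close()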
openstack/horizon
horizon/tabs/base.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/tabs/base.py#L389-L406
def get_default_classes(self):
    """Returns a list of the default classes for the tab.

    Defaults to an empty list (``[]``); however, additional classes may
    be added depending on the state of the tab as follows:

    If the tab is the active tab for the tab group, the class
    ``"active"`` will be added.

    If the tab is not enabled, the class ``"disabled"`` will be added.
    """
    default_classes = super(Tab, self).get_default_classes()
    if self.is_active():
        default_classes.extend(CSS_ACTIVE_TAB_CLASSES)
    if not self._enabled:
        default_classes.extend(CSS_DISABLED_TAB_CLASSES)
    return default_classes
[ "def", "get_default_classes", "(", "self", ")", ":", "default_classes", "=", "super", "(", "Tab", ",", "self", ")", ".", "get_default_classes", "(", ")", "if", "self", ".", "is_active", "(", ")", ":", "default_classes", ".", "extend", "(", "CSS_ACTIVE_TAB_CLASSES", ")", "if", "not", "self", ".", "_enabled", ":", "default_classes", ".", "extend", "(", "CSS_DISABLED_TAB_CLASSES", ")", "return", "default_classes" ]
Returns a list of the default classes for the tab. Defaults to an empty list (``[]``); however, additional classes may be added depending on the state of the tab as follows: If the tab is the active tab for the tab group, the class ``"active"`` will be added. If the tab is not enabled, the class ``"disabled"`` will be added.
[ "Returns", "a", "list", "of", "the", "default", "classes", "for", "the", "tab", "." ]
python
train
camptocamp/Studio
studio/controllers/datastores.py
https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/controllers/datastores.py#L67-L78
def update(self, id):
    """PUT /datastores/id: Update an existing item."""
    # url('DataStores', id=ID)
    content = request.environ['wsgi.input'].read(int(request.environ['CONTENT_LENGTH']))
    content = content.decode('utf8')
    content = simplejson.loads(content)
    result = meta.Session.query(DataStore).get(id)
    result.name = content['name']
    result.type = content['type']
    result.ogrstring = content['ogrstring']
    meta.Session.commit()
    response.status = 201
[ "def", "update", "(", "self", ",", "id", ")", ":", "# url('DataStores', id=ID)", "content", "=", "request", ".", "environ", "[", "'wsgi.input'", "]", ".", "read", "(", "int", "(", "request", ".", "environ", "[", "'CONTENT_LENGTH'", "]", ")", ")", "content", "=", "content", ".", "decode", "(", "'utf8'", ")", "content", "=", "simplejson", ".", "loads", "(", "content", ")", "result", "=", "meta", ".", "Session", ".", "query", "(", "DataStore", ")", ".", "get", "(", "id", ")", "result", ".", "name", "=", "content", "[", "'name'", "]", "result", ".", "type", "=", "content", "[", "'type'", "]", "result", ".", "ogrstring", "=", "content", "[", "'ogrstring'", "]", "meta", ".", "Session", ".", "commit", "(", ")", "response", ".", "status", "=", "201" ]
PUT /datastores/id: Update an existing item.
[ "PUT", "/", "datastores", "/", "id", ":", "Update", "an", "existing", "item", "." ]
python
train
google/grr
grr/server/grr_response_server/gui/api_plugins/hunt.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/gui/api_plugins/hunt.py#L362-L370
def InitFromFlowResult(self, flow_result):
    """Init from rdf_flow_objects.FlowResult."""
    self.payload_type = compatibility.GetName(flow_result.payload.__class__)
    self.payload = flow_result.payload
    self.client_id = flow_result.client_id
    self.timestamp = flow_result.timestamp
    return self
[ "def", "InitFromFlowResult", "(", "self", ",", "flow_result", ")", ":", "self", ".", "payload_type", "=", "compatibility", ".", "GetName", "(", "flow_result", ".", "payload", ".", "__class__", ")", "self", ".", "payload", "=", "flow_result", ".", "payload", "self", ".", "client_id", "=", "flow_result", ".", "client_id", "self", ".", "timestamp", "=", "flow_result", ".", "timestamp", "return", "self" ]
Init from rdf_flow_objects.FlowResult.
[ "Init", "from", "rdf_flow_objects", ".", "FlowResult", "." ]
python
train
Ex-Mente/auxi.0
auxi/tools/chemistry/stoichiometry.py
https://github.com/Ex-Mente/auxi.0/blob/2dcdae74154f136f8ca58289fe5b20772f215046/auxi/tools/chemistry/stoichiometry.py#L215-L226
def amount_fractions(masses):
    """
    Calculate the mole fractions from the specified compound masses.

    :param masses: [kg] dictionary, e.g. {'SiO2': 3.0, 'FeO': 1.5}

    :returns: [mole fractions] dictionary
    """
    n = amounts(masses)
    n_total = sum(n.values())
    return {compound: n[compound]/n_total for compound in n.keys()}
[ "def", "amount_fractions", "(", "masses", ")", ":", "n", "=", "amounts", "(", "masses", ")", "n_total", "=", "sum", "(", "n", ".", "values", "(", ")", ")", "return", "{", "compound", ":", "n", "[", "compound", "]", "/", "n_total", "for", "compound", "in", "n", ".", "keys", "(", ")", "}" ]
Calculate the mole fractions from the specified compound masses.

:param masses: [kg] dictionary, e.g. {'SiO2': 3.0, 'FeO': 1.5}

:returns: [mole fractions] dictionary
[ "Calculate", "the", "mole", "fractions", "from", "the", "specified", "compound", "masses", "." ]
python
valid
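A short usage sketch for amount_fractions, assuming the auxi package is installed and recognizes these compound formulas:

from auxi.tools.chemistry.stoichiometry import amount_fractions

masses = {'SiO2': 3.0, 'FeO': 1.5}  # [kg]
fractions = amount_fractions(masses)
# Mole fractions sum to 1.0; SiO2 dominates since its molar mass is lower.
print(fractions)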
cni/MRS
MRS/utils.py
https://github.com/cni/MRS/blob/16098b3cf4830780efd787fee9efa46513850283/MRS/utils.py#L306-L322
def zero_pad(ts, n_zeros):
    """
    Pad a nitime.TimeSeries class instance with n_zeros before and after
    the data

    Parameters
    ----------
    ts : a nitime.TimeSeries class instance
    """
    zeros_shape = ts.shape[:-1] + (n_zeros,)
    zzs = np.zeros(zeros_shape)
    # Concatenate along the time-dimension:
    new_data = np.concatenate((zzs, ts.data, zzs), axis=-1)
    return nts.TimeSeries(new_data, sampling_rate=ts.sampling_rate)
[ "def", "zero_pad", "(", "ts", ",", "n_zeros", ")", ":", "zeros_shape", "=", "ts", ".", "shape", "[", ":", "-", "1", "]", "+", "(", "n_zeros", ",", ")", "zzs", "=", "np", ".", "zeros", "(", "zeros_shape", ")", "# Concatenate along the time-dimension:", "new_data", "=", "np", ".", "concatenate", "(", "(", "zzs", ",", "ts", ".", "data", ",", "zzs", ")", ",", "axis", "=", "-", "1", ")", "return", "nts", ".", "TimeSeries", "(", "new_data", ",", "sampling_rate", "=", "ts", ".", "sampling_rate", ")" ]
Pad a nitime.TimeSeries class instance with n_zeros before and after
the data

Parameters
----------
ts : a nitime.TimeSeries class instance
[ "Pad", "a", "nitime", ".", "TimeSeries", "class", "instance", "with", "n_zeros", "before", "and", "after", "the", "data" ]
python
train
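A usage sketch for zero_pad, assuming nitime and numpy are installed and that the function is importable from MRS.utils (nts is the module's alias for nitime.timeseries):

import numpy as np
import nitime.timeseries as nts
from MRS.utils import zero_pad

ts = nts.TimeSeries(np.ones(8), sampling_rate=1.0)
padded = zero_pad(ts, 4)
print(padded.data.shape)  # (16,): 4 zeros on each side of 8 samples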
fitnr/convertdate
convertdate/french_republican.py
https://github.com/fitnr/convertdate/blob/e920f168a87f99183b0aa7290d6c3af222582d43/convertdate/french_republican.py#L190-L201
def from_jd(jd, method=None):
    '''Calculate date in the French Revolutionary calendar from Julian day.
    The five or six "sansculottides" are considered a thirteenth month in
    the results of this function.'''
    method = method or 'equinox'

    if method == 'equinox':
        return _from_jd_equinox(jd)
    else:
        return _from_jd_schematic(jd, method)
[ "def", "from_jd", "(", "jd", ",", "method", "=", "None", ")", ":", "method", "=", "method", "or", "'equinox'", "if", "method", "==", "'equinox'", ":", "return", "_from_jd_equinox", "(", "jd", ")", "else", ":", "return", "_from_jd_schematic", "(", "jd", ",", "method", ")" ]
Calculate date in the French Revolutionary calendar from Julian day. The five or six "sansculottides" are considered a thirteenth month in the results of this function.
[ "Calculate", "date", "in", "the", "French", "Revolutionary", "calendar", "from", "Julian", "day", ".", "The", "five", "or", "six", "sansculottides", "are", "considered", "a", "thirteenth", "month", "in", "the", "results", "of", "this", "function", "." ]
python
train
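A usage sketch, assuming the convertdate package is installed; conversion functions in convertdate conventionally return a (year, month, day) tuple, with the sansculottides reported here as month 13:

from convertdate import french_republican

# Default 'equinox' method; any Julian day number works as input.
print(french_republican.from_jd(2458908.5))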
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_genobstacles.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_genobstacles.py#L93-L98
def move(self, bearing, distance):
    '''move position by bearing and distance'''
    lat = self.pkt['I105']['Lat']['val']
    lon = self.pkt['I105']['Lon']['val']
    (lat, lon) = mp_util.gps_newpos(lat, lon, bearing, distance)
    self.setpos(lat, lon)
[ "def", "move", "(", "self", ",", "bearing", ",", "distance", ")", ":", "lat", "=", "self", ".", "pkt", "[", "'I105'", "]", "[", "'Lat'", "]", "[", "'val'", "]", "lon", "=", "self", ".", "pkt", "[", "'I105'", "]", "[", "'Lon'", "]", "[", "'val'", "]", "(", "lat", ",", "lon", ")", "=", "mp_util", ".", "gps_newpos", "(", "lat", ",", "lon", ",", "bearing", ",", "distance", ")", "self", ".", "setpos", "(", "lat", ",", "lon", ")" ]
move position by bearing and distance
[ "move", "position", "by", "bearing", "and", "distance" ]
python
train
bitesofcode/projexui
projexui/widgets/xtoolbar.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtoolbar.py#L230-L240
def setCollapsable(self, state):
    """
    Sets whether or not this toolbar is collapsable.

    :param      state | <bool>
    """
    if self._collapsable == state:
        return

    self._collapsable = state
    self.clear()
[ "def", "setCollapsable", "(", "self", ",", "state", ")", ":", "if", "self", ".", "_collapsable", "==", "state", ":", "return", "self", ".", "_collapsable", "=", "state", "self", ".", "clear", "(", ")" ]
Sets whether or not this toolbar is collapsable. :param state | <bool>
[ "Sets", "whether", "or", "not", "this", "toolbar", "is", "collapsable", ".", ":", "param", "state", "|", "<bool", ">" ]
python
train
brutasse/graphite-api
graphite_api/functions.py
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L223-L253
def sumSeriesWithWildcards(requestContext, seriesList, *positions):
    """
    Call sumSeries after inserting wildcards at the given position(s).

    Example::

        &target=sumSeriesWithWildcards(host.cpu-[0-7].cpu-{user,system}.value, 1)

    This would be the equivalent of::

        &target=sumSeries(host.*.cpu-user.value)&target=sumSeries(
            host.*.cpu-system.value)
    """
    newSeries = {}
    newNames = list()

    for series in seriesList:
        newname = '.'.join(map(lambda x: x[1],
                               filter(lambda i: i[0] not in positions,
                                      enumerate(series.name.split('.')))))
        if newname in newSeries:
            newSeries[newname] = sumSeries(requestContext,
                                           (series, newSeries[newname]))[0]
        else:
            newSeries[newname] = series
            newNames.append(newname)
        newSeries[newname].name = newname

    return [newSeries[name] for name in newNames]
[ "def", "sumSeriesWithWildcards", "(", "requestContext", ",", "seriesList", ",", "*", "positions", ")", ":", "newSeries", "=", "{", "}", "newNames", "=", "list", "(", ")", "for", "series", "in", "seriesList", ":", "newname", "=", "'.'", ".", "join", "(", "map", "(", "lambda", "x", ":", "x", "[", "1", "]", ",", "filter", "(", "lambda", "i", ":", "i", "[", "0", "]", "not", "in", "positions", ",", "enumerate", "(", "series", ".", "name", ".", "split", "(", "'.'", ")", ")", ")", ")", ")", "if", "newname", "in", "newSeries", ":", "newSeries", "[", "newname", "]", "=", "sumSeries", "(", "requestContext", ",", "(", "series", ",", "newSeries", "[", "newname", "]", ")", ")", "[", "0", "]", "else", ":", "newSeries", "[", "newname", "]", "=", "series", "newNames", ".", "append", "(", "newname", ")", "newSeries", "[", "newname", "]", ".", "name", "=", "newname", "return", "[", "newSeries", "[", "name", "]", "for", "name", "in", "newNames", "]" ]
Call sumSeries after inserting wildcards at the given position(s).

Example::

    &target=sumSeriesWithWildcards(host.cpu-[0-7].cpu-{user,system}.value, 1)

This would be the equivalent of::

    &target=sumSeries(host.*.cpu-user.value)&target=sumSeries(
        host.*.cpu-system.value)
[ "Call", "sumSeries", "after", "inserting", "wildcards", "at", "the", "given", "position", "(", "s", ")", "." ]
python
train
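The heart of sumSeriesWithWildcards is the name-collapsing step that drops the dotted components at the wildcard positions; a standalone sketch of just that logic:

def collapse_name(name, positions):
    # Drop the dotted components at the given positions, e.g. position 1
    # turns 'host.cpu-0.cpu-user.value' into 'host.cpu-user.value'.
    return '.'.join(part for i, part in enumerate(name.split('.'))
                    if i not in positions)

print(collapse_name('host.cpu-0.cpu-user.value', {1}))  # host.cpu-user.value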
summa-tx/riemann
riemann/tx/tx_builder.py
https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/tx_builder.py#L192-L198
def make_script_sig(stack_script, redeem_script):
    '''
    str, str -> bytearray
    '''
    stack_script += ' {}'.format(
        serialization.hex_serialize(redeem_script))
    return serialization.serialize(stack_script)
[ "def", "make_script_sig", "(", "stack_script", ",", "redeem_script", ")", ":", "stack_script", "+=", "' {}'", ".", "format", "(", "serialization", ".", "hex_serialize", "(", "redeem_script", ")", ")", "return", "serialization", ".", "serialize", "(", "stack_script", ")" ]
str, str -> bytearray
[ "str", "str", "-", ">", "bytearray" ]
python
train
BD2KGenomics/toil-lib
src/toil_lib/tools/preprocessing.py
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/preprocessing.py#L649-L704
def run_base_recalibration(job, bam, bai, ref, ref_dict, fai, dbsnp, mills, unsafe=False):
    """
    Creates recalibration table for Base Quality Score Recalibration

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str bam: FileStoreID for BAM file
    :param str bai: FileStoreID for BAM index file
    :param str ref: FileStoreID for reference genome fasta file
    :param str ref_dict: FileStoreID for reference genome sequence dictionary file
    :param str fai: FileStoreID for reference genome fasta index file
    :param str dbsnp: FileStoreID for dbSNP VCF file
    :param str mills: FileStoreID for Mills VCF file
    :param bool unsafe: If True, runs GATK in UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
    :return: FileStoreID for the recalibration table file
    :rtype: str
    """
    inputs = {'ref.fasta': ref,
              'ref.fasta.fai': fai,
              'ref.dict': ref_dict,
              'input.bam': bam,
              'input.bai': bai,
              'dbsnp.vcf': dbsnp,
              'mills.vcf': mills}
    work_dir = job.fileStore.getLocalTempDir()
    for name, file_store_id in inputs.iteritems():
        job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))

    # Call: GATK -- BaseRecalibrator
    parameters = ['-T', 'BaseRecalibrator',
                  '-nct', str(int(job.cores)),
                  '-R', '/data/ref.fasta',
                  '-I', '/data/input.bam',
                  # Recommended known sites:
                  # https://software.broadinstitute.org/gatk/guide/article?id=1247
                  '-knownSites', '/data/dbsnp.vcf',
                  '-knownSites', '/data/mills.vcf',
                  '-o', '/data/recal_data.table']
    if unsafe:
        parameters.extend(['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'])

    # Set TMPDIR to /data to prevent writing temporary files to /tmp
    docker_parameters = ['--rm', '--log-driver', 'none',
                         '-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory),
                         '-v', '{}:/data'.format(work_dir)]
    start_time = time.time()
    dockerCall(job=job,
               tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
               workDir=work_dir, parameters=parameters,
               dockerParameters=docker_parameters)
    end_time = time.time()
    _log_runtime(job, start_time, end_time, "GATK3 BaseRecalibrator")

    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'recal_data.table'))
[ "def", "run_base_recalibration", "(", "job", ",", "bam", ",", "bai", ",", "ref", ",", "ref_dict", ",", "fai", ",", "dbsnp", ",", "mills", ",", "unsafe", "=", "False", ")", ":", "inputs", "=", "{", "'ref.fasta'", ":", "ref", ",", "'ref.fasta.fai'", ":", "fai", ",", "'ref.dict'", ":", "ref_dict", ",", "'input.bam'", ":", "bam", ",", "'input.bai'", ":", "bai", ",", "'dbsnp.vcf'", ":", "dbsnp", ",", "'mills.vcf'", ":", "mills", "}", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "for", "name", ",", "file_store_id", "in", "inputs", ".", "iteritems", "(", ")", ":", "job", ".", "fileStore", ".", "readGlobalFile", "(", "file_store_id", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "name", ")", ")", "# Call: GATK -- BaseRecalibrator", "parameters", "=", "[", "'-T'", ",", "'BaseRecalibrator'", ",", "'-nct'", ",", "str", "(", "int", "(", "job", ".", "cores", ")", ")", ",", "'-R'", ",", "'/data/ref.fasta'", ",", "'-I'", ",", "'/data/input.bam'", ",", "# Recommended known sites:", "# https://software.broadinstitute.org/gatk/guide/article?id=1247", "'-knownSites'", ",", "'/data/dbsnp.vcf'", ",", "'-knownSites'", ",", "'/data/mills.vcf'", ",", "'-o'", ",", "'/data/recal_data.table'", "]", "if", "unsafe", ":", "parameters", ".", "extend", "(", "[", "'-U'", ",", "'ALLOW_SEQ_DICT_INCOMPATIBILITY'", "]", ")", "# Set TMPDIR to /data to prevent writing temporary files to /tmp", "docker_parameters", "=", "[", "'--rm'", ",", "'--log-driver'", ",", "'none'", ",", "'-e'", ",", "'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'", ".", "format", "(", "job", ".", "memory", ")", ",", "'-v'", ",", "'{}:/data'", ".", "format", "(", "work_dir", ")", "]", "start_time", "=", "time", ".", "time", "(", ")", "dockerCall", "(", "job", "=", "job", ",", "tool", "=", "'quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2'", ",", "workDir", "=", "work_dir", ",", "parameters", "=", "parameters", ",", "dockerParameters", "=", "docker_parameters", ")", "end_time", "=", "time", ".", "time", "(", ")", "_log_runtime", "(", "job", ",", "start_time", ",", "end_time", ",", "\"GATK3 BaseRecalibrator\"", ")", "return", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'recal_data.table'", ")", ")" ]
Creates recalibration table for Base Quality Score Recalibration

:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:param str bai: FileStoreID for BAM index file
:param str ref: FileStoreID for reference genome fasta file
:param str ref_dict: FileStoreID for reference genome sequence dictionary file
:param str fai: FileStoreID for reference genome fasta index file
:param str dbsnp: FileStoreID for dbSNP VCF file
:param str mills: FileStoreID for Mills VCF file
:param bool unsafe: If True, runs GATK in UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
:return: FileStoreID for the recalibration table file
:rtype: str
[ "Creates", "recalibration", "table", "for", "Base", "Quality", "Score", "Recalibration" ]
python
test
StellarCN/py-stellar-base
stellar_base/stellarxdr/xdrgen.py
https://github.com/StellarCN/py-stellar-base/blob/cce2e782064fb3955c85e1696e630d67b1010848/stellar_base/stellarxdr/xdrgen.py#L465-L472
def p_type_def_2(t):
    """type_def : ENUM ID enum_body SEMI"""
    id = t[2]
    body = t[3]
    lineno = t.lineno(1)
    sortno = t.lineno(4) + 0.5
    if id_unique(id, 'enum', lineno):
        name_dict[id] = enum_info(id, body, lineno, sortno)
[ "def", "p_type_def_2", "(", "t", ")", ":", "id", "=", "t", "[", "2", "]", "body", "=", "t", "[", "3", "]", "lineno", "=", "t", ".", "lineno", "(", "1", ")", "sortno", "=", "t", ".", "lineno", "(", "4", ")", "+", "0.5", "if", "id_unique", "(", "id", ",", "'enum'", ",", "lineno", ")", ":", "name_dict", "[", "id", "]", "=", "enum_info", "(", "id", ",", "body", ",", "lineno", ",", "sortno", ")" ]
type_def : ENUM ID enum_body SEMI
[ "type_def", ":", "ENUM", "ID", "enum_body", "SEMI" ]
python
train
pre-commit/pre-commit
pre_commit/languages/python_venv.py
https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/languages/python_venv.py#L21-L47
def orig_py_exe(exe):  # pragma: no cover (platform specific)
    """A -mvenv virtualenv made from a -mvirtualenv virtualenv installs
    packages to the incorrect location.  Attempt to find the _original_ exe
    and invoke `-mvenv` from there.

    See:
    - https://github.com/pre-commit/pre-commit/issues/755
    - https://github.com/pypa/virtualenv/issues/1095
    - https://bugs.python.org/issue30811
    """
    try:
        prefix_script = 'import sys; print(sys.real_prefix)'
        _, prefix, _ = cmd_output(exe, '-c', prefix_script)
        prefix = prefix.strip()
    except CalledProcessError:
        # not created from -mvirtualenv
        return exe

    if os.name == 'nt':
        expected = os.path.join(prefix, 'python.exe')
    else:
        expected = os.path.join(prefix, 'bin', os.path.basename(exe))

    if os.path.exists(expected):
        return expected
    else:
        return exe
[ "def", "orig_py_exe", "(", "exe", ")", ":", "# pragma: no cover (platform specific)", "try", ":", "prefix_script", "=", "'import sys; print(sys.real_prefix)'", "_", ",", "prefix", ",", "_", "=", "cmd_output", "(", "exe", ",", "'-c'", ",", "prefix_script", ")", "prefix", "=", "prefix", ".", "strip", "(", ")", "except", "CalledProcessError", ":", "# not created from -mvirtualenv", "return", "exe", "if", "os", ".", "name", "==", "'nt'", ":", "expected", "=", "os", ".", "path", ".", "join", "(", "prefix", ",", "'python.exe'", ")", "else", ":", "expected", "=", "os", ".", "path", ".", "join", "(", "prefix", ",", "'bin'", ",", "os", ".", "path", ".", "basename", "(", "exe", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "expected", ")", ":", "return", "expected", "else", ":", "return", "exe" ]
A -mvenv virtualenv made from a -mvirtualenv virtualenv installs
packages to the incorrect location.  Attempt to find the _original_ exe
and invoke `-mvenv` from there.

See:
- https://github.com/pre-commit/pre-commit/issues/755
- https://github.com/pypa/virtualenv/issues/1095
- https://bugs.python.org/issue30811
[ "A", "-", "mvenv", "virtualenv", "made", "from", "a", "-", "mvirtualenv", "virtualenv", "installs", "packages", "to", "the", "incorrect", "location", ".", "Attempt", "to", "find", "the", "_original_", "exe", "and", "invoke", "-", "mvenv", "from", "there", "." ]
python
train
SavinaRoja/PyUserInput
pymouse/base.py
https://github.com/SavinaRoja/PyUserInput/blob/153c1d39b1a41b467b235fd182392d6dcbf07947/pymouse/base.py#L49-L57
def click(self, x, y, button=1, n=1):
    """
    Click a mouse button n times on a given x, y.
    Button is defined as 1 = left, 2 = right, 3 = middle.
    """
    for i in range(n):
        self.press(x, y, button)
        self.release(x, y, button)
[ "def", "click", "(", "self", ",", "x", ",", "y", ",", "button", "=", "1", ",", "n", "=", "1", ")", ":", "for", "i", "in", "range", "(", "n", ")", ":", "self", ".", "press", "(", "x", ",", "y", ",", "button", ")", "self", ".", "release", "(", "x", ",", "y", ",", "button", ")" ]
Click a mouse button n times on a given x, y. Button is defined as 1 = left, 2 = right, 3 = middle.
[ "Click", "a", "mouse", "button", "n", "times", "on", "a", "given", "x", "y", ".", "Button", "is", "defined", "as", "1", "=", "left", "2", "=", "right", "3", "=", "middle", "." ]
python
train
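A usage sketch, assuming PyUserInput is installed; PyMouse is the cross-platform front-end that exposes this click method:

from pymouse import PyMouse

m = PyMouse()
x, y = m.position()           # current cursor position
m.click(x, y, button=1, n=2)  # double left-click in place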
inasafe/inasafe
safe/metadata/metadata_db_io.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/metadata/metadata_db_io.py#L198-L211
def hash_for_datasource(data_source):
    """Given a data_source, return its hash.

    :param data_source: The data_source name from a layer.
    :type data_source: str

    :returns: An md5 hash for the data source name.
    :rtype: str
    """
    import hashlib
    hash_value = hashlib.md5()
    hash_value.update(data_source.encode('utf-8'))
    hash_value = hash_value.hexdigest()
    return hash_value
[ "def", "hash_for_datasource", "(", "data_source", ")", ":", "import", "hashlib", "hash_value", "=", "hashlib", ".", "md5", "(", ")", "hash_value", ".", "update", "(", "data_source", ".", "encode", "(", "'utf-8'", ")", ")", "hash_value", "=", "hash_value", ".", "hexdigest", "(", ")", "return", "hash_value" ]
Given a data_source, return its hash.

:param data_source: The data_source name from a layer.
:type data_source: str

:returns: An md5 hash for the data source name.
:rtype: str
[ "Given", "a", "data_source", "return", "its", "hash", "." ]
python
train
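The body above is plain hashlib; a standalone equivalent that produces the same digest for a given data source name (the path is hypothetical):

import hashlib

data_source = '/path/to/layer.shp'  # hypothetical layer source name
print(hashlib.md5(data_source.encode('utf-8')).hexdigest())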
blockstack/blockstack-core
blockstack/lib/operations/preorder.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/operations/preorder.py#L148-L171
def get_preorder_burn_info(outputs):
    """
    Given the set of outputs, find the fee sent
    to our burn address.  This is always the third output.

    Return the fee and burn address on success as
    {'op_fee': ..., 'burn_address': ...}
    Return None if not found
    """
    if len(outputs) != 3:
        # not a well-formed preorder
        return None

    op_fee = outputs[2]['value']
    burn_address = None

    try:
        burn_address = virtualchain.script_hex_to_address(outputs[2]['script'])
        assert burn_address
    except:
        log.error("Not a well-formed preorder burn: {}".format(outputs[2]['script']))
        return None

    return {'op_fee': op_fee, 'burn_address': burn_address}
[ "def", "get_preorder_burn_info", "(", "outputs", ")", ":", "if", "len", "(", "outputs", ")", "!=", "3", ":", "# not a well-formed preorder ", "return", "None", "op_fee", "=", "outputs", "[", "2", "]", "[", "'value'", "]", "burn_address", "=", "None", "try", ":", "burn_address", "=", "virtualchain", ".", "script_hex_to_address", "(", "outputs", "[", "2", "]", "[", "'script'", "]", ")", "assert", "burn_address", "except", ":", "log", ".", "error", "(", "\"Not a well-formed preorder burn: {}\"", ".", "format", "(", "outputs", "[", "2", "]", "[", "'script'", "]", ")", ")", "return", "None", "return", "{", "'op_fee'", ":", "op_fee", ",", "'burn_address'", ":", "burn_address", "}" ]
Given the set of outputs, find the fee sent to our burn address. This is always the third output. Return the fee and burn address on success as {'op_fee': ..., 'burn_address': ...} Return None if not found
[ "Given", "the", "set", "of", "outputs", "find", "the", "fee", "sent", "to", "our", "burn", "address", ".", "This", "is", "always", "the", "third", "output", ".", "Return", "the", "fee", "and", "burn", "address", "on", "success", "as", "{", "op_fee", ":", "...", "burn_address", ":", "...", "}", "Return", "None", "if", "not", "found" ]
python
train
Alignak-monitoring/alignak
alignak/dependencynode.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/dependencynode.py#L767-L801
def get_host_filters(self, expr):
    # pylint: disable=too-many-return-statements
    """Generates host filter list corresponding to the expression ::

    * '*' => any
    * 'g' => group filter
    * 'r' => regex name filter
    * 'l' => bp rule label filter
    * 't' => tag filter
    * '' => none filter
    * No flag match => host name filter

    :param expr: expression to parse
    :type expr: str
    :return: filter list
    :rtype: list
    """
    if expr == "*":
        return [filter_any]
    match = re.search(r"^([%s]+):(.*)" % self.host_flags, expr)
    if match is None:
        return [filter_host_by_name(expr)]
    flags, expr = match.groups()
    if "g" in flags:
        return [filter_host_by_group(expr)]
    if "r" in flags:
        return [filter_host_by_regex(expr)]
    if "l" in flags:
        return [filter_host_by_bp_rule_label(expr)]
    if "t" in flags:
        return [filter_host_by_tag(expr)]
    return [filter_none]
[ "def", "get_host_filters", "(", "self", ",", "expr", ")", ":", "# pylint: disable=too-many-return-statements", "if", "expr", "==", "\"*\"", ":", "return", "[", "filter_any", "]", "match", "=", "re", ".", "search", "(", "r\"^([%s]+):(.*)\"", "%", "self", ".", "host_flags", ",", "expr", ")", "if", "match", "is", "None", ":", "return", "[", "filter_host_by_name", "(", "expr", ")", "]", "flags", ",", "expr", "=", "match", ".", "groups", "(", ")", "if", "\"g\"", "in", "flags", ":", "return", "[", "filter_host_by_group", "(", "expr", ")", "]", "if", "\"r\"", "in", "flags", ":", "return", "[", "filter_host_by_regex", "(", "expr", ")", "]", "if", "\"l\"", "in", "flags", ":", "return", "[", "filter_host_by_bp_rule_label", "(", "expr", ")", "]", "if", "\"t\"", "in", "flags", ":", "return", "[", "filter_host_by_tag", "(", "expr", ")", "]", "return", "[", "filter_none", "]" ]
Generates host filter list corresponding to the expression ::

* '*' => any
* 'g' => group filter
* 'r' => regex name filter
* 'l' => bp rule label filter
* 't' => tag filter
* '' => none filter
* No flag match => host name filter

:param expr: expression to parse
:type expr: str
:return: filter list
:rtype: list
[ "Generates", "host", "filter", "list", "corresponding", "to", "the", "expression", "::" ]
python
train
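A standalone sketch of the flag-parsing step above, assuming host_flags is the string 'grlt' (the flags enumerated in the docstring); the host names are hypothetical:

import re

def split_flags(expr, host_flags='grlt'):
    match = re.search(r"^([%s]+):(.*)" % host_flags, expr)
    if match is None:
        return None, expr  # no flag prefix: falls back to a host-name filter
    return match.groups()

print(split_flags('g:webservers'))  # ('g', 'webservers')
print(split_flags('db-server-01'))  # (None, 'db-server-01')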
gwastro/pycbc
pycbc/vetoes/bank_chisq.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/vetoes/bank_chisq.py#L63-L97
def template_overlaps(bank_filters, template, psd, low_frequency_cutoff):
    """ This function calculates the overlaps between the template and the
    bank veto templates.

    Parameters
    ----------
    bank_filters: List of FrequencySeries
    template: FrequencySeries
    psd: FrequencySeries
    low_frequency_cutoff: float

    Returns
    -------
    overlaps: List of complex overlap values.
    """
    overlaps = []
    template_ow = template / psd
    for bank_template in bank_filters:
        overlap = overlap_cplx(template_ow, bank_template,
                               low_frequency_cutoff=low_frequency_cutoff,
                               normalized=False)
        norm = sqrt(1 / template.sigmasq(psd) / bank_template.sigmasq(psd))
        overlaps.append(overlap * norm)
        if (abs(overlaps[-1]) > 0.99):
            errMsg = "Overlap > 0.99 between bank template and filter. "
            errMsg += "This bank template will not be used to calculate "
            errMsg += "bank chisq for this filter template. The expected "
            errMsg += "value will be added to the chisq to account for "
            errMsg += "the removal of this template.\n"
            errMsg += "Masses of filter template: %e %e\n" \
                      % (template.params.mass1, template.params.mass2)
            errMsg += "Masses of bank filter template: %e %e\n" \
                      % (bank_template.params.mass1, bank_template.params.mass2)
            errMsg += "Overlap: %e" % (abs(overlaps[-1]))
            logging.debug(errMsg)
    return overlaps
[ "def", "template_overlaps", "(", "bank_filters", ",", "template", ",", "psd", ",", "low_frequency_cutoff", ")", ":", "overlaps", "=", "[", "]", "template_ow", "=", "template", "/", "psd", "for", "bank_template", "in", "bank_filters", ":", "overlap", "=", "overlap_cplx", "(", "template_ow", ",", "bank_template", ",", "low_frequency_cutoff", "=", "low_frequency_cutoff", ",", "normalized", "=", "False", ")", "norm", "=", "sqrt", "(", "1", "/", "template", ".", "sigmasq", "(", "psd", ")", "/", "bank_template", ".", "sigmasq", "(", "psd", ")", ")", "overlaps", ".", "append", "(", "overlap", "*", "norm", ")", "if", "(", "abs", "(", "overlaps", "[", "-", "1", "]", ")", ">", "0.99", ")", ":", "errMsg", "=", "\"Overlap > 0.99 between bank template and filter. \"", "errMsg", "+=", "\"This bank template will not be used to calculate \"", "errMsg", "+=", "\"bank chisq for this filter template. The expected \"", "errMsg", "+=", "\"value will be added to the chisq to account for \"", "errMsg", "+=", "\"the removal of this template.\\n\"", "errMsg", "+=", "\"Masses of filter template: %e %e\\n\"", "%", "(", "template", ".", "params", ".", "mass1", ",", "template", ".", "params", ".", "mass2", ")", "errMsg", "+=", "\"Masses of bank filter template: %e %e\\n\"", "%", "(", "bank_template", ".", "params", ".", "mass1", ",", "bank_template", ".", "params", ".", "mass2", ")", "errMsg", "+=", "\"Overlap: %e\"", "%", "(", "abs", "(", "overlaps", "[", "-", "1", "]", ")", ")", "logging", ".", "debug", "(", "errMsg", ")", "return", "overlaps" ]
This function calculates the overlaps between the template and the
bank veto templates.

Parameters
----------
bank_filters: List of FrequencySeries
template: FrequencySeries
psd: FrequencySeries
low_frequency_cutoff: float

Returns
-------
overlaps: List of complex overlap values.
[ "This", "functions", "calculates", "the", "overlaps", "between", "the", "template", "and", "the", "bank", "veto", "templates", "." ]
python
train
fastai/fastai
fastai/train.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/train.py#L120-L126
def on_backward_end(self, **kwargs):
    "accumulated step and reset samples, True will result in no stepping"
    if (self.acc_batches % self.n_step) == 0:
        for p in (self.learn.model.parameters()):
            if p.requires_grad:
                p.grad.div_(self.acc_samples)
        self.acc_samples = 0
    else:
        return {'skip_step': True, 'skip_zero': True}
[ "def", "on_backward_end", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "(", "self", ".", "acc_batches", "%", "self", ".", "n_step", ")", "==", "0", ":", "for", "p", "in", "(", "self", ".", "learn", ".", "model", ".", "parameters", "(", ")", ")", ":", "if", "p", ".", "requires_grad", ":", "p", ".", "grad", ".", "div_", "(", "self", ".", "acc_samples", ")", "self", ".", "acc_samples", "=", "0", "else", ":", "return", "{", "'skip_step'", ":", "True", ",", "'skip_zero'", ":", "True", "}" ]
accumulated step and reset samples, True will result in no stepping
[ "accumulated", "step", "and", "reset", "samples", "True", "will", "result", "in", "no", "stepping" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/utils/decoding.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L173-L242
def decode_from_dataset(estimator,
                        problem_name,
                        hparams,
                        decode_hp,
                        decode_to_file=None,
                        dataset_split=None,
                        checkpoint_path=None):
    """Perform decoding from dataset."""
    tf.logging.info("Performing local inference from dataset for %s.",
                    str(problem_name))

    # We assume that worker_id corresponds to shard number.
    shard = decode_hp.shard_id if decode_hp.shards > 1 else None

    # Setup output directory for any artifacts that may be written out.
    output_dir = os.path.join(estimator.model_dir, "decode")
    tf.gfile.MakeDirs(output_dir)

    # If decode_hp.batch_size is specified, use a fixed batch size
    if decode_hp.batch_size:
        hparams.batch_size = decode_hp.batch_size
        hparams.use_fixed_batch_size = True

    dataset_kwargs = {
        "shard": shard,
        "dataset_split": dataset_split,
        "max_records": decode_hp.num_samples
    }

    # Build the inference input function
    problem = hparams.problem
    infer_input_fn = problem.make_estimator_input_fn(
        tf.estimator.ModeKeys.PREDICT, hparams, dataset_kwargs=dataset_kwargs)

    predictions, output_dirs = [], []
    for decode_id in range(decode_hp.num_decodes):
        tf.logging.info("Decoding {}".format(decode_id))

        # Create decode directory if not in-memory decoding.
        if not decode_hp.decode_in_memory:
            output_dir = os.path.join(estimator.model_dir,
                                      "decode_%05d" % decode_id)
            tf.gfile.MakeDirs(output_dir)
            output_dirs.append(output_dir)

        result = decode_once(estimator,
                             problem_name,
                             hparams,
                             infer_input_fn,
                             decode_hp,
                             decode_to_file,
                             output_dir,
                             log_results=decode_hp.log_results,
                             checkpoint_path=checkpoint_path)

        if decode_hp.decode_in_memory:
            output_dirs = [output_dir]
            predictions.append(result)

    if decode_hp.decode_to_file:
        decode_hp.decode_to_file = _decode_filename(
            decode_hp.decode_to_file, problem_name, decode_hp)

    run_postdecode_hooks(DecodeHookArgs(
        estimator=estimator,
        problem=problem,
        output_dirs=output_dirs,
        hparams=hparams,
        decode_hparams=decode_hp,
        predictions=predictions
    ), dataset_split)

    return predictions
[ "def", "decode_from_dataset", "(", "estimator", ",", "problem_name", ",", "hparams", ",", "decode_hp", ",", "decode_to_file", "=", "None", ",", "dataset_split", "=", "None", ",", "checkpoint_path", "=", "None", ")", ":", "tf", ".", "logging", ".", "info", "(", "\"Performing local inference from dataset for %s.\"", ",", "str", "(", "problem_name", ")", ")", "# We assume that worker_id corresponds to shard number.", "shard", "=", "decode_hp", ".", "shard_id", "if", "decode_hp", ".", "shards", ">", "1", "else", "None", "# Setup output directory for any artifacts that may be written out.", "output_dir", "=", "os", ".", "path", ".", "join", "(", "estimator", ".", "model_dir", ",", "\"decode\"", ")", "tf", ".", "gfile", ".", "MakeDirs", "(", "output_dir", ")", "# If decode_hp.batch_size is specified, use a fixed batch size", "if", "decode_hp", ".", "batch_size", ":", "hparams", ".", "batch_size", "=", "decode_hp", ".", "batch_size", "hparams", ".", "use_fixed_batch_size", "=", "True", "dataset_kwargs", "=", "{", "\"shard\"", ":", "shard", ",", "\"dataset_split\"", ":", "dataset_split", ",", "\"max_records\"", ":", "decode_hp", ".", "num_samples", "}", "# Build the inference input function", "problem", "=", "hparams", ".", "problem", "infer_input_fn", "=", "problem", ".", "make_estimator_input_fn", "(", "tf", ".", "estimator", ".", "ModeKeys", ".", "PREDICT", ",", "hparams", ",", "dataset_kwargs", "=", "dataset_kwargs", ")", "predictions", ",", "output_dirs", "=", "[", "]", ",", "[", "]", "for", "decode_id", "in", "range", "(", "decode_hp", ".", "num_decodes", ")", ":", "tf", ".", "logging", ".", "info", "(", "\"Decoding {}\"", ".", "format", "(", "decode_id", ")", ")", "# Create decode directory if not in-memory decoding.", "if", "not", "decode_hp", ".", "decode_in_memory", ":", "output_dir", "=", "os", ".", "path", ".", "join", "(", "estimator", ".", "model_dir", ",", "\"decode_%05d\"", "%", "decode_id", ")", "tf", ".", "gfile", ".", "MakeDirs", "(", "output_dir", ")", "output_dirs", ".", "append", "(", "output_dir", ")", "result", "=", "decode_once", "(", "estimator", ",", "problem_name", ",", "hparams", ",", "infer_input_fn", ",", "decode_hp", ",", "decode_to_file", ",", "output_dir", ",", "log_results", "=", "decode_hp", ".", "log_results", ",", "checkpoint_path", "=", "checkpoint_path", ")", "if", "decode_hp", ".", "decode_in_memory", ":", "output_dirs", "=", "[", "output_dir", "]", "predictions", ".", "append", "(", "result", ")", "if", "decode_hp", ".", "decode_to_file", ":", "decode_hp", ".", "decode_to_file", "=", "_decode_filename", "(", "decode_hp", ".", "decode_to_file", ",", "problem_name", ",", "decode_hp", ")", "run_postdecode_hooks", "(", "DecodeHookArgs", "(", "estimator", "=", "estimator", ",", "problem", "=", "problem", ",", "output_dirs", "=", "output_dirs", ",", "hparams", "=", "hparams", ",", "decode_hparams", "=", "decode_hp", ",", "predictions", "=", "predictions", ")", ",", "dataset_split", ")", "return", "predictions" ]
Perform decoding from dataset.
[ "Perform", "decoding", "from", "dataset", "." ]
python
train
Infinidat/infi.execute
src/infi/execute/waiting.py
https://github.com/Infinidat/infi.execute/blob/6f8531950937dbf84bea7ab1ffc65f16a9e73870/src/infi/execute/waiting.py#L29-L37
def _get_deadline(results, timeout=None):
    """ returns the earliest deadline point in time """
    start_time = time()
    all_deadlines = set(result.get_deadline() for result in results)
    all_deadlines.discard(None)
    if timeout is not None:
        all_deadlines.add(start_time + timeout)
    return min(all_deadlines) if all_deadlines else None
[ "def", "_get_deadline", "(", "results", ",", "timeout", "=", "None", ")", ":", "start_time", "=", "time", "(", ")", "all_deadlines", "=", "set", "(", "result", ".", "get_deadline", "(", ")", "for", "result", "in", "results", ")", "all_deadlines", ".", "discard", "(", "None", ")", "if", "timeout", "is", "not", "None", ":", "all_deadlines", ".", "add", "(", "start_time", "+", "timeout", ")", "return", "min", "(", "all_deadlines", ")", "if", "all_deadlines", "else", "None" ]
returns the earliest deadline point in time
[ "returns", "the", "earliest", "deadline", "point", "in", "time" ]
python
train
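A self-contained sketch of the same deadline arithmetic, with a stub object standing in for infi.execute's real result type:

from time import time

class StubResult(object):
    def __init__(self, deadline):
        self._deadline = deadline

    def get_deadline(self):
        return self._deadline

start_time = time()
results = [StubResult(None), StubResult(start_time + 5)]
deadlines = {r.get_deadline() for r in results}
deadlines.discard(None)          # results without deadlines are ignored
deadlines.add(start_time + 2)    # a 2-second timeout joins the candidates
print(min(deadlines) - start_time)  # 2.0: the earliest deadline wins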
dakrauth/django-swingtime
swingtime/views.py
https://github.com/dakrauth/django-swingtime/blob/d1cdd449bd5c6895c3ff182fd890c4d3452943fe/swingtime/views.py#L20-L40
def event_listing(
    request,
    template='swingtime/event_list.html',
    events=None,
    **extra_context
):
    '''
    View all ``events``.

    If ``events`` is a queryset, clone it. If ``None`` default to all
    ``Event``s.

    Context parameters:

    ``events``
        an iterable of ``Event`` objects

    ... plus all values passed in via **extra_context
    '''
    events = events or Event.objects.all()
    extra_context['events'] = events
    return render(request, template, extra_context)
[ "def", "event_listing", "(", "request", ",", "template", "=", "'swingtime/event_list.html'", ",", "events", "=", "None", ",", "*", "*", "extra_context", ")", ":", "events", "=", "events", "or", "Event", ".", "objects", ".", "all", "(", ")", "extra_context", "[", "'events'", "]", "=", "events", "return", "render", "(", "request", ",", "template", ",", "extra_context", ")" ]
View all ``events``.

If ``events`` is a queryset, clone it. If ``None`` default to all
``Event``s.

Context parameters:

``events``
    an iterable of ``Event`` objects

... plus all values passed in via **extra_context
[ "View", "all", "events", "." ]
python
train
kiwiz/gkeepapi
gkeepapi/node.py
https://github.com/kiwiz/gkeepapi/blob/78aaae8b988b1cf616e3973f7f15d4c6d5e996cc/gkeepapi/node.py#L1817-L1838
def from_json(raw):
    """Helper to construct a node from a dict.

    Args:
        raw (dict): Raw node representation.

    Returns:
        Node: A Node object or None.
    """
    ncls = None
    _type = raw.get('type')
    try:
        ncls = _type_map[NodeType(_type)]
    except (KeyError, ValueError) as e:
        logger.warning('Unknown node type: %s', _type)
        if DEBUG:
            raise_from(exception.ParseException('Parse error for %s' % (_type), raw), e)
        return None

    node = ncls()
    node.load(raw)

    return node
[ "def", "from_json", "(", "raw", ")", ":", "ncls", "=", "None", "_type", "=", "raw", ".", "get", "(", "'type'", ")", "try", ":", "ncls", "=", "_type_map", "[", "NodeType", "(", "_type", ")", "]", "except", "(", "KeyError", ",", "ValueError", ")", "as", "e", ":", "logger", ".", "warning", "(", "'Unknown node type: %s'", ",", "_type", ")", "if", "DEBUG", ":", "raise_from", "(", "exception", ".", "ParseException", "(", "'Parse error for %s'", "%", "(", "_type", ")", ",", "raw", ")", ",", "e", ")", "return", "None", "node", "=", "ncls", "(", ")", "node", ".", "load", "(", "raw", ")", "return", "node" ]
Helper to construct a node from a dict.

Args:
    raw (dict): Raw node representation.

Returns:
    Node: A Node object or None.
[ "Helper", "to", "construct", "a", "node", "from", "a", "dict", "." ]
python
train
secdev/scapy
scapy/config.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/config.py#L640-L650
def crypto_validator(func):
    """
    This is a decorator to be used for any method relying on the cryptography library.  # noqa: E501
    Its behaviour depends on the 'crypto_valid' attribute of the global 'conf'.
    """
    def func_in(*args, **kwargs):
        if not conf.crypto_valid:
            raise ImportError("Cannot execute crypto-related method! "
                              "Please install python-cryptography v1.7 or later.")  # noqa: E501
        return func(*args, **kwargs)
    return func_in
[ "def", "crypto_validator", "(", "func", ")", ":", "def", "func_in", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "conf", ".", "crypto_valid", ":", "raise", "ImportError", "(", "\"Cannot execute crypto-related method! \"", "\"Please install python-cryptography v1.7 or later.\"", ")", "# noqa: E501", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "func_in" ]
This a decorator to be used for any method relying on the cryptography library. # noqa: E501 Its behaviour depends on the 'crypto_valid' attribute of the global 'conf'.
[ "This", "a", "decorator", "to", "be", "used", "for", "any", "method", "relying", "on", "the", "cryptography", "library", ".", "#", "noqa", ":", "E501", "Its", "behaviour", "depends", "on", "the", "crypto_valid", "attribute", "of", "the", "global", "conf", "." ]
python
train
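The same guard pattern in self-contained form, with a stub conf object in place of Scapy's global configuration:

class _Conf(object):
    crypto_valid = False  # stub for scapy's conf.crypto_valid

conf = _Conf()

def crypto_validator(func):
    def func_in(*args, **kwargs):
        if not conf.crypto_valid:
            raise ImportError("Cannot execute crypto-related method!")
        return func(*args, **kwargs)
    return func_in

@crypto_validator
def decrypt(data):
    return data

try:
    decrypt(b'...')
except ImportError as e:
    print(e)  # raised because conf.crypto_valid is False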
Clinical-Genomics/scout
scout/utils/link.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/utils/link.py#L59-L83
def add_ensembl_info(genes, ensembl_lines):
    """Add the coordinates from ensembl

    Args:
        genes(dict): Dictionary with all genes
        ensembl_lines(iterable): Iterable with raw ensembl info
    """
    LOG.info("Adding ensembl coordinates")
    # Parse and add the ensembl gene info
    if isinstance(ensembl_lines, DataFrame):
        ensembl_genes = parse_ensembl_gene_request(ensembl_lines)
    else:
        ensembl_genes = parse_ensembl_genes(ensembl_lines)

    for ensembl_gene in ensembl_genes:
        gene_obj = genes.get(ensembl_gene['hgnc_id'])
        if not gene_obj:
            continue
        gene_obj['chromosome'] = ensembl_gene['chrom']
        gene_obj['start'] = ensembl_gene['gene_start']
        gene_obj['end'] = ensembl_gene['gene_end']
        # ensembl ids can differ between builds. There is one stated in HGNC
        # that is true for build 38. So we add information from ensembl
        gene_obj['ensembl_gene_id'] = ensembl_gene['ensembl_gene_id']
[ "def", "add_ensembl_info", "(", "genes", ",", "ensembl_lines", ")", ":", "LOG", ".", "info", "(", "\"Adding ensembl coordinates\"", ")", "# Parse and add the ensembl gene info", "if", "isinstance", "(", "ensembl_lines", ",", "DataFrame", ")", ":", "ensembl_genes", "=", "parse_ensembl_gene_request", "(", "ensembl_lines", ")", "else", ":", "ensembl_genes", "=", "parse_ensembl_genes", "(", "ensembl_lines", ")", "for", "ensembl_gene", "in", "ensembl_genes", ":", "gene_obj", "=", "genes", ".", "get", "(", "ensembl_gene", "[", "'hgnc_id'", "]", ")", "if", "not", "gene_obj", ":", "continue", "gene_obj", "[", "'chromosome'", "]", "=", "ensembl_gene", "[", "'chrom'", "]", "gene_obj", "[", "'start'", "]", "=", "ensembl_gene", "[", "'gene_start'", "]", "gene_obj", "[", "'end'", "]", "=", "ensembl_gene", "[", "'gene_end'", "]", "# ensembl ids can differ between builds. There is one stated in HGNC", "# that is true for build 38. So we add information from ensembl", "gene_obj", "[", "'ensembl_gene_id'", "]", "=", "ensembl_gene", "[", "'ensembl_gene_id'", "]" ]
Add the coordinates from ensembl

Args:
    genes(dict): Dictionary with all genes
    ensembl_lines(iterable): Iterable with raw ensembl info
[ "Add", "the", "coordinates", "from", "ensembl", "Args", ":", "genes", "(", "dict", ")", ":", "Dictionary", "with", "all", "genes", "ensembl_lines", "(", "iteable", ")", ":", "Iteable", "with", "raw", "ensembl", "info" ]
python
test
pyviz/holoviews
holoviews/plotting/bokeh/plot.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/bokeh/plot.py#L760-L773
def update_frame(self, key, ranges=None):
    """
    Update the internal state of the Plot to represent the given
    key tuple (where integers represent frames). Returns this state.
    """
    ranges = self.compute_ranges(self.layout, key, ranges)
    for coord in self.layout.keys(full_grid=True):
        subplot = self.subplots.get(wrap_tuple(coord), None)
        if subplot is not None:
            subplot.update_frame(key, ranges)
    title = self._get_title_div(key)
    if title:
        self.handles['title']
[ "def", "update_frame", "(", "self", ",", "key", ",", "ranges", "=", "None", ")", ":", "ranges", "=", "self", ".", "compute_ranges", "(", "self", ".", "layout", ",", "key", ",", "ranges", ")", "for", "coord", "in", "self", ".", "layout", ".", "keys", "(", "full_grid", "=", "True", ")", ":", "subplot", "=", "self", ".", "subplots", ".", "get", "(", "wrap_tuple", "(", "coord", ")", ",", "None", ")", "if", "subplot", "is", "not", "None", ":", "subplot", ".", "update_frame", "(", "key", ",", "ranges", ")", "title", "=", "self", ".", "_get_title_div", "(", "key", ")", "if", "title", ":", "self", ".", "handles", "[", "'title'", "]" ]
Update the internal state of the Plot to represent the given key tuple (where integers represent frames). Returns this state.
[ "Update", "the", "internal", "state", "of", "the", "Plot", "to", "represent", "the", "given", "key", "tuple", "(", "where", "integers", "represent", "frames", ")", ".", "Returns", "this", "state", "." ]
python
train
wummel/linkchecker
linkcheck/url.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/url.py#L275-L285
def urlunsplit (urlparts):
    """Same as urlparse.urlunsplit but with extra UNC path handling
    for Windows OS."""
    res = urlparse.urlunsplit(urlparts)
    if os.name == 'nt' and urlparts[0] == 'file' and '|' not in urlparts[2]:
        # UNC paths must have 4 slashes: 'file:////server/path'
        # Depending on the path in urlparts[2], urlparse.urlunsplit()
        # left only two or three slashes. This is fixed below
        repl = 'file://' if urlparts[2].startswith('//') else 'file:/'
        res = res.replace('file:', repl)
    return res
[ "def", "urlunsplit", "(", "urlparts", ")", ":", "res", "=", "urlparse", ".", "urlunsplit", "(", "urlparts", ")", "if", "os", ".", "name", "==", "'nt'", "and", "urlparts", "[", "0", "]", "==", "'file'", "and", "'|'", "not", "in", "urlparts", "[", "2", "]", ":", "# UNC paths must have 4 slashes: 'file:////server/path'", "# Depending on the path in urlparts[2], urlparse.urlunsplit()", "# left only two or three slashes. This is fixed below", "repl", "=", "'file://'", "if", "urlparts", "[", "2", "]", ".", "startswith", "(", "'//'", ")", "else", "'file:/'", "res", "=", "res", ".", "replace", "(", "'file:'", ",", "repl", ")", "return", "res" ]
Same as urlparse.urlunsplit but with extra UNC path handling for Windows OS.
[ "Same", "as", "urlparse", ".", "urlunsplit", "but", "with", "extra", "UNC", "path", "handling", "for", "Windows", "OS", "." ]
python
train
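A usage sketch under Windows semantics (os.name == 'nt'), assuming the function is importable from linkcheck.url; on other platforms it simply defers to urlparse.urlunsplit:

from linkcheck.url import urlunsplit

# A file URL pointing at a UNC share: scheme 'file', empty netloc,
# and a path that already begins with two slashes.
parts = ('file', '', '//server/share/file.txt', '', '')
print(urlunsplit(parts))  # expected: file:////server/share/file.txt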
gem/oq-engine
openquake/hmtk/faults/mfd/anderson_luco_area_mmax.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/faults/mfd/anderson_luco_area_mmax.py#L119-L138
def cumulative_value(self, slip, mmax, mag_value, bbar, dbar, beta):
    '''
    Returns the rate of events with M > mag_value

    :param float slip:
        Slip rate in mm/yr
    :param float mmax:
        Maximum magnitude
    :param float mag_value:
        Magnitude value
    :param float bbar:
        \bar{b} parameter (effectively = b * log(10.))
    :param float dbar:
        \bar{d} parameter
    :param float beta:
        Beta value of formula defined in Eq. 20 of Anderson & Luco (1983)
    '''
    delta_m = mmax - mag_value
    a_2 = self._get_a2_value(bbar, dbar, slip / 10., beta, mmax)
    return a_2 * (np.exp(bbar * delta_m) - 1.0) * (delta_m > 0.0)
[ "def", "cumulative_value", "(", "self", ",", "slip", ",", "mmax", ",", "mag_value", ",", "bbar", ",", "dbar", ",", "beta", ")", ":", "delta_m", "=", "mmax", "-", "mag_value", "a_2", "=", "self", ".", "_get_a2_value", "(", "bbar", ",", "dbar", ",", "slip", "/", "10.", ",", "beta", ",", "mmax", ")", "return", "a_2", "*", "(", "np", ".", "exp", "(", "bbar", "*", "delta_m", ")", "-", "1.0", ")", "*", "(", "delta_m", ">", "0.0", ")" ]
Returns the rate of events with M > mag_value

:param float slip:
    Slip rate in mm/yr
:param float mmax:
    Maximum magnitude
:param float mag_value:
    Magnitude value
:param float bbar:
    \bar{b} parameter (effectively = b * log(10.))
:param float dbar:
    \bar{d} parameter
:param float beta:
    Beta value of formula defined in Eq. 20 of Anderson & Luco (1983)
[ "Returns", "the", "rate", "of", "events", "with", "M", ">", "mag_value" ]
python
train
geertj/pyskiplist
pyskiplist/skiplist.py
https://github.com/geertj/pyskiplist/blob/c5f94cf135d42bb277255150d3f570ed807468b2/pyskiplist/skiplist.py#L315-L333
def items(self, start=None, stop=None):
    """Return an iterator yielding pairs.

    If *start* is specified, iteration starts at the first pair with a key
    that is larger than or equal to *start*. If not specified, iteration
    starts at the first pair in the list.

    If *stop* is specified, iteration stops at the last pair that is smaller
    than *stop*. If not specified, iteration ends with the last pair in the
    list.
    """
    if start is None:
        node = self._head[2]
    else:
        self._find_lt(start)
        node = self._path[0][2]
    while node is not self._tail and (stop is None or node[0] < stop):
        yield (node[0], node[1])
        node = node[2]
[ "def", "items", "(", "self", ",", "start", "=", "None", ",", "stop", "=", "None", ")", ":", "if", "start", "is", "None", ":", "node", "=", "self", ".", "_head", "[", "2", "]", "else", ":", "self", ".", "_find_lt", "(", "start", ")", "node", "=", "self", ".", "_path", "[", "0", "]", "[", "2", "]", "while", "node", "is", "not", "self", ".", "_tail", "and", "(", "stop", "is", "None", "or", "node", "[", "0", "]", "<", "stop", ")", ":", "yield", "(", "node", "[", "0", "]", ",", "node", "[", "1", "]", ")", "node", "=", "node", "[", "2", "]" ]
Return an iterator yielding pairs. If *start* is specified, iteration starts at the first pair with a key that is larger than or equal to *start*. If not specified, iteration starts at the first pair in the list. If *stop* is specified, iteration stops at the last pair that is smaller than *stop*. If not specified, iteration ends with the last pair in the list.
[ "Return", "an", "iterator", "yielding", "pairs", "." ]
python
train
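The start/stop semantics are the usual half-open range over sorted keys. A small stand-alone sketch of the same contract using bisect on a plain sorted list (not the skip-list internals):

import bisect

pairs = [(1, 'a'), (3, 'b'), (5, 'c'), (7, 'd')]
keys = [k for k, _ in pairs]

def items(start=None, stop=None):
    # keys >= start are yielded; iteration stops at the first key >= stop
    i = 0 if start is None else bisect.bisect_left(keys, start)
    while i < len(pairs) and (stop is None or pairs[i][0] < stop):
        yield pairs[i]
        i += 1

print(list(items(start=3, stop=7)))   # [(3, 'b'), (5, 'c')]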
pandas-dev/pandas
pandas/core/panel.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L515-L541
def set_value(self, *args, **kwargs): """ Quickly set single value at (item, major, minor) location. .. deprecated:: 0.21.0 Please use .at[] or .iat[] accessors. Parameters ---------- item : item label (panel item) major : major axis label (panel item row) minor : minor axis label (panel item column) value : scalar takeable : interpret the passed labels as indexers, default False Returns ------- panel : Panel If the label combo is contained, this will be a reference to the calling Panel; otherwise a new object. """ warnings.warn("set_value is deprecated and will be removed " "in a future release. Please use " ".at[] or .iat[] accessors instead", FutureWarning, stacklevel=2) return self._set_value(*args, **kwargs)
[ "def", "set_value", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "\"set_value is deprecated and will be removed \"", "\"in a future release. Please use \"", "\".at[] or .iat[] accessors instead\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "return", "self", ".", "_set_value", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Quickly set single value at (item, major, minor) location. .. deprecated:: 0.21.0 Please use .at[] or .iat[] accessors. Parameters ---------- item : item label (panel item) major : major axis label (panel item row) minor : minor axis label (panel item column) value : scalar takeable : interpret the passed labels as indexers, default False Returns ------- panel : Panel If the label combo is contained, this will be a reference to the calling Panel; otherwise a new object.
[ "Quickly", "set", "single", "value", "at", "(", "item", "major", "minor", ")", "location", "." ]
python
train
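The replacement accessors named in the deprecation warning work on any labelled pandas object; Panel itself was removed in pandas 1.0, so a DataFrame is used here to show the pattern:

import pandas as pd

df = pd.DataFrame({'x': [1, 2], 'y': [3, 4]}, index=['a', 'b'])
df.at['a', 'y'] = 30    # label-based scalar setter, replaces set_value
df.iat[1, 0] = 20       # positional scalar setter
print(df)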
limpyd/redis-limpyd
limpyd/contrib/indexes.py
https://github.com/limpyd/redis-limpyd/blob/3c745dde1390a0bd09690b77a089dcc08c6c7e43/limpyd/contrib/indexes.py#L164-L170
def check_uniqueness(self, *args): """For a unique index, check if the given args are not used twice. For the parameters, see BaseIndex.check_uniqueness """ self.get_unique_index().check_uniqueness(*self.prepare_args(args, transform=False))
[ "def", "check_uniqueness", "(", "self", ",", "*", "args", ")", ":", "self", ".", "get_unique_index", "(", ")", ".", "check_uniqueness", "(", "*", "self", ".", "prepare_args", "(", "args", ",", "transform", "=", "False", ")", ")" ]
For a unique index, check if the given args are not used twice. For the parameters, see BaseIndex.check_uniqueness
[ "For", "a", "unique", "index", "check", "if", "the", "given", "args", "are", "not", "used", "twice" ]
python
train
astroswego/plotypus
src/plotypus/lightcurve.py
https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/lightcurve.py#L310-L341
def get_lightcurve_from_file(file, *args, use_cols=None, skiprows=0, verbosity=None, **kwargs): """get_lightcurve_from_file(file, *args, use_cols=None, skiprows=0, **kwargs) Fits a light curve to the data contained in *file* using :func:`get_lightcurve`. **Parameters** file : str or file File or filename to load data from. use_cols : iterable or None, optional Iterable of columns to read from data file, or None to read all columns (default None). skiprows : number, optional Number of rows to skip at beginning of *file* (default 0) **Returns** out : dict See :func:`get_lightcurve`. """ data = numpy.loadtxt(file, skiprows=skiprows, usecols=use_cols) if len(data) != 0: masked_data = numpy.ma.array(data=data, mask=None, dtype=float) return get_lightcurve(masked_data, *args, verbosity=verbosity, **kwargs) else: verbose_print("{}: file contains no data points".format(file), operation="coverage", verbosity=verbosity) return
[ "def", "get_lightcurve_from_file", "(", "file", ",", "*", "args", ",", "use_cols", "=", "None", ",", "skiprows", "=", "0", ",", "verbosity", "=", "None", ",", "*", "*", "kwargs", ")", ":", "data", "=", "numpy", ".", "loadtxt", "(", "file", ",", "skiprows", "=", "skiprows", ",", "usecols", "=", "use_cols", ")", "if", "len", "(", "data", ")", "!=", "0", ":", "masked_data", "=", "numpy", ".", "ma", ".", "array", "(", "data", "=", "data", ",", "mask", "=", "None", ",", "dtype", "=", "float", ")", "return", "get_lightcurve", "(", "masked_data", ",", "*", "args", ",", "verbosity", "=", "verbosity", ",", "*", "*", "kwargs", ")", "else", ":", "verbose_print", "(", "\"{}: file contains no data points\"", ".", "format", "(", "file", ")", ",", "operation", "=", "\"coverage\"", ",", "verbosity", "=", "verbosity", ")", "return" ]
get_lightcurve_from_file(file, *args, use_cols=None, skiprows=0, **kwargs) Fits a light curve to the data contained in *file* using :func:`get_lightcurve`. **Parameters** file : str or file File or filename to load data from. use_cols : iterable or None, optional Iterable of columns to read from data file, or None to read all columns (default None). skiprows : number, optional Number of rows to skip at beginning of *file* (default 0) **Returns** out : dict See :func:`get_lightcurve`.
[ "get_lightcurve_from_file", "(", "file", "*", "args", "use_cols", "=", "None", "skiprows", "=", "0", "**", "kwargs", ")" ]
python
train
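The loading step is plain numpy.loadtxt followed by wrapping in a masked array. A self-contained sketch with a hypothetical three-column photometry table (time, magnitude, error):

import io
import numpy as np

text = io.StringIO("# time mag err\n0.1 14.2 0.02\n0.3 14.5 0.03\n")
data = np.loadtxt(text, skiprows=1, usecols=(0, 1, 2))
masked = np.ma.array(data=data, mask=None, dtype=float)
print(masked.shape)   # (2, 3)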
tensorflow/tensor2tensor
tensor2tensor/models/research/attention_lm_moe.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/attention_lm_moe.py#L632-L650
def attention_lm_moe_small(): """Cheap model for single-gpu training. on lm1b_32k: ~312M params 1.6 steps/sec on [GeForce GTX TITAN X] After 50K steps on 8 GPUs (synchronous): eval_log_ppl_per_token = 3.31 Returns: an hparams object. """ hparams = attention_lm_moe_base() hparams.num_hidden_layers = 4 hparams.hidden_size = 512 hparams.filter_size = 2048 hparams.moe_num_experts = 128 hparams.moe_layers = "2" return hparams
[ "def", "attention_lm_moe_small", "(", ")", ":", "hparams", "=", "attention_lm_moe_base", "(", ")", "hparams", ".", "num_hidden_layers", "=", "4", "hparams", ".", "hidden_size", "=", "512", "hparams", ".", "filter_size", "=", "2048", "hparams", ".", "moe_num_experts", "=", "128", "hparams", ".", "moe_layers", "=", "\"2\"", "return", "hparams" ]
Cheap model for single-gpu training. on lm1b_32k: ~312M params 1.6 steps/sec on [GeForce GTX TITAN X] After 50K steps on 8 GPUs (synchronous): eval_log_ppl_per_token = 3.31 Returns: an hparams object.
[ "Cheap", "model", "for", "single", "-", "gpu", "training", "." ]
python
train
p3trus/slave
slave/iec60488.py
https://github.com/p3trus/slave/blob/bdc74e73bd0f47b74a090c43aa2283c469cde3be/slave/iec60488.py#L198-L204
def _construct_register(reg, default_reg): """Constructs a register dict.""" if reg: x = dict((k, reg.get(k, d)) for k, d in default_reg.items()) else: x = dict(default_reg) return x
[ "def", "_construct_register", "(", "reg", ",", "default_reg", ")", ":", "if", "reg", ":", "x", "=", "dict", "(", "(", "k", ",", "reg", ".", "get", "(", "k", ",", "d", ")", ")", "for", "k", ",", "d", "in", "default_reg", ".", "items", "(", ")", ")", "else", ":", "x", "=", "dict", "(", "default_reg", ")", "return", "x" ]
Constructs a register dict.
[ "Constructs", "a", "register", "dict", "." ]
python
train
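The helper is a defaults-merge: keys present in default_reg are taken from reg when available, and keys unknown to default_reg are silently dropped. A stand-alone sketch with hypothetical IEC 60488 status-bit names:

def construct_register(reg, default_reg):
    # Keep only the keys the default register knows about.
    if reg:
        return {k: reg.get(k, d) for k, d in default_reg.items()}
    return dict(default_reg)

defaults = {0: 'OPC', 1: 'RQC', 2: 'QYE'}
print(construct_register({1: 'CUSTOM', 9: 'IGNORED'}, defaults))
# {0: 'OPC', 1: 'CUSTOM', 2: 'QYE'}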
galaxyproject/pulsar
pulsar/cache/util.py
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/cache/util.py#L8-L27
def atomicish_move(source, destination, tmp_suffix="_TMP"): """Move source to destination without risk of partial moves. >>> from tempfile import mkdtemp >>> from os.path import join, exists >>> temp_dir = mkdtemp() >>> source = join(temp_dir, "the_source") >>> destination = join(temp_dir, "the_dest") >>> open(source, "wb").write(b"Hello World!") >>> assert exists(source) >>> assert not exists(destination) >>> atomicish_move(source, destination) >>> assert not exists(source) >>> assert exists(destination) """ destination_dir = os.path.dirname(destination) destination_name = os.path.basename(destination) temp_destination = os.path.join(destination_dir, "%s%s" % (destination_name, tmp_suffix)) shutil.move(source, temp_destination) os.rename(temp_destination, destination)
[ "def", "atomicish_move", "(", "source", ",", "destination", ",", "tmp_suffix", "=", "\"_TMP\"", ")", ":", "destination_dir", "=", "os", ".", "path", ".", "dirname", "(", "destination", ")", "destination_name", "=", "os", ".", "path", ".", "basename", "(", "destination", ")", "temp_destination", "=", "os", ".", "path", ".", "join", "(", "destination_dir", ",", "\"%s%s\"", "%", "(", "destination_name", ",", "tmp_suffix", ")", ")", "shutil", ".", "move", "(", "source", ",", "temp_destination", ")", "os", ".", "rename", "(", "temp_destination", ",", "destination", ")" ]
Move source to destination without risk of partial moves. >>> from tempfile import mkdtemp >>> from os.path import join, exists >>> temp_dir = mkdtemp() >>> source = join(temp_dir, "the_source") >>> destination = join(temp_dir, "the_dest") >>> open(source, "wb").write(b"Hello World!") >>> assert exists(source) >>> assert not exists(destination) >>> atomicish_move(source, destination) >>> assert not exists(source) >>> assert exists(destination)
[ "Move", "source", "to", "destination", "without", "risk", "of", "partial", "moves", "." ]
python
train
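The docstring example runs as-is once the function is in scope; the key point is that os.rename() is atomic on POSIX when source and target live on the same filesystem, which the sibling temp name guarantees. A compact driver:

import os
import shutil
from os.path import exists, join
from tempfile import mkdtemp

def atomicish_move(source, destination, tmp_suffix="_TMP"):
    # Stage next to the target, then atomically rename into place.
    tmp = join(os.path.dirname(destination),
               os.path.basename(destination) + tmp_suffix)
    shutil.move(source, tmp)
    os.rename(tmp, destination)

temp_dir = mkdtemp()
source, destination = join(temp_dir, "the_source"), join(temp_dir, "the_dest")
with open(source, "wb") as fh:
    fh.write(b"Hello World!")
atomicish_move(source, destination)
assert not exists(source) and exists(destination)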
obriencj/python-javatools
javatools/report.py
https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/report.py#L388-L398
def _indent(stream, indent, *msgs): """ write a message to a text stream, with indentation. Also ensures that the output encoding of the messages is safe for writing. """ for x in range(0, indent): stream.write(" ") for x in msgs: # Any nicer way? In Py2 x can be 'str' or 'unicode'. stream.write(x.encode("ascii", "backslashreplace").decode("ascii")) stream.write("\n")
[ "def", "_indent", "(", "stream", ",", "indent", ",", "*", "msgs", ")", ":", "for", "x", "in", "range", "(", "0", ",", "indent", ")", ":", "stream", ".", "write", "(", "\" \"", ")", "for", "x", "in", "msgs", ":", "# Any nicer way? In Py2 x can be 'str' or 'unicode'.", "stream", ".", "write", "(", "x", ".", "encode", "(", "\"ascii\"", ",", "\"backslashreplace\"", ")", ".", "decode", "(", "\"ascii\"", ")", ")", "stream", ".", "write", "(", "\"\\n\"", ")" ]
write a message to a text stream, with indentation. Also ensures that the output encoding of the messages is safe for writing.
[ "write", "a", "message", "to", "a", "text", "stream", "with", "indentation", ".", "Also", "ensures", "that", "the", "output", "encoding", "of", "the", "messages", "is", "safe", "for", "writing", "." ]
python
train
greyli/flask-avatars
flask_avatars/identicon.py
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L110-L126
def _string_to_byte_list(self, data): """ Creates a hex digest of the input string given to create the image, if it's not already hexadecimal Returns: Length 16 list of rgb value range integers (each representing a byte of the hex digest) """ bytes_length = 16 m = self.digest() m.update(str.encode(data)) hex_digest = m.hexdigest() return list(int(hex_digest[num * 2:num * 2 + 2], bytes_length) for num in range(bytes_length))
[ "def", "_string_to_byte_list", "(", "self", ",", "data", ")", ":", "bytes_length", "=", "16", "m", "=", "self", ".", "digest", "(", ")", "m", ".", "update", "(", "str", ".", "encode", "(", "data", ")", ")", "hex_digest", "=", "m", ".", "hexdigest", "(", ")", "return", "list", "(", "int", "(", "hex_digest", "[", "num", "*", "2", ":", "num", "*", "2", "+", "2", "]", ",", "bytes_length", ")", "for", "num", "in", "range", "(", "bytes_length", ")", ")" ]
Creates a hex digest of the input string given to create the image, if it's not already hexadecimal Returns: Length 16 list of rgb value range integers (each representing a byte of the hex digest)
[ "Creates", "a", "hex", "digest", "of", "the", "input", "string", "given", "to", "create", "the", "image", "if", "it", "s", "not", "already", "hexadecimal" ]
python
train
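Two hex characters encode one byte, so a 32-character digest yields the sixteen 0-255 integers the identicon grid is built from. The same slicing with hashlib directly (md5 is assumed here purely for illustration; the class takes its digest from self.digest()):

import hashlib

hex_digest = hashlib.md5("some-seed".encode()).hexdigest()
byte_list = [int(hex_digest[n * 2:n * 2 + 2], 16) for n in range(16)]
print(len(byte_list), byte_list[:4])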
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L11609-L11620
def log_request_list_encode(self, target_system, target_component, start, end): ''' Request a list of available logs. On some systems calling this may stop on-board logging until LOG_REQUEST_END is called. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) start : First log id (0 for first available) (uint16_t) end : Last log id (0xffff for last available) (uint16_t) ''' return MAVLink_log_request_list_message(target_system, target_component, start, end)
[ "def", "log_request_list_encode", "(", "self", ",", "target_system", ",", "target_component", ",", "start", ",", "end", ")", ":", "return", "MAVLink_log_request_list_message", "(", "target_system", ",", "target_component", ",", "start", ",", "end", ")" ]
Request a list of available logs. On some systems calling this may stop on-board logging until LOG_REQUEST_END is called. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) start : First log id (0 for first available) (uint16_t) end : Last log id (0xffff for last available) (uint16_t)
[ "Request", "a", "list", "of", "available", "logs", ".", "On", "some", "systems", "calling", "this", "may", "stop", "on", "-", "board", "logging", "until", "LOG_REQUEST_END", "is", "called", "." ]
python
train
jacopofar/runtime_typecheck
runtime_typecheck/runtime_typecheck.py
https://github.com/jacopofar/runtime_typecheck/blob/9a5e5caff65751bfd711cfe8b4b7dd31d5ece215/runtime_typecheck/runtime_typecheck.py#L15-L91
def check_type(obj: Any, candidate_type: Any, reltype: str = 'invariant') -> bool: """Tell whether a value corresponds to a type, optionally specifying the type as contravariant or covariant. Args: obj (Any): The value to check. candidate_type (Any): The type to check the object against. reltype (:obj:`str`, optional): Variance of the type, can be contravariant, covariant or invariant. By default it is invariant. Returns: bool: True if the type is fine, False otherwise Raises: ValueError: When the variance or the type is not among the ones the function can manage. """ if reltype not in ['invariant', 'covariant', 'contravariant']: raise ValueError(f' Variadic type {reltype} is unknown') # builtin type like str, or a class if type(candidate_type) == type and reltype in ['invariant']: return isinstance(obj, candidate_type) if type(candidate_type) == type and reltype in ['covariant']: return issubclass(obj.__class__, candidate_type) if type(candidate_type) == type and reltype in ['contravariant']: return issubclass(candidate_type, obj.__class__) # Any accepts everything if type(candidate_type) == type(Any): return True # Union, at least one match in __args__ if type(candidate_type) == type(Union): return any(check_type(obj, t, reltype) for t in candidate_type.__args__) # Tuple, each element matches the corresponding type in __args__ if type(candidate_type) == type(Tuple) and tuple in candidate_type.__bases__: if not hasattr(obj, '__len__'): return False if len(candidate_type.__args__) != len(obj): return False return all(check_type(o, t, reltype) for (o, t) in zip(obj, candidate_type.__args__)) # Dict, each (key, value) matches the type in __args__ if type(candidate_type) == type(Dict) and dict in candidate_type.__bases__: if type(obj) != dict: return False return all(check_type(k, candidate_type.__args__[0], reltype) and check_type(v, candidate_type.__args__[1], reltype) for (k, v) in obj.items()) # List or Set, each element matches the type in __args__ if type(candidate_type) == type(List) and \ (list in candidate_type.__bases__ or set in candidate_type.__bases__): if not hasattr(obj, '__len__'): return False return all(check_type(o, candidate_type.__args__[0], reltype) for o in obj) # TypeVar, this is tricky if type(candidate_type) == TypeVar: # TODO consider contravariant, variant and bound # invariant with a list of constraints, acts like a Tuple if not candidate_type.__constraints__: return True if not (candidate_type.__covariant__ or candidate_type.__contravariant__): return any(check_type(obj, t) for t in candidate_type.__constraints__) if type(candidate_type) == type(Type): return check_type(obj, candidate_type.__args__[0], reltype='covariant') if inspect.isclass(candidate_type) and reltype in ['invariant']: return isinstance(obj, candidate_type) raise ValueError(f'Cannot check against {reltype} type {candidate_type}')
[ "def", "check_type", "(", "obj", ":", "Any", ",", "candidate_type", ":", "Any", ",", "reltype", ":", "str", "=", "'invariant'", ")", "->", "bool", ":", "if", "reltype", "not", "in", "[", "'invariant'", ",", "'covariant'", ",", "'contravariant'", "]", ":", "raise", "ValueError", "(", "f' Variadic type {reltype} is unknown'", ")", "# builtin type like str, or a class", "if", "type", "(", "candidate_type", ")", "==", "type", "and", "reltype", "in", "[", "'invariant'", "]", ":", "return", "isinstance", "(", "obj", ",", "candidate_type", ")", "if", "type", "(", "candidate_type", ")", "==", "type", "and", "reltype", "in", "[", "'covariant'", "]", ":", "return", "issubclass", "(", "obj", ".", "__class__", ",", "candidate_type", ")", "if", "type", "(", "candidate_type", ")", "==", "type", "and", "reltype", "in", "[", "'contravariant'", "]", ":", "return", "issubclass", "(", "candidate_type", ",", "obj", ".", "__class__", ")", "# Any accepts everything", "if", "type", "(", "candidate_type", ")", "==", "type", "(", "Any", ")", ":", "return", "True", "# Union, at least one match in __args__", "if", "type", "(", "candidate_type", ")", "==", "type", "(", "Union", ")", ":", "return", "any", "(", "check_type", "(", "obj", ",", "t", ",", "reltype", ")", "for", "t", "in", "candidate_type", ".", "__args__", ")", "# Tuple, each element matches the corresponding type in __args__", "if", "type", "(", "candidate_type", ")", "==", "type", "(", "Tuple", ")", "and", "tuple", "in", "candidate_type", ".", "__bases__", ":", "if", "not", "hasattr", "(", "obj", ",", "'__len__'", ")", ":", "return", "False", "if", "len", "(", "candidate_type", ".", "__args__", ")", "!=", "len", "(", "obj", ")", ":", "return", "False", "return", "all", "(", "check_type", "(", "o", ",", "t", ",", "reltype", ")", "for", "(", "o", ",", "t", ")", "in", "zip", "(", "obj", ",", "candidate_type", ".", "__args__", ")", ")", "# Dict, each (key, value) matches the type in __args__", "if", "type", "(", "candidate_type", ")", "==", "type", "(", "Dict", ")", "and", "dict", "in", "candidate_type", ".", "__bases__", ":", "if", "type", "(", "obj", ")", "!=", "dict", ":", "return", "False", "return", "all", "(", "check_type", "(", "k", ",", "candidate_type", ".", "__args__", "[", "0", "]", ",", "reltype", ")", "and", "check_type", "(", "v", ",", "candidate_type", ".", "__args__", "[", "1", "]", ",", "reltype", ")", "for", "(", "k", ",", "v", ")", "in", "obj", ".", "items", "(", ")", ")", "# List or Set, each element matches the type in __args__", "if", "type", "(", "candidate_type", ")", "==", "type", "(", "List", ")", "and", "(", "list", "in", "candidate_type", ".", "__bases__", "or", "set", "in", "candidate_type", ".", "__bases__", ")", ":", "if", "not", "hasattr", "(", "obj", ",", "'__len__'", ")", ":", "return", "False", "return", "all", "(", "check_type", "(", "o", ",", "candidate_type", ".", "__args__", "[", "0", "]", ",", "reltype", ")", "for", "o", "in", "obj", ")", "# TypeVar, this is tricky", "if", "type", "(", "candidate_type", ")", "==", "TypeVar", ":", "# TODO consider contravariant, variant and bound", "# invariant with a list of constraints, acts like a Tuple", "if", "not", "candidate_type", ".", "__constraints__", ":", "return", "True", "if", "not", "(", "candidate_type", ".", "__covariant__", "or", "candidate_type", ".", "__contravariant__", ")", ":", "return", "any", "(", "check_type", "(", "obj", ",", "t", ")", "for", "t", "in", "candidate_type", ".", "__constraints__", ")", "if", "type", "(", "candidate_type", ")", "==", "type", 
"(", "Type", ")", ":", "return", "check_type", "(", "obj", ",", "candidate_type", ".", "__args__", "[", "0", "]", ",", "reltype", "=", "'covariant'", ")", "if", "inspect", ".", "isclass", "(", "candidate_type", ")", "and", "reltype", "in", "[", "'invariant'", "]", ":", "return", "isinstance", "(", "obj", ",", "candidate_type", ")", "raise", "ValueError", "(", "f'Cannot check against {reltype} type {candidate_type}'", ")" ]
Tell whether a value corresponds to a type, optionally specifying the type as contravariant or covariant. Args: obj (Any): The value to check. candidate_type (Any): The type to check the object against. reltype (:obj:`str`, optional): Variance of the type, can be contravariant, covariant or invariant. By default it is invariant. Returns: bool: True if the type is fine, False otherwise Raises: ValueError: When the variance or the type is not among the ones the function can manage.
[ "Tell", "wether", "a", "value", "correspond", "to", "a", "type", "optionally", "specifying", "the", "type", "as", "contravariant", "or", "covariant", "." ]
python
train
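Since Python 3.8 the same dispatch can lean on typing.get_origin()/get_args() instead of comparing type() results. A narrow sketch covering only the invariant Union/List/Dict cases above:

from typing import Dict, List, Union, get_args, get_origin

def simple_check(obj, tp):
    origin = get_origin(tp)
    if origin is Union:
        return any(simple_check(obj, t) for t in get_args(tp))
    if origin is list:
        return isinstance(obj, list) and all(
            simple_check(o, get_args(tp)[0]) for o in obj)
    if origin is dict:
        kt, vt = get_args(tp)
        return isinstance(obj, dict) and all(
            simple_check(k, kt) and simple_check(v, vt)
            for k, v in obj.items())
    return isinstance(obj, tp)        # plain classes, invariant

print(simple_check({'a': [1, 2]}, Dict[str, List[int]]))    # True
print(simple_check({'a': [1, 'x']}, Dict[str, List[int]]))  # False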
pmelchior/proxmin
proxmin/algorithms.py
https://github.com/pmelchior/proxmin/blob/60e49d90c67c46329cc1d3b5c484951dc8bd2c3f/proxmin/algorithms.py#L163-L271
def sdmm(X, prox_f, step_f, proxs_g=None, steps_g=None, Ls=None, e_rel=1e-6, e_abs=0, max_iter=1000, traceback=None): """Simultaneous-Direction Method of Multipliers This method is an extension of the linearized ADMM for multiple constraints. Args: X: initial X, will be updated prox_f: proxed function f step_f: step size for prox_f proxs_g: list of proxed functions steps_g: specific value of step size for proxs_g (experts only!) If set, needs to have same format as proxs_g. By default, set to the maximum value of step_f * ||L_i||_s^2. Ls: linear operators of the argument of g_i. If set, needs to have same format as proxs_g. Matrices can be numpy.array, scipy.sparse, or None (for identity). e_rel: relative error threshold for primal and dual residuals e_abs: absolute error threshold for primal and dual residuals max_iter: maximum iteration number, irrespective of current residuals traceback: utils.Traceback to hold variable histories Returns: converged: whether the optimizer has converged within e_rel error: X^it - X^it-1 See also: algorithms.admm Reference: Moolekamp & Melchior, Algorithm 2 (arXiv:1708.09066) """ # fall-back to simple ADMM if proxs_g is None or not hasattr(proxs_g, '__iter__'): return admm(X, prox_f, step_f, prox_g=proxs_g, step_g=steps_g, L=Ls, e_rel=e_rel, max_iter=max_iter, traceback=traceback) # from here on we know that proxs_g is a list M = len(proxs_g) # if steps_g / Ls are None or single: create M duplicates if not hasattr(steps_g, "__iter__"): steps_g = [steps_g] * M if not hasattr(Ls, "__iter__"): Ls = [Ls] * M # check for cases in which a list was given assert len(steps_g) == M assert len(Ls) == M # get/check compatible step sizes for g # use matrix adapter for convenient & fast notation _L = [] for i in range(M): _L.append(utils.MatrixAdapter(Ls[i])) # get/check compatible step size for g if steps_g[i] is None: steps_g[i] = utils.get_step_g(step_f, _L[i].spectral_norm, M=M) # Initialization Z,U = utils.initZU(X, _L) it, omega = 0, 0 if traceback is not None: traceback.update_history(it, X=X, step_f=step_f, omega=omega) traceback.update_history(it, M=M, Z=Z, U=U, R=U, S=[np.zeros(X.shape, dtype=X.dtype) for n in range(M)], steps_g=steps_g) while it < max_iter: # update the variables LX, R, S = utils.update_variables(X, Z, U, prox_f, step_f, proxs_g, steps_g, _L) if traceback is not None: traceback.update_history(it+1, X=X, step_f=step_f, omega=omega) traceback.update_history(it+1, M=M, Z=Z, U=U, R=R, S=S, steps_g=steps_g) # convergence criteria, adapted from Boyd 2011, Sec 3.3.1 converged, errors = utils.check_constraint_convergence(X, _L, LX, Z, U, R, S, step_f, steps_g, e_rel, e_abs) if converged: break it += 1 # if X and primal residual does not change: decrease step_f and step_g, and restart if it > 1: if (X == X_).all() and all([(R[i] == R_[i]).all() for i in range(M)]): step_f /= 2 for i in range(M): steps_g[i] /= 2 # re-init it = 0 Z,U = utils.initZU(X, _L) if traceback is not None: traceback.reset() traceback.update_history(it, X=X, step_f=step_f) traceback.update_history(it, M=M, Z=Z, U=U, R=U, S=[np.zeros(X.shape, dtype=X.dtype) for n in range(M)], steps_g=steps_g) logger.info("Restarting with step_f = %.3f" % step_f) R_ = R X_ = X.copy() logger.info("Completed {0} iterations".format(it+1)) if not converged: logger.warning("Solution did not converge") return converged, errors
[ "def", "sdmm", "(", "X", ",", "prox_f", ",", "step_f", ",", "proxs_g", "=", "None", ",", "steps_g", "=", "None", ",", "Ls", "=", "None", ",", "e_rel", "=", "1e-6", ",", "e_abs", "=", "0", ",", "max_iter", "=", "1000", ",", "traceback", "=", "None", ")", ":", "# fall-back to simple ADMM", "if", "proxs_g", "is", "None", "or", "not", "hasattr", "(", "proxs_g", ",", "'__iter__'", ")", ":", "return", "admm", "(", "X", ",", "prox_f", ",", "step_f", ",", "prox_g", "=", "proxs_g", ",", "step_g", "=", "steps_g", ",", "L", "=", "Ls", ",", "e_rel", "=", "e_rel", ",", "max_iter", "=", "max_iter", ",", "traceback", "=", "traceback", ")", "# from here on we know that proxs_g is a list", "M", "=", "len", "(", "proxs_g", ")", "# if steps_g / Ls are None or single: create M duplicates", "if", "not", "hasattr", "(", "steps_g", ",", "\"__iter__\"", ")", ":", "steps_g", "=", "[", "steps_g", "]", "*", "M", "if", "not", "hasattr", "(", "Ls", ",", "\"__iter__\"", ")", ":", "Ls", "=", "[", "Ls", "]", "*", "M", "# check for cases in which a list was given", "assert", "len", "(", "steps_g", ")", "==", "M", "assert", "len", "(", "Ls", ")", "==", "M", "# get/check compatible step sizes for g", "# use matrix adapter for convenient & fast notation", "_L", "=", "[", "]", "for", "i", "in", "range", "(", "M", ")", ":", "_L", ".", "append", "(", "utils", ".", "MatrixAdapter", "(", "Ls", "[", "i", "]", ")", ")", "# get/check compatible step size for g", "if", "steps_g", "[", "i", "]", "is", "None", ":", "steps_g", "[", "i", "]", "=", "utils", ".", "get_step_g", "(", "step_f", ",", "_L", "[", "i", "]", ".", "spectral_norm", ",", "M", "=", "M", ")", "# Initialization", "Z", ",", "U", "=", "utils", ".", "initZU", "(", "X", ",", "_L", ")", "it", ",", "omega", "=", "0", ",", "0", "if", "traceback", "is", "not", "None", ":", "traceback", ".", "update_history", "(", "it", ",", "X", "=", "X", ",", "step_f", "=", "step_f", ",", "omega", "=", "omega", ")", "traceback", ".", "update_history", "(", "it", ",", "M", "=", "M", ",", "Z", "=", "Z", ",", "U", "=", "U", ",", "R", "=", "U", ",", "S", "=", "[", "np", ".", "zeros", "(", "X", ".", "shape", ",", "dtype", "=", "X", ".", "dtype", ")", "for", "n", "in", "range", "(", "M", ")", "]", ",", "steps_g", "=", "steps_g", ")", "while", "it", "<", "max_iter", ":", "# update the variables", "LX", ",", "R", ",", "S", "=", "utils", ".", "update_variables", "(", "X", ",", "Z", ",", "U", ",", "prox_f", ",", "step_f", ",", "proxs_g", ",", "steps_g", ",", "_L", ")", "if", "traceback", "is", "not", "None", ":", "traceback", ".", "update_history", "(", "it", "+", "1", ",", "X", "=", "X", ",", "step_f", "=", "step_f", ",", "omega", "=", "omega", ")", "traceback", ".", "update_history", "(", "it", "+", "1", ",", "M", "=", "M", ",", "Z", "=", "Z", ",", "U", "=", "U", ",", "R", "=", "R", ",", "S", "=", "S", ",", "steps_g", "=", "steps_g", ")", "# convergence criteria, adapted from Boyd 2011, Sec 3.3.1", "converged", ",", "errors", "=", "utils", ".", "check_constraint_convergence", "(", "X", ",", "_L", ",", "LX", ",", "Z", ",", "U", ",", "R", ",", "S", ",", "step_f", ",", "steps_g", ",", "e_rel", ",", "e_abs", ")", "if", "converged", ":", "break", "it", "+=", "1", "# if X and primal residual does not change: decrease step_f and step_g, and restart", "if", "it", ">", "1", ":", "if", "(", "X", "==", "X_", ")", ".", "all", "(", ")", "and", "all", "(", "[", "(", "R", "[", "i", "]", "==", "R_", "[", "i", "]", ")", ".", "all", "(", ")", "for", "i", "in", "range", "(", "M", ")", "]", ")", ":", "step_f", "/=", 
"2", "for", "i", "in", "range", "(", "M", ")", ":", "steps_g", "[", "i", "]", "/=", "2", "# re-init", "it", "=", "0", "Z", ",", "U", "=", "utils", ".", "initZU", "(", "X", ",", "_L", ")", "if", "traceback", "is", "not", "None", ":", "traceback", ".", "reset", "(", ")", "traceback", ".", "update_history", "(", "it", ",", "X", "=", "X", ",", "step_f", "=", "step_f", ")", "traceback", ".", "update_history", "(", "it", ",", "M", "=", "M", ",", "Z", "=", "Z", ",", "U", "=", "U", ",", "R", "=", "U", ",", "S", "=", "[", "np", ".", "zeros", "(", "X", ".", "shape", ",", "dtype", "=", "X", ".", "dtype", ")", "for", "n", "in", "range", "(", "M", ")", "]", ",", "steps_g", "=", "steps_g", ")", "logger", ".", "info", "(", "\"Restarting with step_f = %.3f\"", "%", "step_f", ")", "R_", "=", "R", "X_", "=", "X", ".", "copy", "(", ")", "logger", ".", "info", "(", "\"Completed {0} iterations\"", ".", "format", "(", "it", "+", "1", ")", ")", "if", "not", "converged", ":", "logger", ".", "warning", "(", "\"Solution did not converge\"", ")", "return", "converged", ",", "errors" ]
Simultaneous-Direction Method of Multipliers This method is an extension of the linearized ADMM for multiple constraints. Args: X: initial X, will be updated prox_f: proxed function f step_f: step size for prox_f proxs_g: list of proxed functions steps_g: specific value of step size for proxs_g (experts only!) If set, needs to have same format as proxs_g. By default, set to the maximum value of step_f * ||L_i||_s^2. Ls: linear operators of the argument of g_i. If set, needs to have same format as proxs_g. Matrices can be numpy.array, scipy.sparse, or None (for identity). e_rel: relative error threshold for primal and dual residuals e_abs: absolute error threshold for primal and dual residuals max_iter: maximum iteration number, irrespective of current residuals traceback: utils.Traceback to hold variable histories Returns: converged: whether the optimizer has converged within e_rel error: X^it - X^it-1 See also: algorithms.admm Reference: Moolekamp & Melchior, Algorithm 2 (arXiv:1708.09066)
[ "Simultaneous", "-", "Direction", "Method", "of", "Multipliers" ]
python
train
pytroll/satpy
satpy/readers/caliop_l2_cloud.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/caliop_l2_cloud.py#L101-L105
def get_lonlats(self): """Get longitude and latitude arrays from the file.""" longitudes = self.get_sds_variable('Longitude') latitudes = self.get_sds_variable('Latitude') return longitudes, latitudes
[ "def", "get_lonlats", "(", "self", ")", ":", "longitudes", "=", "self", ".", "get_sds_variable", "(", "'Longitude'", ")", "latitudes", "=", "self", ".", "get_sds_variable", "(", "'Latitude'", ")", "return", "longitudes", ",", "latitudes" ]
Get longitude and latitude arrays from the file.
[ "Get", "longitude", "and", "latitude", "arrays", "from", "the", "file", "." ]
python
train
apache/spark
python/pyspark/mllib/classification.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/classification.py#L195-L231
def predict(self, x): """ Predict values for a single data point or an RDD of points using the model trained. """ if isinstance(x, RDD): return x.map(lambda v: self.predict(v)) x = _convert_to_vector(x) if self.numClasses == 2: margin = self.weights.dot(x) + self._intercept if margin > 0: prob = 1 / (1 + exp(-margin)) else: exp_margin = exp(margin) prob = exp_margin / (1 + exp_margin) if self._threshold is None: return prob else: return 1 if prob > self._threshold else 0 else: best_class = 0 max_margin = 0.0 if x.size + 1 == self._dataWithBiasSize: for i in range(0, self._numClasses - 1): margin = x.dot(self._weightsMatrix[i][0:x.size]) + \ self._weightsMatrix[i][x.size] if margin > max_margin: max_margin = margin best_class = i + 1 else: for i in range(0, self._numClasses - 1): margin = x.dot(self._weightsMatrix[i]) if margin > max_margin: max_margin = margin best_class = i + 1 return best_class
[ "def", "predict", "(", "self", ",", "x", ")", ":", "if", "isinstance", "(", "x", ",", "RDD", ")", ":", "return", "x", ".", "map", "(", "lambda", "v", ":", "self", ".", "predict", "(", "v", ")", ")", "x", "=", "_convert_to_vector", "(", "x", ")", "if", "self", ".", "numClasses", "==", "2", ":", "margin", "=", "self", ".", "weights", ".", "dot", "(", "x", ")", "+", "self", ".", "_intercept", "if", "margin", ">", "0", ":", "prob", "=", "1", "/", "(", "1", "+", "exp", "(", "-", "margin", ")", ")", "else", ":", "exp_margin", "=", "exp", "(", "margin", ")", "prob", "=", "exp_margin", "/", "(", "1", "+", "exp_margin", ")", "if", "self", ".", "_threshold", "is", "None", ":", "return", "prob", "else", ":", "return", "1", "if", "prob", ">", "self", ".", "_threshold", "else", "0", "else", ":", "best_class", "=", "0", "max_margin", "=", "0.0", "if", "x", ".", "size", "+", "1", "==", "self", ".", "_dataWithBiasSize", ":", "for", "i", "in", "range", "(", "0", ",", "self", ".", "_numClasses", "-", "1", ")", ":", "margin", "=", "x", ".", "dot", "(", "self", ".", "_weightsMatrix", "[", "i", "]", "[", "0", ":", "x", ".", "size", "]", ")", "+", "self", ".", "_weightsMatrix", "[", "i", "]", "[", "x", ".", "size", "]", "if", "margin", ">", "max_margin", ":", "max_margin", "=", "margin", "best_class", "=", "i", "+", "1", "else", ":", "for", "i", "in", "range", "(", "0", ",", "self", ".", "_numClasses", "-", "1", ")", ":", "margin", "=", "x", ".", "dot", "(", "self", ".", "_weightsMatrix", "[", "i", "]", ")", "if", "margin", ">", "max_margin", ":", "max_margin", "=", "margin", "best_class", "=", "i", "+", "1", "return", "best_class" ]
Predict values for a single data point or an RDD of points using the model trained.
[ "Predict", "values", "for", "a", "single", "data", "point", "or", "an", "RDD", "of", "points", "using", "the", "model", "trained", "." ]
python
train
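The binary branch is the numerically stable sigmoid: the sign test ensures exp() only ever sees a non-positive argument, so neither form can overflow. Isolated:

import numpy as np

def stable_sigmoid(margin):
    if margin > 0:
        return 1.0 / (1.0 + np.exp(-margin))
    exp_margin = np.exp(margin)       # margin <= 0, so this cannot overflow
    return exp_margin / (1.0 + exp_margin)

for m in (-1000.0, -1.0, 0.0, 1.0, 1000.0):
    print(m, stable_sigmoid(m))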
jedie/DragonPy
dragonpy/core/gui.py
https://github.com/jedie/DragonPy/blob/6659e5b5133aab26979a498ee7453495773a4f6c/dragonpy/core/gui.py#L439-L455
def event_key_pressed(self, event): """ Do an "invert shift" for user inputs: Convert all lowercase letters to uppercase and vice versa. """ char = event.char if not char: return if char in string.ascii_letters: char = invert_shift(char) self.user_input_queue.put(char) # Don't insert the char in text widget, because it will be echoed # back from the machine! return "break"
[ "def", "event_key_pressed", "(", "self", ",", "event", ")", ":", "char", "=", "event", ".", "char", "if", "not", "char", ":", "return", "if", "char", "in", "string", ".", "ascii_letters", ":", "char", "=", "invert_shift", "(", "char", ")", "self", ".", "user_input_queue", ".", "put", "(", "char", ")", "# Don't insert the char in text widget, because it will be echoed", "# back from the machine!", "return", "\"break\"" ]
So a "invert shift" for user inputs: Convert all lowercase letters to uppercase and vice versa.
[ "So", "a", "invert", "shift", "for", "user", "inputs", ":", "Convert", "all", "lowercase", "letters", "to", "uppercase", "and", "vice", "versa", "." ]
python
train
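invert_shift is a DragonPy helper not shown here; for a single ASCII letter the effect the docstring describes is exactly str.swapcase(), which is enough to reproduce it:

import string

def invert_shift(char):
    # Stand-in for DragonPy's helper: swap the case of one ASCII letter.
    return char.swapcase()

typed = "Hello BASIC"
echoed = "".join(
    invert_shift(c) if c in string.ascii_letters else c for c in typed)
print(echoed)   # hELLO basic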
Julian/Ivoire
ivoire/transform.py
https://github.com/Julian/Ivoire/blob/5b8218cffa409ed733cf850a6fde16fafb8fc2af/ivoire/transform.py#L77-L94
def transform_describe_body(self, body, group_var): """ Transform the body of an ``ExampleGroup``. ``body`` is the body. ``group_var`` is the name bound to the example group in the context manager (usually "it"). """ for node in body: withitem, = node.items context_expr = withitem.context_expr name = context_expr.args[0].s context_var = withitem.optional_vars.id yield self.transform_example(node, name, context_var, group_var)
[ "def", "transform_describe_body", "(", "self", ",", "body", ",", "group_var", ")", ":", "for", "node", "in", "body", ":", "withitem", ",", "=", "node", ".", "items", "context_expr", "=", "withitem", ".", "context_expr", "name", "=", "context_expr", ".", "args", "[", "0", "]", ".", "s", "context_var", "=", "withitem", ".", "optional_vars", ".", "id", "yield", "self", ".", "transform_example", "(", "node", ",", "name", ",", "context_var", ",", "group_var", ")" ]
Transform the body of an ``ExampleGroup``. ``body`` is the body. ``group_var`` is the name bound to the example group in the context manager (usually "it").
[ "Transform", "the", "body", "of", "an", "ExampleGroup", "." ]
python
test
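The attribute walk (node.items, context_expr, optional_vars) is plain ast API and easy to poke at interactively. Note that on Python 3.8+ string literals parse as ast.Constant with a .value attribute, where the transformer above uses the older ast.Str .s:

import ast

source = 'with it("does something") as test:\n    pass\n'
node = ast.parse(source).body[0]            # the ast.With node
withitem, = node.items
print(withitem.context_expr.args[0].value)  # does something
print(withitem.optional_vars.id)            # test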
Azure/azure-sdk-for-python
azure-mgmt-containerregistry/azure/mgmt/containerregistry/container_registry_management_client.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-mgmt-containerregistry/azure/mgmt/containerregistry/container_registry_management_client.py#L157-L176
def operations(self): """Instance depends on the API version: * 2017-03-01: :class:`Operations<azure.mgmt.containerregistry.v2017_03_01.operations.Operations>` * 2017-10-01: :class:`Operations<azure.mgmt.containerregistry.v2017_10_01.operations.Operations>` * 2018-02-01-preview: :class:`Operations<azure.mgmt.containerregistry.v2018_02_01_preview.operations.Operations>` * 2018-09-01: :class:`Operations<azure.mgmt.containerregistry.v2018_09_01.operations.Operations>` """ api_version = self._get_api_version('operations') if api_version == '2017-03-01': from .v2017_03_01.operations import Operations as OperationClass elif api_version == '2017-10-01': from .v2017_10_01.operations import Operations as OperationClass elif api_version == '2018-02-01-preview': from .v2018_02_01_preview.operations import Operations as OperationClass elif api_version == '2018-09-01': from .v2018_09_01.operations import Operations as OperationClass else: raise NotImplementedError("APIVersion {} is not available".format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
[ "def", "operations", "(", "self", ")", ":", "api_version", "=", "self", ".", "_get_api_version", "(", "'operations'", ")", "if", "api_version", "==", "'2017-03-01'", ":", "from", ".", "v2017_03_01", ".", "operations", "import", "Operations", "as", "OperationClass", "elif", "api_version", "==", "'2017-10-01'", ":", "from", ".", "v2017_10_01", ".", "operations", "import", "Operations", "as", "OperationClass", "elif", "api_version", "==", "'2018-02-01-preview'", ":", "from", ".", "v2018_02_01_preview", ".", "operations", "import", "Operations", "as", "OperationClass", "elif", "api_version", "==", "'2018-09-01'", ":", "from", ".", "v2018_09_01", ".", "operations", "import", "Operations", "as", "OperationClass", "else", ":", "raise", "NotImplementedError", "(", "\"APIVersion {} is not available\"", ".", "format", "(", "api_version", ")", ")", "return", "OperationClass", "(", "self", ".", "_client", ",", "self", ".", "config", ",", "Serializer", "(", "self", ".", "_models_dict", "(", "api_version", ")", ")", ",", "Deserializer", "(", "self", ".", "_models_dict", "(", "api_version", ")", ")", ")" ]
Instance depends on the API version: * 2017-03-01: :class:`Operations<azure.mgmt.containerregistry.v2017_03_01.operations.Operations>` * 2017-10-01: :class:`Operations<azure.mgmt.containerregistry.v2017_10_01.operations.Operations>` * 2018-02-01-preview: :class:`Operations<azure.mgmt.containerregistry.v2018_02_01_preview.operations.Operations>` * 2018-09-01: :class:`Operations<azure.mgmt.containerregistry.v2018_09_01.operations.Operations>`
[ "Instance", "depends", "on", "the", "API", "version", ":" ]
python
test
DLR-RM/RAFCON
source/rafcon/gui/controllers/states_editor.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/states_editor.py#L516-L543
def keep_only_sticked_and_selected_tabs(self): """Close all tabs except the currently active one and all sticky ones""" # Only if the user didn't deactivate this behaviour if not global_gui_config.get_config_value('KEEP_ONLY_STICKY_STATES_OPEN', True): return page_id = self.view.notebook.get_current_page() # No tabs are open if page_id == -1: return page = self.view.notebook.get_nth_page(page_id) current_state_identifier = self.get_state_identifier_for_page(page) states_to_be_closed = [] # Iterate over all tabs for state_identifier, tab_info in list(self.tabs.items()): # If the tab is currently open, keep it open if current_state_identifier == state_identifier: continue # If the tab is sticky, keep it open if tab_info['is_sticky']: continue # Otherwise close it states_to_be_closed.append(state_identifier) for state_identifier in states_to_be_closed: self.close_page(state_identifier, delete=False)
[ "def", "keep_only_sticked_and_selected_tabs", "(", "self", ")", ":", "# Only if the user didn't deactivate this behaviour", "if", "not", "global_gui_config", ".", "get_config_value", "(", "'KEEP_ONLY_STICKY_STATES_OPEN'", ",", "True", ")", ":", "return", "page_id", "=", "self", ".", "view", ".", "notebook", ".", "get_current_page", "(", ")", "# No tabs are open", "if", "page_id", "==", "-", "1", ":", "return", "page", "=", "self", ".", "view", ".", "notebook", ".", "get_nth_page", "(", "page_id", ")", "current_state_identifier", "=", "self", ".", "get_state_identifier_for_page", "(", "page", ")", "states_to_be_closed", "=", "[", "]", "# Iterate over all tabs", "for", "state_identifier", ",", "tab_info", "in", "list", "(", "self", ".", "tabs", ".", "items", "(", ")", ")", ":", "# If the tab is currently open, keep it open", "if", "current_state_identifier", "==", "state_identifier", ":", "continue", "# If the tab is sticky, keep it open", "if", "tab_info", "[", "'is_sticky'", "]", ":", "continue", "# Otherwise close it", "states_to_be_closed", ".", "append", "(", "state_identifier", ")", "for", "state_identifier", "in", "states_to_be_closed", ":", "self", ".", "close_page", "(", "state_identifier", ",", "delete", "=", "False", ")" ]
Close all tabs except the currently active one and all sticky ones
[ "Close", "all", "tabs", "except", "the", "currently", "active", "one", "and", "all", "sticked", "ones" ]
python
train
LonamiWebs/Telethon
telethon/tl/custom/sendergetter.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/tl/custom/sendergetter.py#L45-L60
def input_sender(self): """ This :tl:`InputPeer` is the input version of the user/channel who sent the message. Similarly to `input_chat`, this doesn't have things like username or similar, but still useful in some cases. Note that this might not be available if the library can't find the input chat, or if the message is a broadcast on a channel. """ if self._input_sender is None and self._sender_id: try: self._input_sender = self._client.session\ .get_input_entity(self._sender_id) except ValueError: pass return self._input_sender
[ "def", "input_sender", "(", "self", ")", ":", "if", "self", ".", "_input_sender", "is", "None", "and", "self", ".", "_sender_id", ":", "try", ":", "self", ".", "_input_sender", "=", "self", ".", "_client", ".", "session", ".", "get_input_entity", "(", "self", ".", "_sender_id", ")", "except", "ValueError", ":", "pass", "return", "self", ".", "_input_sender" ]
This :tl:`InputPeer` is the input version of the user/channel who sent the message. Similarly to `input_chat`, this doesn't have things like username or similar, but still useful in some cases. Note that this might not be available if the library can't find the input chat, or if the message is a broadcast on a channel.
[ "This", ":", "tl", ":", "InputPeer", "is", "the", "input", "version", "of", "the", "user", "/", "channel", "who", "sent", "the", "message", ".", "Similarly", "to", "input_chat", "this", "doesn", "t", "have", "things", "like", "username", "or", "similar", "but", "still", "useful", "in", "some", "cases", "." ]
python
train
ahopkins/sanic-jwt
sanic_jwt/authentication.py
https://github.com/ahopkins/sanic-jwt/blob/fca7750499c8cedde823d778512f613777fb5282/sanic_jwt/authentication.py#L179-L211
async def _get_payload(self, user): """ Given a user object, create a payload and extend it as configured. """ payload = await utils.call(self.build_payload, user) if ( not isinstance(payload, dict) or self.config.user_id() not in payload ): raise exceptions.InvalidPayload payload = await utils.call(self.add_claims, payload, user) extend_payload_args = inspect.getfullargspec(self.extend_payload) args = [payload] if "user" in extend_payload_args.args: args.append(user) payload = await utils.call(self.extend_payload, *args) if self.config.scopes_enabled(): scopes = await utils.call(self.add_scopes_to_payload, user) if not isinstance(scopes, (tuple, list)): scopes = [scopes] payload[self.config.scopes_name()] = scopes claims = self.claims + [x.get_key() for x in self._custom_claims] missing = [x for x in claims if x not in payload] if missing: logger.debug("") raise exceptions.MissingRegisteredClaim(missing=missing) return payload
[ "async", "def", "_get_payload", "(", "self", ",", "user", ")", ":", "payload", "=", "await", "utils", ".", "call", "(", "self", ".", "build_payload", ",", "user", ")", "if", "(", "not", "isinstance", "(", "payload", ",", "dict", ")", "or", "self", ".", "config", ".", "user_id", "(", ")", "not", "in", "payload", ")", ":", "raise", "exceptions", ".", "InvalidPayload", "payload", "=", "await", "utils", ".", "call", "(", "self", ".", "add_claims", ",", "payload", ",", "user", ")", "extend_payload_args", "=", "inspect", ".", "getfullargspec", "(", "self", ".", "extend_payload", ")", "args", "=", "[", "payload", "]", "if", "\"user\"", "in", "extend_payload_args", ".", "args", ":", "args", ".", "append", "(", "user", ")", "payload", "=", "await", "utils", ".", "call", "(", "self", ".", "extend_payload", ",", "*", "args", ")", "if", "self", ".", "config", ".", "scopes_enabled", "(", ")", ":", "scopes", "=", "await", "utils", ".", "call", "(", "self", ".", "add_scopes_to_payload", ",", "user", ")", "if", "not", "isinstance", "(", "scopes", ",", "(", "tuple", ",", "list", ")", ")", ":", "scopes", "=", "[", "scopes", "]", "payload", "[", "self", ".", "config", ".", "scopes_name", "(", ")", "]", "=", "scopes", "claims", "=", "self", ".", "claims", "+", "[", "x", ".", "get_key", "(", ")", "for", "x", "in", "self", ".", "_custom_claims", "]", "missing", "=", "[", "x", "for", "x", "in", "claims", "if", "x", "not", "in", "payload", "]", "if", "missing", ":", "logger", ".", "debug", "(", "\"\"", ")", "raise", "exceptions", ".", "MissingRegisteredClaim", "(", "missing", "=", "missing", ")", "return", "payload" ]
Given a user object, create a payload and extend it as configured.
[ "Given", "a", "user", "object", "create", "a", "payload", "and", "extend", "it", "as", "configured", "." ]
python
train
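The getfullargspec() step is what lets extend_payload optionally accept the user: the argument is only passed when the signature declares it. In isolation, with hypothetical payload/user values:

import inspect

def extend_payload(payload, user):
    payload["aud"] = user["name"]
    return payload

args = [{"user_id": 1}]
if "user" in inspect.getfullargspec(extend_payload).args:
    args.append({"name": "alice"})
print(extend_payload(*args))   # {'user_id': 1, 'aud': 'alice'}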
coleifer/walrus
walrus/containers.py
https://github.com/coleifer/walrus/blob/82bf15a6613487b5b5fefeb488f186d7e0106547/walrus/containers.py#L1297-L1307
def delete_consumer(self, consumer=None): """ Remove a specific consumer from a consumer group. :consumer: name of consumer to delete. If not provided, will be the default consumer for this stream. :returns: number of pending messages that the consumer had before being deleted. """ if consumer is None: consumer = self._consumer return self.database.xgroup_delconsumer(self.key, self.group, consumer)
[ "def", "delete_consumer", "(", "self", ",", "consumer", "=", "None", ")", ":", "if", "consumer", "is", "None", ":", "consumer", "=", "self", ".", "_consumer", "return", "self", ".", "database", ".", "xgroup_delconsumer", "(", "self", ".", "key", ",", "self", ".", "group", ",", "consumer", ")" ]
Remove a specific consumer from a consumer group. :consumer: name of consumer to delete. If not provided, will be the default consumer for this stream. :returns: number of pending messages that the consumer had before being deleted.
[ "Remove", "a", "specific", "consumer", "from", "a", "consumer", "group", "." ]
python
train
KelSolaar/Umbra
umbra/components/factory/script_editor/models.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/script_editor/models.py#L1010-L1023
def get_file_language(self, file): """ Returns the language of given file. :param file: File to get language of. :type file: unicode :return: File language. :rtype: Language """ for language in self.__languages: if re.search(language.extensions, file): LOGGER.debug("> '{0}' file detected language: '{1}'.".format(file, language.name)) return language
[ "def", "get_file_language", "(", "self", ",", "file", ")", ":", "for", "language", "in", "self", ".", "__languages", ":", "if", "re", ".", "search", "(", "language", ".", "extensions", ",", "file", ")", ":", "LOGGER", ".", "debug", "(", "\"> '{0}' file detected language: '{1}'.\"", ".", "format", "(", "file", ",", "language", ".", "name", ")", ")", "return", "language" ]
Returns the language of given file. :param file: File to get language of. :type file: unicode :return: File language. :rtype: Language
[ "Returns", "the", "language", "of", "given", "file", "." ]
python
train
GPflow/GPflow
gpflow/kernels.py
https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/kernels.py#L846-L855
def _broadcasting_elementwise_op(op, a, b): r""" Apply binary operation `op` to every pair in tensors `a` and `b`. :param op: binary operator on tensors, e.g. tf.add, tf.subtract :param a: tf.Tensor, shape [n_1, ..., n_a] :param b: tf.Tensor, shape [m_1, ..., m_b] :return: tf.Tensor, shape [n_1, ..., n_a, m_1, ..., m_b] """ flatres = op(tf.reshape(a, [-1, 1]), tf.reshape(b, [1, -1])) return tf.reshape(flatres, tf.concat([tf.shape(a), tf.shape(b)], 0))
[ "def", "_broadcasting_elementwise_op", "(", "op", ",", "a", ",", "b", ")", ":", "flatres", "=", "op", "(", "tf", ".", "reshape", "(", "a", ",", "[", "-", "1", ",", "1", "]", ")", ",", "tf", ".", "reshape", "(", "b", ",", "[", "1", ",", "-", "1", "]", ")", ")", "return", "tf", ".", "reshape", "(", "flatres", ",", "tf", ".", "concat", "(", "[", "tf", ".", "shape", "(", "a", ")", ",", "tf", ".", "shape", "(", "b", ")", "]", ",", "0", ")", ")" ]
r""" Apply binary operation `op` to every pair in tensors `a` and `b`. :param op: binary operator on tensors, e.g. tf.add, tf.substract :param a: tf.Tensor, shape [n_1, ..., n_a] :param b: tf.Tensor, shape [m_1, ..., m_b] :return: tf.Tensor, shape [n_1, ..., n_a, m_1, ..., m_b]
[ "r", "Apply", "binary", "operation", "op", "to", "every", "pair", "in", "tensors", "a", "and", "b", ".", ":", "param", "op", ":", "binary", "operator", "on", "tensors", "e", ".", "g", ".", "tf", ".", "add", "tf", ".", "substract", ":", "param", "a", ":", "tf", ".", "Tensor", "shape", "[", "n_1", "...", "n_a", "]", ":", "param", "b", ":", "tf", ".", "Tensor", "shape", "[", "m_1", "...", "m_b", "]", ":", "return", ":", "tf", ".", "Tensor", "shape", "[", "n_1", "...", "n_a", "m_1", "...", "m_b", "]" ]
python
train
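The same reshape trick works verbatim in numpy, where the output shape is simply a.shape + b.shape:

import numpy as np

def broadcasting_elementwise_op(op, a, b):
    # Column vs row, pairwise op, then restore the combined shape.
    flat = op(a.reshape(-1, 1), b.reshape(1, -1))
    return flat.reshape(a.shape + b.shape)

a = np.arange(6.0).reshape(2, 3)
b = np.array([10.0, 20.0])
print(broadcasting_elementwise_op(np.add, a, b).shape)   # (2, 3, 2)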
IBMStreams/pypi.streamsx
streamsx/rest.py
https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/rest.py#L290-L296
def resource_url(self): """str: Root URL for IBM Streams REST API""" if self._iam: self._resource_url = self._resource_url or _get_iam_rest_api_url_from_creds(self.rest_client, self.credentials) else: self._resource_url = self._resource_url or _get_rest_api_url_from_creds(self.session, self.credentials) return self._resource_url
[ "def", "resource_url", "(", "self", ")", ":", "if", "self", ".", "_iam", ":", "self", ".", "_resource_url", "=", "self", ".", "_resource_url", "or", "_get_iam_rest_api_url_from_creds", "(", "self", ".", "rest_client", ",", "self", ".", "credentials", ")", "else", ":", "self", ".", "_resource_url", "=", "self", ".", "_resource_url", "or", "_get_rest_api_url_from_creds", "(", "self", ".", "session", ",", "self", ".", "credentials", ")", "return", "self", ".", "_resource_url" ]
str: Root URL for IBM Streams REST API
[ "str", ":", "Root", "URL", "for", "IBM", "Streams", "REST", "API" ]
python
train
noirbizarre/minibench
minibench/cli.py
https://github.com/noirbizarre/minibench/blob/a1ac66dc075181c62bb3c0d3a26beb5c46d5f4ab/minibench/cli.py#L165-L186
def cli(patterns, times, json, csv, rst, md, ref, unit, precision, debug): '''Execute minibench benchmarks''' if ref: ref = JSON.load(ref) filenames = [] reporters = [CliReporter(ref=ref, debug=debug, unit=unit, precision=precision)] kwargs = {} for pattern in patterns or ['**/*.bench.py']: filenames.extend(resolve_pattern(pattern)) if json: reporters.append(JsonReporter(json, precision=precision)) if csv: reporters.append(CsvReporter(csv, precision=precision)) if rst: reporters.append(RstReporter(rst, precision=precision)) if md: reporters.append(MarkdownReporter(md, precision=precision)) if times: kwargs['times'] = times runner = BenchmarkRunner(*filenames, reporters=reporters, debug=debug) runner.run(**kwargs)
[ "def", "cli", "(", "patterns", ",", "times", ",", "json", ",", "csv", ",", "rst", ",", "md", ",", "ref", ",", "unit", ",", "precision", ",", "debug", ")", ":", "if", "ref", ":", "ref", "=", "JSON", ".", "load", "(", "ref", ")", "filenames", "=", "[", "]", "reporters", "=", "[", "CliReporter", "(", "ref", "=", "ref", ",", "debug", "=", "debug", ",", "unit", "=", "unit", ",", "precision", "=", "precision", ")", "]", "kwargs", "=", "{", "}", "for", "pattern", "in", "patterns", "or", "[", "'**/*.bench.py'", "]", ":", "filenames", ".", "extend", "(", "resolve_pattern", "(", "pattern", ")", ")", "if", "json", ":", "reporters", ".", "append", "(", "JsonReporter", "(", "json", ",", "precision", "=", "precision", ")", ")", "if", "csv", ":", "reporters", ".", "append", "(", "CsvReporter", "(", "csv", ",", "precision", "=", "precision", ")", ")", "if", "rst", ":", "reporters", ".", "append", "(", "RstReporter", "(", "rst", ",", "precision", "=", "precision", ")", ")", "if", "md", ":", "reporters", ".", "append", "(", "MarkdownReporter", "(", "md", ",", "precision", "=", "precision", ")", ")", "if", "times", ":", "kwargs", "[", "'times'", "]", "=", "times", "runner", "=", "BenchmarkRunner", "(", "*", "filenames", ",", "reporters", "=", "reporters", ",", "debug", "=", "debug", ")", "runner", ".", "run", "(", "*", "*", "kwargs", ")" ]
Execute minibench benchmarks
[ "Execute", "minibench", "benchmarks" ]
python
train
pypa/pipenv
pipenv/vendor/vistir/contextmanagers.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/vistir/contextmanagers.py#L111-L162
def spinner( spinner_name=None, start_text=None, handler_map=None, nospin=False, write_to_stdout=True, ): """Get a spinner object or a dummy spinner to wrap a context. :param str spinner_name: A spinner type e.g. "dots" or "bouncingBar" (default: {"bouncingBar"}) :param str start_text: Text to start off the spinner with (default: {None}) :param dict handler_map: Handler map for signals to be handled gracefully (default: {None}) :param bool nospin: If true, use the dummy spinner (default: {False}) :param bool write_to_stdout: Writes to stdout if true, otherwise writes to stderr (default: True) :return: A spinner object which can be manipulated while alive :rtype: :class:`~vistir.spin.VistirSpinner` Raises: RuntimeError -- Raised if the spinner extra is not installed """ from .spin import create_spinner has_yaspin = None try: import yaspin except ImportError: has_yaspin = False if not nospin: raise RuntimeError( "Failed to import spinner! Reinstall vistir with command:" " pip install --upgrade vistir[spinner]" ) else: spinner_name = "" else: has_yaspin = True spinner_name = "" use_yaspin = (has_yaspin is False) or (nospin is True) if has_yaspin is None or has_yaspin is True and not nospin: use_yaspin = True if start_text is None and use_yaspin is True: start_text = "Running..." with create_spinner( spinner_name=spinner_name, text=start_text, handler_map=handler_map, nospin=nospin, use_yaspin=use_yaspin, write_to_stdout=write_to_stdout, ) as _spinner: yield _spinner
[ "def", "spinner", "(", "spinner_name", "=", "None", ",", "start_text", "=", "None", ",", "handler_map", "=", "None", ",", "nospin", "=", "False", ",", "write_to_stdout", "=", "True", ",", ")", ":", "from", ".", "spin", "import", "create_spinner", "has_yaspin", "=", "None", "try", ":", "import", "yaspin", "except", "ImportError", ":", "has_yaspin", "=", "False", "if", "not", "nospin", ":", "raise", "RuntimeError", "(", "\"Failed to import spinner! Reinstall vistir with command:\"", "\" pip install --upgrade vistir[spinner]\"", ")", "else", ":", "spinner_name", "=", "\"\"", "else", ":", "has_yaspin", "=", "True", "spinner_name", "=", "\"\"", "use_yaspin", "=", "(", "has_yaspin", "is", "False", ")", "or", "(", "nospin", "is", "True", ")", "if", "has_yaspin", "is", "None", "or", "has_yaspin", "is", "True", "and", "not", "nospin", ":", "use_yaspin", "=", "True", "if", "start_text", "is", "None", "and", "use_yaspin", "is", "True", ":", "start_text", "=", "\"Running...\"", "with", "create_spinner", "(", "spinner_name", "=", "spinner_name", ",", "text", "=", "start_text", ",", "handler_map", "=", "handler_map", ",", "nospin", "=", "nospin", ",", "use_yaspin", "=", "use_yaspin", ",", "write_to_stdout", "=", "write_to_stdout", ",", ")", "as", "_spinner", ":", "yield", "_spinner" ]
Get a spinner object or a dummy spinner to wrap a context. :param str spinner_name: A spinner type e.g. "dots" or "bouncingBar" (default: {"bouncingBar"}) :param str start_text: Text to start off the spinner with (default: {None}) :param dict handler_map: Handler map for signals to be handled gracefully (default: {None}) :param bool nospin: If true, use the dummy spinner (default: {False}) :param bool write_to_stdout: Writes to stdout if true, otherwise writes to stderr (default: True) :return: A spinner object which can be manipulated while alive :rtype: :class:`~vistir.spin.VistirSpinner` Raises: RuntimeError -- Raised if the spinner extra is not installed
[ "Get", "a", "spinner", "object", "or", "a", "dummy", "spinner", "to", "wrap", "a", "context", "." ]
python
train
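A minimal usage sketch for the context manager above, assuming vistir is installed with its spinner extra; sp.write follows the yaspin-style API that VistirSpinner wraps.

import time
from vistir.contextmanagers import spinner

with spinner(spinner_name='bouncingBar', start_text='Working...') as sp:
    time.sleep(1)          # stand-in for real work
    sp.write('step done')  # print a line without disturbing the spinner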
pypa/pipenv
pipenv/vendor/pexpect/pxssh.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pexpect/pxssh.py#L449-L471
def prompt(self, timeout=-1): '''Match the next shell prompt. This is little more than a short-cut to the :meth:`~pexpect.spawn.expect` method. Note that if you called :meth:`login` with ``auto_prompt_reset=False``, then before calling :meth:`prompt` you must set the :attr:`PROMPT` attribute to a regex that it will use for matching the prompt. Calling :meth:`prompt` will erase the contents of the :attr:`before` attribute even if no prompt is ever matched. If timeout is not given or it is set to -1 then self.timeout is used. :return: True if the shell prompt was matched, False if the timeout was reached. ''' if timeout == -1: timeout = self.timeout i = self.expect([self.PROMPT, TIMEOUT], timeout=timeout) if i==1: return False return True
[ "def", "prompt", "(", "self", ",", "timeout", "=", "-", "1", ")", ":", "if", "timeout", "==", "-", "1", ":", "timeout", "=", "self", ".", "timeout", "i", "=", "self", ".", "expect", "(", "[", "self", ".", "PROMPT", ",", "TIMEOUT", "]", ",", "timeout", "=", "timeout", ")", "if", "i", "==", "1", ":", "return", "False", "return", "True" ]
Match the next shell prompt. This is little more than a short-cut to the :meth:`~pexpect.spawn.expect` method. Note that if you called :meth:`login` with ``auto_prompt_reset=False``, then before calling :meth:`prompt` you must set the :attr:`PROMPT` attribute to a regex that it will use for matching the prompt. Calling :meth:`prompt` will erase the contents of the :attr:`before` attribute even if no prompt is ever matched. If timeout is not given or it is set to -1 then self.timeout is used. :return: True if the shell prompt was matched, False if the timeout was reached.
[ "Match", "the", "next", "shell", "prompt", "." ]
python
train
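A usage sketch showing prompt() in context; the host and credentials are placeholders.

from pexpect import pxssh

s = pxssh.pxssh()
s.login('example.com', 'user', 'password')  # placeholder credentials
s.sendline('uptime')
if s.prompt(timeout=30):      # True once the shell prompt is matched
    print(s.before.decode())  # everything emitted before the prompt
else:
    print('timed out waiting for the prompt')
s.logout()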
twilio/twilio-python
twilio/rest/pricing/v2/voice/number.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/pricing/v2/voice/number.py#L188-L201
def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: NumberContext for this NumberInstance :rtype: twilio.rest.pricing.v2.voice.number.NumberContext """ if self._context is None: self._context = NumberContext( self._version, destination_number=self._solution['destination_number'], ) return self._context
[ "def", "_proxy", "(", "self", ")", ":", "if", "self", ".", "_context", "is", "None", ":", "self", ".", "_context", "=", "NumberContext", "(", "self", ".", "_version", ",", "destination_number", "=", "self", ".", "_solution", "[", "'destination_number'", "]", ",", ")", "return", "self", ".", "_context" ]
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: NumberContext for this NumberInstance :rtype: twilio.rest.pricing.v2.voice.number.NumberContext
[ "Generate", "an", "instance", "context", "for", "the", "instance", "the", "context", "is", "capable", "of", "performing", "various", "actions", ".", "All", "instance", "actions", "are", "proxied", "to", "the", "context" ]
python
train
expfactory/expfactory
expfactory/server.py
https://github.com/expfactory/expfactory/blob/27ce6cc93e17231df8a8024f18e631336afd3501/expfactory/server.py#L56-L74
def initdb(self): '''initdb will check for writability of the data folder, meaning that it is bound to the local machine. If the folder isn't bound, expfactory runs in demo mode (not saving data) ''' self.database = EXPFACTORY_DATABASE bot.info("DATABASE: %s" %self.database) # Supported database options valid = ('sqlite', 'postgres', 'mysql', 'filesystem') if not self.database.startswith(valid): bot.warning('%s is not yet a supported type, saving to filesystem.' % self.database) self.database = 'filesystem' # Add functions specific to database type self.init_db() # uses url in self.database bot.log("Database: %s" % self.database)
[ "def", "initdb", "(", "self", ")", ":", "self", ".", "database", "=", "EXPFACTORY_DATABASE", "bot", ".", "info", "(", "\"DATABASE: %s\"", "%", "self", ".", "database", ")", "# Supported database options", "valid", "=", "(", "'sqlite'", ",", "'postgres'", ",", "'mysql'", ",", "'filesystem'", ")", "if", "not", "self", ".", "database", ".", "startswith", "(", "valid", ")", ":", "bot", ".", "warning", "(", "'%s is not yet a supported type, saving to filesystem.'", "%", "self", ".", "database", ")", "self", ".", "database", "=", "'filesystem'", "# Add functions specific to database type", "self", ".", "init_db", "(", ")", "# uses url in self.database", "bot", ".", "log", "(", "\"Data base: %s\"", "%", "self", ".", "database", ")" ]
initdb will check for writability of the data folder, meaning that it is bound to the local machine. If the folder isn't bound, expfactory runs in demo mode (not saving data)
[ "initdb", "will", "check", "for", "writability", "of", "the", "data", "folder", "meaning", "that", "it", "is", "bound", "to", "the", "local", "machine", ".", "If", "the", "folder", "isn", "t", "bound", "expfactory", "runs", "in", "demo", "mode", "(", "not", "saving", "data", ")" ]
python
train
gboeing/osmnx
osmnx/utils.py
https://github.com/gboeing/osmnx/blob/be59fd313bcb68af8fc79242c56194f1247e26e2/osmnx/utils.py#L361-L398
def great_circle_vec(lat1, lng1, lat2, lng2, earth_radius=6371009): """ Vectorized function to calculate the great-circle distance between two points or between vectors of points, using haversine. Parameters ---------- lat1 : float or array of float lng1 : float or array of float lat2 : float or array of float lng2 : float or array of float earth_radius : numeric radius of earth in units in which distance will be returned (default is meters) Returns ------- distance : float or vector of floats distance or vector of distances from (lat1, lng1) to (lat2, lng2) in units of earth_radius """ phi1 = np.deg2rad(lat1) phi2 = np.deg2rad(lat2) d_phi = phi2 - phi1 theta1 = np.deg2rad(lng1) theta2 = np.deg2rad(lng2) d_theta = theta2 - theta1 h = np.sin(d_phi / 2) ** 2 + np.cos(phi1) * np.cos(phi2) * np.sin(d_theta / 2) ** 2 h = np.minimum(1.0, h) # protect against floating point errors arc = 2 * np.arcsin(np.sqrt(h)) # return distance in units of earth_radius distance = arc * earth_radius return distance
[ "def", "great_circle_vec", "(", "lat1", ",", "lng1", ",", "lat2", ",", "lng2", ",", "earth_radius", "=", "6371009", ")", ":", "phi1", "=", "np", ".", "deg2rad", "(", "lat1", ")", "phi2", "=", "np", ".", "deg2rad", "(", "lat2", ")", "d_phi", "=", "phi2", "-", "phi1", "theta1", "=", "np", ".", "deg2rad", "(", "lng1", ")", "theta2", "=", "np", ".", "deg2rad", "(", "lng2", ")", "d_theta", "=", "theta2", "-", "theta1", "h", "=", "np", ".", "sin", "(", "d_phi", "/", "2", ")", "**", "2", "+", "np", ".", "cos", "(", "phi1", ")", "*", "np", ".", "cos", "(", "phi2", ")", "*", "np", ".", "sin", "(", "d_theta", "/", "2", ")", "**", "2", "h", "=", "np", ".", "minimum", "(", "1.0", ",", "h", ")", "# protect against floating point errors", "arc", "=", "2", "*", "np", ".", "arcsin", "(", "np", ".", "sqrt", "(", "h", ")", ")", "# return distance in units of earth_radius", "distance", "=", "arc", "*", "earth_radius", "return", "distance" ]
Vectorized function to calculate the great-circle distance between two points or between vectors of points, using haversine. Parameters ---------- lat1 : float or array of float lng1 : float or array of float lat2 : float or array of float lng2 : float or array of float earth_radius : numeric radius of earth in units in which distance will be returned (default is meters) Returns ------- distance : float or vector of floats distance or vector of distances from (lat1, lng1) to (lat2, lng2) in units of earth_radius
[ "Vectorized", "function", "to", "calculate", "the", "great", "-", "circle", "distance", "between", "two", "points", "or", "between", "vectors", "of", "points", "using", "haversine", "." ]
python
train
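Because the function is pure numpy, a standalone check is straightforward. The import path matches the commit linked above (the function moved in later osmnx releases), and the printed figures are approximate.

import numpy as np
from osmnx.utils import great_circle_vec

# scalar call: Berlin to Paris, roughly 878 km
print(great_circle_vec(52.52, 13.405, 48.8566, 2.3522) / 1000)

# vectorized call over two point pairs at once (~[878, 3940] km)
lat1 = np.array([52.52, 40.7128]); lng1 = np.array([13.405, -74.006])
lat2 = np.array([48.8566, 34.0522]); lng2 = np.array([2.3522, -118.2437])
print(great_circle_vec(lat1, lng1, lat2, lng2) / 1000)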
objectrocket/python-client
objectrocket/instances/mongodb.py
https://github.com/objectrocket/python-client/blob/a65868c7511ff49a5fbe304e53bf592b7fc6d5ef/objectrocket/instances/mongodb.py#L62-L80
def get_authenticated_connection(self, user, passwd, db='admin', ssl=True): """Get an authenticated connection to this instance. :param str user: The username to use for authentication. :param str passwd: The password to use for authentication. :param str db: The name of the database to authenticate against. Defaults to ``'admin'``. :param bool ssl: Use SSL/TLS if available for this instance. Defaults to ``True``. :raises: :py:class:`pymongo.errors.OperationFailure` if authentication fails. """ # Attempt to establish an authenticated connection. try: connection = self.get_connection(ssl=ssl) connection[db].authenticate(user, passwd) return connection # Catch exception here for logging, then just re-raise. except pymongo.errors.OperationFailure as ex: logger.exception(ex) raise
[ "def", "get_authenticated_connection", "(", "self", ",", "user", ",", "passwd", ",", "db", "=", "'admin'", ",", "ssl", "=", "True", ")", ":", "# Attempt to establish an authenticated connection.", "try", ":", "connection", "=", "self", ".", "get_connection", "(", "ssl", "=", "ssl", ")", "connection", "[", "db", "]", ".", "authenticate", "(", "user", ",", "passwd", ")", "return", "connection", "# Catch exception here for logging, then just re-raise.", "except", "pymongo", ".", "errors", ".", "OperationFailure", "as", "ex", ":", "logger", ".", "exception", "(", "ex", ")", "raise" ]
Get an authenticated connection to this instance. :param str user: The username to use for authentication. :param str passwd: The password to use for authentication. :param str db: The name of the database to authenticate against. Defaults to ``'admin'``. :param bool ssl: Use SSL/TLS if available for this instance. Defaults to ``True``. :raises: :py:class:`pymongo.errors.OperationFailure` if authentication fails.
[ "Get", "an", "authenticated", "connection", "to", "this", "instance", "." ]
python
train
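A hypothetical call sketch, assuming `instance` is an objectrocket MongoDB instance object already fetched through the client; credentials and database name are placeholders.

conn = instance.get_authenticated_connection('dbuser', 's3cr3t', db='mydb')
print(conn.server_info())  # pymongo.errors.OperationFailure on bad auth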
ionelmc/nose-htmloutput
src/nose_htmloutput/__init__.py
https://github.com/ionelmc/nose-htmloutput/blob/1cda401c09fcffdb30bc240fb15c31b68d7a6594/src/nose_htmloutput/__init__.py#L87-L96
def options(self, parser, env): """Sets additional command line options.""" Plugin.options(self, parser, env) parser.add_option( '--html-file', action='store', dest='html_file', metavar="FILE", default=env.get('NOSE_HTML_FILE', 'nosetests.html'), help="Path to html file to store the report in. " "Default is nosetests.html in the working directory " "[NOSE_HTML_FILE]")
[ "def", "options", "(", "self", ",", "parser", ",", "env", ")", ":", "Plugin", ".", "options", "(", "self", ",", "parser", ",", "env", ")", "parser", ".", "add_option", "(", "'--html-file'", ",", "action", "=", "'store'", ",", "dest", "=", "'html_file'", ",", "metavar", "=", "\"FILE\"", ",", "default", "=", "env", ".", "get", "(", "'NOSE_HTML_FILE'", ",", "'nosetests.html'", ")", ",", "help", "=", "\"Path to html file to store the report in. \"", "\"Default is nosetests.html in the working directory \"", "\"[NOSE_HTML_FILE]\"", ")" ]
Sets additional command line options.
[ "Sets", "additional", "command", "line", "options", "." ]
python
train
LonamiWebs/Telethon
telethon_generator/generators/docs.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon_generator/generators/docs.py#L199-L206
def _copy_replace(src, dst, replacements): """Copies the src file into dst applying the replacements dict""" with src.open() as infile, dst.open('w') as outfile: outfile.write(re.sub( '|'.join(re.escape(k) for k in replacements), lambda m: str(replacements[m.group(0)]), infile.read() ))
[ "def", "_copy_replace", "(", "src", ",", "dst", ",", "replacements", ")", ":", "with", "src", ".", "open", "(", ")", "as", "infile", ",", "dst", ".", "open", "(", "'w'", ")", "as", "outfile", ":", "outfile", ".", "write", "(", "re", ".", "sub", "(", "'|'", ".", "join", "(", "re", ".", "escape", "(", "k", ")", "for", "k", "in", "replacements", ")", ",", "lambda", "m", ":", "str", "(", "replacements", "[", "m", ".", "group", "(", "0", ")", "]", ")", ",", "infile", ".", "read", "(", ")", ")", ")" ]
Copies the src file into dst applying the replacements dict
[ "Copies", "the", "src", "file", "into", "dst", "applying", "the", "replacements", "dict" ]
python
train
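A short usage sketch; the function is module-private, so this assumes it is in scope and that the arguments expose the pathlib-style .open() it relies on. File names and replacement keys are hypothetical.

from pathlib import Path

_copy_replace(Path('template.html'), Path('index.html'),
              {'{title}': 'API Docs', '{version}': 42})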
yandex/yandex-tank
yandextank/core/tankcore.py
https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/core/tankcore.py#L465-L475
def mkstemp(self, suffix, prefix, directory=None): """ Generate temp file name in artifacts base dir and close temp file handle """ if not directory: directory = self.artifacts_dir fd, fname = tempfile.mkstemp(suffix, prefix, directory) os.close(fd) os.chmod(fname, 0o644) # FIXME: chmod to parent dir's mode? return fname
[ "def", "mkstemp", "(", "self", ",", "suffix", ",", "prefix", ",", "directory", "=", "None", ")", ":", "if", "not", "directory", ":", "directory", "=", "self", ".", "artifacts_dir", "fd", ",", "fname", "=", "tempfile", ".", "mkstemp", "(", "suffix", ",", "prefix", ",", "directory", ")", "os", ".", "close", "(", "fd", ")", "os", ".", "chmod", "(", "fname", ",", "0o644", ")", "# FIXME: chmod to parent dir's mode?", "return", "fname" ]
Generate temp file name in artifacts base dir and close temp file handle
[ "Generate", "temp", "file", "name", "in", "artifacts", "base", "dir", "and", "close", "temp", "file", "handle" ]
python
test
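The same pattern works standalone: reserve a unique file name up front, then release the handle immediately so only the name is kept. The directory below is a placeholder for the artifacts dir.

import os
import tempfile

fd, fname = tempfile.mkstemp('.log', 'phantom_', '/tmp')
os.close(fd)            # only the name is needed, not an open handle
os.chmod(fname, 0o644)  # make the future artifact world-readable
print(fname)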
rwl/godot
godot/edge.py
https://github.com/rwl/godot/blob/013687c9e8983d2aa2ceebb8a76c5c4f1e37c90f/godot/edge.py#L693-L700
def _get_name(self): """ Property getter. """ if (self.tail_node is not None) and (self.head_node is not None): return "%s %s %s" % (self.tail_node.ID, self.conn, self.head_node.ID) else: return "Edge"
[ "def", "_get_name", "(", "self", ")", ":", "if", "(", "self", ".", "tail_node", "is", "not", "None", ")", "and", "(", "self", ".", "head_node", "is", "not", "None", ")", ":", "return", "\"%s %s %s\"", "%", "(", "self", ".", "tail_node", ".", "ID", ",", "self", ".", "conn", ",", "self", ".", "head_node", ".", "ID", ")", "else", ":", "return", "\"Edge\"" ]
Property getter.
[ "Property", "getter", "." ]
python
test
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/flask/app.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/flask/app.py#L1625-L1642
def handle_url_build_error(self, error, endpoint, values): """Handle :class:`~werkzeug.routing.BuildError` on :meth:`url_for`. """ exc_type, exc_value, tb = sys.exc_info() for handler in self.url_build_error_handlers: try: rv = handler(error, endpoint, values) if rv is not None: return rv except BuildError as error: pass # At this point we want to reraise the exception. If the error is # still the same one we can reraise it with the original traceback, # otherwise we raise it from here. if error is exc_value: reraise(exc_type, exc_value, tb) raise error
[ "def", "handle_url_build_error", "(", "self", ",", "error", ",", "endpoint", ",", "values", ")", ":", "exc_type", ",", "exc_value", ",", "tb", "=", "sys", ".", "exc_info", "(", ")", "for", "handler", "in", "self", ".", "url_build_error_handlers", ":", "try", ":", "rv", "=", "handler", "(", "error", ",", "endpoint", ",", "values", ")", "if", "rv", "is", "not", "None", ":", "return", "rv", "except", "BuildError", "as", "error", ":", "pass", "# At this point we want to reraise the exception. If the error is", "# still the same one we can reraise it with the original traceback,", "# otherwise we raise it from here.", "if", "error", "is", "exc_value", ":", "reraise", "(", "exc_type", ",", "exc_value", ",", "tb", ")", "raise", "error" ]
Handle :class:`~werkzeug.routing.BuildError` on :meth:`url_for`.
[ "Handle", ":", "class", ":", "~werkzeug", ".", "routing", ".", "BuildError", "on", ":", "meth", ":", "url_for", "." ]
python
test
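handle_url_build_error iterates app.url_build_error_handlers, so registering a handler lets url_for() fall back instead of raising; the endpoint name and URL below are placeholders.

from flask import Flask

app = Flask(__name__)

def external_url_handler(error, endpoint, values):
    if endpoint == 'cdn':
        return 'https://cdn.example.com/' + values.get('filename', '')
    return None  # None means "not handled"; the BuildError is re-raised

app.url_build_error_handlers.append(external_url_handler)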
NiklasRosenstein-Python/nr-deprecated
nr/path.py
https://github.com/NiklasRosenstein-Python/nr-deprecated/blob/f9f8b89ea1b084841a8ab65784eaf68852686b2a/nr/path.py#L220-L235
def addsuffix(subject, suffix, replace=False): """ Adds the specified *suffix* to the *subject*. If *replace* is True, the old suffix will be removed first. If *suffix* is callable, it must accept exactly one argument and return a modified value. """ if not suffix and not replace: return subject if replace: subject = rmvsuffix(subject) if suffix and callable(suffix): subject = suffix(subject) elif suffix: subject += suffix return subject
[ "def", "addsuffix", "(", "subject", ",", "suffix", ",", "replace", "=", "False", ")", ":", "if", "not", "suffix", "and", "not", "replace", ":", "return", "subject", "if", "replace", ":", "subject", "=", "rmvsuffix", "(", "subject", ")", "if", "suffix", "and", "callable", "(", "suffix", ")", ":", "subject", "=", "suffix", "(", "subject", ")", "elif", "suffix", ":", "subject", "+=", "suffix", "return", "subject" ]
Adds the specified *suffix* to the *subject*. If *replace* is True, the old suffix will be removed first. If *suffix* is callable, it must accept exactly one argument and return a modified value.
[ "Adds", "the", "specified", "*", "suffix", "*", "to", "the", "*", "subject", "*", ".", "If", "*", "replace", "*", "is", "True", "the", "old", "suffix", "will", "be", "removed", "first", ".", "If", "*", "suffix", "*", "is", "callable", "it", "must", "accept", "exactly", "one", "argument", "and", "return", "a", "modified", "value", "." ]
python
train
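Expected behaviour per the docstring, sketched under the assumption that the companion rmvsuffix() strips the old extension when replace=True:

from nr.path import addsuffix

addsuffix('archive.tar', '.gz')               # -> 'archive.tar.gz'
addsuffix('notes.txt', '.md', replace=True)   # -> 'notes.md'
addsuffix('photo.jpg', lambda s: s + '.bak')  # -> 'photo.jpg.bak'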
lsst-sqre/lander
lander/ltdclient.py
https://github.com/lsst-sqre/lander/blob/5e4f6123e48b451ba21963724ace0dc59798618e/lander/ltdclient.py#L35-L42
def get_keeper_token(base_url, username, password): """Get a temporary auth token from LTD Keeper.""" token_endpoint = base_url + '/token' r = requests.get(token_endpoint, auth=(username, password)) if r.status_code != 200: raise RuntimeError('Could not authenticate to {0}: error {1:d}\n{2}'. format(base_url, r.status_code, r.json())) return r.json()['token']
[ "def", "get_keeper_token", "(", "base_url", ",", "username", ",", "password", ")", ":", "token_endpoint", "=", "base_url", "+", "'/token'", "r", "=", "requests", ".", "get", "(", "token_endpoint", ",", "auth", "=", "(", "username", ",", "password", ")", ")", "if", "r", ".", "status_code", "!=", "200", ":", "raise", "RuntimeError", "(", "'Could not authenticate to {0}: error {1:d}\\n{2}'", ".", "format", "(", "base_url", ",", "r", ".", "status_code", ",", "r", ".", "json", "(", ")", ")", ")", "return", "r", ".", "json", "(", ")", "[", "'token'", "]" ]
Get a temporary auth token from LTD Keeper.
[ "Get", "a", "temporary", "auth", "token", "from", "LTD", "Keeper", "." ]
python
train
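A hypothetical call against a local LTD Keeper instance; the URL and credentials are placeholders.

token = get_keeper_token('http://localhost:5000', 'admin', 's3cr3t')
print(token)  # short-lived token for subsequent Keeper API calls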
touilleMan/mongopatcher
mongopatcher/extensions/flask.py
https://github.com/touilleMan/mongopatcher/blob/eb54b20030afd4dc99dd9c80c7d7a345ec49a916/mongopatcher/extensions/flask.py#L10-L39
def init_patcher(app, db): """ Init mongopatcher for the application :param app: :class:`flask.Flask` app to initialize :param db: :class:`pymongo.MongoClient` to work on .. note: This function must be called before using ``patcher_manager`` """ app.config.setdefault('MONGOPATCHER_PATCHES_DIR', 'patches') app.config.setdefault('MONGOPATCHER_COLLECTION', 'mongopatcher') if not hasattr(app, 'extensions'): app.extensions = {} if 'mongopatcher' not in app.extensions: mp = MongoPatcher(db=db, patches_dir=app.config['MONGOPATCHER_PATCHES_DIR'], collection=app.config['MONGOPATCHER_COLLECTION']) app.extensions['mongopatcher'] = mp else: # Raise an exception if extension already initialized as # potentially new configuration would not be loaded. raise Exception('Extension already initialized') if 'MONGOPATCHER_DATAMODEL_VERSION' not in app.config: # Find last version from patches patches = mp.discover(app.config['MONGOPATCHER_PATCHES_DIR']) last_version = patches[-1].target_version if patches else '1.0.0' app.config.setdefault('MONGOPATCHER_DATAMODEL_VERSION', last_version) mp.__class__.need_upgrade = need_upgrade mp.app_datamodel_version = app.config['MONGOPATCHER_DATAMODEL_VERSION'] return mp
[ "def", "init_patcher", "(", "app", ",", "db", ")", ":", "app", ".", "config", ".", "setdefault", "(", "'MONGOPATCHER_PATCHES_DIR'", ",", "'patches'", ")", "app", ".", "config", ".", "setdefault", "(", "'MONGOPATCHER_COLLECTION'", ",", "'mongopatcher'", ")", "if", "not", "hasattr", "(", "app", ",", "'extensions'", ")", ":", "app", ".", "extensions", "=", "{", "}", "if", "'mongopatcher'", "not", "in", "app", ".", "extensions", ":", "mp", "=", "MongoPatcher", "(", "db", "=", "db", ",", "patches_dir", "=", "app", ".", "config", "[", "'MONGOPATCHER_PATCHES_DIR'", "]", ",", "collection", "=", "app", ".", "config", "[", "'MONGOPATCHER_COLLECTION'", "]", ")", "app", ".", "extensions", "[", "'mongopatcher'", "]", "=", "mp", "else", ":", "# Raise an exception if extension already initialized as", "# potentially new configuration would not be loaded.", "raise", "Exception", "(", "'Extension already initialized'", ")", "if", "'MONGOPATCHER_DATAMODEL_VERSION'", "not", "in", "app", ".", "config", ":", "# Find last version from patches", "patches", "=", "mp", ".", "discover", "(", "app", ".", "config", "[", "'MONGOPATCHER_PATCHES_DIR'", "]", ")", "last_version", "=", "patches", "[", "-", "1", "]", ".", "target_version", "if", "patches", "else", "'1.0.0'", "app", ".", "config", ".", "setdefault", "(", "'MONGOPATCHER_DATAMODEL_VERSION'", ",", "last_version", ")", "mp", ".", "__class__", ".", "need_upgrade", "=", "need_upgrade", "mp", ".", "app_datamodel_version", "=", "app", ".", "config", "[", "'MONGOPATCHER_DATAMODEL_VERSION'", "]", "return", "mp" ]
Init mongopatcher for the application :param app: :class:`flask.Flask` app to initialize :param db: :class:`pymongo.MongoClient` to work on .. note: This function must be called before using ``patcher_manager``
[ "Init", "mongopatcher", "for", "the", "application" ]
python
train
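Typical wiring, assuming a Flask app plus a pymongo database handle; the database name is a placeholder.

from flask import Flask
from pymongo import MongoClient

app = Flask(__name__)
db = MongoClient()['mydb']
mp = init_patcher(app, db)  # must run before using the patcher manager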
senaite/senaite.core
bika/lims/api/__init__.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/api/__init__.py#L1134-L1147
def normalize_id(string): """Normalize the id :param string: A string to normalize :type string: str :returns: Normalized ID :rtype: str """ if not isinstance(string, basestring): fail("Type of argument must be string, found '{}'" .format(type(string))) # get the id normalizer utility normalizer = getUtility(IIDNormalizer).normalize return normalizer(string)
[ "def", "normalize_id", "(", "string", ")", ":", "if", "not", "isinstance", "(", "string", ",", "basestring", ")", ":", "fail", "(", "\"Type of argument must be string, found '{}'\"", ".", "format", "(", "type", "(", "string", ")", ")", ")", "# get the id nomalizer utility", "normalizer", "=", "getUtility", "(", "IIDNormalizer", ")", ".", "normalize", "return", "normalizer", "(", "string", ")" ]
Normalize the id :param string: A string to normalize :type string: str :returns: Normalized ID :rtype: str
[ "Normalize", "the", "id" ]
python
train
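Inside a Plone/senaite site, where the IIDNormalizer utility is registered, the call turns an arbitrary title into a URL-safe id; the output shown is the normalizer's typical behaviour, not guaranteed.

from bika.lims.api import normalize_id

normalize_id('My Sample Title!')  # -> 'my-sample-title' (typical)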
boakley/robotframework-hub
rfhub/kwdb.py
https://github.com/boakley/robotframework-hub/blob/f3dc7562fe6218a7b8d7aac7b9ef234e1a573f7c/rfhub/kwdb.py#L230-L266
def add_installed_libraries(self, extra_libs = ["Selenium2Library", "SudsLibrary", "RequestsLibrary"]): """Add any installed libraries that we can find We do this by looking in the `libraries` folder where robot is installed. If you have libraries installed in a non-standard place, this won't pick them up. """ libdir = os.path.dirname(robot.libraries.__file__) loaded = [] for filename in os.listdir(libdir): if filename.endswith(".py") or filename.endswith(".pyc"): libname, ext = os.path.splitext(filename) if (libname.lower() not in loaded and not self._should_ignore(libname)): try: self.add(libname) loaded.append(libname.lower()) except Exception as e: # need a better way to log this... self.log.debug("unable to add library: " + str(e)) # I hate how I implemented this, but I don't think there's # any way to find out which installed python packages are # robot libraries. for library in extra_libs: if (library.lower() not in loaded and not self._should_ignore(library)): try: self.add(library) loaded.append(library.lower()) except Exception as e: self.log.debug("unable to add external library %s: %s" % \ (library, str(e)))
[ "def", "add_installed_libraries", "(", "self", ",", "extra_libs", "=", "[", "\"Selenium2Library\"", ",", "\"SudsLibrary\"", ",", "\"RequestsLibrary\"", "]", ")", ":", "libdir", "=", "os", ".", "path", ".", "dirname", "(", "robot", ".", "libraries", ".", "__file__", ")", "loaded", "=", "[", "]", "for", "filename", "in", "os", ".", "listdir", "(", "libdir", ")", ":", "if", "filename", ".", "endswith", "(", "\".py\"", ")", "or", "filename", ".", "endswith", "(", "\".pyc\"", ")", ":", "libname", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "if", "(", "libname", ".", "lower", "(", ")", "not", "in", "loaded", "and", "not", "self", ".", "_should_ignore", "(", "libname", ")", ")", ":", "try", ":", "self", ".", "add", "(", "libname", ")", "loaded", ".", "append", "(", "libname", ".", "lower", "(", ")", ")", "except", "Exception", "as", "e", ":", "# need a better way to log this...", "self", ".", "log", ".", "debug", "(", "\"unable to add library: \"", "+", "str", "(", "e", ")", ")", "# I hate how I implemented this, but I don't think there's", "# any way to find out which installed python packages are", "# robot libraries.", "for", "library", "in", "extra_libs", ":", "if", "(", "library", ".", "lower", "(", ")", "not", "in", "loaded", "and", "not", "self", ".", "_should_ignore", "(", "library", ")", ")", ":", "try", ":", "self", ".", "add", "(", "library", ")", "loaded", ".", "append", "(", "library", ".", "lower", "(", ")", ")", "except", "Exception", "as", "e", ":", "self", ".", "log", ".", "debug", "(", "\"unable to add external library %s: %s\"", "%", "(", "library", ",", "str", "(", "e", ")", ")", ")" ]
Add any installed libraries that we can find We do this by looking in the `libraries` folder where robot is installed. If you have libraries installed in a non-standard place, this won't pick them up.
[ "Add", "any", "installed", "libraries", "that", "we", "can", "find" ]
python
train
lablup/backend.ai-common
src/ai/backend/common/utils.py
https://github.com/lablup/backend.ai-common/blob/20b3a2551ee5bb3b88e7836471bc244a70ad0ae6/src/ai/backend/common/utils.py#L43-L52
def dict2kvlist(o): ''' Serializes a dict-like object into a generator of the flattened list of repeating key-value pairs. It is useful when using the HMSET method in Redis. Example: >>> list(dict2kvlist({'a': 1, 'b': 2})) ['a', 1, 'b', 2] ''' return chain.from_iterable((k, v) for k, v in o.items())
[ "def", "dict2kvlist", "(", "o", ")", ":", "return", "chain", ".", "from_iterable", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "o", ".", "items", "(", ")", ")" ]
Serializes a dict-like object into a generator of the flattened list of repeating key-value pairs. It is useful when using the HMSET method in Redis. Example: >>> list(dict2kvlist({'a': 1, 'b': 2})) ['a', 1, 'b', 2]
[ "Serializes", "a", "dict", "-", "like", "object", "into", "a", "generator", "of", "the", "flatten", "list", "of", "repeating", "key", "-", "value", "pairs", ".", "It", "is", "useful", "when", "using", "HMSET", "method", "in", "Redis", "." ]
python
train
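The doctest above already shows the flattening; the usual consumer is a Redis HMSET call, sketched here with a placeholder redis-py client `r`.

from ai.backend.common.utils import dict2kvlist

print(list(dict2kvlist({'a': 1, 'b': 2})))  # ['a', 1, 'b', 2]
# r.execute_command('HMSET', 'mykey', *dict2kvlist({'a': 1, 'b': 2}))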
PyFilesystem/pyfilesystem2
fs/copy.py
https://github.com/PyFilesystem/pyfilesystem2/blob/047f3593f297d1442194cda3da7a7335bcc9c14a/fs/copy.py#L22-L47
def copy_fs( src_fs, # type: Union[FS, Text] dst_fs, # type: Union[FS, Text] walker=None, # type: Optional[Walker] on_copy=None, # type: Optional[_OnCopy] workers=0, # type: int ): # type: (...) -> None """Copy the contents of one filesystem to another. Arguments: src_fs (FS or str): Source filesystem (URL or instance). dst_fs (FS or str): Destination filesystem (URL or instance). walker (~fs.walk.Walker, optional): A walker object that will be used to scan for files in ``src_fs``. Set this if you only want to consider a sub-set of the resources in ``src_fs``. on_copy (callable): A function callback called after a single file copy is executed. Expected signature is ``(src_fs, src_path, dst_fs, dst_path)``. workers (int): Use `worker` threads to copy data, or ``0`` (default) for a single-threaded copy. """ return copy_dir( src_fs, "/", dst_fs, "/", walker=walker, on_copy=on_copy, workers=workers )
[ "def", "copy_fs", "(", "src_fs", ",", "# type: Union[FS, Text]", "dst_fs", ",", "# type: Union[FS, Text]", "walker", "=", "None", ",", "# type: Optional[Walker]", "on_copy", "=", "None", ",", "# type: Optional[_OnCopy]", "workers", "=", "0", ",", "# type: int", ")", ":", "# type: (...) -> None", "return", "copy_dir", "(", "src_fs", ",", "\"/\"", ",", "dst_fs", ",", "\"/\"", ",", "walker", "=", "walker", ",", "on_copy", "=", "on_copy", ",", "workers", "=", "workers", ")" ]
Copy the contents of one filesystem to another. Arguments: src_fs (FS or str): Source filesystem (URL or instance). dst_fs (FS or str): Destination filesystem (URL or instance). walker (~fs.walk.Walker, optional): A walker object that will be used to scan for files in ``src_fs``. Set this if you only want to consider a sub-set of the resources in ``src_fs``. on_copy (callable): A function callback called after a single file copy is executed. Expected signature is ``(src_fs, src_path, dst_fs, dst_path)``. workers (int): Use `worker` threads to copy data, or ``0`` (default) for a single-threaded copy.
[ "Copy", "the", "contents", "of", "one", "filesystem", "to", "another", "." ]
python
train
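A short sketch: both arguments accept FS URLs as well as FS instances, so mirroring the working directory into an in-memory filesystem is one call.

from fs.copy import copy_fs

copy_fs('.', 'mem://', workers=4)  # threaded copy into a MemoryFS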
apache/spark
python/pyspark/ml/feature.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/feature.py#L3293-L3302
def findSynonyms(self, word, num): """ Find "num" number of words closest in similarity to "word". word can be a string or vector representation. Returns a dataframe with two fields word and similarity (which gives the cosine similarity). """ if not isinstance(word, basestring): word = _convert_to_vector(word) return self._call_java("findSynonyms", word, num)
[ "def", "findSynonyms", "(", "self", ",", "word", ",", "num", ")", ":", "if", "not", "isinstance", "(", "word", ",", "basestring", ")", ":", "word", "=", "_convert_to_vector", "(", "word", ")", "return", "self", ".", "_call_java", "(", "\"findSynonyms\"", ",", "word", ",", "num", ")" ]
Find "num" number of words closest in similarity to "word". word can be a string or vector representation. Returns a dataframe with two fields word and similarity (which gives the cosine similarity).
[ "Find", "num", "number", "of", "words", "closest", "in", "similarity", "to", "word", ".", "word", "can", "be", "a", "string", "or", "vector", "representation", ".", "Returns", "a", "dataframe", "with", "two", "fields", "word", "and", "similarity", "(", "which", "gives", "the", "cosine", "similarity", ")", "." ]
python
train
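A minimal sketch mirroring the pyspark docs: fit a tiny Word2Vec model, then query it. The corpus and vector size are placeholders.

from pyspark.ml.feature import Word2Vec
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
doc = spark.createDataFrame([("a b c".split(),), ("a b b c a".split(),)],
                            ["sentence"])
model = Word2Vec(vectorSize=5, seed=42, inputCol="sentence",
                 outputCol="vec").fit(doc)
model.findSynonyms("a", 2).show()  # columns: word, similarity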
F5Networks/f5-common-python
f5/bigip/tm/gtm/monitor.py
https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5/bigip/tm/gtm/monitor.py#L423-L444
def update(self, **kwargs): """Change the configuration of the resource on the device. This method uses HTTP PUT to alter the service state on the device. The attributes of the instance will be packaged as a dictionary. That dictionary will be updated with kwargs. It is then submitted as JSON to the device. Various edge cases are handled: * read-only attributes that are unchangeable are removed * ``tmCommand`` attribute removed prior to PUT * ``agent`` attribute removed prior to PUT * ``method`` attribute removed prior to PUT :param kwargs: keys and associated values to alter on the device """ self.__dict__.pop('tmCommand', '') self.__dict__.pop('agent', '') self.__dict__.pop('method', '') super(Real_Server, self).update(**kwargs)
[ "def", "update", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "__dict__", ".", "pop", "(", "'tmCommand'", ",", "''", ")", "self", ".", "__dict__", ".", "pop", "(", "'agent'", ",", "''", ")", "self", ".", "__dict__", ".", "pop", "(", "'method'", ",", "''", ")", "super", "(", "Real_Server", ",", "self", ")", ".", "update", "(", "*", "*", "kwargs", ")" ]
Change the configuration of the resource on the device. This method uses HTTP PUT to alter the service state on the device. The attributes of the instance will be packaged as a dictionary. That dictionary will be updated with kwargs. It is then submitted as JSON to the device. Various edge cases are handled: * read-only attributes that are unchangeable are removed * ``tmCommand`` attribute removed prior to PUT * ``agent`` attribute removed prior to PUT * ``method`` attribute removed prior to PUT :param kwargs: keys and associated values to alter on the device
[ "Change", "the", "configuration", "of", "the", "resource", "on", "the", "device", "." ]
python
train
log2timeline/plaso
plaso/parsers/olecf.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/olecf.py#L44-L103
def ParseFileObject(self, parser_mediator, file_object): """Parses an OLE Compound File (OLECF) file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object. """ olecf_file = pyolecf.file() olecf_file.set_ascii_codepage(parser_mediator.codepage) try: olecf_file.open_file_object(file_object) except IOError as exception: parser_mediator.ProduceExtractionWarning( 'unable to open file with error: {0!s}'.format(exception)) return root_item = olecf_file.root_item if not root_item: return # Get a list of all items in the root item from the OLECF file. item_names = [item.name for item in root_item.sub_items] # Compare the list of available plugin objects. # We will try to use every plugin against the file (except # the default plugin) and run it. Only if none of the plugins # works will we use the default plugin. item_names = frozenset(item_names) try: for plugin in self._plugins: if parser_mediator.abort: break if not plugin.REQUIRED_ITEMS.issubset(item_names): continue try: plugin.UpdateChainAndProcess(parser_mediator, root_item=root_item) except Exception as exception: # pylint: disable=broad-except parser_mediator.ProduceExtractionWarning(( 'plugin: {0:s} unable to parse OLECF file with error: ' '{1!s}').format(plugin.NAME, exception)) if self._default_plugin and not parser_mediator.abort: try: self._default_plugin.UpdateChainAndProcess( parser_mediator, root_item=root_item) except Exception as exception: # pylint: disable=broad-except parser_mediator.ProduceExtractionWarning(( 'plugin: {0:s} unable to parse OLECF file with error: ' '{1!s}').format(self._default_plugin.NAME, exception)) finally: olecf_file.close()
[ "def", "ParseFileObject", "(", "self", ",", "parser_mediator", ",", "file_object", ")", ":", "olecf_file", "=", "pyolecf", ".", "file", "(", ")", "olecf_file", ".", "set_ascii_codepage", "(", "parser_mediator", ".", "codepage", ")", "try", ":", "olecf_file", ".", "open_file_object", "(", "file_object", ")", "except", "IOError", "as", "exception", ":", "parser_mediator", ".", "ProduceExtractionWarning", "(", "'unable to open file with error: {0!s}'", ".", "format", "(", "exception", ")", ")", "return", "root_item", "=", "olecf_file", ".", "root_item", "if", "not", "root_item", ":", "return", "# Get a list of all items in the root item from the OLECF file.", "item_names", "=", "[", "item", ".", "name", "for", "item", "in", "root_item", ".", "sub_items", "]", "# Compare the list of available plugin objects.", "# We will try to use every plugin against the file (except", "# the default plugin) and run it. Only if none of the plugins", "# works will we use the default plugin.", "item_names", "=", "frozenset", "(", "item_names", ")", "try", ":", "for", "plugin", "in", "self", ".", "_plugins", ":", "if", "parser_mediator", ".", "abort", ":", "break", "if", "not", "plugin", ".", "REQUIRED_ITEMS", ".", "issubset", "(", "item_names", ")", ":", "continue", "try", ":", "plugin", ".", "UpdateChainAndProcess", "(", "parser_mediator", ",", "root_item", "=", "root_item", ")", "except", "Exception", "as", "exception", ":", "# pylint: disable=broad-except", "parser_mediator", ".", "ProduceExtractionWarning", "(", "(", "'plugin: {0:s} unable to parse OLECF file with error: '", "'{1!s}'", ")", ".", "format", "(", "plugin", ".", "NAME", ",", "exception", ")", ")", "if", "self", ".", "_default_plugin", "and", "not", "parser_mediator", ".", "abort", ":", "try", ":", "self", ".", "_default_plugin", ".", "UpdateChainAndProcess", "(", "parser_mediator", ",", "root_item", "=", "root_item", ")", "except", "Exception", "as", "exception", ":", "# pylint: disable=broad-except", "parser_mediator", ".", "ProduceExtractionWarning", "(", "(", "'plugin: {0:s} unable to parse OLECF file with error: '", "'{1!s}'", ")", ".", "format", "(", "self", ".", "_default_plugin", ".", "NAME", ",", "exception", ")", ")", "finally", ":", "olecf_file", ".", "close", "(", ")" ]
Parses an OLE Compound File (OLECF) file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object.
[ "Parses", "an", "OLE", "Compound", "File", "(", "OLECF", ")", "file", "-", "like", "object", "." ]
python
train
vertexproject/synapse
synapse/datamodel.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/datamodel.py#L389-L411
def addDataModels(self, mods): ''' Adds a model definition (same format as input to Model.addDataModels and output of Model.getModelDef). ''' # Load all the universal properties for _, mdef in mods: for univname, _, _ in mdef.get('univs', ()): self.addUnivName(univname) # Load all the forms for _, mdef in mods: for formname, formopts, propdefs in mdef.get('forms', ()): self.formnames.add(formname) self.propnames.add(formname) for univname in self.univnames: full = f'{formname}{univname}' self.propnames.add(full) for propname, _, _ in propdefs: full = f'{formname}:{propname}' self.propnames.add(full)
[ "def", "addDataModels", "(", "self", ",", "mods", ")", ":", "# Load all the universal properties", "for", "_", ",", "mdef", "in", "mods", ":", "for", "univname", ",", "_", ",", "_", "in", "mdef", ".", "get", "(", "'univs'", ",", "(", ")", ")", ":", "self", ".", "addUnivName", "(", "univname", ")", "# Load all the forms", "for", "_", ",", "mdef", "in", "mods", ":", "for", "formname", ",", "formopts", ",", "propdefs", "in", "mdef", ".", "get", "(", "'forms'", ",", "(", ")", ")", ":", "self", ".", "formnames", ".", "add", "(", "formname", ")", "self", ".", "propnames", ".", "add", "(", "formname", ")", "for", "univname", "in", "self", ".", "univnames", ":", "full", "=", "f'{formname}{univname}'", "self", ".", "propnames", ".", "add", "(", "full", ")", "for", "propname", ",", "_", ",", "_", "in", "propdefs", ":", "full", "=", "f'{formname}:{propname}'", "self", ".", "propnames", ".", "add", "(", "full", ")" ]
Adds a model definition (same format as input to Model.addDataModels and output of Model.getModelDef).
[ "Adds", "a", "model", "definition", "(", "same", "format", "as", "input", "to", "Model", ".", "addDataModels", "and", "output", "of", "Model", ".", "getModelDef", ")", "." ]
python
train
scanny/python-pptx
lab/parse_xsd/parse_xsd.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/lab/parse_xsd/parse_xsd.py#L29-L36
def pfxdtag(tag): """ Return short-form prefixed tag from fully qualified (Clark notation) tagname. """ uri, tagroot = tag[1:].split('}') prefix = reverse_nsmap[uri] return '%s:%s' % (prefix, tagroot)
[ "def", "pfxdtag", "(", "tag", ")", ":", "uri", ",", "tagroot", "=", "tag", "[", "1", ":", "]", ".", "split", "(", "'}'", ")", "prefix", "=", "reverse_nsmap", "[", "uri", "]", "return", "'%s:%s'", "%", "(", "prefix", ",", "tagroot", ")" ]
Return short-form prefixed tag from fully qualified (Clark notation) tagname.
[ "Return", "short", "-", "form", "prefixed", "tag", "from", "fully", "qualified", "(", "Clark", "notation", ")", "tagname", "." ]
python
train
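A sketch of the round trip, assuming the module-level reverse_nsmap maps the PresentationML namespace to the 'p' prefix:

tag = '{http://schemas.openxmlformats.org/presentationml/2006/main}sld'
print(pfxdtag(tag))  # -> 'p:sld'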
coin-or/GiMPy
src/gimpy/graph.py
https://github.com/coin-or/GiMPy/blob/51853122a50eb6019d06bbdedbfc396a833b5a22/src/gimpy/graph.py#L603-L630
def edge_to_string(self, e): ''' API: edge_to_string(self, e) Description: Return string that represents edge e in dot language. Input: e: Edge tuple in (source,sink) format. Pre: Graph should have this edge. Return: String that represents given edge. ''' edge = list() edge.append(quote_if_necessary(str(e[0]))) edge.append(self.edge_connect_symbol) edge.append(quote_if_necessary(str(e[1]))) # return if there is nothing in self.edge_attr[e] if len(self.edge_attr[e]) == 0: return ''.join(edge) edge.append(' [') for a in self.edge_attr[e]: edge.append(a) edge.append('=') edge.append(quote_if_necessary(str(self.edge_attr[e][a]))) edge.append(', ') edge = edge[:-1] edge.append(']') return ''.join(edge)
[ "def", "edge_to_string", "(", "self", ",", "e", ")", ":", "edge", "=", "list", "(", ")", "edge", ".", "append", "(", "quote_if_necessary", "(", "str", "(", "e", "[", "0", "]", ")", ")", ")", "edge", ".", "append", "(", "self", ".", "edge_connect_symbol", ")", "edge", ".", "append", "(", "quote_if_necessary", "(", "str", "(", "e", "[", "1", "]", ")", ")", ")", "# return if there is nothing in self.edge_attr[e]", "if", "len", "(", "self", ".", "edge_attr", "[", "e", "]", ")", "is", "0", ":", "return", "''", ".", "join", "(", "edge", ")", "edge", ".", "append", "(", "' ['", ")", "for", "a", "in", "self", ".", "edge_attr", "[", "e", "]", ":", "edge", ".", "append", "(", "a", ")", "edge", ".", "append", "(", "'='", ")", "edge", ".", "append", "(", "quote_if_necessary", "(", "str", "(", "self", ".", "edge_attr", "[", "e", "]", "[", "a", "]", ")", ")", ")", "edge", ".", "append", "(", "', '", ")", "edge", "=", "edge", "[", ":", "-", "1", "]", "edge", ".", "append", "(", "']'", ")", "return", "''", ".", "join", "(", "edge", ")" ]
API: edge_to_string(self, e) Description: Return string that represents edge e in dot language. Input: e: Edge tuple in (source,sink) format. Pre: Graph should have this edge. Return: String that represents given edge.
[ "API", ":", "edge_to_string", "(", "self", "e", ")", "Description", ":", "Return", "string", "that", "represents", "edge", "e", "in", "dot", "language", ".", "Input", ":", "e", ":", "Edge", "tuple", "in", "(", "source", "sink", ")", "format", ".", "Pre", ":", "Graph", "should", "have", "this", "edge", ".", "Return", ":", "String", "that", "represents", "given", "edge", "." ]
python
train
inspirehep/harvesting-kit
harvestingkit/inspire_cds_package/base.py
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/base.py#L200-L217
def update_subject_categories(self, primary, secondary, kb): """650 Translate Categories.""" category_fields = record_get_field_instances(self.record, tag='650', ind1='1', ind2='7') record_delete_fields(self.record, "650") for field in category_fields: for idx, (key, value) in enumerate(field[0]): if key == 'a': new_value = self.get_config_item(value, kb) if new_value != value: new_subs = [('2', secondary), ('a', new_value)] else: new_subs = [('2', primary), ('a', value)] record_add_field(self.record, "650", ind1="1", ind2="7", subfields=new_subs) break
[ "def", "update_subject_categories", "(", "self", ",", "primary", ",", "secondary", ",", "kb", ")", ":", "category_fields", "=", "record_get_field_instances", "(", "self", ".", "record", ",", "tag", "=", "'650'", ",", "ind1", "=", "'1'", ",", "ind2", "=", "'7'", ")", "record_delete_fields", "(", "self", ".", "record", ",", "\"650\"", ")", "for", "field", "in", "category_fields", ":", "for", "idx", ",", "(", "key", ",", "value", ")", "in", "enumerate", "(", "field", "[", "0", "]", ")", ":", "if", "key", "==", "'a'", ":", "new_value", "=", "self", ".", "get_config_item", "(", "value", ",", "kb", ")", "if", "new_value", "!=", "value", ":", "new_subs", "=", "[", "(", "'2'", ",", "secondary", ")", ",", "(", "'a'", ",", "new_value", ")", "]", "else", ":", "new_subs", "=", "[", "(", "'2'", ",", "primary", ")", ",", "(", "'a'", ",", "value", ")", "]", "record_add_field", "(", "self", ".", "record", ",", "\"650\"", ",", "ind1", "=", "\"1\"", ",", "ind2", "=", "\"7\"", ",", "subfields", "=", "new_subs", ")", "break" ]
650 Translate Categories.
[ "650", "Translate", "Categories", "." ]
python
valid
hyperledger/sawtooth-core
validator/sawtooth_validator/config/logs.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/config/logs.py#L36-L57
def _get_config(): """Determines if there is a log config in the config directory and returns it. If it does not exist, return None. Returns: log_config (dict): The dictionary to pass to logging.config.dictConfig """ conf_file = os.path.join(_get_config_dir(), 'log_config.toml') if os.path.exists(conf_file): with open(conf_file) as fd: raw_config = fd.read() log_config = toml.loads(raw_config) return log_config conf_file = os.path.join(_get_config_dir(), 'log_config.yaml') if os.path.exists(conf_file): with open(conf_file) as fd: raw_config = fd.read() log_config = yaml.safe_load(raw_config) return log_config return None
[ "def", "_get_config", "(", ")", ":", "conf_file", "=", "os", ".", "path", ".", "join", "(", "_get_config_dir", "(", ")", ",", "'log_config.toml'", ")", "if", "os", ".", "path", ".", "exists", "(", "conf_file", ")", ":", "with", "open", "(", "conf_file", ")", "as", "fd", ":", "raw_config", "=", "fd", ".", "read", "(", ")", "log_config", "=", "toml", ".", "loads", "(", "raw_config", ")", "return", "log_config", "conf_file", "=", "os", ".", "path", ".", "join", "(", "_get_config_dir", "(", ")", ",", "'log_config.yaml'", ")", "if", "os", ".", "path", ".", "exists", "(", "conf_file", ")", ":", "with", "open", "(", "conf_file", ")", "as", "fd", ":", "raw_config", "=", "fd", ".", "read", "(", ")", "log_config", "=", "yaml", ".", "safe_load", "(", "raw_config", ")", "return", "log_config", "return", "None" ]
Determines if there is a log config in the config directory and returns it. If it does not exist, return None. Returns: log_config (dict): The dictionary to pass to logging.config.dictConfig
[ "Determines", "if", "there", "is", "a", "log", "config", "in", "the", "config", "directory", "and", "returns", "it", ".", "If", "it", "does", "not", "exist", "return", "None", "." ]
python
train
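The returned dictionary is meant for logging.config.dictConfig, so the caller side is a few lines (a sketch, assuming the private function is in scope):

import logging.config

log_config = _get_config()  # None when neither .toml nor .yaml exists
if log_config is not None:
    logging.config.dictConfig(log_config)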
juga0/dhcpcanon
dhcpcanon/dhcpcapfsm.py
https://github.com/juga0/dhcpcanon/blob/9f51a29e57fe93dc93fb22bb0ed12fcfe9557e59/dhcpcanon/dhcpcapfsm.py#L672-L680
def on_renewing(self): """Action on renewing on RENEWING state. Not recording lease, but restarting timers. """ self.client.lease.sanitize_net_values() self.client.lease.set_times(self.time_sent_request) self.set_timers()
[ "def", "on_renewing", "(", "self", ")", ":", "self", ".", "client", ".", "lease", ".", "sanitize_net_values", "(", ")", "self", ".", "client", ".", "lease", ".", "set_times", "(", "self", ".", "time_sent_request", ")", "self", ".", "set_timers", "(", ")" ]
Action on renewing on RENEWING state. Not recording lease, but restarting timers.
[ "Action", "on", "renewing", "on", "RENEWING", "state", "." ]
python
test
ebu/PlugIt
plugit_proxy/views.py
https://github.com/ebu/PlugIt/blob/de5f1e870f67caaef7a4a58e4bb1ed54d9c5dc53/plugit_proxy/views.py#L311-L343
def build_base_parameters(request): """Build the list of parameters to forward from the post and get parameters""" getParameters = {} postParameters = {} files = {} # Copy GET parameters, excluding ebuio_* for v in request.GET: if v[:6] != 'ebuio_': val = request.GET.getlist(v) if len(val) == 1: getParameters[v] = val[0] else: getParameters[v] = val # If using post, copy post parameters and files. Excluding ebuio_* if request.method == 'POST': for v in request.POST: if v[:6] != 'ebuio_': val = request.POST.getlist(v) if len(val) == 1: postParameters[v] = val[0] else: postParameters[v] = val for v in request.FILES: if v[:6] != 'ebuio_': files[v] = request.FILES[v] # .chunks() return (getParameters, postParameters, files)
[ "def", "build_base_parameters", "(", "request", ")", ":", "getParameters", "=", "{", "}", "postParameters", "=", "{", "}", "files", "=", "{", "}", "# Copy GET parameters, excluding ebuio_*", "for", "v", "in", "request", ".", "GET", ":", "if", "v", "[", ":", "6", "]", "!=", "'ebuio_'", ":", "val", "=", "request", ".", "GET", ".", "getlist", "(", "v", ")", "if", "len", "(", "val", ")", "==", "1", ":", "getParameters", "[", "v", "]", "=", "val", "[", "0", "]", "else", ":", "getParameters", "[", "v", "]", "=", "val", "# If using post, copy post parameters and files. Excluding ebuio_*", "if", "request", ".", "method", "==", "'POST'", ":", "for", "v", "in", "request", ".", "POST", ":", "if", "v", "[", ":", "6", "]", "!=", "'ebuio_'", ":", "val", "=", "request", ".", "POST", ".", "getlist", "(", "v", ")", "if", "len", "(", "val", ")", "==", "1", ":", "postParameters", "[", "v", "]", "=", "val", "[", "0", "]", "else", ":", "postParameters", "[", "v", "]", "=", "val", "for", "v", "in", "request", ".", "FILES", ":", "if", "v", "[", ":", "6", "]", "!=", "'ebuio_'", ":", "files", "[", "v", "]", "=", "request", ".", "FILES", "[", "v", "]", "# .chunks()", "return", "(", "getParameters", ",", "postParameters", ",", "files", ")" ]
Build the list of parameters to forward from the post and get parameters
[ "Build", "the", "list", "of", "parameters", "to", "forward", "from", "the", "post", "and", "get", "parameters" ]
python
train
kpdyer/regex2dfa
third_party/re2/re2/make_unicode_groups.py
https://github.com/kpdyer/regex2dfa/blob/109f877e60ef0dfcb430f11516d215930b7b9936/third_party/re2/re2/make_unicode_groups.py#L30-L40
def MakeRanges(codes): """Turn a list like [1,2,3,7,8,9] into a range list [[1,3], [7,9]]""" ranges = [] last = -100 for c in codes: if c == last+1: ranges[-1][1] = c else: ranges.append([c, c]) last = c return ranges
[ "def", "MakeRanges", "(", "codes", ")", ":", "ranges", "=", "[", "]", "last", "=", "-", "100", "for", "c", "in", "codes", ":", "if", "c", "==", "last", "+", "1", ":", "ranges", "[", "-", "1", "]", "[", "1", "]", "=", "c", "else", ":", "ranges", ".", "append", "(", "[", "c", ",", "c", "]", ")", "last", "=", "c", "return", "ranges" ]
Turn a list like [1,2,3,7,8,9] into a range list [[1,3], [7,9]]
[ "Turn", "a", "list", "like", "[", "1", "2", "3", "7", "8", "9", "]", "into", "a", "range", "list", "[[", "1", "3", "]", "[", "7", "9", "]]" ]
python
train
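Quick checks matching the docstring:

print(MakeRanges([1, 2, 3, 7, 8, 9]))  # [[1, 3], [7, 9]]
print(MakeRanges([5]))                 # [[5, 5]]
print(MakeRanges([]))                  # []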
dry-python/dependencies
src/dependencies/contrib/_flask.py
https://github.com/dry-python/dependencies/blob/297912cbc6482ba26b3104729645f3a2aba5facc/src/dependencies/contrib/_flask.py#L7-L12
def method_view(injector): """Create Flask method based dispatching view from injector class.""" handler = create_handler(MethodView) apply_http_methods(handler, injector) return injector.let(as_view=handler.as_view)
[ "def", "method_view", "(", "injector", ")", ":", "handler", "=", "create_handler", "(", "MethodView", ")", "apply_http_methods", "(", "handler", ",", "injector", ")", "return", "injector", ".", "let", "(", "as_view", "=", "handler", ".", "as_view", ")" ]
Create Flask method based dispatching view from injector class.
[ "Create", "Flask", "method", "based", "dispatching", "view", "from", "injector", "class", "." ]
python
test
oseledets/ttpy
tt/optimize/tt_min.py
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/optimize/tt_min.py#L147-L261
def min_tens(tens, rmax=10, nswp=10, verb=True, smooth_fun=None): """Find (approximate) minimal element in a TT-tensor.""" if smooth_fun is None: smooth_fun = lambda p, lam: (math.pi / 2 - np.arctan(p - lam)) d = tens.d Rx = [[]] * (d + 1) # Python list for the interfaces Rx[0] = np.ones((1, 1)) Rx[d] = np.ones((1, 1)) Jy = [np.empty(0, dtype=np.int)] * (d + 1) ry = rmax * np.ones(d + 1, dtype=np.int) ry[0] = 1 ry[d] = 1 n = tens.n elements_seen = 0 phi_left = [np.empty(0)] * (d + 1) phi_left[0] = np.array([1]) phi_right = [np.empty(0)] * (d + 1) phi_right[d] = np.array([1]) cores = tt.tensor.to_list(tens) # Fill initial multiindex J randomly. grid = [np.reshape(range(n[i]), (n[i], 1)) for i in xrange(d)] for i in xrange(d - 1): ry[i + 1] = min(ry[i + 1], n[i] * ry[i]) ind = sorted(np.random.permutation(ry[i] * n[i])[0:ry[i + 1]]) w1 = mkron(np.ones((n[i], 1), dtype=np.int), Jy[i]) w2 = mkron(grid[i], np.ones((ry[i], 1), dtype=np.int)) Jy[i + 1] = np.hstack((w1, w2)) Jy[i + 1] = reshape(Jy[i + 1], (ry[i] * n[i], -1)) Jy[i + 1] = Jy[i + 1][ind, :] phi_left[i + 1] = np.tensordot(phi_left[i], cores[i], 1) phi_left[i + 1] = reshape(phi_left[i + 1], (ry[i] * n[i], -1)) phi_left[i + 1] = phi_left[i + 1][ind, :] swp = 0 dirn = -1 i = d - 1 lm = float('Inf') while swp < nswp: # Right-to-left sweep # The idea: compute the current core; compute the function of it; # Shift locally or globally? Local shift would be the first try # Compute the current core if np.size(Jy[i]) == 0: w1 = np.zeros((ry[i] * n[i] * ry[i + 1], 0), dtype=np.int) else: w1 = mkron(np.ones((n[i] * ry[i + 1], 1), dtype=np.int), Jy[i]) w2 = mkron(mkron(np.ones((ry[i + 1], 1), dtype=np.int), grid[i]), np.ones((ry[i], 1), dtype=np.int)) if np.size(Jy[i + 1]) == 0: w3 = np.zeros((ry[i] * n[i] * ry[i + 1], 0), dtype=np.int) else: w3 = mkron(Jy[i + 1], np.ones((ry[i] * n[i], 1), dtype=np.int)) J = np.hstack((w1, w2, w3)) phi_right[i] = np.tensordot(cores[i], phi_right[i + 1], 1) phi_right[i] = reshape(phi_right[i], (-1, n[i] * ry[i + 1])) cry = np.tensordot( phi_left[i], np.tensordot( cores[i], phi_right[ i + 1], 1), 1) elements_seen += cry.size cry = reshape(cry, (ry[i], n[i], ry[i + 1])) min_cur = np.min(cry.flatten("F")) ind_cur = np.argmin(cry.flatten("F")) if lm > min_cur: lm = min_cur x_full = J[ind_cur, :] val = tens[x_full] if verb: print('New record:', val, 'Point:', x_full, 'elements seen:', elements_seen) cry = smooth_fun(cry, lm) if dirn < 0 and i > 0: cry = reshape(cry, (ry[i], n[i] * ry[i + 1])) cry = cry.T #q, r = np.linalg.qr(cry) u, s, v = mysvd(cry, full_matrices=False) ry[i] = min(ry[i], rmax) q = u[:, :ry[i]] ind = rect_maxvol(q)[0] # maxvol(q) ry[i] = ind.size w1 = mkron(np.ones((ry[i + 1], 1), dtype=np.int), grid[i]) if np.size(Jy[i + 1]) == 0: w2 = np.zeros((n[i] * ry[i + 1], 0), dtype=np.int) else: w2 = mkron(Jy[i + 1], np.ones((n[i], 1), dtype=np.int)) Jy[i] = np.hstack((w1, w2)) Jy[i] = reshape(Jy[i], (n[i] * ry[i + 1], -1)) Jy[i] = Jy[i][ind, :] phi_right[i] = np.tensordot(cores[i], phi_right[i + 1], 1) phi_right[i] = reshape(phi_right[i], (-1, n[i] * ry[i + 1])) phi_right[i] = phi_right[i][:, ind] if dirn > 0 and i < d - 1: cry = reshape(cry, (ry[i] * n[i], ry[i + 1])) q, r = np.linalg.qr(cry) #ind = maxvol(q) ind = rect_maxvol(q)[0] ry[i + 1] = ind.size phi_left[i + 1] = np.tensordot(phi_left[i], cores[i], 1) phi_left[i + 1] = reshape(phi_left[i + 1], (ry[i] * n[i], -1)) phi_left[i + 1] = phi_left[i + 1][ind, :] w1 = mkron(np.ones((n[i], 1), dtype=np.int), Jy[i]) w2 = mkron(grid[i], np.ones((ry[i], 1), dtype=np.int)) Jy[i + 1] = np.hstack((w1, w2)) Jy[i + 1] = reshape(Jy[i + 1], (ry[i] * n[i], -1)) Jy[i + 1] = Jy[i + 1][ind, :] i += dirn if i == d or i == -1: dirn = -dirn i += dirn swp = swp + 1 return val, x_full
[ "def", "min_tens", "(", "tens", ",", "rmax", "=", "10", ",", "nswp", "=", "10", ",", "verb", "=", "True", ",", "smooth_fun", "=", "None", ")", ":", "if", "smooth_fun", "is", "None", ":", "smooth_fun", "=", "lambda", "p", ",", "lam", ":", "(", "math", ".", "pi", "/", "2", "-", "np", ".", "arctan", "(", "p", "-", "lam", ")", ")", "d", "=", "tens", ".", "d", "Rx", "=", "[", "[", "]", "]", "*", "(", "d", "+", "1", ")", "# Python list for the interfaces", "Rx", "[", "0", "]", "=", "np", ".", "ones", "(", "(", "1", ",", "1", ")", ")", "Rx", "[", "d", "]", "=", "np", ".", "ones", "(", "(", "1", ",", "1", ")", ")", "Jy", "=", "[", "np", ".", "empty", "(", "0", ",", "dtype", "=", "np", ".", "int", ")", "]", "*", "(", "d", "+", "1", ")", "ry", "=", "rmax", "*", "np", ".", "ones", "(", "d", "+", "1", ",", "dtype", "=", "np", ".", "int", ")", "ry", "[", "0", "]", "=", "1", "ry", "[", "d", "]", "=", "1", "n", "=", "tens", ".", "n", "elements_seen", "=", "0", "phi_left", "=", "[", "np", ".", "empty", "(", "0", ")", "]", "*", "(", "d", "+", "1", ")", "phi_left", "[", "0", "]", "=", "np", ".", "array", "(", "[", "1", "]", ")", "phi_right", "=", "[", "np", ".", "empty", "(", "0", ")", "]", "*", "(", "d", "+", "1", ")", "phi_right", "[", "d", "]", "=", "np", ".", "array", "(", "[", "1", "]", ")", "cores", "=", "tt", ".", "tensor", ".", "to_list", "(", "tens", ")", "# Fill initial multiindex J randomly.", "grid", "=", "[", "np", ".", "reshape", "(", "range", "(", "n", "[", "i", "]", ")", ",", "(", "n", "[", "i", "]", ",", "1", ")", ")", "for", "i", "in", "xrange", "(", "d", ")", "]", "for", "i", "in", "xrange", "(", "d", "-", "1", ")", ":", "ry", "[", "i", "+", "1", "]", "=", "min", "(", "ry", "[", "i", "+", "1", "]", ",", "n", "[", "i", "]", "*", "ry", "[", "i", "]", ")", "ind", "=", "sorted", "(", "np", ".", "random", ".", "permutation", "(", "ry", "[", "i", "]", "*", "n", "[", "i", "]", ")", "[", "0", ":", "ry", "[", "i", "+", "1", "]", "]", ")", "w1", "=", "mkron", "(", "np", ".", "ones", "(", "(", "n", "[", "i", "]", ",", "1", ")", ",", "dtype", "=", "np", ".", "int", ")", ",", "Jy", "[", "i", "]", ")", "w2", "=", "mkron", "(", "grid", "[", "i", "]", ",", "np", ".", "ones", "(", "(", "ry", "[", "i", "]", ",", "1", ")", ",", "dtype", "=", "np", ".", "int", ")", ")", "Jy", "[", "i", "+", "1", "]", "=", "np", ".", "hstack", "(", "(", "w1", ",", "w2", ")", ")", "Jy", "[", "i", "+", "1", "]", "=", "reshape", "(", "Jy", "[", "i", "+", "1", "]", ",", "(", "ry", "[", "i", "]", "*", "n", "[", "i", "]", ",", "-", "1", ")", ")", "Jy", "[", "i", "+", "1", "]", "=", "Jy", "[", "i", "+", "1", "]", "[", "ind", ",", ":", "]", "phi_left", "[", "i", "+", "1", "]", "=", "np", ".", "tensordot", "(", "phi_left", "[", "i", "]", ",", "cores", "[", "i", "]", ",", "1", ")", "phi_left", "[", "i", "+", "1", "]", "=", "reshape", "(", "phi_left", "[", "i", "+", "1", "]", ",", "(", "ry", "[", "i", "]", "*", "n", "[", "i", "]", ",", "-", "1", ")", ")", "phi_left", "[", "i", "+", "1", "]", "=", "phi_left", "[", "i", "+", "1", "]", "[", "ind", ",", ":", "]", "swp", "=", "0", "dirn", "=", "-", "1", "i", "=", "d", "-", "1", "lm", "=", "float", "(", "'Inf'", ")", "while", "swp", "<", "nswp", ":", "# Right-to-left sweep", "# The idea: compute the current core; compute the function of it;", "# Shift locally or globally? 
Local shift would be the first try", "# Compute the current core", "if", "np", ".", "size", "(", "Jy", "[", "i", "]", ")", "==", "0", ":", "w1", "=", "np", ".", "zeros", "(", "(", "ry", "[", "i", "]", "*", "n", "[", "i", "]", "*", "ry", "[", "i", "+", "1", "]", ",", "0", ")", ",", "dtype", "=", "np", ".", "int", ")", "else", ":", "w1", "=", "mkron", "(", "np", ".", "ones", "(", "(", "n", "[", "i", "]", "*", "ry", "[", "i", "+", "1", "]", ",", "1", ")", ",", "dtype", "=", "np", ".", "int", ")", ",", "Jy", "[", "i", "]", ")", "w2", "=", "mkron", "(", "mkron", "(", "np", ".", "ones", "(", "(", "ry", "[", "i", "+", "1", "]", ",", "1", ")", ",", "dtype", "=", "np", ".", "int", ")", ",", "grid", "[", "i", "]", ")", ",", "np", ".", "ones", "(", "(", "ry", "[", "i", "]", ",", "1", ")", ",", "dtype", "=", "np", ".", "int", ")", ")", "if", "np", ".", "size", "(", "Jy", "[", "i", "+", "1", "]", ")", "==", "0", ":", "w3", "=", "np", ".", "zeros", "(", "(", "ry", "[", "i", "]", "*", "n", "[", "i", "]", "*", "ry", "[", "i", "+", "1", "]", ",", "0", ")", ",", "dtype", "=", "np", ".", "int", ")", "else", ":", "w3", "=", "mkron", "(", "Jy", "[", "i", "+", "1", "]", ",", "np", ".", "ones", "(", "(", "ry", "[", "i", "]", "*", "n", "[", "i", "]", ",", "1", ")", ",", "dtype", "=", "np", ".", "int", ")", ")", "J", "=", "np", ".", "hstack", "(", "(", "w1", ",", "w2", ",", "w3", ")", ")", "phi_right", "[", "i", "]", "=", "np", ".", "tensordot", "(", "cores", "[", "i", "]", ",", "phi_right", "[", "i", "+", "1", "]", ",", "1", ")", "phi_right", "[", "i", "]", "=", "reshape", "(", "phi_right", "[", "i", "]", ",", "(", "-", "1", ",", "n", "[", "i", "]", "*", "ry", "[", "i", "+", "1", "]", ")", ")", "cry", "=", "np", ".", "tensordot", "(", "phi_left", "[", "i", "]", ",", "np", ".", "tensordot", "(", "cores", "[", "i", "]", ",", "phi_right", "[", "i", "+", "1", "]", ",", "1", ")", ",", "1", ")", "elements_seen", "+=", "cry", ".", "size", "cry", "=", "reshape", "(", "cry", ",", "(", "ry", "[", "i", "]", ",", "n", "[", "i", "]", ",", "ry", "[", "i", "+", "1", "]", ")", ")", "min_cur", "=", "np", ".", "min", "(", "cry", ".", "flatten", "(", "\"F\"", ")", ")", "ind_cur", "=", "np", ".", "argmin", "(", "cry", ".", "flatten", "(", "\"F\"", ")", ")", "if", "lm", ">", "min_cur", ":", "lm", "=", "min_cur", "x_full", "=", "J", "[", "ind_cur", ",", ":", "]", "val", "=", "tens", "[", "x_full", "]", "if", "verb", ":", "print", "(", "'New record:'", ",", "val", ",", "'Point:'", ",", "x_full", ",", "'elements seen:'", ",", "elements_seen", ")", "cry", "=", "smooth_fun", "(", "cry", ",", "lm", ")", "if", "dirn", "<", "0", "and", "i", ">", "0", ":", "cry", "=", "reshape", "(", "cry", ",", "(", "ry", "[", "i", "]", ",", "n", "[", "i", "]", "*", "ry", "[", "i", "+", "1", "]", ")", ")", "cry", "=", "cry", ".", "T", "#q, r = np.linalg.qr(cry)", "u", ",", "s", ",", "v", "=", "mysvd", "(", "cry", ",", "full_matrices", "=", "False", ")", "ry", "[", "i", "]", "=", "min", "(", "ry", "[", "i", "]", ",", "rmax", ")", "q", "=", "u", "[", ":", ",", ":", "ry", "[", "i", "]", "]", "ind", "=", "rect_maxvol", "(", "q", ")", "[", "0", "]", "# maxvol(q)", "ry", "[", "i", "]", "=", "ind", ".", "size", "w1", "=", "mkron", "(", "np", ".", "ones", "(", "(", "ry", "[", "i", "+", "1", "]", ",", "1", ")", ",", "dtype", "=", "np", ".", "int", ")", ",", "grid", "[", "i", "]", ")", "if", "np", ".", "size", "(", "Jy", "[", "i", "+", "1", "]", ")", "==", "0", ":", "w2", "=", "np", ".", "zeros", "(", "(", "n", "[", "i", "]", "*", "ry", "[", "i", "+", "1", "]", ",", 
"0", ")", ",", "dtype", "=", "np", ".", "int", ")", "else", ":", "w2", "=", "mkron", "(", "Jy", "[", "i", "+", "1", "]", ",", "np", ".", "ones", "(", "(", "n", "[", "i", "]", ",", "1", ")", ",", "dtype", "=", "np", ".", "int", ")", ")", "Jy", "[", "i", "]", "=", "np", ".", "hstack", "(", "(", "w1", ",", "w2", ")", ")", "Jy", "[", "i", "]", "=", "reshape", "(", "Jy", "[", "i", "]", ",", "(", "n", "[", "i", "]", "*", "ry", "[", "i", "+", "1", "]", ",", "-", "1", ")", ")", "Jy", "[", "i", "]", "=", "Jy", "[", "i", "]", "[", "ind", ",", ":", "]", "phi_right", "[", "i", "]", "=", "np", ".", "tensordot", "(", "cores", "[", "i", "]", ",", "phi_right", "[", "i", "+", "1", "]", ",", "1", ")", "phi_right", "[", "i", "]", "=", "reshape", "(", "phi_right", "[", "i", "]", ",", "(", "-", "1", ",", "n", "[", "i", "]", "*", "ry", "[", "i", "+", "1", "]", ")", ")", "phi_right", "[", "i", "]", "=", "phi_right", "[", "i", "]", "[", ":", ",", "ind", "]", "if", "dirn", ">", "0", "and", "i", "<", "d", "-", "1", ":", "cry", "=", "reshape", "(", "cry", ",", "(", "ry", "[", "i", "]", "*", "n", "[", "i", "]", ",", "ry", "[", "i", "+", "1", "]", ")", ")", "q", ",", "r", "=", "np", ".", "linalg", ".", "qr", "(", "cry", ")", "#ind = maxvol(q)", "ind", "=", "rect_maxvol", "(", "q", ")", "[", "0", "]", "ry", "[", "i", "+", "1", "]", "=", "ind", ".", "size", "phi_left", "[", "i", "+", "1", "]", "=", "np", ".", "tensordot", "(", "phi_left", "[", "i", "]", ",", "cores", "[", "i", "]", ",", "1", ")", "phi_left", "[", "i", "+", "1", "]", "=", "reshape", "(", "phi_left", "[", "i", "+", "1", "]", ",", "(", "ry", "[", "i", "]", "*", "n", "[", "i", "]", ",", "-", "1", ")", ")", "phi_left", "[", "i", "+", "1", "]", "=", "phi_left", "[", "i", "+", "1", "]", "[", "ind", ",", ":", "]", "w1", "=", "mkron", "(", "np", ".", "ones", "(", "(", "n", "[", "i", "]", ",", "1", ")", ",", "dtype", "=", "np", ".", "int", ")", ",", "Jy", "[", "i", "]", ")", "w2", "=", "mkron", "(", "grid", "[", "i", "]", ",", "np", ".", "ones", "(", "(", "ry", "[", "i", "]", ",", "1", ")", ",", "dtype", "=", "np", ".", "int", ")", ")", "Jy", "[", "i", "+", "1", "]", "=", "np", ".", "hstack", "(", "(", "w1", ",", "w2", ")", ")", "Jy", "[", "i", "+", "1", "]", "=", "reshape", "(", "Jy", "[", "i", "+", "1", "]", ",", "(", "ry", "[", "i", "]", "*", "n", "[", "i", "]", ",", "-", "1", ")", ")", "Jy", "[", "i", "+", "1", "]", "=", "Jy", "[", "i", "+", "1", "]", "[", "ind", ",", ":", "]", "i", "+=", "dirn", "if", "i", "==", "d", "or", "i", "==", "-", "1", ":", "dirn", "=", "-", "dirn", "i", "+=", "dirn", "swp", "=", "swp", "+", "1", "return", "val", ",", "x_full" ]
Find (approximate) minimal element in a TT-tensor.
[ "Find", "(", "approximate", ")", "minimal", "element", "in", "a", "TT", "-", "tensor", "." ]
python
train
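A minimal usage sketch for the entry above, assuming the ttpy package is installed; tt.rand and the module path tt.optimize.tt_min come from that package, and the size/rank values are illustrative:

import tt
from tt.optimize import tt_min

# Random 5-dimensional TT-tensor with mode size 10 and TT-rank 3.
a = tt.rand(10, 5, 3)
# Search for an (approximate) minimum; returns the value and its multi-index.
val, idx = tt_min.min_tens(a, rmax=10, nswp=10, verb=False)
print(val, idx)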
mrtazz/simplenote.py
simplenote/simplenote.py
https://github.com/mrtazz/simplenote.py/blob/19a8c6e5de8db2e5ff9f0a82e86b8a6fb35eac82/simplenote/simplenote.py#L224-L319
def get_note_list(self, data=True, since=None, tags=[]):
    """ Method to get the note list

    The method can be passed optional arguments to limit the
    list to notes containing a certain tag, or only updated
    since a certain Simperium cursor. If omitted a list of all
    notes is returned.

    By default data objects are returned. If data is set to
    false only keys/ids and versions are returned. An empty
    data object is inserted for compatibility.

    Arguments:
        - tags=[] list of tags as string: return notes that have
          at least one of these tags
        - since=cursor Simperium cursor as string: return only
          changes since this cursor
        - data=True If false only return keys/ids and versions

    Returns:
        A tuple `(notes, status)`

        - notes (list): A list of note objects with all properties
          set except `content`.
        - status (int): 0 on success and -1 otherwise

    """
    # initialize data
    status = 0
    ret = []
    response_notes = {}
    notes = { "index" : [] }

    # get the note index
    params = '/index?limit=%s' % (str(NOTE_FETCH_LENGTH))
    if since is not None:
        params += '&since=%s' % (since)
    # Fetching data is the default
    if data:
        params += '&data=true'

    # perform initial HTTP request
    request = Request(DATA_URL+params)
    request.add_header(self.header, self.get_token())
    try:
        response = urllib2.urlopen(request)
        response_notes = json.loads(response.read().decode('utf-8'))
        # re-write for v1 consistency
        note_objects = []
        for n in response_notes["index"]:
            # If data=False then can't do this bit... or not all of it, just have id and version. Add empty data object.
            if not data:
                n['d'] = {}
            note_object = self.__add_simplenote_api_fields(n['d'], n['id'], n['v'])
            note_objects.append(note_object)
        notes["index"].extend(note_objects)
    except HTTPError as e:
        if e.code == 401:
            raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.')
        else:
            return e, -1
    except IOError as e:
        return e, -1

    # get additional notes if bookmark was set in response
    while "mark" in response_notes:
        params += '&mark=%s' % response_notes["mark"]
        # perform the actual HTTP request
        request = Request(DATA_URL+params)
        request.add_header(self.header, self.get_token())
        try:
            response = urllib2.urlopen(request)
            response_notes = json.loads(response.read().decode('utf-8'))
            # re-write for v1 consistency
            note_objects = []
            for n in response_notes["index"]:
                if not data:
                    n['d'] = {}
                note_object = n['d']
                note_object = self.__add_simplenote_api_fields(n['d'], n['id'], n['v'])
                note_objects.append(note_object)
            notes["index"].extend(note_objects)
        except HTTPError as e:
            if e.code == 401:
                raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.')
            else:
                return e, -1
        except IOError as e:
            return e, -1

    note_list = notes["index"]
    self.current = response_notes["current"]
    # Can only filter for tags at end, once all notes have been retrieved.
    if (len(tags) > 0):
        note_list = [n for n in note_list if (len(set(n["tags"]).intersection(tags)) > 0)]
    return note_list, status
[ "def", "get_note_list", "(", "self", ",", "data", "=", "True", ",", "since", "=", "None", ",", "tags", "=", "[", "]", ")", ":", "# initialize data", "status", "=", "0", "ret", "=", "[", "]", "response_notes", "=", "{", "}", "notes", "=", "{", "\"index\"", ":", "[", "]", "}", "# get the note index", "params", "=", "'/index?limit=%s'", "%", "(", "str", "(", "NOTE_FETCH_LENGTH", ")", ")", "if", "since", "is", "not", "None", ":", "params", "+=", "'&since=%s'", "%", "(", "since", ")", "# Fetching data is the default", "if", "data", ":", "params", "+=", "'&data=true'", "# perform initial HTTP request", "request", "=", "Request", "(", "DATA_URL", "+", "params", ")", "request", ".", "add_header", "(", "self", ".", "header", ",", "self", ".", "get_token", "(", ")", ")", "try", ":", "response", "=", "urllib2", ".", "urlopen", "(", "request", ")", "response_notes", "=", "json", ".", "loads", "(", "response", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", ")", "# re-write for v1 consistency", "note_objects", "=", "[", "]", "for", "n", "in", "response_notes", "[", "\"index\"", "]", ":", "# If data=False then can't do this bit... or not all of it, just have id and version. Add empty data object.", "if", "not", "data", ":", "n", "[", "'d'", "]", "=", "{", "}", "note_object", "=", "self", ".", "__add_simplenote_api_fields", "(", "n", "[", "'d'", "]", ",", "n", "[", "'id'", "]", ",", "n", "[", "'v'", "]", ")", "note_objects", ".", "append", "(", "note_object", ")", "notes", "[", "\"index\"", "]", ".", "extend", "(", "note_objects", ")", "except", "HTTPError", "as", "e", ":", "if", "e", ".", "code", "==", "401", ":", "raise", "SimplenoteLoginFailed", "(", "'Login to Simplenote API failed! Check Token.'", ")", "else", ":", "return", "e", ",", "-", "1", "except", "IOError", "as", "e", ":", "return", "e", ",", "-", "1", "# get additional notes if bookmark was set in response", "while", "\"mark\"", "in", "response_notes", ":", "params", "+=", "'&mark=%s'", "%", "response_notes", "[", "\"mark\"", "]", "# perform the actual HTTP request", "request", "=", "Request", "(", "DATA_URL", "+", "params", ")", "request", ".", "add_header", "(", "self", ".", "header", ",", "self", ".", "get_token", "(", ")", ")", "try", ":", "response", "=", "urllib2", ".", "urlopen", "(", "request", ")", "response_notes", "=", "json", ".", "loads", "(", "response", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", ")", "# re-write for v1 consistency", "note_objects", "=", "[", "]", "for", "n", "in", "response_notes", "[", "\"index\"", "]", ":", "if", "not", "data", ":", "n", "[", "'d'", "]", "=", "{", "}", "note_object", "=", "n", "[", "'d'", "]", "note_object", "=", "self", ".", "__add_simplenote_api_fields", "(", "n", "[", "'d'", "]", ",", "n", "[", "'id'", "]", ",", "n", "[", "'v'", "]", ")", "note_objects", ".", "append", "(", "note_object", ")", "notes", "[", "\"index\"", "]", ".", "extend", "(", "note_objects", ")", "except", "HTTPError", "as", "e", ":", "if", "e", ".", "code", "==", "401", ":", "raise", "SimplenoteLoginFailed", "(", "'Login to Simplenote API failed! 
Check Token.'", ")", "else", ":", "return", "e", ",", "-", "1", "except", "IOError", "as", "e", ":", "return", "e", ",", "-", "1", "note_list", "=", "notes", "[", "\"index\"", "]", "self", ".", "current", "=", "response_notes", "[", "\"current\"", "]", "# Can only filter for tags at end, once all notes have been retrieved.", "if", "(", "len", "(", "tags", ")", ">", "0", ")", ":", "note_list", "=", "[", "n", "for", "n", "in", "note_list", "if", "(", "len", "(", "set", "(", "n", "[", "\"tags\"", "]", ")", ".", "intersection", "(", "tags", ")", ")", ">", "0", ")", "]", "return", "note_list", ",", "status" ]
Method to get the note list

The method can be passed optional arguments to limit the
list to notes containing a certain tag, or only updated
since a certain Simperium cursor. If omitted a list of all
notes is returned.

By default data objects are returned. If data is set to
false only keys/ids and versions are returned. An empty
data object is inserted for compatibility.

Arguments:
    - tags=[] list of tags as string: return notes that have
      at least one of these tags
    - since=cursor Simperium cursor as string: return only
      changes since this cursor
    - data=True If false only return keys/ids and versions

Returns:
    A tuple `(notes, status)`

    - notes (list): A list of note objects with all properties
      set except `content`.
    - status (int): 0 on success and -1 otherwise
[ "Method", "to", "get", "the", "note", "list" ]
python
train
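A short usage sketch for get_note_list (the Simplenote class and its email/password constructor come from simplenote.py; the credentials are placeholders):

from simplenote import Simplenote

sn = Simplenote('user@example.com', 'password')  # placeholder credentials
notes, status = sn.get_note_list(data=False)     # ids/versions only, no content
if status == 0:
    print('fetched %d notes' % len(notes))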
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L10441-L10463
def pull_guest_properties(self):
    """Get the list of the guest properties matching a set of patterns along
    with their values, timestamps and flags and give responsibility for
    managing properties to the console.

    out names of type str
        The names of the properties returned.

    out values of type str
        The values of the properties returned. The array entries match the
        corresponding entries in the @a name array.

    out timestamps of type int
        The timestamps of the properties returned. The array entries match
        the corresponding entries in the @a name array.

    out flags of type str
        The flags of the properties returned. The array entries match the
        corresponding entries in the @a name array.

    """
    (names, values, timestamps, flags) = self._call("pullGuestProperties")
    return (names, values, timestamps, flags)
[ "def", "pull_guest_properties", "(", "self", ")", ":", "(", "names", ",", "values", ",", "timestamps", ",", "flags", ")", "=", "self", ".", "_call", "(", "\"pullGuestProperties\"", ")", "return", "(", "names", ",", "values", ",", "timestamps", ",", "flags", ")" ]
Get the list of the guest properties matching a set of patterns along
with their values, timestamps and flags and give responsibility for
managing properties to the console.

out names of type str
    The names of the properties returned.

out values of type str
    The values of the properties returned. The array entries match the
    corresponding entries in the @a name array.

out timestamps of type int
    The timestamps of the properties returned. The array entries match
    the corresponding entries in the @a name array.

out flags of type str
    The flags of the properties returned. The array entries match the
    corresponding entries in the @a name array.
[ "Get", "the", "list", "of", "the", "guest", "properties", "matching", "a", "set", "of", "patterns", "along", "with", "their", "values", "timestamps", "and", "flags", "and", "give", "responsibility", "for", "managing", "properties", "to", "the", "console", "." ]
python
train
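A hedged sketch of reaching pull_guest_properties through the virtualbox package (the VM name is a placeholder; find_machine, lock_machine and session.console are taken from the same library):

import virtualbox
from virtualbox.library import LockType

vbox = virtualbox.VirtualBox()
session = virtualbox.Session()
machine = vbox.find_machine('my-vm')            # placeholder VM name
machine.lock_machine(session, LockType.shared)  # attach to the running VM
names, values, timestamps, flags = session.console.pull_guest_properties()
session.unlock_machine()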
tBaxter/tango-shared-core
build/lib/tango_shared/models.py
https://github.com/tBaxter/tango-shared-core/blob/35fc10aef1ceedcdb4d6d866d44a22efff718812/build/lib/tango_shared/models.py#L151-L157
def save(self, *args, **kwargs):
    """
    Clean text and save formatted version.
    """
    self.text = clean_text(self.text)
    self.text_formatted = format_text(self.text)
    super(BaseUserContentModel, self).save(*args, **kwargs)
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "text", "=", "clean_text", "(", "self", ".", "text", ")", "self", ".", "text_formatted", "=", "format_text", "(", "self", ".", "text", ")", "super", "(", "BaseUserContentModel", ",", "self", ")", ".", "save", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Clean text and save formatted version.
[ "Clean", "text", "and", "save", "formatted", "version", "." ]
python
train
Alignak-monitoring/alignak
alignak/external_command.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L1246-L1263
def change_custom_svc_var(self, service, varname, varvalue):
    """Change custom service variable
    Format of the line that triggers function call::

    CHANGE_CUSTOM_SVC_VAR;<host_name>;<service_description>;<varname>;<varvalue>

    :param service: service to edit
    :type service: alignak.objects.service.Service
    :param varname: variable name to change
    :type varname: str
    :param varvalue: variable new value
    :type varvalue: str
    :return: None
    """
    if varname.upper() in service.customs:
        service.modified_attributes |= DICT_MODATTR["MODATTR_CUSTOM_VARIABLE"].value
        service.customs[varname.upper()] = varvalue
        self.send_an_element(service.get_update_status_brok())
[ "def", "change_custom_svc_var", "(", "self", ",", "service", ",", "varname", ",", "varvalue", ")", ":", "if", "varname", ".", "upper", "(", ")", "in", "service", ".", "customs", ":", "service", ".", "modified_attributes", "|=", "DICT_MODATTR", "[", "\"MODATTR_CUSTOM_VARIABLE\"", "]", ".", "value", "service", ".", "customs", "[", "varname", ".", "upper", "(", ")", "]", "=", "varvalue", "self", ".", "send_an_element", "(", "service", ".", "get_update_status_brok", "(", ")", ")" ]
Change custom service variable
Format of the line that triggers function call::

CHANGE_CUSTOM_SVC_VAR;<host_name>;<service_description>;<varname>;<varvalue>

:param service: service to edit
:type service: alignak.objects.service.Service
:param varname: variable name to change
:type varname: str
:param varvalue: variable new value
:type varvalue: str
:return: None
[ "Change", "custom", "service", "variable", "Format", "of", "the", "line", "that", "triggers", "function", "call", "::" ]
python
train
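For reference, a concrete instance of the trigger line documented above (host, service and variable values are illustrative):

CHANGE_CUSTOM_SVC_VAR;web-01;HTTP;_ENV;production

When processed, the handler stores 'production' under the upper-cased key '_ENV' in service.customs and broadcasts an update brok.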
miguelgrinberg/python-engineio
engineio/server.py
https://github.com/miguelgrinberg/python-engineio/blob/261fd67103cb5d9a44369415748e66fdf62de6fb/engineio/server.py#L513-L522
def _get_socket(self, sid):
    """Return the socket object for a given session."""
    try:
        s = self.sockets[sid]
    except KeyError:
        raise KeyError('Session not found')
    if s.closed:
        del self.sockets[sid]
        raise KeyError('Session is disconnected')
    return s
[ "def", "_get_socket", "(", "self", ",", "sid", ")", ":", "try", ":", "s", "=", "self", ".", "sockets", "[", "sid", "]", "except", "KeyError", ":", "raise", "KeyError", "(", "'Session not found'", ")", "if", "s", ".", "closed", ":", "del", "self", ".", "sockets", "[", "sid", "]", "raise", "KeyError", "(", "'Session is disconnected'", ")", "return", "s" ]
Return the socket object for a given session.
[ "Return", "the", "socket", "object", "for", "a", "given", "session", "." ]
python
train
Microsoft/nni
src/sdk/pynni/nni/metis_tuner/Regression_GP/OutlierDetection.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/metis_tuner/Regression_GP/OutlierDetection.py#L32-L53
def _outlierDetection_threaded(inputs):
    '''
    Detect the outlier
    '''
    [samples_idx, samples_x, samples_y_aggregation] = inputs
    sys.stderr.write("[%s] DEBUG: Evaluating %dth of %d samples\n"
                     % (os.path.basename(__file__), samples_idx + 1, len(samples_x)))
    outlier = None

    # Create a diagnostic regression model which removes the sample that we want to evaluate
    diagnostic_regressor_gp = gp_create_model.create_model(
        samples_x[0:samples_idx] + samples_x[samples_idx + 1:],
        samples_y_aggregation[0:samples_idx] + samples_y_aggregation[samples_idx + 1:])
    mu, sigma = gp_prediction.predict(samples_x[samples_idx],
                                      diagnostic_regressor_gp['model'])

    # 2.33 is the z-score for 98% confidence level
    if abs(samples_y_aggregation[samples_idx] - mu) > (2.33 * sigma):
        outlier = {"samples_idx": samples_idx,
                   "expected_mu": mu,
                   "expected_sigma": sigma,
                   "difference": abs(samples_y_aggregation[samples_idx] - mu) - (2.33 * sigma)}
    return outlier
[ "def", "_outlierDetection_threaded", "(", "inputs", ")", ":", "[", "samples_idx", ",", "samples_x", ",", "samples_y_aggregation", "]", "=", "inputs", "sys", ".", "stderr", ".", "write", "(", "\"[%s] DEBUG: Evaluating %dth of %d samples\\n\"", "%", "(", "os", ".", "path", ".", "basename", "(", "__file__", ")", ",", "samples_idx", "+", "1", ",", "len", "(", "samples_x", ")", ")", ")", "outlier", "=", "None", "# Create a diagnostic regression model which removes the sample that we want to evaluate", "diagnostic_regressor_gp", "=", "gp_create_model", ".", "create_model", "(", "samples_x", "[", "0", ":", "samples_idx", "]", "+", "samples_x", "[", "samples_idx", "+", "1", ":", "]", ",", "samples_y_aggregation", "[", "0", ":", "samples_idx", "]", "+", "samples_y_aggregation", "[", "samples_idx", "+", "1", ":", "]", ")", "mu", ",", "sigma", "=", "gp_prediction", ".", "predict", "(", "samples_x", "[", "samples_idx", "]", ",", "diagnostic_regressor_gp", "[", "'model'", "]", ")", "# 2.33 is the z-score for 98% confidence level", "if", "abs", "(", "samples_y_aggregation", "[", "samples_idx", "]", "-", "mu", ")", ">", "(", "2.33", "*", "sigma", ")", ":", "outlier", "=", "{", "\"samples_idx\"", ":", "samples_idx", ",", "\"expected_mu\"", ":", "mu", ",", "\"expected_sigma\"", ":", "sigma", ",", "\"difference\"", ":", "abs", "(", "samples_y_aggregation", "[", "samples_idx", "]", "-", "mu", ")", "-", "(", "2.33", "*", "sigma", ")", "}", "return", "outlier" ]
Detect the outlier
[ "Detect", "the", "outlier" ]
python
train
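A hedged sketch of how the packed inputs list is consumed (samples_x and samples_y_aggregation stand in for the leave-one-out data that the Metis pipeline normally supplies):

# Check whether the 4th sample is an outlier under a GP fit to the others.
inputs = [3, samples_x, samples_y_aggregation]
result = _outlierDetection_threaded(inputs)
if result is not None:
    print('sample %d exceeds the 98%% band by %f'
          % (result['samples_idx'], result['difference']))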
carljm/django-adminfiles
adminfiles/views.py
https://github.com/carljm/django-adminfiles/blob/b01dc7be266305d575c11d5ff9a37ccac04a78c2/adminfiles/views.py#L43-L51
def link_text(cls):
    """
    Return link text for this view.

    """
    link = cls.__name__
    if link.endswith('View'):
        link = link[:-4]
    return link
[ "def", "link_text", "(", "cls", ")", ":", "link", "=", "cls", ".", "__name__", "if", "link", ".", "endswith", "(", "'View'", ")", ":", "link", "=", "link", "[", ":", "-", "4", "]", "return", "link" ]
Return link text for this view.
[ "Return", "link", "text", "for", "this", "view", "." ]
python
train
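A minimal sketch of the naming convention link_text encodes (ImagesView is a hypothetical subclass; the BaseView base class is assumed here):

class ImagesView(BaseView):  # hypothetical subclass
    pass

ImagesView.link_text()       # returns 'Images': the 'View' suffix is stripped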
FlaskGuys/Flask-Imagine
flask_imagine/core.py
https://github.com/FlaskGuys/Flask-Imagine/blob/f79c6517ecb5480b63a2b3b8554edb6e2ac8be8c/flask_imagine/core.py#L199-L232
def add_filter_set(self, filter_name, filter_set, cached=True):
    """
    Manual addition of filter set

    :param filter_name: str
    :param filter_set: list
    :param cached: bool
    """
    try:
        hash(filter_name)
    except TypeError as err:
        raise ValueError('Filter set name must be as instance of hashable type: %s' % str(err))
    if not isinstance(filter_set, list):
        raise ValueError('Filters must be a list.')
    if len(filter_set) == 0:
        raise ValueError('Filters count must be greater than 0.')
    for filter_instance in filter_set:
        if not isinstance(filter_instance, ImagineFilterInterface):
            raise ValueError('All filters must implement of ImagineFilterInterface.')
    if not isinstance(cached, bool):
        raise ValueError('Cached parameter must be a bool.')

    filter_config = {
        'filters': filter_set,
        'cached': cached
    }

    if filter_name not in self._filter_sets:
        self._filter_sets.update({filter_name: filter_config})
    else:
        raise ValueError('Duplicate filter set name.')
[ "def", "add_filter_set", "(", "self", ",", "filter_name", ",", "filter_set", ",", "cached", "=", "True", ")", ":", "try", ":", "hash", "(", "filter_name", ")", "except", "TypeError", "as", "err", ":", "raise", "ValueError", "(", "'Filter set name must be as instance of hashable type: %s'", "%", "str", "(", "err", ")", ")", "if", "not", "isinstance", "(", "filter_set", ",", "list", ")", ":", "raise", "ValueError", "(", "'Filters must be a list.'", ")", "if", "len", "(", "filter_set", ")", "==", "0", ":", "raise", "ValueError", "(", "'Filters count must be greater than 0.'", ")", "for", "filter_instance", "in", "filter_set", ":", "if", "not", "isinstance", "(", "filter_instance", ",", "ImagineFilterInterface", ")", ":", "raise", "ValueError", "(", "'All filters must implement of ImagineFilterInterface.'", ")", "if", "not", "isinstance", "(", "cached", ",", "bool", ")", ":", "raise", "ValueError", "(", "'Cached parameter must be a bool.'", ")", "filter_config", "=", "{", "'filters'", ":", "filter_set", ",", "'cached'", ":", "cached", "}", "if", "filter_name", "not", "in", "self", ".", "_filter_sets", ":", "self", ".", "_filter_sets", ".", "update", "(", "{", "filter_name", ":", "filter_config", "}", ")", "else", ":", "raise", "ValueError", "(", "'Duplicate filter set name.'", ")" ]
Manual addition of filter set

:param filter_name: str
:param filter_set: list
:param cached: bool
[ "Manual", "addition", "of", "filter", "set", ":", "param", "filter_name", ":", "str", ":", "param", "filter_set", ":", "list", ":", "param", "cached", ":", "bool" ]
python
train
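A hedged registration sketch (NoOpFilter is made up; the only requirement visible in the code above is that each entry is an ImagineFilterInterface instance, whose hook method and import path are assumed here):

from flask import Flask
from flask_imagine import Imagine
from flask_imagine.filters.interface import ImagineFilterInterface  # assumed path

class NoOpFilter(ImagineFilterInterface):
    def apply(self, resource):  # assumed interface hook
        return resource

app = Flask(__name__)
imagine = Imagine(app)
imagine.add_filter_set('identity', [NoOpFilter()], cached=False)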
googleapis/google-cloud-python
storage/google/cloud/storage/bucket.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/bucket.py#L1165-L1192
def labels(self):
    """Retrieve or set labels assigned to this bucket.

    See
    https://cloud.google.com/storage/docs/json_api/v1/buckets#labels

    .. note::

       The getter for this property returns a dict which is a *copy*
       of the bucket's labels.  Mutating that dict has no effect unless
       you then re-assign the dict via the setter.  E.g.:

       >>> labels = bucket.labels
       >>> labels['new_key'] = 'some-label'
       >>> del labels['old_key']
       >>> bucket.labels = labels
       >>> bucket.update()

    :setter: Set labels for this bucket.
    :getter: Gets the labels for this bucket.

    :rtype: :class:`dict`
    :returns: Name-value pairs (string->string) labelling the bucket.
    """
    labels = self._properties.get("labels")
    if labels is None:
        return {}
    return copy.deepcopy(labels)
[ "def", "labels", "(", "self", ")", ":", "labels", "=", "self", ".", "_properties", ".", "get", "(", "\"labels\"", ")", "if", "labels", "is", "None", ":", "return", "{", "}", "return", "copy", ".", "deepcopy", "(", "labels", ")" ]
Retrieve or set labels assigned to this bucket.

See
https://cloud.google.com/storage/docs/json_api/v1/buckets#labels

.. note::

   The getter for this property returns a dict which is a *copy*
   of the bucket's labels.  Mutating that dict has no effect unless
   you then re-assign the dict via the setter.  E.g.:

   >>> labels = bucket.labels
   >>> labels['new_key'] = 'some-label'
   >>> del labels['old_key']
   >>> bucket.labels = labels
   >>> bucket.update()

:setter: Set labels for this bucket.
:getter: Gets the labels for this bucket.

:rtype: :class:`dict`
:returns: Name-value pairs (string->string) labelling the bucket.
[ "Retrieve", "or", "set", "labels", "assigned", "to", "this", "bucket", "." ]
python
train
wbond/asn1crypto
asn1crypto/x509.py
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/x509.py#L2713-L2745
def valid_domains(self):
    """
    :return:
        A list of unicode strings of valid domain names for the
        certificate. Wildcard certificates will have a domain in the
        form: *.example.com
    """
    if self._valid_domains is None:
        self._valid_domains = []

        # For the subject alt name extension, we can look at the name of
        # the choice selected since it distinguishes between domain names,
        # email addresses, IPs, etc
        if self.subject_alt_name_value:
            for general_name in self.subject_alt_name_value:
                if general_name.name == 'dns_name' and general_name.native not in self._valid_domains:
                    self._valid_domains.append(general_name.native)

        # If there was no subject alt name extension, and the common name
        # in the subject looks like a domain, that is considered the valid
        # list. This is done because according to
        # https://tools.ietf.org/html/rfc6125#section-6.4.4, the common
        # name should not be used if the subject alt name is present.
        else:
            pattern = re.compile('^(\\*\\.)?(?:[a-zA-Z0-9](?:[a-zA-Z0-9\\-]*[a-zA-Z0-9])?\\.)+[a-zA-Z]{2,}$')
            for rdn in self.subject.chosen:
                for name_type_value in rdn:
                    if name_type_value['type'].native == 'common_name':
                        value = name_type_value['value'].native
                        if pattern.match(value):
                            self._valid_domains.append(value)

    return self._valid_domains
[ "def", "valid_domains", "(", "self", ")", ":", "if", "self", ".", "_valid_domains", "is", "None", ":", "self", ".", "_valid_domains", "=", "[", "]", "# For the subject alt name extension, we can look at the name of", "# the choice selected since it distinguishes between domain names,", "# email addresses, IPs, etc", "if", "self", ".", "subject_alt_name_value", ":", "for", "general_name", "in", "self", ".", "subject_alt_name_value", ":", "if", "general_name", ".", "name", "==", "'dns_name'", "and", "general_name", ".", "native", "not", "in", "self", ".", "_valid_domains", ":", "self", ".", "_valid_domains", ".", "append", "(", "general_name", ".", "native", ")", "# If there was no subject alt name extension, and the common name", "# in the subject looks like a domain, that is considered the valid", "# list. This is done because according to", "# https://tools.ietf.org/html/rfc6125#section-6.4.4, the common", "# name should not be used if the subject alt name is present.", "else", ":", "pattern", "=", "re", ".", "compile", "(", "'^(\\\\*\\\\.)?(?:[a-zA-Z0-9](?:[a-zA-Z0-9\\\\-]*[a-zA-Z0-9])?\\\\.)+[a-zA-Z]{2,}$'", ")", "for", "rdn", "in", "self", ".", "subject", ".", "chosen", ":", "for", "name_type_value", "in", "rdn", ":", "if", "name_type_value", "[", "'type'", "]", ".", "native", "==", "'common_name'", ":", "value", "=", "name_type_value", "[", "'value'", "]", ".", "native", "if", "pattern", ".", "match", "(", "value", ")", ":", "self", ".", "_valid_domains", ".", "append", "(", "value", ")", "return", "self", ".", "_valid_domains" ]
:return:
    A list of unicode strings of valid domain names for the
    certificate. Wildcard certificates will have a domain in the
    form: *.example.com
[ ":", "return", ":", "A", "list", "of", "unicode", "strings", "of", "valid", "domain", "names", "for", "the", "certificate", ".", "Wildcard", "certificates", "will", "have", "a", "domain", "in", "the", "form", ":", "*", ".", "example", ".", "com" ]
python
train
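A short usage sketch with asn1crypto (the file path is a placeholder; Certificate.load expects DER bytes):

from asn1crypto import x509

with open('server.der', 'rb') as f:  # placeholder path to a DER certificate
    cert = x509.Certificate.load(f.read())

print(cert.valid_domains)            # e.g. ['example.com', '*.example.com']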
boundlessgeo/gsconfig
src/geoserver/catalog.py
https://github.com/boundlessgeo/gsconfig/blob/532f561f32b91ea8debea0573c503dd20988bf40/src/geoserver/catalog.py#L978-L986
def get_layergroup(self, name, workspace=None):
    '''
    returns a single layergroup object.
    Will return None if no layergroup is found.
    Will raise an error if more than one layergroup with the same name is found.
    '''
    layergroups = self.get_layergroups(names=name, workspaces=workspace)
    return self._return_first_item(layergroups)
[ "def", "get_layergroup", "(", "self", ",", "name", ",", "workspace", "=", "None", ")", ":", "layergroups", "=", "self", ".", "get_layergroups", "(", "names", "=", "name", ",", "workspaces", "=", "workspace", ")", "return", "self", ".", "_return_first_item", "(", "layergroups", ")" ]
returns a single layergroup object. Will return None if no layergroup is found. Will raise an error if more than one layergroup with the same name is found.
[ "returns", "a", "single", "layergroup", "object", ".", "Will", "return", "None", "if", "no", "layergroup", "is", "found", ".", "Will", "raise", "an", "error", "if", "more", "than", "one", "layergroup", "with", "the", "same", "name", "is", "found", "." ]
python
valid
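A hedged usage sketch against a local GeoServer (the endpoint, credentials and names are placeholders):

from geoserver.catalog import Catalog

cat = Catalog('http://localhost:8080/geoserver/rest',
              username='admin', password='geoserver')  # placeholder credentials
lg = cat.get_layergroup('roads', workspace='demo')     # None if no match
if lg is not None:
    print(lg.name)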
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_map/mp_slipmap_util.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_map/mp_slipmap_util.py#L56-L62
def update_position(self, newpos):
    '''update object position'''
    if getattr(self, 'trail', None) is not None:
        self.trail.update_position(newpos)
    self.latlon = newpos.latlon
    if hasattr(self, 'rotation'):
        self.rotation = newpos.rotation
[ "def", "update_position", "(", "self", ",", "newpos", ")", ":", "if", "getattr", "(", "self", ",", "'trail'", ",", "None", ")", "is", "not", "None", ":", "self", ".", "trail", ".", "update_position", "(", "newpos", ")", "self", ".", "latlon", "=", "newpos", ".", "latlon", "if", "hasattr", "(", "self", ",", "'rotation'", ")", ":", "self", ".", "rotation", "=", "newpos", ".", "rotation" ]
update object position
[ "update", "object", "position" ]
python
train
hotdoc/hotdoc
hotdoc/core/comment.py
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/comment.py#L159-L170
def comment_from_tag(tag):
    """
    Convenience function to create a full-fledged comment for a
    given tag, for example it is convenient to assign a Comment
    to a ReturnValueSymbol.
    """
    if not tag:
        return None
    comment = Comment(name=tag.name,
                      meta={'description': tag.description},
                      annotations=tag.annotations)
    return comment
[ "def", "comment_from_tag", "(", "tag", ")", ":", "if", "not", "tag", ":", "return", "None", "comment", "=", "Comment", "(", "name", "=", "tag", ".", "name", ",", "meta", "=", "{", "'description'", ":", "tag", ".", "description", "}", ",", "annotations", "=", "tag", ".", "annotations", ")", "return", "comment" ]
Convenience function to create a full-fledged comment for a given tag, for example it is convenient to assign a Comment to a ReturnValueSymbol.
[ "Convenience", "function", "to", "create", "a", "full", "-", "fledged", "comment", "for", "a", "given", "tag", "for", "example", "it", "is", "convenient", "to", "assign", "a", "Comment", "to", "a", "ReturnValueSymbol", "." ]
python
train
bykof/billomapy
billomapy/billomapy.py
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L821-L835
def get_tags_of_article_per_page(self, article_id, per_page=1000, page=1):
    """
    Get articles tags per page

    :param article_id: the article id
    :param per_page: How many objects per page. Default: 1000
    :param page: Which page. Default: 1
    :return: list
    """
    return self._get_resource_per_page(
        resource=ARTICLE_TAGS,
        per_page=per_page,
        page=page,
        params={'article_id': article_id},
    )
[ "def", "get_tags_of_article_per_page", "(", "self", ",", "article_id", ",", "per_page", "=", "1000", ",", "page", "=", "1", ")", ":", "return", "self", ".", "_get_resource_per_page", "(", "resource", "=", "ARTICLE_TAGS", ",", "per_page", "=", "per_page", ",", "page", "=", "page", ",", "params", "=", "{", "'article_id'", ":", "article_id", "}", ",", ")" ]
Get articles tags per page

:param article_id: the article id
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:return: list
[ "Get", "articles", "tags", "per", "page" ]
python
train
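A hedged usage sketch (the Billomapy constructor is assumed here to take the Billomat subdomain plus API credentials; every value below is a placeholder):

from billomapy import Billomapy

client = Billomapy('my-company', 'api-key', 'app-id', 'app-secret')  # placeholders
tags = client.get_tags_of_article_per_page(article_id=1234, per_page=100, page=1)
for tag in tags:
    print(tag)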