Dataset columns:

    repo              string   (length 7 to 55)
    path              string   (length 4 to 223)
    url               string   (length 87 to 315)
    code              string   (length 75 to 104k)
    code_tokens       list
    docstring         string   (length 1 to 46.9k)
    docstring_tokens  list
    language          string   (1 distinct value)
    partition         string   (3 distinct values)
    avg_line_len      float64  (7.91 to 980)
Esri/ArcREST
src/arcrest/manageorg/_portals.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageorg/_portals.py#L1536-L1543
def portalAdmin(self):
    """gets a reference to a portal administration class"""
    from ..manageportal import PortalAdministration
    return PortalAdministration(admin_url="https://%s/portaladmin" % self.portalHostname,
                                securityHandler=self._securityHandler,
                                proxy_url=self._proxy_url,
                                proxy_port=self._proxy_port,
                                initalize=False)
[ "def", "portalAdmin", "(", "self", ")", ":", "from", ".", ".", "manageportal", "import", "PortalAdministration", "return", "PortalAdministration", "(", "admin_url", "=", "\"https://%s/portaladmin\"", "%", "self", ".", "portalHostname", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ",", "initalize", "=", "False", ")" ]
gets a reference to a portal administration class
[ "gets", "a", "reference", "to", "a", "portal", "administration", "class" ]
python
train
60.625
pywbem/pywbem
attic/cim_provider.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/attic/cim_provider.py#L661-L676
def MI_deleteInstance(self, env, instanceName):
    # pylint: disable=invalid-name
    """Delete a CIM instance

    Implements the WBEM operation DeleteInstance in terms
    of the delete_instance method. A derived class will not normally
    override this method.
    """
    logger = env.get_logger()
    logger.log_debug('CIMProvider MI_deleteInstance called...')
    self.delete_instance(env=env, instance_name=instanceName)
    logger.log_debug('CIMProvider MI_deleteInstance returning')
[ "def", "MI_deleteInstance", "(", "self", ",", "env", ",", "instanceName", ")", ":", "# pylint: disable=invalid-name", "logger", "=", "env", ".", "get_logger", "(", ")", "logger", ".", "log_debug", "(", "'CIMProvider MI_deleteInstance called...'", ")", "self", ".", "delete_instance", "(", "env", "=", "env", ",", "instance_name", "=", "instanceName", ")", "logger", ".", "log_debug", "(", "'CIMProvider MI_deleteInstance returning'", ")" ]
Delete a CIM instance Implements the WBEM operation DeleteInstance in terms of the delete_instance method. A derived class will not normally override this method.
[ "Delete", "a", "CIM", "instance" ]
python
train
35.8125
pymupdf/PyMuPDF
fitz/fitz.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L3888-L3894
def fileUpd(self, buffer=None, filename=None, ufilename=None, desc=None):
    """Update annotation attached file."""
    CheckParent(self)
    return _fitz.Annot_fileUpd(self, buffer, filename, ufilename, desc)
[ "def", "fileUpd", "(", "self", ",", "buffer", "=", "None", ",", "filename", "=", "None", ",", "ufilename", "=", "None", ",", "desc", "=", "None", ")", ":", "CheckParent", "(", "self", ")", "return", "_fitz", ".", "Annot_fileUpd", "(", "self", ",", "buffer", ",", "filename", ",", "ufilename", ",", "desc", ")" ]
Update annotation attached file.
[ "Update", "annotation", "attached", "file", "." ]
python
train
31.285714
mitsei/dlkit
dlkit/json_/commenting/searches.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/commenting/searches.py#L97-L108
def get_comments(self):
    """Gets the comment list resulting from a search.

    return: (osid.commenting.CommentList) - the comment list
    raise:  IllegalState - list has already been retrieved
    *compliance: mandatory -- This method must be implemented.*
    """
    if self.retrieved:
        raise errors.IllegalState('List has already been retrieved.')
    self.retrieved = True
    return objects.CommentList(self._results, runtime=self._runtime)
[ "def", "get_comments", "(", "self", ")", ":", "if", "self", ".", "retrieved", ":", "raise", "errors", ".", "IllegalState", "(", "'List has already been retrieved.'", ")", "self", ".", "retrieved", "=", "True", "return", "objects", ".", "CommentList", "(", "self", ".", "_results", ",", "runtime", "=", "self", ".", "_runtime", ")" ]
Gets the comment list resulting from a search. return: (osid.commenting.CommentList) - the comment list raise: IllegalState - list has already been retrieved *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "comment", "list", "resulting", "from", "a", "search", "." ]
python
train
40.333333
sendgrid/sendgrid-python
examples/helpers/mail_example.py
https://github.com/sendgrid/sendgrid-python/blob/266c2abde7a35dfcce263e06bedc6a0bbdebeac9/examples/helpers/mail_example.py#L49-L79
def get_mock_personalization_dict():
    """Get a dict of personalization mock."""
    mock_pers = dict()

    mock_pers['to_list'] = [To("[email protected]", "Example User"),
                            To("[email protected]", "Example User")]

    mock_pers['cc_list'] = [To("[email protected]", "Example User"),
                            To("[email protected]", "Example User")]

    mock_pers['bcc_list'] = [To("[email protected]"),
                             To("[email protected]")]

    mock_pers['subject'] = ("Hello World from the Personalized "
                            "SendGrid Python Library")

    mock_pers['headers'] = [Header("X-Test", "test"),
                            Header("X-Mock", "true")]

    mock_pers['substitutions'] = [Substitution("%name%", "Example User"),
                                  Substitution("%city%", "Denver")]

    mock_pers['custom_args'] = [CustomArg("user_id", "343"),
                                CustomArg("type", "marketing")]

    mock_pers['send_at'] = 1443636843
    return mock_pers
[ "def", "get_mock_personalization_dict", "(", ")", ":", "mock_pers", "=", "dict", "(", ")", "mock_pers", "[", "'to_list'", "]", "=", "[", "To", "(", "\"[email protected]\"", ",", "\"Example User\"", ")", ",", "To", "(", "\"[email protected]\"", ",", "\"Example User\"", ")", "]", "mock_pers", "[", "'cc_list'", "]", "=", "[", "To", "(", "\"[email protected]\"", ",", "\"Example User\"", ")", ",", "To", "(", "\"[email protected]\"", ",", "\"Example User\"", ")", "]", "mock_pers", "[", "'bcc_list'", "]", "=", "[", "To", "(", "\"[email protected]\"", ")", ",", "To", "(", "\"[email protected]\"", ")", "]", "mock_pers", "[", "'subject'", "]", "=", "(", "\"Hello World from the Personalized \"", "\"SendGrid Python Library\"", ")", "mock_pers", "[", "'headers'", "]", "=", "[", "Header", "(", "\"X-Test\"", ",", "\"test\"", ")", ",", "Header", "(", "\"X-Mock\"", ",", "\"true\"", ")", "]", "mock_pers", "[", "'substitutions'", "]", "=", "[", "Substitution", "(", "\"%name%\"", ",", "\"Example User\"", ")", ",", "Substitution", "(", "\"%city%\"", ",", "\"Denver\"", ")", "]", "mock_pers", "[", "'custom_args'", "]", "=", "[", "CustomArg", "(", "\"user_id\"", ",", "\"343\"", ")", ",", "CustomArg", "(", "\"type\"", ",", "\"marketing\"", ")", "]", "mock_pers", "[", "'send_at'", "]", "=", "1443636843", "return", "mock_pers" ]
Get a dict of personalization mock.
[ "Get", "a", "dict", "of", "personalization", "mock", "." ]
python
train
37.322581
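The mock dict above is plain data; consuming it means feeding each list into a Personalization object. A minimal sketch of that step, assuming the add_* methods and subject/send_at attributes of sendgrid.helpers.mail.Personalization (not verified against a specific sendgrid-python release):

    from sendgrid.helpers.mail import Personalization

    def build_personalization(mock):
        """Fold the mock dict into a single Personalization object."""
        p = Personalization()
        for to in mock['to_list']:
            p.add_to(to)
        for cc in mock['cc_list']:
            p.add_cc(cc)
        for bcc in mock['bcc_list']:
            p.add_bcc(bcc)
        p.subject = mock['subject']
        for header in mock['headers']:
            p.add_header(header)
        for substitution in mock['substitutions']:
            p.add_substitution(substitution)
        for arg in mock['custom_args']:
            p.add_custom_arg(arg)
        p.send_at = mock['send_at']  # epoch seconds for scheduled send
        return p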
sassoo/goldman
goldman/deserializers/form_data.py
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/deserializers/form_data.py#L35-L60
def deserialize(self, mimetypes):  # pylint: disable=arguments-differ
    """ Invoke the deserializer

    Upon successful deserialization a dict will be returned
    containing the following key/vals:

        {
            'content': <uploaded object>,
            'content-type': <content-type of content>,
            'file-ext': <file extension based on content-type>,
            'file-name': <file name of content>,
        }

    :param mimetypes: allowed mimetypes of the object
                      in the request payload

    :return: normalized dict
    """
    super(Deserializer, self).deserialize()
    parts = self.parse(mimetypes)
    data = self.normalize(parts)
    return data
[ "def", "deserialize", "(", "self", ",", "mimetypes", ")", ":", "# pylint: disable=arguments-differ", "super", "(", "Deserializer", ",", "self", ")", ".", "deserialize", "(", ")", "parts", "=", "self", ".", "parse", "(", "mimetypes", ")", "data", "=", "self", ".", "normalize", "(", "parts", ")", "return", "data" ]
Invoke the deserializer Upon successful deserialization a dict will be returned containing the following key/vals: { 'content': <uploaded object>, 'content-type': <content-type of content>, 'file-ext': <file extension based on content-type>, 'file-name': <file name of content>, } :param mimetypes: allowed mimetypes of the object in the request payload :return: normalized dict
[ "Invoke", "the", "deserializer" ]
python
train
28.923077
fgmacedo/django-export-action
export_action/introspection.py
https://github.com/fgmacedo/django-export-action/blob/215fecb9044d22e3ae19d86c3b220041a11fad07/export_action/introspection.py#L66-L74
def get_direct_fields_from_model(model_class):
    """ Direct, not m2m, not FK """
    direct_fields = []
    all_fields_names = _get_all_field_names(model_class)
    for field_name in all_fields_names:
        field, model, direct, m2m = _get_field_by_name(model_class, field_name)
        if direct and not m2m and not _get_remote_field(field):
            direct_fields += [field]
    return direct_fields
[ "def", "get_direct_fields_from_model", "(", "model_class", ")", ":", "direct_fields", "=", "[", "]", "all_fields_names", "=", "_get_all_field_names", "(", "model_class", ")", "for", "field_name", "in", "all_fields_names", ":", "field", ",", "model", ",", "direct", ",", "m2m", "=", "_get_field_by_name", "(", "model_class", ",", "field_name", ")", "if", "direct", "and", "not", "m2m", "and", "not", "_get_remote_field", "(", "field", ")", ":", "direct_fields", "+=", "[", "field", "]", "return", "direct_fields" ]
Direct, not m2m, not FK
[ "Direct", "not", "m2m", "not", "FK" ]
python
train
44.444444
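A hedged usage sketch for get_direct_fields_from_model; the Book model and its columns are hypothetical, only the import path comes from the record:

    from export_action.introspection import get_direct_fields_from_model
    from myapp.models import Book  # hypothetical Django model

    fields = get_direct_fields_from_model(Book)
    # Only concrete local columns survive the filter: no FK, no M2M.
    print([f.name for f in fields])  # e.g. ['id', 'title', 'published']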
JoelBender/bacpypes
py25/bacpypes/debugging.py
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py25/bacpypes/debugging.py#L30-L36
def xtob(data, sep=''):
    """Interpret the hex encoding of a blob (string)."""
    # remove the non-hex characters
    data = re.sub("[^0-9a-fA-F]", '', data)

    # interpret the hex
    return binascii.unhexlify(data)
[ "def", "xtob", "(", "data", ",", "sep", "=", "''", ")", ":", "# remove the non-hex characters", "data", "=", "re", ".", "sub", "(", "\"[^0-9a-fA-F]\"", ",", "''", ",", "data", ")", "# interpret the hex", "return", "binascii", ".", "unhexlify", "(", "data", ")" ]
Interpret the hex encoding of a blob (string).
[ "Interpret", "the", "hex", "encoding", "of", "a", "blob", "(", "string", ")", "." ]
python
train
30.714286
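xtob is easy to exercise on its own: it strips every non-hex character (the sep parameter is accepted but never used) and unhexlifies what remains. A short usage sketch, assuming the installed bacpypes package exposes the same helper as this py25 copy:

    from bacpypes.debugging import xtob

    print(xtob('01:02:03'))     # b'\x01\x02\x03', separators stripped
    print(xtob('de ad be ef'))  # b'\xde\xad\xbe\xef'
    # An odd number of remaining hex digits raises binascii.Error.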
sixty-north/asq
asq/queryables.py
https://github.com/sixty-north/asq/blob/db0c4cbcf2118435136d4b63c62a12711441088e/asq/queryables.py#L1391-L1452
def join(self,
         inner_iterable,
         outer_key_selector=identity,
         inner_key_selector=identity,
         result_selector=lambda outer, inner: (outer, inner)):
    '''Perform an inner join with a second sequence using selected keys.

    The order of elements from outer is maintained. For each of these
    the order of elements from inner is also preserved.

    Note: This method uses deferred execution.

    Args:
        inner_iterable: The sequence to join with the outer sequence.

        outer_key_selector: An optional unary function to extract keys
            from elements of the outer (source) sequence. The first
            positional argument of the function should accept outer
            elements and the result value should be the key. If omitted,
            the identity function is used.

        inner_key_selector: An optional unary function to extract keys
            from elements of the inner_iterable. The first positional
            argument of the function should accept outer elements and
            the result value should be the key. If omitted, the identity
            function is used.

        result_selector: An optional binary function to create a result
            element from two matching elements of the outer and inner.
            If omitted the result elements will be a 2-tuple pair of
            the matching outer and inner elements.

    Returns:
        A Queryable whose elements are the result of performing an
        inner-join on two sequences.

    Raises:
        ValueError: If the Queryable has been closed.
        TypeError: If the inner_iterable is not in fact iterable.
        TypeError: If the outer_key_selector is not callable.
        TypeError: If the inner_key_selector is not callable.
        TypeError: If the result_selector is not callable.
    '''
    if self.closed():
        raise ValueError("Attempt to call join() on a closed Queryable.")

    if not is_iterable(inner_iterable):
        raise TypeError("Cannot compute join() with inner_iterable of "
                        "non-iterable {0}".format(str(type(inner_iterable))[7: -1]))

    if not is_callable(outer_key_selector):
        raise TypeError("join() parameter outer_key_selector={0} is not "
                        "callable".format(repr(outer_key_selector)))

    if not is_callable(inner_key_selector):
        raise TypeError("join() parameter inner_key_selector={0} is not "
                        "callable".format(repr(inner_key_selector)))

    if not is_callable(result_selector):
        raise TypeError("join() parameter result_selector={0} is not "
                        "callable".format(repr(result_selector)))

    return self._create(self._generate_join_result(inner_iterable,
                                                   outer_key_selector,
                                                   inner_key_selector,
                                                   result_selector))
[ "def", "join", "(", "self", ",", "inner_iterable", ",", "outer_key_selector", "=", "identity", ",", "inner_key_selector", "=", "identity", ",", "result_selector", "=", "lambda", "outer", ",", "inner", ":", "(", "outer", ",", "inner", ")", ")", ":", "if", "self", ".", "closed", "(", ")", ":", "raise", "ValueError", "(", "\"Attempt to call join() on a closed Queryable.\"", ")", "if", "not", "is_iterable", "(", "inner_iterable", ")", ":", "raise", "TypeError", "(", "\"Cannot compute join() with inner_iterable of \"", "\"non-iterable {0}\"", ".", "format", "(", "str", "(", "type", "(", "inner_iterable", ")", ")", "[", "7", ":", "-", "1", "]", ")", ")", "if", "not", "is_callable", "(", "outer_key_selector", ")", ":", "raise", "TypeError", "(", "\"join() parameter outer_key_selector={0} is not \"", "\"callable\"", ".", "format", "(", "repr", "(", "outer_key_selector", ")", ")", ")", "if", "not", "is_callable", "(", "inner_key_selector", ")", ":", "raise", "TypeError", "(", "\"join() parameter inner_key_selector={0} is not \"", "\"callable\"", ".", "format", "(", "repr", "(", "inner_key_selector", ")", ")", ")", "if", "not", "is_callable", "(", "result_selector", ")", ":", "raise", "TypeError", "(", "\"join() parameter result_selector={0} is not \"", "\"callable\"", ".", "format", "(", "repr", "(", "result_selector", ")", ")", ")", "return", "self", ".", "_create", "(", "self", ".", "_generate_join_result", "(", "inner_iterable", ",", "outer_key_selector", ",", "inner_key_selector", ",", "result_selector", ")", ")" ]
Perform an inner join with a second sequence using selected keys. The order of elements from outer is maintained. For each of these the order of elements from inner is also preserved. Note: This method uses deferred execution. Args: inner_iterable: The sequence to join with the outer sequence. outer_key_selector: An optional unary function to extract keys from elements of the outer (source) sequence. The first positional argument of the function should accept outer elements and the result value should be the key. If omitted, the identity function is used. inner_key_selector: An optional unary function to extract keys from elements of the inner_iterable. The first positional argument of the function should accept outer elements and the result value should be the key. If omitted, the identity function is used. result_selector: An optional binary function to create a result element from two matching elements of the outer and inner. If omitted the result elements will be a 2-tuple pair of the matching outer and inner elements. Returns: A Queryable whose elements are the result of performing an inner- join on two sequences. Raises: ValueError: If the Queryable has been closed. TypeError: If the inner_iterable is not in fact iterable. TypeError: If the outer_key_selector is not callable. TypeError: If the inner_key_selector is not callable. TypeError: If the result_selector is not callable.
[ "Perform", "an", "inner", "join", "with", "a", "second", "sequence", "using", "selected", "keys", "." ]
python
train
48.145161
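A usage sketch for join(), using asq's documented query initiator; the data and key selectors are illustrative:

    from asq.initiators import query

    colors = [('red', 1), ('green', 2), ('blue', 3)]
    fruits = [('apple', 1), ('lime', 2), ('plum', 3), ('berry', 3)]

    pairs = query(colors).join(
        fruits,
        outer_key_selector=lambda c: c[1],  # join on the numeric key
        inner_key_selector=lambda f: f[1],
        result_selector=lambda c, f: (c[0], f[0]))

    # Outer order is maintained; inner order is preserved per outer element.
    print(pairs.to_list())
    # [('red', 'apple'), ('green', 'lime'), ('blue', 'plum'), ('blue', 'berry')]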
Nic30/hwt
hwt/synthesizer/param.py
https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/synthesizer/param.py#L50-L64
def set(self, val):
    """
    set value of this param
    """
    assert not self.__isReadOnly, \
        ("This parameter(%s) was locked"
         " and now it can not be changed" % self.name)
    assert self.replacedWith is None, \
        ("This param was replaced with a new one and "
         "should not exist")
    val = toHVal(val)
    self.defVal = val
    self._val = val.staticEval()
    self._dtype = self._val._dtype
[ "def", "set", "(", "self", ",", "val", ")", ":", "assert", "not", "self", ".", "__isReadOnly", ",", "(", "\"This parameter(%s) was locked\"", "\" and now it can not be changed\"", "%", "self", ".", "name", ")", "assert", "self", ".", "replacedWith", "is", "None", ",", "(", "\"This param was replaced with new one and this \"", "\"should not exists\"", ")", "val", "=", "toHVal", "(", "val", ")", "self", ".", "defVal", "=", "val", "self", ".", "_val", "=", "val", ".", "staticEval", "(", ")", "self", ".", "_dtype", "=", "self", ".", "_val", ".", "_dtype" ]
set value of this param
[ "set", "value", "of", "this", "param" ]
python
test
31.6
Anaconda-Platform/anaconda-client
binstar_client/inspect_package/pypi.py
https://github.com/Anaconda-Platform/anaconda-client/blob/b276f0572744c73c184a8b43a897cfa7fc1dc523/binstar_client/inspect_package/pypi.py#L297-L391
def format_sdist_header_metadata(data, filename):
    """
    Format the metadata of pypi packages stored in email header format.

    Currently only used as backup on the wheel (compressed) file format.
    """
    description = get_header_description(data)
    config_items = python_version_check(data)
    attrs = dict(config_items)
    name = pop_key(attrs, 'Name', None)
    basename = path.basename(filename)
    if name is None:
        name = basename.split('-')[0]

    package_data = {
        'name': name,
        'summary': pop_key(attrs, 'Summary', None),
        'license': pop_key(attrs, 'License', None),
    }
    release_data = {
        'version': pop_key(attrs, 'Version'),
        'description': pop_key(attrs, 'Description', description),
        'home_page': pop_key(attrs, 'Home-page', None),
    }
    file_data = {
        'basename': basename,
        'attrs': {
            'packagetype': 'sdist',
            'python_version': 'source',
        }
    }

    # Parse multiple keys
    deps = []
    exts = {}
    environments = {}
    for key, val in config_items:
        if key in ['Requires-Dist', 'Requires']:
            name, extras, const, marker, url = parse_specification(val)
            name = norm_package_name(name)
            specs = const.split(',')
            new_specs = []
            for spec in specs:
                pos = [i for i, c in enumerate(spec) if c in '0123456789']
                if pos:
                    pos = pos[0]
                    comp, spec_ = spec[:pos].strip(), spec[pos:].strip()
                    new_specs.append((comp, spec_))

            # TODO: All this is to preserve the format used originally
            # but is this really needed?
            if marker:
                if marker.startswith('extra'):
                    marker = marker.replace('extra', '')
                    marker = marker.replace('==', '').strip()
                    ext = marker.rsplit(' ')[-1]
                    if '"' in ext or "'" in ext:
                        ext = ext[1:-1]
                    if ext not in exts:
                        exts[ext] = [{'name': name, 'specs': new_specs}]
                    else:
                        exts[ext].append({'name': name, 'specs': new_specs})
                else:
                    if marker not in environments:
                        environments[marker] = [{'name': name, 'specs': new_specs}]
                    else:
                        environments[marker].append({'name': name, 'specs': new_specs})
            else:
                deps.append({
                    'name': name,
                    'specs': new_specs,
                })

    deps.sort(key=lambda o: o['name'])
    new_exts = []
    for key, values in exts.items():
        new_exts.append({'name': key, 'depends': values})

    new_environments = []
    for key, values in environments.items():
        new_environments.append({'name': key, 'depends': values})

    file_data.update(dependencies={
        'has_dep_errors': False,
        'depends': deps,
        'extras': new_exts,
        'environments': new_environments,
    })
    return package_data, release_data, file_data
[ "def", "format_sdist_header_metadata", "(", "data", ",", "filename", ")", ":", "description", "=", "get_header_description", "(", "data", ")", "config_items", "=", "python_version_check", "(", "data", ")", "attrs", "=", "dict", "(", "config_items", ")", "name", "=", "pop_key", "(", "attrs", ",", "'Name'", ",", "None", ")", "basename", "=", "path", ".", "basename", "(", "filename", ")", "if", "name", "is", "None", ":", "name", "=", "basename", ".", "split", "(", "'-'", ")", "[", "0", "]", "package_data", "=", "{", "'name'", ":", "name", ",", "'summary'", ":", "pop_key", "(", "attrs", ",", "'Summary'", ",", "None", ")", ",", "'license'", ":", "pop_key", "(", "attrs", ",", "'License'", ",", "None", ")", ",", "}", "release_data", "=", "{", "'version'", ":", "pop_key", "(", "attrs", ",", "'Version'", ")", ",", "'description'", ":", "pop_key", "(", "attrs", ",", "'Description'", ",", "description", ")", ",", "'home_page'", ":", "pop_key", "(", "attrs", ",", "'Home-page'", ",", "None", ")", ",", "}", "file_data", "=", "{", "'basename'", ":", "basename", ",", "'attrs'", ":", "{", "'packagetype'", ":", "'sdist'", ",", "'python_version'", ":", "'source'", ",", "}", "}", "# Parse multiple keys", "deps", "=", "[", "]", "exts", "=", "{", "}", "environments", "=", "{", "}", "for", "key", ",", "val", "in", "config_items", ":", "if", "key", "in", "[", "'Requires-Dist'", ",", "'Requires'", "]", ":", "name", ",", "extras", ",", "const", ",", "marker", ",", "url", "=", "parse_specification", "(", "val", ")", "name", "=", "norm_package_name", "(", "name", ")", "specs", "=", "const", ".", "split", "(", "','", ")", "new_specs", "=", "[", "]", "for", "spec", "in", "specs", ":", "pos", "=", "[", "i", "for", "i", ",", "c", "in", "enumerate", "(", "spec", ")", "if", "c", "in", "'0123456789'", "]", "if", "pos", ":", "pos", "=", "pos", "[", "0", "]", "comp", ",", "spec_", "=", "spec", "[", ":", "pos", "]", ".", "strip", "(", ")", ",", "spec", "[", "pos", ":", "]", ".", "strip", "(", ")", "new_specs", ".", "append", "(", "(", "comp", ",", "spec_", ")", ")", "# TODO: All this is to preserve the format used originally", "# but is this really needed?", "if", "marker", ":", "if", "marker", ".", "startswith", "(", "'extra'", ")", ":", "marker", "=", "marker", ".", "replace", "(", "'extra'", ",", "''", ")", "marker", "=", "marker", ".", "replace", "(", "'=='", ",", "''", ")", ".", "strip", "(", ")", "ext", "=", "marker", ".", "rsplit", "(", "' '", ")", "[", "-", "1", "]", "if", "'\"'", "in", "ext", "or", "\"'\"", "in", "ext", ":", "ext", "=", "ext", "[", "1", ":", "-", "1", "]", "if", "ext", "not", "in", "exts", ":", "exts", "[", "ext", "]", "=", "[", "{", "'name'", ":", "name", ",", "'specs'", ":", "new_specs", "}", "]", "else", ":", "exts", "[", "ext", "]", ".", "append", "(", "{", "'name'", ":", "name", ",", "'specs'", ":", "new_specs", "}", ")", "else", ":", "if", "marker", "not", "in", "environments", ":", "environments", "[", "marker", "]", "=", "[", "{", "'name'", ":", "name", ",", "'specs'", ":", "new_specs", "}", "]", "else", ":", "environments", "[", "marker", "]", ".", "append", "(", "{", "'name'", ":", "name", ",", "'specs'", ":", "new_specs", "}", ")", "else", ":", "deps", ".", "append", "(", "{", "'name'", ":", "name", ",", "'specs'", ":", "new_specs", ",", "}", ")", "deps", ".", "sort", "(", "key", "=", "lambda", "o", ":", "o", "[", "'name'", "]", ")", "new_exts", "=", "[", "]", "for", "key", ",", "values", "in", "exts", ".", "items", "(", ")", ":", "new_exts", ".", "append", "(", "{", "'name'", ":", "key", 
",", "'depends'", ":", "values", "}", ")", "new_environments", "=", "[", "]", "for", "key", ",", "values", "in", "environments", ".", "items", "(", ")", ":", "new_environments", ".", "append", "(", "{", "'name'", ":", "key", ",", "'depends'", ":", "values", "}", ")", "file_data", ".", "update", "(", "dependencies", "=", "{", "'has_dep_errors'", ":", "False", ",", "'depends'", ":", "deps", ",", "'extras'", ":", "new_exts", ",", "'environments'", ":", "new_environments", ",", "}", ")", "return", "package_data", ",", "release_data", ",", "file_data" ]
Format the metadata of pypi packages stored in email header format. Currently only used as backup on the wheel (compressed) file format.
[ "Format", "the", "metadata", "of", "pypi", "packages", "stored", "in", "email", "header", "format", "." ]
python
train
32.642105
cloud9ers/gurumate
environment/lib/python2.7/site-packages/nose/plugins/xunit.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/nose/plugins/xunit.py#L164-L184
def report(self, stream):
    """Writes an Xunit-formatted XML file

    The file includes a report of test errors and failures.
    """
    self.stats['encoding'] = self.encoding
    self.stats['total'] = (self.stats['errors'] + self.stats['failures']
                           + self.stats['passes'] + self.stats['skipped'])
    self.error_report_file.write(
        u'<?xml version="1.0" encoding="%(encoding)s"?>'
        u'<testsuite name="nosetests" tests="%(total)d" '
        u'errors="%(errors)d" failures="%(failures)d" '
        u'skip="%(skipped)d">' % self.stats)
    self.error_report_file.write(u''.join([self._forceUnicode(e)
                                           for e in self.errorlist]))
    self.error_report_file.write(u'</testsuite>')
    self.error_report_file.close()
    if self.config.verbosity > 1:
        stream.writeln("-" * 70)
        stream.writeln("XML: %s" % self.error_report_file.name)
[ "def", "report", "(", "self", ",", "stream", ")", ":", "self", ".", "stats", "[", "'encoding'", "]", "=", "self", ".", "encoding", "self", ".", "stats", "[", "'total'", "]", "=", "(", "self", ".", "stats", "[", "'errors'", "]", "+", "self", ".", "stats", "[", "'failures'", "]", "+", "self", ".", "stats", "[", "'passes'", "]", "+", "self", ".", "stats", "[", "'skipped'", "]", ")", "self", ".", "error_report_file", ".", "write", "(", "u'<?xml version=\"1.0\" encoding=\"%(encoding)s\"?>'", "u'<testsuite name=\"nosetests\" tests=\"%(total)d\" '", "u'errors=\"%(errors)d\" failures=\"%(failures)d\" '", "u'skip=\"%(skipped)d\">'", "%", "self", ".", "stats", ")", "self", ".", "error_report_file", ".", "write", "(", "u''", ".", "join", "(", "[", "self", ".", "_forceUnicode", "(", "e", ")", "for", "e", "in", "self", ".", "errorlist", "]", ")", ")", "self", ".", "error_report_file", ".", "write", "(", "u'</testsuite>'", ")", "self", ".", "error_report_file", ".", "close", "(", ")", "if", "self", ".", "config", ".", "verbosity", ">", "1", ":", "stream", ".", "writeln", "(", "\"-\"", "*", "70", ")", "stream", ".", "writeln", "(", "\"XML: %s\"", "%", "self", ".", "error_report_file", ".", "name", ")" ]
Writes an Xunit-formatted XML file The file includes a report of test errors and failures.
[ "Writes", "an", "Xunit", "-", "formatted", "XML", "file" ]
python
test
46.714286
NYUCCL/psiTurk
psiturk/psiturk_org_services.py
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/psiturk_org_services.py#L94-L99
def delete_record(self, name, recordid, username, password):
    ''' Delete record '''
    #headers = {'key': username, 'secret': password}
    req = requests.delete(self.api_server + '/api/' + name + '/' +
                          str(recordid), auth=(username, password))
    return req
[ "def", "delete_record", "(", "self", ",", "name", ",", "recordid", ",", "username", ",", "password", ")", ":", "#headers = {'key': username, 'secret': password}", "req", "=", "requests", ".", "delete", "(", "self", ".", "api_server", "+", "'/api/'", "+", "name", "+", "'/'", "+", "str", "(", "recordid", ")", ",", "auth", "=", "(", "username", ",", "password", ")", ")", "return", "req" ]
Delete record
[ "Delete", "record" ]
python
train
50.666667
lowandrew/OLCTools
databasesetup/database_setup.py
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/databasesetup/database_setup.py#L242-L258
def univec(self, databasepath):
    """
    Download the UniVec core database

    :param databasepath: path to use to save the database
    """
    logging.info('Downloading univec database')
    databasepath = self.create_database_folder(databasepath, 'univec')
    # Set the name of the output file
    outputfile = os.path.join(databasepath, 'UniVec_core.tfa')
    target_url = 'ftp://ftp.ncbi.nlm.nih.gov/pub/UniVec/UniVec_Core'
    self.database_download(output_file=outputfile,
                           target_url=target_url,
                           database_path=databasepath)
    # Create a copy of the file with a .fasta extension
    if os.path.isfile(outputfile):
        renamed = os.path.splitext(outputfile)[0] + '.fasta'
        shutil.copy(outputfile, renamed)
[ "def", "univec", "(", "self", ",", "databasepath", ")", ":", "logging", ".", "info", "(", "'Downloading univec database'", ")", "databasepath", "=", "self", ".", "create_database_folder", "(", "databasepath", ",", "'univec'", ")", "# Set the name of the output file", "outputfile", "=", "os", ".", "path", ".", "join", "(", "databasepath", ",", "'UniVec_core.tfa'", ")", "target_url", "=", "'ftp://ftp.ncbi.nlm.nih.gov/pub/UniVec/UniVec_Core'", "self", ".", "database_download", "(", "output_file", "=", "outputfile", ",", "target_url", "=", "target_url", ",", "database_path", "=", "databasepath", ")", "# Create a copy of the file with a .fasta extension", "if", "os", ".", "path", ".", "isfile", "(", "outputfile", ")", ":", "renamed", "=", "os", ".", "path", ".", "splitext", "(", "outputfile", ")", "[", "0", "]", "+", "'.fasta'", "shutil", ".", "copy", "(", "outputfile", ",", "renamed", ")" ]
Download the UniVec core database :param databasepath: path to use to save the database
[ "Download", "the", "UniVec", "core", "database", ":", "param", "databasepath", ":", "path", "to", "use", "to", "save", "the", "database" ]
python
train
48.764706
thetarkus/django-semanticui-forms
semanticuiforms/utils.py
https://github.com/thetarkus/django-semanticui-forms/blob/9664c6f01621568c3fa39b36439178586649eafe/semanticuiforms/utils.py#L25-L66
def get_choices(field):
    """
    Find choices of a field, whether it has choices or has a queryset.

    Args:
        field (BoundField): Django form boundfield

    Returns:
        list: List of choices
    """
    empty_label = getattr(field.field, "empty_label", False)
    needs_empty_value = False
    choices = []

    # Data is the choices
    if hasattr(field.field, "_choices"):
        choices = field.field._choices

    # Data is a queryset
    elif hasattr(field.field, "_queryset"):
        queryset = field.field._queryset
        field_name = getattr(field.field, "to_field_name") or "pk"
        choices += ((getattr(obj, field_name), str(obj)) for obj in queryset)

    # Determine if an empty value is needed
    if choices and (choices[0][1] == BLANK_CHOICE_DASH[0][1] or choices[0][0]):
        needs_empty_value = True

        # Delete empty option
        if not choices[0][0]:
            del choices[0]

    # Remove dashed empty choice
    if empty_label == BLANK_CHOICE_DASH[0][1]:
        empty_label = None

    # Add custom empty value
    if empty_label or not field.field.required:
        if needs_empty_value:
            choices.insert(0, ("", empty_label or BLANK_CHOICE_DASH[0][1]))

    return choices
[ "def", "get_choices", "(", "field", ")", ":", "empty_label", "=", "getattr", "(", "field", ".", "field", ",", "\"empty_label\"", ",", "False", ")", "needs_empty_value", "=", "False", "choices", "=", "[", "]", "# Data is the choices", "if", "hasattr", "(", "field", ".", "field", ",", "\"_choices\"", ")", ":", "choices", "=", "field", ".", "field", ".", "_choices", "# Data is a queryset", "elif", "hasattr", "(", "field", ".", "field", ",", "\"_queryset\"", ")", ":", "queryset", "=", "field", ".", "field", ".", "_queryset", "field_name", "=", "getattr", "(", "field", ".", "field", ",", "\"to_field_name\"", ")", "or", "\"pk\"", "choices", "+=", "(", "(", "getattr", "(", "obj", ",", "field_name", ")", ",", "str", "(", "obj", ")", ")", "for", "obj", "in", "queryset", ")", "# Determine if an empty value is needed", "if", "choices", "and", "(", "choices", "[", "0", "]", "[", "1", "]", "==", "BLANK_CHOICE_DASH", "[", "0", "]", "[", "1", "]", "or", "choices", "[", "0", "]", "[", "0", "]", ")", ":", "needs_empty_value", "=", "True", "# Delete empty option", "if", "not", "choices", "[", "0", "]", "[", "0", "]", ":", "del", "choices", "[", "0", "]", "# Remove dashed empty choice", "if", "empty_label", "==", "BLANK_CHOICE_DASH", "[", "0", "]", "[", "1", "]", ":", "empty_label", "=", "None", "# Add custom empty value", "if", "empty_label", "or", "not", "field", ".", "field", ".", "required", ":", "if", "needs_empty_value", ":", "choices", ".", "insert", "(", "0", ",", "(", "\"\"", ",", "empty_label", "or", "BLANK_CHOICE_DASH", "[", "0", "]", "[", "1", "]", ")", ")", "return", "choices" ]
Find choices of a field, whether it has choices or has a queryset. Args: field (BoundField): Django form boundfield Returns: list: List of choices
[ "Find", "choices", "of", "a", "field", "whether", "it", "has", "choices", "or", "has", "a", "queryset", "." ]
python
train
25.333333
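A hedged sketch of calling get_choices on a bound field; the form is hypothetical, and the printed result assumes Django's default BLANK_CHOICE_DASH label:

    from django import forms
    from semanticuiforms.utils import get_choices

    class ColorForm(forms.Form):
        color = forms.ChoiceField(
            choices=[('r', 'Red'), ('g', 'Green')], required=False)

    form = ColorForm()
    # The field is optional with no empty choice of its own, so a
    # ("", "---------") entry is inserted at the front.
    print(get_choices(form['color']))
    # [('', '---------'), ('r', 'Red'), ('g', 'Green')]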
a1ezzz/wasp-general
wasp_general/network/web/session.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/network/web/session.py#L67-L88
def read_request_line(self, request_line):
    """ Read HTTP-request line

    :param request_line: line to parse

    for HTTP/0.9 is GET <Request-URI>
    for HTTP/1.0 and 1.1 is <METHOD> <Request-URI> HTTP/<HTTP-Version>,
    where HTTP-Version is 1.0 or 1.1.
    for HTTP/2: binary headers are used
    """
    request = self.__request_cls.parse_request_line(self, request_line)
    protocol_version = self.protocol_version()
    if protocol_version == '0.9':
        if request.method() != 'GET':
            raise Exception('HTTP/0.9 standard violation')
    elif protocol_version == '1.0' or protocol_version == '1.1':
        pass
    elif protocol_version == '2':
        pass
    else:
        raise RuntimeError('Unsupported HTTP-protocol')
[ "def", "read_request_line", "(", "self", ",", "request_line", ")", ":", "request", "=", "self", ".", "__request_cls", ".", "parse_request_line", "(", "self", ",", "request_line", ")", "protocol_version", "=", "self", ".", "protocol_version", "(", ")", "if", "protocol_version", "==", "'0.9'", ":", "if", "request", ".", "method", "(", ")", "!=", "'GET'", ":", "raise", "Exception", "(", "'HTTP/0.9 standard violation'", ")", "elif", "protocol_version", "==", "'1.0'", "or", "protocol_version", "==", "'1.1'", ":", "pass", "elif", "protocol_version", "==", "'2'", ":", "pass", "else", ":", "raise", "RuntimeError", "(", "'Unsupported HTTP-protocol'", ")" ]
Read HTTP-request line :param request_line: line to parse for HTTP/0.9 is GET <Request-URI> for HTTP/1.0 and 1.1 is <METHOD> <Request-URI> HTTP/<HTTP-Version>, where HTTP-Version is 1.0 or 1.1. for HTTP/2: binary headers are used
[ "Read", "HTTP", "-", "request", "line" ]
python
train
30.954545
playpauseandstop/rororo
rororo/logger.py
https://github.com/playpauseandstop/rororo/blob/28a04e8028c29647941e727116335e9d6fd64c27/rororo/logger.py#L28-L75
def default_logging_dict(*loggers: str, **kwargs: Any) -> DictStrAny:
    r"""Prepare logging dict suitable with ``logging.config.dictConfig``.

    **Usage**::

        from logging.config import dictConfig
        dictConfig(default_logging_dict('yourlogger'))

    :param \*loggers: Enable logging for each logger in sequence.
    :param \*\*kwargs: Setup additional logger params via keyword arguments.
    """
    kwargs.setdefault('level', 'INFO')

    return {
        'version': 1,
        'disable_existing_loggers': True,
        'filters': {
            'ignore_errors': {
                '()': IgnoreErrorsFilter,
            },
        },
        'formatters': {
            'default': {
                'format': '%(asctime)s [%(levelname)s:%(name)s] %(message)s',
            },
            'naked': {
                'format': u'%(message)s',
            },
        },
        'handlers': {
            'stdout': {
                'class': 'logging.StreamHandler',
                'filters': ['ignore_errors'],
                'formatter': 'default',
                'level': 'DEBUG',
                'stream': sys.stdout,
            },
            'stderr': {
                'class': 'logging.StreamHandler',
                'formatter': 'default',
                'level': 'WARNING',
                'stream': sys.stderr,
            },
        },
        'loggers': {
            logger: dict(handlers=['stdout', 'stderr'], **kwargs)
            for logger in loggers
        },
    }
[ "def", "default_logging_dict", "(", "*", "loggers", ":", "str", ",", "*", "*", "kwargs", ":", "Any", ")", "->", "DictStrAny", ":", "kwargs", ".", "setdefault", "(", "'level'", ",", "'INFO'", ")", "return", "{", "'version'", ":", "1", ",", "'disable_existing_loggers'", ":", "True", ",", "'filters'", ":", "{", "'ignore_errors'", ":", "{", "'()'", ":", "IgnoreErrorsFilter", ",", "}", ",", "}", ",", "'formatters'", ":", "{", "'default'", ":", "{", "'format'", ":", "'%(asctime)s [%(levelname)s:%(name)s] %(message)s'", ",", "}", ",", "'naked'", ":", "{", "'format'", ":", "u'%(message)s'", ",", "}", ",", "}", ",", "'handlers'", ":", "{", "'stdout'", ":", "{", "'class'", ":", "'logging.StreamHandler'", ",", "'filters'", ":", "[", "'ignore_errors'", "]", ",", "'formatter'", ":", "'default'", ",", "'level'", ":", "'DEBUG'", ",", "'stream'", ":", "sys", ".", "stdout", ",", "}", ",", "'stderr'", ":", "{", "'class'", ":", "'logging.StreamHandler'", ",", "'formatter'", ":", "'default'", ",", "'level'", ":", "'WARNING'", ",", "'stream'", ":", "sys", ".", "stderr", ",", "}", ",", "}", ",", "'loggers'", ":", "{", "logger", ":", "dict", "(", "handlers", "=", "[", "'stdout'", ",", "'stderr'", "]", ",", "*", "*", "kwargs", ")", "for", "logger", "in", "loggers", "}", ",", "}" ]
r"""Prepare logging dict suitable with ``logging.config.dictConfig``. **Usage**:: from logging.config import dictConfig dictConfig(default_logging_dict('yourlogger')) :param \*loggers: Enable logging for each logger in sequence. :param \*\*kwargs: Setup additional logger params via keyword arguments.
[ "r", "Prepare", "logging", "dict", "suitable", "with", "logging", ".", "config", ".", "dictConfig", "." ]
python
train
30.354167
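Usage mirrors the docstring; a short sketch that also raises one logger to DEBUG via the keyword passthrough:

    import logging
    from logging.config import dictConfig
    from rororo.logger import default_logging_dict

    dictConfig(default_logging_dict('myapp', level='DEBUG'))
    logging.getLogger('myapp').info('configured')
    # DEBUG+ flows to stdout (the ignore_errors filter is there to keep
    # error-level records off stdout), WARNING+ goes to stderr.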
arista-eosplus/pyeapi
pyeapi/api/vrrp.py
https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/api/vrrp.py#L174-L229
def get(self, name):
    """Get the vrrp configurations for a single node interface

    Args:
        name (string): The name of the interface for which vrrp
            configurations will be retrieved.

    Returns:
        A dictionary containing the vrrp configurations on the interface.
        Returns None if no vrrp configurations are defined or
        if the interface is not configured.
    """
    # Validate the interface and vrid are specified
    interface = name
    if not interface:
        raise ValueError("Vrrp.get(): interface must contain a value.")

    # Get the config for the interface. Return None if the
    # interface is not defined
    config = self.get_block('interface %s' % interface)
    if config is None:
        return config

    # Find all occurrences of vrids in this interface and make
    # a set of the unique vrid numbers
    match = set(re.findall(r'^\s+(?:no |)vrrp (\d+)', config, re.M))
    if not match:
        return None

    # Initialize the result dict
    result = dict()

    for vrid in match:
        subd = dict()

        # Parse the vrrp configuration for the vrid(s) in the list
        subd.update(self._parse_delay_reload(config, vrid))
        subd.update(self._parse_description(config, vrid))
        subd.update(self._parse_enable(config, vrid))
        subd.update(self._parse_ip_version(config, vrid))
        subd.update(self._parse_mac_addr_adv_interval(config, vrid))
        subd.update(self._parse_preempt(config, vrid))
        subd.update(self._parse_preempt_delay_min(config, vrid))
        subd.update(self._parse_preempt_delay_reload(config, vrid))
        subd.update(self._parse_primary_ip(config, vrid))
        subd.update(self._parse_priority(config, vrid))
        subd.update(self._parse_secondary_ip(config, vrid))
        subd.update(self._parse_timers_advertise(config, vrid))
        subd.update(self._parse_track(config, vrid))
        subd.update(self._parse_bfd_ip(config, vrid))

        result.update({int(vrid): subd})

    # If result dict is empty, return None, otherwise return result
    return result if result else None
[ "def", "get", "(", "self", ",", "name", ")", ":", "# Validate the interface and vrid are specified", "interface", "=", "name", "if", "not", "interface", ":", "raise", "ValueError", "(", "\"Vrrp.get(): interface must contain a value.\"", ")", "# Get the config for the interface. Return None if the", "# interface is not defined", "config", "=", "self", ".", "get_block", "(", "'interface %s'", "%", "interface", ")", "if", "config", "is", "None", ":", "return", "config", "# Find all occurrences of vrids in this interface and make", "# a set of the unique vrid numbers", "match", "=", "set", "(", "re", ".", "findall", "(", "r'^\\s+(?:no |)vrrp (\\d+)'", ",", "config", ",", "re", ".", "M", ")", ")", "if", "not", "match", ":", "return", "None", "# Initialize the result dict", "result", "=", "dict", "(", ")", "for", "vrid", "in", "match", ":", "subd", "=", "dict", "(", ")", "# Parse the vrrp configuration for the vrid(s) in the list", "subd", ".", "update", "(", "self", ".", "_parse_delay_reload", "(", "config", ",", "vrid", ")", ")", "subd", ".", "update", "(", "self", ".", "_parse_description", "(", "config", ",", "vrid", ")", ")", "subd", ".", "update", "(", "self", ".", "_parse_enable", "(", "config", ",", "vrid", ")", ")", "subd", ".", "update", "(", "self", ".", "_parse_ip_version", "(", "config", ",", "vrid", ")", ")", "subd", ".", "update", "(", "self", ".", "_parse_mac_addr_adv_interval", "(", "config", ",", "vrid", ")", ")", "subd", ".", "update", "(", "self", ".", "_parse_preempt", "(", "config", ",", "vrid", ")", ")", "subd", ".", "update", "(", "self", ".", "_parse_preempt_delay_min", "(", "config", ",", "vrid", ")", ")", "subd", ".", "update", "(", "self", ".", "_parse_preempt_delay_reload", "(", "config", ",", "vrid", ")", ")", "subd", ".", "update", "(", "self", ".", "_parse_primary_ip", "(", "config", ",", "vrid", ")", ")", "subd", ".", "update", "(", "self", ".", "_parse_priority", "(", "config", ",", "vrid", ")", ")", "subd", ".", "update", "(", "self", ".", "_parse_secondary_ip", "(", "config", ",", "vrid", ")", ")", "subd", ".", "update", "(", "self", ".", "_parse_timers_advertise", "(", "config", ",", "vrid", ")", ")", "subd", ".", "update", "(", "self", ".", "_parse_track", "(", "config", ",", "vrid", ")", ")", "subd", ".", "update", "(", "self", ".", "_parse_bfd_ip", "(", "config", ",", "vrid", ")", ")", "result", ".", "update", "(", "{", "int", "(", "vrid", ")", ":", "subd", "}", ")", "# If result dict is empty, return None, otherwise return result", "return", "result", "if", "result", "else", "None" ]
Get the vrrp configurations for a single node interface Args: name (string): The name of the interface for which vrrp configurations will be retrieved. Returns: A dictionary containing the vrrp configurations on the interface. Returns None if no vrrp configurations are defined or if the interface is not configured.
[ "Get", "the", "vrrp", "configurations", "for", "a", "single", "node", "interface" ]
python
train
40.196429
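A hedged usage sketch for the VRRP getter; the connection profile and interface name are placeholders:

    import pyeapi

    node = pyeapi.connect_to('veos01')  # hypothetical eapi.conf entry
    vrrp = node.api('vrrp')
    print(vrrp.get('Vlan10'))
    # -> a dict keyed by integer vrid, e.g. {10: {'priority': 100, ...}},
    #    or None if the interface has no vrrp configuration.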
yeraydiazdiaz/lunr.py
lunr/builder.py
https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/builder.py#L69-L98
def field(self, field_name, boost=1, extractor=None):
    """Adds a field to the list of document fields that will be indexed.

    Every document being indexed should have this field. None values for
    this field in indexed documents will not cause errors but will limit
    the chance of that document being retrieved by searches.

    All fields should be added before adding documents to the index. Adding
    fields after a document has been indexed will have no effect on already
    indexed documents.

    Fields can be boosted at build time. This allows terms within that field
    to have more importance on search results. Use a field boost to specify
    that matches within one field are more important than other fields.

    Args:
        field_name (str): Name of the field to be added, must not include
            a forward slash '/'.
        boost (int): Optional boost factor to apply to field.
        extractor (callable): Optional function to extract a field from the
            document.

    Raises:
        ValueError: If the field name contains a `/`.
    """
    if "/" in field_name:
        # include the offending name in the message (the original call
        # was missing the format argument)
        raise ValueError(
            "Field {} contains illegal character `/`".format(field_name))

    self._fields[field_name] = Field(field_name, boost, extractor)
[ "def", "field", "(", "self", ",", "field_name", ",", "boost", "=", "1", ",", "extractor", "=", "None", ")", ":", "if", "\"/\"", "in", "field_name", ":", "raise", "ValueError", "(", "\"Field {} contains illegal character `/`\"", ")", "self", ".", "_fields", "[", "field_name", "]", "=", "Field", "(", "field_name", ",", "boost", ",", "extractor", ")" ]
Adds a field to the list of document fields that will be indexed. Every document being indexed should have this field. None values for this field in indexed documents will not cause errors but will limit the chance of that document being retrieved by searches. All fields should be added before adding documents to the index. Adding fields after a document has been indexed will have no effect on already indexed documents. Fields can be boosted at build time. This allows terms within that field to have more importance on search results. Use a field boost to specify that matches within one field are more important than other fields. Args: field_name (str): Name of the field to be added, must not include a forward slash '/'. boost (int): Optional boost factor to apply to field. extractor (callable): Optional function to extract a field from the document. Raises: ValueError: If the field name contains a `/`.
[ "Adds", "a", "field", "to", "the", "list", "of", "document", "fields", "that", "will", "be", "indexed", "." ]
python
train
43.933333
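A minimal sketch of the build-time API around field(); it assumes the Builder's companion ref() method and skips the pipeline setup that lunr.py's top-level lunr() helper normally performs:

    from lunr.builder import Builder

    builder = Builder()
    builder.ref('id')
    builder.field('title', boost=10)  # title matches weigh more
    builder.field('body')
    # builder.field('a/b') would raise ValueError because of the slash.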
pylp/pylp
pylp/lib/runner.py
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/lib/runner.py#L63-L77
def call_task_fn(self):
    """Call the function attached to the task."""
    if not self.fn:
        return self.log_finished()

    future = asyncio.Future()
    future.add_done_callback(lambda x: self.log_finished())

    if inspect.iscoroutinefunction(self.fn):
        f = asyncio.ensure_future(self.fn())
        f.add_done_callback(lambda x: self.bind_end(x.result(), future))
    else:
        self.bind_end(self.fn(), future)

    return future
[ "def", "call_task_fn", "(", "self", ")", ":", "if", "not", "self", ".", "fn", ":", "return", "self", ".", "log_finished", "(", ")", "future", "=", "asyncio", ".", "Future", "(", ")", "future", ".", "add_done_callback", "(", "lambda", "x", ":", "self", ".", "log_finished", "(", ")", ")", "if", "inspect", ".", "iscoroutinefunction", "(", "self", ".", "fn", ")", ":", "f", "=", "asyncio", ".", "ensure_future", "(", "self", ".", "fn", "(", ")", ")", "f", ".", "add_done_callback", "(", "lambda", "x", ":", "self", ".", "bind_end", "(", "x", ".", "result", "(", ")", ",", "future", ")", ")", "else", ":", "self", ".", "bind_end", "(", "self", ".", "fn", "(", ")", ",", "future", ")", "return", "future" ]
Call the function attached to the task.
[ "Call", "the", "function", "attached", "to", "the", "task", "." ]
python
train
27
bitesofcode/projexui
projexui/widgets/xmenutemplatewidget/xmenutemplatewidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xmenutemplatewidget/xmenutemplatewidget.py#L120-L130
def createMenu( self ):
    """
    Creates a new menu with the given name.
    """
    name, accepted = QInputDialog.getText( self,
                                           'Create Menu',
                                           'Name: ')
    if ( accepted ):
        self.addMenuItem(self.createMenuItem(name),
                         self.uiMenuTREE.currentItem())
[ "def", "createMenu", "(", "self", ")", ":", "name", ",", "accepted", "=", "QInputDialog", ".", "getText", "(", "self", ",", "'Create Menu'", ",", "'Name: '", ")", "if", "(", "accepted", ")", ":", "self", ".", "addMenuItem", "(", "self", ".", "createMenuItem", "(", "name", ")", ",", "self", ".", "uiMenuTREE", ".", "currentItem", "(", ")", ")" ]
Creates a new menu with the given name.
[ "Creates", "a", "new", "menu", "with", "the", "given", "name", "." ]
python
train
37.090909
cytoscape/py2cytoscape
py2cytoscape/cyrest/networks.py
https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/networks.py#L937-L949
def getGroup(self, networkId, groupNodeId, verbose=None):
    """
    Returns the group specified by the `groupNodeId` and `networkId` parameters.

    :param networkId: SUID of the Network
    :param groupNodeId: SUID of the Node representing the Group
    :param verbose: print more

    :returns: 200: successful operation
    """
    response = api(url=self.___url + 'networks/' + str(networkId) +
                       '/groups/' + str(groupNodeId) + '',
                   method="GET", verbose=verbose, parse_params=False)
    return response
[ "def", "getGroup", "(", "self", ",", "networkId", ",", "groupNodeId", ",", "verbose", "=", "None", ")", ":", "response", "=", "api", "(", "url", "=", "self", ".", "___url", "+", "'networks/'", "+", "str", "(", "networkId", ")", "+", "'/groups/'", "+", "str", "(", "groupNodeId", ")", "+", "''", ",", "method", "=", "\"GET\"", ",", "verbose", "=", "verbose", ",", "parse_params", "=", "False", ")", "return", "response" ]
Returns the group specified by the `groupNodeId` and `networkId` parameters. :param networkId: SUID of the Network :param groupNodeId: SUID of the Node representing the Group :param verbose: print more :returns: 200: successful operation
[ "Returns", "the", "group", "specified", "by", "the", "groupNodeId", "and", "networkId", "parameters", "." ]
python
train
40.076923
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/eplusdata.py
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/eplusdata.py#L234-L238
def addinnode(self, otherplus, node, objectname):
    """add an item to the node.
    example: add a new zone to the element 'ZONE'
    """
    # do a test for unique object here
    newelement = otherplus.dt[node.upper()]
[ "def", "addinnode", "(", "self", ",", "otherplus", ",", "node", ",", "objectname", ")", ":", "# do a test for unique object here", "newelement", "=", "otherplus", ".", "dt", "[", "node", ".", "upper", "(", ")", "]" ]
add an item to the node. example: add a new zone to the element 'ZONE'
[ "add", "an", "item", "to", "the", "node", ".", "example", ":", "add", "a", "new", "zone", "to", "the", "element", "ZONE" ]
python
train
46
noobermin/pys
pys/__init__.py
https://github.com/noobermin/pys/blob/e01b74210c65eb96d019bb42e0a3c9e6676da943/pys/__init__.py#L69-L75
def filelines(fname, strip=False):
    '''read lines from a file into lines...optional strip'''
    with open(fname, 'r') as f:
        lines = f.readlines()
        if strip:
            lines[:] = [line.strip() for line in lines]
        return lines
[ "def", "filelines", "(", "fname", ",", "strip", "=", "False", ")", ":", "with", "open", "(", "fname", ",", "'r'", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "if", "strip", ":", "lines", "[", ":", "]", "=", "[", "line", ".", "strip", "(", ")", "for", "line", "in", "lines", "]", "return", "lines" ]
read lines from a file into lines...optional strip
[ "read", "lines", "from", "a", "file", "into", "lines", "...", "optional", "strip" ]
python
train
33.428571
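filelines is self-contained; a usage sketch (data.txt is a placeholder file name):

    from pys import filelines  # the function lives in the package __init__

    lines = filelines('data.txt')                 # trailing newlines kept
    stripped = filelines('data.txt', strip=True)  # whitespace trimmed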
intuition-io/insights
insights/plugins/hipchat.py
https://github.com/intuition-io/insights/blob/a4eae53a1886164db96751d2b0964aa2acb7c2d7/insights/plugins/hipchat.py#L53-L68
def message(self, body, room_id, style='text'):
    ''' Send a message to the given room '''
    # TODO Automatically detect body format ?
    path = 'rooms/message'
    data = {
        'room_id': room_id,
        'message': body,
        'from': self.name,
        'notify': 1,
        'message_format': style,
        'color': self.bg_color
    }

    log.info('sending message to hipchat', message=body, room=room_id)
    feedback = self._api_call(path, data, requests.post)
    log.debug(feedback)
    return feedback
[ "def", "message", "(", "self", ",", "body", ",", "room_id", ",", "style", "=", "'text'", ")", ":", "# TODO Automatically detect body format ?", "path", "=", "'rooms/message'", "data", "=", "{", "'room_id'", ":", "room_id", ",", "'message'", ":", "body", ",", "'from'", ":", "self", ".", "name", ",", "'notify'", ":", "1", ",", "'message_format'", ":", "style", ",", "'color'", ":", "self", ".", "bg_color", "}", "log", ".", "info", "(", "'sending message to hipchat'", ",", "message", "=", "body", ",", "room", "=", "room_id", ")", "feedback", "=", "self", ".", "_api_call", "(", "path", ",", "data", ",", "requests", ".", "post", ")", "log", ".", "debug", "(", "feedback", ")", "return", "feedback" ]
Send a message to the given room
[ "Send", "a", "message", "to", "the", "given", "room" ]
python
train
35.375
Jajcus/pyxmpp2
pyxmpp2/mainloop/glib.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/mainloop/glib.py#L147-L175
def _prepare_io_handler(self, handler):
    """Call the `interfaces.IOHandler.prepare` method and
    remove the handler from unprepared handler list when done.
    """
    logger.debug(" preparing handler: {0!r}".format(handler))
    self._unprepared_pending.discard(handler)
    ret = handler.prepare()
    logger.debug(" prepare result: {0!r}".format(ret))
    if isinstance(ret, HandlerReady):
        del self._unprepared_handlers[handler]
        prepared = True
    elif isinstance(ret, PrepareAgain):
        if ret.timeout == 0:
            tag = glib.idle_add(self._prepare_io_handler_cb, handler)
            self._prepare_sources[handler] = tag
        elif ret.timeout is not None:
            timeout = ret.timeout
            timeout = int(timeout * 1000)
            if not timeout:
                timeout = 1
            tag = glib.timeout_add(timeout, self._prepare_io_handler_cb,
                                   handler)
            self._prepare_sources[handler] = tag
        else:
            self._unprepared_pending.add(handler)
        prepared = False
    else:
        raise TypeError("Unexpected result type from prepare()")
    return prepared
[ "def", "_prepare_io_handler", "(", "self", ",", "handler", ")", ":", "logger", ".", "debug", "(", "\" preparing handler: {0!r}\"", ".", "format", "(", "handler", ")", ")", "self", ".", "_unprepared_pending", ".", "discard", "(", "handler", ")", "ret", "=", "handler", ".", "prepare", "(", ")", "logger", ".", "debug", "(", "\" prepare result: {0!r}\"", ".", "format", "(", "ret", ")", ")", "if", "isinstance", "(", "ret", ",", "HandlerReady", ")", ":", "del", "self", ".", "_unprepared_handlers", "[", "handler", "]", "prepared", "=", "True", "elif", "isinstance", "(", "ret", ",", "PrepareAgain", ")", ":", "if", "ret", ".", "timeout", "==", "0", ":", "tag", "=", "glib", ".", "idle_add", "(", "self", ".", "_prepare_io_handler_cb", ",", "handler", ")", "self", ".", "_prepare_sources", "[", "handler", "]", "=", "tag", "elif", "ret", ".", "timeout", "is", "not", "None", ":", "timeout", "=", "ret", ".", "timeout", "timeout", "=", "int", "(", "timeout", "*", "1000", ")", "if", "not", "timeout", ":", "timeout", "=", "1", "tag", "=", "glib", ".", "timeout_add", "(", "timeout", ",", "self", ".", "_prepare_io_handler_cb", ",", "handler", ")", "self", ".", "_prepare_sources", "[", "handler", "]", "=", "tag", "else", ":", "self", ".", "_unprepared_pending", ".", "add", "(", "handler", ")", "prepared", "=", "False", "else", ":", "raise", "TypeError", "(", "\"Unexpected result type from prepare()\"", ")", "return", "prepared" ]
Call the `interfaces.IOHandler.prepare` method and remove the handler from unprepared handler list when done.
[ "Call", "the", "interfaces", ".", "IOHandler", ".", "prepare", "method", "and", "remove", "the", "handler", "from", "unprepared", "handler", "list", "when", "done", "." ]
python
valid
44.517241
RazerM/yourls-python
yourls/core.py
https://github.com/RazerM/yourls-python/blob/716845562a2bbb430de3c379c9481b195e451ccf/yourls/core.py#L166-L181
def db_stats(self):
    """Get database statistics.

    Returns:
        DBStats: Total clicks and links statistics.

    Raises:
        requests.exceptions.HTTPError: Generic HTTP Error
    """
    data = dict(action='db-stats')
    jsondata = self._api_request(params=data)

    stats = DBStats(total_clicks=int(jsondata['db-stats']['total_clicks']),
                    total_links=int(jsondata['db-stats']['total_links']))

    return stats
[ "def", "db_stats", "(", "self", ")", ":", "data", "=", "dict", "(", "action", "=", "'db-stats'", ")", "jsondata", "=", "self", ".", "_api_request", "(", "params", "=", "data", ")", "stats", "=", "DBStats", "(", "total_clicks", "=", "int", "(", "jsondata", "[", "'db-stats'", "]", "[", "'total_clicks'", "]", ")", ",", "total_links", "=", "int", "(", "jsondata", "[", "'db-stats'", "]", "[", "'total_links'", "]", ")", ")", "return", "stats" ]
Get database statistics. Returns: DBStats: Total clicks and links statistics. Raises: requests.exceptions.HTTPError: Generic HTTP Error
[ "Get", "database", "statistics", "." ]
python
test
29.6875
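A usage sketch following the yourls-python client; endpoint and signature are placeholders:

    from yourls import YOURLSClient

    yourls = YOURLSClient('http://example.com/yourls-api.php',
                          signature='mysecret')
    stats = yourls.db_stats()
    print(stats.total_links, stats.total_clicks)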
siznax/wptools
wptools/page.py
https://github.com/siznax/wptools/blob/100eaea585c34aa9ad87a9eda8982bb4898f6ec9/wptools/page.py#L576-L605
def get_parse(self, show=True, proxy=None, timeout=0):
    """
    GET MediaWiki:API action=parse request
    https://en.wikipedia.org/w/api.php?action=help&modules=parse

    Required {params}: title OR pageid
    - title: <str> article title
    - pageid: <int> Wikipedia database ID

    Optional arguments:
    - [show]: <bool> echo page data if true
    - [proxy]: <str> use this HTTP proxy
    - [timeout]: <int> timeout in seconds (0=wait forever)

    Data captured:
    - image: <dict> {parse-image, parse-cover}
    - infobox: <dict> Infobox data as python dictionary
    - iwlinks: <list> interwiki links
    - pageid: <int> Wikipedia database ID
    - parsetree: <str> XML parse tree
    - requests: list of request actions made
    - wikibase: <str> Wikidata entity ID or wikidata URL
    - wikitext: <str> raw wikitext URL
    """
    if not self.params.get('title') and not self.params.get('pageid'):
        raise ValueError("get_parse needs title or pageid")
    self._get('parse', show, proxy, timeout)
    return self
[ "def", "get_parse", "(", "self", ",", "show", "=", "True", ",", "proxy", "=", "None", ",", "timeout", "=", "0", ")", ":", "if", "not", "self", ".", "params", ".", "get", "(", "'title'", ")", "and", "not", "self", ".", "params", ".", "get", "(", "'pageid'", ")", ":", "raise", "ValueError", "(", "\"get_parse needs title or pageid\"", ")", "self", ".", "_get", "(", "'parse'", ",", "show", ",", "proxy", ",", "timeout", ")", "return", "self" ]
GET MediaWiki:API action=parse request https://en.wikipedia.org/w/api.php?action=help&modules=parse Required {params}: title OR pageid - title: <str> article title - pageid: <int> Wikipedia database ID Optional arguments: - [show]: <bool> echo page data if true - [proxy]: <str> use this HTTP proxy - [timeout]: <int> timeout in seconds (0=wait forever) Data captured: - image: <dict> {parse-image, parse-cover} - infobox: <dict> Infobox data as python dictionary - iwlinks: <list> interwiki links - pageid: <int> Wikipedia database ID - parsetree: <str> XML parse tree - requests: list of request actions made - wikibase: <str> Wikidata entity ID or wikidata URL - wikitext: <str> raw wikitext URL
[ "GET", "MediaWiki", ":", "API", "action", "=", "parse", "request", "https", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "w", "/", "api", ".", "php?action", "=", "help&modules", "=", "parse" ]
python
train
36.833333
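Illustrative use of get_parse from the record above; the page title is arbitrary and 'wikibase' is one of the captured fields the docstring lists:

import wptools

page = wptools.page('Ada Lovelace')
page.get_parse(show=False)        # populates page.data via action=parse
print(page.data.get('wikibase'))  # e.g. a Wikidata entity ID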
blockcypher/blockcypher-python
blockcypher/api.py
https://github.com/blockcypher/blockcypher-python/blob/7601ea21916957ff279384fd699527ff9c28a56e/blockcypher/api.py#L152-L188
def get_addresses_details(address_list, coin_symbol='btc', txn_limit=None, api_key=None, before_bh=None, after_bh=None, unspent_only=False, show_confidence=False, confirmations=0, include_script=False): ''' Batch version of get_address_details method ''' for address in address_list: assert is_valid_address_for_coinsymbol( b58_address=address, coin_symbol=coin_symbol), address assert isinstance(show_confidence, bool), show_confidence kwargs = dict(addrs=';'.join([str(addr) for addr in address_list])) url = make_url(coin_symbol, **kwargs) params = {} if txn_limit: params['limit'] = txn_limit if api_key: params['token'] = api_key if before_bh: params['before'] = before_bh if after_bh: params['after'] = after_bh if confirmations: params['confirmations'] = confirmations if unspent_only: params['unspentOnly'] = 'true' if show_confidence: params['includeConfidence'] = 'true' if include_script: params['includeScript'] = 'true' r = requests.get(url, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS) r = get_valid_json(r) return [_clean_tx(response_dict=d) for d in r]
[ "def", "get_addresses_details", "(", "address_list", ",", "coin_symbol", "=", "'btc'", ",", "txn_limit", "=", "None", ",", "api_key", "=", "None", ",", "before_bh", "=", "None", ",", "after_bh", "=", "None", ",", "unspent_only", "=", "False", ",", "show_confidence", "=", "False", ",", "confirmations", "=", "0", ",", "include_script", "=", "False", ")", ":", "for", "address", "in", "address_list", ":", "assert", "is_valid_address_for_coinsymbol", "(", "b58_address", "=", "address", ",", "coin_symbol", "=", "coin_symbol", ")", ",", "address", "assert", "isinstance", "(", "show_confidence", ",", "bool", ")", ",", "show_confidence", "kwargs", "=", "dict", "(", "addrs", "=", "';'", ".", "join", "(", "[", "str", "(", "addr", ")", "for", "addr", "in", "address_list", "]", ")", ")", "url", "=", "make_url", "(", "coin_symbol", ",", "*", "*", "kwargs", ")", "params", "=", "{", "}", "if", "txn_limit", ":", "params", "[", "'limit'", "]", "=", "txn_limit", "if", "api_key", ":", "params", "[", "'token'", "]", "=", "api_key", "if", "before_bh", ":", "params", "[", "'before'", "]", "=", "before_bh", "if", "after_bh", ":", "params", "[", "'after'", "]", "=", "after_bh", "if", "confirmations", ":", "params", "[", "'confirmations'", "]", "=", "confirmations", "if", "unspent_only", ":", "params", "[", "'unspentOnly'", "]", "=", "'true'", "if", "show_confidence", ":", "params", "[", "'includeConfidence'", "]", "=", "'true'", "if", "include_script", ":", "params", "[", "'includeScript'", "]", "=", "'true'", "r", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "params", ",", "verify", "=", "True", ",", "timeout", "=", "TIMEOUT_IN_SECONDS", ")", "r", "=", "get_valid_json", "(", "r", ")", "return", "[", "_clean_tx", "(", "response_dict", "=", "d", ")", "for", "d", "in", "r", "]" ]
Batch version of get_address_details method
[ "Batch", "version", "of", "get_address_details", "method" ]
python
train
33.594595
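A sketch of calling the batch endpoint above. The two Bitcoin addresses are well-known valid ones used purely as placeholders, the response field names follow BlockCypher's address API, and a real api_key may be needed to avoid rate limiting:

from blockcypher import get_addresses_details

details = get_addresses_details(
    ['1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa',
     '1BoatSLRHtKNngkdXEeobR76b53LETtpyT'],
    coin_symbol='btc',
    txn_limit=5,
)
for d in details:
    print(d['address'], d['final_balance'])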
MacHu-GWU/crawlib-project
crawlib/util.py
https://github.com/MacHu-GWU/crawlib-project/blob/241516f2a7a0a32c692f7af35a1f44064e8ce1ab/crawlib/util.py#L36-L60
def join_all(domain, *parts): """ Join all url components. Example:: >>> join_all("https://www.apple.com", "iphone") https://www.apple.com/iphone :param domain: Domain parts, example: https://www.python.org :param parts: Other parts, example: "/doc", "/py27" :return: url """ l = list() if domain.endswith("/"): domain = domain[:-1] l.append(domain) for part in parts: for i in part.split("/"): if i.strip(): l.append(i) url = "/".join(l) return url
[ "def", "join_all", "(", "domain", ",", "*", "parts", ")", ":", "l", "=", "list", "(", ")", "if", "domain", ".", "endswith", "(", "\"/\"", ")", ":", "domain", "=", "domain", "[", ":", "-", "1", "]", "l", ".", "append", "(", "domain", ")", "for", "part", "in", "parts", ":", "for", "i", "in", "part", ".", "split", "(", "\"/\"", ")", ":", "if", "i", ".", "strip", "(", ")", ":", "l", ".", "append", "(", "i", ")", "url", "=", "\"/\"", ".", "join", "(", "l", ")", "return", "url" ]
Join all url components. Example:: >>> join_all("https://www.apple.com", "iphone") https://www.apple.com/iphone :param domain: Domain parts, example: https://www.python.org :param parts: Other parts, example: "/doc", "/py27" :return: url
[ "Join", "all", "url", "components", "." ]
python
train
21.68
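Since join_all above is self-contained, its behavior can be checked directly; the import path is taken from the record's path field:

from crawlib.util import join_all

print(join_all("https://www.python.org/", "doc", "/py27/"))
# -> https://www.python.org/doc/py27 (trailing slashes and empty segments dropped)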
pantsbuild/pants
src/python/pants/backend/jvm/tasks/jar_task.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/jvm/tasks/jar_task.py#L204-L212
def writejar(self, jar):
  """Schedules all entries from the given ``jar`` to be added to this jar save for the
  manifest.

  :param string jar: the path to the pre-existing jar to graft into this jar
  """
  if not jar or not isinstance(jar, string_types):
    raise ValueError('The jar path must be a non-empty string')

  self._jars.append(jar)
[ "def", "writejar", "(", "self", ",", "jar", ")", ":", "if", "not", "jar", "or", "not", "isinstance", "(", "jar", ",", "string_types", ")", ":", "raise", "ValueError", "(", "'The jar path must be a non-empty string'", ")", "self", ".", "_jars", ".", "append", "(", "jar", ")" ]
Schedules all entries from the given ``jar`` to be added to this jar save for the
manifest.

:param string jar: the path to the pre-existing jar to graft into this jar
[ "Schedules", "all", "entries", "from", "the", "given", "jar", "s", "to", "be", "added", "to", "this", "jar", "save", "for", "the", "manifest", "." ]
python
train
39.111111
mitsei/dlkit
dlkit/handcar/learning/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/learning/managers.py#L3190-L3220
def get_activity_admin_session_for_objective_bank(self, objective_bank_id, proxy, *args, **kwargs): """Gets the ``OsidSession`` associated with the activity admin service for the given objective bank. :param objective_bank_id: the ``Id`` of the objective bank :type objective_bank_id: ``osid.id.Id`` :param proxy: a proxy :type proxy: ``osid.proxy.Proxy`` :return: a ``ActivityAdminSession`` :rtype: ``osid.learning.ActivityAdminSession`` :raise: ``NotFound`` -- ``objective_bank_id`` not found :raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null`` :raise: ``OperationFailed`` -- ``unable to complete request`` :raise: ``Unimplemented`` -- ``supports_activity_admin()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_activity_admin()`` and ``supports_visible_federation()`` are ``true``.* """ if not objective_bank_id: raise NullArgument if not self.supports_activity_admin(): raise Unimplemented() try: from . import sessions except ImportError: raise OperationFailed() proxy = self._convert_proxy(proxy) try: session = sessions.ActivityAdminSession(objective_bank_id=objective_bank_id, proxy=proxy, runtime=self._runtime) except AttributeError: raise OperationFailed() return session
[ "def", "get_activity_admin_session_for_objective_bank", "(", "self", ",", "objective_bank_id", ",", "proxy", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "objective_bank_id", ":", "raise", "NullArgument", "if", "not", "self", ".", "supports_activity_admin", "(", ")", ":", "raise", "Unimplemented", "(", ")", "try", ":", "from", ".", "import", "sessions", "except", "ImportError", ":", "raise", "OperationFailed", "(", ")", "proxy", "=", "self", ".", "_convert_proxy", "(", "proxy", ")", "try", ":", "session", "=", "sessions", ".", "ActivityAdminSession", "(", "objective_bank_id", "=", "objective_bank_id", ",", "proxy", "=", "proxy", ",", "runtime", "=", "self", ".", "_runtime", ")", "except", "AttributeError", ":", "raise", "OperationFailed", "(", ")", "return", "session" ]
Gets the ``OsidSession`` associated with the activity admin service for the given objective bank. :param objective_bank_id: the ``Id`` of the objective bank :type objective_bank_id: ``osid.id.Id`` :param proxy: a proxy :type proxy: ``osid.proxy.Proxy`` :return: a ``ActivityAdminSession`` :rtype: ``osid.learning.ActivityAdminSession`` :raise: ``NotFound`` -- ``objective_bank_id`` not found :raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null`` :raise: ``OperationFailed`` -- ``unable to complete request`` :raise: ``Unimplemented`` -- ``supports_activity_admin()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_activity_admin()`` and ``supports_visible_federation()`` are ``true``.*
[ "Gets", "the", "OsidSession", "associated", "with", "the", "activity", "admin", "service", "for", "the", "given", "objective", "bank", "." ]
python
train
48.16129
DLR-RM/RAFCON
source/rafcon/gui/controllers/global_variable_manager.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/global_variable_manager.py#L158-L170
def remove_core_element(self, model):
    """Remove the respective core element of the given global variable name

    :param str model: String that is the key/gv_name of core element which should be removed
    :return:
    """
    gv_name = model
    if self.global_variable_is_editable(gv_name, "Deletion"):
        try:
            self.model.global_variable_manager.delete_variable(gv_name)
        except AttributeError as e:
            logger.warning("The respective global variable '{1}' couldn't be removed. -> {0}"
                           "".format(e, model))
[ "def", "remove_core_element", "(", "self", ",", "model", ")", ":", "gv_name", "=", "model", "if", "self", ".", "global_variable_is_editable", "(", "gv_name", ",", "\"Deletion\"", ")", ":", "try", ":", "self", ".", "model", ".", "global_variable_manager", ".", "delete_variable", "(", "gv_name", ")", "except", "AttributeError", "as", "e", ":", "logger", ".", "warning", "(", "\"The respective global variable '{1}' couldn't be removed. -> {0}\"", "\"\"", ".", "format", "(", "e", ",", "model", ")", ")" ]
Remove the respective core element of the given global variable name

:param str model: String that is the key/gv_name of core element which should be removed
:return:
[ "Remove", "respective", "core", "element", "of", "handed", "global", "variable", "name" ]
python
train
45.769231
ChrisCummins/labm8
system.py
https://github.com/ChrisCummins/labm8/blob/dd10d67a757aefb180cb508f86696f99440c94f5/system.py#L237-L284
def which(program, path=None):
  """
  Returns the full path of shell commands.

  Replicates the functionality of system which (1) command. Looks
  for the named program in the directories indicated in the $PATH
  environment variable, and returns the full path if found.

  Examples:

      >>> system.which("ls")
      "/bin/ls"

      >>> system.which("/bin/ls")
      "/bin/ls"

      >>> system.which("not-a-real-command")
      None

      >>> system.which("ls", path=("/usr/bin", "/bin"))
      "/bin/ls"

  Arguments:

      program (str): The name of the program to look for. Can
        be an absolute path.
      path (sequence of str, optional): A list of directories to
        look for the program in. Default value is system $PATH.

  Returns:

     str: Full path to program if found, else None.
  """
  # If path is not given, read the $PATH environment variable.
  path = path or os.environ["PATH"].split(os.pathsep)

  abspath = True if os.path.split(program)[0] else False

  if abspath:
    if fs.isexe(program):
      return program
  else:
    for directory in path:
      # De-quote directories.
      directory = directory.strip('"')
      exe_file = os.path.join(directory, program)
      if fs.isexe(exe_file):
        return exe_file

  return None
[ "def", "which", "(", "program", ",", "path", "=", "None", ")", ":", "# If path is not given, read the $PATH environment variable.", "path", "=", "path", "or", "os", ".", "environ", "[", "\"PATH\"", "]", ".", "split", "(", "os", ".", "pathsep", ")", "abspath", "=", "True", "if", "os", ".", "path", ".", "split", "(", "program", ")", "[", "0", "]", "else", "False", "if", "abspath", ":", "if", "fs", ".", "isexe", "(", "program", ")", ":", "return", "program", "else", ":", "for", "directory", "in", "path", ":", "# De-quote directories.", "directory", "=", "directory", ".", "strip", "(", "'\"'", ")", "exe_file", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "program", ")", "if", "fs", ".", "isexe", "(", "exe_file", ")", ":", "return", "exe_file", "return", "None" ]
Returns the full path of shell commands.

Replicates the functionality of system which (1) command. Looks
for the named program in the directories indicated in the $PATH
environment variable, and returns the full path if found.

Examples:

    >>> system.which("ls")
    "/bin/ls"

    >>> system.which("/bin/ls")
    "/bin/ls"

    >>> system.which("not-a-real-command")
    None

    >>> system.which("ls", path=("/usr/bin", "/bin"))
    "/bin/ls"

Arguments:

    program (str): The name of the program to look for. Can
      be an absolute path.
    path (sequence of str, optional): A list of directories to
      look for the program in. Default value is system $PATH.

Returns:

   str: Full path to program if found, else None.
[ "Returns", "the", "full", "path", "of", "shell", "commands", "." ]
python
train
27.6875
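Usage sketch for which() above, mirroring the doctests in its docstring; the import path is assumed from the record's repo and path fields:

from labm8 import system

print(system.which("ls"))                             # e.g. /bin/ls
print(system.which("not-a-real-command"))             # None
print(system.which("ls", path=("/usr/bin", "/bin")))  # /bin/ls on most systems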
inonit/drf-haystack
drf_haystack/serializers.py
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/serializers.py#L470-L477
def get_document_field(instance):
    """
    Returns which field the search index has marked as its `document=True` field.
    """
    for name, field in instance.searchindex.fields.items():
        if field.document is True:
            return name
[ "def", "get_document_field", "(", "instance", ")", ":", "for", "name", ",", "field", "in", "instance", ".", "searchindex", ".", "fields", ".", "items", "(", ")", ":", "if", "field", ".", "document", "is", "True", ":", "return", "name" ]
Returns which field the search index has marked as its `document=True` field.
[ "Returns", "which", "field", "the", "search", "index", "has", "marked", "as", "it", "s", "document", "=", "True", "field", "." ]
python
train
34.5
secdev/scapy
scapy/layers/tls/cert.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/tls/cert.py#L80-L91
def pem2der(pem_string):
    """Convert PEM string to DER format"""
    # Encode all lines between the first '-----\n' and the 2nd-to-last '-----'.
    pem_string = pem_string.replace(b"\r", b"")
    first_idx = pem_string.find(b"-----\n") + 6
    if pem_string.find(b"-----BEGIN", first_idx) != -1:
        raise Exception("pem2der() expects only one PEM-encoded object")
    last_idx = pem_string.rfind(b"-----", 0, pem_string.rfind(b"-----"))
    base64_string = pem_string[first_idx:last_idx]
    base64_string = base64_string.replace(b"\n", b"")
    der_string = base64.b64decode(base64_string)
    return der_string
[ "def", "pem2der", "(", "pem_string", ")", ":", "# Encode all lines between the first '-----\\n' and the 2nd-to-last '-----'.", "pem_string", "=", "pem_string", ".", "replace", "(", "b\"\\r\"", ",", "b\"\"", ")", "first_idx", "=", "pem_string", ".", "find", "(", "b\"-----\\n\"", ")", "+", "6", "if", "pem_string", ".", "find", "(", "b\"-----BEGIN\"", ",", "first_idx", ")", "!=", "-", "1", ":", "raise", "Exception", "(", "\"pem2der() expects only one PEM-encoded object\"", ")", "last_idx", "=", "pem_string", ".", "rfind", "(", "b\"-----\"", ",", "0", ",", "pem_string", ".", "rfind", "(", "b\"-----\"", ")", ")", "base64_string", "=", "pem_string", "[", "first_idx", ":", "last_idx", "]", "base64_string", ".", "replace", "(", "b\"\\n\"", ",", "b\"\"", ")", "der_string", "=", "base64", ".", "b64decode", "(", "base64_string", ")", "return", "der_string" ]
Convert PEM string to DER format
[ "Convert", "PEM", "string", "to", "DER", "format" ]
python
train
49.5
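A self-contained round trip for pem2der above; the payload bytes are arbitrary and only the standard-library base64 module is assumed besides the scapy import:

import base64
from scapy.layers.tls.cert import pem2der

der = bytes(range(32))
pem = (b"-----BEGIN CERTIFICATE-----\n"
       + base64.encodebytes(der)
       + b"-----END CERTIFICATE-----\n")
assert pem2der(pem) == der  # the base64 body between the markers decodes back to der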
saltstack/salt
salt/cloud/clouds/vmware.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/vmware.py#L1645-L1661
def list_datastores_full(kwargs=None, call=None): ''' List all the datastores for this VMware environment, with extra information CLI Example: .. code-block:: bash salt-cloud -f list_datastores_full my-vmware-config ''' if call != 'function': raise SaltCloudSystemExit( 'The list_datastores_full function must be called with ' '-f or --function.' ) return {'Datastores': salt.utils.vmware.list_datastores_full(_get_si())}
[ "def", "list_datastores_full", "(", "kwargs", "=", "None", ",", "call", "=", "None", ")", ":", "if", "call", "!=", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The list_datastores_full function must be called with '", "'-f or --function.'", ")", "return", "{", "'Datastores'", ":", "salt", ".", "utils", ".", "vmware", ".", "list_datastores_full", "(", "_get_si", "(", ")", ")", "}" ]
List all the datastores for this VMware environment, with extra information CLI Example: .. code-block:: bash salt-cloud -f list_datastores_full my-vmware-config
[ "List", "all", "the", "datastores", "for", "this", "VMware", "environment", "with", "extra", "information" ]
python
train
28.529412
quantumlib/Cirq
cirq/google/engine/engine.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/google/engine/engine.py#L460-L470
def cancel_job(self, job_resource_name: str): """Cancels the given job. See also the cancel method on EngineJob. Params: job_resource_name: A string of the form `projects/project_id/programs/program_id/jobs/job_id`. """ self.service.projects().programs().jobs().cancel( name=job_resource_name, body={}).execute()
[ "def", "cancel_job", "(", "self", ",", "job_resource_name", ":", "str", ")", ":", "self", ".", "service", ".", "projects", "(", ")", ".", "programs", "(", ")", ".", "jobs", "(", ")", ".", "cancel", "(", "name", "=", "job_resource_name", ",", "body", "=", "{", "}", ")", ".", "execute", "(", ")" ]
Cancels the given job. See also the cancel method on EngineJob. Params: job_resource_name: A string of the form `projects/project_id/programs/program_id/jobs/job_id`.
[ "Cancels", "the", "given", "job", "." ]
python
train
34.909091
erikdejonge/pyprofiler
main_profile.py
https://github.com/erikdejonge/pyprofiler/blob/e32dfcfeb5f1340d4274e7a683c1c5b414595c0e/main_profile.py#L85-L94
def get_print_list(): """ get_print_list """ profiler = start_profile() meth1() meth2() meth3() meth4() return end_profile(profiler, returnvalue=True)
[ "def", "get_print_list", "(", ")", ":", "profiler", "=", "start_profile", "(", ")", "meth1", "(", ")", "meth2", "(", ")", "meth3", "(", ")", "meth4", "(", ")", "return", "end_profile", "(", "profiler", ",", "returnvalue", "=", "True", ")" ]
get_print_list
[ "get_print_list" ]
python
train
17.7
maas/python-libmaas
maas/client/viscera/bcache_cache_sets.py
https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/bcache_cache_sets.py#L73-L103
async def create( cls, node: Union[Node, str], cache_device: Union[BlockDevice, Partition]): """ Create a BcacheCacheSet on a Node. :param node: Node to create the interface on. :type node: `Node` or `str` :param cache_device: Block device or partition to create the cache set on. :type cache_device: `BlockDevice` or `Partition` """ params = {} if isinstance(node, str): params['system_id'] = node elif isinstance(node, Node): params['system_id'] = node.system_id else: raise TypeError( 'node must be a Node or str, not %s' % ( type(node).__name__)) if isinstance(cache_device, BlockDevice): params['cache_device'] = cache_device.id elif isinstance(cache_device, Partition): params['cache_partition'] = cache_device.id else: raise TypeError( 'cache_device must be a BlockDevice or Partition, not %s' % ( type(cache_device).__name__)) return cls._object(await cls._handler.create(**params))
[ "async", "def", "create", "(", "cls", ",", "node", ":", "Union", "[", "Node", ",", "str", "]", ",", "cache_device", ":", "Union", "[", "BlockDevice", ",", "Partition", "]", ")", ":", "params", "=", "{", "}", "if", "isinstance", "(", "node", ",", "str", ")", ":", "params", "[", "'system_id'", "]", "=", "node", "elif", "isinstance", "(", "node", ",", "Node", ")", ":", "params", "[", "'system_id'", "]", "=", "node", ".", "system_id", "else", ":", "raise", "TypeError", "(", "'node must be a Node or str, not %s'", "%", "(", "type", "(", "node", ")", ".", "__name__", ")", ")", "if", "isinstance", "(", "cache_device", ",", "BlockDevice", ")", ":", "params", "[", "'cache_device'", "]", "=", "cache_device", ".", "id", "elif", "isinstance", "(", "cache_device", ",", "Partition", ")", ":", "params", "[", "'cache_partition'", "]", "=", "cache_device", ".", "id", "else", ":", "raise", "TypeError", "(", "'cache_device must be a BlockDevice or Partition, not %s'", "%", "(", "type", "(", "cache_device", ")", ".", "__name__", ")", ")", "return", "cls", ".", "_object", "(", "await", "cls", ".", "_handler", ".", "create", "(", "*", "*", "params", ")", ")" ]
Create a BcacheCacheSet on a Node. :param node: Node to create the interface on. :type node: `Node` or `str` :param cache_device: Block device or partition to create the cache set on. :type cache_device: `BlockDevice` or `Partition`
[ "Create", "a", "BcacheCacheSet", "on", "a", "Node", "." ]
python
train
37.483871
sentinel-hub/eo-learn
core/eolearn/core/constants.py
https://github.com/sentinel-hub/eo-learn/blob/b8c390b9f553c561612fe9eb64e720611633a035/core/eolearn/core/constants.py#L88-L101
def ndim(self): """If given FeatureType stores a dictionary of numpy.ndarrays it returns dimensions of such arrays.""" if self.is_raster(): return { FeatureType.DATA: 4, FeatureType.MASK: 4, FeatureType.SCALAR: 2, FeatureType.LABEL: 2, FeatureType.DATA_TIMELESS: 3, FeatureType.MASK_TIMELESS: 3, FeatureType.SCALAR_TIMELESS: 1, FeatureType.LABEL_TIMELESS: 1 }[self] return None
[ "def", "ndim", "(", "self", ")", ":", "if", "self", ".", "is_raster", "(", ")", ":", "return", "{", "FeatureType", ".", "DATA", ":", "4", ",", "FeatureType", ".", "MASK", ":", "4", ",", "FeatureType", ".", "SCALAR", ":", "2", ",", "FeatureType", ".", "LABEL", ":", "2", ",", "FeatureType", ".", "DATA_TIMELESS", ":", "3", ",", "FeatureType", ".", "MASK_TIMELESS", ":", "3", ",", "FeatureType", ".", "SCALAR_TIMELESS", ":", "1", ",", "FeatureType", ".", "LABEL_TIMELESS", ":", "1", "}", "[", "self", "]", "return", "None" ]
If given FeatureType stores a dictionary of numpy.ndarrays it returns dimensions of such arrays.
[ "If", "given", "FeatureType", "stores", "a", "dictionary", "of", "numpy", ".", "ndarrays", "it", "returns", "dimensions", "of", "such", "arrays", "." ]
python
train
38.571429
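Illustrative check of the ndim mapping above, assuming FeatureType is importable from eolearn.core and that ndim is exposed as a property (the decorator would sit just outside the quoted line range):

from eolearn.core import FeatureType

print(FeatureType.DATA.ndim)           # 4: time, height, width, channels
print(FeatureType.MASK_TIMELESS.ndim)  # 3: height, width, channels
print(FeatureType.META_INFO.ndim)      # None for non-raster feature types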
fictorial/filesysdb
filesysdb/__init__.py
https://github.com/fictorial/filesysdb/blob/bbf1e32218b71c7c15c33ada660433fffc6fa6ab/filesysdb/__init__.py#L141-L148
def each_object_id(collection): """Yields each object ID in the given ``collection``. The objects are not loaded.""" c_path = collection_path(collection) paths = glob('%s/*.%s' % (c_path, _ext)) for path in paths: match = regex.match(r'.+/(.+)\.%s$' % _ext, path) yield match.groups()[0]
[ "def", "each_object_id", "(", "collection", ")", ":", "c_path", "=", "collection_path", "(", "collection", ")", "paths", "=", "glob", "(", "'%s/*.%s'", "%", "(", "c_path", ",", "_ext", ")", ")", "for", "path", "in", "paths", ":", "match", "=", "regex", ".", "match", "(", "r'.+/(.+)\\.%s$'", "%", "_ext", ",", "path", ")", "yield", "match", ".", "groups", "(", ")", "[", "0", "]" ]
Yields each object ID in the given ``collection``. The objects are not loaded.
[ "Yields", "each", "object", "ID", "in", "the", "given", "collection", ".", "The", "objects", "are", "not", "loaded", "." ]
python
train
39.5
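A hedged sketch for each_object_id above; 'users' is a placeholder collection name, and the snippet assumes the filesysdb database has already been configured so that collection_path resolves:

from filesysdb import each_object_id

for object_id in each_object_id('users'):
    print(object_id)  # IDs only; the objects themselves are not loaded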
onnx/onnxmltools
onnxutils/onnxconverter_common/onnx_ops.py
https://github.com/onnx/onnxmltools/blob/d4e4c31990fc2d9fd1f92139f497d360914c9df2/onnxutils/onnxconverter_common/onnx_ops.py#L128-L160
def apply_cast(scope, input_name, output_name, container, operator_name=None, to=None): ''' :param to: enum defined in ONNX TensorProto.DataType, for example, TensorProto.FLOAT and TensorProto.INT64. ''' name = _create_name_or_use_existing_one(scope, 'Cast', operator_name) attrs = {'name': name} d = onnx_proto.TensorProto.DataType.DESCRIPTOR allowed_type_name_and_type_enum_pairs = {v.number: k for k, v in d.values_by_name.items()} if to not in allowed_type_name_and_type_enum_pairs: raise ValueError('Attribute "to" must be one of %s' % allowed_type_name_and_type_enum_pairs.keys()) if container.target_opset < 9: if to in [onnx_proto.TensorProto.STRING, onnx_proto.TensorProto.COMPLEX64, onnx_proto.TensorProto.COMPLEX128]: raise ValueError('Attribute "to" cannot correspond to a String or Complex TensorProto type.') if container.target_opset < 6: # Convert enum to string, for example, TensorProto.INT64 to 'INT64' attrs['to'] = allowed_type_name_and_type_enum_pairs[to] op_version = 1 else: # Enum, for example, TensorProto.INT64 attrs['to'] = to op_version = 6 else: # Enum value, for example, TensorProto.INT64 # String casting is supported in opset 9 if to in [onnx_proto.TensorProto.COMPLEX64, onnx_proto.TensorProto.COMPLEX128]: raise ValueError('Attribute "to" cannot correspond to a Complex TensorProto type.') attrs['to'] = to op_version = 9 container.add_node('Cast', input_name, output_name, op_version=op_version, **attrs)
[ "def", "apply_cast", "(", "scope", ",", "input_name", ",", "output_name", ",", "container", ",", "operator_name", "=", "None", ",", "to", "=", "None", ")", ":", "name", "=", "_create_name_or_use_existing_one", "(", "scope", ",", "'Cast'", ",", "operator_name", ")", "attrs", "=", "{", "'name'", ":", "name", "}", "d", "=", "onnx_proto", ".", "TensorProto", ".", "DataType", ".", "DESCRIPTOR", "allowed_type_name_and_type_enum_pairs", "=", "{", "v", ".", "number", ":", "k", "for", "k", ",", "v", "in", "d", ".", "values_by_name", ".", "items", "(", ")", "}", "if", "to", "not", "in", "allowed_type_name_and_type_enum_pairs", ":", "raise", "ValueError", "(", "'Attribute \"to\" must be one of %s'", "%", "allowed_type_name_and_type_enum_pairs", ".", "keys", "(", ")", ")", "if", "container", ".", "target_opset", "<", "9", ":", "if", "to", "in", "[", "onnx_proto", ".", "TensorProto", ".", "STRING", ",", "onnx_proto", ".", "TensorProto", ".", "COMPLEX64", ",", "onnx_proto", ".", "TensorProto", ".", "COMPLEX128", "]", ":", "raise", "ValueError", "(", "'Attribute \"to\" cannot correspond to a String or Complex TensorProto type.'", ")", "if", "container", ".", "target_opset", "<", "6", ":", "# Convert enum to string, for example, TensorProto.INT64 to 'INT64'", "attrs", "[", "'to'", "]", "=", "allowed_type_name_and_type_enum_pairs", "[", "to", "]", "op_version", "=", "1", "else", ":", "# Enum, for example, TensorProto.INT64", "attrs", "[", "'to'", "]", "=", "to", "op_version", "=", "6", "else", ":", "# Enum value, for example, TensorProto.INT64", "# String casting is supported in opset 9", "if", "to", "in", "[", "onnx_proto", ".", "TensorProto", ".", "COMPLEX64", ",", "onnx_proto", ".", "TensorProto", ".", "COMPLEX128", "]", ":", "raise", "ValueError", "(", "'Attribute \"to\" cannot correspond to a Complex TensorProto type.'", ")", "attrs", "[", "'to'", "]", "=", "to", "op_version", "=", "9", "container", ".", "add_node", "(", "'Cast'", ",", "input_name", ",", "output_name", ",", "op_version", "=", "op_version", ",", "*", "*", "attrs", ")" ]
:param to: enum defined in ONNX TensorProto.DataType, for example, TensorProto.FLOAT and TensorProto.INT64.
[ ":", "param", "to", ":", "enum", "defined", "in", "ONNX", "TensorProto", ".", "DataType", "for", "example", "TensorProto", ".", "FLOAT", "and", "TensorProto", ".", "INT64", "." ]
python
train
49.242424
tus/tus-py-client
tusclient/uploader.py
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L148-L154
def headers_as_list(self): """ Does the same as 'headers' except it is returned as a list. """ headers = self.headers headers_list = ['{}: {}'.format(key, value) for key, value in iteritems(headers)] return headers_list
[ "def", "headers_as_list", "(", "self", ")", ":", "headers", "=", "self", ".", "headers", "headers_list", "=", "[", "'{}: {}'", ".", "format", "(", "key", ",", "value", ")", "for", "key", ",", "value", "in", "iteritems", "(", "headers", ")", "]", "return", "headers_list" ]
Does the same as 'headers' except it is returned as a list.
[ "Does", "the", "same", "as", "headers", "except", "it", "is", "returned", "as", "a", "list", "." ]
python
train
37.285714
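A sketch around headers_as_list above, following the tus-py-client README. The endpoint is the public tusd demo server, 'file.bin' must be an existing local file, and headers_as_list is accessed as a property, as the record's line range suggests:

from tusclient import client

my_client = client.TusClient('https://tusd.tusdemo.net/files/')
uploader = my_client.uploader('file.bin', chunk_size=200)
print(uploader.headers_as_list)  # e.g. ['Tus-Resumable: 1.0.0']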
google/python-gflags
gflags/flag.py
https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/flag.py#L359-L386
def parse(self, arguments): """Parses one or more arguments with the installed parser. Args: arguments: a single argument or a list of arguments (typically a list of default values); a single argument is converted internally into a list containing one item. """ if not isinstance(arguments, list): # Default value may be a list of values. Most other arguments # will not be, so convert them into a single-item list to make # processing simpler below. arguments = [arguments] if self.present: # keep a backup reference to list of previously supplied option values values = self.value else: # "erase" the defaults with an empty list values = [] for item in arguments: # have Flag superclass parse argument, overwriting self.value reference Flag.Parse(self, item) # also increments self.present values.append(self.value) # put list of option values back in the 'value' attribute self.value = values
[ "def", "parse", "(", "self", ",", "arguments", ")", ":", "if", "not", "isinstance", "(", "arguments", ",", "list", ")", ":", "# Default value may be a list of values. Most other arguments", "# will not be, so convert them into a single-item list to make", "# processing simpler below.", "arguments", "=", "[", "arguments", "]", "if", "self", ".", "present", ":", "# keep a backup reference to list of previously supplied option values", "values", "=", "self", ".", "value", "else", ":", "# \"erase\" the defaults with an empty list", "values", "=", "[", "]", "for", "item", "in", "arguments", ":", "# have Flag superclass parse argument, overwriting self.value reference", "Flag", ".", "Parse", "(", "self", ",", "item", ")", "# also increments self.present", "values", ".", "append", "(", "self", ".", "value", ")", "# put list of option values back in the 'value' attribute", "self", ".", "value", "=", "values" ]
Parses one or more arguments with the installed parser. Args: arguments: a single argument or a list of arguments (typically a list of default values); a single argument is converted internally into a list containing one item.
[ "Parses", "one", "or", "more", "arguments", "with", "the", "installed", "parser", "." ]
python
train
35.642857
ib-lundgren/flask-oauthprovider
flask_oauthprovider.py
https://github.com/ib-lundgren/flask-oauthprovider/blob/6c91e8c11fc3cee410cb755d52d9d2c5331ee324/flask_oauthprovider.py#L219-L228
def authorized(self, request_token):
    """Create a verifier for a user authorized client"""
    verifier = generate_token(length=self.verifier_length[1])
    self.save_verifier(request_token, verifier)
    response = [
        (u'oauth_token', request_token),
        (u'oauth_verifier', verifier)
    ]
    callback = self.get_callback(request_token)
    return redirect(add_params_to_uri(callback, response))
[ "def", "authorized", "(", "self", ",", "request_token", ")", ":", "verifier", "=", "generate_token", "(", "length", "=", "self", ".", "verifier_length", "[", "1", "]", ")", "self", ".", "save_verifier", "(", "request_token", ",", "verifier", ")", "response", "=", "[", "(", "u'oauth_token'", ",", "request_token", ")", ",", "(", "u'oauth_verifier'", ",", "verifier", ")", "]", "callback", "=", "self", ".", "get_callback", "(", "request_token", ")", "return", "redirect", "(", "add_params_to_uri", "(", "callback", ",", "response", ")", ")" ]
Create a verifier for a user authorized client
[ "Create", "a", "verifier", "for", "an", "user", "authorized", "client" ]
python
train
44
google/grr
grr/server/grr_response_server/databases/mem_hunts.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mem_hunts.py#L261-L291
def ReadHuntCounters(self, hunt_id): """Reads hunt counters.""" num_clients = self.CountHuntFlows(hunt_id) num_successful_clients = self.CountHuntFlows( hunt_id, filter_condition=db.HuntFlowsCondition.SUCCEEDED_FLOWS_ONLY) num_failed_clients = self.CountHuntFlows( hunt_id, filter_condition=db.HuntFlowsCondition.FAILED_FLOWS_ONLY) num_clients_with_results = len( set(r[0].client_id for r in self.flow_results.values() if r and r[0].hunt_id == hunt_id)) num_crashed_clients = self.CountHuntFlows( hunt_id, filter_condition=db.HuntFlowsCondition.CRASHED_FLOWS_ONLY) num_results = self.CountHuntResults(hunt_id) total_cpu_seconds = 0 total_network_bytes_sent = 0 for f in self.ReadHuntFlows(hunt_id, 0, sys.maxsize): total_cpu_seconds += ( f.cpu_time_used.user_cpu_time + f.cpu_time_used.system_cpu_time) total_network_bytes_sent += f.network_bytes_sent return db.HuntCounters( num_clients=num_clients, num_successful_clients=num_successful_clients, num_failed_clients=num_failed_clients, num_clients_with_results=num_clients_with_results, num_crashed_clients=num_crashed_clients, num_results=num_results, total_cpu_seconds=total_cpu_seconds, total_network_bytes_sent=total_network_bytes_sent)
[ "def", "ReadHuntCounters", "(", "self", ",", "hunt_id", ")", ":", "num_clients", "=", "self", ".", "CountHuntFlows", "(", "hunt_id", ")", "num_successful_clients", "=", "self", ".", "CountHuntFlows", "(", "hunt_id", ",", "filter_condition", "=", "db", ".", "HuntFlowsCondition", ".", "SUCCEEDED_FLOWS_ONLY", ")", "num_failed_clients", "=", "self", ".", "CountHuntFlows", "(", "hunt_id", ",", "filter_condition", "=", "db", ".", "HuntFlowsCondition", ".", "FAILED_FLOWS_ONLY", ")", "num_clients_with_results", "=", "len", "(", "set", "(", "r", "[", "0", "]", ".", "client_id", "for", "r", "in", "self", ".", "flow_results", ".", "values", "(", ")", "if", "r", "and", "r", "[", "0", "]", ".", "hunt_id", "==", "hunt_id", ")", ")", "num_crashed_clients", "=", "self", ".", "CountHuntFlows", "(", "hunt_id", ",", "filter_condition", "=", "db", ".", "HuntFlowsCondition", ".", "CRASHED_FLOWS_ONLY", ")", "num_results", "=", "self", ".", "CountHuntResults", "(", "hunt_id", ")", "total_cpu_seconds", "=", "0", "total_network_bytes_sent", "=", "0", "for", "f", "in", "self", ".", "ReadHuntFlows", "(", "hunt_id", ",", "0", ",", "sys", ".", "maxsize", ")", ":", "total_cpu_seconds", "+=", "(", "f", ".", "cpu_time_used", ".", "user_cpu_time", "+", "f", ".", "cpu_time_used", ".", "system_cpu_time", ")", "total_network_bytes_sent", "+=", "f", ".", "network_bytes_sent", "return", "db", ".", "HuntCounters", "(", "num_clients", "=", "num_clients", ",", "num_successful_clients", "=", "num_successful_clients", ",", "num_failed_clients", "=", "num_failed_clients", ",", "num_clients_with_results", "=", "num_clients_with_results", ",", "num_crashed_clients", "=", "num_crashed_clients", ",", "num_results", "=", "num_results", ",", "total_cpu_seconds", "=", "total_cpu_seconds", ",", "total_network_bytes_sent", "=", "total_network_bytes_sent", ")" ]
Reads hunt counters.
[ "Reads", "hunt", "counters", "." ]
python
train
43.516129
tanghaibao/jcvi
jcvi/utils/cbook.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/cbook.py#L473-L484
def tile(lt, width=70, gap=1): """ Pretty print list of items. """ from jcvi.utils.iter import grouper max_len = max(len(x) for x in lt) + gap items_per_line = max(width // max_len, 1) lt = [x.rjust(max_len) for x in lt] g = list(grouper(lt, items_per_line, fillvalue="")) return "\n".join("".join(x) for x in g)
[ "def", "tile", "(", "lt", ",", "width", "=", "70", ",", "gap", "=", "1", ")", ":", "from", "jcvi", ".", "utils", ".", "iter", "import", "grouper", "max_len", "=", "max", "(", "len", "(", "x", ")", "for", "x", "in", "lt", ")", "+", "gap", "items_per_line", "=", "max", "(", "width", "//", "max_len", ",", "1", ")", "lt", "=", "[", "x", ".", "rjust", "(", "max_len", ")", "for", "x", "in", "lt", "]", "g", "=", "list", "(", "grouper", "(", "lt", ",", "items_per_line", ",", "fillvalue", "=", "\"\"", ")", ")", "return", "\"\\n\"", ".", "join", "(", "\"\"", ".", "join", "(", "x", ")", "for", "x", "in", "g", ")" ]
Pretty print list of items.
[ "Pretty", "print", "list", "of", "items", "." ]
python
train
28.25
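Direct usage of tile above; with width=20 and these items, max_len is 7, so two right-justified cells fit per line (import path from the record):

from jcvi.utils.cbook import tile

items = ["apple", "banana", "cherry", "dates", "elder", "fig"]
print(tile(items, width=20))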
gitpython-developers/GitPython
git/refs/remote.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/refs/remote.py#L28-L46
def delete(cls, repo, *refs, **kwargs):
    """Delete the given remote references

    :note:
        kwargs are given for compatibility with the base class method as we
        should not narrow the signature."""
    repo.git.branch("-d", "-r", *refs)
    # the official deletion method will ignore remote symbolic refs - these
    # are generally ignored in the refs/ folder. We don't though
    # and delete remainders manually
    for ref in refs:
        try:
            os.remove(osp.join(repo.common_dir, ref.path))
        except OSError:
            pass
        try:
            os.remove(osp.join(repo.git_dir, ref.path))
        except OSError:
            pass
[ "def", "delete", "(", "cls", ",", "repo", ",", "*", "refs", ",", "*", "*", "kwargs", ")", ":", "repo", ".", "git", ".", "branch", "(", "\"-d\"", ",", "\"-r\"", ",", "*", "refs", ")", "# the official deletion method will ignore remote symbolic refs - these", "# are generally ignored in the refs/ folder. We don't though", "# and delete remainders manually", "for", "ref", "in", "refs", ":", "try", ":", "os", ".", "remove", "(", "osp", ".", "join", "(", "repo", ".", "common_dir", ",", "ref", ".", "path", ")", ")", "except", "OSError", ":", "pass", "try", ":", "os", ".", "remove", "(", "osp", ".", "join", "(", "repo", ".", "git_dir", ",", "ref", ".", "path", ")", ")", "except", "OSError", ":", "pass" ]
Delete the given remote references

:note:
    kwargs are given for compatibility with the base class method as we
    should not narrow the signature.
[ "Delete", "the", "given", "remote", "references" ]
python
train
38.105263
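A hedged sketch for the delete classmethod above; the repository path and branch name are placeholders, and the ref lookup relies on GitPython's name-indexable ref list:

import git

repo = git.Repo('/path/to/repo')             # placeholder path
ref = repo.remotes.origin.refs['feature-x']  # placeholder remote branch
git.RemoteReference.delete(repo, ref)        # runs `git branch -d -r`, then removes leftover ref files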
spyder-ide/spyder
spyder/plugins/explorer/widgets.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/explorer/widgets.py#L729-L756
def move(self, fnames=None, directory=None): """Move files/directories""" if fnames is None: fnames = self.get_selected_filenames() orig = fixpath(osp.dirname(fnames[0])) while True: self.redirect_stdio.emit(False) if directory is None: folder = getexistingdirectory(self, _("Select directory"), orig) else: folder = directory self.redirect_stdio.emit(True) if folder: folder = fixpath(folder) if folder != orig: break else: return for fname in fnames: basename = osp.basename(fname) try: misc.move_file(fname, osp.join(folder, basename)) except EnvironmentError as error: QMessageBox.critical(self, _("Error"), _("<b>Unable to move <i>%s</i></b>" "<br><br>Error message:<br>%s" ) % (basename, to_text_string(error)))
[ "def", "move", "(", "self", ",", "fnames", "=", "None", ",", "directory", "=", "None", ")", ":", "if", "fnames", "is", "None", ":", "fnames", "=", "self", ".", "get_selected_filenames", "(", ")", "orig", "=", "fixpath", "(", "osp", ".", "dirname", "(", "fnames", "[", "0", "]", ")", ")", "while", "True", ":", "self", ".", "redirect_stdio", ".", "emit", "(", "False", ")", "if", "directory", "is", "None", ":", "folder", "=", "getexistingdirectory", "(", "self", ",", "_", "(", "\"Select directory\"", ")", ",", "orig", ")", "else", ":", "folder", "=", "directory", "self", ".", "redirect_stdio", ".", "emit", "(", "True", ")", "if", "folder", ":", "folder", "=", "fixpath", "(", "folder", ")", "if", "folder", "!=", "orig", ":", "break", "else", ":", "return", "for", "fname", "in", "fnames", ":", "basename", "=", "osp", ".", "basename", "(", "fname", ")", "try", ":", "misc", ".", "move_file", "(", "fname", ",", "osp", ".", "join", "(", "folder", ",", "basename", ")", ")", "except", "EnvironmentError", "as", "error", ":", "QMessageBox", ".", "critical", "(", "self", ",", "_", "(", "\"Error\"", ")", ",", "_", "(", "\"<b>Unable to move <i>%s</i></b>\"", "\"<br><br>Error message:<br>%s\"", ")", "%", "(", "basename", ",", "to_text_string", "(", "error", ")", ")", ")" ]
Move files/directories
[ "Move", "files", "/", "directories" ]
python
train
41.785714
aleju/imgaug
imgaug/augmenters/convolutional.py
https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmenters/convolutional.py#L296-L378
def Emboss(alpha=0, strength=1, name=None, deterministic=False, random_state=None): """ Augmenter that embosses images and overlays the result with the original image. The embossed version pronounces highlights and shadows, letting the image look as if it was recreated on a metal plate ("embossed"). dtype support:: See ``imgaug.augmenters.convolutional.Convolve``. Parameters ---------- alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional Visibility of the sharpened image. At 0, only the original image is visible, at 1.0 only its sharpened version is visible. * If an int or float, exactly that value will be used. * If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will be sampled per image. * If a list, then a random value will be sampled from that list per image. * If a StochasticParameter, a value will be sampled from the parameter per image. strength : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional Parameter that controls the strength of the embossing. Sane values are somewhere in the range ``(0, 2)`` with 1 being the standard embossing effect. Default value is 1. * If an int or float, exactly that value will be used. * If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will be sampled per image. * If a list, then a random value will be sampled from that list per image. * If a StochasticParameter, a value will be sampled from the parameter per image. name : None or str, optional See :func:`imgaug.augmenters.meta.Augmenter.__init__`. deterministic : bool, optional See :func:`imgaug.augmenters.meta.Augmenter.__init__`. random_state : None or int or numpy.random.RandomState, optional See :func:`imgaug.augmenters.meta.Augmenter.__init__`. Examples -------- >>> aug = Emboss(alpha=(0.0, 1.0), strength=(0.5, 1.5)) embosses an image with a variable strength in the range ``0.5 <= x <= 1.5`` and overlays the result with a variable alpha in the range ``0.0 <= a <= 1.0`` over the old image. """ alpha_param = iap.handle_continuous_param(alpha, "alpha", value_range=(0, 1.0), tuple_to_uniform=True, list_to_choice=True) strength_param = iap.handle_continuous_param(strength, "strength", value_range=(0, None), tuple_to_uniform=True, list_to_choice=True) def create_matrices(image, nb_channels, random_state_func): alpha_sample = alpha_param.draw_sample(random_state=random_state_func) ia.do_assert(0 <= alpha_sample <= 1.0) strength_sample = strength_param.draw_sample(random_state=random_state_func) matrix_nochange = np.array([ [0, 0, 0], [0, 1, 0], [0, 0, 0] ], dtype=np.float32) matrix_effect = np.array([ [-1-strength_sample, 0-strength_sample, 0], [0-strength_sample, 1, 0+strength_sample], [0, 0+strength_sample, 1+strength_sample] ], dtype=np.float32) matrix = (1-alpha_sample) * matrix_nochange + alpha_sample * matrix_effect return [matrix] * nb_channels if name is None: name = "Unnamed%s" % (ia.caller_name(),) return Convolve(create_matrices, name=name, deterministic=deterministic, random_state=random_state)
[ "def", "Emboss", "(", "alpha", "=", "0", ",", "strength", "=", "1", ",", "name", "=", "None", ",", "deterministic", "=", "False", ",", "random_state", "=", "None", ")", ":", "alpha_param", "=", "iap", ".", "handle_continuous_param", "(", "alpha", ",", "\"alpha\"", ",", "value_range", "=", "(", "0", ",", "1.0", ")", ",", "tuple_to_uniform", "=", "True", ",", "list_to_choice", "=", "True", ")", "strength_param", "=", "iap", ".", "handle_continuous_param", "(", "strength", ",", "\"strength\"", ",", "value_range", "=", "(", "0", ",", "None", ")", ",", "tuple_to_uniform", "=", "True", ",", "list_to_choice", "=", "True", ")", "def", "create_matrices", "(", "image", ",", "nb_channels", ",", "random_state_func", ")", ":", "alpha_sample", "=", "alpha_param", ".", "draw_sample", "(", "random_state", "=", "random_state_func", ")", "ia", ".", "do_assert", "(", "0", "<=", "alpha_sample", "<=", "1.0", ")", "strength_sample", "=", "strength_param", ".", "draw_sample", "(", "random_state", "=", "random_state_func", ")", "matrix_nochange", "=", "np", ".", "array", "(", "[", "[", "0", ",", "0", ",", "0", "]", ",", "[", "0", ",", "1", ",", "0", "]", ",", "[", "0", ",", "0", ",", "0", "]", "]", ",", "dtype", "=", "np", ".", "float32", ")", "matrix_effect", "=", "np", ".", "array", "(", "[", "[", "-", "1", "-", "strength_sample", ",", "0", "-", "strength_sample", ",", "0", "]", ",", "[", "0", "-", "strength_sample", ",", "1", ",", "0", "+", "strength_sample", "]", ",", "[", "0", ",", "0", "+", "strength_sample", ",", "1", "+", "strength_sample", "]", "]", ",", "dtype", "=", "np", ".", "float32", ")", "matrix", "=", "(", "1", "-", "alpha_sample", ")", "*", "matrix_nochange", "+", "alpha_sample", "*", "matrix_effect", "return", "[", "matrix", "]", "*", "nb_channels", "if", "name", "is", "None", ":", "name", "=", "\"Unnamed%s\"", "%", "(", "ia", ".", "caller_name", "(", ")", ",", ")", "return", "Convolve", "(", "create_matrices", ",", "name", "=", "name", ",", "deterministic", "=", "deterministic", ",", "random_state", "=", "random_state", ")" ]
Augmenter that embosses images and overlays the result with the original image. The embossed version pronounces highlights and shadows, letting the image look as if it was recreated on a metal plate ("embossed"). dtype support:: See ``imgaug.augmenters.convolutional.Convolve``. Parameters ---------- alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional Visibility of the sharpened image. At 0, only the original image is visible, at 1.0 only its sharpened version is visible. * If an int or float, exactly that value will be used. * If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will be sampled per image. * If a list, then a random value will be sampled from that list per image. * If a StochasticParameter, a value will be sampled from the parameter per image. strength : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional Parameter that controls the strength of the embossing. Sane values are somewhere in the range ``(0, 2)`` with 1 being the standard embossing effect. Default value is 1. * If an int or float, exactly that value will be used. * If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will be sampled per image. * If a list, then a random value will be sampled from that list per image. * If a StochasticParameter, a value will be sampled from the parameter per image. name : None or str, optional See :func:`imgaug.augmenters.meta.Augmenter.__init__`. deterministic : bool, optional See :func:`imgaug.augmenters.meta.Augmenter.__init__`. random_state : None or int or numpy.random.RandomState, optional See :func:`imgaug.augmenters.meta.Augmenter.__init__`. Examples -------- >>> aug = Emboss(alpha=(0.0, 1.0), strength=(0.5, 1.5)) embosses an image with a variable strength in the range ``0.5 <= x <= 1.5`` and overlays the result with a variable alpha in the range ``0.0 <= a <= 1.0`` over the old image.
[ "Augmenter", "that", "embosses", "images", "and", "overlays", "the", "result", "with", "the", "original", "image", "." ]
python
valid
43.518072
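Applying the Emboss augmenter above to a batch, with the parameter ranges copied from its docstring example and random uint8 images standing in for real data:

import numpy as np
import imgaug.augmenters as iaa

aug = iaa.Emboss(alpha=(0.0, 1.0), strength=(0.5, 1.5))
images = np.random.randint(0, 255, size=(4, 64, 64, 3), dtype=np.uint8)
images_aug = aug.augment_images(images)  # classic batch API
print(images_aug.shape)                  # (4, 64, 64, 3)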
google/python_portpicker
src/portserver.py
https://github.com/google/python_portpicker/blob/f737189ea7a2d4b97048a2f4e37609e293b03546/src/portserver.py#L199-L205
def add_port_to_free_pool(self, port): """Add a new port to the free pool for allocation.""" if port < 1 or port > 65535: raise ValueError( 'Port must be in the [1, 65535] range, not %d.' % port) port_info = _PortInfo(port=port) self._port_queue.append(port_info)
[ "def", "add_port_to_free_pool", "(", "self", ",", "port", ")", ":", "if", "port", "<", "1", "or", "port", ">", "65535", ":", "raise", "ValueError", "(", "'Port must be in the [1, 65535] range, not %d.'", "%", "port", ")", "port_info", "=", "_PortInfo", "(", "port", "=", "port", ")", "self", ".", "_port_queue", ".", "append", "(", "port_info", ")" ]
Add a new port to the free pool for allocation.
[ "Add", "a", "new", "port", "to", "the", "free", "pool", "for", "allocation", "." ]
python
train
45.285714
openstax/cnx-epub
cnxepub/html_parsers.py
https://github.com/openstax/cnx-epub/blob/f648a309eff551b0a68a115a98ddf7858149a2ea/cnxepub/html_parsers.py#L22-L39
def parse_navigation_html_to_tree(html, id): """Parse the given ``html`` (an etree object) to a tree. The ``id`` is required in order to assign the top-level tree id value. """ def xpath(x): return html.xpath(x, namespaces=HTML_DOCUMENT_NAMESPACES) try: value = xpath('//*[@data-type="binding"]/@data-value')[0] is_translucent = value == 'translucent' except IndexError: is_translucent = False if is_translucent: id = TRANSLUCENT_BINDER_ID tree = {'id': id, 'title': xpath('//*[@data-type="document-title"]/text()')[0], 'contents': [x for x in _nav_to_tree(xpath('//xhtml:nav')[0])] } return tree
[ "def", "parse_navigation_html_to_tree", "(", "html", ",", "id", ")", ":", "def", "xpath", "(", "x", ")", ":", "return", "html", ".", "xpath", "(", "x", ",", "namespaces", "=", "HTML_DOCUMENT_NAMESPACES", ")", "try", ":", "value", "=", "xpath", "(", "'//*[@data-type=\"binding\"]/@data-value'", ")", "[", "0", "]", "is_translucent", "=", "value", "==", "'translucent'", "except", "IndexError", ":", "is_translucent", "=", "False", "if", "is_translucent", ":", "id", "=", "TRANSLUCENT_BINDER_ID", "tree", "=", "{", "'id'", ":", "id", ",", "'title'", ":", "xpath", "(", "'//*[@data-type=\"document-title\"]/text()'", ")", "[", "0", "]", ",", "'contents'", ":", "[", "x", "for", "x", "in", "_nav_to_tree", "(", "xpath", "(", "'//xhtml:nav'", ")", "[", "0", "]", ")", "]", "}", "return", "tree" ]
Parse the given ``html`` (an etree object) to a tree. The ``id`` is required in order to assign the top-level tree id value.
[ "Parse", "the", "given", "html", "(", "an", "etree", "object", ")", "to", "a", "tree", ".", "The", "id", "is", "required", "in", "order", "to", "assign", "the", "top", "-", "level", "tree", "id", "value", "." ]
python
train
38.388889
BerkeleyAutomation/autolab_core
autolab_core/transformations.py
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/transformations.py#L1157-L1171
def quaternion_about_axis(angle, axis): """Return quaternion for rotation about axis. >>> q = quaternion_about_axis(0.123, (1, 0, 0)) >>> numpy.allclose(q, [0.06146124, 0, 0, 0.99810947]) True """ quaternion = numpy.zeros((4, ), dtype=numpy.float64) quaternion[:3] = axis[:3] qlen = vector_norm(quaternion) if qlen > _EPS: quaternion *= math.sin(angle/2.0) / qlen quaternion[3] = math.cos(angle/2.0) return quaternion
[ "def", "quaternion_about_axis", "(", "angle", ",", "axis", ")", ":", "quaternion", "=", "numpy", ".", "zeros", "(", "(", "4", ",", ")", ",", "dtype", "=", "numpy", ".", "float64", ")", "quaternion", "[", ":", "3", "]", "=", "axis", "[", ":", "3", "]", "qlen", "=", "vector_norm", "(", "quaternion", ")", "if", "qlen", ">", "_EPS", ":", "quaternion", "*=", "math", ".", "sin", "(", "angle", "/", "2.0", ")", "/", "qlen", "quaternion", "[", "3", "]", "=", "math", ".", "cos", "(", "angle", "/", "2.0", ")", "return", "quaternion" ]
Return quaternion for rotation about axis. >>> q = quaternion_about_axis(0.123, (1, 0, 0)) >>> numpy.allclose(q, [0.06146124, 0, 0, 0.99810947]) True
[ "Return", "quaternion", "for", "rotation", "about", "axis", "." ]
python
train
30.466667
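A quick numeric check of quaternion_about_axis above; by construction the result uses [x, y, z, w] layout, so a 90 degree rotation about z should give roughly [0, 0, sin(pi/4), cos(pi/4)]:

import math
import numpy
from autolab_core.transformations import quaternion_about_axis

q = quaternion_about_axis(math.pi / 2, (0, 0, 1))
print(numpy.round(q, 6))  # ~[0, 0, 0.707107, 0.707107]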
mbedmicro/pyOCD
pyocd/core/coresight_target.py
https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/core/coresight_target.py#L225-L232
def check_for_cores(self): """! @brief Init task: verify that at least one core was discovered.""" if not len(self.cores): # Allow the user to override the exception to enable uses like chip bringup. if self.session.options.get('allow_no_cores', False): logging.error("No cores were discovered!") else: raise exceptions.DebugError("No cores were discovered!")
[ "def", "check_for_cores", "(", "self", ")", ":", "if", "not", "len", "(", "self", ".", "cores", ")", ":", "# Allow the user to override the exception to enable uses like chip bringup.", "if", "self", ".", "session", ".", "options", ".", "get", "(", "'allow_no_cores'", ",", "False", ")", ":", "logging", ".", "error", "(", "\"No cores were discovered!\"", ")", "else", ":", "raise", "exceptions", ".", "DebugError", "(", "\"No cores were discovered!\"", ")" ]
! @brief Init task: verify that at least one core was discovered.
[ "!" ]
python
train
54.5
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Node/FS.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Node/FS.py#L769-L781
def src_builder(self): """Fetch the source code builder for this node. If there isn't one, we cache the source code builder specified for the directory (which in turn will cache the value from its parent directory, and so on up to the file system root). """ try: scb = self.sbuilder except AttributeError: scb = self.dir.src_builder() self.sbuilder = scb return scb
[ "def", "src_builder", "(", "self", ")", ":", "try", ":", "scb", "=", "self", ".", "sbuilder", "except", "AttributeError", ":", "scb", "=", "self", ".", "dir", ".", "src_builder", "(", ")", "self", ".", "sbuilder", "=", "scb", "return", "scb" ]
Fetch the source code builder for this node. If there isn't one, we cache the source code builder specified for the directory (which in turn will cache the value from its parent directory, and so on up to the file system root).
[ "Fetch", "the", "source", "code", "builder", "for", "this", "node", "." ]
python
train
34.923077
markovmodel/PyEMMA
pyemma/util/contexts.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/util/contexts.py#L51-L66
def random_seed(seed=42): """ sets the random seed of Python within the context. Example ------- >>> import random >>> with random_seed(seed=0): ... random.randint(0, 1000) # doctest: +SKIP 864 """ old_state = random.getstate() random.seed(seed) try: yield finally: random.setstate(old_state)
[ "def", "random_seed", "(", "seed", "=", "42", ")", ":", "old_state", "=", "random", ".", "getstate", "(", ")", "random", ".", "seed", "(", "seed", ")", "try", ":", "yield", "finally", ":", "random", ".", "setstate", "(", "old_state", ")" ]
sets the random seed of Python within the context. Example ------- >>> import random >>> with random_seed(seed=0): ... random.randint(0, 1000) # doctest: +SKIP 864
[ "sets", "the", "random", "seed", "of", "Python", "within", "the", "context", "." ]
python
train
21.5625
twilio/twilio-python
twilio/rest/api/v2010/account/conference/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/conference/__init__.py#L240-L249
def get_instance(self, payload): """ Build an instance of ConferenceInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.conference.ConferenceInstance :rtype: twilio.rest.api.v2010.account.conference.ConferenceInstance """ return ConferenceInstance(self._version, payload, account_sid=self._solution['account_sid'], )
[ "def", "get_instance", "(", "self", ",", "payload", ")", ":", "return", "ConferenceInstance", "(", "self", ".", "_version", ",", "payload", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", ")" ]
Build an instance of ConferenceInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.conference.ConferenceInstance :rtype: twilio.rest.api.v2010.account.conference.ConferenceInstance
[ "Build", "an", "instance", "of", "ConferenceInstance" ]
python
train
41.3
tensorflow/probability
tensorflow_probability/python/distributions/von_mises_fisher.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/von_mises_fisher.py#L358-L385
def _sample_3d(self, n, seed=None): """Specialized inversion sampler for 3D.""" seed = seed_stream.SeedStream(seed, salt='von_mises_fisher_3d') u_shape = tf.concat([[n], self._batch_shape_tensor()], axis=0) z = tf.random.uniform(u_shape, seed=seed(), dtype=self.dtype) # TODO(bjp): Higher-order odd dim analytic CDFs are available in [1], could # be bisected for bounded sampling runtime (i.e. not rejection sampling). # [1]: Inversion sampler via: https://ieeexplore.ieee.org/document/7347705/ # The inversion is: u = 1 + log(z + (1-z)*exp(-2*kappa)) / kappa # We must protect against both kappa and z being zero. safe_conc = tf.where(self.concentration > 0, self.concentration, tf.ones_like(self.concentration)) safe_z = tf.where(z > 0, z, tf.ones_like(z)) safe_u = 1 + tf.reduce_logsumexp( input_tensor=[ tf.math.log(safe_z), tf.math.log1p(-safe_z) - 2 * safe_conc ], axis=0) / safe_conc # Limit of the above expression as kappa->0 is 2*z-1 u = tf.where(self.concentration > tf.zeros_like(safe_u), safe_u, 2 * z - 1) # Limit of the expression as z->0 is -1. u = tf.where(tf.equal(z, 0), -tf.ones_like(u), u) if not self._allow_nan_stats: u = tf.debugging.check_numerics(u, 'u in _sample_3d') return u[..., tf.newaxis]
[ "def", "_sample_3d", "(", "self", ",", "n", ",", "seed", "=", "None", ")", ":", "seed", "=", "seed_stream", ".", "SeedStream", "(", "seed", ",", "salt", "=", "'von_mises_fisher_3d'", ")", "u_shape", "=", "tf", ".", "concat", "(", "[", "[", "n", "]", ",", "self", ".", "_batch_shape_tensor", "(", ")", "]", ",", "axis", "=", "0", ")", "z", "=", "tf", ".", "random", ".", "uniform", "(", "u_shape", ",", "seed", "=", "seed", "(", ")", ",", "dtype", "=", "self", ".", "dtype", ")", "# TODO(bjp): Higher-order odd dim analytic CDFs are available in [1], could", "# be bisected for bounded sampling runtime (i.e. not rejection sampling).", "# [1]: Inversion sampler via: https://ieeexplore.ieee.org/document/7347705/", "# The inversion is: u = 1 + log(z + (1-z)*exp(-2*kappa)) / kappa", "# We must protect against both kappa and z being zero.", "safe_conc", "=", "tf", ".", "where", "(", "self", ".", "concentration", ">", "0", ",", "self", ".", "concentration", ",", "tf", ".", "ones_like", "(", "self", ".", "concentration", ")", ")", "safe_z", "=", "tf", ".", "where", "(", "z", ">", "0", ",", "z", ",", "tf", ".", "ones_like", "(", "z", ")", ")", "safe_u", "=", "1", "+", "tf", ".", "reduce_logsumexp", "(", "input_tensor", "=", "[", "tf", ".", "math", ".", "log", "(", "safe_z", ")", ",", "tf", ".", "math", ".", "log1p", "(", "-", "safe_z", ")", "-", "2", "*", "safe_conc", "]", ",", "axis", "=", "0", ")", "/", "safe_conc", "# Limit of the above expression as kappa->0 is 2*z-1", "u", "=", "tf", ".", "where", "(", "self", ".", "concentration", ">", "tf", ".", "zeros_like", "(", "safe_u", ")", ",", "safe_u", ",", "2", "*", "z", "-", "1", ")", "# Limit of the expression as z->0 is -1.", "u", "=", "tf", ".", "where", "(", "tf", ".", "equal", "(", "z", ",", "0", ")", ",", "-", "tf", ".", "ones_like", "(", "u", ")", ",", "u", ")", "if", "not", "self", ".", "_allow_nan_stats", ":", "u", "=", "tf", ".", "debugging", ".", "check_numerics", "(", "u", ",", "'u in _sample_3d'", ")", "return", "u", "[", "...", ",", "tf", ".", "newaxis", "]" ]
Specialized inversion sampler for 3D.
[ "Specialized", "inversion", "sampler", "for", "3D", "." ]
python
test
49.5
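The inversion at the core of the _sample_3d record above can be checked with a plain NumPy transcription of the same formula, u = 1 + log(z + (1 - z) * exp(-2 * kappa)) / kappa; kappa=2.5 and the sample count are arbitrary demo values, and the TF-specific guards are only noted in comments rather than reproduced.

import numpy as np

def vmf3_inverse_cdf(z, kappa):
    # Plain transcription of u = 1 + log(z + (1 - z) * exp(-2 * kappa)) / kappa.
    # Assumes kappa > 0 and 0 < z < 1; the TF code adds tf.where guards for
    # the kappa -> 0 limit (2*z - 1) and the z -> 0 limit (-1).
    return 1.0 + np.log(z + (1.0 - z) * np.exp(-2.0 * kappa)) / kappa

rng = np.random.default_rng(0)
z = rng.uniform(size=5)
u = vmf3_inverse_cdf(z, kappa=2.5)
assert np.all((u >= -1.0) & (u <= 1.0))   # u is a cosine, so it lies in [-1, 1]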
PrefPy/prefpy
prefpy/profile.py
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/profile.py#L261-L284
def importPreflibFile(self, fileName):
        """
        Imports a preflib format file that contains all the information of a Profile. This function will completely
        override all members of the current Profile object. Currently, we assume that in an election where incomplete
        orderings are allowed, if a voter ranks only one candidate, then the voter did not prefer any candidates over
        another. This may lead to some discrepancies when importing and exporting a .toi preflib file or a .soi
        preflib file.

        :ivar str fileName: The name of the input file to be imported.
        """

        # Use the functionality found in io to read the file.
        elecFileObj = open(fileName, 'r')
        self.candMap, rankMaps, wmgMapsCounts, self.numVoters = prefpy_io.read_election_file(elecFileObj)
        elecFileObj.close()

        self.numCands = len(self.candMap.keys())

        # Go through the rankMaps and generate a wmgMap for each vote. Use the wmgMap to create a
        # Preference object.
        self.preferences = []
        for i in range(0, len(rankMaps)):
            wmgMap = self.genWmgMapFromRankMap(rankMaps[i])
            self.preferences.append(Preference(wmgMap, wmgMapsCounts[i]))
[ "def", "importPreflibFile", "(", "self", ",", "fileName", ")", ":", "# Use the functionality found in io to read the file.", "elecFileObj", "=", "open", "(", "fileName", ",", "'r'", ")", "self", ".", "candMap", ",", "rankMaps", ",", "wmgMapsCounts", ",", "self", ".", "numVoters", "=", "prefpy_io", ".", "read_election_file", "(", "elecFileObj", ")", "elecFileObj", ".", "close", "(", ")", "self", ".", "numCands", "=", "len", "(", "self", ".", "candMap", ".", "keys", "(", ")", ")", "# Go through the rankMaps and generate a wmgMap for each vote. Use the wmgMap to create a", "# Preference object.", "self", ".", "preferences", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "rankMaps", ")", ")", ":", "wmgMap", "=", "self", ".", "genWmgMapFromRankMap", "(", "rankMaps", "[", "i", "]", ")", "self", ".", "preferences", ".", "append", "(", "Preference", "(", "wmgMap", ",", "wmgMapsCounts", "[", "i", "]", ")", ")" ]
Imports a preflib format file that contains all the information of a Profile. This function will completely override all members of the current Profile object. Currently, we assume that in an election where incomplete orderings are allowed, if a voter ranks only one candidate, then the voter did not prefer any candidates over another. This may lead to some discrepancies when importing and exporting a .toi preflib file or a .soi preflib file. :ivar str fileName: The name of the input file to be imported.
[ "Imports", "a", "preflib", "format", "file", "that", "contains", "all", "the", "information", "of", "a", "Profile", ".", "This", "function", "will", "completely", "override", "all", "members", "of", "the", "current", "Profile", "object", ".", "Currently", "we", "assume", "that", "in", "an", "election", "where", "incomplete", "ordering", "are", "allowed", "if", "a", "voter", "ranks", "only", "one", "candidate", "then", "the", "voter", "did", "not", "prefer", "any", "candidates", "over", "another", ".", "This", "may", "lead", "to", "some", "discrepancies", "when", "importing", "and", "exporting", "a", ".", "toi", "preflib", "file", "or", "a", ".", "soi", "preflib", "file", "." ]
python
train
50.875
numenta/htmresearch
htmresearch/frameworks/layers/object_machine_base.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/object_machine_base.py#L132-L145
def randomTraversal(sensations, numTraversals): """ Given a list of sensations, return the SDRs that would be obtained by numTraversals random traversals of that set of sensations. Each sensation is a dict mapping cortical column index to a pair of SDR's (one location and one feature). """ newSensations = [] for _ in range(numTraversals): s = copy.deepcopy(sensations) random.shuffle(s) newSensations += s return newSensations
[ "def", "randomTraversal", "(", "sensations", ",", "numTraversals", ")", ":", "newSensations", "=", "[", "]", "for", "_", "in", "range", "(", "numTraversals", ")", ":", "s", "=", "copy", ".", "deepcopy", "(", "sensations", ")", "random", ".", "shuffle", "(", "s", ")", "newSensations", "+=", "s", "return", "newSensations" ]
Given a list of sensations, return the SDRs that would be obtained by numTraversals random traversals of that set of sensations. Each sensation is a dict mapping cortical column index to a pair of SDR's (one location and one feature).
[ "Given", "a", "list", "of", "sensations", "return", "the", "SDRs", "that", "would", "be", "obtained", "by", "numTraversals", "random", "traversals", "of", "that", "set", "of", "sensations", "." ]
python
train
33.571429
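A tiny self-contained run of the randomTraversal record above; the "sensations" here are stand-in strings rather than real {column: (location, feature)} dicts, and the seed is only there to make the demo repeatable.

import copy
import random

random.seed(0)                       # demo repeatability only
sensations = ['a', 'b', 'c']         # stand-ins for per-column SDR dicts
out = randomTraversal(sensations, numTraversals=2)

assert len(out) == 2 * len(sensations)       # two full passes over the set
assert sorted(out[:3]) == ['a', 'b', 'c']    # each pass is a permutation
assert sorted(out[3:]) == ['a', 'b', 'c']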
f3at/feat
src/feat/agencies/net/agency.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/agencies/net/agency.py#L455-L464
def actually_start_agent(self, descriptor, **kwargs): """ This method will be run only on the master agency. """ factory = IAgentFactory( applications.lookup_agent(descriptor.type_name)) if factory.standalone: return self.start_standalone_agent(descriptor, factory, **kwargs) else: return self.start_agent_locally(descriptor, **kwargs)
[ "def", "actually_start_agent", "(", "self", ",", "descriptor", ",", "*", "*", "kwargs", ")", ":", "factory", "=", "IAgentFactory", "(", "applications", ".", "lookup_agent", "(", "descriptor", ".", "type_name", ")", ")", "if", "factory", ".", "standalone", ":", "return", "self", ".", "start_standalone_agent", "(", "descriptor", ",", "factory", ",", "*", "*", "kwargs", ")", "else", ":", "return", "self", ".", "start_agent_locally", "(", "descriptor", ",", "*", "*", "kwargs", ")" ]
This method will be run only on the master agency.
[ "This", "method", "will", "be", "run", "only", "on", "the", "master", "agency", "." ]
python
train
41
eaton-lab/toytree
toytree/Multitree.py
https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/Multitree.py#L407-L418
def hash_trees(self): "hash ladderized tree topologies" observed = {} for idx, tree in enumerate(self.treelist): nwk = tree.write(tree_format=9) hashed = md5(nwk.encode("utf-8")).hexdigest() if hashed not in observed: observed[hashed] = idx self.treedict[idx] = 1 else: idx = observed[hashed] self.treedict[idx] += 1
[ "def", "hash_trees", "(", "self", ")", ":", "observed", "=", "{", "}", "for", "idx", ",", "tree", "in", "enumerate", "(", "self", ".", "treelist", ")", ":", "nwk", "=", "tree", ".", "write", "(", "tree_format", "=", "9", ")", "hashed", "=", "md5", "(", "nwk", ".", "encode", "(", "\"utf-8\"", ")", ")", ".", "hexdigest", "(", ")", "if", "hashed", "not", "in", "observed", ":", "observed", "[", "hashed", "]", "=", "idx", "self", ".", "treedict", "[", "idx", "]", "=", "1", "else", ":", "idx", "=", "observed", "[", "hashed", "]", "self", ".", "treedict", "[", "idx", "]", "+=", "1" ]
hash ladderized tree topologies
[ "hash", "ladderized", "tree", "topologies" ]
python
train
37.333333
aws/aws-xray-sdk-python
aws_xray_sdk/core/context.py
https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/context.py#L40-L54
def end_segment(self, end_time=None): """ End the current active segment. :param int end_time: epoch in seconds. If not specified the current system time will be used. """ entity = self.get_trace_entity() if not entity: log.warning("No segment to end") return if self._is_subsegment(entity): entity.parent_segment.close(end_time) else: entity.close(end_time)
[ "def", "end_segment", "(", "self", ",", "end_time", "=", "None", ")", ":", "entity", "=", "self", ".", "get_trace_entity", "(", ")", "if", "not", "entity", ":", "log", ".", "warning", "(", "\"No segment to end\"", ")", "return", "if", "self", ".", "_is_subsegment", "(", "entity", ")", ":", "entity", ".", "parent_segment", ".", "close", "(", "end_time", ")", "else", ":", "entity", ".", "close", "(", "end_time", ")" ]
End the current active segment. :param int end_time: epoch in seconds. If not specified the current system time will be used.
[ "End", "the", "current", "active", "segment", "." ]
python
train
31.266667
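The close-dispatch in the end_segment record above (subsegments close via their parent segment, plain segments close directly) can be illustrated with invented stub classes; these are not the real aws_xray_sdk types.

import time

class StubSegment:
    """Invented stand-in for a segment: closing it records an end time."""
    def __init__(self):
        self.end_time = None
    def close(self, end_time=None):
        self.end_time = end_time or time.time()

class StubSubsegment:
    """Invented stand-in for a subsegment: it only knows its parent."""
    def __init__(self, parent_segment):
        self.parent_segment = parent_segment

parent = StubSegment()
entity = StubSubsegment(parent)

# Same dispatch as end_segment: a subsegment delegates the close to its
# parent segment; a segment would close itself.
if isinstance(entity, StubSubsegment):
    entity.parent_segment.close()
else:
    entity.close()

assert parent.end_time is not None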
mitodl/PyLmod
pylmod/gradebook.py
https://github.com/mitodl/PyLmod/blob/b798b86c33d1eb615e7cd4f3457b5c15da1d86e0/pylmod/gradebook.py#L723-L838
def get_students( self, gradebook_id='', simple=False, section_name='', include_photo=False, include_grade_info=False, include_grade_history=False, include_makeup_grades=False ): """Get students for a gradebook. Get a list of students for a given gradebook, specified by a gradebook id. Does not include grade data. Args: gradebook_id (str): unique identifier for gradebook, i.e. ``2314`` simple (bool): if ``True``, just return dictionary with keys ``email``, ``name``, ``section``, default = ``False`` section_name (str): section name include_photo (bool): include student photo, default= ``False`` include_grade_info (bool): include student's grade info, default= ``False`` include_grade_history (bool): include student's grade history, default= ``False`` include_makeup_grades (bool): include student's makeup grades, default= ``False`` Raises: requests.RequestException: Exception connection error ValueError: Unable to decode response content Returns: list: list of student dictionaries .. code-block:: python [{ u'accountEmail': u'[email protected]', u'displayName': u'Molly Parker', u'photoUrl': None, u'middleName': None, u'section': u'Unassigned', u'sectionId': 1293925, u'editable': False, u'overallGradeInformation': None, u'studentId': 1145, u'studentAssignmentInfo': None, u'sortableName': u'Parker, Molly', u'surname': u'Parker', u'givenName': u'Molly', u'nickName': u'Molly', u'email': u'[email protected]' },] """ # These are parameters required for the remote API call, so # there aren't too many arguments, or too many variables # pylint: disable=too-many-arguments,too-many-locals # Set params by arguments params = dict( includePhoto=json.dumps(include_photo), includeGradeInfo=json.dumps(include_grade_info), includeGradeHistory=json.dumps(include_grade_history), includeMakeupGrades=json.dumps(include_makeup_grades), ) url = 'students/{gradebookId}' if section_name: group_id, _ = self.get_section_by_name(section_name) if group_id is None: failure_message = ( 'in get_students -- Error: ' 'No such section %s' % section_name ) log.critical(failure_message) raise PyLmodNoSuchSection(failure_message) url += '/section/{0}'.format(group_id) student_data = self.get( url.format( gradebookId=gradebook_id or self.gradebook_id ), params=params, ) if simple: # just return dict with keys email, name, section student_map = dict( accountEmail='email', displayName='name', section='section' ) def remap(students): """Convert mit.edu domain to upper-case for student emails. The mit.edu domain for user email must be upper-case, i.e. MIT.EDU. Args: students (list): list of students Returns: dict: dictionary of updated student email domains """ newx = dict((student_map[k], students[k]) for k in student_map) # match certs newx['email'] = newx['email'].replace('@mit.edu', '@MIT.EDU') return newx return [remap(x) for x in student_data['data']] return student_data['data']
[ "def", "get_students", "(", "self", ",", "gradebook_id", "=", "''", ",", "simple", "=", "False", ",", "section_name", "=", "''", ",", "include_photo", "=", "False", ",", "include_grade_info", "=", "False", ",", "include_grade_history", "=", "False", ",", "include_makeup_grades", "=", "False", ")", ":", "# These are parameters required for the remote API call, so", "# there aren't too many arguments, or too many variables", "# pylint: disable=too-many-arguments,too-many-locals", "# Set params by arguments", "params", "=", "dict", "(", "includePhoto", "=", "json", ".", "dumps", "(", "include_photo", ")", ",", "includeGradeInfo", "=", "json", ".", "dumps", "(", "include_grade_info", ")", ",", "includeGradeHistory", "=", "json", ".", "dumps", "(", "include_grade_history", ")", ",", "includeMakeupGrades", "=", "json", ".", "dumps", "(", "include_makeup_grades", ")", ",", ")", "url", "=", "'students/{gradebookId}'", "if", "section_name", ":", "group_id", ",", "_", "=", "self", ".", "get_section_by_name", "(", "section_name", ")", "if", "group_id", "is", "None", ":", "failure_message", "=", "(", "'in get_students -- Error: '", "'No such section %s'", "%", "section_name", ")", "log", ".", "critical", "(", "failure_message", ")", "raise", "PyLmodNoSuchSection", "(", "failure_message", ")", "url", "+=", "'/section/{0}'", ".", "format", "(", "group_id", ")", "student_data", "=", "self", ".", "get", "(", "url", ".", "format", "(", "gradebookId", "=", "gradebook_id", "or", "self", ".", "gradebook_id", ")", ",", "params", "=", "params", ",", ")", "if", "simple", ":", "# just return dict with keys email, name, section", "student_map", "=", "dict", "(", "accountEmail", "=", "'email'", ",", "displayName", "=", "'name'", ",", "section", "=", "'section'", ")", "def", "remap", "(", "students", ")", ":", "\"\"\"Convert mit.edu domain to upper-case for student emails.\n\n The mit.edu domain for user email must be upper-case,\n i.e. MIT.EDU.\n\n Args:\n students (list): list of students\n\n Returns:\n dict: dictionary of updated student email domains\n \"\"\"", "newx", "=", "dict", "(", "(", "student_map", "[", "k", "]", ",", "students", "[", "k", "]", ")", "for", "k", "in", "student_map", ")", "# match certs", "newx", "[", "'email'", "]", "=", "newx", "[", "'email'", "]", ".", "replace", "(", "'@mit.edu'", ",", "'@MIT.EDU'", ")", "return", "newx", "return", "[", "remap", "(", "x", ")", "for", "x", "in", "student_data", "[", "'data'", "]", "]", "return", "student_data", "[", "'data'", "]" ]
Get students for a gradebook. Get a list of students for a given gradebook, specified by a gradebook id. Does not include grade data. Args: gradebook_id (str): unique identifier for gradebook, i.e. ``2314`` simple (bool): if ``True``, just return dictionary with keys ``email``, ``name``, ``section``, default = ``False`` section_name (str): section name include_photo (bool): include student photo, default= ``False`` include_grade_info (bool): include student's grade info, default= ``False`` include_grade_history (bool): include student's grade history, default= ``False`` include_makeup_grades (bool): include student's makeup grades, default= ``False`` Raises: requests.RequestException: Exception connection error ValueError: Unable to decode response content Returns: list: list of student dictionaries .. code-block:: python [{ u'accountEmail': u'[email protected]', u'displayName': u'Molly Parker', u'photoUrl': None, u'middleName': None, u'section': u'Unassigned', u'sectionId': 1293925, u'editable': False, u'overallGradeInformation': None, u'studentId': 1145, u'studentAssignmentInfo': None, u'sortableName': u'Parker, Molly', u'surname': u'Parker', u'givenName': u'Molly', u'nickName': u'Molly', u'email': u'[email protected]' },]
[ "Get", "students", "for", "a", "gradebook", "." ]
python
train
35.12931
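The simple=True reshaping inside the get_students record above can be exercised standalone; the student dict below is made up, and remap is re-stated outside the class purely for the demo.

student_map = dict(accountEmail='email', displayName='name', section='section')

def remap(student):
    # Same reshaping as the simple=True branch above: keep three keys and
    # upper-case the mit.edu mail domain to match certs.
    out = {student_map[k]: student[k] for k in student_map}
    out['email'] = out['email'].replace('@mit.edu', '@MIT.EDU')
    return out

raw = {'accountEmail': '[email protected]', 'displayName': 'Molly Parker',
       'section': 'Unassigned', 'studentId': 1145}
print(remap(raw))
# {'email': '[email protected]', 'name': 'Molly Parker', 'section': 'Unassigned'}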
dsoprea/PySchedules
pyschedules/xml_callbacks.py
https://github.com/dsoprea/PySchedules/blob/e5aae988fad90217f72db45f93bf69839f4d75e7/pyschedules/xml_callbacks.py#L268-L275
def _startGenresNode(self, name, attrs): """Process the start of a node under xtvd/genres""" if name == 'programGenre': self._programId = attrs.get('program') elif name == 'genre': self._genre = None self._relevance = None
[ "def", "_startGenresNode", "(", "self", ",", "name", ",", "attrs", ")", ":", "if", "name", "==", "'programGenre'", ":", "self", ".", "_programId", "=", "attrs", ".", "get", "(", "'program'", ")", "elif", "name", "==", "'genre'", ":", "self", ".", "_genre", "=", "None", "self", ".", "_relevance", "=", "None" ]
Process the start of a node under xtvd/genres
[ "Process", "the", "start", "of", "a", "node", "under", "xtvd", "/", "genres" ]
python
train
34.5
kensho-technologies/graphql-compiler
graphql_compiler/compiler/compiler_frontend.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/compiler_frontend.py#L190-L209
def _get_inline_fragment(ast): """Return the inline fragment at the current AST node, or None if no fragment exists.""" if not ast.selection_set: # There is nothing selected here, so no fragment. return None fragments = [ ast_node for ast_node in ast.selection_set.selections if isinstance(ast_node, InlineFragment) ] if not fragments: return None if len(fragments) > 1: raise GraphQLCompilationError(u'Cannot compile GraphQL with more than one fragment in ' u'a given selection set.') return fragments[0]
[ "def", "_get_inline_fragment", "(", "ast", ")", ":", "if", "not", "ast", ".", "selection_set", ":", "# There is nothing selected here, so no fragment.", "return", "None", "fragments", "=", "[", "ast_node", "for", "ast_node", "in", "ast", ".", "selection_set", ".", "selections", "if", "isinstance", "(", "ast_node", ",", "InlineFragment", ")", "]", "if", "not", "fragments", ":", "return", "None", "if", "len", "(", "fragments", ")", ">", "1", ":", "raise", "GraphQLCompilationError", "(", "u'Cannot compile GraphQL with more than one fragment in '", "u'a given selection set.'", ")", "return", "fragments", "[", "0", "]" ]
Return the inline fragment at the current AST node, or None if no fragment exists.
[ "Return", "the", "inline", "fragment", "at", "the", "current", "AST", "node", "or", "None", "if", "no", "fragment", "exists", "." ]
python
train
30.6
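The "none, one, or error" selection pattern from the _get_inline_fragment record above generalizes; here is a minimal standalone version with a plain predicate in place of the GraphQL AST check.

def get_unique_match(items, predicate):
    # Return the single item satisfying predicate, None if there is none,
    # and raise if the match is ambiguous -- the same contract as
    # _get_inline_fragment, minus the AST specifics.
    matches = [x for x in items if predicate(x)]
    if not matches:
        return None
    if len(matches) > 1:
        raise ValueError('expected at most one match, got %d' % len(matches))
    return matches[0]

assert get_unique_match([1, 2, 3], lambda x: x > 2) == 3
assert get_unique_match([1, 2, 3], lambda x: x > 9) is None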
knipknap/exscript
Exscript/protocols/telnetlib.py
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/protocols/telnetlib.py#L324-L338
def read_some(self): """Read at least one byte of cooked data unless EOF is hit. Return '' if EOF is hit. Block if no data is immediately available. """ self.process_rawq() while self.cookedq.tell() == 0 and not self.eof: self.fill_rawq() self.process_rawq() buf = self.cookedq.getvalue() self.cookedq.seek(0) self.cookedq.truncate() return buf
[ "def", "read_some", "(", "self", ")", ":", "self", ".", "process_rawq", "(", ")", "while", "self", ".", "cookedq", ".", "tell", "(", ")", "==", "0", "and", "not", "self", ".", "eof", ":", "self", ".", "fill_rawq", "(", ")", "self", ".", "process_rawq", "(", ")", "buf", "=", "self", ".", "cookedq", ".", "getvalue", "(", ")", "self", ".", "cookedq", ".", "seek", "(", "0", ")", "self", ".", "cookedq", ".", "truncate", "(", ")", "return", "buf" ]
Read at least one byte of cooked data unless EOF is hit. Return '' if EOF is hit. Block if no data is immediately available.
[ "Read", "at", "least", "one", "byte", "of", "cooked", "data", "unless", "EOF", "is", "hit", "." ]
python
train
29.133333
dwavesystems/dwave-cloud-client
dwave/cloud/client.py
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/client.py#L957-L962
def _submit(self, body, future): """Enqueue a problem for submission to the server. This method is thread safe. """ self._submission_queue.put(self._submit.Message(body, future))
[ "def", "_submit", "(", "self", ",", "body", ",", "future", ")", ":", "self", ".", "_submission_queue", ".", "put", "(", "self", ".", "_submit", ".", "Message", "(", "body", ",", "future", ")", ")" ]
Enqueue a problem for submission to the server. This method is thread safe.
[ "Enqueue", "a", "problem", "for", "submission", "to", "the", "server", "." ]
python
train
34.333333
MartinThoma/mpu
mpu/io.py
https://github.com/MartinThoma/mpu/blob/61bc36d0192ca90c0bcf9b8a5d7d0d8520e20ff6/mpu/io.py#L309-L330
def get_creation_datetime(filepath): """ Get the date that a file was created. Parameters ---------- filepath : str Returns ------- creation_datetime : datetime.datetime or None """ if platform.system() == 'Windows': return datetime.fromtimestamp(os.path.getctime(filepath)) else: stat = os.stat(filepath) try: return datetime.fromtimestamp(stat.st_birthtime) except AttributeError: # We're probably on Linux. No easy way to get creation dates here, # so we'll settle for when its content was last modified. return None
[ "def", "get_creation_datetime", "(", "filepath", ")", ":", "if", "platform", ".", "system", "(", ")", "==", "'Windows'", ":", "return", "datetime", ".", "fromtimestamp", "(", "os", ".", "path", ".", "getctime", "(", "filepath", ")", ")", "else", ":", "stat", "=", "os", ".", "stat", "(", "filepath", ")", "try", ":", "return", "datetime", ".", "fromtimestamp", "(", "stat", ".", "st_birthtime", ")", "except", "AttributeError", ":", "# We're probably on Linux. No easy way to get creation dates here,", "# so we'll settle for when its content was last modified.", "return", "None" ]
Get the date that a file was created. Parameters ---------- filepath : str Returns ------- creation_datetime : datetime.datetime or None
[ "Get", "the", "date", "that", "a", "file", "was", "created", "." ]
python
train
28.363636
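A quick, self-contained exercise of the get_creation_datetime record above; per its branches, expect a datetime on Windows (getctime) or macOS (st_birthtime) and None on typical Linux filesystems. Only the function plus the standard library is assumed.

import os
import tempfile

fd, path = tempfile.mkstemp()
os.close(fd)
try:
    created = get_creation_datetime(path)
    print(created)   # datetime.datetime on Windows/macOS, None on typical Linux
finally:
    os.remove(path)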
KnowledgeLinks/rdfframework
rdfframework/utilities/statistics.py
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/utilities/statistics.py#L144-L157
def update_counts(self, current): """ updates counts for the class instance based on the current dictionary counts args: ----- current: current dictionary counts """ for item in current: try: self.counts[item] += 1 except KeyError: self.counts[item] = 1
[ "def", "update_counts", "(", "self", ",", "current", ")", ":", "for", "item", "in", "current", ":", "try", ":", "self", ".", "counts", "[", "item", "]", "+=", "1", "except", "KeyError", ":", "self", ".", "counts", "[", "item", "]", "=", "1" ]
updates counts for the class instance based on the current dictionary counts args: ----- current: current dictionary counts
[ "updates", "counts", "for", "the", "class", "instance", "based", "on", "the", "current", "dictionary", "counts" ]
python
train
26.071429
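The try/except tally in the update_counts record above is equivalent to what collections.Counter does; a standalone comparison with made-up keys:

from collections import Counter

counts = {}
for item in ['bf_Person', 'bf_Work', 'bf_Person']:   # made-up RDF-ish keys
    try:
        counts[item] += 1
    except KeyError:
        counts[item] = 1

# Counter produces the same mapping in one call.
assert counts == Counter(['bf_Person', 'bf_Work', 'bf_Person'])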
kunitoki/django-custard
custard/builder.py
https://github.com/kunitoki/django-custard/blob/3cf3aa5acf84de2f653e96469e2f9c42813df50a/custard/builder.py#L279-L331
def create_mixin(self): """ This will create the custom Model Mixin to attach to your custom field enabled model. :return: """ _builder = self class CustomModelMixin(object): @cached_property def _content_type(self): return ContentType.objects.get_for_model(self) @classmethod def get_model_custom_fields(cls): """ Return a list of custom fields for this model, callable at model level """ return _builder.fields_model_class.objects.filter(content_type=ContentType.objects.get_for_model(cls)) def get_custom_fields(self): """ Return a list of custom fields for this model """ return _builder.fields_model_class.objects.filter(content_type=self._content_type) def get_custom_value(self, field): """ Get a value for a specified custom field """ return _builder.values_model_class.objects.get(custom_field=field, content_type=self._content_type, object_id=self.pk) def set_custom_value(self, field, value): """ Set a value for a specified custom field """ custom_value, created = \ _builder.values_model_class.objects.get_or_create(custom_field=field, content_type=self._content_type, object_id=self.pk) custom_value.value = value custom_value.full_clean() custom_value.save() return custom_value #def __getattr__(self, name): # """ Get a value for a specified custom field """ # try: # obj = _builder.values_model_class.objects.get(custom_field__name=name, # content_type=self._content_type, # object_id=self.pk) # return obj.value # except ObjectDoesNotExist: # pass # return super(CustomModelMixin, self).__getattr__(name) return CustomModelMixin
[ "def", "create_mixin", "(", "self", ")", ":", "_builder", "=", "self", "class", "CustomModelMixin", "(", "object", ")", ":", "@", "cached_property", "def", "_content_type", "(", "self", ")", ":", "return", "ContentType", ".", "objects", ".", "get_for_model", "(", "self", ")", "@", "classmethod", "def", "get_model_custom_fields", "(", "cls", ")", ":", "\"\"\" Return a list of custom fields for this model, callable at model level \"\"\"", "return", "_builder", ".", "fields_model_class", ".", "objects", ".", "filter", "(", "content_type", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "cls", ")", ")", "def", "get_custom_fields", "(", "self", ")", ":", "\"\"\" Return a list of custom fields for this model \"\"\"", "return", "_builder", ".", "fields_model_class", ".", "objects", ".", "filter", "(", "content_type", "=", "self", ".", "_content_type", ")", "def", "get_custom_value", "(", "self", ",", "field", ")", ":", "\"\"\" Get a value for a specified custom field \"\"\"", "return", "_builder", ".", "values_model_class", ".", "objects", ".", "get", "(", "custom_field", "=", "field", ",", "content_type", "=", "self", ".", "_content_type", ",", "object_id", "=", "self", ".", "pk", ")", "def", "set_custom_value", "(", "self", ",", "field", ",", "value", ")", ":", "\"\"\" Set a value for a specified custom field \"\"\"", "custom_value", ",", "created", "=", "_builder", ".", "values_model_class", ".", "objects", ".", "get_or_create", "(", "custom_field", "=", "field", ",", "content_type", "=", "self", ".", "_content_type", ",", "object_id", "=", "self", ".", "pk", ")", "custom_value", ".", "value", "=", "value", "custom_value", ".", "full_clean", "(", ")", "custom_value", ".", "save", "(", ")", "return", "custom_value", "#def __getattr__(self, name):", "# \"\"\" Get a value for a specified custom field \"\"\"", "# try:", "# obj = _builder.values_model_class.objects.get(custom_field__name=name,", "# content_type=self._content_type,", "# object_id=self.pk)", "# return obj.value", "# except ObjectDoesNotExist:", "# pass", "# return super(CustomModelMixin, self).__getattr__(name)", "return", "CustomModelMixin" ]
This will create the custom Model Mixin to attach to your custom field enabled model. :return:
[ "This", "will", "create", "the", "custom", "Model", "Mixin", "to", "attach", "to", "your", "custom", "field", "enabled", "model", "." ]
python
train
45.358491
saltstack/salt
salt/modules/dockercompose.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dockercompose.py#L266-L280
def __dump_docker_compose(path, content, already_existed):
    '''
    Dump content as YAML into the docker-compose file at path.

    :param path: path of the docker-compose file to write
    :param content: the not-yet dumped content
    :return: a standardized result dict
    '''
    try:
        dumped = yaml.safe_dump(content, indent=2, default_flow_style=False)
        return __write_docker_compose(path, dumped, already_existed)
    except TypeError as t_err:
        msg = 'Could not dump {0} {1}'.format(content, t_err)
        return __standardize_result(False, msg, None, None)
[ "def", "__dump_docker_compose", "(", "path", ",", "content", ",", "already_existed", ")", ":", "try", ":", "dumped", "=", "yaml", ".", "safe_dump", "(", "content", ",", "indent", "=", "2", ",", "default_flow_style", "=", "False", ")", "return", "__write_docker_compose", "(", "path", ",", "dumped", ",", "already_existed", ")", "except", "TypeError", "as", "t_err", ":", "msg", "=", "'Could not dump {0} {1}'", ".", "format", "(", "content", ",", "t_err", ")", "return", "__standardize_result", "(", "False", ",", "msg", ",", "None", ",", "None", ")" ]
Dump content as YAML into the docker-compose file at path.

    :param path: path of the docker-compose file to write
    :param content: the not-yet dumped content
    :return: a standardized result dict
[ "Dumps" ]
python
train
32.8
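What yaml.safe_dump(indent=2, default_flow_style=False), as used in the __dump_docker_compose record above, actually emits; this assumes PyYAML is installed, and the compose mapping is invented for the demo.

import yaml  # PyYAML

content = {'version': '2', 'services': {'web': {'image': 'nginx'}}}
print(yaml.safe_dump(content, indent=2, default_flow_style=False))
# services:
#   web:
#     image: nginx
# version: '2'
#
# Block style, one key per line, 2-space indent; keys are sorted by default,
# and the string '2' is quoted so it round-trips as a string, not an int.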
watson-developer-cloud/python-sdk
ibm_watson/assistant_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/assistant_v1.py#L5552-L5592
def _from_dict(cls, _dict): """Initialize a Log object from a json dictionary.""" args = {} if 'request' in _dict: args['request'] = MessageRequest._from_dict(_dict.get('request')) else: raise ValueError( 'Required property \'request\' not present in Log JSON') if 'response' in _dict: args['response'] = MessageResponse._from_dict(_dict.get('response')) else: raise ValueError( 'Required property \'response\' not present in Log JSON') if 'log_id' in _dict: args['log_id'] = _dict.get('log_id') else: raise ValueError( 'Required property \'log_id\' not present in Log JSON') if 'request_timestamp' in _dict: args['request_timestamp'] = _dict.get('request_timestamp') else: raise ValueError( 'Required property \'request_timestamp\' not present in Log JSON' ) if 'response_timestamp' in _dict: args['response_timestamp'] = _dict.get('response_timestamp') else: raise ValueError( 'Required property \'response_timestamp\' not present in Log JSON' ) if 'workspace_id' in _dict: args['workspace_id'] = _dict.get('workspace_id') else: raise ValueError( 'Required property \'workspace_id\' not present in Log JSON') if 'language' in _dict: args['language'] = _dict.get('language') else: raise ValueError( 'Required property \'language\' not present in Log JSON') return cls(**args)
[ "def", "_from_dict", "(", "cls", ",", "_dict", ")", ":", "args", "=", "{", "}", "if", "'request'", "in", "_dict", ":", "args", "[", "'request'", "]", "=", "MessageRequest", ".", "_from_dict", "(", "_dict", ".", "get", "(", "'request'", ")", ")", "else", ":", "raise", "ValueError", "(", "'Required property \\'request\\' not present in Log JSON'", ")", "if", "'response'", "in", "_dict", ":", "args", "[", "'response'", "]", "=", "MessageResponse", ".", "_from_dict", "(", "_dict", ".", "get", "(", "'response'", ")", ")", "else", ":", "raise", "ValueError", "(", "'Required property \\'response\\' not present in Log JSON'", ")", "if", "'log_id'", "in", "_dict", ":", "args", "[", "'log_id'", "]", "=", "_dict", ".", "get", "(", "'log_id'", ")", "else", ":", "raise", "ValueError", "(", "'Required property \\'log_id\\' not present in Log JSON'", ")", "if", "'request_timestamp'", "in", "_dict", ":", "args", "[", "'request_timestamp'", "]", "=", "_dict", ".", "get", "(", "'request_timestamp'", ")", "else", ":", "raise", "ValueError", "(", "'Required property \\'request_timestamp\\' not present in Log JSON'", ")", "if", "'response_timestamp'", "in", "_dict", ":", "args", "[", "'response_timestamp'", "]", "=", "_dict", ".", "get", "(", "'response_timestamp'", ")", "else", ":", "raise", "ValueError", "(", "'Required property \\'response_timestamp\\' not present in Log JSON'", ")", "if", "'workspace_id'", "in", "_dict", ":", "args", "[", "'workspace_id'", "]", "=", "_dict", ".", "get", "(", "'workspace_id'", ")", "else", ":", "raise", "ValueError", "(", "'Required property \\'workspace_id\\' not present in Log JSON'", ")", "if", "'language'", "in", "_dict", ":", "args", "[", "'language'", "]", "=", "_dict", ".", "get", "(", "'language'", ")", "else", ":", "raise", "ValueError", "(", "'Required property \\'language\\' not present in Log JSON'", ")", "return", "cls", "(", "*", "*", "args", ")" ]
Initialize a Log object from a json dictionary.
[ "Initialize", "a", "Log", "object", "from", "a", "json", "dictionary", "." ]
python
train
40.878049
DataDog/integrations-core
docker_daemon/datadog_checks/docker_daemon/docker_daemon.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/docker_daemon/datadog_checks/docker_daemon/docker_daemon.py#L623-L637
def _report_container_count(self, containers_by_id): """Report container count per state""" m_func = FUNC_MAP[GAUGE][self.use_histogram] per_state_count = defaultdict(int) filterlambda = lambda ctr: not self._is_container_excluded(ctr) containers = list(filter(filterlambda, containers_by_id.values())) for ctr in containers: per_state_count[ctr.get('State', '')] += 1 for state in per_state_count: if state: m_func(self, 'docker.container.count', per_state_count[state], tags=['container_state:%s' % state.lower()])
[ "def", "_report_container_count", "(", "self", ",", "containers_by_id", ")", ":", "m_func", "=", "FUNC_MAP", "[", "GAUGE", "]", "[", "self", ".", "use_histogram", "]", "per_state_count", "=", "defaultdict", "(", "int", ")", "filterlambda", "=", "lambda", "ctr", ":", "not", "self", ".", "_is_container_excluded", "(", "ctr", ")", "containers", "=", "list", "(", "filter", "(", "filterlambda", ",", "containers_by_id", ".", "values", "(", ")", ")", ")", "for", "ctr", "in", "containers", ":", "per_state_count", "[", "ctr", ".", "get", "(", "'State'", ",", "''", ")", "]", "+=", "1", "for", "state", "in", "per_state_count", ":", "if", "state", ":", "m_func", "(", "self", ",", "'docker.container.count'", ",", "per_state_count", "[", "state", "]", ",", "tags", "=", "[", "'container_state:%s'", "%", "state", ".", "lower", "(", ")", "]", ")" ]
Report container count per state
[ "Report", "container", "count", "per", "state" ]
python
train
40.133333
fulfilio/python-magento
magento/sales.py
https://github.com/fulfilio/python-magento/blob/720ec136a6e438a9ee4ee92848a9820b91732750/magento/sales.py#L262-L284
def create(self, order_increment_id, items_qty, comment=None, email=True, include_comment=False): """ Create new shipment for order :param order_increment_id: Order Increment ID :type order_increment_id: str :param items_qty: items qty to ship :type items_qty: associative array (order_item_id ⇒ qty) as dict :param comment: Shipment Comment :type comment: str :param email: send e-mail flag (optional) :type email: bool :param include_comment: include comment in e-mail flag (optional) :type include_comment: bool """ if comment is None: comment = '' return self.call( 'sales_order_shipment.create', [ order_increment_id, items_qty, comment, email, include_comment ] )
[ "def", "create", "(", "self", ",", "order_increment_id", ",", "items_qty", ",", "comment", "=", "None", ",", "email", "=", "True", ",", "include_comment", "=", "False", ")", ":", "if", "comment", "is", "None", ":", "comment", "=", "''", "return", "self", ".", "call", "(", "'sales_order_shipment.create'", ",", "[", "order_increment_id", ",", "items_qty", ",", "comment", ",", "email", ",", "include_comment", "]", ")" ]
Create new shipment for order :param order_increment_id: Order Increment ID :type order_increment_id: str :param items_qty: items qty to ship :type items_qty: associative array (order_item_id ⇒ qty) as dict :param comment: Shipment Comment :type comment: str :param email: send e-mail flag (optional) :type email: bool :param include_comment: include comment in e-mail flag (optional) :type include_comment: bool
[ "Create", "new", "shipment", "for", "order" ]
python
train
36.521739
TUNE-Archive/freight_forwarder
freight_forwarder/container/container.py
https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/container/container.py#L341-L426
def _find_by_id(self, id): """ Expected response: { "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", "Created": "2013-05-07T14:51:42.041847+02:00", "Path": "date", "Args": [], "Config": { "Hostname": "4fa6e0f0c678", "User": "", "Memory": 0, "MemorySwap": 0, "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "PortSpecs": null, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": null, "Cmd": [ "date" ], "Dns": null, "Image": "base", "Volumes": {}, "VolumesFrom": "", "WorkingDir":"" }, "State": { "Running": false, "Pid": 0, "ExitCode": 0, "StartedAt": "2013-05-07T14:51:42.087658+02:01360", "Ghost": false }, "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", "NetworkSettings": { "IpAddress": "", "IpPrefixLen": 0, "Gateway": "", "Bridge": "", "PortMapping": null }, "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", "ResolvConfPath": "/etc/resolv.conf", "Volumes": {}, "HostConfig": { "Binds": null, "ContainerIDFile": "", "LxcConf": [], "Privileged": false, "PortBindings": { "80/tcp": [ { "HostIp": "0.0.0.0", "HostPort": "49153" } ] }, "Links": ["/name:alias"], "PublishAllPorts": false, "CapAdd: ["NET_ADMIN"], "CapDrop: ["MKNOD"] } } """ if not isinstance(id, six.string_types): raise TypeError('must supply a string as the id') # TODO: We should probably catch container not found error and return out own errors. response = normalize_keys(self.client.inspect_container(id)) # TODO: normalize response to change - to _ self.id = response['id'] self.name = response['name'].replace('/', '') self.image = response['image'] # come back and figure the timezone stuff out later. self.created_at = dateutil.parser.parse(response['created'], ignoretz=True) self.config = ContainerConfig(response['config']) self.host_config = HostConfig(response['host_config']) if self._transcribe: self.start_transcribing()
[ "def", "_find_by_id", "(", "self", ",", "id", ")", ":", "if", "not", "isinstance", "(", "id", ",", "six", ".", "string_types", ")", ":", "raise", "TypeError", "(", "'must supply a string as the id'", ")", "# TODO: We should probably catch container not found error and return out own errors.", "response", "=", "normalize_keys", "(", "self", ".", "client", ".", "inspect_container", "(", "id", ")", ")", "# TODO: normalize response to change - to _", "self", ".", "id", "=", "response", "[", "'id'", "]", "self", ".", "name", "=", "response", "[", "'name'", "]", ".", "replace", "(", "'/'", ",", "''", ")", "self", ".", "image", "=", "response", "[", "'image'", "]", "# come back and figure the timezone stuff out later.", "self", ".", "created_at", "=", "dateutil", ".", "parser", ".", "parse", "(", "response", "[", "'created'", "]", ",", "ignoretz", "=", "True", ")", "self", ".", "config", "=", "ContainerConfig", "(", "response", "[", "'config'", "]", ")", "self", ".", "host_config", "=", "HostConfig", "(", "response", "[", "'host_config'", "]", ")", "if", "self", ".", "_transcribe", ":", "self", ".", "start_transcribing", "(", ")" ]
Expected response: { "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", "Created": "2013-05-07T14:51:42.041847+02:00", "Path": "date", "Args": [], "Config": { "Hostname": "4fa6e0f0c678", "User": "", "Memory": 0, "MemorySwap": 0, "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "PortSpecs": null, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": null, "Cmd": [ "date" ], "Dns": null, "Image": "base", "Volumes": {}, "VolumesFrom": "", "WorkingDir":"" }, "State": { "Running": false, "Pid": 0, "ExitCode": 0, "StartedAt": "2013-05-07T14:51:42.087658+02:01360", "Ghost": false }, "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", "NetworkSettings": { "IpAddress": "", "IpPrefixLen": 0, "Gateway": "", "Bridge": "", "PortMapping": null }, "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", "ResolvConfPath": "/etc/resolv.conf", "Volumes": {}, "HostConfig": { "Binds": null, "ContainerIDFile": "", "LxcConf": [], "Privileged": false, "PortBindings": { "80/tcp": [ { "HostIp": "0.0.0.0", "HostPort": "49153" } ] }, "Links": ["/name:alias"], "PublishAllPorts": false, "CapAdd: ["NET_ADMIN"], "CapDrop: ["MKNOD"] } }
[ "Expected", "response", ":", "{", "Id", ":", "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", "Created", ":", "2013", "-", "05", "-", "07T14", ":", "51", ":", "42", ".", "041847", "+", "02", ":", "00", "Path", ":", "date", "Args", ":", "[]", "Config", ":", "{", "Hostname", ":", "4fa6e0f0c678", "User", ":", "Memory", ":", "0", "MemorySwap", ":", "0", "AttachStdin", ":", "false", "AttachStdout", ":", "true", "AttachStderr", ":", "true", "PortSpecs", ":", "null", "Tty", ":", "false", "OpenStdin", ":", "false", "StdinOnce", ":", "false", "Env", ":", "null", "Cmd", ":", "[", "date", "]", "Dns", ":", "null", "Image", ":", "base", "Volumes", ":", "{}", "VolumesFrom", ":", "WorkingDir", ":" ]
python
train
42.895349
sassoo/goldman
goldman/resources/model.py
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/resources/model.py#L42-L55
def on_get(resc, req, resp, rid): """ Find the model by id & serialize it back """ signals.pre_req.send(resc.model) signals.pre_req_find.send(resc.model) model = find(resc.model, rid) props = to_rest_model(model, includes=req.includes) resp.last_modified = model.updated resp.serialize(props) signals.post_req.send(resc.model) signals.post_req_find.send(resc.model)
[ "def", "on_get", "(", "resc", ",", "req", ",", "resp", ",", "rid", ")", ":", "signals", ".", "pre_req", ".", "send", "(", "resc", ".", "model", ")", "signals", ".", "pre_req_find", ".", "send", "(", "resc", ".", "model", ")", "model", "=", "find", "(", "resc", ".", "model", ",", "rid", ")", "props", "=", "to_rest_model", "(", "model", ",", "includes", "=", "req", ".", "includes", ")", "resp", ".", "last_modified", "=", "model", ".", "updated", "resp", ".", "serialize", "(", "props", ")", "signals", ".", "post_req", ".", "send", "(", "resc", ".", "model", ")", "signals", ".", "post_req_find", ".", "send", "(", "resc", ".", "model", ")" ]
Find the model by id & serialize it back
[ "Find", "the", "model", "by", "id", "&", "serialize", "it", "back" ]
python
train
28
twneale/visitors
visitors/ext/pyast.py
https://github.com/twneale/visitors/blob/17a2759fb0ddc0a039cf42e1bbb053295b3b2445/visitors/ext/pyast.py#L26-L55
def from_ast( pyast_node, node=None, node_cls=None, Node=Node, iter_fields=ast.iter_fields, AST=ast.AST): '''Convert the ast tree to a tater tree. ''' node_cls = node_cls or Node node = node or node_cls() name = pyast_node.__class__.__name__ attrs = [] for field, value in iter_fields(pyast_node): if name == 'Dict': for key, value in zip(pyast_node.keys, pyast_node.values): if isinstance(value, list): for item in value: if isinstance(item, AST): value = from_ast(item) elif isinstance(value, AST): value = from_ast(value) attrs.append((key.s, value)) else: if isinstance(value, list): for item in value: if isinstance(item, AST): value = from_ast(item) elif isinstance(value, AST): value = from_ast(value) attrs.append((field, value)) node.update(attrs, type=name) return node
[ "def", "from_ast", "(", "pyast_node", ",", "node", "=", "None", ",", "node_cls", "=", "None", ",", "Node", "=", "Node", ",", "iter_fields", "=", "ast", ".", "iter_fields", ",", "AST", "=", "ast", ".", "AST", ")", ":", "node_cls", "=", "node_cls", "or", "Node", "node", "=", "node", "or", "node_cls", "(", ")", "name", "=", "pyast_node", ".", "__class__", ".", "__name__", "attrs", "=", "[", "]", "for", "field", ",", "value", "in", "iter_fields", "(", "pyast_node", ")", ":", "if", "name", "==", "'Dict'", ":", "for", "key", ",", "value", "in", "zip", "(", "pyast_node", ".", "keys", ",", "pyast_node", ".", "values", ")", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "for", "item", "in", "value", ":", "if", "isinstance", "(", "item", ",", "AST", ")", ":", "value", "=", "from_ast", "(", "item", ")", "elif", "isinstance", "(", "value", ",", "AST", ")", ":", "value", "=", "from_ast", "(", "value", ")", "attrs", ".", "append", "(", "(", "key", ".", "s", ",", "value", ")", ")", "else", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "for", "item", "in", "value", ":", "if", "isinstance", "(", "item", ",", "AST", ")", ":", "value", "=", "from_ast", "(", "item", ")", "elif", "isinstance", "(", "value", ",", "AST", ")", ":", "value", "=", "from_ast", "(", "value", ")", "attrs", ".", "append", "(", "(", "field", ",", "value", ")", ")", "node", ".", "update", "(", "attrs", ",", "type", "=", "name", ")", "return", "node" ]
Convert the ast tree to a tater tree.
[ "Convert", "the", "ast", "tree", "to", "a", "tater", "tree", "." ]
python
train
36.1
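The ast.iter_fields walk that the from_ast record above is built on, in isolation; this also shows the Dict case (keys and values as parallel lists) that the function special-cases. Python 3.8+ is assumed, where literals parse as Constant nodes.

import ast

node = ast.parse('{1: "a", 2: "b"}', mode='eval').body   # an ast.Dict node

for field, value in ast.iter_fields(node):
    # Each field value is either a list of AST nodes, a single AST node,
    # or a plain value -- the three cases from_ast branches on.
    print(field, [type(v).__name__ for v in value])
# keys ['Constant', 'Constant']
# values ['Constant', 'Constant']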
log2timeline/dftimewolf
dftimewolf/config.py
https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/config.py#L19-L33
def get_extra(cls, name=None): """Gets extra configuration parameters. These parameters should be loaded through load_extra or load_extra_data. Args: name: str, the name of the configuration data to load. Returns: A dictionary containing the requested configuration data. None if data was never loaded under that name. """ if not name: return cls._extra_config return cls._extra_config.get(name, None)
[ "def", "get_extra", "(", "cls", ",", "name", "=", "None", ")", ":", "if", "not", "name", ":", "return", "cls", ".", "_extra_config", "return", "cls", ".", "_extra_config", ".", "get", "(", "name", ",", "None", ")" ]
Gets extra configuration parameters. These parameters should be loaded through load_extra or load_extra_data. Args: name: str, the name of the configuration data to load. Returns: A dictionary containing the requested configuration data. None if data was never loaded under that name.
[ "Gets", "extra", "configuration", "parameters", "." ]
python
train
29.466667
juju/python-libjuju
juju/model.py
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/model.py#L343-L355
def previous(self):
        """Return a copy of this object as it was at its previous state in
        history.

        Returns None if this object is new (and therefore has no history).

        The returned object is always "disconnected", i.e. does not receive
        live updates.

        """
        return self.model.state.get_entity(
            self.entity_type, self.entity_id, self._history_index - 1,
            connected=False)
[ "def", "previous", "(", "self", ")", ":", "return", "self", ".", "model", ".", "state", ".", "get_entity", "(", "self", ".", "entity_type", ",", "self", ".", "entity_id", ",", "self", ".", "_history_index", "-", "1", ",", "connected", "=", "False", ")" ]
Return a copy of this object as it was at its previous state in history. Returns None if this object is new (and therefore has no history). The returned object is always "disconnected", i.e. does not receive live updates.
[ "Return", "a", "copy", "of", "this", "object", "as", "was", "at", "its", "previous", "state", "in", "history", "." ]
python
train
32.923077
spyder-ide/spyder
spyder/plugins/plots/widgets/figurebrowser.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/plots/widgets/figurebrowser.py#L878-L885
def blink_figure(self):
        """Blink figure once."""
        if self.fig:
            self._blink_flag = not self._blink_flag
            self.repaint()
            if self._blink_flag:
                # singleShot is a static method; no throwaway QTimer instance needed.
                QTimer.singleShot(40, self.blink_figure)
[ "def", "blink_figure", "(", "self", ")", ":", "if", "self", ".", "fig", ":", "self", ".", "_blink_flag", "=", "not", "self", ".", "_blink_flag", "self", ".", "repaint", "(", ")", "if", "self", ".", "_blink_flag", ":", "timer", "=", "QTimer", "(", ")", "timer", ".", "singleShot", "(", "40", ",", "self", ".", "blink_figure", ")" ]
Blink figure once.
[ "Blink", "figure", "once", "." ]
python
train
33.875
ihgazni2/elist
elist/elist.py
https://github.com/ihgazni2/elist/blob/8c07b5029bda34ead60ce10335ceb145f209263c/elist/elist.py#L4179-L4191
def select_loose_in(pl,k):
    '''
        select the elements of pl that loosely contain k

        pl = ['bcd','xabcxx','x','y']
        select_loose_in(pl,'abc')
    '''
    def cond_func(ele,index,k):
        if(type(ele) == type([])):
            cond = loose_in(ele,k)
        else:
            cond = (k in ele)
        return(cond)
    arr = cond_select_values_all2(pl,cond_func=cond_func, cond_func_args =[k])
    return(arr)
[ "def", "select_loose_in", "(", "pl", ",", "k", ")", ":", "def", "cond_func", "(", "ele", ",", "index", ",", "k", ")", ":", "if", "(", "type", "(", "ele", ")", "==", "type", "(", "[", "]", ")", ")", ":", "cond", "=", "loose_in", "(", "ele", ",", "k", ")", "else", ":", "cond", "=", "(", "k", "in", "ele", ")", "return", "(", "cond", ")", "arr", "=", "cond_select_values_all2", "(", "pl", ",", "cond_func", "=", "cond_func", ",", "cond_func_args", "=", "[", "k", "]", ")", "return", "(", "arr", ")" ]
select the elements of pl that loosely contain k

        pl = ['bcd','xabcxx','x','y']
        select_loose_in(pl,'abc')
[ "pl", "=", "[", "bcd", "xabcxx", "x", "y", "]", "select_loose_in", "(", "pl", "abc", ")" ]
python
valid
28
pycontribs/pyrax
pyrax/base_identity.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/base_identity.py#L1058-L1077
def list_roles(self, service_id=None, limit=None, marker=None): """ Returns a list of all global roles for users, optionally limited by service. Pagination can be handled through the standard 'limit' and 'marker' parameters. """ uri = "OS-KSADM/roles" pagination_items = [] if service_id is not None: pagination_items.append("serviceId=%s" % service_id) if limit is not None: pagination_items.append("limit=%s" % limit) if marker is not None: pagination_items.append("marker=%s" % marker) pagination = "&".join(pagination_items) if pagination: uri = "%s?%s" % (uri, pagination) resp, resp_body = self.method_get(uri) roles = resp_body.get("roles", []) return [Role(self, role) for role in roles]
[ "def", "list_roles", "(", "self", ",", "service_id", "=", "None", ",", "limit", "=", "None", ",", "marker", "=", "None", ")", ":", "uri", "=", "\"OS-KSADM/roles\"", "pagination_items", "=", "[", "]", "if", "service_id", "is", "not", "None", ":", "pagination_items", ".", "append", "(", "\"serviceId=%s\"", "%", "service_id", ")", "if", "limit", "is", "not", "None", ":", "pagination_items", ".", "append", "(", "\"limit=%s\"", "%", "limit", ")", "if", "marker", "is", "not", "None", ":", "pagination_items", ".", "append", "(", "\"marker=%s\"", "%", "marker", ")", "pagination", "=", "\"&\"", ".", "join", "(", "pagination_items", ")", "if", "pagination", ":", "uri", "=", "\"%s?%s\"", "%", "(", "uri", ",", "pagination", ")", "resp", ",", "resp_body", "=", "self", ".", "method_get", "(", "uri", ")", "roles", "=", "resp_body", ".", "get", "(", "\"roles\"", ",", "[", "]", ")", "return", "[", "Role", "(", "self", ",", "role", ")", "for", "role", "in", "roles", "]" ]
Returns a list of all global roles for users, optionally limited by service. Pagination can be handled through the standard 'limit' and 'marker' parameters.
[ "Returns", "a", "list", "of", "all", "global", "roles", "for", "users", "optionally", "limited", "by", "service", ".", "Pagination", "can", "be", "handled", "through", "the", "standard", "limit", "and", "marker", "parameters", "." ]
python
train
42.2
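The query-string assembly in the list_roles record above is a small reusable pattern; a standalone restatement follows (the helper name is invented, items are sorted here only for determinism, and a real client would URL-encode the values, e.g. with urllib.parse.urlencode).

def build_query(uri, **params):
    # Append only the parameters that were actually supplied, joined with '&'.
    items = ['%s=%s' % (k, v) for k, v in sorted(params.items()) if v is not None]
    return '%s?%s' % (uri, '&'.join(items)) if items else uri

assert build_query('OS-KSADM/roles') == 'OS-KSADM/roles'
assert build_query('OS-KSADM/roles', limit=10, marker=None) == 'OS-KSADM/roles?limit=10'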
sdispater/poetry
get-poetry.py
https://github.com/sdispater/poetry/blob/2d27acd76c165dd49f11934520a7973de7a3762a/get-poetry.py#L466-L490
def make_lib(self, version): """ Packs everything into a single lib/ directory. """ if os.path.exists(POETRY_LIB_BACKUP): shutil.rmtree(POETRY_LIB_BACKUP) # Backup the current installation if os.path.exists(POETRY_LIB): shutil.copytree(POETRY_LIB, POETRY_LIB_BACKUP) shutil.rmtree(POETRY_LIB) try: self._make_lib(version) except Exception: if not os.path.exists(POETRY_LIB_BACKUP): raise shutil.copytree(POETRY_LIB_BACKUP, POETRY_LIB) shutil.rmtree(POETRY_LIB_BACKUP) raise finally: if os.path.exists(POETRY_LIB_BACKUP): shutil.rmtree(POETRY_LIB_BACKUP)
[ "def", "make_lib", "(", "self", ",", "version", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "POETRY_LIB_BACKUP", ")", ":", "shutil", ".", "rmtree", "(", "POETRY_LIB_BACKUP", ")", "# Backup the current installation", "if", "os", ".", "path", ".", "exists", "(", "POETRY_LIB", ")", ":", "shutil", ".", "copytree", "(", "POETRY_LIB", ",", "POETRY_LIB_BACKUP", ")", "shutil", ".", "rmtree", "(", "POETRY_LIB", ")", "try", ":", "self", ".", "_make_lib", "(", "version", ")", "except", "Exception", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "POETRY_LIB_BACKUP", ")", ":", "raise", "shutil", ".", "copytree", "(", "POETRY_LIB_BACKUP", ",", "POETRY_LIB", ")", "shutil", ".", "rmtree", "(", "POETRY_LIB_BACKUP", ")", "raise", "finally", ":", "if", "os", ".", "path", ".", "exists", "(", "POETRY_LIB_BACKUP", ")", ":", "shutil", ".", "rmtree", "(", "POETRY_LIB_BACKUP", ")" ]
Packs everything into a single lib/ directory.
[ "Packs", "everything", "into", "a", "single", "lib", "/", "directory", "." ]
python
train
29.8
inveniosoftware/invenio-accounts
invenio_accounts/admin.py
https://github.com/inveniosoftware/invenio-accounts/blob/b0d2f0739b00dbefea22ca15d7d374a1b4a63aec/invenio_accounts/admin.py#L193-L199
def delete_model(self, model):
        """Delete a specific session."""
        if SessionActivity.is_current(sid_s=model.sid_s):
            flash('You cannot remove your current session', 'error')
            return
        delete_session(sid_s=model.sid_s)
        db.session.commit()
[ "def", "delete_model", "(", "self", ",", "model", ")", ":", "if", "SessionActivity", ".", "is_current", "(", "sid_s", "=", "model", ".", "sid_s", ")", ":", "flash", "(", "'You could not remove your current session'", ",", "'error'", ")", "return", "delete_session", "(", "sid_s", "=", "model", ".", "sid_s", ")", "db", ".", "session", ".", "commit", "(", ")" ]
Delete a specific session.
[ "Delete", "a", "specific", "session", "." ]
python
train
40.571429
julienc91/utools
utools/files.py
https://github.com/julienc91/utools/blob/6b2f18a5cb30a9349ba25a20c720c737f0683099/utools/files.py#L39-L65
def read_multiple_items(f, container_type, item_type, separator=" "):
    """ Extract an iterable from the current line of a file-like object.

    Args:
        f (file): the file-like object to read from
        container_type (type): type of the iterable that will be returned
        item_type (type): type of the values that will be elements of the returned iterable
        separator (str): the separator between two consecutive items

    Returns:
        The extracted iterable

    Example:
        The file "a.input" contains three lines and three comma-separated digits on each::

            >>> with open("a.input") as f:
            ...     print(utools.files.read_multiple_items(f, list, int, separator=","))
            ...     print(utools.files.read_multiple_items(f, set, str, separator=","))
            ...     print(utools.files.read_multiple_items(f, tuple, float, separator=","))
            ...
            [1, 2, 3]
            {"4", "5", "6"}
            (7.0, 8.0, 9.0)
    """
    return __read(f, lambda line: container_type(item_type(item) for item in line.split(separator)))
[ "def", "read_mutiple_items", "(", "f", ",", "container_type", ",", "item_type", ",", "separator", "=", "\" \"", ")", ":", "return", "__read", "(", "f", ",", "lambda", "line", ":", "container_type", "(", "item_type", "(", "item", ")", "for", "item", "in", "line", ".", "split", "(", "separator", ")", ")", ")" ]
Extract an iterable from the current line of a file-like object. Args: f (file): the file-like object to read from container_type (type): type of the iterable that will be returned item_type (type): type of the values that will be elements of the returned iterable separator (str): the separator between two consecutive items Returns: The extracted iterable Example: The file "a.input" contains three lines and three comma-separated digits on each:: >>> with open("a.input") as f: ... print(utools.files.read_multiple_items(f, list, int, separator=",")) ... print(utools.files.read_multiple_items(f, set, str, separator=",")) ... print(utools.files.read_multiple_items(f, tuple, float, separator=",")) ... [1, 2, 3] {"4", "5", "6"} (7.0, 8.0, 9.0)
[ "Extract", "an", "iterable", "from", "the", "current", "line", "of", "a", "file", "-", "like", "object", "." ]
python
train
40.037037
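The function above delegates to a private __read helper that is not part of this record. A plausible minimal version — an assumption about its shape, not the library's actual code — applies the parser to the next line of the file:

import io

def _read(f, parse):  # hypothetical stand-in for the private __read helper
    line = f.readline()
    if not line:
        raise EOFError("no more lines to read")
    return parse(line.rstrip("\n"))

def read_multiple_items(f, container_type, item_type, separator=" "):
    return _read(f, lambda line: container_type(
        item_type(item) for item in line.split(separator)))

f = io.StringIO("1,2,3\n4,5,6\n")
print(read_multiple_items(f, list, int, separator=","))     # [1, 2, 3]
print(read_multiple_items(f, tuple, float, separator=","))  # (4.0, 5.0, 6.0)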
wakatime/wakatime
wakatime/packages/pygments/formatters/img.py
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/img.py#L500-L509
def _draw_line_numbers(self): """ Create drawables for the line numbers. """ if not self.line_numbers: return for p in xrange(self.maxlineno): n = p + self.line_number_start if (n % self.line_number_step) == 0: self._draw_linenumber(p, n)
[ "def", "_draw_line_numbers", "(", "self", ")", ":", "if", "not", "self", ".", "line_numbers", ":", "return", "for", "p", "in", "xrange", "(", "self", ".", "maxlineno", ")", ":", "n", "=", "p", "+", "self", ".", "line_number_start", "if", "(", "n", "%", "self", ".", "line_number_step", ")", "==", "0", ":", "self", ".", "_draw_linenumber", "(", "p", ",", "n", ")" ]
Create drawables for the line numbers.
[ "Create", "drawables", "for", "the", "line", "numbers", "." ]
python
train
32.1
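The modulo test above is what produces the familiar every-Nth-line gutter: a number gets a drawable only when it is a multiple of line_number_step. A quick stand-alone check of which numbers would be drawn for ten lines numbered from 1 with a step of 5:

line_number_start, line_number_step, maxlineno = 1, 5, 10
drawn = [p + line_number_start
         for p in range(maxlineno)
         if (p + line_number_start) % line_number_step == 0]
print(drawn)  # [5, 10]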
inveniosoftware-contrib/invenio-groups
invenio_groups/models.py
https://github.com/inveniosoftware-contrib/invenio-groups/blob/109481d6b02701db00b72223dd4a65e167c589a6/invenio_groups/models.py#L755-L763
def get(cls, group, admin): """Get specific GroupAdmin object.""" try: ga = cls.query.filter_by( group=group, admin_id=admin.get_id(), admin_type=resolve_admin_type(admin)).one() return ga except Exception: return None
[ "def", "get", "(", "cls", ",", "group", ",", "admin", ")", ":", "try", ":", "ga", "=", "cls", ".", "query", ".", "filter_by", "(", "group", "=", "group", ",", "admin_id", "=", "admin", ".", "get_id", "(", ")", ",", "admin_type", "=", "resolve_admin_type", "(", "admin", ")", ")", ".", "one", "(", ")", "return", "ga", "except", "Exception", ":", "return", "None" ]
Get specific GroupAdmin object.
[ "Get", "specific", "GroupAdmin", "object", "." ]
python
valid
33.555556
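query(...).one() raises when zero rows (or more than one) match, and the broad except above converts that into a plain None. The same get-or-None shape without SQLAlchemy — the list stands in for a query result and is not Invenio code:

def one(rows):
    if len(rows) != 1:
        raise LookupError("expected exactly one row, got %d" % len(rows))
    return rows[0]

def get_or_none(rows):
    try:
        return one(rows)
    except LookupError:
        return None

print(get_or_none([{"admin_id": 7}]))  # {'admin_id': 7}
print(get_or_none([]))                 # None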
praekeltfoundation/marathon-acme
marathon_acme/service.py
https://github.com/praekeltfoundation/marathon-acme/blob/b1b71e3dde0ba30e575089280658bd32890e3325/marathon_acme/service.py#L135-L155
def sync(self): """ Fetch the list of apps from Marathon, find the domains that require certificates, and issue certificates for any domains that don't already have a certificate. """ self.log.info('Starting a sync...') def log_success(result): self.log.info('Sync completed successfully') return result def log_failure(failure): self.log.failure('Sync failed', failure, LogLevel.error) return failure return (self.marathon_client.get_apps() .addCallback(self._apps_acme_domains) .addCallback(self._filter_new_domains) .addCallback(self._issue_certs) .addCallbacks(log_success, log_failure))
[ "def", "sync", "(", "self", ")", ":", "self", ".", "log", ".", "info", "(", "'Starting a sync...'", ")", "def", "log_success", "(", "result", ")", ":", "self", ".", "log", ".", "info", "(", "'Sync completed successfully'", ")", "return", "result", "def", "log_failure", "(", "failure", ")", ":", "self", ".", "log", ".", "failure", "(", "'Sync failed'", ",", "failure", ",", "LogLevel", ".", "error", ")", "return", "failure", "return", "(", "self", ".", "marathon_client", ".", "get_apps", "(", ")", ".", "addCallback", "(", "self", ".", "_apps_acme_domains", ")", ".", "addCallback", "(", "self", ".", "_filter_new_domains", ")", ".", "addCallback", "(", "self", ".", "_issue_certs", ")", ".", "addCallbacks", "(", "log_success", ",", "log_failure", ")", ")" ]
Fetch the list of apps from Marathon, find the domains that require certificates, and issue certificates for any domains that don't already have a certificate.
[ "Fetch", "the", "list", "of", "apps", "from", "Marathon", "find", "the", "domains", "that", "require", "certificates", "and", "issue", "certificates", "for", "any", "domains", "that", "don", "t", "already", "have", "a", "certificate", "." ]
python
valid
36.095238
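sync() above is a Twisted Deferred pipeline: one fetch, two transformations, then a success/failure logger pair attached with addCallbacks. A minimal chain with the same shape (requires Twisted; the step functions are made-up stand-ins, not marathon-acme code):

from twisted.internet.defer import succeed

def get_apps():  # pretend Marathon returned two apps
    return succeed([{"domains": ["a.example", "b.example"]},
                    {"domains": ["b.example"]}])

d = (get_apps()
     .addCallback(lambda apps: [dom for app in apps for dom in app["domains"]])
     .addCallback(lambda domains: sorted(set(domains)))
     .addCallbacks(lambda result: print("synced", result),
                   lambda failure: print("sync failed", failure)))
# succeed() fires synchronously, so this prints: synced ['a.example', 'b.example']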
frostming/atoml
atoml/decoder.py
https://github.com/frostming/atoml/blob/85414ef77777366887a819a05b496d5279296cd2/atoml/decoder.py#L28-L35
def contains_list(longer, shorter):
    """Check if the longer list is strictly longer than, and starts with, the shorter list"""
    if len(longer) <= len(shorter):
        return False
    for a, b in zip(shorter, longer):
        if a != b:
            return False
    return True
[ "def", "contains_list", "(", "longer", ",", "shorter", ")", ":", "if", "len", "(", "longer", ")", "<=", "len", "(", "shorter", ")", ":", "return", "False", "for", "a", ",", "b", "in", "zip", "(", "shorter", ",", "longer", ")", ":", "if", "a", "!=", "b", ":", "return", "False", "return", "True" ]
Check if the longer list is strictly longer than, and starts with, the shorter list
[ "Check", "if", "the", "longer", "list", "is", "strictly", "longer", "than", "and", "starts", "with", "the", "shorter", "list" ]
python
train
29.875
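With contains_list above in scope, a few calls make the contract concrete — the first argument must be strictly longer than, and begin with, the second:

print(contains_list([1, 2, 3], [1, 2]))  # True
print(contains_list([1, 2], [1, 2]))     # False (equal length)
print(contains_list([1, 3, 2], [1, 2]))  # False (second element differs)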
apache/airflow
airflow/jobs.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L2071-L2101
def _task_instances_for_dag_run(self, dag_run, session=None): """ Returns a map of task instance key to task instance object for the tasks to run in the given dag run. :param dag_run: the dag run to get the tasks from :type dag_run: airflow.models.DagRun :param session: the database session object :type session: sqlalchemy.orm.session.Session """ tasks_to_run = {} if dag_run is None: return tasks_to_run # check if we have orphaned tasks self.reset_state_for_orphaned_tasks(filter_by_dag_run=dag_run, session=session) # for some reason if we don't refresh the reference to run is lost dag_run.refresh_from_db() make_transient(dag_run) # TODO(edgarRd): AIRFLOW-1464 change to batch query to improve perf for ti in dag_run.get_task_instances(): # all tasks part of the backfill are scheduled to run if ti.state == State.NONE: ti.set_state(State.SCHEDULED, session=session) if ti.state != State.REMOVED: tasks_to_run[ti.key] = ti return tasks_to_run
[ "def", "_task_instances_for_dag_run", "(", "self", ",", "dag_run", ",", "session", "=", "None", ")", ":", "tasks_to_run", "=", "{", "}", "if", "dag_run", "is", "None", ":", "return", "tasks_to_run", "# check if we have orphaned tasks", "self", ".", "reset_state_for_orphaned_tasks", "(", "filter_by_dag_run", "=", "dag_run", ",", "session", "=", "session", ")", "# for some reason if we don't refresh the reference to run is lost", "dag_run", ".", "refresh_from_db", "(", ")", "make_transient", "(", "dag_run", ")", "# TODO(edgarRd): AIRFLOW-1464 change to batch query to improve perf", "for", "ti", "in", "dag_run", ".", "get_task_instances", "(", ")", ":", "# all tasks part of the backfill are scheduled to run", "if", "ti", ".", "state", "==", "State", ".", "NONE", ":", "ti", ".", "set_state", "(", "State", ".", "SCHEDULED", ",", "session", "=", "session", ")", "if", "ti", ".", "state", "!=", "State", ".", "REMOVED", ":", "tasks_to_run", "[", "ti", ".", "key", "]", "=", "ti", "return", "tasks_to_run" ]
Returns a map of task instance key to task instance object for the tasks to run in the given dag run. :param dag_run: the dag run to get the tasks from :type dag_run: airflow.models.DagRun :param session: the database session object :type session: sqlalchemy.orm.session.Session
[ "Returns", "a", "map", "of", "task", "instance", "key", "to", "task", "instance", "object", "for", "the", "tasks", "to", "run", "in", "the", "given", "dag", "run", "." ]
python
test
37.064516
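The loop above applies two rules per task instance: a state of NONE is promoted to SCHEDULED, and REMOVED tasks are dropped from the map. The same rules without the ORM — plain strings stand in for airflow.utils.state.State:

SCHEDULED, REMOVED = "scheduled", "removed"

def tasks_to_run(task_instances):
    out = {}
    for key, state in task_instances:
        if state is None:          # State.NONE -> schedule it
            state = SCHEDULED
        if state != REMOVED:       # removed tasks never enter the map
            out[key] = state
    return out

print(tasks_to_run([("t1", None), ("t2", "removed"), ("t3", "queued")]))
# {'t1': 'scheduled', 't3': 'queued'}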
cltk/cltk
cltk/corpus/arabic/utils/pyarabic/araby.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/corpus/arabic/utils/pyarabic/araby.py#L904-L945
def waznlike(word1, wazn):
    """If word1 is like a wazn (pattern), the letters must be equal;
    the wazn has FEH, AIN, LAM letters, which act as generic letters.
    The two words can be fully vocalized, or partially vocalized
    @param word1: input word
    @type word1: unicode
    @param wazn: given word template وزن
    @type wazn: unicode
    @return: if two words have similar vocalization
    @rtype: Boolean
    """
    stack1 = stack.Stack(word1)
    stack2 = stack.Stack(wazn)
    root = stack.Stack()
    last1 = stack1.pop()
    last2 = stack2.pop()
    vowels = HARAKAT
    while last1 != None and last2 != None:
        if last1 == last2 and last2 not in (FEH, AIN, LAM):
            last1 = stack1.pop()
            last2 = stack2.pop()
        elif last1 not in vowels and last2 in (FEH, AIN, LAM):
            root.push(last1)
            # ~ print "t"
            last1 = stack1.pop()
            last2 = stack2.pop()
        elif last1 in vowels and last2 not in vowels:
            last1 = stack1.pop()
        elif last1 not in vowels and last2 in vowels:
            last2 = stack2.pop()
        else:
            break
    # reverse the root letters
    root.items.reverse()
    # ~ print " the root is ", root.items#"".join(root.items)
    if not (stack1.is_empty() and stack2.is_empty()):
        return False
    else:
        return True
[ "def", "waznlike", "(", "word1", ",", "wazn", ")", ":", "stack1", "=", "stack", ".", "Stack", "(", "word1", ")", "stack2", "=", "stack", ".", "Stack", "(", "wazn", ")", "root", "=", "stack", ".", "Stack", "(", ")", "last1", "=", "stack1", ".", "pop", "(", ")", "last2", "=", "stack2", ".", "pop", "(", ")", "vowels", "=", "HARAKAT", "while", "last1", "!=", "None", "and", "last2", "!=", "None", ":", "if", "last1", "==", "last2", "and", "last2", "not", "in", "(", "FEH", ",", "AIN", ",", "LAM", ")", ":", "last1", "=", "stack1", ".", "pop", "(", ")", "last2", "=", "stack2", ".", "pop", "(", ")", "elif", "last1", "not", "in", "vowels", "and", "last2", "in", "(", "FEH", ",", "AIN", ",", "LAM", ")", ":", "root", ".", "push", "(", "last1", ")", "# ~ print \"t\"", "last1", "=", "stack1", ".", "pop", "(", ")", "last2", "=", "stack2", ".", "pop", "(", ")", "elif", "last1", "in", "vowels", "and", "last2", "not", "in", "vowels", ":", "last1", "=", "stack1", ".", "pop", "(", ")", "elif", "last1", "not", "in", "vowels", "and", "last2", "in", "vowels", ":", "last2", "=", "stack2", ".", "pop", "(", ")", "else", ":", "break", "# reverse the root letters", "root", ".", "items", ".", "reverse", "(", ")", "# ~ print \" the root is \", root.items#\"\".join(root.items)", "if", "not", "(", "stack1", ".", "is_empty", "(", ")", "and", "stack2", ".", "is_empty", "(", ")", ")", ":", "return", "False", "else", ":", "return", "True" ]
If word1 is like a wazn (pattern), the letters must be equal; the wazn has FEH, AIN, LAM letters, which act as generic letters. The two words can be fully vocalized, or partially vocalized @param word1: input word @type word1: unicode @param wazn: given word template وزن @type wazn: unicode @return: if two words have similar vocalization @rtype: Boolean
[ "If", "word1", "is", "like", "a", "wazn", "(", "pattern", ")", "the", "letters", "must", "be", "equal", ";", "the", "wazn", "has", "FEH", "AIN", "LAM", "letters", "which", "act", "as", "generic", "letters", ".", "The", "two", "words", "can", "be", "fully", "vocalized", "or", "partially", "vocalized" ]
python
train
32
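waznlike above walks both words from the end: equal letters advance both sides, a consonant facing a FEH/AIN/LAM slot is collected as a root letter, and vowels on either side are skipped. The same idea transposed to ASCII, with '*' as the wildcard and lowercase vowels as the skippable marks — an illustration of the algorithm, not the Arabic-specific logic:

def template_like(word, pattern, wildcard="*", skippable="aeiou"):
    i, j, root = len(word) - 1, len(pattern) - 1, []
    while i >= 0 and j >= 0:
        if word[i] == pattern[j] and pattern[j] != wildcard:
            i, j = i - 1, j - 1            # literal letters must match
        elif word[i] not in skippable and pattern[j] == wildcard:
            root.append(word[i])           # consonant fills a wildcard slot
            i, j = i - 1, j - 1
        elif word[i] in skippable:
            i -= 1                         # skip a vowel in the word
        elif pattern[j] in skippable:
            j -= 1                         # skip a vowel in the pattern
        else:
            return False, []
    return i < 0 and j < 0, root[::-1]

print(template_like("katab", "*a*a*"))  # (True, ['k', 't', 'b'])
print(template_like("kitob", "*a*a*"))  # (True, ['k', 't', 'b'])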
Microsoft/LightGBM
python-package/lightgbm/plotting.py
https://github.com/Microsoft/LightGBM/blob/8d2ec69f4f685b0ab1c4624d59ee2d3287bb3147/python-package/lightgbm/plotting.py#L30-L141
def plot_importance(booster, ax=None, height=0.2, xlim=None, ylim=None, title='Feature importance', xlabel='Feature importance', ylabel='Features', importance_type='split', max_num_features=None, ignore_zero=True, figsize=None, grid=True, precision=None, **kwargs): """Plot model's feature importances. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel instance which feature importance should be plotted. ax : matplotlib.axes.Axes or None, optional (default=None) Target axes instance. If None, new figure and axes will be created. height : float, optional (default=0.2) Bar height, passed to ``ax.barh()``. xlim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.xlim()``. ylim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.ylim()``. title : string or None, optional (default="Feature importance") Axes title. If None, title is disabled. xlabel : string or None, optional (default="Feature importance") X-axis title label. If None, title is disabled. ylabel : string or None, optional (default="Features") Y-axis title label. If None, title is disabled. importance_type : string, optional (default="split") How the importance is calculated. If "split", result contains numbers of times the feature is used in a model. If "gain", result contains total gains of splits which use the feature. max_num_features : int or None, optional (default=None) Max number of top features displayed on plot. If None or <1, all features will be displayed. ignore_zero : bool, optional (default=True) Whether to ignore features with zero importance. figsize : tuple of 2 elements or None, optional (default=None) Figure size. grid : bool, optional (default=True) Whether to add a grid for axes. precision : int or None, optional (default=None) Used to restrict the display of floating point values to a certain precision. **kwargs Other parameters passed to ``ax.barh()``. Returns ------- ax : matplotlib.axes.Axes The plot with model's feature importances. """ if MATPLOTLIB_INSTALLED: import matplotlib.pyplot as plt else: raise ImportError('You must install matplotlib to plot importance.') if isinstance(booster, LGBMModel): booster = booster.booster_ elif not isinstance(booster, Booster): raise TypeError('booster must be Booster or LGBMModel.') importance = booster.feature_importance(importance_type=importance_type) feature_name = booster.feature_name() if not len(importance): raise ValueError("Booster's feature_importance is empty.") tuples = sorted(zip_(feature_name, importance), key=lambda x: x[1]) if ignore_zero: tuples = [x for x in tuples if x[1] > 0] if max_num_features is not None and max_num_features > 0: tuples = tuples[-max_num_features:] labels, values = zip_(*tuples) if ax is None: if figsize is not None: _check_not_tuple_of_2_elements(figsize, 'figsize') _, ax = plt.subplots(1, 1, figsize=figsize) ylocs = np.arange(len(values)) ax.barh(ylocs, values, align='center', height=height, **kwargs) for x, y in zip_(values, ylocs): ax.text(x + 1, y, _float2str(x, precision) if importance_type == 'gain' else x, va='center') ax.set_yticks(ylocs) ax.set_yticklabels(labels) if xlim is not None: _check_not_tuple_of_2_elements(xlim, 'xlim') else: xlim = (0, max(values) * 1.1) ax.set_xlim(xlim) if ylim is not None: _check_not_tuple_of_2_elements(ylim, 'ylim') else: ylim = (-1, len(values)) ax.set_ylim(ylim) if title is not None: ax.set_title(title) if xlabel is not None: ax.set_xlabel(xlabel) if ylabel is not None: ax.set_ylabel(ylabel) ax.grid(grid) return ax
[ "def", "plot_importance", "(", "booster", ",", "ax", "=", "None", ",", "height", "=", "0.2", ",", "xlim", "=", "None", ",", "ylim", "=", "None", ",", "title", "=", "'Feature importance'", ",", "xlabel", "=", "'Feature importance'", ",", "ylabel", "=", "'Features'", ",", "importance_type", "=", "'split'", ",", "max_num_features", "=", "None", ",", "ignore_zero", "=", "True", ",", "figsize", "=", "None", ",", "grid", "=", "True", ",", "precision", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "MATPLOTLIB_INSTALLED", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "else", ":", "raise", "ImportError", "(", "'You must install matplotlib to plot importance.'", ")", "if", "isinstance", "(", "booster", ",", "LGBMModel", ")", ":", "booster", "=", "booster", ".", "booster_", "elif", "not", "isinstance", "(", "booster", ",", "Booster", ")", ":", "raise", "TypeError", "(", "'booster must be Booster or LGBMModel.'", ")", "importance", "=", "booster", ".", "feature_importance", "(", "importance_type", "=", "importance_type", ")", "feature_name", "=", "booster", ".", "feature_name", "(", ")", "if", "not", "len", "(", "importance", ")", ":", "raise", "ValueError", "(", "\"Booster's feature_importance is empty.\"", ")", "tuples", "=", "sorted", "(", "zip_", "(", "feature_name", ",", "importance", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ")", "if", "ignore_zero", ":", "tuples", "=", "[", "x", "for", "x", "in", "tuples", "if", "x", "[", "1", "]", ">", "0", "]", "if", "max_num_features", "is", "not", "None", "and", "max_num_features", ">", "0", ":", "tuples", "=", "tuples", "[", "-", "max_num_features", ":", "]", "labels", ",", "values", "=", "zip_", "(", "*", "tuples", ")", "if", "ax", "is", "None", ":", "if", "figsize", "is", "not", "None", ":", "_check_not_tuple_of_2_elements", "(", "figsize", ",", "'figsize'", ")", "_", ",", "ax", "=", "plt", ".", "subplots", "(", "1", ",", "1", ",", "figsize", "=", "figsize", ")", "ylocs", "=", "np", ".", "arange", "(", "len", "(", "values", ")", ")", "ax", ".", "barh", "(", "ylocs", ",", "values", ",", "align", "=", "'center'", ",", "height", "=", "height", ",", "*", "*", "kwargs", ")", "for", "x", ",", "y", "in", "zip_", "(", "values", ",", "ylocs", ")", ":", "ax", ".", "text", "(", "x", "+", "1", ",", "y", ",", "_float2str", "(", "x", ",", "precision", ")", "if", "importance_type", "==", "'gain'", "else", "x", ",", "va", "=", "'center'", ")", "ax", ".", "set_yticks", "(", "ylocs", ")", "ax", ".", "set_yticklabels", "(", "labels", ")", "if", "xlim", "is", "not", "None", ":", "_check_not_tuple_of_2_elements", "(", "xlim", ",", "'xlim'", ")", "else", ":", "xlim", "=", "(", "0", ",", "max", "(", "values", ")", "*", "1.1", ")", "ax", ".", "set_xlim", "(", "xlim", ")", "if", "ylim", "is", "not", "None", ":", "_check_not_tuple_of_2_elements", "(", "ylim", ",", "'ylim'", ")", "else", ":", "ylim", "=", "(", "-", "1", ",", "len", "(", "values", ")", ")", "ax", ".", "set_ylim", "(", "ylim", ")", "if", "title", "is", "not", "None", ":", "ax", ".", "set_title", "(", "title", ")", "if", "xlabel", "is", "not", "None", ":", "ax", ".", "set_xlabel", "(", "xlabel", ")", "if", "ylabel", "is", "not", "None", ":", "ax", ".", "set_ylabel", "(", "ylabel", ")", "ax", ".", "grid", "(", "grid", ")", "return", "ax" ]
Plot model's feature importances. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel instance which feature importance should be plotted. ax : matplotlib.axes.Axes or None, optional (default=None) Target axes instance. If None, new figure and axes will be created. height : float, optional (default=0.2) Bar height, passed to ``ax.barh()``. xlim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.xlim()``. ylim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.ylim()``. title : string or None, optional (default="Feature importance") Axes title. If None, title is disabled. xlabel : string or None, optional (default="Feature importance") X-axis title label. If None, title is disabled. ylabel : string or None, optional (default="Features") Y-axis title label. If None, title is disabled. importance_type : string, optional (default="split") How the importance is calculated. If "split", result contains numbers of times the feature is used in a model. If "gain", result contains total gains of splits which use the feature. max_num_features : int or None, optional (default=None) Max number of top features displayed on plot. If None or <1, all features will be displayed. ignore_zero : bool, optional (default=True) Whether to ignore features with zero importance. figsize : tuple of 2 elements or None, optional (default=None) Figure size. grid : bool, optional (default=True) Whether to add a grid for axes. precision : int or None, optional (default=None) Used to restrict the display of floating point values to a certain precision. **kwargs Other parameters passed to ``ax.barh()``. Returns ------- ax : matplotlib.axes.Axes The plot with model's feature importances.
[ "Plot", "model", "s", "feature", "importances", "." ]
python
train
36.767857
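Typical use of plot_importance above, end to end (requires lightgbm, numpy and matplotlib; the dataset and parameters here are made up for illustration):

import numpy as np
import lightgbm as lgb
import matplotlib.pyplot as plt

np.random.seed(0)
X = np.random.rand(100, 4)
y = (X[:, 0] + X[:, 1] > 1).astype(int)

booster = lgb.train({"objective": "binary", "verbose": -1},
                    lgb.Dataset(X, label=y), num_boost_round=10)
lgb.plot_importance(booster, importance_type="gain", max_num_features=3)
plt.show()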
inasafe/inasafe
safe/gui/tools/help/metadata_converter_help.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/help/metadata_converter_help.py#L19-L31
def metadata_converter_help(): """Help message for metadata converter Dialog. .. versionadded:: 4.3 :returns: A message object containing helpful information. :rtype: messaging.message.Message """ message = m.Message() message.add(m.Brand()) message.add(heading()) message.add(content()) return message
[ "def", "metadata_converter_help", "(", ")", ":", "message", "=", "m", ".", "Message", "(", ")", "message", ".", "add", "(", "m", ".", "Brand", "(", ")", ")", "message", ".", "add", "(", "heading", "(", ")", ")", "message", ".", "add", "(", "content", "(", ")", ")", "return", "message" ]
Help message for metadata converter Dialog. .. versionadded:: 4.3 :returns: A message object containing helpful information. :rtype: messaging.message.Message
[ "Help", "message", "for", "metadata", "converter", "Dialog", "." ]
python
train
25.538462
pygobject/pgi
pgi/overrides/Gtk.py
https://github.com/pygobject/pgi/blob/2090435df6241a15ec2a78379a36b738b728652c/pgi/overrides/Gtk.py#L1625-L1630
def get_previous(self): """Returns the previous :obj:`Gtk.TreeModelRow` or None""" prev_iter = self.model.iter_previous(self.iter) if prev_iter: return TreeModelRow(self.model, prev_iter)
[ "def", "get_previous", "(", "self", ")", ":", "prev_iter", "=", "self", ".", "model", ".", "iter_previous", "(", "self", ".", "iter", ")", "if", "prev_iter", ":", "return", "TreeModelRow", "(", "self", ".", "model", ",", "prev_iter", ")" ]
Returns the previous :obj:`Gtk.TreeModelRow` or None
[ "Returns", "the", "previous", ":", "obj", ":", "Gtk", ".", "TreeModelRow", "or", "None" ]
python
train
36.5
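Because get_previous above returns None at the first row, it composes directly into a backwards walk that terminates on its own. A library-free sketch of the same pattern over a plain list (not PyGObject code):

class Row:
    def __init__(self, items, index):
        self.items, self.index = items, index

    def get_previous(self):
        if self.index > 0:
            return Row(self.items, self.index - 1)
        return None  # mirrors get_previous returning None at the first row

row = Row(["a", "b", "c"], 2)
while row is not None:
    print(row.items[row.index])  # prints c, b, a
    row = row.get_previous()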
DLR-RM/RAFCON
source/rafcon/gui/models/auto_backup.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/models/auto_backup.py#L398-L401
def write_backup_meta_data(self): """Write the auto backup meta data into the current tmp-storage path""" auto_backup_meta_file = os.path.join(self._tmp_storage_path, FILE_NAME_AUTO_BACKUP) storage.storage_utils.write_dict_to_json(self.meta, auto_backup_meta_file)
[ "def", "write_backup_meta_data", "(", "self", ")", ":", "auto_backup_meta_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_tmp_storage_path", ",", "FILE_NAME_AUTO_BACKUP", ")", "storage", ".", "storage_utils", ".", "write_dict_to_json", "(", "self", ".", "meta", ",", "auto_backup_meta_file", ")" ]
Write the auto backup meta data into the current tmp-storage path
[ "Write", "the", "auto", "backup", "meta", "data", "into", "the", "current", "tmp", "-", "storage", "path" ]
python
train
71.25
numenta/htmresearch
projects/sequence_prediction/reberGrammar/reberSequencePrediction_LSTM.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/sequence_prediction/reberGrammar/reberSequencePrediction_LSTM.py#L161-L199
def runExperiment(): """ Experiment 1: Calculate error rate as a function of training sequence numbers :return: """ trainSeqN = [5, 10, 20, 50, 100, 200] rptPerCondition = 5 correctRateAll = np.zeros((len(trainSeqN), rptPerCondition)) missRateAll = np.zeros((len(trainSeqN), rptPerCondition)) fpRateAll = np.zeros((len(trainSeqN), rptPerCondition)) for i in xrange(len(trainSeqN)): for rpt in xrange(rptPerCondition): train_seed = 1 numTrainSequence = trainSeqN[i] net = initializeLSTMnet() net = trainLSTMnet(net, numTrainSequence, seedSeq=train_seed) (correctRate, missRate, fpRate) = testLSTMnet(net, numTestSequence, seedSeq=train_seed+rpt) correctRateAll[i, rpt] = correctRate missRateAll[i, rpt] = missRate fpRateAll[i, rpt] = fpRate np.savez('result/reberSequenceLSTM.npz', correctRateAll=correctRateAll, missRateAll=missRateAll, fpRateAll=fpRateAll, trainSeqN=trainSeqN) plt.figure() plt.subplot(2,2,1) plt.semilogx(trainSeqN, 100*np.mean(correctRateAll,1),'-*') plt.xlabel(' Training Sequence Number') plt.ylabel(' Hit Rate - Best Match (%)') plt.subplot(2,2,2) plt.semilogx(trainSeqN, 100*np.mean(missRateAll,1),'-*') plt.xlabel(' Training Sequence Number') plt.ylabel(' Miss Rate (%)') plt.subplot(2,2,3) plt.semilogx(trainSeqN, 100*np.mean(fpRateAll,1),'-*') plt.xlabel(' Training Sequence Number') plt.ylabel(' False Positive Rate (%)') plt.savefig('result/ReberSequence_LSTMperformance.pdf')
[ "def", "runExperiment", "(", ")", ":", "trainSeqN", "=", "[", "5", ",", "10", ",", "20", ",", "50", ",", "100", ",", "200", "]", "rptPerCondition", "=", "5", "correctRateAll", "=", "np", ".", "zeros", "(", "(", "len", "(", "trainSeqN", ")", ",", "rptPerCondition", ")", ")", "missRateAll", "=", "np", ".", "zeros", "(", "(", "len", "(", "trainSeqN", ")", ",", "rptPerCondition", ")", ")", "fpRateAll", "=", "np", ".", "zeros", "(", "(", "len", "(", "trainSeqN", ")", ",", "rptPerCondition", ")", ")", "for", "i", "in", "xrange", "(", "len", "(", "trainSeqN", ")", ")", ":", "for", "rpt", "in", "xrange", "(", "rptPerCondition", ")", ":", "train_seed", "=", "1", "numTrainSequence", "=", "trainSeqN", "[", "i", "]", "net", "=", "initializeLSTMnet", "(", ")", "net", "=", "trainLSTMnet", "(", "net", ",", "numTrainSequence", ",", "seedSeq", "=", "train_seed", ")", "(", "correctRate", ",", "missRate", ",", "fpRate", ")", "=", "testLSTMnet", "(", "net", ",", "numTestSequence", ",", "seedSeq", "=", "train_seed", "+", "rpt", ")", "correctRateAll", "[", "i", ",", "rpt", "]", "=", "correctRate", "missRateAll", "[", "i", ",", "rpt", "]", "=", "missRate", "fpRateAll", "[", "i", ",", "rpt", "]", "=", "fpRate", "np", ".", "savez", "(", "'result/reberSequenceLSTM.npz'", ",", "correctRateAll", "=", "correctRateAll", ",", "missRateAll", "=", "missRateAll", ",", "fpRateAll", "=", "fpRateAll", ",", "trainSeqN", "=", "trainSeqN", ")", "plt", ".", "figure", "(", ")", "plt", ".", "subplot", "(", "2", ",", "2", ",", "1", ")", "plt", ".", "semilogx", "(", "trainSeqN", ",", "100", "*", "np", ".", "mean", "(", "correctRateAll", ",", "1", ")", ",", "'-*'", ")", "plt", ".", "xlabel", "(", "' Training Sequence Number'", ")", "plt", ".", "ylabel", "(", "' Hit Rate - Best Match (%)'", ")", "plt", ".", "subplot", "(", "2", ",", "2", ",", "2", ")", "plt", ".", "semilogx", "(", "trainSeqN", ",", "100", "*", "np", ".", "mean", "(", "missRateAll", ",", "1", ")", ",", "'-*'", ")", "plt", ".", "xlabel", "(", "' Training Sequence Number'", ")", "plt", ".", "ylabel", "(", "' Miss Rate (%)'", ")", "plt", ".", "subplot", "(", "2", ",", "2", ",", "3", ")", "plt", ".", "semilogx", "(", "trainSeqN", ",", "100", "*", "np", ".", "mean", "(", "fpRateAll", ",", "1", ")", ",", "'-*'", ")", "plt", ".", "xlabel", "(", "' Training Sequence Number'", ")", "plt", ".", "ylabel", "(", "' False Positive Rate (%)'", ")", "plt", ".", "savefig", "(", "'result/ReberSequence_LSTMperformance.pdf'", ")" ]
Experiment 1: Calculate error rate as a function of training sequence numbers :return:
[ "Experiment", "1", ":", "Calculate", "error", "rate", "as", "a", "function", "of", "training", "sequence", "numbers", ":", "return", ":" ]
python
train
38.282051
bitesofcode/projexui
projexui/widgets/xchartwidget/xchartscene.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xchartwidget/xchartscene.py#L347-L353
def leaveEvent( self, event ):
    """
    Hides the tracker item when the mouse leaves the scene.
    """
    item = self.trackerItem()
    if ( item ):
        item.setVisible(False)
[ "def", "leaveEvent", "(", "self", ",", "event", ")", ":", "item", "=", "self", ".", "trackerItem", "(", ")", "if", "(", "item", ")", ":", "item", ".", "setVisible", "(", "False", ")" ]
Hides the tracker item when the mouse leaves the scene.
[ "Hides", "the", "tracker", "item", "when", "the", "mouse", "leaves", "the", "scene", "." ]
python
train
27.714286
boriel/zxbasic
arch/zx48k/backend/__8bit.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__8bit.py#L521-L533
def _gtu8(ins): """ Compares & pops top 2 operands out of the stack, and checks if the 1st operand > 2nd operand (top of the stack). Pushes 0 if False, 1 if True. 8 bit unsigned version """ output = _8bit_oper(ins.quad[2], ins.quad[3], reversed_=True) output.append('cp h') output.append('sbc a, a') output.append('push af') return output
[ "def", "_gtu8", "(", "ins", ")", ":", "output", "=", "_8bit_oper", "(", "ins", ".", "quad", "[", "2", "]", ",", "ins", ".", "quad", "[", "3", "]", ",", "reversed_", "=", "True", ")", "output", ".", "append", "(", "'cp h'", ")", "output", ".", "append", "(", "'sbc a, a'", ")", "output", ".", "append", "(", "'push af'", ")", "return", "output" ]
Compares & pops top 2 operands out of the stack, and checks if the 1st operand > 2nd operand (top of the stack). Pushes 0 if False, 1 if True. 8 bit unsigned version
[ "Compares", "&", "pops", "top", "2", "operands", "out", "of", "the", "stack", "and", "checks", "if", "the", "1st", "operand", ">", "2nd", "operand", "(", "top", "of", "the", "stack", ")", ".", "Pushes", "0", "if", "False", "1", "if", "True", "." ]
python
train
29.230769
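Since the operands are loaded reversed (reversed_=True), the accumulator A ends up holding the second operand and H the first. CP H then sets the carry flag exactly when A < H, i.e. when op1 > op2, and SBC A, A turns that carry into 0xFF or 0x00 — so the "true" value actually pushed is 0xFF rather than a literal 1, with any non-zero value presumably read as true by the backend. A Python emulation of the emitted sequence (the operand order is inferred from reversed_=True):

def gtu8(op1, op2):
    a, h = op2 & 0xFF, op1 & 0xFF  # operands are loaded reversed
    carry = 1 if a < h else 0      # CP H: borrow is set when A < H
    return (a - a - carry) & 0xFF  # SBC A, A: 0xFF if carry, else 0x00

print(gtu8(5, 3))  # 255 -> true, 5 > 3
print(gtu8(3, 5))  # 0   -> false
print(gtu8(4, 4))  # 0   -> false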